From 75caaf32a2b2bb69199480335b0422a08a2d6c6a Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Tue, 22 Jul 2025 23:34:02 -0400 Subject: [PATCH 01/21] refactor: split long functions/methods into smaller functions * Use TokenInfo objects directly instead of tuples with attributes from TokenInfo objects. * Move non-formatting functions to other modules. * Add functions to determine number of blank lines after docstring for modules, classes, functions, and attributes. * Add function to update the token indices after removing unnecessary blank lines. * Add method to insert appropriate number of blank lines after each type of docstring. --- src/docformatter/__init__.py | 11 +- src/docformatter/format.py | 932 +++++++++++----- src/docformatter/syntax.py | 997 ------------------ tests/_data/string_files/format_black.toml | 75 -- tests/_data/string_files/format_code.toml | 324 ------ .../string_files/format_code_ranges.toml | 58 - tests/_data/string_files/format_epytext.toml | 52 - tests/_data/string_files/format_lists.toml | 80 -- tests/_data/string_files/format_sphinx.toml | 285 ----- .../string_files/format_style_options.toml | 38 - tests/_data/string_files/format_urls.toml | 648 ------------ tests/_data/string_files/format_wrap.toml | 133 --- tests/formatter/test_format_black.py | 169 --- tests/formatter/test_format_code.py | 609 ----------- tests/formatter/test_format_code_ranges.py | 107 -- tests/formatter/test_format_epytext.py | 133 --- tests/formatter/test_format_lists.py | 152 --- tests/formatter/test_format_sphinx.py | 512 --------- tests/formatter/test_format_styles.py | 147 --- tests/formatter/test_format_urls.py | 674 ------------ tests/formatter/test_format_wrap.py | 407 ------- tests/test_strip_docstring.py | 222 ---- 22 files changed, 694 insertions(+), 6071 deletions(-) delete mode 100644 src/docformatter/syntax.py delete mode 100644 tests/_data/string_files/format_black.toml delete mode 100644 tests/_data/string_files/format_code.toml delete mode 100644 
tests/_data/string_files/format_code_ranges.toml delete mode 100644 tests/_data/string_files/format_epytext.toml delete mode 100644 tests/_data/string_files/format_lists.toml delete mode 100644 tests/_data/string_files/format_sphinx.toml delete mode 100644 tests/_data/string_files/format_style_options.toml delete mode 100644 tests/_data/string_files/format_urls.toml delete mode 100644 tests/_data/string_files/format_wrap.toml delete mode 100644 tests/formatter/test_format_black.py delete mode 100644 tests/formatter/test_format_code.py delete mode 100644 tests/formatter/test_format_code_ranges.py delete mode 100644 tests/formatter/test_format_epytext.py delete mode 100644 tests/formatter/test_format_lists.py delete mode 100644 tests/formatter/test_format_sphinx.py delete mode 100644 tests/formatter/test_format_styles.py delete mode 100644 tests/formatter/test_format_urls.py delete mode 100644 tests/formatter/test_format_wrap.py delete mode 100644 tests/test_strip_docstring.py diff --git a/src/docformatter/__init__.py b/src/docformatter/__init__.py index 6788346b..1cef8d27 100644 --- a/src/docformatter/__init__.py +++ b/src/docformatter/__init__.py @@ -1,6 +1,9 @@ #!/usr/bin/env python # +# docformatter.__init__.py is part of the docformatter project +# # Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -23,15 +26,19 @@ # SOFTWARE. 
"""This is the docformatter package.""" + __all__ = ["__version__"] # docformatter Local Imports from .__pkginfo__ import __version__ +from .classify import * # noqa F403 +from .format import FormatResult # noqa F403 +from .format import Formatter # noqa F401 +from .patterns import * # noqa F403 from .strings import * # noqa F403 -from .syntax import * # noqa F403 from .util import * # noqa F403 +from .wrappers import * # noqa F403 # Have isort skip these they require the functions above. from .configuration import Configurater # isort: skip # noqa F401 from .encode import Encoder # isort: skip # noqa F401 -from .format import Formatter, FormatResult # isort: skip # noqa F401 diff --git a/src/docformatter/format.py b/src/docformatter/format.py index 5f5297ee..25837d0b 100644 --- a/src/docformatter/format.py +++ b/src/docformatter/format.py @@ -1,6 +1,9 @@ #!/usr/bin/env python # +# docformatter.format.py is part of the docformatter project +# # Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -29,119 +32,435 @@ import collections import contextlib import io -import re import tokenize -from typing import TextIO, Tuple - -# Third Party Imports -import untokenize # type: ignore +from typing import TextIO, Union # docformatter Package Imports +import docformatter.classify as _classify import docformatter.encode as _encode +import docformatter.patterns as _patterns import docformatter.strings as _strings -import docformatter.syntax as _syntax import docformatter.util as _util +import docformatter.wrappers as _wrappers +from docformatter.constants import QUOTE_TYPES unicode = str -def _do_remove_blank_lines_after_definitions( - modified_tokens, -): - """Remove blank lines between definitions and docstrings. 
+def _do_remove_preceding_blank_lines( + tokens: list[tokenize.TokenInfo], + blocks: list[tuple[int, int, str]], +) -> list[tokenize.TokenInfo]: + """Remove all blank lines preceding a docstring. - Blank lines between class, method, function, and variable - definitions and the docstring will be removed. + docformatter_6.8: No blank lines before a class docstring. + docformatter_8.3: No blank lines before a module docstring. + docformatter_8.3: One blank line before a module docstring if the docstring + follows immediately after a shebang line. + docformatter_9.9: No blank lines before a function or method docstring. + docformatter_12.2: No blank lines before an attribute docstring. Parameters ---------- - modified_tokens: list - The list of tokens created from the docstring. + tokens : list + A list of tokens from the source code. + blocks : list + A list of tuples containing the index of any docstrings and the docstring type. Returns ------- - modified_tokens: list - The list of tokens with any blank lines following a variable - definition removed. + tokens : list + A list of tokens with blank lines preceding docstrings removed. """ - for _idx, _token in enumerate(modified_tokens): - if _token[0] == 3: # noqa PLR2004 - j = 1 - - # Remove newline between variable definition and docstring - # unless it's separating a docstring from: - # * A previous docstring. - # * The file's shebang. - # * The import section. 
- while ( - modified_tokens[_idx - j][4] == "\n" - and not (modified_tokens[_idx - j - 1][4].strip().endswith('"""')) - and not modified_tokens[_idx - j - 1][4].startswith("#!/") - and "import" not in modified_tokens[_idx - j - 1][4] - ): - modified_tokens.pop(_idx - j) - j += 1 + _num_tokens = len(tokens) + _indices_to_remove = [] + + for i in range(_num_tokens): + match = next(((s, d, t) for (s, d, t) in blocks if d == i), None) + if match: + s, d, typ = match + for j in range(d - 1, 0, -1): + # Break out of loop once we reach a class, function, method, or + # attribute. No more blank lines should be removed once we get to the + # structure the docstring is associated with. + if ( + tokens[j].type == tokenize.NAME + and tokens[j].string in ("class", "def", "async") + ) or (tokens[j].type == tokenize.OP and tokens[j].string in ("=", ":")): + break + elif ( + tokens[j].type in (tokenize.NEWLINE, tokenize.NL) + and tokens[j].line == "\n" + and not tokens[j - 1].line.startswith("#!/") + ): + _indices_to_remove.append(j) - # Remove newline between class, method, and function - # definitions and docstring. - j = 2 - while modified_tokens[_idx - j][4] == "\n" and modified_tokens[ - _idx - j - 2 - ][4].strip().startswith(("def", "class")): - modified_tokens.pop(_idx - j) - j += 1 + # We need to go in reverse order to prevent the token list indices from + # getting out of whack. For example, if _indices_to_remove = [5, 21] and we + # removed index 5 first, then old index 22 would become the new index 21 and + # the next iteration of the loop would remove the wrong token. + _indices_to_remove.sort(reverse=True) - return modified_tokens + # Loop through the token indices in reverse order and remove them from the token + # line. + for i in _indices_to_remove: + tokens.pop(i) + return tokens -def _do_remove_blank_lines_after_docstring(modified_tokens): - """Remove blank lines between docstring and first Python statement. 
+ +def _do_update_token_indices( + tokens: list[tokenize.TokenInfo], +) -> list[tokenize.TokenInfo]: + """Update the indices of tokens after a newline that is to be removed. + + When a newline before a docstring is removed, the indices of all following tokens + must be updated to reflect the missing newline. Parameters ---------- - modified_tokens: list - The list of tokens created from the docstring. + tokens : list + A list of tokens from the source code. Returns ------- - modified_tokens: list - The list of tokens with any blank lines following a docstring - removed. + list + The updated list of tokens. """ - # Remove all newlines between docstring and first Python - # statement as long as it's not a stub function. - for _idx, _token in enumerate(modified_tokens): - j = 1 - _num_blank_lines = 0 - while modified_tokens[_idx - j][4] == "\n": - j += 1 - _num_blank_lines += 1 + _end_row = tokens[0].end[0] + _end_col = tokens[0].end[1] + _num_tokens = len(tokens) + + for i in range(1, _num_tokens): + _num_rows, _num_cols = _get_num_rows_columns(tokens[i]) + + # If the current token line is the same as the preceding token line, + # the starting row for the current token should be the same as the ending + # line for the previous token unless both lines are NEWLINES. 
+ if tokens[i].line == tokens[i - 1].line and tokens[i - 1].type not in ( + tokenize.NEWLINE, + tokenize.NL, + ): + _start_idx, _end_idx = _get_start_end_indices( + tokens[i], + tokens[i - 1], + _num_rows, + _num_cols, + ) - with contextlib.suppress(IndexError): - _is_definition = _token[4].lstrip().startswith(("class ", "def ", "@")) - _is_docstring = modified_tokens[_idx - 2][4].strip().endswith('"""') - _after_definition = ( - modified_tokens[_idx - _num_blank_lines - 4][4] - .lstrip() - .startswith(("class", "def", "@")) + tokens[i] = tokens[i]._replace(start=_start_idx) + tokens[i] = tokens[i]._replace(end=_end_idx) + if tokens[i].type in (tokenize.NEWLINE, tokenize.NL) and tokens[ + i - 1 + ].type in (tokenize.NEWLINE, tokenize.NL): + tokens[i] = tokens[i]._replace(start=(_end_idx[0], tokens[i].start[1])) + # If the current token line is different from the preceding token line, + # the current token starting row should be one greater than the previous + # token's end row. + else: + _start_idx, _end_idx = _get_unmatched_start_end_indices( + tokens[i], + tokens[i - 1], + _num_rows, ) - _after_docstring = modified_tokens[_idx - 5][4].strip().endswith( - '"""' - ) or modified_tokens[_idx - 5][4].strip().startswith('"""') - _comment_follows = re.search(r"\"\"\" *#", modified_tokens[_idx - 4][4]) - if ( - _token[0] == 1 - and not _is_definition - and not _is_docstring - and not _comment_follows - and _after_definition - and _after_docstring - ): - for j in range(_num_blank_lines): - modified_tokens.pop(_idx - j - 1) + tokens[i] = tokens[i]._replace(start=_start_idx) + tokens[i] = tokens[i]._replace(end=_end_idx) + + return tokens + + +def _get_attribute_docstring_newlines( + tokens: list[tokenize.TokenInfo], + index: int, +) -> int: + """Return number of newlines after an attribute docstring. + + docformatter_12.1: One blank line after an attribute docstring. + docformatter_12.1.1: Two blank lines if followed by top-level class or function + definition. 
+ + Parameters + ---------- + tokens : list + A list of tokens from the source code. + index : int + The index of the docstring token in the list of tokens. + + Returns + ------- + newlines : int + The number of newlines to insert after the docstring. + """ + _num_tokens = len(tokens) + _offset = 2 + + for i in range(index + 2, _num_tokens - index - 1): + if tokens[i].line == "\n": + _offset += 1 + else: + break + + if tokens[index + _offset].line.startswith("class") or tokens[ + index + _offset + ].line.startswith("def"): + return 2 + + return 1 + + +def _get_class_docstring_newlines( + tokens: list[tokenize.TokenInfo], + index: int, +) -> int: + """Return number of newlines after a class docstring. + + PEP_257_6.1: One blank line after a class docstring. + docformatter_6.9: Keep in-line comment after triple quotes in-line. + + Parameters + ---------- + tokens : list + A list of tokens from the source code. + index : int + The index of the docstring token in the list of tokens. + + Returns + ------- + newlines : int + The number of newlines to insert after the docstring. + """ + j = index + 1 + + # The docstring is followed by a comment. + if tokens[j].string.startswith("#"): + return 0 + + return 1 + + +def _get_function_docstring_newlines( # noqa: PLR0911 + tokens: list[tokenize.TokenInfo], + index: int, +) -> int: + """Return number of newlines after a function or method docstring. + + docformatter_9.5: No blank lines after a function or method docstring. + docformatter_9.6: One blank line after a function or method docstring if there is + an inner function definition. + docformatter_9.7: Two blank lines after a function docstring if the stub function + has no code. + docformatter_9.8: One blank line after a method docstring if the stub method has + no code. + + Parameters + ---------- + tokens : list + A list of tokens from the source code. + index : int + The index of the docstring token in the list of tokens. 
+ + Returns + ------- + newlines : int + The number of newlines to insert after the docstring. + """ + j = index + 1 + + # The docstring is followed by a comment. + if tokens[j].string.startswith("#"): + return 0 + + # Scan ahead to skip decorators and check for def/async def + while j < len(tokens): + if tokens[j].type == tokenize.OP and tokens[j].string == "@": + # Skip to the end of the decorator line + while j < len(tokens) and tokens[j][0] != tokenize.NEWLINE: + j += 1 + j += 1 + continue + + # The docstring is followed by an attribute assignment. + if tokens[j].type == tokenize.OP: + return 0 + + # There is a line of code following the docstring. + if _classify.is_code_line(tokens[j]): + if tokens[j].start[1] == 0: + return 1 + + return 0 + + # There is a method definition or nested function or class definition following + # the docstring. + if _classify.is_nested_definition_line(tokens[j]): + return 1 + + # There is a function or class definition following the docstring. + if _classify.is_definition_line(tokens[j]): + return 2 + + j += 1 + + return 0 + + +def _get_module_docstring_newlines(black: bool = False) -> int: + """Return number of newlines after a module docstring. + + docformatter_8.2: One blank line after a module docstring. + docformatter_8.2.1: Two blank lines after a module docstring when in black mode. + + Parameters + ---------- + black : bool + Indicates whether we're using black formatting rules. + + Returns + ------- + newlines : int + The number of newlines to insert after the docstring. + """ + if black: + return 2 + + return 1 + + +def _get_newlines_by_type( + tokens: list[tokenize.TokenInfo], + index: int, + black: bool = False, +) -> int: + """Dispatch to the correct docstring formatter based on context. + + Returns the number of newlines to insert after the docstring. + + Parameters + ---------- + tokens : list + A list of tokens from the source code. + index : int + The index of the docstring token in the list of tokens. 
+ + Returns + ------- + int + The number of newlines to insert after the docstring. + """ + if _classify.is_module_docstring(tokens, index): + return _get_module_docstring_newlines(black) + elif _classify.is_class_docstring(tokens, index): + return _get_class_docstring_newlines(tokens, index) + elif _classify.is_function_or_method_docstring(tokens, index): + return _get_function_docstring_newlines(tokens, index) + elif _classify.is_attribute_docstring(tokens, index): + return _get_attribute_docstring_newlines(tokens, index) + + return 0 # Default: probably a string literal + + +def _get_num_rows_columns(token: tokenize.TokenInfo) -> tuple[int, int]: + """Determine the number of rows and columns needed for the docstring. + + Parameters + ---------- + token : tokenize.TokenInfo + The token whose rows and columns needs to be determined. + + Returns + ------- + rows_cols : tuple(int, int) + The number of rows and columns for the token. + """ + # Find the number of rows and columns the line requires. When the docstring is + # multiple lines, we'll need to update the row in the end index appropriately. + # The number of columns is needed to properly set the end index column number. + _split_line = token.line.split("\n") + _num_rows = len(_split_line) - 1 + _num_cols = len(_split_line[_num_rows - 1]) + + return _num_rows, _num_cols + + +def _get_start_end_indices( + token: tokenize.TokenInfo, + prev_token: tokenize.TokenInfo, + num_rows: int, + num_cols: int, +) -> tuple[tuple[int, int], tuple[int, int]]: + """Determine the start and end indices for the token. - return modified_tokens + Parameters + ---------- + token : tokenize.TokenInfo + The token whose start and end indices are being determined. + prev_token : tokenize.TokenInfo + The token prior to the token whose start and end indices are being determined. + num_rows : int + The number of rows the token requires. + num_cols : int + The number of columns the token requires. 
+ + Returns + ------- + indices : tuple(tuple(int, int), tuple(int, int)) + The start and end index for the token. + """ + _start_row = prev_token.end[0] + _start_col = token.start[1] + _end_row = _start_row + _end_col = token.end[1] + + if num_rows > 1 and _end_row != prev_token.end[0]: + _end_row = _start_row + num_rows - 1 + _end_col = num_cols + + return (_start_row, _start_col), (_end_row, _end_col) + + +def _get_unmatched_start_end_indices( + token: tokenize.TokenInfo, + prev_token: tokenize.TokenInfo, + num_rows: int, +) -> tuple[tuple[int, int], tuple[int, int]]: + """Determine the start and end indices for the token if it doesn't match the prior. + + Parameters + ---------- + token : tokenize.TokenInfo + The token whose start and end indices are being determined. + prev_token : tokenize.TokenInfo + The token prior to the token whose start and end indices are being determined. + num_rows : int + The number of rows the token requires. + + Returns + ------- + indices : tuple(tuple(int, int), tuple(int, int)) + The start and end index for the token. 
+ """ + _start_row = prev_token.end[0] + 1 + _start_col = token.start[1] + _end_row = _start_row + _end_col = token.end[1] + + if any( + [ + _classify.is_inline_comment(token), + _classify.is_string_variable(token, prev_token), + _classify.is_newline_continuation(token, prev_token), + _classify.is_line_following_indent(token, prev_token), + _classify.is_closing_quotes(token, prev_token), + _classify.is_f_string(token, prev_token), + ] + ): + _start_row = prev_token.end[0] + + if num_rows > 1 and token.type != tokenize.INDENT: + _end_row = _end_row + num_rows - 1 + + return (_start_row, _start_col), (_end_row, _end_col) class FormatResult: @@ -150,30 +469,13 @@ class FormatResult: ok = 0 error = 1 interrupted = 2 - check_failed = 3 + format_required = 3 +# noinspection PyArgumentList class Formatter: """Format docstrings.""" - STR_QUOTE_TYPES = ( - '"""', - "'''", - ) - RAW_QUOTE_TYPES = ( - 'r"""', - 'R"""', - "r'''", - "R'''", - ) - UCODE_QUOTE_TYPES = ( - 'u"""', - 'U"""', - "u'''", - "U'''", - ) - QUOTE_TYPES = STR_QUOTE_TYPES + RAW_QUOTE_TYPES + UCODE_QUOTE_TYPES - parser = None """Parser object.""" @@ -211,12 +513,14 @@ def __init__( self.encodor = _encode.Encoder() - def do_format_standard_in(self, parser: argparse.ArgumentParser): - """Print formatted text to standard out. + self.new_tokens: list[tokenize.TokenInfo] = [] + + def do_format_standard_in(self, parser: argparse.ArgumentParser) -> None: + """Print formatted text from standard in to standard out. Parameters ---------- - parser: argparse.ArgumentParser + parser : argparse.ArgumentParser The argument parser containing the formatting options. """ if len(self.args.files) > 1: @@ -241,46 +545,190 @@ def do_format_standard_in(self, parser: argparse.ArgumentParser): self.stdout.write(formatted_source) - def do_format_files(self): + def do_format_files(self) -> Union[int, None]: """Format multiple files. Return ------ - code: int - One of the FormatResult codes. 
+ code : int | None + One of the FormatResult return codes. """ - outcomes = collections.Counter() - for filename in _util.find_py_files( - set(self.args.files), self.args.recursive, self.args.exclude - ): + outcomes: dict[int, int] = collections.Counter() + + return_codes = [ # in order of preference + FormatResult.error, + FormatResult.format_required, + FormatResult.ok, + ] + + _files_to_format = _util.find_py_files( + list(self.args.files), self.args.recursive, self.args.exclude + ) + + is_empty = True + for filename in _files_to_format: + is_empty = False try: result = self._do_format_file(filename) outcomes[result] += 1 except OSError as exception: outcomes[FormatResult.error] += 1 + # noinspection PyTypeChecker print(unicode(exception), file=self.stderror) - return_codes = [ # in order of preference - FormatResult.error, - FormatResult.check_failed, - FormatResult.ok, - ] + # There were no files to process. + if is_empty: + outcomes[FormatResult.error] += 1 for code in return_codes: if outcomes[code]: return code - def _do_format_file(self, filename): - """Run format_code() on a file. + return 0 + + def _do_add_blank_lines( + self, + num_blank_lines: int, + start_row: int, + end_row: int, + ) -> None: + """Add the number of blank lines specified by num_blanks after the docstring. + + Parameters + ---------- + num_blank_lines : int + The number of blank lines to add. + start_row : int + The start index row for the first blank line. + end_row : int + The end index row for the first blank line. 
+ """ + _start = (start_row, 0) + _end = (end_row, 1) + for k in range(num_blank_lines): + new_tok = tokenize.TokenInfo( + type=tokenize.NEWLINE, + string="\n", + start=_start, + end=_end, + line="\n", + ) + self.new_tokens.append(new_tok) + _start = (_end[0] + 1, 0) + _end = (_start[0], 1) + + def _do_add_formatted_docstring( + self, + token: tokenize.TokenInfo, + next_token: tokenize.TokenInfo, + docstring_type: str, + blank_line_count: int, + ) -> None: + """Add a formatted docstring to the new tokens list. + + Parameters + ---------- + token : tokenize.TokenInfo + The token representing the docstring. + next_token : tokenize.TokenInfo + The next token after the docstring. + docstring_type : str + The type of the docstring (e.g., module, class, function, attribute). + blank_line_count : int + The number of blank lines to add after the docstring. + """ + _indent = " " * token.start[1] if docstring_type != "module" else "" + _formatted = self._do_format_docstring(_indent, token.string) + _line = _indent + _formatted + + # Add a newline to the end of the docstring line unless it already + # has one or there is an in-line comment following it. + if not _line.endswith("\n") and not next_token.string.startswith("#"): + _line += "\n" + + # Add a token with the formatted docstring line. + _new_tok = tokenize.TokenInfo( + type=tokenize.STRING, + string=_formatted, + start=token.start, + end=token.end, + line=_line, + ) + self.new_tokens.append(_new_tok) + + with contextlib.suppress(IndexError): + if ( + self.new_tokens[-2].type == tokenize.INDENT + and self.new_tokens[-2].end[0] == _new_tok.start[0] + ): + self.new_tokens[-2] = self.new_tokens[-2]._replace(line=_line) + + # If a comment follows the docstring, skip adding a newline token for + # the line. 
+ if not next_token.string.startswith("#"): + _new_tok = tokenize.TokenInfo( + type=tokenize.NEWLINE, + string="\n", + start=token.end, + end=(token.end[0], token.end[1] + 1), + line=_line, + ) + self.new_tokens.append(_new_tok) + + # Add the appropriate number of NEWLINE tokens based on the type of + # docstring. + self._do_add_blank_lines( + blank_line_count, + _new_tok.end[0] + 1, + _new_tok.end[0] + 1, + ) + + def _do_add_unformatted_docstring( + self, + token: tokenize.TokenInfo, + docstring_type: str, + ) -> None: + """Add an unformatted docstring to the new tokens list. + + Parameters + ---------- + token : tokenize.TokenInfo + The token representing the docstring. + docstring_type : str + The type of the docstring (e.g., module, class, function, attribute). + """ + _indent = " " * token.start[1] if docstring_type != "module" else "" + _line = _indent + token.string + _new_token = tokenize.TokenInfo( + type=tokenize.STRING, + string=token.string, + start=token.start, + end=token.end, + line=_line, + ) + self.new_tokens.append(_new_token) + + # Add a token for the newline after the docstring. + _new_token = tokenize.TokenInfo( + type=tokenize.NEWLINE, + string="\n", + start=token.end, + end=(token.end[0], token.end[1] + 1), + line=_line, + ) + self.new_tokens.append(_new_token) + + def _do_format_file(self, filename: str) -> int: + """Format docstrings in a file. Parameters ---------- - filename: str + filename : str The path to the file to be formatted. Return ------ - result_code: int + result_code : int One of the FormatResult codes. 
""" self.encodor.do_detect_encoding(filename) @@ -293,8 +741,9 @@ def _do_format_file(self, filename): show_diff = self.args.diff if source != formatted_source: - ret = FormatResult.check_failed + ret = FormatResult.format_required if self.args.check: + # noinspection PyTypeChecker print(unicode(filename), file=self.stderror) elif self.args.in_place: with self.encodor.do_open_with_encoding( @@ -320,39 +769,18 @@ def _do_format_file(self, filename): return ret - def _do_format_code(self, source): + def _do_format_code(self, source: str) -> str: """Return source code with docstrings formatted. Parameters ---------- - source: str + source : str The text from the source file. - """ - try: - _original_newline = self.encodor.do_find_newline(source.splitlines(True)) - _code = self._format_code(source) - - return _strings.normalize_line_endings( - _code.splitlines(True), _original_newline - ).rstrip(" ") - except (tokenize.TokenError, IndentationError): - return source - - def _format_code( - self, - source, - ): - """Return source code with docstrings formatted. - - Parameters - ---------- - source: str - The source code string. Returns ------- - formatted_source: str - The source code with formatted docstrings. + formatted : str + The source file text with docstrings formatted. 
""" if not source: return source @@ -363,55 +791,20 @@ def _format_code( if self.args.length_range is not None: assert self.args.length_range[0] > 0 and self.args.length_range[1] > 0 - modified_tokens = [] - - sio = io.StringIO(source) - previous_token_type = None - only_comments_so_far = True - try: - for ( - token_type, - token_string, - start, - end, - line, - ) in tokenize.generate_tokens(sio.readline): - _token_string = token_string - if ( - token_type == tokenize.STRING - and token_string.startswith(self.QUOTE_TYPES) - and ( - previous_token_type == tokenize.INDENT - or previous_token_type == tokenize.NEWLINE - or only_comments_so_far - ) - and _util.is_in_range(self.args.line_range, start[0], end[0]) - and _util.has_correct_length( - self.args.length_range, start[0], end[0] - ) - ): - indentation = " " * (len(line) - len(line.lstrip())) - _token_string = self._do_format_docstring( - indentation, - token_string, - ) - - if token_type not in [ - tokenize.COMMENT, - tokenize.NEWLINE, - tokenize.NL, - ]: - only_comments_so_far = False - - previous_token_type = token_type - modified_tokens.append((token_type, _token_string, start, end, line)) + _original_newline = self.encodor.do_find_newline(source.splitlines(True)) + tokens = list( + tokenize.generate_tokens(io.StringIO(source, newline="").readline) + ) - modified_tokens = _do_remove_blank_lines_after_definitions(modified_tokens) - modified_tokens = _do_remove_blank_lines_after_docstring(modified_tokens) + # Perform docstring rewriting + self._do_rewrite_docstring_blocks(tokens) + _code = tokenize.untokenize(self.new_tokens) - return untokenize.untokenize(modified_tokens) - except tokenize.TokenError: + return _strings.do_normalize_line_endings( + _code.splitlines(True), _original_newline + ).rstrip(" ") + except (tokenize.TokenError, IndentationError): return source def _do_format_docstring( # noqa PLR0911 @@ -423,17 +816,17 @@ def _do_format_docstring( # noqa PLR0911 Parameters ---------- - indentation: str + 
indentation : str The indentation characters for the docstring. - docstring: str + docstring : str The docstring itself. Returns ------- - docstring_formatted: str + docstring_formatted : str The docstring formatted according the various options. """ - contents, open_quote = self._do_strip_docstring(docstring) + contents, open_quote = _strings.do_strip_docstring(docstring) if ( self.args.black @@ -444,7 +837,7 @@ def _do_format_docstring( # noqa PLR0911 open_quote = f"{open_quote} " # Skip if there are nested triple double quotes - if contents.count(self.QUOTE_TYPES[0]): + if contents.count(QUOTE_TYPES[0]): return docstring # Do not modify things that start with doctests. @@ -452,36 +845,29 @@ def _do_format_docstring( # noqa PLR0911 return docstring # Do not modify docstring if the only thing it contains is a link. - _links = _syntax.do_find_links(contents) + _links = _patterns.do_find_links(contents) with contextlib.suppress(IndexError): if _links[0][0] == 0 and _links[0][1] == len(contents): return docstring - summary, description = _strings.split_summary_and_description(contents) + summary, description = _strings.do_split_summary_and_description(contents) # Leave docstrings with only field lists alone. - if _syntax.is_some_sort_of_field_list( + if _patterns.is_field_list( summary, self.args.style, ): return docstring - # Leave docstrings with underlined descriptions alone. - # TODO: Deprecate the remove_section_header method now that section headers - # are being handled. - if _syntax.remove_section_header(description).strip() != description.strip(): - return docstring - if not self.args.force_wrap and ( - _syntax.is_some_sort_of_list( + _patterns.is_type_of_list( summary, self.args.non_strict, - self.args.rest_section_adorns, self.args.style, ) - or _syntax.do_find_links(summary) + or _patterns.do_find_links(summary) ): - # Something is probably not right with the splitting. + # Something probably isn't right with the splitting. 
return docstring # Compensate for textwrap counting each tab in indentation as 1 @@ -530,17 +916,17 @@ def _do_format_oneline_docstring( if self.args.make_summary_multi_line: beginning = f"{open_quote}\n{indentation}" ending = f'\n{indentation}"""' - summary_wrapped = _syntax.wrap_summary( - _strings.normalize_summary(contents, self.args.non_cap), + summary_wrapped = _wrappers.do_wrap_summary( + _strings.do_normalize_summary(contents, self.args.non_cap), wrap_length=self.args.wrap_summaries, initial_indent=indentation, subsequent_indent=indentation, ).strip() return f"{beginning}{summary_wrapped}{ending}" else: - summary_wrapped = _syntax.wrap_summary( + summary_wrapped = _wrappers.do_wrap_summary( open_quote - + _strings.normalize_summary(contents, self.args.non_cap) + + _strings.do_normalize_summary(contents, self.args.non_cap) + '"""', wrap_length=self.args.wrap_summaries, initial_indent=indentation, @@ -586,13 +972,13 @@ def _do_format_multiline_docstring( indentation if self.args.pre_summary_newline else 3 * " " + indentation ) pre_summary = "\n" + indentation if self.args.pre_summary_newline else "" - summary = _syntax.wrap_summary( - _strings.normalize_summary(summary, self.args.non_cap), + summary = _wrappers.do_wrap_summary( + _strings.do_normalize_summary(summary, self.args.non_cap), wrap_length=self.args.wrap_summaries, initial_indent=initial_indent, subsequent_indent=indentation, ).lstrip() - description = _syntax.wrap_description( + description = _wrappers.do_wrap_description( description, indentation=indentation, wrap_length=self.args.wrap_descriptions, @@ -609,40 +995,92 @@ def _do_format_multiline_docstring( {indentation}"""\ ''' - def _do_strip_docstring(self, docstring: str) -> Tuple[str, str]: - """Return contents of docstring and opening quote type. - - Strips the docstring of its triple quotes, trailing white space, - and line returns. 
Determines type of docstring quote (either string, - raw, or unicode) and returns the opening quotes, including the type - identifier, with single quotes replaced by double quotes. + def _do_rewrite_docstring_blocks( + self, + tokens: list[tokenize.TokenInfo], + ) -> None: + """Replace all docstring blocks with properly formatted docstrings. Parameters ---------- - docstring: str - The docstring, including the opening and closing triple quotes. - - Returns - ------- - (docstring, open_quote) : tuple - The docstring with the triple quotes removed. - The opening quote type with single quotes replaced by double - quotes. + tokens : list[TokenInfo] + The tokenized Python source code. """ - docstring = docstring.strip() + blocks = _classify.do_find_docstring_blocks(tokens) + self.new_tokens = [] + skip_indices: set[int] = set() + + for i, tok in enumerate(tokens): + if i in skip_indices: + continue + + match = next(((s, d, t) for (s, d, t) in blocks if d == i), None) + if match: + s, d, typ = match + + # Skip tokens from anchor (s) up to and including the docstring (d), + # plus trailing blank lines + j = d + 1 + while j < len(tokens) and tokens[j].type in ( + tokenize.NL, + tokenize.NEWLINE, + ): + j += 1 + skip_indices.update(range(s + 1, j)) + + _docstring_token = tokens[d] + _indent = " " * _docstring_token.start[1] if typ != "module" else "" + _blank_line_count = _get_newlines_by_type( + tokens, + d, + black=self.args.black, + ) - for quote in self.QUOTE_TYPES: - if quote in self.RAW_QUOTE_TYPES + self.UCODE_QUOTE_TYPES and ( - docstring.startswith(quote) and docstring.endswith(quote[1:]) - ): - return docstring.split(quote, 1)[1].rsplit(quote[1:], 1)[ - 0 - ].strip(), quote.replace("'", '"') - elif docstring.startswith(quote) and docstring.endswith(quote): - return docstring.split(quote, 1)[1].rsplit(quote, 1)[ - 0 - ].strip(), quote.replace("'", '"') - - raise ValueError( - "docformatter only handles triple-quoted (single or double) " "strings" - ) + if 
_util.is_in_range( + self.args.line_range, + _docstring_token.start[0], + _docstring_token.end[0], + ) and _util.has_correct_length( + self.args.length_range, + _docstring_token.start[0], + _docstring_token.end[0], + ): + self._do_add_formatted_docstring( + _docstring_token, + tokens[i + 1], + typ, + _blank_line_count, + ) + else: + self._do_add_unformatted_docstring(_docstring_token, typ) + + if ( + ( + self.new_tokens[-2].string == tokens[i + 1].string + and _docstring_token.line == tokens[i + 1].line + ) + or tokens[i + 1].string == "\n" + or tokens[i + 1].type in (tokenize.NEWLINE, tokenize.NL) + ): + skip_indices.add(i + 1) + continue + else: + _new_tok = tok + # If it's a standalone STRING (not identified as a docstring block), + # ensure .line ends with newline + if tok.type == tokenize.STRING: + _line = tok.line + if not _line.endswith("\n"): + _line += "\n" + _new_tok = tokenize.TokenInfo( + type=tok.type, + string=tok.string, + start=tok.start, + end=tok.end, + line=_line, + ) + + self.new_tokens.append(_new_tok) + + self.new_tokens = _do_remove_preceding_blank_lines(self.new_tokens, blocks) + self.new_tokens = _do_update_token_indices(self.new_tokens) diff --git a/src/docformatter/syntax.py b/src/docformatter/syntax.py deleted file mode 100644 index 79b8cb8d..00000000 --- a/src/docformatter/syntax.py +++ /dev/null @@ -1,997 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (C) 2012-2023 Steven Myint -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial 
portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -"""This module provides docformatter's syntax functions.""" - - -# Standard Library Imports -import contextlib -import re -import textwrap -from typing import Iterable, List, Tuple, Union - -DEFAULT_INDENT = 4 - -ALEMBIC_REGEX = r"^ *[a-zA-Z0-9_\- ]*: " -"""Regular expression to use for finding alembic headers.""" - -BULLET_REGEX = r"\s*[*\-+] [\S ]+" -"""Regular expression to use for finding bullet lists.""" - -ENUM_REGEX = r"\s*\d\." -"""Regular expression to use for finding enumerated lists.""" - -EPYTEXT_REGEX = r"@[a-zA-Z0-9_\-\s]+:" -"""Regular expression to use for finding Epytext-style field lists.""" - -GOOGLE_REGEX = r"^ *[a-zA-Z0-9_\- ]*:$" -"""Regular expression to use for finding Google-style field lists.""" - -LITERAL_REGEX = r"[\S ]*::" -"""Regular expression to use for finding literal blocks.""" - -NUMPY_REGEX = r"^\s[a-zA-Z0-9_\- ]+ ?: [\S ]+" -"""Regular expression to use for finding Numpy-style field lists.""" - -OPTION_REGEX = r"^-{1,2}[\S ]+ {2}\S+" -"""Regular expression to use for finding option lists.""" - -REST_REGEX = r"((\.{2}|`{2}) ?[\w.~-]+(:{2}|`{2})?[\w ]*?|`[\w.~]+`)" -"""Regular expression to use for finding reST directives.""" - -# Complete list: -# https://www.sphinx-doc.org/en/master/usage/domains/python.html#info-field-lists -SPHINX_FIELD_PATTERNS = ( - "arg|" - "cvar|" - "except|" - "ivar|" - "key|" - "meta|" - "param|" - "raise|" - "return|" - "rtype|" - "type|" - "var|" - "yield" -) - -SPHINX_REGEX = 
rf":({SPHINX_FIELD_PATTERNS})[a-zA-Z0-9_\-.() ]*:" -"""Regular expression to use for finding Sphinx-style field lists.""" - -URL_PATTERNS = ( - "afp|" - "apt|" - "bitcoin|" - "chrome|" - "cvs|" - "dav|" - "dns|" - "file|" - "finger|" - "fish|" - "ftp|" - "ftps|" - "git|" - "http|" - "https|" - "imap|" - "ipp|" - "ipps|" - "irc|" - "irc6|" - "ircs|" - "jar|" - "ldap|" - "ldaps|" - "mailto|" - "news|" - "nfs|" - "nntp|" - "pop|" - "rsync|" - "s3|" - "sftp|" - "shttp|" - "sip|" - "sips|" - "smb|" - "sms|" - "snmp|" - "ssh|" - "svn|" - "telnet|" - "vnc|" - "xmpp|" - "xri" -) -"""The URL patterns to look for when finding links. - -Based on the table at -""" - -# This is the regex used to find URL links: -# -# (__ |`{{2}}|`\w[\w. :\n]*|\.\. _?[\w. :]+|')? is used to find in-line links that -# should remain on a single line even if it exceeds the wrap length. -# __ is used to find to underscores followed by a single space. -# This finds patterns like: __ https://sw.kovidgoyal.net/kitty/graphics-protocol/ -# -# `{{2}} is used to find two back-tick characters. -# This finds patterns like: ``http://www.example.com`` -# -# `\w[a-zA-Z0-9. :#\n]* matches the back-tick character immediately followed by one -# letter, then followed by any number of letters, numbers, periods, spaces, colons, -# hash marks or newlines. -# This finds patterns like: `Link text `_ -# -# \.\. _?[\w. :]+ matches the pattern .. followed one space, then by zero or -# one underscore, then any number of letters, periods, spaces, or colons. -# This finds patterns like: .. _a link: https://domain.invalid/ -# -# ' matches a single quote. -# This finds patterns like: 'http://www.example.com' -# -# ? matches the previous pattern between zero or one times. -# -# ? is used to find the actual link. -# ? matches the character > between zero and one times. -URL_REGEX = ( - rf"(__ |`{{2}}|`\w[\w :#\n]*[.|\.\. _?[\w. :]+|')??" 
-) - -URL_SKIP_REGEX = rf"({URL_PATTERNS}):(/){{0,2}}(``|')" -"""The regex used to ignore found hyperlinks. - -URLs that don't actually contain a domain, but only the URL pattern should -be treated like simple text. This will ignore URLs like ``http://`` or 'ftp:`. - -({URL_PATTERNS}) matches one of the URL patterns. -:(/){{0,2}} matches a colon followed by up to two forward slashes. -(``|') matches a double back-tick or single quote. -""" - -HEURISTIC_MIN_LIST_ASPECT_RATIO = 0.4 -"""The minimum aspect ratio to consider a list.""" - - -def description_to_list( - description: str, - indentation: str, - wrap_length: int, -) -> List[str]: - """Convert the description to a list of wrap length lines. - - Parameters - ---------- - description : str - The docstring description. - indentation : str - The indentation (number of spaces or tabs) to place in front of each - line. - wrap_length : int - The column to wrap each line at. - - Returns - ------- - _wrapped_lines : list - A list containing each line of the description wrapped at wrap_length. - """ - # This is a description containing only one paragraph. - if len(re.findall(r"\n\n", description)) <= 0: - return textwrap.wrap( - textwrap.dedent(description), - width=wrap_length, - initial_indent=indentation, - subsequent_indent=indentation, - ) - - # This is a description containing multiple paragraphs. 
- _wrapped_lines = [] - for _line in description.split("\n\n"): - _wrapped_line = textwrap.wrap( - textwrap.dedent(_line), - width=wrap_length, - initial_indent=indentation, - subsequent_indent=indentation, - ) - - if _wrapped_line: - _wrapped_lines.extend(_wrapped_line) - _wrapped_lines.append("") - - with contextlib.suppress(IndexError): - if not _wrapped_lines[-1] and not _wrapped_lines[-2]: - _wrapped_lines.pop(-1) - - if ( - description[-len(indentation) - 1 : -len(indentation)] == "\n" - and description[-len(indentation) - 2 : -len(indentation)] != "\n\n" - ): - _wrapped_lines.pop(-1) - - return _wrapped_lines - - -def do_clean_url(url: str, indentation: str) -> str: - r"""Strip newlines and multiple whitespace from URL string. - - This function deals with situations such as: - - `Get\n Cookies.txt bool: - """Determine if docstring contains any reST directives. - - .. todo:: - - Currently this function only returns True/False to indicate whether a - reST directive was found. Should return a list of tuples containing - the start and end position of each reST directive found similar to the - function do_find_links(). - - Parameters - ---------- - text : str - The docstring text to test. - - Returns - ------- - is_directive : bool - Whether the docstring is a reST directive. - """ - _rest_iter = re.finditer(REST_REGEX, text) - return bool([(_rest.start(0), _rest.end(0)) for _rest in _rest_iter]) - - -def do_find_field_lists( - text: str, - style: str, -): - r"""Determine if docstring contains any field lists. - - Parameters - ---------- - text : str - The docstring description to check for field list patterns. - style : str - The field list style used. - - Returns - ------- - _field_idx, _wrap_parameters : tuple - A list of tuples with each tuple containing the starting and ending - position of each field list found in the passed description. - A boolean indicating whether long field list lines should be wrapped. 
- """ - _field_idx = [] - _wrap_parameters = False - - if style == "epytext": - _field_idx = [ - (_field.start(0), _field.end(0)) - for _field in re.finditer(EPYTEXT_REGEX, text) - ] - _wrap_parameters = True - elif style == "sphinx": - _field_idx = [ - (_field.start(0), _field.end(0)) - for _field in re.finditer(SPHINX_REGEX, text) - ] - _wrap_parameters = True - - return _field_idx, _wrap_parameters - - -def do_find_links(text: str) -> List[Tuple[int, int]]: - r"""Determine if docstring contains any links. - - Parameters - ---------- - text : str - The docstring description to check for link patterns. - - Returns - ------- - url_index : list - A list of tuples with each tuple containing the starting and ending - position of each URL found in the passed description. - """ - _url_iter = re.finditer(URL_REGEX, text) - return [(_url.start(0), _url.end(0)) for _url in _url_iter] - - -def do_skip_link(text: str, index: Tuple[int, int]) -> bool: - """Check if the identified URL is something other than a complete link. - - Is the identified link simply: - 1. The URL scheme pattern such as 's3://' or 'file://' or 'dns:'. - 2. The beginning of a URL link that has been wrapped by the user. - - Arguments - --------- - text : str - The description text containing the link. - index : tuple - The index in the text of the starting and ending position of the - identified link. - - Returns - ------- - _do_skip : bool - Whether to skip this link and simply treat it as a standard text word. - """ - _do_skip = re.search(URL_SKIP_REGEX, text[index[0] : index[1]]) is not None - - with contextlib.suppress(IndexError): - _do_skip = _do_skip or (text[index[0]] == "<" and text[index[1]] != ">") - - return _do_skip - - -def do_split_description( - text: str, - indentation: str, - wrap_length: int, - style: str, -) -> Union[List[str], Iterable]: - """Split the description into a list of lines. - - Parameters - ---------- - text : str - The docstring description. 
- indentation : str - The indentation (number of spaces or tabs) to place in front of each - line. - wrap_length : int - The column to wrap each line at. - style : str - The docstring style to use for dealing with parameter lists. - - Returns - ------- - _lines : list - A list containing each line of the description with any links put - back together. - """ - _lines: List[str] = [] - _text_idx = 0 - - # Check if the description contains any URLs. - _url_idx = do_find_links(text) - - # Check if the description contains any field lists. - _field_idx, _wrap_fields = do_find_field_lists( - text, - style, - ) - - # Field list wrapping takes precedence over URL wrapping. - _url_idx = _field_over_url( - _field_idx, - _url_idx, - ) - - if not _url_idx and not (_field_idx and _wrap_fields): - return description_to_list( - text, - indentation, - wrap_length, - ) - - if _url_idx: - _lines, _text_idx = do_wrap_urls( - text, - _url_idx, - 0, - indentation, - wrap_length, - ) - - if _field_idx: - _lines, _text_idx = do_wrap_field_lists( - text, - _field_idx, - _lines, - _text_idx, - indentation, - wrap_length, - ) - else: - # Finally, add everything after the last URL or field list directive. - _lines += _do_close_description(text, _text_idx, indentation) - - return _lines - - -def do_wrap_field_lists( # noqa: PLR0913 - text: str, - field_idx: List[Tuple[int, int]], - lines: List[str], - text_idx: int, - indentation: str, - wrap_length: int, -) -> Tuple[List[str], int]: - """Wrap field lists in the long description. - - Parameters - ---------- - text : str - The long description text. - field_idx : list - The list of field list indices found in the description text. - lines : list - The list of formatted lines in the description that come before the - first parameter list item. - text_idx : int - The index in the description of the end of the last parameter list - item. - indentation : str - The string to use to indent each line in the long description. 
- wrap_length : int - The line length at which to wrap long lines in the description. - - Returns - ------- - lines, text_idx : tuple - A list of the long description lines and the index in the long - description where the last parameter list item ended. - """ - lines.extend( - description_to_list( - text[text_idx : field_idx[0][0]], - indentation, - wrap_length, - ) - ) - - for _idx, __ in enumerate(field_idx): - _field_name = text[field_idx[_idx][0] : field_idx[_idx][1]] - _field_body = _do_join_field_body( - text, - field_idx, - _idx, - ) - - if len(f"{_field_name}{_field_body}") <= (wrap_length - len(indentation)): - _field = f"{_field_name}{_field_body}" - lines.append(f"{indentation}{_field}") - else: - lines.extend( - _do_wrap_field(_field_name, _field_body, indentation, wrap_length) - ) - - text_idx = field_idx[_idx][1] - - return lines, text_idx - - -def do_wrap_urls( - text: str, - url_idx: Iterable, - text_idx: int, - indentation: str, - wrap_length: int, -) -> Tuple[List[str], int]: - """Wrap URLs in the long description. - - Parameters - ---------- - text : str - The long description text. - url_idx : list - The list of URL indices found in the description text. - text_idx : int - The index in the description of the end of the last URL. - indentation : str - The string to use to indent each line in the long description. - wrap_length : int - The line length at which to wrap long lines in the description. - - Returns - ------- - _lines, _text_idx : tuple - A list of the long description lines and the index in the long - description where the last URL ended. - """ - _lines = [] - for _url in url_idx: - # Skip URL if it is simply a quoted pattern. - if do_skip_link(text, _url): - continue - - # If the text including the URL is longer than the wrap length, - # we need to split the description before the URL, wrap the pre-URL - # text, and add the URL as a separate line. 
- if len(text[text_idx : _url[1]]) > (wrap_length - len(indentation)): - # Wrap everything in the description before the first URL. - _lines.extend( - description_to_list( - text[text_idx : _url[0]], - indentation, - wrap_length, - ) - ) - - with contextlib.suppress(IndexError): - if text[_url[0] - len(indentation) - 2] != "\n" and not _lines[-1]: - _lines.pop(-1) - - # Add the URL making sure that the leading quote is kept with a quoted URL. - _text = f"{text[_url[0]: _url[1]]}" - with contextlib.suppress(IndexError): - if _lines[0][-1] == '"': - _lines[0] = _lines[0][:-2] - _text = f'"{text[_url[0] : _url[1]]}' - - _lines.append(f"{do_clean_url(_text, indentation)}") - - text_idx = _url[1] - - return _lines, text_idx - - -def is_some_sort_of_field_list( - text: str, - style: str, -) -> bool: - """Determine if docstring contains field lists. - - Parameters - ---------- - text : str - The docstring text. - style : str - The field list style to use. - - Returns - ------- - is_field_list : bool - Whether the field list pattern for style was found in the docstring. - """ - split_lines = text.rstrip().splitlines() - - if style == "epytext": - return any( - ( - # "@param x:" <-- Epytext style - # "@type x:" <-- Epytext style - re.match(EPYTEXT_REGEX, line) - ) - for line in split_lines - ) - elif style == "sphinx": - return any( - ( - # ":parameter: description" <-- Sphinx style - re.match(SPHINX_REGEX, line) - ) - for line in split_lines - ) - - return False - - -# pylint: disable=line-too-long -def is_some_sort_of_list( - text: str, - strict: bool, - rest_sections: str, - style: str, -) -> bool: - """Determine if docstring is a reST list. - - Notes - ----- - There are five types of lists in reST/docutils that need to be handled. - - * `Bullet lists - `_ - * `Enumerated lists - `_ - * `Definition lists - `_ - * `Field lists - `_ - * `Option lists - `_ - """ - split_lines = text.rstrip().splitlines() - - # TODO: Find a better way of doing this. 
Conversely, create a logger and log - # potential lists for the user to decide if they are lists or not. - # Very large number of lines but short columns probably means a list of - # items. - if ( - len(split_lines) / max([len(line.strip()) for line in split_lines] + [1]) - > HEURISTIC_MIN_LIST_ASPECT_RATIO - ) and not strict: - return True - - if is_some_sort_of_field_list(text, style): - return False - - return any( - ( - # "* parameter" <-- Bullet list - # "- parameter" <-- Bullet list - # "+ parameter" <-- Bullet list - re.match(BULLET_REGEX, line) - or - # "1. item" <-- Enumerated list - re.match(ENUM_REGEX, line) - or - # "====\ndescription\n====" <-- reST section - # "----\ndescription\n----" <-- reST section - # "description\n----" <-- reST section - re.match(rest_sections, line) - or - # "-a description" <-- Option list - # "--long description" <-- Option list - re.match(OPTION_REGEX, line) - or - # "@param x:" <-- Epytext style - # "@type x:" <-- Epytext style - re.match(EPYTEXT_REGEX, line) - or - # ":parameter: description" <-- Sphinx style - re.match(SPHINX_REGEX, line) - or - # "parameter : description" <-- Numpy style - re.match(NUMPY_REGEX, line) - or - # "word\n----" <-- Numpy headings - re.match(r"^\s*-+", line) - or - # "Args:" <-- Google style - # "parameter:" <-- Google style - re.match(GOOGLE_REGEX, line) - or - # "parameter - description" - re.match(r"[\S ]+ - \S+", line) - or - # "parameter -- description" - re.match(r"\s*\S+\s+--\s+", line) - or - # Literal block - re.match(LITERAL_REGEX, line) - or - # "@parameter" - re.match(r"^ *@[a-zA-Z0-9_\- ]*(?:(?!:).)*$", line) - or - # " c :math:`[0, `]`. 
- re.match(r" *\w *:[a-zA-Z0-9_\- ]*:", line) - or - # "Revision ID: >" - # "Revises: " - # "Create Date: 2023-01-06 10:13:28.156709" - re.match(ALEMBIC_REGEX, line) - ) - for line in split_lines - ) - - -def is_some_sort_of_code(text: str) -> bool: - """Return True if text looks like code.""" - return any( - len(word) > 50 and not re.match(URL_REGEX, word) # noqa: PLR2004 - for word in text.split() - ) - - -def reindent(text, indentation): - """Return reindented text that matches indentation.""" - if "\t" not in indentation: - text = text.expandtabs() - - text = textwrap.dedent(text) - - return ( - "\n".join( - [(indentation + line).rstrip() for line in text.splitlines()] - ).rstrip() - + "\n" - ) - - -def remove_section_header(text): - r"""Return text with section header removed. - - >>> remove_section_header('----\nfoo\nbar\n') - 'foo\nbar\n' - - >>> remove_section_header('===\nfoo\nbar\n') - 'foo\nbar\n' - """ - stripped = text.lstrip() - if not stripped: - return text - - first = stripped[0] - return ( - text - if ( - first.isalnum() - or first.isspace() - or stripped.splitlines()[0].strip(first).strip() - ) - else stripped.lstrip(first).lstrip() - ) - - -def strip_leading_blank_lines(text): - """Return text with leading blank lines removed.""" - split = text.splitlines() - - found = next((index for index, line in enumerate(split) if line.strip()), 0) - - return "\n".join(split[found:]) - - -def unwrap_summary(summary): - """Return summary with newlines removed in preparation for wrapping.""" - return re.sub(r"\s*\n\s*", " ", summary) - - -def wrap_summary(summary, initial_indent, subsequent_indent, wrap_length): - """Return line-wrapped summary text.""" - if wrap_length > 0: - return textwrap.fill( - unwrap_summary(summary), - width=wrap_length, - initial_indent=initial_indent, - subsequent_indent=subsequent_indent, - ).strip() - else: - return summary - - -def wrap_description( # noqa: PLR0913 - text, - indentation, - wrap_length, - force_wrap, - strict, - 
rest_sections, - style: str = "sphinx", -): - """Return line-wrapped description text. - - We only wrap simple descriptions. We leave doctests, multi-paragraph text, and - bulleted lists alone. - - Parameters - ---------- - text : str - The unwrapped description text. - indentation : str - The indentation string. - wrap_length : int - The line length at which to wrap long lines. - force_wrap : bool - Whether to force docformatter to wrap long lines when normally they - would remain untouched. - strict : bool - Whether to strictly follow reST syntax to identify lists. - rest_sections : str - A regular expression used to find reST section header adornments. - style : str - The name of the docstring style to use when dealing with parameter - lists (default is sphinx). - - Returns - ------- - description : str - The description wrapped at wrap_length characters. - """ - text = strip_leading_blank_lines(text) - - # Do not modify doctests at all. - if ">>>" in text: - return text - - text = reindent(text, indentation).rstrip() - - # Ignore possibly complicated cases. - if wrap_length <= 0 or ( - not force_wrap - and ( - is_some_sort_of_code(text) - or do_find_directives(text) - or is_some_sort_of_list(text, strict, rest_sections, style) - ) - ): - return text - - lines = do_split_description(text, indentation, wrap_length, style) - - return indentation + "\n".join(lines).strip() - - -def _do_close_description( - text: str, - text_idx: int, - indentation: str, -) -> List[str]: - """Wrap any description following the last URL or field list. - - Parameters - ---------- - text : str - The docstring text. - text_idx : int - The index of the last URL or field list match. - indentation : str - The indentation string to use with docstrings. - - Returns - ------- - _split_lines : str - The text input split into individual lines. 
- """ - _split_lines = [] - with contextlib.suppress(IndexError): - _split_lines = ( - text[text_idx + 1 :] if text[text_idx] == "\n" else text[text_idx:] - ).splitlines() - for _idx, _line in enumerate(_split_lines): - if _line not in ["", "\n", f"{indentation}"]: - _split_lines[_idx] = f"{indentation}{_line.strip()}" - - return _split_lines - - -def _do_join_field_body(text, field_idx, idx): - """Join the filed body lines into a single line that can be wrapped. - - Parameters - ---------- - text : str - The docstring long description text that contains field lists. - field_idx : list - The list of tuples containing the found field list start and end position. - - Returns - ------- - _field_body : str - The field body collapsed into a single line. - """ - try: - _field_body = text[field_idx[idx][1] : field_idx[idx + 1][0]].strip() - except IndexError: - _field_body = text[field_idx[idx][1] :].strip() - - _field_body = " ".join( - [_line.strip() for _line in _field_body.splitlines()] - ).strip() - - # Add a space before the field body unless the field body is a link. - if not _field_body.startswith("`") and _field_body: - _field_body = f" {_field_body}" - - # Is there a blank line between field lists? Keep it if so. - if text[field_idx[idx][1] : field_idx[idx][1] + 2] == "\n\n": - _field_body = "\n" - - return _field_body - - -def _do_wrap_field(field_name, field_body, indentation, wrap_length): - """Wrap complete field at wrap_length characters. - - Parameters - ---------- - field_name : str - The name text of the field. - field_body : str - The body text of the field. - indentation : str - The string to use for indentation of the first line in the field. - wrap_length : int - The number of characters at which to wrap the field. - - Returns - ------- - _wrapped_field : str - The field wrapped at wrap_length characters. 
- """ - if len(indentation) > DEFAULT_INDENT: - _subsequent = indentation + int(0.5 * len(indentation)) * " " - else: - _subsequent = 2 * indentation - - _wrapped_field = textwrap.wrap( - textwrap.dedent(f"{field_name}{field_body}"), - width=wrap_length, - initial_indent=indentation, - subsequent_indent=_subsequent, - ) - - for _idx, _field in enumerate(_wrapped_field): - _indent = indentation if _idx == 0 else _subsequent - _wrapped_field[_idx] = f"{_indent}{re.sub(' +', ' ', _field.strip())}" - - return _wrapped_field - - -def _field_over_url( - field_idx: List[Tuple[int, int]], - url_idx: List[Tuple[int, int]], -): - """Remove URL indices that overlap with field list indices. - - Parameters - ---------- - field_idx : list - The list of field list index tuples. - url_idx : list - The list of URL index tuples. - - Returns - ------- - url_idx : list - The url_idx list with any tuples that have indices overlapping with field - list indices removed. - """ - if not field_idx: - return url_idx - - nonoverlapping_urls = [] - - any_param_start = min(e[0] for e in field_idx) - for _key, _value in enumerate(url_idx): - if _value[1] < any_param_start: - nonoverlapping_urls.append(_value) - return nonoverlapping_urls diff --git a/tests/_data/string_files/format_black.toml b/tests/_data/string_files/format_black.toml deleted file mode 100644 index 8d9e6007..00000000 --- a/tests/_data/string_files/format_black.toml +++ /dev/null @@ -1,75 +0,0 @@ -[quote_no_space] -instring='''""" This one-line docstring will not have a leading space."""''' -outstring='''"""This one-line docstring will not have a leading space."""''' - -[quote_space] -instring='''""""This" quote starting one-line docstring will have a leading space."""''' -outstring='''""" "This" quote starting one-line docstring will have a leading space."""''' - -[quote_space_2] -instring='''""""This" quote starting one-line docstring will have a leading space. 
- -This long description will be wrapped at 88 characters because we passed the --black option and 88 characters is the default wrap length. -"""''' -outstring='''""" "This" quote starting one-line docstring will have a leading space. - - This long description will be wrapped at 88 characters because we - passed the --black option and 88 characters is the default wrap - length. - """''' - -[strip_blank_lines] -instring=''' - class TestClass: - - """This is a class docstring.""" - - class_attribute = 1 - - def test_method_1(self): - """This is a method docstring. - - With no blank line after it. - """ - pass - - def test_method_2(self): - - """This is a method docstring. - - With a long description followed by multiple blank lines. - """ - - - pass''' -outstring=''' - class TestClass: - """This is a class docstring.""" - - class_attribute = 1 - - def test_method_1(self): - """This is a method docstring. - - With no blank line after it. - """ - pass - - def test_method_2(self): - """This is a method docstring. - - With a long description followed by multiple blank lines. - """ - pass''' - -[issue_176] -instring='''class C: - """Class.""" #noqa - - attr: int - """Attr."""''' -outstring='''class C: - """Class.""" #noqa - - attr: int - """Attr."""''' diff --git a/tests/_data/string_files/format_code.toml b/tests/_data/string_files/format_code.toml deleted file mode 100644 index d89b480e..00000000 --- a/tests/_data/string_files/format_code.toml +++ /dev/null @@ -1,324 +0,0 @@ -[non_docstring] -instring='''x = """This -is -not a -docstring."""''' -outstring='''x = """This -is -not a -docstring."""''' - -[tabbed_indentation] -instring='''def foo(): - """ - Hello foo. - """ - if True: - x = 1''' -outstring='''def foo(): - """Hello foo.""" - if True: - x = 1''' - -[mixed_indentation] -instring='''def foo(): - """ - Hello foo. 
- """ - if True: - x = 1''' -outstring='''def foo(): - """Hello foo.""" - if True: - x = 1''' - -[escaped_newlines] -instring='''def foo(): - """ - Hello foo. - """ - x = \ - 1''' -outstring='''def foo(): - """Hello foo.""" - x = \ - 1''' - -[code_comments] -instring='''def foo(): - """ - Hello foo. - """ - # My comment - # My comment with escape \ - 123''' -outstring='''def foo(): - """Hello foo.""" - # My comment - # My comment with escape \ - 123''' - -[inline_comment] -instring='''def foo(): - """ - Hello foo. - """ - def test_method_no_chr_92(): the501(92) # \''' -outstring='''def foo(): - """Hello foo.""" - def test_method_no_chr_92(): the501(92) # \''' - -[raw_lowercase] -instring='''def foo(): - r""" - Hello raw foo. - """''' -outstring='''def foo(): - r"""Hello raw foo."""''' - -[raw_uppercase] -instring='''def foo(): - R""" - Hello Raw foo. - """''' -outstring='''def foo(): - R"""Hello Raw foo."""''' - -[raw_lowercase_single] -instring="""def foo(): - r''' - Hello raw foo. - '''""" -outstring='''def foo(): - r"""Hello raw foo."""''' - -[raw_uppercase_single] -instring="""def foo(): - R''' - Hello Raw foo. - '''""" -outstring='''def foo(): - R"""Hello Raw foo."""''' - -[unicode_lowercase] -instring='''def foo(): - u""" - Hello unicode foo. - """''' -outstring='''def foo(): - u"""Hello unicode foo."""''' - -[unicode_uppercase] -instring='''def foo(): - U""" - Hello Unicode foo. - """''' -outstring='''def foo(): - U"""Hello Unicode foo."""''' - -[unicode_lowercase_single] -instring="""def foo(): - u''' - Hello unicode foo. - '''""" -outstring='''def foo(): - u"""Hello unicode foo."""''' - -[unicode_uppercase_single] -instring="""def foo(): - U''' - Hello Unicode foo. - '''""" -outstring='''def foo(): - U"""Hello Unicode foo."""''' - -[nested_triple] -instring="""def foo(): - '''Hello foo. \"\"\"abc\"\"\" - '''""" -outstring="""def foo(): - '''Hello foo. \"\"\"abc\"\"\" - '''""" - -[multiple_sentences] -instring='''def foo(): - """ - Hello foo. 
- This is a docstring. - """''' -outstring='''def foo(): - """Hello foo. - - This is a docstring. - """''' - -[multiple_sentences_same_line] -instring='''def foo(): - """ - Hello foo. This is a docstring. - """''' -outstring='''def foo(): - """Hello foo. - - This is a docstring. - """''' - -[multiline_summary] -instring='''def foo(): - """ - Hello - foo. This is a docstring. - """''' -outstring='''def foo(): - """Hello foo. - - This is a docstring. - """''' - -[empty_lines] -instring='''def foo(): - """ - Hello - foo and this is a docstring. - - More stuff. - """''' -outstring='''def foo(): - """Hello foo and this is a docstring. - - More stuff. - """''' - -[class_empty_lines] -instring='''class Foo: - """ - Hello - foo and this is a docstring. - - More stuff. - """''' -outstring='''class Foo: - """Hello foo and this is a docstring. - - More stuff. - """''' -instring_2='''def foo(): - class Foo: - - """Summary.""" - pass''' -outstring_2='''def foo(): - class Foo: - """Summary.""" - pass''' - -[method_empty_lines] -instring='''class Foo: - def foo(self): - - - """Summary.""" - pass''' -outstring='''class Foo: - def foo(self): - """Summary.""" - pass''' - -[trailing_whitespace] -instring='''def foo(): - """ - Hello - foo and this is a docstring. - - More stuff. - """''' -outstring='''def foo(): - """Hello foo and this is a docstring. - - More stuff. - """''' - -[parameter_list] -instring='''def foo(): - """Test - one - first - two - second - """''' -outstring='''def foo(): - """Test. 
- - one - first - two - second - """''' - -[single_quote] -instring="""def foo(): - 'Just a regular string' -""" -outstring="""def foo(): - 'Just a regular string' -""" - -[double_quote] -instring="""def foo(): - "Just a regular string" -""" -outstring="""def foo(): - "Just a regular string" -""" - -[nested_triple_quote] -instring='''def foo(): - 'Just a """foo""" string' -''' -outstring='''def foo(): - 'Just a """foo""" string' -''' - -[first_line_assignment] -instring='''def foo(): - x = """Just a regular string. Alpha.""" -''' -outstring='''def foo(): - x = """Just a regular string. Alpha.""" -''' - -[regular_strings] -instring='''def foo(): - """ - Hello - foo and this is a docstring. - - More stuff. - """ - x = """My non-docstring - This should not be touched.""" - - """More stuff - that should not be - touched """''' -outstring='''def foo(): - """Hello foo and this is a docstring. - - More stuff. - """ - x = """My non-docstring - This should not be touched.""" - - """More stuff - that should not be - touched """''' - -[syntax_error] -instring='''""" -''' -outstring='''""" -''' - -[slash_r] -instring='''"""\r''' -outstring='''"""\r''' - -[slash_r_slash_n] -instring='''"""\r\n''' -outstring='''"""\r\n''' diff --git a/tests/_data/string_files/format_code_ranges.toml b/tests/_data/string_files/format_code_ranges.toml deleted file mode 100644 index 2d7412ef..00000000 --- a/tests/_data/string_files/format_code_ranges.toml +++ /dev/null @@ -1,58 +0,0 @@ -[range_miss] -instring=''' - def f(x): - """ This is a docstring. That should be on more lines""" - pass - def g(x): - """ Badly indented docstring""" - pass''' -outstring=''' - def f(x): - """ This is a docstring. That should be on more lines""" - pass - def g(x): - """ Badly indented docstring""" - pass''' - -[range_hit] -instring=''' -def f(x): - """ This is a docstring. 
That should be on more lines""" - pass -def g(x): - """ Badly indented docstring""" - pass''' -outstring=''' -def f(x): - """This is a docstring. - - That should be on more lines - """ - pass -def g(x): - """ Badly indented docstring""" - pass''' - -[length_ignore] -instring=''' -def f(x): - """This is a docstring. - - - That should be on less lines - """ - pass -def g(x): - """ Badly indented docstring""" - pass''' -outstring=''' -def f(x): - """This is a docstring. - - - That should be on less lines - """ - pass -def g(x): - """Badly indented docstring.""" - pass''' diff --git a/tests/_data/string_files/format_epytext.toml b/tests/_data/string_files/format_epytext.toml deleted file mode 100644 index 94057d09..00000000 --- a/tests/_data/string_files/format_epytext.toml +++ /dev/null @@ -1,52 +0,0 @@ -[epytext] -instring='''"""Return line-wrapped description text. - -We only wrap simple descriptions. We leave doctests, multi-paragraph text, -and bulleted lists alone. See http://www.docformatter.com/. - -@param text: the text argument. -@param indentation: the super long description for the indentation argument that will require docformatter to wrap this line. -@param wrap_length: the wrap_length argument -@param force_wrap: the force_warp argument. -@return: really long description text wrapped at n characters and a very long description of the return value so we can wrap this line abcd efgh ijkl mnop qrst uvwx yz. -"""''' -outstring='''"""Return line-wrapped description text. - - We only wrap simple descriptions. We leave doctests, multi-paragraph text, and - bulleted lists alone. See - http://www.docformatter.com/. - - @param text: the text argument. - @param indentation: the super long description for the indentation argument that - will require docformatter to wrap this line. - @param wrap_length: the wrap_length argument - @param force_wrap: the force_warp argument. 
- @return: really long description text wrapped at n characters and a very long - description of the return value so we can wrap this line abcd efgh ijkl mnop - qrst uvwx yz. - """''' - -[epytext.numpy] -instring='''"""Return line-wrapped description text. - - We only wrap simple descriptions. We leave doctests, multi-paragraph text, - and bulleted lists alone. See http://www.docformatter.com/. - - @param text: the text argument. - @param indentation: the super long description for the indentation argument that will require docformatter to wrap this line. - @param wrap_length: the wrap_length argument - @param force_wrap: the force_warp argument. - @return: really long description text wrapped at n characters and a very long description of the return value so we can wrap this line abcd efgh ijkl mnop qrst uvwx yz. -"""''' -outstring='''"""Return line-wrapped description text. - - We only wrap simple descriptions. We leave doctests, multi-paragraph text, and - bulleted lists alone. See - http://www.docformatter.com/. - - @param text: the text argument. - @param indentation: the super long description for the indentation argument that will require docformatter to wrap this line. - @param wrap_length: the wrap_length argument - @param force_wrap: the force_warp argument. - @return: really long description text wrapped at n characters and a very long description of the return value so we can wrap this line abcd efgh ijkl mnop qrst uvwx yz. - """''' diff --git a/tests/_data/string_files/format_lists.toml b/tests/_data/string_files/format_lists.toml deleted file mode 100644 index 40e1d1bd..00000000 --- a/tests/_data/string_files/format_lists.toml +++ /dev/null @@ -1,80 +0,0 @@ -[numbered] -instring='''"""Hello. - - 1. This should be indented but it is not. The - next line should be indented too. But - this is okay. - """''' -outstring='''"""Hello. - - 1. This should be indented but it is not. The - next line should be indented too. But - this is okay. 
- """''' -[parameter.dash] -instring='''"""Hello. - - foo - This is a foo. This is a foo. This is a foo. This is a foo. This is. - bar - This is a bar. This is a bar. This is a bar. This is a bar. This is. - """''' -outstring='''"""Hello. - - foo - This is a foo. This is a foo. This is a foo. This is a foo. This is. - bar - This is a bar. This is a bar. This is a bar. This is a bar. This is. - """''' - -[parameter.colon] -instring='''"""Hello. - - foo: This is a foo. This is a foo. This is a foo. This is a foo. This is. - bar: This is a bar. This is a bar. This is a bar. This is a bar. This is. - """''' -outstring='''"""Hello. - - foo: This is a foo. This is a foo. This is a foo. This is a foo. This is. - bar: This is a bar. This is a bar. This is a bar. This is a bar. This is. - """''' - -[many.short.columns] -instring='''""" - one - two - three - four - five - six - seven - eight - nine - ten - eleven - """''' -outstring='''""" - one - two - three - four - five - six - seven - eight - nine - ten - eleven - """''' - -[issue_239] -instring='''"""CC. - - C. - - C - c :math:`[0, 1]`. - """''' -outstring='''"""CC. - - C. - - C - c :math:`[0, 1]`. - """''' diff --git a/tests/_data/string_files/format_sphinx.toml b/tests/_data/string_files/format_sphinx.toml deleted file mode 100644 index 4f784977..00000000 --- a/tests/_data/string_files/format_sphinx.toml +++ /dev/null @@ -1,285 +0,0 @@ -[sphinx] -instring='''"""Return line-wrapped description text. - - We only wrap simple descriptions. We leave doctests, multi-paragraph text, and bulleted lists alone. See http://www.docformatter.com/. - - :param str text: the text argument. - :param str indentation: the super long description for the indentation argument that will require docformatter to wrap this line. - :param int wrap_length: the wrap_length argument - :param bool force_wrap: the force_warp argument. 
- :return: really long description text wrapped at n characters and a very long description of the return value so we can wrap this line abcd efgh ijkl mnop qrst uvwx yz. - :rtype: str -"""''' -outstring='''"""Return line-wrapped description text. - - We only wrap simple descriptions. We leave doctests, multi-paragraph text, and - bulleted lists alone. See - http://www.docformatter.com/. - - :param str text: the text argument. - :param str indentation: the super long description for the indentation argument that - will require docformatter to wrap this line. - :param int wrap_length: the wrap_length argument - :param bool force_wrap: the force_warp argument. - :return: really long description text wrapped at n characters and a very long - description of the return value so we can wrap this line abcd efgh ijkl mnop - qrst uvwx yz. - :rtype: str - """''' - -[sphinx.numpy] -instring='''"""Return line-wrapped description text. - - We only wrap simple descriptions. We leave doctests, multi-paragraph text, and bulleted lists alone. See http://www.docformatter.com/. - - :param str text: the text argument. - :param str indentation: the super long description for the indentation argument that will require docformatter to wrap this line. - :param int wrap_length: the wrap_length argument - :param bool force_wrap: the force_warp argument. - :return: really long description text wrapped at n characters and a very long description of the return value so we can wrap this line abcd efgh ijkl mnop qrst uvwx yz. - :rtype: str -"""''' -outstring='''"""Return line-wrapped description text. - - We only wrap simple descriptions. We leave doctests, multi-paragraph text, and - bulleted lists alone. See - http://www.docformatter.com/. - - :param str text: the text argument. - :param str indentation: the super long description for the indentation argument that will require docformatter to wrap this line. 
- :param int wrap_length: the wrap_length argument - :param bool force_wrap: the force_warp argument. - :return: really long description text wrapped at n characters and a very long description of the return value so we can wrap this line abcd efgh ijkl mnop qrst uvwx yz. - :rtype: str - """''' - -[issue_215] -instring='''"""Create or return existing HTTP session. - - :return: Requests :class:`~requests.Session` object - """''' -outstring='''"""Create or return existing HTTP session. - - :return: Requests :class:`~requests.Session` object - """''' - -[issue_217_222] -instring='''"""Base for all Commands. - -:param logger: Logger for console and logfile. -:param console: Facilitates console interaction and input solicitation. -:param tools: Cache of tools populated by Commands as they are required. -:param apps: Dictionary of project's Apps keyed by app name. -:param base_path: Base directory for Briefcase project. -:param data_path: Base directory for Briefcase tools, support packages, etc. -:param is_clone: Flag that Command was triggered by the user's requested Command; - for instance, RunCommand can invoke UpdateCommand and/or BuildCommand. -"""''' -outstring='''"""Base for all Commands. - - :param logger: Logger for console and logfile. - :param console: Facilitates console interaction and input solicitation. - :param tools: Cache of tools populated by Commands as they are required. - :param apps: Dictionary of project's Apps keyed by app name. - :param base_path: Base directory for Briefcase project. - :param data_path: Base directory for Briefcase tools, support packages, etc. - :param is_clone: Flag that Command was triggered by the user's requested Command; - for instance, RunCommand can invoke UpdateCommand and/or BuildCommand. - """''' - -[issue_224] -instring='''""" -Add trackers to a torrent. 
- -:raises NotFound404Error: - -:param torrent_hash: hash for torrent -:param urls: tracker URLs to add to torrent -:return: None -"""''' -outstring='''"""Add trackers to a torrent. - - :raises NotFound404Error: - - :param torrent_hash: hash for torrent - :param urls: tracker URLs to add to torrent - :return: None - """''' - -[issue_228] -instring='''"""Configure application requirements by writing a requirements.txt file. - - :param app: The app configuration - :param requires: The full list of requirements - :param requirements_path: The full path to a requirements.txt file that - will be written. - """''' -outstring='''"""Configure application requirements by writing a requirements.txt file. - - :param app: The app configuration - :param requires: The full list of requirements - :param requirements_path: The full path to a requirements.txt file that will be - written. - """''' - -[issue_229] -instring='''"""CC. - - :meth:`!X` - """''' -outstring='''"""CC. - - :meth:`!X` - """''' - -[issue_229_2] -instring='''"""CC. - - :math: `-` - """''' -outstring='''"""CC. - - :math: `-` - """''' - -[issue_230] -instring='''"""CC. - - :math:`-` - :param d: blabla - :param list(str) l: more blabla. - """''' -outstring= '''"""CC. - - :math:`-` - :param d: blabla - :param list(str) l: more blabla. - """''' - -[issue_232] -instring='''def function: - """ - :param x: X - :param y: Y - """''' -outstring='''def function: - """ - :param x: X - :param y: Y - """''' - -[issue_234] -instring=''' """CC. - - :math:`f(0) = 1`. XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX - """''' -outstring='''"""CC. - - :math:`f(0) = 1`. XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX - """''' - -[issue_235] -instring='''"""CC. - - C. - - C, - :math:`[0, 1]`. -"""''' -outstring='''"""CC. - - C. - - C, :math:`[0, 1]`. - """''' - -[issue_239] -instring='''""" -Summary. 
- - :raises InvalidRequest400Error: - :raises NotFound404Error: - :raises Conflict409Error: - - :param param: asdf - """''' -outstring='''"""Summary. - - :raises InvalidRequest400Error: - :raises NotFound404Error: - :raises Conflict409Error: - - :param param: asdf - """''' - -[issue_245] -instring='''"""Some f. - :param a: Some param. - :raises my.package.MyReallySrsError: Bad things happened. - """''' -outstring='''"""Some f. - - :param a: Some param. - :raises my.package.MyReallySrsError: Bad things happened. - """''' - -[issue_250] -instring=''' """CC. - - c. - - c c :math:`[0, 1]`. - """''' -outstring='''"""CC. - - c. - - c c :math:`[0, 1]`. - """''' - -[issue_253] -instring='''""" - My test fixture. - - :param caplog: Pytest caplog fixture. - :yield: Until test complete, then run cleanup. - """''' -outstring='''""" - My test fixture. - - :param caplog: Pytest caplog fixture. - :yield: Until test complete, then run cleanup. - """''' - -[issue_271] -instring='''""" - My test fixture. - - :ivar id: A unique identifier for the element, automatically generated upon instantiation. - :vartype id: str - :ivar created: Timestamp when the element was created, defaults to the current time. - :vartype created: datetime - :cvar modified: Timestamp when the element was last modified, can be None if not modified. - :vartype modified: Optional[datetime] - :cvar in_project: List of projects this element is part of. Direct modification is restricted. - :vartype in_project: list[Project] - :param caplog: Pytest caplog fixture. - :yield: Until test complete, then run cleanup. - """''' -outstring='''""" - My test fixture. - - :ivar id: A unique identifier for the element, automatically generated upon - instantiation. - :vartype id: str - :ivar created: Timestamp when the element was created, defaults to the current time. - :vartype created: datetime - :cvar modified: Timestamp when the element was last modified, can be None if not - modified. 
- :vartype modified: Optional[datetime] - :cvar in_project: List of projects this element is part of. Direct modification is - restricted. - :vartype in_project: list[Project] - :param caplog: Pytest caplog fixture. - :yield: Until test complete, then run cleanup. - """''' diff --git a/tests/_data/string_files/format_style_options.toml b/tests/_data/string_files/format_style_options.toml deleted file mode 100644 index 9f18c433..00000000 --- a/tests/_data/string_files/format_style_options.toml +++ /dev/null @@ -1,38 +0,0 @@ -[no_blank] -instring='''""" - -Hello. - - Description. - - - """''' -outstring='''"""Hello. - - Description. - """''' - -[presummary_newline] -instring='''""" - -Hello. - - Description. - - - """''' -outstring='''""" - Hello. - - Description. - """''' - -[summary_multiline] -instring='''"""This one-line docstring will be multi-line"""''' -outstring='''""" - This one-line docstring will be multi-line. - """''' - -[presummary_space] -instring='''"""This one-line docstring will have a leading space."""''' -outstring='''""" This one-line docstring will have a leading space."""''' diff --git a/tests/_data/string_files/format_urls.toml b/tests/_data/string_files/format_urls.toml deleted file mode 100644 index c898827f..00000000 --- a/tests/_data/string_files/format_urls.toml +++ /dev/null @@ -1,648 +0,0 @@ -[inline] -instring='''"""This is a docstring with a link. - - Here is an elaborate description containing a link. - `Area Under the Receiver Operating Characteristic Curve (ROC AUC) - `_. - """''' -outstring='''"""This is a docstring with a link. - - Here is an elaborate description containing a link. `Area Under the - Receiver Operating Characteristic Curve (ROC AUC) - `_. - """''' - -[inline.short] -instring='''"""This is yanf with a short link. - - See `the link `_ for more details. - """''' -outstring='''"""This is yanf with a short link. - - See `the link `_ for more details. - """''' - -[inline.long] -instring='''"""Helpful docstring. 
- - A larger description that starts here. https://github.com/apache/kafka/blob/2.5/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java - A larger description that ends here. - """''' -outstring='''"""Helpful docstring. - - A larger description that starts here. - https://github.com/apache/kafka/blob/2.5/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java - A larger description that ends here. - """''' - -[only.link] -instring='''""" - `Source of this snippet - `_. - """''' -outstring='''""" - `Source of this snippet - `_. - """''' - -[issue_75] -instring='''"""This is another docstring with `a link`_. - - .. a link: http://www.reliqual.com/wiki/how_to_use_ramstk/verification_and_validation_module/index.html. - """''' -outstring='''"""This is another docstring with `a link`_. - - .. a link: http://www.reliqual.com/wiki/how_to_use_ramstk/verification_and_validation_module/index.html. - """''' - -[issue_75_2] -instring='''"""This is another docstring with a link. - - See http://www.reliqual.com/wiki/how_to_use_ramstk/verification_and_validation_module/index.html for additional information. - """''' -outstring='''"""This is another docstring with a link. - - See - http://www.reliqual.com/wiki/how_to_use_ramstk/verification_and_validation_module/index.html - for additional information. - """''' - -[issue_75_3] -instring='''"""This is yanf with a short link. - - See http://www.reliaqual.com for examples. - """''' -outstring='''"""This is yanf with a short link. - - See http://www.reliaqual.com for examples. - """''' - -[issue_140] -instring='''"""This is a docstring with a link that causes a wrap. - - See `the link `_ for more details. - """''' -outstring='''"""This is a docstring with a link that causes a wrap. - - See - `the link `_ - for more details. - """''' - -[issue_140_2] -instring='''"""Helpful docstring. - - A larger description that starts here. 
- https://github.com/apache/kafka/blob/2.5/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java - A larger description that ends here. - """''' -outstring='''"""Helpful docstring. - - A larger description that starts here. - https://github.com/apache/kafka/blob/2.5/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java - A larger description that ends here. - """''' - -[issue_140_3] -instring='''"""Do something. - - See https://www.postgresql.org/docs/current/static/role-removal.html - """''' -outstring='''"""Do something. - - See - https://www.postgresql.org/docs/current/static/role-removal.html - """''' - -[issue_145] -instring='''""" - - .. _linspace API: https://numpy.org/doc/stable/reference/generated/numpy.linspace.html - .. _arange API: https://numpy.org/doc/stable/reference/generated/numpy.arange.html - .. _logspace API: https://numpy.org/doc/stable/reference/generated/numpy.logspace.html - """''' -outstring='''""" - - .. _linspace API: https://numpy.org/doc/stable/reference/generated/numpy.linspace.html - .. _arange API: https://numpy.org/doc/stable/reference/generated/numpy.arange.html - .. _logspace API: https://numpy.org/doc/stable/reference/generated/numpy.logspace.html - """''' - -[issue_150] -instring='''""" - Translates incoming json to a processable Entity. - - Stackoverflow reference: - """''' -outstring='''"""Translates incoming json to a processable Entity. - - Stackoverflow reference: - """''' - -[issue_157] -instring='''"""Get the Python type of a Click parameter. - - See the list of `custom types provided by Click - `_. - """''' -outstring='''"""Get the Python type of a Click parameter. - - See the list of - `custom types provided by Click `_. - """''' - -[issue_157_2] -instring='''"""Fetch parameters values from configuration file and merge them with the - defaults. - - User configuration is `merged to the context default_map as Click does - `_. 
- - This allow user's config to only overrides defaults. Values sets from direct - command line parameters, environment variables or interactive prompts, takes - precedence over any values from the config file. -"""''' -outstring='''"""Fetch parameters values from configuration file and merge them with the - defaults. - - User configuration is - `merged to the context default_map as Click does `_. - - This allow user's config to only overrides defaults. Values sets from direct - command line parameters, environment variables or interactive prompts, takes - precedence over any values from the config file. - """''' - -[issue_157_3] -instring='''"""Introspects current CLI and list its parameters and metadata. - - .. important:: - Click doesn't keep a list of all parsed arguments and their origin. - So we need to emulate here what's happening during CLI invokation. - But can't even to that because the raw, pre-parsed arguments are - not available anywhere. - """''' -outstring='''"""Introspects current CLI and list its parameters and metadata. - - .. important:: - Click doesn't keep a list of all parsed arguments and their origin. - So we need to emulate here what's happening during CLI invokation. - But can't even to that because the raw, pre-parsed arguments are - not available anywhere. - """''' - -[issue_157_4] -instring='''"""Search on local file system or remote URL files matching the provided pattern. - - ``pattern`` is considered as an URL only if it is parseable as such - and starts with ``http://`` or ``https://``. - - .. important:: - - This is a straight `copy of the functools.cache implementation - `_, - which is only `available in the standard library starting with Python v3.9 - `. - """''' -outstring='''"""Search on local file system or remote URL files matching the provided - pattern. - - ``pattern`` is considered as an URL only if it is parseable as such - and starts with ``http://`` or ``https://``. - - .. 
important:: - - This is a straight `copy of the functools.cache implementation - `_, - which is only `available in the standard library starting with Python v3.9 - `. - """''' - -[issue_157_5] -instring='''"""Locate and call the ``mpm`` CLI. - - The output must supports both `Xbar dialect - `_ - and `SwiftBar dialect `_. - """''' -outstring='''"""Locate and call the ``mpm`` CLI. - - The output must supports both - `Xbar dialect `_ - and `SwiftBar dialect `_. - """''' - -[issue_157_6] -instring='''"""Install one or more packages. - - Installation will proceed first with packages unambiguously tied to a manager. You can have an - influence on that with more precise package specifiers (like purl) and/or tighter selection of managers. - - For other untied packages, mpm will try to find the best manager to install it with. Their installation - will be attempted with each manager, in the order they were selected. If we have the certainty, by the way - of a search operation, that this package is not available from this manager, we'll skip the installation - and try the next available manager. - """''' -outstring='''"""Install one or more packages. - - Installation will proceed first with packages unambiguously tied to a manager. You - can have an influence on that with more precise package specifiers (like purl) - and/or tighter selection of managers. - - For other untied packages, mpm will try to find the best manager to install it with. - Their installation will be attempted with each manager, in the order they were - selected. If we have the certainty, by the way of a search operation, that this - package is not available from this manager, we'll skip the installation and try the - next available manager. - """''' - -[issue_157_7] -instring='''def hanging_rest_link(): - """ - `Source of this snippet - `_. - """ - -def sub_func_test(): - - def long_line_link(): - """Get the Python type of a Click parameter. - - See the list of `custom types provided by Click - `_. 
- """''' -outstring='''def hanging_rest_link(): - """ - `Source of this snippet - `_. - """ - -def sub_func_test(): - - def long_line_link(): - """Get the Python type of a Click parameter. - - See the list of - `custom types provided by Click `_. - """''' - -[issue_157_8] -instring='''def mixed_links(): - """Implements the minimal code necessary to locate and call the ``mpm`` CLI on the system. - - Once ``mpm`` is located, we can rely on it to produce the main output of the plugin. - - The output must supports both `Xbar dialect - `_ - and `SwiftBar dialect `_. - """ - -XKCD_MANAGER_ORDER = ("pip", "brew", "npm", "dnf", "apt", "steamcmd") -"""Sequence of package managers as defined by `XKCD #1654: Universal Install Script -`_. - -See the corresponding :issue:`implementation rationale in issue #10 <10>`. -""" - -HASH_HEADERS = ( - "Date", - "From", - "To", -) -""" -Default ordered list of headers to use to compute the unique hash of a mail. - -By default we choose to exclude: - -``Cc`` - Since ``mailman`` apparently `sometimes trims list members - `_ - from the ``Cc`` header to avoid sending duplicates. Which means that copies of mail - reflected back from the list server will have a different ``Cc`` to the copy saved by - the MUA at send-time. - -``Bcc`` - Because copies of the mail saved by the MUA at send-time will have ``Bcc``, but copies - reflected back from the list server won't. - -``Reply-To`` - Since a mail could be ``Cc``'d to two lists with different ``Reply-To`` munging - options set. -"""''' -outstring='''def mixed_links(): - """Implements the minimal code necessary to locate and call the ``mpm`` CLI - on the system. - - Once ``mpm`` is located, we can rely on it to produce the main output of the plugin. - - The output must supports both `Xbar dialect - `_ - and `SwiftBar dialect `_. - """ - -XKCD_MANAGER_ORDER = ("pip", "brew", "npm", "dnf", "apt", "steamcmd") -"""Sequence of package managers as defined by `XKCD #1654: Universal Install Script -`_. 
- -See the corresponding :issue:`implementation rationale in issue #10 <10>`. -""" - -HASH_HEADERS = ( - "Date", - "From", - "To", -) -"""Default ordered list of headers to use to compute the unique hash of a mail. - -By default we choose to exclude: - -``Cc`` - Since ``mailman`` apparently `sometimes trims list members - `_ - from the ``Cc`` header to avoid sending duplicates. Which means that copies of mail - reflected back from the list server will have a different ``Cc`` to the copy saved by - the MUA at send-time. - -``Bcc`` - Because copies of the mail saved by the MUA at send-time will have ``Bcc``, but copies - reflected back from the list server won't. - -``Reply-To`` - Since a mail could be ``Cc``'d to two lists with different ``Reply-To`` munging - options set. -"""''' - -[issue_157_9] -instring='''def load_conf(): - """Fetch parameters values from configuration file and merge them with the - defaults. - - User configuration is `merged to the context default_map as Click does - `_. - - This allow user's config to only overrides defaults. Values sets from direct - command line parameters, environment variables or interactive prompts, takes - precedence over any values from the config file. - """ - - -strict_selection_match = False -""" -Install sub-command try each user-selected manager until it find one providing -the package we seek to install, after which the process stop. This mean not all -managers will be called, so we allow the CLI output checks to partially match. -""" - - -platforms = {"LINUX", "MACOS", "WSL2"} -"""Homebrew core is now compatible with `Linux and Windows Subsystem for Linux -(WSL) 2 `_. -"""''' -outstring='''def load_conf(): - """Fetch parameters values from configuration file and merge them with the - defaults. - - User configuration is - `merged to the context default_map as Click does `_. - - This allow user's config to only overrides defaults. 
Values sets from direct - command line parameters, environment variables or interactive prompts, takes - precedence over any values from the config file. - """ - - -strict_selection_match = False -"""Install sub-command try each user-selected manager until it find one -providing the package we seek to install, after which the process stop. - -This mean not all managers will be called, so we allow the CLI output checks to -partially match. -""" - - -platforms = {"LINUX", "MACOS", "WSL2"} -"""Homebrew core is now compatible with `Linux and Windows Subsystem for Linux -(WSL) 2 `_. -"""''' - -[issue_157_10] -instring='''"""Patch and tweak `Python's standard library mail box constructors. - -`_ to set sane defaults. - -Also forces out our own message factories to add deduplication tools and utilities. -""" - - -"""Patch and tweak `Python's standard library mail box constructors -`_ to set sane defaults. - -Also forces out our own message factories to add deduplication tools and utilities. -""" - - -def generate_platforms_graph( - graph_id: str, description: str, groups: frozenset -) -> str: - """Generates an `Euler diagram `_ of platform and their - grouping. - - Euler diagrams are - `not supported by mermaid yet `_ - so we fallback on a flowchart - without arrows. - - Returns a ready to use and properly indented MyST block. - """ - - -def load_conf(self, ctx, param, path_pattern): - """Fetch parameters values from configuration file and merge them with the - defaults. - - User configuration is `merged to the context default_map as Click does - `_. - - - This allow user's config to only overrides defaults. Values sets from direct - command line parameters, environment variables or interactive prompts, takes - precedence over any values from the config file. - """ - - -def pytest_addoption(parser): - """Add custom command line options. - - Based on `Pytest's documentation examples - `_. - - By default, runs non-destructive tests and skips destructive ones. 
- """''' -outstring='''"""Patch and tweak `Python's standard library mail box constructors. - -`_ to set sane defaults. - -Also forces out our own message factories to add deduplication tools and utilities. -""" - -"""Patch and tweak `Python's standard library mail box constructors -`_ to set sane defaults. - -Also forces out our own message factories to add deduplication tools and utilities. -""" - - -def generate_platforms_graph( - graph_id: str, description: str, groups: frozenset -) -> str: - """Generates an `Euler diagram `_ of platform and their - grouping. - - Euler diagrams are - `not supported by mermaid yet `_ - so we fallback on a flowchart - without arrows. - - Returns a ready to use and properly indented MyST block. - """ - - -def load_conf(self, ctx, param, path_pattern): - """Fetch parameters values from configuration file and merge them with the - defaults. - - User configuration is - `merged to the context default_map as Click does `_. - - - This allow user's config to only overrides defaults. Values sets from direct - command line parameters, environment variables or interactive prompts, takes - precedence over any values from the config file. - """ - - -def pytest_addoption(parser): - """Add custom command line options. - - Based on `Pytest's documentation examples - `_. - - By default, runs non-destructive tests and skips destructive ones. - """''' - -[issue_157_11] -instring='''"""Fetch parameters values from configuration file and merge them with the defaults. - - User configuration is `merged to the context default_map as Click does - `_. - - This allow user's config to only overrides defaults. Values sets from direct - command line parameters, environment variables or interactive prompts, takes - precedence over any values from the config file. - """''' -outstring='''"""Fetch parameters values from configuration file and merge them with the - defaults. - - User configuration is - `merged to the context default_map as Click does `_. 
- - This allow user's config to only overrides defaults. Values sets from direct - command line parameters, environment variables or interactive prompts, takes - precedence over any values from the config file. - """''' - -[issue_159] -instring='''"""Blah blah. - - This will normally be used with https://aaaaaaaa.bbb.ccccccccc.com/xxxxx/xxx_xxxxxxxxxxx to generate the xxx - """''' -outstring='''"""Blah blah. - - This will normally be used with - https://aaaaaaaa.bbb.ccccccccc.com/xxxxx/xxx_xxxxxxxxxxx - to generate the xxx - """''' - -[issue_180] -instring='''"""Django settings for webapp project. - - Generated by 'django-admin startproject' using Django 4.1.1. - - For more information on this file, see - https://docs.djangoproject.com/en/4.1/topics/settings/ - - For the full list of settings and their values, see - https://docs.djangoproject.com/en/4.1/ref/settings/ - """''' -outstring='''"""Django settings for webapp project. - - Generated by 'django-admin startproject' using Django 4.1.1. - - For more information on this file, see - https://docs.djangoproject.com/en/4.1/topics/settings/ - - For the full list of settings and their values, see - https://docs.djangoproject.com/en/4.1/ref/settings/ - """''' - -[issue_189] -instring='''"""This method doesn't do anything. - - https://example.com/this-is-just-a-long-url/designed-to-trigger/the-wrapping-of-the-description - """''' -outstring='''"""This method doesn't do anything. - - https://example.com/this-is-just-a-long-url/designed-to-trigger/the-wrapping-of-the-description - """''' - -[issue_199] -instring='''""" - This is a short desription. - - Here is a link to the github issue - https://github.com/PyCQA/docformatter/issues/199 - - This is a long description. - """''' -outstring='''"""This is a short desription. - - Here is a link to the github issue - https://github.com/PyCQA/docformatter/issues/199 - - This is a long description. - """''' - -[issue_210] -instring='''"""Short description. 
- -This graphics format generates terminal escape codes that transfer -PNG data to a TTY using the `kitty graphics protocol`__. - -__ https://sw.kovidgoyal.net/kitty/graphics-protocol/ -"""''' -outstring='''"""Short description. - - This graphics format generates terminal escape codes that transfer - PNG data to a TTY using the `kitty graphics protocol`__. - - __ https://sw.kovidgoyal.net/kitty/graphics-protocol/ - """''' - -[issue_218] -instring='''"""Construct a candidate project URL from the bundle and app name. - -It's not a perfect guess, but it's better than having "https://example.com". - -:param bundle: The bundle identifier. -:param app_name: The app name. -:returns: The candidate project URL -"""''' -outstring='''"""Construct a candidate project URL from the bundle and app name. - - It's not a perfect guess, but it's better than having - "https://example.com". - - :param bundle: The bundle identifier. - :param app_name: The app name. - :returns: The candidate project URL - """''' diff --git a/tests/_data/string_files/format_wrap.toml b/tests/_data/string_files/format_wrap.toml deleted file mode 100644 index 9acb1116..00000000 --- a/tests/_data/string_files/format_wrap.toml +++ /dev/null @@ -1,133 +0,0 @@ -[unwrap] -instring='''"This - - is - -a sentence."''' -outstring='''"This is a sentence."''' - -[weird_punctuation] -instring='''"""Creates and returns four was awakens to was created tracked - ammonites was the fifty, arithmetical four was pyrotechnic to - pyrotechnic physicists. `four' falsified x falsified ammonites - to awakens to. `created' to ancestor was four to x dynamo to was - four ancestor to physicists(). - """''' -outstring='''"""Creates and returns four was awakens to was created tracked ammonites - was the fifty, arithmetical four was pyrotechnic to pyrotechnic physicists. - - `four' falsified x falsified ammonites to awakens to. `created' to - ancestor was four to x dynamo to was four ancestor to physicists(). 
- """''' - -[description_wrap] -instring='''"""Hello. - - This should be indented but it is not. The - next line should be indented too. But - this is okay. - - """''' -outstring='''"""Hello. - - This should be indented but it is not. The next line should be - indented too. But this is okay. - """''' - -[ignore_doctest] -instring='''"""Hello. - - >>> 4 - 4 - """''' -outstring='''"""Hello. - - >>> 4 - 4 - """''' - -[ignore_summary_doctest] -instring='''""" - >>> 4 - 4 - - """''' -outstring='''""" - >>> 4 - 4 - - """''' - -[same_indentation_doctest] -instring='''"""Foo bar bing bang. - - >>> tests = DocTestFinder().find(_TestClass) - >>> runner = DocTestRunner(verbose=False) - >>> tests.sort(key = lambda test: test.name) - - """''' -outstring='''"""Foo bar bing bang. - - >>> tests = DocTestFinder().find(_TestClass) - >>> runner = DocTestRunner(verbose=False) - >>> tests.sort(key = lambda test: test.name) - """''' - -[force_wrap] -instring='''""" -num_iterations is the number of updates - instead of a better definition of convergence. -"""''' -outstring='''"""num_iterations is the number of updates - - instead of a better definition of - convergence."""''' - -[summary_wrap_tab] -instring=''' """Some summary x x x x."""''' -outstring='''"""Some summary x x x - x."""''' - -[one_line_wrap_newline] -instring='''"""This one-line docstring will be multi-line because it's quite long."""''' -outstring='''"""This one-line docstring will be multi-line because it's quite - long. 
- """''' - -[one_line_no_wrap] -instring='''"""This one-line docstring will not be wrapped and quotes will be in-line."""''' -outstring='''"""This one-line docstring will not be wrapped and quotes will be in-line."""''' - -[class_attribute_wrap] -instring='''class TestClass: - """This is a class docstring.""" - - test_int = 1 - """This is a very, very, very long docstring that should really be reformatted nicely by docformatter."""''' -outstring='''class TestClass: - """This is a class docstring.""" - - test_int = 1 - """This is a very, very, very long docstring that should really be - reformatted nicely by docformatter."""''' - -[issue_79] -instring='''def function2(): - """Hello yeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeet - -v."""''' -outstring='''def function2(): - """Hello yeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeet - -v."""''' - -[issue_127] -instring='''"""My awesome function. - - This line is quite long. In fact is it longer than one hundred and twenty characters so it should be wrapped but it is not. - - It doesn't wrap because of this line and the blank line in between! Delete them and it will wrap. - """''' -outstring='''"""My awesome function. - - This line is quite long. In fact is it longer than one hundred and twenty characters so it should be wrapped but it - is not. - - It doesn't wrap because of this line and the blank line in between! Delete them and it will wrap. 
- """''' diff --git a/tests/formatter/test_format_black.py b/tests/formatter/test_format_black.py deleted file mode 100644 index 9bd20a4a..00000000 --- a/tests/formatter/test_format_black.py +++ /dev/null @@ -1,169 +0,0 @@ -# pylint: skip-file -# type: ignore -# -# tests.test_format_black.py is part of the docformatter project -# -# Copyright (C) 2012-2023 Steven Myint -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-"""Module for testing the Formatter class with the --black option.""" - - -# Standard Library Imports -import contextlib -import sys - -with contextlib.suppress(ImportError): - if sys.version_info >= (3, 11): - # Standard Library Imports - import tomllib - else: - # Third Party Imports - import tomli as tomllib - -# Third Party Imports -import pytest - -# docformatter Package Imports -from docformatter import Formatter - -INDENTATION = " " - - -class TestFormatWrapBlack: - """Class for testing _do_format_docstring() with line wrapping and black option.""" - - with open("tests/_data/string_files/format_black.toml", "rb") as f: - TEST_STRINGS = tomllib.load(f) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--black", - "", - ] - ], - ) - def test_format_docstring_black( - self, - test_args, - args, - ): - """Format with black options when --black specified. - - Add a space between the opening quotes and the summary if content starts with a - quote. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["quote_no_space"]["instring"] - outstring = self.TEST_STRINGS["quote_no_space"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - instring = self.TEST_STRINGS["quote_space"]["instring"] - outstring = self.TEST_STRINGS["quote_space"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - instring = self.TEST_STRINGS["quote_space_2"]["instring"] - outstring = self.TEST_STRINGS["quote_space_2"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--black", - "", - ] - ], - ) - def test_format_code_strip_blank_lines( - self, - test_args, - args, - ): - """Blank lines are stripped in black mode.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = 
self.TEST_STRINGS["strip_blank_lines"]["instring"] - outstring = self.TEST_STRINGS["strip_blank_lines"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--black", - "", - ] - ], - ) - def test_format_docstring_black_keep_newline_after_comment( - self, - test_args, - args, - ): - """Retain the newline after a docstring with an inline comment. - - See issue #176. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_176"]["instring"] - outstring = self.TEST_STRINGS["issue_176"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) diff --git a/tests/formatter/test_format_code.py b/tests/formatter/test_format_code.py deleted file mode 100644 index a62664c4..00000000 --- a/tests/formatter/test_format_code.py +++ /dev/null @@ -1,609 +0,0 @@ -# pylint: skip-file -# type: ignore -# -# tests.test_format_code.py is part of the docformatter project -# -# Copyright (C) 2012-2023 Steven Myint -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -"""Module for testing the Formattor._format_code() method.""" - -# Standard Library Imports -import contextlib -import sys - -with contextlib.suppress(ImportError): - if sys.version_info >= (3, 11): - # Standard Library Imports - import tomllib - else: - # Third Party Imports - import tomli as tomllib - -# Third Party Imports -import pytest - -# docformatter Package Imports -from docformatter import Formatter - - -class TestFormatCode: - """Class for testing _format_code() with no arguments.""" - - with open("tests/_data/string_files/format_code.toml", "rb") as f: - TEST_STRINGS = tomllib.load(f) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_should_ignore_non_docstring(self, test_args, args): - """Should ignore triple quoted strings that are assigned values.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["non_docstring"]["instring"] - outstring = self.TEST_STRINGS["non_docstring"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_empty_string(self, test_args, args): - """Should do nothing with an empty string.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - assert not uut._format_code("") - assert not uut._format_code("") - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_tabs(self, test_args, args): - """Should retain tabbed indentation.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["tabbed_indentation"]["instring"] - outstring = 
self.TEST_STRINGS["tabbed_indentation"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_mixed_tabs(self, test_args, args): - """Should retain mixed tabbed and spaced indentation.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["mixed_indentation"]["instring"] - outstring = self.TEST_STRINGS["mixed_indentation"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_escaped_newlines(self, test_args, args): - """Should leave escaped newlines in code untouched.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["escaped_newlines"]["instring"] - outstring = self.TEST_STRINGS["escaped_newlines"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_comments(self, test_args, args): - """Should leave comments as is.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["code_comments"]["instring"] - outstring = self.TEST_STRINGS["code_comments"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_escaped_newline_in_inline_comment(self, test_args, args): - """Should leave code with inline comment as is.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["inline_comment"]["instring"] - outstring = self.TEST_STRINGS["inline_comment"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def 
test_format_code_raw_docstring_double_quotes(self, test_args, args): - """Should format raw docstrings with triple double quotes. - - See requirement PEP_257_2. See issue #54 for request to handle raw docstrings. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["raw_lowercase"]["instring"] - outstring = self.TEST_STRINGS["raw_lowercase"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - instring = self.TEST_STRINGS["raw_uppercase"]["instring"] - outstring = self.TEST_STRINGS["raw_uppercase"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_raw_docstring_single_quotes(self, test_args, args): - """Should format raw docstrings with triple single quotes. - - See requirement PEP_257_2. See issue #54 for request to handle raw docstrings. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["raw_lowercase_single"]["instring"] - outstring = self.TEST_STRINGS["raw_lowercase_single"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - instring = self.TEST_STRINGS["raw_uppercase_single"]["instring"] - outstring = self.TEST_STRINGS["raw_uppercase_single"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_unicode_docstring_double_quotes(self, test_args, args): - """Should format unicode docstrings with triple double quotes. - - See requirement PEP_257_3. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["unicode_lowercase"]["instring"] - outstring = self.TEST_STRINGS["unicode_lowercase"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - instring = self.TEST_STRINGS["unicode_uppercase"]["instring"] - outstring = self.TEST_STRINGS["unicode_uppercase"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_unicode_docstring_single_quotes(self, test_args, args): - """Should format unicode docstrings with triple single quotes. - - See requirement PEP_257_3. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["unicode_lowercase_single"]["instring"] - outstring = self.TEST_STRINGS["unicode_lowercase_single"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - instring = self.TEST_STRINGS["unicode_uppercase_single"]["instring"] - outstring = self.TEST_STRINGS["unicode_uppercase_single"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_skip_nested(self, test_args, args): - """Should ignore nested triple quotes.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["nested_triple"]["instring"] - outstring = self.TEST_STRINGS["nested_triple"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_multiple_sentences(self, test_args, args): - """Should create multi-line docstring from multiple sentences.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["multiple_sentences"]["instring"] - outstring = 
self.TEST_STRINGS["multiple_sentences"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_multiple_sentences_same_line(self, test_args, args): - """Should create multi-line docstring from multiple sentences.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["multiple_sentences_same_line"]["instring"] - outstring = self.TEST_STRINGS["multiple_sentences_same_line"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_multiple_sentences_multi_line_summary( - self, test_args, args - ): - """Should put summary line on a single line.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["multiline_summary"]["instring"] - outstring = self.TEST_STRINGS["multiline_summary"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_empty_lines(self, test_args, args): - """Summary line on one line when wrapped, followed by empty line.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["empty_lines"]["instring"] - outstring = self.TEST_STRINGS["empty_lines"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_empty_lines_class_docstring(self, test_args, args): - """No blank lines before a class's docstring.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["class_empty_lines"]["instring"] - outstring = self.TEST_STRINGS["class_empty_lines"]["outstring"] - - assert outstring == uut._format_code( - 
instring, - ) - - instring = self.TEST_STRINGS["class_empty_lines"]["instring_2"] - outstring = self.TEST_STRINGS["class_empty_lines"]["outstring_2"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_empty_lines_method_docstring(self, test_args, args): - """No blank lines before a method's docstring.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["method_empty_lines"]["instring"] - outstring = self.TEST_STRINGS["method_empty_lines"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_trailing_whitespace(self, test_args, args): - """Should strip trailing whitespace.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["trailing_whitespace"]["instring"] - outstring = self.TEST_STRINGS["trailing_whitespace"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_parameters_list(self, test_args, args): - """Should treat parameters list as elaborate description.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["parameter_list"]["instring"] - outstring = self.TEST_STRINGS["parameter_list"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_ignore_code_with_single_quote(self, test_args, args): - """Single single quote on first line of code should remain untouched. - - See requirement PEP_257_1. See issue #66 for example of docformatter breaking - code when encountering single quote. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["single_quote"]["instring"] - outstring = self.TEST_STRINGS["single_quote"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_ignore_code_with_double_quote(self, test_args, args): - """Single double quotes on first line of code should remain untouched. - - See requirement PEP_257_1. See issue #66 for example of docformatter breaking - code when encountering single quote. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["double_quote"]["instring"] - outstring = self.TEST_STRINGS["double_quote"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_should_skip_nested_triple_quotes(self, test_args, args): - """Should ignore triple quotes nested in a string.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["nested_triple_quote"]["instring"] - outstring = self.TEST_STRINGS["nested_triple_quote"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_assignment_on_first_line(self, test_args, args): - """Should ignore triple quotes in variable assignment.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["first_line_assignment"]["instring"] - outstring = self.TEST_STRINGS["first_line_assignment"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_regular_strings_too(self, test_args, args): - """Should ignore triple quoted strings after the 
docstring.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["regular_strings"]["instring"] - outstring = self.TEST_STRINGS["regular_strings"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_syntax_error(self, test_args, args): - """Should ignore single set of triple quotes followed by newline.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["syntax_error"]["instring"] - outstring = self.TEST_STRINGS["syntax_error"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_syntax_error_case_slash_r(self, test_args, args): - """Should ignore single set of triple quotes followed by return.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["slash_r"]["instring"] - outstring = self.TEST_STRINGS["slash_r"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_syntax_error_case_slash_r_slash_n(self, test_args, args): - """Should ignore single triple quote followed by return, newline.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["slash_r_slash_n"]["instring"] - outstring = self.TEST_STRINGS["slash_r_slash_n"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) diff --git a/tests/formatter/test_format_code_ranges.py b/tests/formatter/test_format_code_ranges.py deleted file mode 100644 index 4ffb4c49..00000000 --- a/tests/formatter/test_format_code_ranges.py +++ /dev/null @@ -1,107 +0,0 @@ -# pylint: skip-file -# type: ignore -# -# tests.test_format_code_ranges.py is part of 
the docformatter project -# -# Copyright (C) 2012-2023 Steven Myint -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-"""Module for testing the Formattor._format_code() method with ranges and lengths.""" - -# Standard Library Imports -import contextlib -import sys - -with contextlib.suppress(ImportError): - if sys.version_info >= (3, 11): - # Standard Library Imports - import tomllib - else: - # Third Party Imports - import tomli as tomllib - -# Third Party Imports -import pytest - -# docformatter Package Imports -from docformatter import Formatter - - -class TestFormatCodeRanges: - """Class for testing _format_code() with the line_range or length_range - arguments.""" - - with open("tests/_data/string_files/format_code_ranges.toml", "rb") as f: - TEST_STRINGS = tomllib.load(f) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--range", "1", "1", ""]]) - def test_format_code_range_miss(self, test_args, args): - """Should leave docstrings outside line range as is.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["range_miss"]["instring"] - outstring = self.TEST_STRINGS["range_miss"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--range", "1", "2", ""]]) - def test_format_code_range_hit(self, test_args, args): - """Should format docstrings within line_range.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["range_hit"]["instring"] - outstring = self.TEST_STRINGS["range_hit"]["outstring"] - - assert outstring == uut._format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--docstring-length", "1", "1", ""]]) - def test_format_code_docstring_length(self, test_args, args): - """Should leave docstrings outside length_range as is.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["length_ignore"]["instring"] - outstring = self.TEST_STRINGS["length_ignore"]["outstring"] - - 
assert outstring == uut._format_code( - instring, - ) diff --git a/tests/formatter/test_format_epytext.py b/tests/formatter/test_format_epytext.py deleted file mode 100644 index 4971b2f5..00000000 --- a/tests/formatter/test_format_epytext.py +++ /dev/null @@ -1,133 +0,0 @@ -# pylint: skip-file -# type: ignore -# -# tests.test_format_epytext.py is part of the docformatter project -# -# Copyright (C) 2012-2023 Steven Myint -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-"""Module for testing the Formatter class.""" - - -# Standard Library Imports -import contextlib -import sys - -with contextlib.suppress(ImportError): - if sys.version_info >= (3, 11): - # Standard Library Imports - import tomllib - else: - # Third Party Imports - import tomli as tomllib - -# Third Party Imports -import pytest - -# docformatter Package Imports -from docformatter import Formatter - -INDENTATION = " " - - -class TestFormatWrapEpytext: - """Class for testing _do_format_docstring() with line wrapping and Epytext lists.""" - - with open("tests/_data/string_files/format_epytext.toml", "rb") as f: - TEST_STRINGS = tomllib.load(f) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "--wrap-summaries", - "88", - "--style", - "epytext", - "", - ] - ], - ) - def test_format_docstring_epytext_style( - self, - test_args, - args, - ): - """Wrap epytext style parameter lists. - - See requirement docformatter_10.6.2 - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["epytext"]["instring"] - outstring = self.TEST_STRINGS["epytext"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "--wrap-summaries", - "88", - "--style", - "numpy", - "", - ] - ], - ) - def test_format_docstring_non_epytext_style( - self, - test_args, - args, - ): - """Ignore wrapping epytext style parameter lists when not using epytext style. 
- - See requirement docformatter_10.6.1 - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["epytext"]["numpy"]["instring"] - outstring = self.TEST_STRINGS["epytext"]["numpy"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) diff --git a/tests/formatter/test_format_lists.py b/tests/formatter/test_format_lists.py deleted file mode 100644 index cefd6f08..00000000 --- a/tests/formatter/test_format_lists.py +++ /dev/null @@ -1,152 +0,0 @@ -# pylint: skip-file -# type: ignore -# -# tests.test_format_lists.py is part of the docformatter project -# -# Copyright (C) 2012-2023 Steven Myint -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-"""Module for testing the Formatter class.""" - - -# Standard Library Imports -import contextlib -import sys - -with contextlib.suppress(ImportError): - if sys.version_info >= (3, 11): - # Standard Library Imports - import tomllib - else: - # Third Party Imports - import tomli as tomllib - -# Third Party Imports -import pytest - -# docformatter Package Imports -from docformatter import Formatter - -INDENTATION = " " - - -class TestFormatLists: - """Class for testing format_docstring() with lists in the docstring.""" - - with open("tests/_data/string_files/format_lists.toml", "rb") as f: - TEST_STRINGS = tomllib.load(f) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--wrap-descriptions", "72", ""]]) - def test_format_docstring_should_ignore_numbered_lists(self, test_args, args): - """Ignore lists beginning with numbers.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["numbered"]["instring"] - outstring = self.TEST_STRINGS["numbered"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--wrap-descriptions", "72", ""]]) - def test_format_docstring_should_ignore_parameter_lists(self, test_args, args): - """Ignore lists beginning with -.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["parameter"]["dash"]["instring"] - outstring = self.TEST_STRINGS["parameter"]["dash"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", [["--wrap-descriptions", "72", "--style", "numpy", ""]] - ) - def test_format_docstring_should_ignore_colon_parameter_lists( - self, test_args, args - ): - """Ignore lists beginning with :""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = 
self.TEST_STRINGS["parameter"]["colon"]["instring"] - outstring = self.TEST_STRINGS["parameter"]["colon"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_should_leave_list_alone(self, test_args, args): - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["many"]["short"]["columns"]["instring"] - outstring = self.TEST_STRINGS["many"]["short"]["columns"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_should_leave_list_alone_with_rest(self, test_args, args): - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_239"]["instring"] - outstring = self.TEST_STRINGS["issue_239"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) diff --git a/tests/formatter/test_format_sphinx.py b/tests/formatter/test_format_sphinx.py deleted file mode 100644 index c8fc1b9a..00000000 --- a/tests/formatter/test_format_sphinx.py +++ /dev/null @@ -1,512 +0,0 @@ -# pylint: skip-file -# type: ignore -# -# tests.test_format_sphinx.py is part of the docformatter project -# -# Copyright (C) 2012-2023 Steven Myint -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or 
substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -"""Module for testing the Formatter class.""" - - -# Standard Library Imports -import contextlib -import sys - -with contextlib.suppress(ImportError): - if sys.version_info >= (3, 11): - # Standard Library Imports - import tomllib - else: - # Third Party Imports - import tomli as tomllib - -# Third Party Imports -import pytest - -# docformatter Package Imports -from docformatter import Formatter - -INDENTATION = " " - - -class TestFormatWrapSphinx: - """Class for testing _do_format_docstring() with line wrapping and Sphinx lists.""" - - with open("tests/_data/string_files/format_sphinx.toml", "rb") as f: - TEST_STRINGS = tomllib.load(f) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "--wrap-summaries", - "88", - "--style", - "sphinx", - "", - ] - ], - ) - def test_format_docstring_sphinx_style( - self, - test_args, - args, - ): - """Wrap sphinx style parameter lists. - - See requirement docformatter_10.4.2 and issue #230. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["sphinx"]["instring"] - outstring = self.TEST_STRINGS["sphinx"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - # Issue #230 required adding parenthesis to the SPHINX_REGEX. 
- instring = self.TEST_STRINGS["issue_230"]["instring"] - outstring = self.TEST_STRINGS["issue_230"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "--wrap-summaries", - "88", - "--style", - "numpy", - "", - ] - ], - ) - def test_format_docstring_non_sphinx_style( - self, - test_args, - args, - ): - """Ignore wrapping sphinx style parameter lists when not using sphinx style. - - See requirement docformatter_10.4.1 - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["sphinx"]["numpy"]["instring"] - outstring = self.TEST_STRINGS["sphinx"]["numpy"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "--wrap-summaries", - "88", - "", - ] - ], - ) - def test_format_docstring_sphinx_style_remove_excess_whitespace( - self, - test_args, - args, - ): - """Should remove unneeded whitespace. - - See issue #217 and #222 - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_217_222"]["instring"] - outstring = self.TEST_STRINGS["issue_217_222"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "--wrap-summaries", - "88", - "", - ] - ], - ) - def test_format_docstring_sphinx_style_two_directives_in_row( - self, - test_args, - args, - ): - """Should remove unneeded whitespace. - - See issue #215. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_215"]["instring"] - outstring = self.TEST_STRINGS["issue_215"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "--wrap-summaries", - "88", - "", - ] - ], - ) - def test_format_docstring_sphinx_style_field_body_is_blank( - self, - test_args, - args, - ): - """Retain newline after the field list when it's in the original docstring. - - Also do not return a field body that is just whitespace. - - See docformatter_10.4.3.2, issue #224, and issue #239. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_224"]["instring"] - outstring = self.TEST_STRINGS["issue_224"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - instring = self.TEST_STRINGS["issue_239"]["instring"] - outstring = self.TEST_STRINGS["issue_239"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "--wrap-summaries", - "88", - "", - ] - ], - ) - def test_format_docstring_sphinx_style_field_name_included_wrap_length( - self, - test_args, - args, - ): - """Should consider field name, not just field body, when wrapping. - - See issue #228. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_228"]["instring"] - outstring = self.TEST_STRINGS["issue_228"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "--wrap-summaries", - "88", - "", - ] - ], - ) - def test_format_docstring_sphinx_style_field_body_is_a_link( - self, - test_args, - args, - ): - """Should not add a space after the field name when the body is a link. - - See docformatter_10.4.3.1, issue #229, issue #234, and issue #235. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_229"]["instring"] - outstring = self.TEST_STRINGS["issue_229"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - instring = self.TEST_STRINGS["issue_229_2"]["instring"] - outstring = self.TEST_STRINGS["issue_229_2"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - instring = self.TEST_STRINGS["issue_234"]["instring"] - outstring = self.TEST_STRINGS["issue_234"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - instring = self.TEST_STRINGS["issue_235"]["instring"] - outstring = self.TEST_STRINGS["issue_235"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "--wrap-summaries", - "88", - "", - ] - ], - ) - def test_format_docstring_sphinx_style_field_name_has_periods( - self, - test_args, - args, - ): - """Should format sphinx field names containing a period. - - See issue #245. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_245"]["instring"] - outstring = self.TEST_STRINGS["issue_245"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "--wrap-summaries", - "88", - "", - ] - ], - ) - def test_format_docstring_sphinx_style_ignore_directive( - self, - test_args, - args, - ): - """Should not identify inline directives as sphinx field names. - - See issue #250. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_250"]["instring"] - outstring = self.TEST_STRINGS["issue_250"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "120", - "--wrap-summaries", - "120", - "--pre-summary-newline", - "--black", - "", - ] - ], - ) - def test_format_docstring_sphinx_style_recognize_yield( - self, - test_args, - args, - ): - """Should identify `yield` as sphinx field name. - - See issue #253. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_253"]["instring"] - outstring = self.TEST_STRINGS["issue_253"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "--wrap-summaries", - "88", - "--pre-summary-newline", - "", - ] - ], - ) - def test_format_docstring_sphinx_style_recognize_more_sphinx_fields( - self, - test_args, - args, - ): - """Should identify more sphinx field. 
- - See issue #271 - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_271"]["instring"] - outstring = self.TEST_STRINGS["issue_271"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) diff --git a/tests/formatter/test_format_styles.py b/tests/formatter/test_format_styles.py deleted file mode 100644 index 9c7a046f..00000000 --- a/tests/formatter/test_format_styles.py +++ /dev/null @@ -1,147 +0,0 @@ -# pylint: skip-file -# type: ignore -# -# tests.test_format_styles.py is part of the docformatter project -# -# Copyright (C) 2012-2023 Steven Myint -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-"""Module for testing the Formatter class with various style options.""" - - -# Standard Library Imports -import contextlib -import sys - -with contextlib.suppress(ImportError): - if sys.version_info >= (3, 11): - # Standard Library Imports - import tomllib - else: - # Third Party Imports - import tomli as tomllib - -# Third Party Imports -import pytest - -# docformatter Package Imports -from docformatter import Formatter - -INDENTATION = " " - - -class TestFormatStyleOptions: - """Class for testing format_docstring() when requesting style options.""" - - with open("tests/_data/string_files/format_style_options.toml", "rb") as f: - TEST_STRINGS = tomllib.load(f) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_with_no_post_description_blank( - self, - test_args, - args, - ): - """Remove blank lines before closing triple quotes.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["no_blank"]["instring"] - outstring = self.TEST_STRINGS["no_blank"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--pre-summary-newline", ""]]) - def test_format_docstring_with_pre_summary_newline( - self, - test_args, - args, - ): - """Remove blank line before summary.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["presummary_newline"]["instring"] - outstring = self.TEST_STRINGS["presummary_newline"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--make-summary-multi-line", ""]]) - def test_format_docstring_make_summary_multi_line( - self, - test_args, - args, - ): - """Place the one-line docstring between triple quotes.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - 
instring = self.TEST_STRINGS["summary_multiline"]["instring"] - outstring = self.TEST_STRINGS["summary_multiline"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--pre-summary-space", ""]]) - def test_format_docstring_pre_summary_space( - self, - test_args, - args, - ): - """Place a space between the opening quotes and the summary.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["presummary_space"]["instring"] - outstring = self.TEST_STRINGS["presummary_space"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) diff --git a/tests/formatter/test_format_urls.py b/tests/formatter/test_format_urls.py deleted file mode 100644 index aca4ee10..00000000 --- a/tests/formatter/test_format_urls.py +++ /dev/null @@ -1,674 +0,0 @@ -# pylint: skip-file -# type: ignore -# -# tests.test_format_urls.py is part of the docformatter project -# -# Copyright (C) 2012-2023 Steven Myint -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -"""Module for testing the Formatter class.""" - - -# Standard Library Imports -import contextlib -import sys - -with contextlib.suppress(ImportError): - if sys.version_info >= (3, 11): - # Standard Library Imports - import tomllib - else: - # Third Party Imports - import tomli as tomllib - -# Third Party Imports -import pytest - -# docformatter Package Imports -from docformatter import Formatter - -INDENTATION = " " - - -class TestFormatWrapURL: - """Class for testing _do_format_docstring() with line wrapping and URLs.""" - - with open("tests/_data/string_files/format_urls.toml", "rb") as f: - TEST_STRINGS = tomllib.load(f) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [["--wrap-descriptions", "72", ""]], - ) - def test_format_docstring_with_inline_links( - self, - test_args, - args, - ): - """Preserve links instead of wrapping them. - - See requirement docformatter_10.1.3. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["inline"]["instring"] - outstring = self.TEST_STRINGS["inline"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [["--wrap-descriptions", "72", ""]], - ) - def test_format_docstring_with_short_inline_link( - self, - test_args, - args, - ): - """Short in-line links will remain untouched. - - See requirement docformatter_10.1.3.1. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["inline"]["short"]["instring"] - outstring = self.TEST_STRINGS["inline"]["short"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [["--wrap-descriptions", "72", ""]], - ) - def test_format_docstring_with_long_inline_link( - self, - test_args, - args, - ): - """Should move long in-line links to line by themselves.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["inline"]["long"]["instring"] - outstring = self.TEST_STRINGS["inline"]["long"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [["--wrap-descriptions", "72", ""]], - ) - def test_format_docstring_with_only_link( - self, - test_args, - args, - ): - """Should format docstring containing only a link.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["only"]["link"]["instring"] - outstring = self.TEST_STRINGS["only"]["link"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [["--wrap-descriptions", "72", ""]], - ) - def test_format_docstring_with_target_links( - self, - test_args, - args, - ): - """Preserve links instead of wrapping them. - - See requirement docformatter_10.1.3, issue #75, issue #145. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_75"]["instring"] - outstring = self.TEST_STRINGS["issue_75"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - instring = self.TEST_STRINGS["issue_145"]["instring"] - outstring = self.TEST_STRINGS["issue_145"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "72", - "", - ] - ], - ) - def test_format_docstring_with_simple_link( - self, - test_args, - args, - ): - """Preserve links instead of wrapping them. - - See requirement docformatter_10.1.3, issue #75. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_75_2"]["instring"] - outstring = self.TEST_STRINGS["issue_75_2"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "72", - "", - ] - ], - ) - def test_format_docstring_with_short_link( - self, - test_args, - args, - ): - """Short links will remain untouched. - - See requirement docformatter_10.1.3, issue #75. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_75_3"]["instring"] - outstring = self.TEST_STRINGS["issue_75_3"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [["--wrap-descriptions", "72", ""]], - ) - def test_format_docstring_with_inline_link_retain_spaces( - self, - test_args, - args, - ): - """In-line links shouldn't remove blank spaces between words. - - See issue #140. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_140"]["instring"] - outstring = self.TEST_STRINGS["issue_140"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - instring = self.TEST_STRINGS["issue_140_2"]["instring"] - outstring = self.TEST_STRINGS["issue_140_2"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - instring = self.TEST_STRINGS["issue_140_3"]["instring"] - outstring = self.TEST_STRINGS["issue_140_3"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [["--wrap-descriptions", "72", ""]], - ) - def test_format_docstring_link_after_colon( - self, - test_args, - args, - ): - """In-line links shouldn't be put on next line when following a colon. - - See issue #150. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_150"]["instring"] - outstring = self.TEST_STRINGS["issue_150"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "", - ] - ], - ) - def test_format_docstring_keep_inline_link_together( - self, - test_args, - args, - ): - """Keep in-line links together with the display text. - - See issue #157. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_157"]["instring"] - outstring = self.TEST_STRINGS["issue_157"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "", - ] - ], - ) - def test_format_docstring_keep_inline_link_together_two_paragraphs( - self, - test_args, - args, - ): - """Keep in-line links together with the display text. - - If there is another paragraph following the in-line link, don't strip the - newline in between. - - See issue #157. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_157_2"]["instring"] - outstring = self.TEST_STRINGS["issue_157_2"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - instring = self.TEST_STRINGS["issue_157_3"]["instring"] - outstring = self.TEST_STRINGS["issue_157_3"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - instring = self.TEST_STRINGS["issue_157_4"]["instring"] - outstring = self.TEST_STRINGS["issue_157_4"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - instring = self.TEST_STRINGS["issue_157_5"]["instring"] - outstring = self.TEST_STRINGS["issue_157_5"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - instring = self.TEST_STRINGS["issue_157_6"]["instring"] - outstring = self.TEST_STRINGS["issue_157_6"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - instring = self.TEST_STRINGS["issue_157_7"]["instring"] - outstring = self.TEST_STRINGS["issue_157_7"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - instring = 
self.TEST_STRINGS["issue_157_8"]["instring"] - outstring = self.TEST_STRINGS["issue_157_8"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - instring = self.TEST_STRINGS["issue_157_9"]["instring"] - outstring = self.TEST_STRINGS["issue_157_9"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - instring = self.TEST_STRINGS["issue_157_10"]["instring"] - outstring = self.TEST_STRINGS["issue_157_10"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - instring = self.TEST_STRINGS["issue_157_11"]["instring"] - outstring = self.TEST_STRINGS["issue_157_11"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "--wrap-summaries", - "88", - "", - ] - ], - ) - def test_format_docstring_link_no_delete_words( - self, - test_args, - args, - ): - """Should not delete words when wrapping a URL. - - See issue #159. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_159"]["instring"] - outstring = self.TEST_STRINGS["issue_159"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "88", - "--wrap-summaries", - "88", - "", - ] - ], - ) - def test_format_docstring_link_no_newline_after_link( - self, - test_args, - args, - ): - """Links should have no newline before or after them. - - See issue #180. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_180"]["instring"] - outstring = self.TEST_STRINGS["issue_180"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "72", - "", - ] - ], - ) - def test_format_docstring_with_only_link_in_description( - self, - test_args, - args, - ): - """No index error when only link in long description. - - See issue #189. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_189"]["instring"] - outstring = self.TEST_STRINGS["issue_189"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_no_indent_string_on_newline( - self, - test_args, - args, - ): - """Should not add the indentation string to a newline. - - See issue #199. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_199"]["instring"] - outstring = self.TEST_STRINGS["issue_199"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_with_short_anonymous_link( - self, - test_args, - args, - ): - """Anonymous link references should not be wrapped into the link. - - See issue #210. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_210"]["instring"] - outstring = self.TEST_STRINGS["issue_210"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_with_quoted_link( - self, - test_args, - args, - ): - """Anonymous link references should not be wrapped into the link. - - See issue #218. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_218"]["instring"] - outstring = self.TEST_STRINGS["issue_218"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) diff --git a/tests/formatter/test_format_wrap.py b/tests/formatter/test_format_wrap.py deleted file mode 100644 index 196c2553..00000000 --- a/tests/formatter/test_format_wrap.py +++ /dev/null @@ -1,407 +0,0 @@ -# pylint: skip-file -# type: ignore -# -# tests.test_format_wrap.py is part of the docformatter project -# -# Copyright (C) 2012-2023 Steven Myint -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -"""Module for testing the Formatter class with the --wrap options.""" - - -# Standard Library Imports -import contextlib -import itertools -import random -import sys - -with contextlib.suppress(ImportError): - if sys.version_info >= (3, 11): - # Standard Library Imports - import tomllib - else: - # Third Party Imports - import tomli as tomllib - -# Third Party Imports -import pytest - -# docformatter Package Imports -import docformatter -from docformatter import Formatter - -# docformatter Local Imports -from .. import generate_random_docstring - -INDENTATION = " " - - -class TestFormatWrap: - """Class for testing _do_format_docstring() with line wrapping.""" - - with open("tests/_data/string_files/format_wrap.toml", "rb") as f: - TEST_STRINGS = tomllib.load(f) - - @pytest.mark.unit - def test_unwrap_summary(self): - """Remove newline and tab characters.""" - - instring = self.TEST_STRINGS["unwrap"]["instring"] - outstring = self.TEST_STRINGS["unwrap"]["outstring"] - - assert outstring == docformatter.unwrap_summary( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_with_wrap( - self, - test_args, - args, - ): - """Wrap the docstring.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - # This function uses `random` so make sure each run of this test is - # repeatable. 
- random.seed(0) - - min_line_length = 50 - for max_length, num_indents in itertools.product( - range(min_line_length, 100), range(20) - ): - indentation = " " * num_indents - uut.args.wrap_summaries = max_length - formatted_text = indentation + uut._do_format_docstring( - indentation=indentation, - docstring=generate_random_docstring( - max_word_length=min_line_length // 2 - ), - ) - for line in formatted_text.split("\n"): - # It is not the formatter's fault if a word is too long to - # wrap. - if len(line.split()) > 1: - assert len(line) <= max_length - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--wrap-summaries", "79", ""]]) - def test_format_docstring_with_weird_indentation_and_punctuation( - self, - test_args, - args, - ): - """Wrap and dedent docstring.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["weird_punctuation"]["instring"] - outstring = self.TEST_STRINGS["weird_punctuation"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--wrap-descriptions", "72", ""]]) - def test_format_docstring_with_description_wrapping( - self, - test_args, - args, - ): - """Wrap description at 72 characters.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["description_wrap"]["instring"] - outstring = self.TEST_STRINGS["description_wrap"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--wrap-descriptions", "72", ""]]) - def test_format_docstring_should_ignore_doctests( - self, - test_args, - args, - ): - """Leave doctests alone.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["ignore_doctest"]["instring"] - outstring = 
self.TEST_STRINGS["ignore_doctest"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--wrap-descriptions", "72", ""]]) - def test_format_docstring_should_ignore_doctests_in_summary( - self, - test_args, - args, - ): - """Leave doctests alone if they're in the summary.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["ignore_summary_doctest"]["instring"] - outstring = self.TEST_STRINGS["ignore_summary_doctest"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--wrap-descriptions", "72", ""]]) - def test_format_docstring_should_maintain_indentation_of_doctest( - self, - test_args, - args, - ): - """Don't change indentation of doctest lines.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["same_indentation_doctest"]["instring"] - outstring = self.TEST_STRINGS["same_indentation_doctest"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [ - [ - "--wrap-descriptions", - "72", - "--wrap-summaries", - "50", - "--force-wrap", - "", - ] - ], - ) - def test_force_wrap( - self, - test_args, - args, - ): - """Force even lists to be wrapped.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["force_wrap"]["instring"] - outstring = self.TEST_STRINGS["force_wrap"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [["--wrap-summaries", "30", "--tab-width", "4", ""]], - ) - def test_format_docstring_with_summary_only_and_wrap_and_tab_indentation( - self, - test_args, - args, - ): 
- """Should account for length of tab when wrapping. - - See PR #69. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["summary_wrap_tab"]["instring"] - outstring = self.TEST_STRINGS["summary_wrap_tab"]["outstring"] - - assert outstring == uut._do_format_docstring( - "\t\t", - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [["--wrap-summaries", "69", "--close-quotes-on-newline", ""]], - ) - def test_format_docstring_for_multi_line_summary_alone( - self, - test_args, - args, - ): - """Place closing quotes on newline when wrapping one-liner.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["one_line_wrap_newline"]["instring"] - outstring = self.TEST_STRINGS["one_line_wrap_newline"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", - [["--wrap-summaries", "88", "--close-quotes-on-newline", ""]], - ) - def test_format_docstring_for_one_line_summary_alone_but_too_long( - self, - test_args, - args, - ): - """""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["one_line_no_wrap"]["instring"] - outstring = self.TEST_STRINGS["one_line_no_wrap"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_with_class_attributes(self, test_args, args): - """Wrap long class attribute docstrings.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["class_attribute_wrap"]["instring"] - outstring = self.TEST_STRINGS["class_attribute_wrap"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - 
def test_format_docstring_no_newline_in_summary_with_symbol(self, test_args, args): - """Wrap summary with symbol should not add newline. - - See issue #79. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_79"]["instring"] - outstring = self.TEST_STRINGS["issue_79"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize( - "args", [["--wrap-descriptions", "120", "--wrap-summaries", "120", ""]] - ) - def test_format_docstring_with_multi_paragraph_description( - self, - test_args, - args, - ): - """Wrap each paragraph in the long description separately. - - See issue #127. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_127"]["instring"] - outstring = self.TEST_STRINGS["issue_127"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) diff --git a/tests/test_strip_docstring.py b/tests/test_strip_docstring.py deleted file mode 100644 index 613fa45e..00000000 --- a/tests/test_strip_docstring.py +++ /dev/null @@ -1,222 +0,0 @@ -# pylint: skip-file -# type: ignore -# -# tests.test_strip_docstring.py is part of the docformatter project -# -# Copyright (C) 2012-2023 Steven Myint -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -"""Module for testing the _do_strip_docstring() method.""" - - -# Standard Library Imports -import sys - -# Third Party Imports -import pytest - -# docformatter Package Imports -from docformatter import Formatter - -INDENTATION = " " - - -class TestStripDocstring: - """Class for testing _do_strip_docstring().""" - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_strip_docstring( - self, - test_args, - args, - ): - """Strip triple double quotes from docstring.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - docstring, open_quote = uut._do_strip_docstring( - ''' - """Hello. - - """ - - ''' - ) - assert docstring == "Hello." - assert open_quote == '"""' - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_strip_docstring_with_triple_single_quotes( - self, - test_args, - args, - ): - """Strip triple single quotes from docstring.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - docstring, open_quote = uut._do_strip_docstring( - """ - '''Hello. - - ''' - - """ - ) - assert docstring == "Hello." 
- assert open_quote == '"""' - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_strip_docstring_with_empty_string( - self, - test_args, - args, - ): - """Return series of six double quotes when passed empty string.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - docstring, open_quote = uut._do_strip_docstring('""""""') - assert not docstring - assert open_quote == '"""' - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_strip_docstring_with_raw_string( - self, - test_args, - args, - ): - """Return docstring and raw open quote.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - docstring, open_quote = uut._do_strip_docstring('r"""foo"""') - assert docstring == "foo" - assert open_quote == 'r"""' - - docstring, open_quote = uut._do_strip_docstring("R'''foo'''") - assert docstring == "foo" - assert open_quote == 'R"""' - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_strip_docstring_with_unicode_string( - self, - test_args, - args, - ): - """Return docstring and unicode open quote.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - docstring, open_quote = uut._do_strip_docstring("u'''foo'''") - assert docstring == "foo" - assert open_quote == 'u"""' - - docstring, open_quote = uut._do_strip_docstring('U"""foo"""') - assert docstring == "foo" - assert open_quote == 'U"""' - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_strip_docstring_with_unknown( - self, - test_args, - args, - ): - """Raise ValueError with single quotes.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - with pytest.raises(ValueError): - uut._do_strip_docstring("foo") - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_strip_docstring_with_single_quotes( - self, - test_args, - args, - ): - """Raise ValueError when strings begin with single 
single quotes. - - See requirement PEP_257_1. See issue #66 for example of docformatter breaking - code when encountering single quote. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - with pytest.raises(ValueError): - uut._do_strip_docstring("'hello\\''") - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_strip_docstring_with_double_quotes( - self, - test_args, - args, - ): - """Raise ValueError when strings begin with single double quotes. - - See requirement PEP_257_1. See issue #66 for example of docformatter breaking - code when encountering single quote. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - with pytest.raises(ValueError): - uut._do_strip_docstring('"hello\\""') From a31913c304aa13b2c0c814f86ccf330febda7a02 Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Tue, 22 Jul 2025 23:46:59 -0400 Subject: [PATCH 02/21] refactor: add new module with functions for classifying docstring types --- src/docformatter/classify.py | 479 +++++++++++++++++++++++++++++++++++ 1 file changed, 479 insertions(+) create mode 100644 src/docformatter/classify.py diff --git a/src/docformatter/classify.py b/src/docformatter/classify.py new file mode 100644 index 00000000..f9466603 --- /dev/null +++ b/src/docformatter/classify.py @@ -0,0 +1,479 @@ +#!/usr/bin/env python +# +# docformatter.classify.py is part of the docformatter project +# +# Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: 
+# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +"""This module provides docformatter's classification functions.""" + + +# Standard Library Imports +import sys +import tokenize +from tokenize import TokenInfo +from typing import Union + +# docformatter Package Imports +from docformatter.constants import MAX_PYTHON_VERSION + +PY312 = (sys.version_info[0], sys.version_info[1]) > MAX_PYTHON_VERSION + + +def do_find_docstring_blocks(tokens: list[TokenInfo]) -> list[tuple[int, int, str]]: + """Identify all docstring blocks and their anchor points. + + Parameters + ---------- + tokens (list[TokenInfo]): + A list of tokenized Python source code. + + Returns + ------- + list[tuple[int, int, str]]: + A list of tuples representing each docstring block. Each tuple contains: + - anchor_index (int): Index of the anchor (class, def, async def, or + assignment). + - string_index (int): Index of the docstring token. + - docstring_type (str): One of "module", "class", "function", or + "attribute". 
+ """ + docstring_blocks = [] + + for i, token in enumerate(tokens): + if ( + token.type != tokenize.STRING + or not ( + token.string.startswith('"""') + or token.string.startswith('r"""') + or token.string.startswith('R"""') + or token.string.startswith('u"""') + or token.string.startswith('U"""') + or token.string.startswith("'''") + or token.string.startswith("r'''") + or token.string.startswith("R'''") + or token.string.startswith("u'''") + or token.string.startswith("U'''") + ) + or " = " in token.line + ): + continue + + if is_module_docstring(tokens, i): + docstring_blocks.append((0, i, "module")) + continue + + if is_attribute_docstring(tokens, i): + anchor_idx = _do_find_anchor_index(tokens, i, target="attribute") + if anchor_idx is not None: + docstring_blocks.append((anchor_idx, i, "attribute")) + continue + + if is_class_docstring(tokens, i): + anchor_idx = _do_find_anchor_index(tokens, i, target="class") + if anchor_idx is not None: + docstring_blocks.append((anchor_idx, i, "class")) + continue + + if is_function_or_method_docstring(tokens, i): + anchor_idx = _do_find_anchor_index(tokens, i, target="def") + if anchor_idx is not None: + docstring_blocks.append((anchor_idx, i, "function")) + continue + + # If adjacent docstrings have the same anchor index, remove the second one as + # there can only be one docstring per anchor. + i = 1 + while i < len(docstring_blocks): + if docstring_blocks[i][0] == docstring_blocks[i - 1][0]: + docstring_blocks.pop(i) + i += 1 + + return docstring_blocks + + +def _do_find_anchor_index( + tokens: list[TokenInfo], + docstring_index: int, + target: str, +) -> Union[int, None]: + """Walk backward from a docstring to find the matching anchor. + + The matching anchor would be one of `class`, `def`, `async def`, or an assignment. + + Parameters + ---------- + tokens (list[TokenInfo]): + A list of tokenized Python source code. + docstring_index (int): + Index of the STRING token representing the docstring. 
+ target (str): + One of "class", "def", or "attribute" indicating what to search for. + + Returns + ------- + int | None: + Index of the anchor token if found, otherwise None. + """ + i = docstring_index - 1 + saw_decorator = False + + while i >= 0: + tok = tokens[i] + + if tok.type == tokenize.OP and tok.string == "@": + saw_decorator = True + + if target == "class" and tok.type == tokenize.NAME and tok.string == "class": + return i + + if target == "def" and tok.type == tokenize.NAME and tok.string == "def": + # Handle @decorator above def + if saw_decorator: + while i > 0 and tokens[i - 1].type != tokenize.NEWLINE: + i -= 1 + return i + + if target == "attribute": + if tok.type == tokenize.NAME: + return i + + i -= 1 + + return None + + +def is_attribute_docstring( + tokens: list[tokenize.TokenInfo], + index: int, +) -> bool: + """Return True if the string token is an attribute docstring. + + Parameters + ---------- + tokens : list[TokenInfo] + A list of tokenized Python source code. + index : int + Index of the anchor token. + + Returns + ------- + True if attribute docstring, False otherwise. 
+ """ + if index < 2: # noqa: PLR2004 + return False + + # Step 1: Find the previous NEWLINE before the docstring + k = index - 1 + while k > 0 and tokens[k].type != tokenize.NEWLINE: + k -= 1 + + # Step 2: Check for '=' or ':' on the line *before* the docstring + seen_equal_or_colon = False + for tok in tokens[0:index]: + if tok.type == tokenize.OP and tok.string == "=" and '"""' not in tok.line: + seen_equal_or_colon = True + break + else: + seen_equal_or_colon = False + + if not seen_equal_or_colon: + return False + + return True + + +def is_class_docstring( + tokens: list[tokenize.TokenInfo], + index: int, +) -> bool: + """Determine if docstring is a class docstring.""" + # Walk backward to find the most recent `class` keyword before the string, + # without crossing over a `def`, `async`, or another block + for i in range(index - 1, -1, -1): + tok = tokens[i] + if tok.type == tokenize.NAME and tok.string == "class": + return True + if tok.type == tokenize.NAME and tok.string in ("def", "async"): + return False # Hit enclosing function or method first. + if tok.type == tokenize.OP and tok.string == "=": + return False # Hit assignment, not a class docstring. + + return False + + +def is_closing_quotes( + token: tokenize.TokenInfo, prev_token: tokenize.TokenInfo +) -> bool: + """Determine if token is a closing quote for a docstring. + + Parameters + ---------- + token : tokenize.TokenInfo + The token to check. + prev_token : tokenize.TokenInfo + The previous token in the stream. + + Returns + ------- + bool + True if the token is a closing quote for a docstring, False otherwise. + """ + _offset = prev_token.line.split("\n")[-1] + if prev_token.line.endswith("\n"): + _offset = prev_token.line.split("\n")[-2] + + if ( + token.line.strip() == '"""' + and token.type == tokenize.NEWLINE + or token.line == _offset + ): + return True + + return False + + +def is_code_line(token: tokenize.TokenInfo) -> bool: + """Determine if token is a line of code. 
+ + Parameters + ---------- + token : tokenize.TokenInfo + The token to check. + + Returns + ------- + bool + True if the token is a code line, False otherwise. + """ + if token.type == tokenize.NAME and not ( + token.line.strip().startswith("def ") + or token.line.strip().startswith("async ") + or token.line.strip().startswith("class ") + ): + return True + + return False + + +def is_definition_line(token: tokenize.TokenInfo) -> bool: + """Determine if token is a class or function/method definition line. + + Parameters + ---------- + token : tokenize.TokenInfo + The token to check. + + Returns + ------- + bool + True if the token is a definition line, False otherwise. + """ + if token.type == tokenize.NAME and ( + token.line.strip().startswith("def ") + or token.line.strip().startswith("async ") + or token.line.strip().startswith("class ") + ): + return True + + return False + + +def is_f_string(token: tokenize.TokenInfo, prev_token: tokenize.TokenInfo) -> bool: + """Determine if token is an f-string. + + Parameters + ---------- + token : tokenize.TokenInfo + The token to check. + prev_token : tokenize.TokenInfo + The previous token in the stream. + + Returns + ------- + bool + True if the token is an f-string, False otherwise. + """ + if PY312: + if tokenize.FSTRING_MIDDLE in [token.type, prev_token.type]: + return True + + return False + + +def is_function_or_method_docstring( + tokens: list[tokenize.TokenInfo], + index: int, +) -> bool: + """Determine if docstring is a function or method docstring.""" + for i in range(index - 1, -1, -1): + tok = tokens[i] + if tok.type == tokenize.NAME and tok.string in ("def", "async"): + return True + if tok.type == tokenize.NAME and tok.string == "class": + return False # hit enclosing class first + + return False + + +def is_inline_comment(token: tokenize.TokenInfo) -> bool: + """Determine if token is an inline comment. + + Parameters + ---------- + token : tokenize.TokenInfo + The token to check. 
+ + Returns + ------- + bool + True if the token is an inline comment, False otherwise. + """ + if token.line.strip().startswith('"""') and token.string.startswith("#"): + return True + return False + + +def is_line_following_indent( + token: tokenize.TokenInfo, + prev_token: tokenize.TokenInfo, +) -> bool: + """Determine if token is a line that follows an indent. + + Parameters + ---------- + token : tokenize.TokenInfo + The token to check. + prev_token : tokenize.TokenInfo + The previous token in the stream. + + Returns + ------- + bool + True if the token is a line that follows an indent, False otherwise. + """ + if prev_token.type == tokenize.INDENT and prev_token.line in token.line: + return True + + return False + + +def is_module_docstring( + tokens: list[tokenize.TokenInfo], + index: int, +) -> bool: + """Determine if docstring is a module docstring.""" + # No code tokens before the string + for k in range(index): + if tokens[k][0] not in ( + tokenize.ENCODING, + tokenize.COMMENT, + tokenize.NEWLINE, + tokenize.NL, + ): + return False + return True + + +def is_nested_definition_line(token: tokenize.TokenInfo) -> bool: + """Determine if token is a nested class or function/method definition line. + + Parameters + ---------- + token : tokenize.TokenInfo + The token to check. + + Returns + ------- + bool + True if the token is a nested definition line, False otherwise. + """ + if token.type == tokenize.NAME and ( + token.line.startswith(" def ") + or token.line.startswith(" async ") + or token.line.startswith(" class ") + ): + return True + + return False + + +def is_newline_continuation( + token: tokenize.TokenInfo, + prev_token: tokenize.TokenInfo, +) -> bool: + """Determine if token is a continuation of a previous line. + + Parameters + ---------- + token : tokenize.TokenInfo + The token to check. + prev_token : tokenize.TokenInfo + The previous token in the stream. 
+ + Returns + ------- + bool + True if the token is a continuation of a previous line, False otherwise. + """ + if ( + token.type in (tokenize.NEWLINE, tokenize.NL) + and token.line.strip() in prev_token.line.strip() + and token.line != "\n" + ): + return True + + return False + + +def is_string_variable( + token: tokenize.TokenInfo, + prev_token: tokenize.TokenInfo, +) -> bool: + """Determine if token is a string variable assignment. + + Parameters + ---------- + token : tokenize.TokenInfo + The token to check. + prev_token : tokenize.TokenInfo + The previous token in the stream. + + Returns + ------- + bool + True if the token is a string variable assignment, False otherwise. + """ + # TODO: The AWAIT token is removed in Python 3.13 and later. Only Python 3.9 + # seems to generate the AWAIT token, so we can safely remove the check for it when + # support for Python 3.9 is dropped in April 2026. + try: + _token_types = (tokenize.AWAIT, tokenize.OP) + except AttributeError: + _token_types = (tokenize.OP,) # type: ignore + + if prev_token.type in _token_types and ( + '= """' in token.line or token.line in prev_token.line + ): + return True + + return False From 86ffee2ed4735f6d920d268417c0f06c2522fb0c Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Tue, 22 Jul 2025 23:47:31 -0400 Subject: [PATCH 03/21] refactor: add new module with constant values --- src/docformatter/constants.py | 232 ++++++++++++++++++++++++++++++++++ 1 file changed, 232 insertions(+) create mode 100644 src/docformatter/constants.py diff --git a/src/docformatter/constants.py b/src/docformatter/constants.py new file mode 100644 index 00000000..0860ebe2 --- /dev/null +++ b/src/docformatter/constants.py @@ -0,0 +1,232 @@ +#!/usr/bin/env python +# +# docformatter.constants.py is part of the docformatter project +# +# Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of 
this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +"""This module provides docformatter's constants.""" + + +# TODO: Move these constants to the configuration file and/or command line. +ABBREVIATIONS = ( + "e.g.", + "i.e.", + "et. al.", + "etc.", + "Dr.", + "Mr.", + "Mrs.", + "Ms.", +) + +ALEMBIC_REGEX = r"^(Revision ID|Revises|Create Date): {0,}" +"""Regular expression to use for finding alembic headers.""" + +BULLET_REGEX = r"\s*[*\-+] [\S ]+" +"""Regular expression to use for finding bullet lists.""" + +CODE_PATTERN_REGEX = ( + r"^ {0,}(assert|async|await|break|class|continue|def|del|do|elif|else|except|" + r"finally|for|global|if|import|lambda|pass|print|raise|return|super|try|while|" + r"with|yield)" +) +"""Regular expression to use for finding code patterns.""" + +ENUM_REGEX = r"\s*\d\." 
+"""Regular expression to use for finding enumerated lists.""" + +EPYTEXT_REGEX = r"@[a-zA-Z0-9_\-\s]+:" +"""Regular expression to use for finding Epytext-style field lists.""" + +GOOGLE_REGEX = r"^ *[a-zA-Z0-9_\- ]*:$" +"""Regular expression to use for finding Google-style field lists.""" + +LITERAL_REGEX = r"[\S ]*::" +"""Regular expression to use for finding literal blocks.""" + +NUMPY_REGEX = r"^\s[a-zA-Z0-9_\- ]+ ?: [\S ]+" +"""Regular expression to use for finding Numpy-style field lists.""" + +NUMPY_SECTION_REGEX = ( + r"^ *?(Parameters|Other Parameters|Returns|Raises|See " + r"Also|Notes|Examples|References|Yields|Warns|Warnings|Receives)\n[- ]+" +) +"""Regular expression to use for finding Numpy section headers.""" + +OPTION_REGEX = r"^ {0,}-{1,2}[\S ]+ \w+" +"""Regular expression to use for finding option lists.""" + +REST_REGEX = r"((\.{2}|`{2}) ?[\w.~-]+(:{2}|`{2})?[\w ]*?|`[\w.~]+`)" +"""Regular expression to use for finding reST directives.""" + +REST_SECTION_REGEX = ( + r"(^ *[#\*=\-^\'\"\+_\~`\.\:]+\n)?[\w ]+\n *[#\*=\-^\'\"\+_\~`\.\:]+" +) +"""Regular expression to use for finding reST section headers.""" + +# Complete list: +# https://www.sphinx-doc.org/en/master/usage/domains/python.html#info-field-lists +SPHINX_FIELD_PATTERNS = ( + "arg|" + "cvar|" + "except|" + "ivar|" + "key|" + "meta|" + "param|" + "raise|" + "return|" + "rtype|" + "type|" + "var|" + "yield" +) + +SPHINX_REGEX = rf":({SPHINX_FIELD_PATTERNS})[a-zA-Z0-9_\-.() ]*:" +"""Regular expression to use for finding Sphinx-style field lists.""" + +URL_PATTERNS = ( + "afp|" + "apt|" + "bitcoin|" + "chrome|" + "cvs|" + "dav|" + "dns|" + "file|" + "finger|" + "fish|" + "ftp|" + "ftps|" + "git|" + "http|" + "https|" + "imap|" + "ipp|" + "ipps|" + "irc|" + "irc6|" + "ircs|" + "jar|" + "ldap|" + "ldaps|" + "mailto|" + "news|" + "nfs|" + "nntp|" + "pop|" + "rsync|" + "s3|" + "sftp|" + "shttp|" + "sip|" + "sips|" + "smb|" + "sms|" + "snmp|" + "ssh|" + "svn|" + "telnet|" + "vnc|" + "xmpp|" + "xri" 
+) +"""The URL patterns to look for when finding links. + +Based on the table at +""" + +# This is the regex used to find URL links: +# +# (__ |`{{2}}|`\w[\w. :\n]*|\.\. _?[\w. :]+|')? is used to find in-line links that +# should remain on a single line even if it exceeds the wrap length. +# __ is used to find to underscores followed by a single space. +# This finds patterns like: __ https://sw.kovidgoyal.net/kitty/graphics-protocol/ +# +# `{{2}} is used to find two back-tick characters. +# This finds patterns like: ``http://www.example.com`` +# +# `\w[a-zA-Z0-9. :#\n]* matches the back-tick character immediately followed by one +# letter, then followed by any number of letters, numbers, periods, spaces, colons, +# hash marks or newlines. +# This finds patterns like: `Link text `_ +# +# \.\. _?[\w. :]+ matches the pattern .. followed one space, then by zero or +# one underscore, then any number of letters, periods, spaces, or colons. +# This finds patterns like: .. _a link: https://domain.invalid/ +# +# ' matches a single quote. +# This finds patterns like: 'http://www.example.com' +# +# ? matches the previous pattern between zero or one times. +# +# ? is used to find the actual link. +# ? matches the character > between zero and one times. +URL_REGEX = ( + rf"(__ |`{{2}}|`\w[\w :#\n]*[.|\.\. _?[\w. :]+|')??" +) + +URL_SKIP_REGEX = rf"({URL_PATTERNS}):(/){{0,2}}(``|')" +"""The regex used to ignore found hyperlinks. + +URLs that don't actually contain a domain, but only the URL pattern should +be treated like simple text. This will ignore URLs like ``http://`` or 'ftp:`. + +({URL_PATTERNS}) matches one of the URL patterns. +:(/){{0,2}} matches a colon followed by up to two forward slashes. +(``|') matches a double back-tick or single quote. +""" + +# Keep these constants as constants. 
+MAX_PYTHON_VERSION = (3, 11) + +DEFAULT_INDENT = 4 +"""The default indentation for docformatter.""" + +HEURISTIC_MIN_LIST_ASPECT_RATIO = 0.4 +"""The minimum aspect ratio to consider a list.""" + +STR_QUOTE_TYPES = ( + '"""', + "'''", +) +RAW_QUOTE_TYPES = ( + 'r"""', + 'R"""', + "r'''", + "R'''", +) +UCODE_QUOTE_TYPES = ( + 'u"""', + 'U"""', + "u'''", + "U'''", +) +QUOTE_TYPES = STR_QUOTE_TYPES + RAW_QUOTE_TYPES + UCODE_QUOTE_TYPES From 9f45b298ea0512a596318d76572907e3856b819e Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Tue, 22 Jul 2025 23:48:45 -0400 Subject: [PATCH 04/21] refactor: add new module with functions for detecting various types of patterns --- src/docformatter/patterns/__init__.py | 36 ++++ src/docformatter/patterns/fields.py | 229 +++++++++++++++++++++++++ src/docformatter/patterns/headers.py | 143 ++++++++++++++++ src/docformatter/patterns/lists.py | 233 ++++++++++++++++++++++++++ src/docformatter/patterns/misc.py | 124 ++++++++++++++ src/docformatter/patterns/rest.py | 58 +++++++ src/docformatter/patterns/url.py | 84 ++++++++++ 7 files changed, 907 insertions(+) create mode 100644 src/docformatter/patterns/__init__.py create mode 100644 src/docformatter/patterns/fields.py create mode 100644 src/docformatter/patterns/headers.py create mode 100644 src/docformatter/patterns/lists.py create mode 100644 src/docformatter/patterns/misc.py create mode 100644 src/docformatter/patterns/rest.py create mode 100644 src/docformatter/patterns/url.py diff --git a/src/docformatter/patterns/__init__.py b/src/docformatter/patterns/__init__.py new file mode 100644 index 00000000..b7e7a29f --- /dev/null +++ b/src/docformatter/patterns/__init__.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python +# +# docformatter.patterns.__init__.py is part of the docformatter project +# +# Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and 
associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +"""This is the docformatter patterns package.""" + + +# docformatter Local Imports +from .fields import * # noqa F403 +from .headers import * # noqa F403 +from .lists import * # noqa F403 +from .misc import * # noqa F403 +from .rest import * # noqa F403 +from .url import * # noqa F403 diff --git a/src/docformatter/patterns/fields.py b/src/docformatter/patterns/fields.py new file mode 100644 index 00000000..49fe4b55 --- /dev/null +++ b/src/docformatter/patterns/fields.py @@ -0,0 +1,229 @@ +#!/usr/bin/env python +# +# docformatter.patterns.fields.py is part of the docformatter project +# +# Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the 
"""This module provides docformatter's field list pattern recognition functions."""


# Standard Library Imports
import re
from re import Match
from typing import Union

# docformatter Package Imports
from docformatter.constants import (
    EPYTEXT_REGEX,
    GOOGLE_REGEX,
    NUMPY_REGEX,
    SPHINX_REGEX,
)

# Only these two styles have field lists whose long lines should be wrapped.
_WRAPPABLE_STYLE_REGEX = {
    "epytext": EPYTEXT_REGEX,
    "sphinx": SPHINX_REGEX,
}


def do_find_field_lists(
    text: str,
    style: str,
) -> tuple[list[tuple[int, int]], bool]:
    r"""Determine if docstring contains any field lists.

    Parameters
    ----------
    text : str
        The docstring description to check for field list patterns.
    style : str
        The field list style used.

    Returns
    -------
    _field_idx, _wrap_parameters : tuple
        A list of tuples with each tuple containing the starting and ending
        position of each field list found in the passed description, and a
        boolean indicating whether long field list lines should be wrapped.
    """
    _regex = _WRAPPABLE_STYLE_REGEX.get(style)
    if _regex is None:
        # Styles other than epytext/sphinx never wrap field lists.
        return [], False

    return [_match.span() for _match in re.finditer(_regex, text)], True


def is_field_list(
    text: str,
    style: str,
) -> bool:
    """Determine if docstring contains field lists.

    Parameters
    ----------
    text : str
        The docstring text.
    style : str
        The field list style to use.

    Returns
    -------
    is_field_list : bool
        Whether the field list pattern for style was found in the docstring.
    """
    _matcher = {
        "epytext": is_epytext_field_list,
        "sphinx": is_sphinx_field_list,
    }.get(style)
    if _matcher is None:
        return False

    return any(_matcher(_line) for _line in text.rstrip().splitlines())


def is_epytext_field_list(line: str) -> Union[Match[str], None]:
    """Check if the line is an Epytext field list.

    Epytext field lists look like ``@param x:`` or ``@type x:``.

    Parameters
    ----------
    line : str
        The line to check for Epytext field list patterns.

    Returns
    -------
    Match[str] | None
        A match object if the line matches an Epytext field list pattern,
        None otherwise.
    """
    return re.match(EPYTEXT_REGEX, line)


def is_google_field_list(line: str) -> Union[Match[str], None]:
    """Check if the line is a Google field list.

    Google field lists look like ``x (int): Description of x.``.

    Parameters
    ----------
    line : str
        The line to check for Google field list patterns.

    Returns
    -------
    Match[str] | None
        A match object if the line matches a Google field list pattern,
        None otherwise.
    """
    return re.match(GOOGLE_REGEX, line)


def is_numpy_field_list(line: str) -> Union[Match[str], None]:
    """Check if the line is a NumPy field list.

    NumPy field lists look like ``x : int`` followed by an indented
    description line.

    Parameters
    ----------
    line : str
        The line to check for NumPy field list patterns.

    Returns
    -------
    Match[str] | None
        A match object if the line matches a NumPy field list pattern,
        None otherwise.
    """
    return re.match(NUMPY_REGEX, line)


def is_sphinx_field_list(line: str) -> Union[Match[str], None]:
    """Check if the line is a Sphinx field list.

    Sphinx field lists look like ``:parameter: description``.

    Parameters
    ----------
    line : str
        The line to check for Sphinx field list patterns.

    Returns
    -------
    Match[str] | None
        A match object if the line matches a Sphinx field list pattern,
        None otherwise.
    """
    return re.match(SPHINX_REGEX, line)


# TODO: Add a USER_DEFINED_REGEX to constants.py and use that instead of the
# hardcoded patterns.
def is_user_defined_field_list(line: str) -> Union[Match[str], None]:
    """Check if the line is a user-defined field list.

    User-defined field lists look like::

        parameter - description
        parameter -- description
        @parameter description

    These patterns were in the original docformatter code. They do not
    conform to any common docstring style; there is no documented reason
    they were included and they are retained for historical purposes.

    Parameters
    ----------
    line : str
        The line to check for user-defined field list patterns.

    Returns
    -------
    Match[str] | None
        A match object if the line matches a user-defined field list
        pattern, None otherwise.
    """
    _match = None
    for _pattern in (
        r"[\S ]+ - \S+",
        r"\s*\S+\s+--\s+",
        r"^ *@[a-zA-Z0-9_\- ]*(?:(?!:).)*$",
    ):
        _match = re.match(_pattern, line)
        if _match:
            break

    return _match
"""This module provides docformatter's header pattern recognition functions."""


# Standard Library Imports
import re
from re import Match
from typing import Union

# docformatter Package Imports
from docformatter.constants import (
    ALEMBIC_REGEX,
    NUMPY_SECTION_REGEX,
    REST_SECTION_REGEX,
)


def is_alembic_header(line: str) -> Union[Match[str], None]:
    """Check if the line is an Alembic header.

    Alembic headers look like::

        Revision ID: <revision>
        Revises: <revision>
        Create Date: 2023-01-06 10:13:28.156709

    Parameters
    ----------
    line : str
        The line to check for Alembic header patterns.

    Returns
    -------
    Match[str] | None
        A match object if the line matches an Alembic header pattern, None
        otherwise.
    """
    return re.match(ALEMBIC_REGEX, line)


def is_numpy_section_header(line: str) -> Union[Match[str], None]:
    r"""Check if the line is a NumPy section header.

    NumPy section headers have the pattern ``header\n----``.  The recognized
    section names are: Parameters, Other Parameters, Receives, Returns,
    Yields, Raises, Warns, Warnings, Notes, See Also, Examples, and
    References.

    Parameters
    ----------
    line : str
        The line to check for NumPy section header patterns.

    Returns
    -------
    Match[str] | None
        A match object if the line matches a NumPy section header pattern,
        None otherwise.
    """
    return re.match(NUMPY_SECTION_REGEX, line)


def is_rest_section_header(line: str) -> Union[Match[str], None]:
    r"""Check if the line is a reST section header.

    reST section headers have one of the patterns::

        ====\ndescription\n====
        ----\ndescription\n----
        description\n----

    The adornments used in Python documentation are supported
    (https://devguide.python.org/documentation/markup/#sections):
    ``#`` parts, ``*`` chapters, ``=`` sections, ``-`` subsections,
    ``^`` subsubsections.  The additional docutils-recommended adornments
    (https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#sections)
    are also supported: ``'``, ``"``, ``+``, ``_``, ``~``, `````, ``.``,
    and ``:``.

    Parameters
    ----------
    line : str
        The line to check for reST section header patterns.

    Returns
    -------
    Match[str] | None
        A match object if the line matches a reST section header pattern,
        None otherwise.
    """
    return re.match(REST_SECTION_REGEX, line)
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +"""This module provides docformatter's list pattern recognition functions.""" + + +# Standard Library Imports +import re +from re import Match +from typing import Union + +# docformatter Package Imports +from docformatter.constants import ( + BULLET_REGEX, + ENUM_REGEX, + HEURISTIC_MIN_LIST_ASPECT_RATIO, + OPTION_REGEX, +) + +# docformatter Local Imports +from .fields import ( + is_epytext_field_list, + is_field_list, + is_google_field_list, + is_numpy_field_list, + is_sphinx_field_list, + is_user_defined_field_list, +) +from .headers import is_alembic_header, is_numpy_section_header, is_rest_section_header +from .misc import is_inline_math, is_literal_block + + +def is_type_of_list( + text: str, + strict: bool, + style: str, +) -> bool: + """Determine if docstring line is a list. + + Parameters + ---------- + text : str + The text to check for potential lists. + strict : bool + Whether to strictly adhere to the wrap length argument. If True, + even heuristic lists will be wrapped. + style : str + The docstring style in use. One of 'epytext', 'sphinx', numpy', or 'googlw'. + + Returns + ------- + bool + True if a list pattern is identified, False otherwise. 
+ """ + split_lines = text.rstrip().splitlines() + + if is_heuristic_list(text, strict): + return True + + if is_field_list(text, style): + return False + + return any( + ( + is_bullet_list(line) + or is_enumerated_list(line) + or is_rest_section_header(line) + or is_option_list(line) + or is_epytext_field_list(line) + or is_sphinx_field_list(line) + or is_numpy_field_list(line) + or is_numpy_section_header(line) + or is_google_field_list(line) + or is_user_defined_field_list(line) + or is_literal_block(line) + or is_inline_math(line) + or is_alembic_header(line) + ) + for line in split_lines + ) + + +def is_bullet_list(line: str) -> Union[Match[str], None]: + """Check if the line is a bullet list item. + + Parameters + ---------- + line : str + The line to check for bullet list patterns. + + Notes + ----- + Bullet list items have the following pattern: + - item + * item + + item + + See `_ + + Returns + ------- + Match[str] | None + A match object if the line matches a bullet list pattern, None otherwise. + """ + return re.match(BULLET_REGEX, line) + + +def is_definition_list(line: str) -> Union[Match[str], None]: + """Check if the line is a definition list item. + + Parameters + ---------- + line : str + The line to check for definition list patterns. + + Notes + ----- + Definition list items have the following pattern: + term: definition + + See `_ + + Returns + ------- + Match[str] | None + A match object if the line matches a definition list pattern, None otherwise. + """ + return re.match(ENUM_REGEX, line) + + +def is_enumerated_list(line: str) -> Union[Match[str], None]: + """Check if the line is an enumerated list item. + + Parameters + ---------- + line : str + The line to check for enumerated list patterns. + + Notes + ----- + Enumerated list items have the following pattern: + 1. item + 2. item + + See `_ + + Returns + ------- + Match[str] | None + A match object if the line matches an enumerated list pattern, None otherwise. 
+ """ + return re.match(ENUM_REGEX, line) + + +def is_heuristic_list(text: str, strict: bool) -> bool: + """Check if the line is a heuristic list item. + + Heuristic lists are identified by a long number of lines with short columns. + + Parameters + ---------- + text : str + The text to check for heuristic list patterns. + strict: bool + If True, the function will return False. + If False, it will return True if the text has a high aspect ratio, + indicating it is likely a list. + + Returns + ------- + Match[str] | None + A match object if the line matches a heuristic list pattern, None otherwise. + """ + split_lines = text.rstrip().splitlines() + + # TODO: Find a better way of doing this. Conversely, create a logger and log + # potential lists for the user to decide if they are lists or not. + # Very large number of lines but short columns probably means a list of + # items. + if ( + len(split_lines) / max([len(line.strip()) for line in split_lines] + [1]) + > HEURISTIC_MIN_LIST_ASPECT_RATIO + ) and not strict: + return True + + return False + + +def is_option_list(line: str) -> Union[Match[str], None]: + """Check if the line is an option list item. + + Parameters + ---------- + line : str + The line to check for option list patterns. + + Notes + ----- + Option list items have the following pattern: + -a, --all: Show all items. + -h, --help: Show help message. + + See `_ + + Returns + ------- + Match[str] | None + A match object if the line matches an option list pattern, None otherwise. 
+ """ + return re.match(OPTION_REGEX, line) diff --git a/src/docformatter/patterns/misc.py b/src/docformatter/patterns/misc.py new file mode 100644 index 00000000..2a6ec866 --- /dev/null +++ b/src/docformatter/patterns/misc.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python +# +# docformatter.patterns.misc.py is part of the docformatter project +# +# Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +"""This module provides docformatter's miscellaneous pattern recognition functions.""" + + +# Standard Library Imports +import re +from re import Match +from typing import Union + +# docformatter Package Imports +from docformatter.constants import LITERAL_REGEX, URL_REGEX + + +# TODO: Create INLINE_MATH_REGEX in constants.py and use it here. +def is_inline_math(line: str) -> Union[Match[str], None]: + """Check if the line is an inline math expression. 
+ + Parameters + ---------- + line : str + The line to check for inline math patterns. + + Notes + ----- + Inline math expressions have the following pattern: + c :math:`[0, `]` + + Returns + ------- + Match[str] | None + A match object if the line matches an inline math pattern, None otherwise. + """ + return re.match(r" *\w *:[a-zA-Z0-9_\- ]*:", line) + + +def is_literal_block(line: str) -> Union[Match[str], None]: + """Check if the line is a literal block. + + Parameters + ---------- + line : str + The line to check for literal block patterns. + + Notes + ----- + Literal blocks have the following pattern: + :: + code + + Returns + ------- + Match[str] | None + A match object if the line matches a literal block pattern, None otherwise. + """ + return re.match(LITERAL_REGEX, line) + + +def is_probably_beginning_of_sentence(line: str) -> Union[Match[str], None, bool]: + """Determine if the line begins a sentence. + + Parameters + ---------- + line : str + The line to be tested. + + Returns + ------- + is_beginning : bool + True if this token is the beginning of a sentence, False otherwise. + """ + # Check heuristically for a parameter list. + for token in ["@", "-", r"\*"]: + if re.search(rf"\s{token}\s", line): + return True + + stripped_line = line.strip() + is_beginning_of_sentence = re.match(r"^[-@\)]", stripped_line) + is_pydoc_ref = re.match(r"^:\w+:", stripped_line) + + return is_beginning_of_sentence and not is_pydoc_ref + + +def is_some_sort_of_code(text: str) -> bool: + """Return True if the text looks like code. + + Parameters + ---------- + text : str + The text to check for code patterns. + + Returns + ------- + is_code : bool + True if the text contains and code patterns, False otherwise. 
+ """ + return any( + len(word) > 50 and not re.match(URL_REGEX, word) # noqa: PLR2004 + for word in text.split() + ) diff --git a/src/docformatter/patterns/rest.py b/src/docformatter/patterns/rest.py new file mode 100644 index 00000000..dd891147 --- /dev/null +++ b/src/docformatter/patterns/rest.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python +# +# docformatter.patterns.rest.py is part of the docformatter project +# +# Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +"""This module provides docformatter's reST directive pattern recognition functions.""" + + +# Standard Library Imports +import re + +# docformatter Package Imports +from docformatter.constants import REST_REGEX + + +def do_find_directives(text: str) -> bool: + """Determine if docstring contains any reST directives. + + .. 
todo:: + + Currently this function only returns True/False to indicate whether a + reST directive was found. Should return a list of tuples containing + the start and end position of each reST directive found similar to the + function do_find_links(). + + Parameters + ---------- + text : str + The docstring text to test. + + Returns + ------- + is_directive : bool + Whether the docstring is a reST directive. + """ + _rest_iter = re.finditer(REST_REGEX, text) + return bool([(_rest.start(0), _rest.end(0)) for _rest in _rest_iter]) diff --git a/src/docformatter/patterns/url.py b/src/docformatter/patterns/url.py new file mode 100644 index 00000000..5a09dd2c --- /dev/null +++ b/src/docformatter/patterns/url.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# +# docformatter.patterns.url.py is part of the docformatter project +# +# Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +"""This module provides docformatter's URL pattern recognition functions.""" + + +# Standard Library Imports +import contextlib +import re +from typing import List, Tuple + +# docformatter Package Imports +from docformatter.constants import URL_REGEX, URL_SKIP_REGEX + + +def do_find_links(text: str) -> List[Tuple[int, int]]: + r"""Determine if docstring contains any links. + + Parameters + ---------- + text : str + The docstring description to check for link patterns. + + Returns + ------- + url_index : list + A list of tuples with each tuple containing the starting and ending + position of each URL found in the passed description. + """ + _url_iter = re.finditer(URL_REGEX, text) + return [(_url.start(0), _url.end(0)) for _url in _url_iter] + + +def do_skip_link(text: str, index: Tuple[int, int]) -> bool: + """Check if the identified URL is other than a complete link. + + Parameters + ---------- + text : str + The description text containing the link. + index : tuple + The index in the text of the starting and ending position of the + identified link. + + Notes + ----- + Is the identified link simply: + 1. The URL scheme pattern such as 's3://' or 'file://' or 'dns:'. + 2. The beginning of a URL link that has been wrapped by the user. + + Returns + ------- + _do_skip : bool + Whether to skip this link and simply treat it as a standard text word. 
+ """ + _do_skip = re.search(URL_SKIP_REGEX, text[index[0] : index[1]]) is not None + + with contextlib.suppress(IndexError): + _do_skip = _do_skip or (text[index[0]] == "<" and text[index[1]] != ">") + + return _do_skip From 3e931a174bd9d005f46d3c328e235ab367fc60ae Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Tue, 22 Jul 2025 23:59:02 -0400 Subject: [PATCH 05/21] refactor: add new module with functions for wrapping various elements --- src/docformatter/wrappers/__init__.py | 34 +++++ src/docformatter/wrappers/description.py | 133 +++++++++++++++++ src/docformatter/wrappers/fields.py | 174 +++++++++++++++++++++++ src/docformatter/wrappers/summary.py | 50 +++++++ src/docformatter/wrappers/url.py | 101 +++++++++++++ 5 files changed, 492 insertions(+) create mode 100644 src/docformatter/wrappers/__init__.py create mode 100644 src/docformatter/wrappers/description.py create mode 100644 src/docformatter/wrappers/fields.py create mode 100644 src/docformatter/wrappers/summary.py create mode 100644 src/docformatter/wrappers/url.py diff --git a/src/docformatter/wrappers/__init__.py b/src/docformatter/wrappers/__init__.py new file mode 100644 index 00000000..e3cc0744 --- /dev/null +++ b/src/docformatter/wrappers/__init__.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# +# docformatter.wrappers.__init__.py is part of the docformatter project +# +# Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all 
copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +"""This is the docformatter wrappers package.""" + + +# docformatter Local Imports +from .description import * # noqa F403 +from .fields import * # noqa F403 +from .summary import * # noqa F403 +from .url import * # noqa F403 diff --git a/src/docformatter/wrappers/description.py b/src/docformatter/wrappers/description.py new file mode 100644 index 00000000..d7ee4aca --- /dev/null +++ b/src/docformatter/wrappers/description.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python +# +# docformatter.wrappers.description.py is part of the docformatter project +# +# Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +"""This module provides docformatter's description wrapper functions.""" + + +# Standard Library Imports +import contextlib +from typing import List + +# docformatter Package Imports +import docformatter.patterns as _patterns +import docformatter.strings as _strings + + +def do_wrap_description( # noqa: PLR0913 + text, + indentation, + wrap_length, + force_wrap, + strict, + rest_sections, + style: str = "sphinx", +): + """Return line-wrapped description text. + + We only wrap simple descriptions. We leave doctests, multi-paragraph text, and + bulleted lists alone. + + Parameters + ---------- + text : str + The unwrapped description text. + indentation : str + The indentation string. + wrap_length : int + The line length at which to wrap long lines. + force_wrap : bool + Whether to force docformatter to wrap long lines when normally they + would remain untouched. + strict : bool + Whether to strictly follow reST syntax to identify lists. + rest_sections : str + A regular expression used to find reST section header adornments. + style : str + The name of the docstring style to use when dealing with parameter + lists (default is sphinx). + + Returns + ------- + description : str + The description wrapped at wrap_length characters. + """ + text = _strings.do_strip_leading_blank_lines(text) + + # TODO: Don't wrap the doctests, but wrap the remainder of the docstring. + # Do not modify docstrings with doctests at all. + if ">>>" in text: + return text + + text = _strings.do_reindent(text, indentation).rstrip() + + # TODO: Don't wrap the code section or the lists, but wrap everything else. + # Ignore possibly complicated cases. 
+ if wrap_length <= 0 or ( + not force_wrap + and ( + _patterns.is_some_sort_of_code(text) + or _patterns.do_find_directives(text) + or _patterns.is_type_of_list(text, strict, style) + ) + ): + return text + + lines = _strings.do_split_description(text, indentation, wrap_length, style) + + return indentation + "\n".join(lines).strip() + + +def do_close_description( + text: str, + text_idx: int, + indentation: str, +) -> List[str]: + """Wrap any description following the last URL or field list. + + Parameters + ---------- + text : str + The docstring text. + text_idx : int + The index of the last URL or field list match. + indentation : str + The indentation string to use with docstrings. + + Returns + ------- + _split_lines : list + The text input split into individual lines. + """ + _split_lines = [] + with contextlib.suppress(IndexError): + _split_lines = ( + text[text_idx + 1 :] if text[text_idx] == "\n" else text[text_idx:] + ).splitlines() + for _idx, _line in enumerate(_split_lines): + if _line not in ["", "\n", f"{indentation}"]: + _split_lines[_idx] = f"{indentation}{_line.strip()}" + + return _split_lines diff --git a/src/docformatter/wrappers/fields.py b/src/docformatter/wrappers/fields.py new file mode 100644 index 00000000..00964aa4 --- /dev/null +++ b/src/docformatter/wrappers/fields.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python +# +# docformatter.wrappers.fields.py is part of the docformatter project +# +# Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The 
above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +"""This module provides docformatter's field list wrapper functions.""" + + +# Standard Library Imports +import re +import textwrap +from typing import List, Tuple + +# docformatter Package Imports +import docformatter.strings as _strings +from docformatter.constants import DEFAULT_INDENT + + +def do_wrap_field_lists( # noqa: PLR0913 + text: str, + field_idx: List[Tuple[int, int]], + lines: List[str], + text_idx: int, + indentation: str, + wrap_length: int, +) -> Tuple[List[str], int]: + """Wrap field lists in the long description. + + Parameters + ---------- + text : str + The long description text. + field_idx : list + The list of field list indices found in the description text. + lines : list + The list of formatted lines in the description that come before the + first parameter list item. + text_idx : int + The index in the description of the end of the last parameter list + item. + indentation : str + The string to use to indent each line in the long description. + wrap_length : int + The line length at which to wrap long lines in the description. + + Returns + ------- + lines, text_idx : tuple + A list of the long description lines and the index in the long + description where the last parameter list item ended. 
+ """ + lines.extend( + _strings.description_to_list( + text[text_idx : field_idx[0][0]], + indentation, + wrap_length, + ) + ) + + for _idx, _field_idx in enumerate(field_idx): + _field_name = text[_field_idx[0] : _field_idx[1]] + _field_body = _do_join_field_body( + text, + field_idx, + _idx, + ) + + if len(f"{_field_name}{_field_body}") <= (wrap_length - len(indentation)): + _field = f"{_field_name}{_field_body}" + lines.append(f"{indentation}{_field}") + else: + lines.extend( + _do_wrap_field(_field_name, _field_body, indentation, wrap_length) + ) + + text_idx = _field_idx[1] + + return lines, text_idx + + +def _do_join_field_body(text: str, field_idx: list[tuple[int, int]], idx: int) -> str: + """Join the filed body lines into a single line that can be wrapped. + + Parameters + ---------- + text : str + The docstring long description text that contains field lists. + field_idx : list + The list of tuples containing the found field list start and end position. + idx : int + The index of the tuple in the field_idx list to extract the field body. + + Returns + ------- + _field_body : str + The field body collapsed into a single line. + """ + try: + _field_body = text[field_idx[idx][1] : field_idx[idx + 1][0]].strip() + except IndexError: + _field_body = text[field_idx[idx][1] :].strip() + + _field_body = " ".join( + [_line.strip() for _line in _field_body.splitlines()] + ).strip() + + # Add a space before the field body unless the field body is a link. + if not _field_body.startswith("`") and _field_body: + _field_body = f" {_field_body}" + + # Is there a blank line between field lists? Keep it if so. + if text[field_idx[idx][1] : field_idx[idx][1] + 2] == "\n\n": + _field_body = "\n" + + return _field_body + + +def _do_wrap_field(field_name, field_body, indentation, wrap_length): + """Wrap complete field at wrap_length characters. + + Parameters + ---------- + field_name : str + The name text of the field. + field_body : str + The body text of the field. 
+ indentation : str + The string to use for indentation of the first line in the field. + wrap_length : int + The number of characters at which to wrap the field. + + Returns + ------- + _wrapped_field : str + The field wrapped at wrap_length characters. + """ + if len(indentation) > DEFAULT_INDENT: + _subsequent = indentation + int(0.5 * len(indentation)) * " " + else: + _subsequent = 2 * indentation + + _wrapped_field = textwrap.wrap( + textwrap.dedent(f"{field_name}{field_body}"), + width=wrap_length, + initial_indent=indentation, + subsequent_indent=_subsequent, + ) + + for _idx, _field in enumerate(_wrapped_field): + _indent = indentation if _idx == 0 else _subsequent + _wrapped_field[_idx] = f"{_indent}{re.sub(' +', ' ', _field.strip())}" + + return _wrapped_field diff --git a/src/docformatter/wrappers/summary.py b/src/docformatter/wrappers/summary.py new file mode 100644 index 00000000..fabfda07 --- /dev/null +++ b/src/docformatter/wrappers/summary.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# +# docformatter.wrappers.summary.py is part of the docformatter project +# +# Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +"""This module provides docformatter's summary wrapper functions.""" + + +# Standard Library Imports +import re +import textwrap + + +def do_unwrap_summary(summary): + """Return summary with newlines removed in preparation for wrapping.""" + return re.sub(r"\s*\n\s*", " ", summary) + + +def do_wrap_summary(summary, initial_indent, subsequent_indent, wrap_length): + """Return line-wrapped summary text.""" + if wrap_length > 0: + return textwrap.fill( + do_unwrap_summary(summary), + width=wrap_length, + initial_indent=initial_indent, + subsequent_indent=subsequent_indent, + ).strip() + else: + return summary diff --git a/src/docformatter/wrappers/url.py b/src/docformatter/wrappers/url.py new file mode 100644 index 00000000..946a4f33 --- /dev/null +++ b/src/docformatter/wrappers/url.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python +# +# docformatter.wrappers.url.py is part of the docformatter project +# +# Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +"""This module provides docformatter's URL wrapper functions.""" + + +# Standard Library Imports +import contextlib +from typing import Iterable, List, Tuple + +# docformatter Package Imports +import docformatter.patterns as _patterns +import docformatter.strings as _strings + + +def do_wrap_urls( + text: str, + url_idx: Iterable, + text_idx: int, + indentation: str, + wrap_length: int, +) -> Tuple[List[str], int]: + """Wrap URLs in the long description. + + Parameters + ---------- + text : str + The long description text. + url_idx : list + The list of URL indices found in the description text. + text_idx : int + The index in the description of the end of the last URL. + indentation : str + The string to use to indent each line in the long description. + wrap_length : int + The line length at which to wrap long lines in the description. + + Returns + ------- + _lines, _text_idx : tuple + A list of the long description lines and the index in the long + description where the last URL ended. + """ + _lines = [] + for _url in url_idx: + # Skip URL if it is simply a quoted pattern. + if _patterns.do_skip_link(text, _url): + continue + + # If the text including the URL is longer than the wrap length, + # we need to split the description before the URL, wrap the pre-URL + # text, and add the URL as a separate line. + if len(text[text_idx : _url[1]]) > (wrap_length - len(indentation)): + # Wrap everything in the description before the first URL. 
+ _lines.extend( + _strings.description_to_list( + text[text_idx : _url[0]], + indentation, + wrap_length, + ) + ) + + with contextlib.suppress(IndexError): + if text[_url[0] - len(indentation) - 2] != "\n" and not _lines[-1]: + _lines.pop(-1) + + # Add the URL making sure that the leading quote is kept with a quoted URL. + _text = f"{text[_url[0]: _url[1]]}" + with contextlib.suppress(IndexError): + if _lines[0][-1] == '"': + _lines[0] = _lines[0][:-2] + _text = f'"{text[_url[0] : _url[1]]}' + + _lines.append(f"{_strings.do_clean_excess_whitespace(_text, indentation)}") + + text_idx = _url[1] + + return _lines, text_idx From b9585ad8245c8b238c3d996bc17241e721465fda Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Wed, 23 Jul 2025 00:00:52 -0400 Subject: [PATCH 06/21] refactor: move string manipulation functions from syntax module --- src/docformatter/strings.py | 306 ++++++++++++++++++++++++++++++------ 1 file changed, 255 insertions(+), 51 deletions(-) diff --git a/src/docformatter/strings.py b/src/docformatter/strings.py index f3965457..f6bbf95a 100644 --- a/src/docformatter/strings.py +++ b/src/docformatter/strings.py @@ -1,6 +1,9 @@ #!/usr/bin/env python # +# docformatter.strings.py is part of the docformatter project +# # Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -21,28 +24,116 @@ # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -"""This module provides docformatter string functions.""" +"""This module provides docformatter string manipulation functions.""" # Standard Library Imports import contextlib import re -from typing import List, Match, Optional, Union - -# TODO: Read this from the configuration file or command line. -ABBREVIATIONS = ( - "e.g.", - "i.e.", - "et. 
al.", - "etc.", - "Dr.", - "Mr.", - "Mrs.", - "Ms.", +import textwrap +from typing import Iterable, List, Optional, Tuple, Union + +# docformatter Package Imports +import docformatter.patterns as _patterns +import docformatter.util as _util +import docformatter.wrappers as _wrappers +from docformatter.constants import ( + ABBREVIATIONS, + QUOTE_TYPES, + RAW_QUOTE_TYPES, + UCODE_QUOTE_TYPES, ) -def find_shortest_indentation(lines: List[str]) -> str: +def description_to_list( + description: str, + indentation: str, + wrap_length: int, +) -> List[str]: + """Convert the description to a list of wrap length lines. + + Parameters + ---------- + description : str + The docstring description. + indentation : str + The indentation (number of spaces or tabs) to place in front of each + line. + wrap_length : int + The column to wrap each line at. + + Returns + ------- + _wrapped_lines : list + A list containing each line of the description wrapped at wrap_length. + """ + # This is a description containing only one paragraph. + if len(re.findall(r"\n\n", description)) <= 0: + return textwrap.wrap( + textwrap.dedent(description), + width=wrap_length, + initial_indent=indentation, + subsequent_indent=indentation, + ) + + # This is a description containing multiple paragraphs. 
+ _wrapped_lines = [] + for _line in description.split("\n\n"): + _wrapped_line = textwrap.wrap( + textwrap.dedent(_line), + width=wrap_length, + initial_indent=indentation, + subsequent_indent=indentation, + ) + + if _wrapped_line: + _wrapped_lines.extend(_wrapped_line) + _wrapped_lines.append("") + + with contextlib.suppress(IndexError): + if not _wrapped_lines[-1] and not _wrapped_lines[-2]: + _wrapped_lines.pop(-1) + + if ( + description[-len(indentation) - 1 : -len(indentation)] == "\n" + and description[-len(indentation) - 2 : -len(indentation)] != "\n\n" + ): + _wrapped_lines.pop(-1) + + return _wrapped_lines + + +def do_clean_excess_whitespace(text: str, indentation: str) -> str: + r"""Strip newlines and multiple whitespace from a string. + + This function deals with situations such as: + + `Get\n Cookies.txt str: """Determine the shortest indentation in a list of lines. Parameters @@ -70,32 +161,7 @@ def find_shortest_indentation(lines: List[str]) -> str: return indentation or "" -def is_probably_beginning_of_sentence(line: str) -> Union[Match[str], None, bool]: - """Determine if the line begins a sentence. - - Parameters - ---------- - line : str - The line to be tested. - - Returns - ------- - is_beginning : bool - True if this token is the beginning of a sentence. - """ - # Check heuristically for a parameter list. - for token in ["@", "-", r"\*"]: - if re.search(rf"\s{token}\s", line): - return True - - stripped_line = line.strip() - is_beginning_of_sentence = re.match(r"^[-@\)]", stripped_line) - is_pydoc_ref = re.match(r"^:\w+:", stripped_line) - - return is_beginning_of_sentence and not is_pydoc_ref - - -def normalize_line(line: str, newline: str) -> str: +def do_normalize_line(line: str, newline: str) -> str: """Return line with fixed ending, if ending was present in line. Otherwise, does nothing. 
@@ -117,15 +183,15 @@ def normalize_line(line: str, newline: str) -> str: return stripped + newline if stripped != line else line -def normalize_line_endings(lines, newline): +def do_normalize_line_endings(lines, newline): """Return fixed line endings. All lines will be modified to use the most common line ending. """ - return "".join([normalize_line(line, newline) for line in lines]) + return "".join([do_normalize_line(line, newline) for line in lines]) -def normalize_summary(summary: str, noncap: Optional[List[str]] = None) -> str: +def do_normalize_summary(summary: str, noncap: Optional[List[str]] = None) -> str: """Return normalized docstring summary. A normalized docstring summary will have the first word capitalized and @@ -173,7 +239,98 @@ def normalize_summary(summary: str, noncap: Optional[List[str]] = None) -> str: return summary -def split_first_sentence(text): +def do_reindent(text, indentation): + """Return reindented text that matches indentation.""" + if "\t" not in indentation: + text = text.expandtabs() + + text = textwrap.dedent(text) + + return ( + "\n".join( + [(indentation + line).rstrip() for line in text.splitlines()] + ).rstrip() + + "\n" + ) + + +def do_split_description( + text: str, + indentation: str, + wrap_length: int, + style: str, +) -> Union[List[str], Iterable]: + """Split the description into a list of lines. + + Parameters + ---------- + text : str + The docstring description. + indentation : str + The indentation (number of spaces or tabs) to place in front of each + line. + wrap_length : int + The column to wrap each line at. + style : str + The docstring style to use for dealing with parameter lists. + + Returns + ------- + _lines : list + A list containing each line of the description with any links put + back together. + """ + _lines: List[str] = [] + _text_idx = 0 + + # Check if the description contains any URLs. + _url_idx = _patterns.do_find_links(text) + + # Check if the description contains any field lists. 
+ _field_idx, _wrap_fields = _patterns.do_find_field_lists( + text, + style, + ) + + # Field list wrapping takes precedence over URL wrapping. + _url_idx = _util.prefer_field_over_url( + _field_idx, + _url_idx, + ) + + if not _url_idx and not (_field_idx and _wrap_fields): + return description_to_list( + text, + indentation, + wrap_length, + ) + + if _url_idx: + _lines, _text_idx = _wrappers.do_wrap_urls( + text, + _url_idx, + 0, + indentation, + wrap_length, + ) + + if _field_idx: + _lines, _text_idx = _wrappers.do_wrap_field_lists( + text, + _field_idx, + _lines, + _text_idx, + indentation, + wrap_length, + ) + else: + # Finally, add everything after the last URL or field list directive. + _lines += _wrappers.do_close_description(text, _text_idx, indentation) + + return _lines + + +def do_split_first_sentence(text): """Split text into first sentence and the rest. Return a tuple (sentence, rest). @@ -212,7 +369,7 @@ def split_first_sentence(text): return sentence, delimiter + rest -def split_summary(lines) -> List[str]: +def do_split_summary(lines) -> List[str]: """Split multi-sentence summary into the first sentence and the rest.""" if not lines or not lines[0].strip(): return lines @@ -221,7 +378,6 @@ def split_summary(lines) -> List[str]: tokens = re.split(r"(\s+)", text) # Keep whitespace for accurate rejoining sentence = [] - rest = [] i = 0 while i < len(tokens): @@ -247,20 +403,20 @@ def split_summary(lines) -> List[str]: return lines -def split_summary_and_description(contents): +def do_split_summary_and_description(contents): """Split docstring into summary and description. Return tuple (summary, description). """ split_lines = contents.rstrip().splitlines() - split_lines = split_summary(split_lines) + split_lines = do_split_summary(split_lines) for index in range(1, len(split_lines)): # Empty line separation would indicate the rest is the description or # symbol on second line probably is a description with a list. 
if not split_lines[index].strip() or ( index + 1 < len(split_lines) - and is_probably_beginning_of_sentence(split_lines[index + 1]) + and _patterns.is_probably_beginning_of_sentence(split_lines[index + 1]) ): return ( "\n".join(split_lines[:index]).strip(), @@ -268,11 +424,59 @@ def split_summary_and_description(contents): ) # Break on first sentence. - split = split_first_sentence(contents) + split = do_split_first_sentence(contents) if split[0].strip() and split[1].strip(): return ( split[0].strip(), - find_shortest_indentation(split[1].splitlines()[1:]) + split[1].strip(), + do_find_shortest_indentation(split[1].splitlines()[1:]) + split[1].strip(), ) return contents, "" + + +def do_strip_docstring(docstring: str) -> Tuple[str, str]: + """Return contents of docstring and opening quote type. + + Strips the docstring of its triple quotes, trailing white space, + and line returns. Determine the type of docstring quote (either string, + raw, or Unicode) and returns the opening quotes, including the type + identifier, with single quotes replaced by double quotes. + + Parameters + ---------- + docstring: str + The docstring, including the opening and closing triple quotes. + + Returns + ------- + (docstring, open_quote) : tuple + The docstring with the triple quotes removed. + The opening quote type with single quotes replaced by double + quotes. 
+ """ + docstring = docstring.strip() + + for quote in QUOTE_TYPES: + if quote in RAW_QUOTE_TYPES + UCODE_QUOTE_TYPES and ( + docstring.startswith(quote) and docstring.endswith(quote[1:]) + ): + return docstring.split(quote, 1)[1].rsplit(quote[1:], 1)[ + 0 + ].strip(), quote.replace("'", '"') + elif docstring.startswith(quote) and docstring.endswith(quote): + return docstring.split(quote, 1)[1].rsplit(quote, 1)[ + 0 + ].strip(), quote.replace("'", '"') + + raise ValueError( + "docformatter only handles triple-quoted (single or double) strings" + ) + + +def do_strip_leading_blank_lines(text): + """Return text with leading blank lines removed.""" + split = text.splitlines() + + found = next((index for index, line in enumerate(split) if line.strip()), 0) + + return "\n".join(split[found:]) From f2e8b9bc4f7168c0fb96a8e5e59326ed22763056 Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Wed, 23 Jul 2025 00:02:01 -0400 Subject: [PATCH 07/21] refactor: add function to have field lists take precedence over URL strings --- src/docformatter/util.py | 111 ++++++++++++++++++++++++++------------- 1 file changed, 74 insertions(+), 37 deletions(-) diff --git a/src/docformatter/util.py b/src/docformatter/util.py index f397c3bb..008f7cf7 100644 --- a/src/docformatter/util.py +++ b/src/docformatter/util.py @@ -1,6 +1,9 @@ #!/usr/bin/env python # +# docformatter.util.py is part of the docformatter project +# # Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -23,10 +26,12 @@ # SOFTWARE. """This module provides docformatter utility functions.""" + # Standard Library Imports import os import re import sysconfig +from typing import List, Tuple unicode = str @@ -37,57 +42,58 @@ def find_py_files(sources, recursive, exclude=None): """Find Python source files. 
Parameters - - sources: iterable with paths as strings. - - recursive: drill down directories if True. - - exclude: string based on which directories and files are excluded. + ---------- + sources : list + Paths to files and/or directories to search. + recursive : bool + Drill down directories if True. + exclude : list + Which directories and files are excluded. - Return: yields paths to found files. + Returns + ------- + list of files found. """ - def not_hidden(name): - """Return True if file 'name' isn't .hidden.""" - return not name.startswith(".") + def is_hidden(name): + """Return True if file 'name' is .hidden.""" + return os.path.basename(os.path.abspath(name)).startswith(".") - def is_excluded(name, exclude): + def is_excluded(name, excluded): """Return True if file 'name' is excluded.""" return ( - any( - re.search(re.escape(str(e)), name, re.IGNORECASE) - for e in exclude - ) - if exclude + any(re.search(re.escape(str(e)), name, re.IGNORECASE) for e in excluded) + if excluded else False ) - for name in sorted(sources): - if recursive and os.path.isdir(name): - for root, dirs, children in os.walk(unicode(name)): - dirs[:] = [ - d - for d in dirs - if not_hidden(d) and not is_excluded(d, _PYTHON_LIBS) - ] - dirs[:] = sorted( - [d for d in dirs if not is_excluded(d, exclude)] - ) + for _name in sorted(sources): + if recursive and os.path.isdir(_name): + for root, dirs, children in os.walk(unicode(_name)): + if is_excluded(root, exclude): + break + files = sorted( [ - f - for f in children - if not_hidden(f) and not is_excluded(f, exclude) + _file + for _file in children + if not is_hidden(_file) + and not is_excluded(_file, exclude) + and _file.endswith(".py") ] ) for filename in files: - if filename.endswith(".py") and not is_excluded( - root, exclude - ): - yield os.path.join(root, filename) - else: - yield name + yield os.path.join(root, filename) + elif ( + _name.endswith(".py") + and not is_hidden(_name) + and not is_excluded(_name, exclude) + ): + 
yield _name def has_correct_length(length_range, start, end): - """Determine if the line under test is within desired docstring length. + """Determine if the line under test is within the desired docstring length. This function is used with the --docstring-length min_rows max_rows argument. @@ -104,7 +110,8 @@ def has_correct_length(length_range, start, end): Returns ------- correct_length: bool - True if is correct length or length range is None, else False + True if the docstring has the correct length or length range is None, + otherwise False """ if length_range is None: return True @@ -136,6 +143,36 @@ def is_in_range(line_range, start, end): if line_range is None: return True return any( - line_range[0] <= line_no <= line_range[1] - for line_no in range(start, end + 1) + line_range[0] <= line_no <= line_range[1] for line_no in range(start, end + 1) ) + + +def prefer_field_over_url( + field_idx: List[Tuple[int, int]], + url_idx: List[Tuple[int, int]], +): + """Remove URL indices that overlap with field list indices. + + Parameters + ---------- + field_idx : list + The list of field list index tuples. + url_idx : list + The list of URL index tuples. + + Returns + ------- + url_idx : list + The url_idx list with any tuples that have indices overlapping with field + list indices removed. 
+ """ + if not field_idx: + return url_idx + + nonoverlapping_urls = [] + + any_param_start = min(e[0] for e in field_idx) + for _key, _value in enumerate(url_idx): + if _value[1] < any_param_start: + nonoverlapping_urls.append(_value) + return nonoverlapping_urls From b3b1fef01be81bf5ca25757e2259e391ec84df11 Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Wed, 23 Jul 2025 00:02:49 -0400 Subject: [PATCH 08/21] chore: add copyright info for new maintainer --- src/docformatter/__main__.py | 5 ++++- src/docformatter/__pkginfo__.py | 4 ++++ src/docformatter/encode.py | 4 ++++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/src/docformatter/__main__.py b/src/docformatter/__main__.py index 150e4180..ce7aaea0 100755 --- a/src/docformatter/__main__.py +++ b/src/docformatter/__main__.py @@ -1,6 +1,9 @@ #!/usr/bin/env python # +# docformatter.__main__.py is part of the docformatter project +# # Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -135,7 +138,7 @@ def _main(argv, standard_out, standard_error, standard_in): def main(): - """Run main entry point.""" + """Run the main entry point.""" # SIGPIPE is not available on Windows. with contextlib.suppress(AttributeError): # Exit on broken pipe. diff --git a/src/docformatter/__pkginfo__.py b/src/docformatter/__pkginfo__.py index 8a93cec2..a882835e 100644 --- a/src/docformatter/__pkginfo__.py +++ b/src/docformatter/__pkginfo__.py @@ -1,6 +1,9 @@ #!/usr/bin/env python # +# docformatter.patterns.__pkginfo__.py is part of the docformatter project +# # Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowlans # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -23,4 +26,5 @@ # SOFTWARE. 
"""Package information for docformatter.""" + __version__ = "1.7.7" diff --git a/src/docformatter/encode.py b/src/docformatter/encode.py index 30e10420..a4a63845 100644 --- a/src/docformatter/encode.py +++ b/src/docformatter/encode.py @@ -1,6 +1,9 @@ #!/usr/bin/env python # +# docformatter.encode.py is part of the docformatter project +# # Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -23,6 +26,7 @@ # SOFTWARE. """This module provides docformatter's Encoder class.""" + # Standard Library Imports import collections import locale From b0cefedbc94ccd4f8fa0700b452a9a7bef5b27fc Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Wed, 23 Jul 2025 00:05:58 -0400 Subject: [PATCH 09/21] refactor: rename attributes --- src/docformatter/configuration.py | 71 +++++++++++++------------------ 1 file changed, 30 insertions(+), 41 deletions(-) diff --git a/src/docformatter/configuration.py b/src/docformatter/configuration.py index 1d0ad720..69106365 100644 --- a/src/docformatter/configuration.py +++ b/src/docformatter/configuration.py @@ -1,6 +1,9 @@ #!/usr/bin/env python # +# docformatter.configuration.py is part of the docformatter project +# # Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -32,19 +35,13 @@ from configparser import ConfigParser from typing import Dict, List, Union -TOMLLIB_INSTALLED = False -TOMLI_INSTALLED = False with contextlib.suppress(ImportError): if sys.version_info >= (3, 11): # Standard Library Imports import tomllib - - TOMLLIB_INSTALLED = True else: # Third Party Imports - import tomli - - TOMLI_INSTALLED = True + import tomli as tomllib # docformatter Package Imports from 
docformatter import __pkginfo__ @@ -56,7 +53,7 @@ class Configurater: parser: argparse.ArgumentParser = argparse.ArgumentParser() """Parser object.""" - flargs_dct: Dict[str, Union[bool, float, int, str]] = {} + flargs: Dict[str, Union[bool, float, int, str]] = {} """Dictionary of configuration file arguments.""" configuration_file_lst = [ @@ -101,21 +98,21 @@ def do_parse_arguments(self) -> None: "-i", "--in-place", action="store_true", - default=self.flargs_dct.get("in-place", "false").lower() == "true", + default=self.flargs.get("in-place", "false").lower() == "true", help="make changes to files instead of printing diffs", ) changes.add_argument( "-c", "--check", action="store_true", - default=self.flargs_dct.get("check", "false").lower() == "true", + default=self.flargs.get("check", "false").lower() == "true", help="only check and report incorrectly formatted files", ) self.parser.add_argument( "-d", "--diff", action="store_true", - default=self.flargs_dct.get("diff", "false").lower() == "true", + default=self.flargs.get("diff", "false").lower() == "true", help="when used with `--check` or `--in-place`, also what changes " "would be made", ) @@ -123,14 +120,14 @@ def do_parse_arguments(self) -> None: "-r", "--recursive", action="store_true", - default=self.flargs_dct.get("recursive", "false").lower() == "true", + default=self.flargs.get("recursive", "false").lower() == "true", help="drill down directories recursively", ) self.parser.add_argument( "-e", "--exclude", nargs="*", - default=self.flargs_dct.get("exclude", None), + default=self.flargs.get("exclude", None), help="in recursive mode, exclude directories and files by names", ) self.parser.add_argument( @@ -138,14 +135,14 @@ def do_parse_arguments(self) -> None: "--non-cap", action="store", nargs="*", - default=self.flargs_dct.get("non-cap", None), + default=self.flargs.get("non-cap", None), help="list of words not to capitalize when they appear as the first word " "in the summary", ) 
self.parser.add_argument( "--black", action="store_true", - default=self.flargs_dct.get("black", "false").lower() == "true", + default=self.flargs.get("black", "false").lower() == "true", help="make formatting compatible with standard black options " "(default: False)", ) @@ -166,7 +163,7 @@ def do_parse_arguments(self) -> None: self.parser.add_argument( "-s", "--style", - default=self.flargs_dct.get("style", "sphinx"), + default=self.flargs.get("style", "sphinx"), help="name of the docstring style to use when formatting " "parameter lists (default: sphinx)", ) @@ -174,14 +171,14 @@ def do_parse_arguments(self) -> None: "--rest-section-adorns", type=str, dest="rest_section_adorns", - default=self.flargs_dct.get( + default=self.flargs.get( "rest_section_adorns", r"[!\"#$%&'()*+,-./:;<=>?@[\]^_`{|}~]{4,}" ), help="regex for identifying reST section header adornments", ) self.parser.add_argument( "--wrap-summaries", - default=int(self.flargs_dct.get("wrap-summaries", _default_wrap_summaries)), + default=int(self.flargs.get("wrap-summaries", _default_wrap_summaries)), type=int, metavar="length", help="wrap long summary lines at this length; " @@ -191,7 +188,7 @@ def do_parse_arguments(self) -> None: self.parser.add_argument( "--wrap-descriptions", default=int( - self.flargs_dct.get("wrap-descriptions", _default_wrap_descriptions) + self.flargs.get("wrap-descriptions", _default_wrap_descriptions) ), type=int, metavar="length", @@ -202,7 +199,7 @@ def do_parse_arguments(self) -> None: self.parser.add_argument( "--force-wrap", action="store_true", - default=self.flargs_dct.get("force-wrap", "false").lower() == "true", + default=self.flargs.get("force-wrap", "false").lower() == "true", help="force descriptions to be wrapped even if it may " "result in a mess (default: False)", ) @@ -211,7 +208,7 @@ def do_parse_arguments(self) -> None: type=int, dest="tab_width", metavar="width", - default=int(self.flargs_dct.get("tab-width", 1)), + default=int(self.flargs.get("tab-width", 
1)), help="tabs in indentation are this many characters when " "wrapping lines (default: 1)", ) @@ -219,21 +216,20 @@ def do_parse_arguments(self) -> None: "--blank", dest="post_description_blank", action="store_true", - default=self.flargs_dct.get("blank", "false").lower() == "true", + default=self.flargs.get("blank", "false").lower() == "true", help="add blank line after description (default: False)", ) self.parser.add_argument( "--pre-summary-newline", action="store_true", - default=self.flargs_dct.get("pre-summary-newline", "false").lower() - == "true", + default=self.flargs.get("pre-summary-newline", "false").lower() == "true", help="add a newline before the summary of a multi-line docstring " "(default: False)", ) self.parser.add_argument( "--pre-summary-space", action="store_true", - default=self.flargs_dct.get( + default=self.flargs.get( "pre-summary-space", _default_pre_summary_space ).lower() == "true", @@ -242,7 +238,7 @@ def do_parse_arguments(self) -> None: self.parser.add_argument( "--make-summary-multi-line", action="store_true", - default=self.flargs_dct.get("make-summary-multi-line", "false").lower() + default=self.flargs.get("make-summary-multi-line", "false").lower() == "true", help="add a newline before and after the summary of a one-line " "docstring (default: False)", @@ -250,7 +246,7 @@ def do_parse_arguments(self) -> None: self.parser.add_argument( "--close-quotes-on-newline", action="store_true", - default=self.flargs_dct.get("close-quotes-on-newline", "false").lower() + default=self.flargs.get("close-quotes-on-newline", "false").lower() == "true", help="place closing triple quotes on a new-line when a " "one-line docstring wraps to two or more lines " @@ -260,7 +256,7 @@ def do_parse_arguments(self) -> None: "--range", metavar="line", dest="line_range", - default=self.flargs_dct.get("range", None), + default=self.flargs.get("range", None), type=int, nargs=2, help="apply docformatter to docstrings between these " @@ -270,7 +266,7 @@ def 
do_parse_arguments(self) -> None: "--docstring-length", metavar="length", dest="length_range", - default=self.flargs_dct.get("docstring-length", None), + default=self.flargs.get("docstring-length", None), type=int, nargs=2, help="apply docformatter to docstrings of given length range " @@ -279,7 +275,7 @@ def do_parse_arguments(self) -> None: self.parser.add_argument( "--non-strict", action="store_true", - default=self.flargs_dct.get("non-strict", "false").lower() == "true", + default=self.flargs.get("non-strict", "false").lower() == "true", help="don't strictly follow reST syntax to identify lists (see " "issue #67) (default: False)", ) @@ -329,11 +325,7 @@ def _do_read_configuration_file(self) -> None: fullpath, ext = os.path.splitext(self.config_file) filename = os.path.basename(fullpath) - if ( - ext == ".toml" - and (TOMLI_INSTALLED or TOMLLIB_INSTALLED) - and filename == "pyproject" - ): + if ext == ".toml" and filename == "pyproject": self._do_read_toml_configuration() if (ext == ".cfg" and filename == "setup") or ( @@ -344,14 +336,11 @@ def _do_read_configuration_file(self) -> None: def _do_read_toml_configuration(self) -> None: """Load configuration information from a *.toml file.""" with open(self.config_file, "rb") as f: - if TOMLI_INSTALLED: - config = tomli.load(f) - elif TOMLLIB_INSTALLED: - config = tomllib.load(f) + config = tomllib.load(f) result = config.get("tool", {}).get("docformatter", None) if result is not None: - self.flargs_dct = { + self.flargs = { k: v if isinstance(v, list) else str(v) for k, v in result.items() } @@ -366,7 +355,7 @@ def _do_read_parser_configuration(self) -> None: "docformatter", ]: if _section in config.sections(): - self.flargs_dct = { + self.flargs = { k: v if isinstance(v, list) else str(v) for k, v in config[_section].items() } From 13c63d292b6478b9cb467c48c2995cf030401395 Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Wed, 23 Jul 2025 00:07:01 -0400 Subject: [PATCH 10/21] chore: add integration test mark --- 
poetry.lock | 246 ++++++++++++++++++++++++++----------------------- pyproject.toml | 10 ++ 2 files changed, 142 insertions(+), 114 deletions(-) diff --git a/poetry.lock b/poetry.lock index 602d2314..c836013e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. [[package]] name = "alabaster" @@ -141,14 +141,14 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "certifi" -version = "2025.4.26" +version = "2025.6.15" description = "Python package for providing Mozilla's CA Bundle." optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" groups = ["dev"] files = [ - {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"}, - {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"}, + {file = "certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057"}, + {file = "certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b"}, ] [[package]] @@ -341,7 +341,7 @@ description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" groups = ["linting"] -markers = "python_version < \"3.11\"" +markers = "python_version == \"3.9\"" files = [ {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, @@ -352,15 +352,15 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} [[package]] name = "click" -version = "8.2.0" +version = "8.2.1" description = "Composable command line interface toolkit" optional = false 
python-versions = ">=3.10" groups = ["linting"] -markers = "python_version >= \"3.11\"" +markers = "python_version >= \"3.10\"" files = [ - {file = "click-8.2.0-py3-none-any.whl", hash = "sha256:6b303f0b2aa85f1cb4e5303078fadcbcd4e476f114fab9b5007005711839325c"}, - {file = "click-8.2.0.tar.gz", hash = "sha256:f5452aeddd9988eefa20f90f05ab66f17fce1ee2a36907fd30b05bbb5953814d"}, + {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, + {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, ] [package.dependencies] @@ -452,7 +452,7 @@ description = "cryptography is a package which provides cryptographic recipes an optional = false python-versions = ">=3.7" groups = ["dev"] -markers = "python_version < \"3.11\" and sys_platform == \"linux\"" +markers = "python_version == \"3.9\" and sys_platform == \"linux\"" files = [ {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, @@ -498,63 +498,63 @@ test-randomorder = ["pytest-randomly"] [[package]] name = "cryptography" -version = "44.0.3" +version = "45.0.4" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = "!=3.9.0,!=3.9.1,>=3.7" groups = ["dev"] -markers = "python_version >= \"3.11\" and sys_platform == \"linux\"" -files = [ - {file = "cryptography-44.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:962bc30480a08d133e631e8dfd4783ab71cc9e33d5d7c1e192f0b7c06397bb88"}, - {file = "cryptography-44.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffc61e8f3bf5b60346d89cd3d37231019c17a081208dfbbd6e1605ba03fa137"}, - {file = "cryptography-44.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58968d331425a6f9eedcee087f77fd3c927c88f55368f43ff7e0a19891f2642c"}, - {file = "cryptography-44.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:e28d62e59a4dbd1d22e747f57d4f00c459af22181f0b2f787ea83f5a876d7c76"}, - {file = "cryptography-44.0.3-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:af653022a0c25ef2e3ffb2c673a50e5a0d02fecc41608f4954176f1933b12359"}, - {file = "cryptography-44.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:157f1f3b8d941c2bd8f3ffee0af9b049c9665c39d3da9db2dc338feca5e98a43"}, - {file = "cryptography-44.0.3-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:c6cd67722619e4d55fdb42ead64ed8843d64638e9c07f4011163e46bc512cf01"}, - {file = "cryptography-44.0.3-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:b424563394c369a804ecbee9b06dfb34997f19d00b3518e39f83a5642618397d"}, - {file = "cryptography-44.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c91fc8e8fd78af553f98bc7f2a1d8db977334e4eea302a4bfd75b9461c2d8904"}, - {file = "cryptography-44.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:25cd194c39fa5a0aa4169125ee27d1172097857b27109a45fadc59653ec06f44"}, - {file = "cryptography-44.0.3-cp37-abi3-win32.whl", hash = "sha256:3be3f649d91cb182c3a6bd336de8b61a0a71965bd13d1a04a0e15b39c3d5809d"}, - {file = "cryptography-44.0.3-cp37-abi3-win_amd64.whl", hash = 
"sha256:3883076d5c4cc56dbef0b898a74eb6992fdac29a7b9013870b34efe4ddb39a0d"}, - {file = "cryptography-44.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:5639c2b16764c6f76eedf722dbad9a0914960d3489c0cc38694ddf9464f1bb2f"}, - {file = "cryptography-44.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3ffef566ac88f75967d7abd852ed5f182da252d23fac11b4766da3957766759"}, - {file = "cryptography-44.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:192ed30fac1728f7587c6f4613c29c584abdc565d7417c13904708db10206645"}, - {file = "cryptography-44.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7d5fe7195c27c32a64955740b949070f21cba664604291c298518d2e255931d2"}, - {file = "cryptography-44.0.3-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3f07943aa4d7dad689e3bb1638ddc4944cc5e0921e3c227486daae0e31a05e54"}, - {file = "cryptography-44.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cb90f60e03d563ca2445099edf605c16ed1d5b15182d21831f58460c48bffb93"}, - {file = "cryptography-44.0.3-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:ab0b005721cc0039e885ac3503825661bd9810b15d4f374e473f8c89b7d5460c"}, - {file = "cryptography-44.0.3-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:3bb0847e6363c037df8f6ede57d88eaf3410ca2267fb12275370a76f85786a6f"}, - {file = "cryptography-44.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b0cc66c74c797e1db750aaa842ad5b8b78e14805a9b5d1348dc603612d3e3ff5"}, - {file = "cryptography-44.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6866df152b581f9429020320e5eb9794c8780e90f7ccb021940d7f50ee00ae0b"}, - {file = "cryptography-44.0.3-cp39-abi3-win32.whl", hash = "sha256:c138abae3a12a94c75c10499f1cbae81294a6f983b3af066390adee73f433028"}, - {file = "cryptography-44.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:5d186f32e52e66994dce4f766884bcb9c68b8da62d61d9d215bfe5fb56d21334"}, - {file = 
"cryptography-44.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:cad399780053fb383dc067475135e41c9fe7d901a97dd5d9c5dfb5611afc0d7d"}, - {file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:21a83f6f35b9cc656d71b5de8d519f566df01e660ac2578805ab245ffd8523f8"}, - {file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fc3c9babc1e1faefd62704bb46a69f359a9819eb0292e40df3fb6e3574715cd4"}, - {file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:e909df4053064a97f1e6565153ff8bb389af12c5c8d29c343308760890560aff"}, - {file = "cryptography-44.0.3-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:dad80b45c22e05b259e33ddd458e9e2ba099c86ccf4e88db7bbab4b747b18d06"}, - {file = "cryptography-44.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:479d92908277bed6e1a1c69b277734a7771c2b78633c224445b5c60a9f4bc1d9"}, - {file = "cryptography-44.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:896530bc9107b226f265effa7ef3f21270f18a2026bc09fed1ebd7b66ddf6375"}, - {file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9b4d4a5dbee05a2c390bf212e78b99434efec37b17a4bff42f50285c5c8c9647"}, - {file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02f55fb4f8b79c1221b0961488eaae21015b69b210e18c386b69de182ebb1259"}, - {file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:dd3db61b8fe5be220eee484a17233287d0be6932d056cf5738225b9c05ef4fff"}, - {file = "cryptography-44.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:978631ec51a6bbc0b7e58f23b68a8ce9e5f09721940933e9c217068388789fe5"}, - {file = "cryptography-44.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:5d20cc348cca3a8aa7312f42ab953a56e15323800ca3ab0706b8cd452a3a056c"}, - {file = "cryptography-44.0.3.tar.gz", hash = 
"sha256:fe19d8bc5536a91a24a8133328880a41831b6c5df54599a8417b62fe015d3053"}, +markers = "python_version >= \"3.10\" and sys_platform == \"linux\"" +files = [ + {file = "cryptography-45.0.4-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:425a9a6ac2823ee6e46a76a21a4e8342d8fa5c01e08b823c1f19a8b74f096069"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:680806cf63baa0039b920f4976f5f31b10e772de42f16310a6839d9f21a26b0d"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4ca0f52170e821bc8da6fc0cc565b7bb8ff8d90d36b5e9fdd68e8a86bdf72036"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f3fe7a5ae34d5a414957cc7f457e2b92076e72938423ac64d215722f6cf49a9e"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:25eb4d4d3e54595dc8adebc6bbd5623588991d86591a78c2548ffb64797341e2"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ce1678a2ccbe696cf3af15a75bb72ee008d7ff183c9228592ede9db467e64f1b"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:49fe9155ab32721b9122975e168a6760d8ce4cffe423bcd7ca269ba41b5dfac1"}, + {file = "cryptography-45.0.4-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:2882338b2a6e0bd337052e8b9007ced85c637da19ef9ecaf437744495c8c2999"}, + {file = "cryptography-45.0.4-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:23b9c3ea30c3ed4db59e7b9619272e94891f8a3a5591d0b656a7582631ccf750"}, + {file = "cryptography-45.0.4-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b0a97c927497e3bc36b33987abb99bf17a9a175a19af38a892dc4bbb844d7ee2"}, + {file = "cryptography-45.0.4-cp311-abi3-win32.whl", hash = "sha256:e00a6c10a5c53979d6242f123c0a97cff9f3abed7f064fc412c36dc521b5f257"}, + {file = "cryptography-45.0.4-cp311-abi3-win_amd64.whl", hash = 
"sha256:817ee05c6c9f7a69a16200f0c90ab26d23a87701e2a284bd15156783e46dbcc8"}, + {file = "cryptography-45.0.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:964bcc28d867e0f5491a564b7debb3ffdd8717928d315d12e0d7defa9e43b723"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6a5bf57554e80f75a7db3d4b1dacaa2764611ae166ab42ea9a72bcdb5d577637"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:46cf7088bf91bdc9b26f9c55636492c1cce3e7aaf8041bbf0243f5e5325cfb2d"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7bedbe4cc930fa4b100fc845ea1ea5788fcd7ae9562e669989c11618ae8d76ee"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:eaa3e28ea2235b33220b949c5a0d6cf79baa80eab2eb5607ca8ab7525331b9ff"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:7ef2dde4fa9408475038fc9aadfc1fb2676b174e68356359632e980c661ec8f6"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:6a3511ae33f09094185d111160fd192c67aa0a2a8d19b54d36e4c78f651dc5ad"}, + {file = "cryptography-45.0.4-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:06509dc70dd71fa56eaa138336244e2fbaf2ac164fc9b5e66828fccfd2b680d6"}, + {file = "cryptography-45.0.4-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:5f31e6b0a5a253f6aa49be67279be4a7e5a4ef259a9f33c69f7d1b1191939872"}, + {file = "cryptography-45.0.4-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:944e9ccf67a9594137f942d5b52c8d238b1b4e46c7a0c2891b7ae6e01e7c80a4"}, + {file = "cryptography-45.0.4-cp37-abi3-win32.whl", hash = "sha256:c22fe01e53dc65edd1945a2e6f0015e887f84ced233acecb64b4daadb32f5c97"}, + {file = "cryptography-45.0.4-cp37-abi3-win_amd64.whl", hash = "sha256:627ba1bc94f6adf0b0a2e35d87020285ead22d9f648c7e75bb64f367375f3b22"}, + {file = 
"cryptography-45.0.4-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a77c6fb8d76e9c9f99f2f3437c1a4ac287b34eaf40997cfab1e9bd2be175ac39"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7aad98a25ed8ac917fdd8a9c1e706e5a0956e06c498be1f713b61734333a4507"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3530382a43a0e524bc931f187fc69ef4c42828cf7d7f592f7f249f602b5a4ab0"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:6b613164cb8425e2f8db5849ffb84892e523bf6d26deb8f9bb76ae86181fa12b"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:96d4819e25bf3b685199b304a0029ce4a3caf98947ce8a066c9137cc78ad2c58"}, + {file = "cryptography-45.0.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b97737a3ffbea79eebb062eb0d67d72307195035332501722a9ca86bab9e3ab2"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4828190fb6c4bcb6ebc6331f01fe66ae838bb3bd58e753b59d4b22eb444b996c"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:03dbff8411206713185b8cebe31bc5c0eb544799a50c09035733716b386e61a4"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:51dfbd4d26172d31150d84c19bbe06c68ea4b7f11bbc7b3a5e146b367c311349"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:0339a692de47084969500ee455e42c58e449461e0ec845a34a6a9b9bf7df7fb8"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:0cf13c77d710131d33e63626bd55ae7c0efb701ebdc2b3a7952b9b23a0412862"}, + {file = "cryptography-45.0.4-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:bbc505d1dc469ac12a0a064214879eac6294038d6b24ae9f71faae1448a9608d"}, + {file = "cryptography-45.0.4.tar.gz", hash = 
"sha256:7405ade85c83c37682c8fe65554759800a4a8c54b2d96e0f8ad114d31b808d57"}, ] [package.dependencies] -cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} +cffi = {version = ">=1.14", markers = "platform_python_implementation != \"PyPy\""} [package.extras] -docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=3.0.0) ; python_version >= \"3.8\""] +docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs ; python_full_version >= \"3.8.0\"", "sphinx-rtd-theme (>=3.0.0) ; python_full_version >= \"3.8.0\""] docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"] -nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_version >= \"3.8\""] -pep8test = ["check-sdist ; python_version >= \"3.8\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] +nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_full_version >= \"3.8.0\""] +pep8test = ["check-sdist ; python_full_version >= \"3.8.0\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] sdist = ["build (>=1.0.0)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi (>=2024)", "cryptography-vectors (==44.0.3)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] +test = ["certifi (>=2024)", "cryptography-vectors (==45.0.4)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] test-randomorder = ["pytest-randomly"] [[package]] @@ -751,18 +751,18 @@ test = ["portend", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-c [[package]] name = "jaraco-functools" -version = "4.1.0" +version = "4.2.1" description = "Functools like those found in stdlib" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "jaraco.functools-4.1.0-py3-none-any.whl", hash = "sha256:ad159f13428bc4acbf5541ad6dec511f91573b90fba04df61dafa2a1231cf649"}, - {file = "jaraco_functools-4.1.0.tar.gz", hash 
= "sha256:70f7e0e2ae076498e212562325e805204fc092d7b4c17e0e86c959e249701a9d"}, + {file = "jaraco_functools-4.2.1-py3-none-any.whl", hash = "sha256:590486285803805f4b1f99c60ca9e94ed348d4added84b74c7a12885561e524e"}, + {file = "jaraco_functools-4.2.1.tar.gz", hash = "sha256:be634abfccabce56fa3053f8c7ebe37b682683a4ee7793670ced17bab0087353"}, ] [package.dependencies] -more-itertools = "*" +more_itertools = "*" [package.extras] check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] @@ -1165,19 +1165,19 @@ type = ["mypy (>=1.14.1)"] [[package]] name = "pluggy" -version = "1.5.0" +version = "1.6.0" description = "plugin and hook calling mechanisms for python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["dev", "testing"] files = [ - {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, - {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, ] [package.extras] dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] +testing = ["coverage", "pytest", "pytest-benchmark"] [[package]] name = "py" @@ -1193,14 +1193,14 @@ files = [ [[package]] name = "pycodestyle" -version = "2.13.0" +version = "2.14.0" description = "Python style guide checker" optional = false python-versions = ">=3.9" groups = ["linting"] files = [ - {file = "pycodestyle-2.13.0-py2.py3-none-any.whl", hash = "sha256:35863c5974a271c7a726ed228a14a4f6daf49df369d8c50cd9a6f58a5e143ba9"}, - {file = "pycodestyle-2.13.0.tar.gz", hash = "sha256:c8415bf09abe81d9c7f872502a6eee881fbe85d8763dd5b9924bb0a01d67efae"}, + {file = "pycodestyle-2.14.0-py2.py3-none-any.whl", hash = 
"sha256:dd6bf7cb4ee77f8e016f9c8e74a35ddd9f67e1d5fd4184d86c3b98e07099f42d"}, + {file = "pycodestyle-2.14.0.tar.gz", hash = "sha256:c4b5b517d278089ff9d0abdec919cd97262a3367449ea1c8b49b91529167b783"}, ] [[package]] @@ -1218,14 +1218,14 @@ files = [ [[package]] name = "pydantic" -version = "2.11.4" +version = "2.11.7" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" groups = ["linting"] files = [ - {file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"}, - {file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"}, + {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, + {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, ] [package.dependencies] @@ -1370,14 +1370,14 @@ toml = ["tomli (>=1.2.3) ; python_version < \"3.11\""] [[package]] name = "pygments" -version = "2.19.1" +version = "2.19.2" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" groups = ["dev", "linting"] files = [ - {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, - {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, + {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, + {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, ] [package.extras] @@ -1455,6 +1455,24 @@ pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] +[[package]] +name = "pytest-order" +version = "1.3.0" +description = "pytest plugin to run your tests in a specific order" +optional = false +python-versions = ">=3.7" +groups = ["testing"] +files = [ + {file = "pytest_order-1.3.0-py3-none-any.whl", hash = "sha256:2cd562a21380345dd8d5774aa5fd38b7849b6ee7397ca5f6999bbe6e89f07f6e"}, + {file = "pytest_order-1.3.0.tar.gz", hash = "sha256:51608fec3d3ee9c0adaea94daa124a5c4c1d2bb99b00269f098f414307f23dde"}, +] + +[package.dependencies] +pytest = [ + {version = ">=5.0", markers = "python_version < \"3.10\""}, + {version = ">=6.2.4", markers = "python_version >= \"3.10\""}, +] + [[package]] name = "pywin32-ctypes" version = "0.2.3" @@ -1490,19 +1508,19 @@ md = ["cmarkgfm (>=0.8.0)"] [[package]] name = "requests" -version = "2.32.3" +version = "2.32.4" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, - {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, + {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, + {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, ] [package.dependencies] certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" +charset_normalizer = ">=2,<4" idna = ">=2.5,<4" urllib3 = ">=1.21.1,<3" @@ -1562,14 +1580,14 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "rstcheck" -version = "6.2.4" +version = "6.2.5" description = "Checks syntax of reStructuredText and code blocks nested within it" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["linting"] files = [ - {file = "rstcheck-6.2.4-py3-none-any.whl", hash = "sha256:23de2575ba0af1adcddea87a20d69187f0fb9dd8270f59eb98d63461c95375a7"}, - {file = "rstcheck-6.2.4.tar.gz", hash = "sha256:384942563dfbfcc85903a587ecf050447217c46b51e266ed3fe51371bc599015"}, + {file = "rstcheck-6.2.5-py3-none-any.whl", hash = "sha256:09af9555cf05f23651189154066d483ced25d36ebb3f01dc3a5d27524e4a5fdc"}, + {file = "rstcheck-6.2.5.tar.gz", hash = "sha256:122b6d6b953fa1a09d7e7de42ac5d8938da291c6f68351ace6166bb50fc3bd6c"}, ] [package.dependencies] @@ -1578,22 +1596,22 @@ typer = ">=0.12.0" [package.extras] dev = ["rstcheck[docs,sphinx,testing,toml,type-check]", "tox (>=3.15)"] -docs = ["myst-parser (>=3)", "sphinx (>=5.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx-click (>=4.0.3)", "sphinx-rtd-theme (>=1.2)", "sphinxcontrib-spelling (>=7.3)"] -sphinx = ["sphinx (>=5.0)"] +docs = ["myst-parser (>=3)", "sphinx (>=6.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx-click (>=4.0.3)", 
"sphinx-rtd-theme (>=1.2)", "sphinxcontrib-spelling (>=7.3)"] +sphinx = ["sphinx (>=6.0)"] testing = ["coverage-conditional-plugin (>=0.5)", "coverage[toml] (>=6.0)", "pytest (>=7.2)", "pytest-cov (>=3.0)", "pytest-randomly (>=3.0)", "pytest-sugar (>=0.9.5)"] toml = ["tomli (>=2.0) ; python_version <= \"3.10\""] type-check = ["mypy (>=1.0)"] [[package]] name = "rstcheck-core" -version = "1.2.1" +version = "1.2.2" description = "Checks syntax of reStructuredText and code blocks nested within it" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["linting"] files = [ - {file = "rstcheck-core-1.2.1.tar.gz", hash = "sha256:9b330020d912e2864f23f332c1a0569463ca3b06b8fee7b7bdd201b055f7f831"}, - {file = "rstcheck_core-1.2.1-py3-none-any.whl", hash = "sha256:1c100de418b6c9e14d9cf6558644d0ab103fdc447f891313882d02df3a3c52ba"}, + {file = "rstcheck_core-1.2.2-py3-none-any.whl", hash = "sha256:2af6be0f91c8bed88f05ae9695331dc0f329ac379e98d469f4ff4536ef95e718"}, + {file = "rstcheck_core-1.2.2.tar.gz", hash = "sha256:9e4842efcc32fe6dbe1767bf1f96225495369c82a2b5e0ed2969082f1ed56c9e"}, ] [package.dependencies] @@ -1602,8 +1620,8 @@ pydantic = ">=2" [package.extras] dev = ["rstcheck-core[docs,sphinx,testing,toml,type-check,yaml]", "tox (>=3.15)"] -docs = ["m2r2 (>=0.3.2)", "sphinx (>=5.0,!=7.2.5)", "sphinx-autobuild (>=2021.3.14)", "sphinx-autodoc-typehints (>=1.15)", "sphinx-rtd-theme (>=1.2)", "sphinxcontrib-apidoc (>=0.3)", "sphinxcontrib-spelling (>=7.3)"] -sphinx = ["sphinx (>=5.0)"] +docs = ["myst-parser (>=3)", "sphinx (>=6.0,!=7.2.5)", "sphinx-autobuild (>=2021.3.14)", "sphinx-autodoc-typehints (>=1.15)", "sphinx-rtd-theme (>=1.2)", "sphinxcontrib-apidoc (>=0.3)", "sphinxcontrib-spelling (>=7.3)"] +sphinx = ["sphinx (>=6.0)"] testing = ["coverage-conditional-plugin (>=0.5)", "coverage[toml] (>=6.0)", "pytest (>=7.2)", "pytest-cov (>=3.0)", "pytest-mock (>=3.7)", "pytest-randomly (>=3.0)", "pytest-sugar (>=0.9.5)"] toml = ["tomli (>=2.0) ; 
python_version <= \"3.10\""] type-check = ["mypy (>=1.0)", "types-PyYAML (>=6.0.0)", "types-docutils (>=0.18)"] @@ -1866,18 +1884,18 @@ files = [ {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, ] -markers = {main = "python_version < \"3.11\" and extra == \"tomli\"", dev = "extra == \"tomli\" and python_version < \"3.11\"", linting = "python_version < \"3.11\"", testing = "python_version < \"3.11\""} +markers = {main = "python_version < \"3.11\" and extra == \"tomli\"", dev = "python_version < \"3.11\"", linting = "python_version < \"3.11\"", testing = "python_version < \"3.11\""} [[package]] name = "tomlkit" -version = "0.13.2" +version = "0.13.3" description = "Style preserving TOML library" optional = false python-versions = ">=3.8" groups = ["linting"] files = [ - {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, - {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, + {file = "tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0"}, + {file = "tomlkit-0.13.3.tar.gz", hash = "sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1"}, ] [[package]] @@ -1931,14 +1949,14 @@ urllib3 = ">=1.26.0" [[package]] name = "typer" -version = "0.15.3" +version = "0.16.0" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
optional = false python-versions = ">=3.7" groups = ["linting"] files = [ - {file = "typer-0.15.3-py3-none-any.whl", hash = "sha256:c86a65ad77ca531f03de08d1b9cb67cd09ad02ddddf4b34745b5008f43b239bd"}, - {file = "typer-0.15.3.tar.gz", hash = "sha256:818873625d0569653438316567861899f7e9972f2e6e0c16dab608345ced713c"}, + {file = "typer-0.16.0-py3-none-any.whl", hash = "sha256:1f79bed11d4d02d4310e3c1b7ba594183bcedb0ac73b27a9e5f28f6fb5b98855"}, + {file = "typer-0.16.0.tar.gz", hash = "sha256:af377ffaee1dbe37ae9440cb4e8f11686ea5ce4e9bae01b84ae7c63b87f1dd3b"}, ] [package.dependencies] @@ -1949,27 +1967,27 @@ typing-extensions = ">=3.7.4.3" [[package]] name = "typing-extensions" -version = "4.13.2" -description = "Backported and Experimental Type Hints for Python 3.8+" +version = "4.14.0" +description = "Backported and Experimental Type Hints for Python 3.9+" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["dev", "linting", "testing"] files = [ - {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, - {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, + {file = "typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"}, + {file = "typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4"}, ] markers = {dev = "python_version < \"3.11\"", testing = "python_version < \"3.11\""} [[package]] name = "typing-inspection" -version = "0.4.0" +version = "0.4.1" description = "Runtime typing introspection tools" optional = false python-versions = ">=3.9" groups = ["linting"] files = [ - {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, - {file = "typing_inspection-0.4.0.tar.gz", hash = 
"sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, + {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"}, + {file = "typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, ] [package.dependencies] @@ -1988,14 +2006,14 @@ files = [ [[package]] name = "urllib3" -version = "2.4.0" +version = "2.5.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, - {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, + {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, + {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, ] [package.extras] @@ -2116,14 +2134,14 @@ files = [ [[package]] name = "zipp" -version = "3.21.0" +version = "3.23.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, - {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, + {file = "zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e"}, + {file = "zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166"}, ] [package.extras] @@ -2131,7 +2149,7 @@ check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \" cover = 
["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] -test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more_itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] type = ["pytest-mypy"] [extras] @@ -2140,4 +2158,4 @@ tomli = ["tomli"] [metadata] lock-version = "2.1" python-versions = "^3.9" -content-hash = "f64c322b0824ab694a476a9ff492a6b739c5d466acb9e4f7cce96a5bf7cb3069" +content-hash = "c089b933a4b6fe34cea947626ca20700c5718c12054224407332919fcbef9654" diff --git a/pyproject.toml b/pyproject.toml index e277ff3c..539f046d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,6 +45,7 @@ coverage = {extras = ["toml"], version = "^6.4.0"} mock = "^4.0.0" pytest = "^7.1.0" pytest-cov = "^4.0.0" +pytest-order = "^1.3.0" [tool.poetry.group.linting.dependencies] autopep8 = "^2.0.0" @@ -107,6 +108,7 @@ convention = "pep257" [tool.pytest.ini_options] markers = [ "unit: mark the test as a unit test.", + "integration: mark the test as an integration test.", "system: mark the test as a system test.", ] @@ -216,6 +218,7 @@ deps = mock pytest pytest-cov + pytest-order tomli untokenize setenv = @@ -230,6 +233,13 @@ commands = --cov-config={toxinidir}/pyproject.toml \ --cov-branch \ {toxinidir}/tests/ + pytest -s -x -c {toxinidir}/pyproject.toml \ + -m integration \ + --cache-clear \ + --cov=docformatter \ + --cov-config={toxinidir}/pyproject.toml \ + --cov-branch \ + {toxinidir}/tests/ pytest -s -x -c {toxinidir}/pyproject.toml \ -m system \ --cache-clear \ From 745df980dba8e72f04631b6c2ff19dec02ed5a83 Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Wed, 23 Jul 2025 00:13:23 -0400 Subject: [PATCH 11/21] test: update and add tests for format 
module --- tests/_data/pyproject.toml | 5 +- tests/_data/string_files/do_format_code.toml | 949 ++++++++++++++- .../string_files/do_format_docstrings.toml | 1082 +++++++++++++++-- .../_data/string_files/format_functions.toml | 333 +++++ tests/_data/string_files/format_methods.toml | 105 ++ tests/conftest.py | 1 + tests/formatter/test_do_format_code.py | 510 ++------ tests/formatter/test_do_format_docstring.py | 494 +++----- tests/formatter/test_format_functions.py | 310 +++++ tests/formatter/test_format_methods.py | 296 +++++ tests/test_syntax_functions.py | 236 ---- 11 files changed, 3223 insertions(+), 1098 deletions(-) create mode 100644 tests/_data/string_files/format_functions.toml create mode 100644 tests/_data/string_files/format_methods.toml create mode 100644 tests/formatter/test_format_functions.py create mode 100644 tests/formatter/test_format_methods.py delete mode 100644 tests/test_syntax_functions.py diff --git a/tests/_data/pyproject.toml b/tests/_data/pyproject.toml index 008373a2..eecede68 100644 --- a/tests/_data/pyproject.toml +++ b/tests/_data/pyproject.toml @@ -1,3 +1,4 @@ [tool.docformatter] -recursive = true -wrap-summaries = 82 +wrap-summaries = 79 +wrap-descriptions = 72 +blank = false diff --git a/tests/_data/string_files/do_format_code.toml b/tests/_data/string_files/do_format_code.toml index 3aef33f7..e72d6928 100644 --- a/tests/_data/string_files/do_format_code.toml +++ b/tests/_data/string_files/do_format_code.toml @@ -1,13 +1,14 @@ [one_line] -instring='''def foo(): +source='''def foo(): """ Hello foo. """''' -outstring='''def foo(): - """Hello foo."""''' +expected='''def foo(): + """Hello foo.""" +''' [module_docstring] -instring='''#!/usr/env/bin python +source='''#!/usr/env/bin python """This is a module docstring. @@ -20,7 +21,7 @@ docstring. this is not."""''' -outstring='''#!/usr/env/bin python +expected='''#!/usr/env/bin python """This is a module docstring. 1. 
One @@ -33,35 +34,37 @@ is not."""''' [newline_module_variable] -instring=''' +source=''' CONST = 123 """docstring for CONST.""" ''' -outstring=''' +expected=''' CONST = 123 - """docstring for CONST.""" + """Docstring for CONST.""" + ''' [class_docstring] -instring=''' +source=''' class TestClass: """This is a class docstring. :cvar test_int: a class attribute. ..py.method: big_method() """ ''' -outstring=''' +expected=''' class TestClass: """This is a class docstring. :cvar test_int: a class attribute. ..py.method: big_method() """ + ''' [newline_class_variable] -instring=''' +source=''' class TestClass: """This is a class docstring.""" @@ -75,7 +78,7 @@ instring=''' """This is a second class variable docstring.""" ''' -outstring=''' +expected=''' class TestClass: """This is a class docstring.""" @@ -84,10 +87,26 @@ outstring=''' test_var2 = 1 """This is a second class variable docstring.""" + +''' + +[class_attribute_wrap] +source='''class TestClass: + """This is a class docstring.""" + + test_int = 1 + """This is a very, very, very long docstring that should really be reformatted nicely by docformatter."""''' +expected='''class TestClass: + """This is a class docstring.""" + + test_int = 1 + """This is a very, very, very long docstring that should really be + reformatted nicely by docformatter.""" + ''' [newline_outside_docstring] -instring=''' +source=''' def new_function(): """Description of function.""" found = next( @@ -96,7 +115,7 @@ def new_function(): return "\n".join(split[found:]) ''' -outstring=''' +expected=''' def new_function(): """Description of function.""" found = next( @@ -107,63 +126,525 @@ def new_function(): ''' [preserve_line_ending] -instring=''' +source=''' def foo():\r """\r - Hello\r - foo. This is a docstring.\r + Hello foo. This is a docstring.\r """\r ''' -outstring='''def foo():\r +expected='''def foo():\r """\r - Hello\r - foo. This is a docstring.\r + Hello foo. 
+ + This is a docstring.\r """\r ''' +[non_docstring] +source='''x = """This +is +not a +docstring."""''' +expected='''x = """This +is +not a +docstring."""''' + +[tabbed_indentation] +source='''def foo(): + """ + Hello foo. + """ + if True: + x = 1''' +expected='''def foo(): + """Hello foo.""" + if True: + x = 1''' + +[mixed_indentation] +source='''def foo(): + """ + Hello foo. + """ + if True: + x = 1''' +expected='''def foo(): + """Hello foo.""" + if True: + x = 1''' + +[escaped_newlines] +source='''def foo(): + """ + Hello foo. + """ + x = \ + 1''' +expected='''def foo(): + """Hello foo.""" + x =\ + 1''' +# Python 3.13+ seems to handle this differently. +expected313='''def foo(): + """Hello foo.""" + x = \ + 1''' + +[code_comments] +source='''def foo(): + """ + Hello foo. + """ + # My comment + # My comment with escape \ + 123''' +expected='''def foo(): + """Hello foo.""" + # My comment + # My comment with escape \ + 123''' + +[inline_comment] +source='''def foo(): + """ + Hello foo. + """ + def test_method_no_chr_92(): the501(92) # \''' +expected='''def foo(): + """Hello foo.""" + + def test_method_no_chr_92(): the501(92) # \''' + +[raw_lowercase] +source='''def foo(): + r""" + Hello raw foo. + """''' +expected='''def foo(): + r"""Hello raw foo.""" +''' + +[raw_uppercase] +source='''def foo(): + R""" + Hello Raw foo. + """''' +expected='''def foo(): + R"""Hello Raw foo.""" +''' + +[raw_lowercase_single] +source="""def foo(): + r''' + Hello raw foo. + '''""" +expected='''def foo(): + r"""Hello raw foo.""" +''' + +[raw_uppercase_single] +source="""def foo(): + R''' + Hello Raw foo. + '''""" +expected='''def foo(): + R"""Hello Raw foo.""" +''' + +[unicode_lowercase] +source='''def foo(): + u""" + Hello unicode foo. + """''' +expected='''def foo(): + u"""Hello unicode foo.""" +''' + + +[unicode_uppercase] +source='''def foo(): + U""" + Hello Unicode foo. 
+ """''' +expected='''def foo(): + U"""Hello Unicode foo.""" +''' + +[unicode_lowercase_single] +source="""def foo(): + u''' + Hello unicode foo. + '''""" +expected='''def foo(): + u"""Hello unicode foo.""" +''' + +[unicode_uppercase_single] +source="""def foo(): + U''' + Hello Unicode foo. + '''""" +expected='''def foo(): + U"""Hello Unicode foo.""" +''' + +[nested_triple] +source="""def foo(): + '''Hello foo. \"\"\"abc\"\"\" + '''""" +expected="""def foo(): + '''Hello foo. \"\"\"abc\"\"\" + ''' +""" + +[multiple_sentences] +source='''def foo(): + """ + Hello foo. + This is a docstring. + """''' +expected='''def foo(): + """Hello foo. + + This is a docstring. + """ +''' + +[multiple_sentences_same_line] +source='''def foo(): + """ + Hello foo. This is a docstring. + """''' +expected='''def foo(): + """Hello foo. + + This is a docstring. + """ +''' + +[multiline_summary] +source='''def foo(): + """ + Hello + foo. This is a docstring. + """''' +expected='''def foo(): + """Hello foo. + + This is a docstring. + """ +''' + +[empty_lines] +source='''def foo(): + """ + Hello + foo and this is a docstring. + + More stuff. + """''' +expected='''def foo(): + """Hello foo and this is a docstring. + + More stuff. + """ +''' + +[class_empty_lines] +source='''class Foo: + """ + Hello + foo and this is a docstring. + + More stuff. + """''' +expected='''class Foo: + """Hello foo and this is a docstring. + + More stuff. + """ + +''' + +[class_empty_lines_2] +source='''def foo(): + class Foo: + + """Summary.""" + pass''' +expected='''def foo(): + class Foo: + """Summary.""" + + pass''' + +[method_empty_lines] +source='''class Foo: + def foo(self): + + + """Summary.""" + pass''' +expected='''class Foo: + def foo(self): + """Summary.""" + pass''' + +[trailing_whitespace] +source='''def foo(): + """ + Hello + foo and this is a docstring. + + More stuff. + """''' +expected='''def foo(): + """Hello foo and this is a docstring. + + More stuff. 
+ """ +''' + +[parameter_list] +source='''def foo(): + """Test + one - first + two - second + """''' +expected='''def foo(): + """Test. + + one - first + two - second + """ +''' + +[single_quote] +source="""def foo(): + 'Just a regular string' +""" +expected="""def foo(): + 'Just a regular string' +""" + +[double_quote] +source="""def foo(): + "Just a regular string" +""" +expected="""def foo(): + "Just a regular string" +""" + +[nested_triple_quote] +source='''def foo(): + 'Just a """foo""" string' +''' +expected='''def foo(): + 'Just a """foo""" string' +''' + +[first_line_assignment] +source='''def foo(): + x = """Just a regular string. Alpha.""" +''' +expected='''def foo(): + x = """Just a regular string. Alpha.""" +''' + +[regular_strings] +source='''def foo(): + """ + Hello + foo and this is a docstring. + + More stuff. + """ + x = """My non-docstring + This should not be touched.""" + + """More stuff + that should not be + touched """''' +expected='''def foo(): + """Hello foo and this is a docstring. + + More stuff. + """ + x = """My non-docstring + This should not be touched.""" + + """More stuff + that should not be + touched """''' + +[syntax_error] +source='''""" +''' +expected='''""" +''' + +[slash_r] +source='''"""\r''' +expected='''"""\r''' + +[slash_r_slash_n] +source='''"""\r\n''' +expected='''"""\r\n''' + +[strip_blank_lines] +source=''' + class TestClass: + + """This is a class docstring.""" + + class_attribute = 1 + + def test_method_1(self): + """This is a method docstring. + + With no blank line after it. + """ + pass + + def test_method_2(self): + + """This is a method docstring. + + With a long description followed by multiple blank lines. + """ + + + pass''' +expected=''' + class TestClass: + """This is a class docstring.""" + + class_attribute = 1 + + def test_method_1(self): + """This is a method docstring. + + With no blank line after it. + """ + pass + + def test_method_2(self): + """This is a method docstring. 
+ + With a long description followed by multiple blank lines. + """ + pass''' + +[range_miss] +source=''' + def f(x): + """ This is a docstring. That should be on more lines""" + pass + def g(x): + """ Badly indented docstring""" + pass''' +expected=''' + def f(x): + """ This is a docstring. That should be on more lines""" + pass + def g(x): + """ Badly indented docstring""" + pass''' + +[range_hit] +source=''' +def f(x): + """ This is a docstring. That should be on more lines""" + pass +def g(x): + """ Badly indented docstring""" + pass''' +expected=''' +def f(x): + """This is a docstring. + + That should be on more lines + """ + pass +def g(x): + """ Badly indented docstring""" + pass''' + +[length_ignore] +source=''' +def f(x): + """This is a docstring. + + + That should be on less lines + """ + pass +def g(x): + """ Badly indented docstring""" + pass''' +expected=''' +def f(x): + """This is a docstring. + + + That should be on less lines + """ + pass +def g(x): + """Badly indented docstring.""" + pass''' [issue_51] -instring='''def my_func(): +source='''def my_func(): -"""Summary of my function.""" -pass''' -outstring='''def my_func(): -"""Summary of my function.""" -pass''' + """Summary of my function.""" + pass''' +expected='''def my_func(): + """Summary of my function.""" + pass''' [issue_51_2] -instring=''' +source=''' def crash_rocket(location): # pragma: no cover """This is a docstring following an in-line comment.""" return location''' -outstring=''' +expected=''' def crash_rocket(location): # pragma: no cover """This is a docstring following an in-line comment.""" return location''' +[issue_79] +source='''def function2(): + """Hello yeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeet -v."""''' +expected='''def function2(): + """Hello yeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeet + -v.""" +''' + [issue_97] -instring='''def pytest_addoption(parser: pytest.Parser) -> " 
+source='''def pytest_addoption(parser: pytest.Parser) -> " None: register_toggle.pytest_addoption(parser) ''' -outstring='''def pytest_addoption(parser: pytest.Parser) -> " +expected='''def pytest_addoption(parser: pytest.Parser) -> " None: register_toggle.pytest_addoption(parser) ''' [issue_97_2] -instring='''def pytest_addoption(parser: pytest.Parser) -> +source='''def pytest_addoption(parser: pytest.Parser) -> None: # pragma: no cover register_toggle.pytest_addoption(parser) ''' -outstring='''def pytest_addoption(parser: pytest.Parser) -> +expected='''def pytest_addoption(parser: pytest.Parser) -> None: # pragma: no cover register_toggle.pytest_addoption(parser) ''' [issue_130] -instring=''' +source=''' class TestClass: """This is a class docstring.""" @@ -178,7 +659,7 @@ class TestClass: pass ''' -outstring=''' +expected=''' class TestClass: """This is a class docstring.""" @@ -191,7 +672,7 @@ class TestClass: ''' [issue_139] -instring=''' +source=''' class TestClass: """This is a class docstring. @@ -199,29 +680,30 @@ class TestClass: ..py.method: big_method() """ ''' -outstring=''' +expected=''' class TestClass: """This is a class docstring. :cvar test_int: a class attribute. 
..py.method: big_method() """ + ''' [issue_139_2] -instring=""" +source=""" class TestClass: variable = 1 """ -outstring=""" +expected=""" class TestClass: variable = 1 """ [issue_156] -instring=''' +source=''' def test_wps3_process_step_io_data_or_href(): """Validates that \'data\' literal values and \'href\' file references are both handled as input for workflow steps corresponding to a WPS-3 process.""" @@ -231,7 +713,7 @@ def test_wps3_process_step_io_data_or_href(): method = method.upper() ''' -outstring=''' +expected=''' def test_wps3_process_step_io_data_or_href(): """Validates that \'data\' literal values and \'href\' file references are both handled as input for workflow steps corresponding to a WPS-3 @@ -243,8 +725,44 @@ def test_wps3_process_step_io_data_or_href(): method = method.upper() ''' +[issue_156_2] +source='''class AcceptHeader(ExtendedSchemaNode): + # ok to use name in this case because target key in the mapping must + # be that specific value but cannot have a field named with this format + name = "Accept" + schema_type = String + missing = drop + default = ContentType.APP_JSON # defaults to JSON for easy use within browsers + + + class AcceptLanguageHeader(ExtendedSchemaNode): + # ok to use name in this case because target key in the mapping must + # be that specific value but cannot have a field named with this format + name = "Accept-Language" + schema_type = String + missing = drop + default = AcceptLanguage.EN_CA + # FIXME: oneOf validator for supported languages (?)''' +expected='''class AcceptHeader(ExtendedSchemaNode): + # ok to use name in this case because target key in the mapping must + # be that specific value but cannot have a field named with this format + name = "Accept" + schema_type = String + missing = drop + default = ContentType.APP_JSON # defaults to JSON for easy use within browsers + + + class AcceptLanguageHeader(ExtendedSchemaNode): + # ok to use name in this case because target key in the mapping must + # be that 
specific value but cannot have a field named with this format + name = "Accept-Language" + schema_type = String + missing = drop + default = AcceptLanguage.EN_CA + # FIXME: oneOf validator for supported languages (?)''' + [issue_156_173] -instring=''' +source=''' class Foo: @abstractmethod @@ -258,7 +776,7 @@ class Foo: """This is a second description.""" ''' -outstring=''' +expected=''' class Foo: @abstractmethod @@ -268,23 +786,358 @@ class Foo: @abstractmethod def baz(self): """This is a second description.""" +''' + +[issue_157_7] +source='''def hanging_rest_link(): + """ + `Source of this snippet + `_. + """ + +def sub_func_test(): + + def long_line_link(): + """Get the Python type of a Click parameter. + + See the list of `custom types provided by Click + `_. + """''' +expected='''def hanging_rest_link(): + """ + `Source of this snippet + `_. + """ + + +def sub_func_test(): + + def long_line_link(): + """Get the Python type of a Click parameter. + + See the list of + `custom types provided by Click `_. + """ +''' + +[issue_157_8] +source='''def mixed_links(): + """Implements the minimal code necessary to locate and call the ``mpm`` CLI on the system. + + Once ``mpm`` is located, we can rely on it to produce the main output of the plugin. + + The output must supports both `Xbar dialect + `_ + and `SwiftBar dialect `_. + """ + +XKCD_MANAGER_ORDER = ("pip", "brew", "npm", "dnf", "apt", "steamcmd") +"""Sequence of package managers as defined by `XKCD #1654: Universal Install Script +`_. + +See the corresponding :issue:`implementation rationale in issue #10 <10>`. +""" + +HASH_HEADERS = ( + "Date", + "From", + "To", +) +""" +Default ordered list of headers to use to compute the unique hash of a mail. + +By default we choose to exclude: + +``Cc`` + Since ``mailman`` apparently `sometimes trims list members + `_ + from the ``Cc`` header to avoid sending duplicates. 
Which means that copies of mail + reflected back from the list server will have a different ``Cc`` to the copy saved by + the MUA at send-time. + +``Bcc`` + Because copies of the mail saved by the MUA at send-time will have ``Bcc``, but copies + reflected back from the list server won't. + +``Reply-To`` + Since a mail could be ``Cc``'d to two lists with different ``Reply-To`` munging + options set. +"""''' +expected='''def mixed_links(): + """Implements the minimal code necessary to locate and call the ``mpm`` CLI + on the system. + + Once ``mpm`` is located, we can rely on it to produce the main output of the plugin. + + The output must supports both `Xbar dialect + `_ + and `SwiftBar dialect `_. + """ + +XKCD_MANAGER_ORDER = ("pip", "brew", "npm", "dnf", "apt", "steamcmd") +"""Sequence of package managers as defined by `XKCD #1654: Universal Install Script +`_. + +See the corresponding :issue:`implementation rationale in issue #10 <10>`. +""" + +HASH_HEADERS = ( + "Date", + "From", + "To", +) +"""Default ordered list of headers to use to compute the unique hash of a mail. + +By default we choose to exclude: + +``Cc`` + Since ``mailman`` apparently `sometimes trims list members + `_ + from the ``Cc`` header to avoid sending duplicates. Which means that copies of mail + reflected back from the list server will have a different ``Cc`` to the copy saved by + the MUA at send-time. + +``Bcc`` + Because copies of the mail saved by the MUA at send-time will have ``Bcc``, but copies + reflected back from the list server won't. + +``Reply-To`` + Since a mail could be ``Cc``'d to two lists with different ``Reply-To`` munging + options set. +""" +''' + +[issue_157_9] +source='''def load_conf(): + """Fetch parameters values from configuration file and merge them with the + defaults. + + User configuration is `merged to the context default_map as Click does + `_. + + This allow user's config to only overrides defaults. 
Values sets from direct + command line parameters, environment variables or interactive prompts, takes + precedence over any values from the config file. + """ + + +strict_selection_match = False +""" +Install sub-command try each user-selected manager until it find one providing +the package we seek to install, after which the process stop. This mean not all +managers will be called, so we allow the CLI output checks to partially match. +""" + + +platforms = {"LINUX", "MACOS", "WSL2"} +"""Homebrew core is now compatible with `Linux and Windows Subsystem for Linux +(WSL) 2 `_. +"""''' +expected='''def load_conf(): + """Fetch parameters values from configuration file and merge them with the + defaults. + + User configuration is + `merged to the context default_map as Click does `_. + + This allow user's config to only overrides defaults. Values sets from direct + command line parameters, environment variables or interactive prompts, takes + precedence over any values from the config file. + """ + +strict_selection_match = False +"""Install sub-command try each user-selected manager until it find one +providing the package we seek to install, after which the process stop. + +This mean not all managers will be called, so we allow the CLI output checks to +partially match. +""" +platforms = {"LINUX", "MACOS", "WSL2"} +"""Homebrew core is now compatible with `Linux and Windows Subsystem for Linux +(WSL) 2 `_. +""" ''' +[issue_157_10] +source='''"""Patch and tweak `Python's standard library mail box constructors. + +`_ to set sane defaults. + +Also forces out our own message factories to add deduplication tools and utilities. +""" + + +"""Patch and tweak `Python's standard library mail box constructors +`_ to set sane defaults. + +Also forces out our own message factories to add deduplication tools and utilities. 
+""" + + +def generate_platforms_graph( + graph_id: str, description: str, groups: frozenset +) -> str: + """Generates an `Euler diagram `_ of platform and their + grouping. + + Euler diagrams are + `not supported by mermaid yet `_ + so we fallback on a flowchart + without arrows. + + Returns a ready to use and properly indented MyST block. + """ + + +def load_conf(self, ctx, param, path_pattern): + """Fetch parameters values from configuration file and merge them with the + defaults. + + User configuration is `merged to the context default_map as Click does + `_. + + + This allow user's config to only overrides defaults. Values sets from direct + command line parameters, environment variables or interactive prompts, takes + precedence over any values from the config file. + """ + + +def pytest_addoption(parser): + """Add custom command line options. + + Based on `Pytest's documentation examples + `_. + + By default, runs non-destructive tests and skips destructive ones. + """''' +expected='''"""Patch and tweak `Python's standard library mail box constructors. + +`_ to set sane defaults. + +Also forces out our own message factories to add deduplication tools and utilities. +""" + +"""Patch and tweak `Python's standard library mail box constructors +`_ to set sane defaults. + +Also forces out our own message factories to add deduplication tools and utilities. +""" + + +def generate_platforms_graph( + graph_id: str, description: str, groups: frozenset +) -> str: + """Generates an `Euler diagram `_ of platform and their + grouping. + + Euler diagrams are + `not supported by mermaid yet `_ + so we fallback on a flowchart + without arrows. + + Returns a ready to use and properly indented MyST block. + """ + + +def load_conf(self, ctx, param, path_pattern): + """Fetch parameters values from configuration file and merge them with the + defaults. + + User configuration is + `merged to the context default_map as Click does `_. 
+ + + This allow user's config to only overrides defaults. Values sets from direct + command line parameters, environment variables or interactive prompts, takes + precedence over any values from the config file. + """ + + +def pytest_addoption(parser): + """Add custom command line options. + + Based on `Pytest's documentation examples + `_. + + By default, runs non-destructive tests and skips destructive ones. + """ +''' + +[issue_176] +source='''def Class1: + """Class.""" #noqa + + attribute = a + """Attr.""" + + +def Class2: + """Class.""" + + attribute = a + """Attr.""" + + +def Class3: + """Class docstring. + + With long description. + """ #noqa + + attribute = a + """Attr."""''' +expected='''def Class1: + """Class.""" #noqa + + attribute = a + """Attr.""" + + +def Class2: + """Class.""" + attribute = a + """Attr.""" + + +def Class3: + """Class docstring. + + With long description. + """ #noqa + + attribute = a + """Attr.""" +''' + +[issue_176_black] +source='''class C: + """Class.""" #noqa + + attr: int + """Attr."""''' +expected='''class C: + """Class.""" #noqa + + attr: int + """Attr."""''' + [issue_187] -instring=''' +source=''' #!/usr/bin/env python """a.py""" ''' -outstring=''' +expected=''' #!/usr/bin/env python """a.py.""" + ''' [issue_203] -instring=''' +source=''' #!/usr/bin/env python import os @@ -293,7 +1146,7 @@ from typing import Iterator """Don't remove this comment, it's cool.""" IMPORTANT_CONSTANT = "potato" ''' -outstring=''' +expected=''' #!/usr/bin/env python import os @@ -304,10 +1157,10 @@ IMPORTANT_CONSTANT = "potato" ''' [issue_243] -instring='''def foo(bar): +source='''def foo(bar): """Return `foo` using `bar`. Description.""" ''' -outstring='''def foo(bar): +expected='''def foo(bar): """Return `foo` using `bar`. Description. 
diff --git a/tests/_data/string_files/do_format_docstrings.toml b/tests/_data/string_files/do_format_docstrings.toml index 5cc17359..17345cdf 100644 --- a/tests/_data/string_files/do_format_docstrings.toml +++ b/tests/_data/string_files/do_format_docstrings.toml @@ -1,25 +1,25 @@ [one_line] -instring='''""" +source='''""" Hello. """''' -outstring='''"""Hello."""''' +expected='''"""Hello."""''' [summary_end_quote] -instring='''""" +source='''""" "Hello" """''' -outstring='''""""Hello"."""''' +expected='''""""Hello"."""''' [bad_indentation] -instring='''"""Hello. +source='''"""Hello. This should be indented but it is not. The next line should be indented too. And this too. """''' -outstring='''"""Hello. +expected='''"""Hello. This should be indented but it is not. The next line should be indented too. @@ -27,7 +27,7 @@ outstring='''"""Hello. """''' [too_much_indentation] -instring='''"""Hello. +source='''"""Hello. This should be dedented. @@ -36,7 +36,7 @@ instring='''"""Hello. 3. And this. """''' -outstring='''"""Hello. +expected='''"""Hello. This should be dedented. @@ -46,13 +46,13 @@ outstring='''"""Hello. """''' [trailing_whitespace] -instring='''"""Hello. - - This should be not have trailing whitespace. The - next line should not have trailing whitespace either. - +source='''"""Hello. + + This should be not have trailing whitespace. The + next line should not have trailing whitespace either. + """''' -outstring='''"""Hello. +expected='''"""Hello. This should be not have trailing whitespace. The next line should not have trailing whitespace @@ -60,88 +60,537 @@ outstring='''"""Hello. """''' [empty_docstring] -instring='''""""""''' -outstring='''""""""''' +source='''""""""''' +expected='''""""""''' [no_summary_period] -instring='''""" +source='''""" Hello """''' -outstring='''"""Hello."""''' +expected='''"""Hello."""''' [single_quotes] -instring="""''' +source="""''' Hello. 
'''""" -outstring='''"""Hello."""''' +expected='''"""Hello."""''' [single_quotes_multiline] -instring="""''' +source="""''' Return x factorial. This uses math.factorial. '''""" -outstring='''"""Return x factorial. +expected='''"""Return x factorial. This uses math.factorial. """''' [skip_underlined_summary] -instring='''""" +source='''""" Foo bar ------- This is more. """''' -outstring='''""" +expected='''""" Foo bar ------- This is more. + """''' + +[no_blank] +source='''""" + +Hello. + + Description. + + + """''' +expected='''"""Hello. + + Description. + """''' + +[presummary_newline] +source='''""" + +Hello. + + Description. + + + """''' +expected='''""" + Hello. + + Description. + """''' + +[summary_multiline] +source='''"""This one-line docstring will be multi-line"""''' +expected='''""" + This one-line docstring will be multi-line. + """''' + +[presummary_space] +source='''"""This one-line docstring will have a leading space."""''' +expected='''""" This one-line docstring will have a leading space."""''' + +# Examples to test when passing --black to docformatter. +[quote_no_space_black] +source='''""" This one-line docstring will not have a leading space."""''' +expected='''"""This one-line docstring will not have a leading space."""''' + +[quote_space_black] +source='''""""This" quote starting one-line docstring will have a leading space."""''' +expected='''""" "This" quote starting one-line docstring will have a leading space."""''' + +[quote_space_multiline_black] +source='''""""This" quote starting one-line docstring will have a leading space. + +This long description will be wrapped at 88 characters because we passed the --black option and 88 characters is the default wrap length. +"""''' +expected='''""" "This" quote starting one-line docstring will have a leading space. + + This long description will be wrapped at 88 characters because we + passed the --black option and 88 characters is the default wrap + length. 
+ """''' + +# Examples to test with Epytext style docstrings. +[epytext] +source='''"""Return line-wrapped description text. + +We only wrap simple descriptions. We leave doctests, multi-paragraph text, +and bulleted lists alone. See http://www.docformatter.com/. + +@param text: the text argument. +@param indentation: the super long description for the indentation argument that will require docformatter to wrap this line. +@param wrap_length: the wrap_length argument +@param force_wrap: the force_warp argument. +@return: really long description text wrapped at n characters and a very long description of the return value so we can wrap this line abcd efgh ijkl mnop qrst uvwx yz. +"""''' +expected='''"""Return line-wrapped description text. + + We only wrap simple descriptions. We leave doctests, multi-paragraph text, and + bulleted lists alone. See + http://www.docformatter.com/. + + @param text: the text argument. + @param indentation: the super long description for the indentation argument that + will require docformatter to wrap this line. + @param wrap_length: the wrap_length argument + @param force_wrap: the force_warp argument. + @return: really long description text wrapped at n characters and a very long + description of the return value so we can wrap this line abcd efgh ijkl mnop + qrst uvwx yz. + """''' + +[epytext_numpy] +source='''"""Return line-wrapped description text. + + We only wrap simple descriptions. We leave doctests, multi-paragraph text, + and bulleted lists alone. See http://www.docformatter.com/. + + @param text: the text argument. + @param indentation: the super long description for the indentation argument that will require docformatter to wrap this line. + @param wrap_length: the wrap_length argument + @param force_wrap: the force_warp argument. + @return: really long description text wrapped at n characters and a very long description of the return value so we can wrap this line abcd efgh ijkl mnop qrst uvwx yz. 
+"""''' +expected='''"""Return line-wrapped description text. + + We only wrap simple descriptions. We leave doctests, multi-paragraph text, and + bulleted lists alone. See + http://www.docformatter.com/. + + @param text: the text argument. + @param indentation: the super long description for the indentation argument that will require docformatter to wrap this line. + @param wrap_length: the wrap_length argument + @param force_wrap: the force_warp argument. + @return: really long description text wrapped at n characters and a very long description of the return value so we can wrap this line abcd efgh ijkl mnop qrst uvwx yz. + """''' + +# Examples to test with Sphinx style docstrings. +[sphinx] +source='''"""Return line-wrapped description text. + + We only wrap simple descriptions. We leave doctests, multi-paragraph text, and bulleted lists alone. See http://www.docformatter.com/. + + :param str text: the text argument. + :param str indentation: the super long description for the indentation argument that will require docformatter to wrap this line. + :param int wrap_length: the wrap_length argument + :param bool force_wrap: the force_warp argument. + :return: really long description text wrapped at n characters and a very long description of the return value so we can wrap this line abcd efgh ijkl mnop qrst uvwx yz. + :rtype: str +"""''' +expected='''"""Return line-wrapped description text. + + We only wrap simple descriptions. We leave doctests, multi-paragraph text, and + bulleted lists alone. See + http://www.docformatter.com/. + + :param str text: the text argument. + :param str indentation: the super long description for the indentation argument that + will require docformatter to wrap this line. + :param int wrap_length: the wrap_length argument + :param bool force_wrap: the force_warp argument. 
+ :return: really long description text wrapped at n characters and a very long + description of the return value so we can wrap this line abcd efgh ijkl mnop + qrst uvwx yz. + :rtype: str + """''' + +[sphinx_numpy] +source='''"""Return line-wrapped description text. + + We only wrap simple descriptions. We leave doctests, multi-paragraph text, and bulleted lists alone. See http://www.docformatter.com/. + + :param str text: the text argument. + :param str indentation: the super long description for the indentation argument that will require docformatter to wrap this line. + :param int wrap_length: the wrap_length argument + :param bool force_wrap: the force_warp argument. + :return: really long description text wrapped at n characters and a very long description of the return value so we can wrap this line abcd efgh ijkl mnop qrst uvwx yz. + :rtype: str +"""''' +expected='''"""Return line-wrapped description text. + + We only wrap simple descriptions. We leave doctests, multi-paragraph text, and + bulleted lists alone. See + http://www.docformatter.com/. + + :param str text: the text argument. + :param str indentation: the super long description for the indentation argument that will require docformatter to wrap this line. + :param int wrap_length: the wrap_length argument + :param bool force_wrap: the force_warp argument. + :return: really long description text wrapped at n characters and a very long description of the return value so we can wrap this line abcd efgh ijkl mnop qrst uvwx yz. + :rtype: str + """''' + +# Examples to test when formatting lists. +[numbered_list] +source='''"""Hello. + + 1. This should be indented but it is not. The + next line should be indented too. But + this is okay. + """''' +expected='''"""Hello. + + 1. This should be indented but it is not. The + next line should be indented too. But + this is okay. + """''' + +[parameter_dash] +source='''"""Hello. + + foo - This is a foo. This is a foo. This is a foo. This is a foo. This is. 
+ bar - This is a bar. This is a bar. This is a bar. This is a bar. This is. + """''' +expected='''"""Hello. + + foo - This is a foo. This is a foo. This is a foo. This is a foo. This is. + bar - This is a bar. This is a bar. This is a bar. This is a bar. This is. + """''' + +[parameter_colon] +source='''"""Hello. + + foo: This is a foo. This is a foo. This is a foo. This is a foo. This is. + bar: This is a bar. This is a bar. This is a bar. This is a bar. This is. + """''' +expected='''"""Hello. + + foo: This is a foo. This is a foo. This is a foo. This is a foo. This is. + bar: This is a bar. This is a bar. This is a bar. This is a bar. This is. + """''' + +[many_short_columns] +source='''""" + one + two + three + four + five + six + seven + eight + nine + ten + eleven + """''' +expected='''""" + one + two + three + four + five + six + seven + eight + nine + ten + eleven + """''' + +# Examples to test when formatter URLs. +[inline] +source='''"""This is a docstring with a link. + + Here is an elaborate description containing a link. + `Area Under the Receiver Operating Characteristic Curve (ROC AUC) + `_. + """''' +expected='''"""This is a docstring with a link. + + Here is an elaborate description containing a link. `Area Under the + Receiver Operating Characteristic Curve (ROC AUC) + `_. + """''' + +[inline_short] +source='''"""This is yanf with a short link. + + See `the link `_ for more details. + """''' +expected='''"""This is yanf with a short link. + + See `the link `_ for more details. + """''' + +[inline_long] +source='''"""Helpful docstring. + + A larger description that starts here. https://github.com/apache/kafka/blob/2.5/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java + A larger description that ends here. + """''' +expected='''"""Helpful docstring. + + A larger description that starts here. 
+ https://github.com/apache/kafka/blob/2.5/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java + A larger description that ends here. + """''' + +[only_link] +source='''""" + `Source of this snippet + `_. + """''' +expected='''""" + `Source of this snippet + `_. + """''' + +# Examples to test when wrapping. +[weird_punctuation] +source='''"""Creates and returns four was awakens to was created tracked + ammonites was the fifty, arithmetical four was pyrotechnic to + pyrotechnic physicists. `four' falsified x falsified ammonites + to awakens to. `created' to ancestor was four to x dynamo to was + four ancestor to physicists(). + """''' +expected='''"""Creates and returns four was awakens to was created tracked ammonites + was the fifty, arithmetical four was pyrotechnic to pyrotechnic physicists. + + `four' falsified x falsified ammonites to awakens to. `created' to + ancestor was four to x dynamo to was four ancestor to physicists(). + """''' + +[description_wrap] +source='''"""Hello. + + This should be indented but it is not. The + next line should be indented too. But + this is okay. + + """''' +expected='''"""Hello. + + This should be indented but it is not. The next line should be + indented too. But this is okay. + """''' + +[ignore_doctest] +source='''"""Hello. + + >>> 4 + 4 + """''' +expected='''"""Hello. + + >>> 4 + 4 + """''' + +[ignore_summary_doctest] +source='''""" + >>> 4 + 4 + + """''' +expected='''""" + >>> 4 + 4 + + """''' + +[same_indentation_doctest] +source='''"""Foo bar bing bang. + + >>> tests = DocTestFinder().find(_TestClass) + >>> runner = DocTestRunner(verbose=False) + >>> tests.sort(key = lambda test: test.name) + + """''' +expected='''"""Foo bar bing bang. 
+ + >>> tests = DocTestFinder().find(_TestClass) + >>> runner = DocTestRunner(verbose=False) + >>> tests.sort(key = lambda test: test.name) + """''' + +[force_wrap] +source='''""" +num_iterations is the number of updates - instead of a better definition of convergence. +"""''' +expected='''"""num_iterations is the number of updates - + instead of a better definition of + convergence."""''' + +[summary_wrap_tab] +source=''' """Some summary x x x x."""''' +expected='''"""Some summary x x x + x."""''' + +[one_line_wrap_newline] +source='''"""This one-line docstring will be multi-line because it's quite long."""''' +expected='''"""This one-line docstring will be multi-line because it's quite + long. + """''' + +[one_line_no_wrap] +source='''"""This one-line docstring will not be wrapped and quotes will be in-line."""''' +expected='''"""This one-line docstring will not be wrapped and quotes will be in-line."""''' + +# Add examples from docformatter issues on GitHub. +[issue_75] +source='''"""This is another docstring with `a link`_. + + .. a link: http://www.reliqual.com/wiki/how_to_use_ramstk/verification_and_validation_module/index.html. + """''' +expected='''"""This is another docstring with `a link`_. + + .. a link: http://www.reliqual.com/wiki/how_to_use_ramstk/verification_and_validation_module/index.html. + """''' + +[issue_75_2] +source='''"""This is another docstring with a link. + + See http://www.reliqual.com/wiki/how_to_use_ramstk/verification_and_validation_module/index.html for additional information. + """''' +expected='''"""This is another docstring with a link. + + See + http://www.reliqual.com/wiki/how_to_use_ramstk/verification_and_validation_module/index.html + for additional information. + """''' + +[issue_75_3] +source='''"""This is yanf with a short link. + + See http://www.reliaqual.com for examples. + """''' +expected='''"""This is yanf with a short link. + + See http://www.reliaqual.com for examples. 
+ """''' + +[issue_127] +source='''"""My awesome function. + + This line is quite long. In fact is it longer than one hundred and twenty characters so it should be wrapped but it is not. + + It doesn't wrap because of this line and the blank line in between! Delete them and it will wrap. + """''' +expected='''"""My awesome function. + + This line is quite long. In fact is it longer than one hundred and twenty characters so it should be wrapped but it + is not. + + It doesn't wrap because of this line and the blank line in between! Delete them and it will wrap. + """''' + +[issue_140] +source='''"""This is a docstring with a link that causes a wrap. + + See `the link `_ for more details. + """''' +expected='''"""This is a docstring with a link that causes a wrap. + + See + `the link `_ + for more details. + """''' + +[issue_140_2] +source='''"""Helpful docstring. + + A larger description that starts here. + https://github.com/apache/kafka/blob/2.5/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java + A larger description that ends here. + """''' +expected='''"""Helpful docstring. + + A larger description that starts here. + https://github.com/apache/kafka/blob/2.5/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java + A larger description that ends here. + """''' + +[issue_140_3] +source='''"""Do something. + + See https://www.postgresql.org/docs/current/static/role-removal.html + """''' +expected='''"""Do something. + + See + https://www.postgresql.org/docs/current/static/role-removal.html + """''' + +[issue_145] +source='''""" + + .. _linspace API: https://numpy.org/doc/stable/reference/generated/numpy.linspace.html + .. _arange API: https://numpy.org/doc/stable/reference/generated/numpy.arange.html + .. _logspace API: https://numpy.org/doc/stable/reference/generated/numpy.logspace.html + """''' +expected='''""" + .. 
_linspace API: https://numpy.org/doc/stable/reference/generated/numpy.linspace.html + .. _arange API: https://numpy.org/doc/stable/reference/generated/numpy.arange.html + .. _logspace API: https://numpy.org/doc/stable/reference/generated/numpy.logspace.html """''' -[issue_156] -instring='''class AcceptHeader(ExtendedSchemaNode): - # ok to use name in this case because target key in the mapping must - # be that specific value but cannot have a field named with this format - name = "Accept" - schema_type = String - missing = drop - default = ContentType.APP_JSON # defaults to JSON for easy use within browsers - - - class AcceptLanguageHeader(ExtendedSchemaNode): - # ok to use name in this case because target key in the mapping must - # be that specific value but cannot have a field named with this format - name = "Accept-Language" - schema_type = String - missing = drop - default = AcceptLanguage.EN_CA - # FIXME: oneOf validator for supported languages (?)''' -outstring='''class AcceptHeader(ExtendedSchemaNode): - # ok to use name in this case because target key in the mapping must - # be that specific value but cannot have a field named with this format - name = "Accept" - schema_type = String - missing = drop - default = ContentType.APP_JSON # defaults to JSON for easy use within browsers - - - class AcceptLanguageHeader(ExtendedSchemaNode): - # ok to use name in this case because target key in the mapping must - # be that specific value but cannot have a field named with this format - name = "Accept-Language" - schema_type = String - missing = drop - default = AcceptLanguage.EN_CA - # FIXME: oneOf validator for supported languages (?)''' +[issue_150] +source='''""" + Translates incoming json to a processable Entity. + + Stackoverflow reference: + """''' +expected='''"""Translates incoming json to a processable Entity. + + Stackoverflow reference: + """''' [issue_157] -instring='''""".. code-block:: shell-session +source='''""".. 
code-block:: shell-session â–º apm --version apm 2.6.2 @@ -151,7 +600,7 @@ instring='''""".. code-block:: shell-session python 2.7.16 git 2.33.0 """''' -outstring='''""".. code-block:: shell-session +expected='''""".. code-block:: shell-session â–º apm --version apm 2.6.2 @@ -162,80 +611,515 @@ outstring='''""".. code-block:: shell-session git 2.33.0 """''' -[issue_176] -instring='''def Class1: - """Class.""" #noqa +[issue_157_url] +source='''"""Get the Python type of a Click parameter. + + See the list of `custom types provided by Click + `_. + """''' +expected='''"""Get the Python type of a Click parameter. + + See the list of + `custom types provided by Click `_. + """''' + +[issue_157_2] +source='''"""Fetch parameters values from configuration file and merge them with the + defaults. + + User configuration is `merged to the context default_map as Click does + `_. + + This allow user's config to only overrides defaults. Values sets from direct + command line parameters, environment variables or interactive prompts, takes + precedence over any values from the config file. +"""''' +expected='''"""Fetch parameters values from configuration file and merge them with the + defaults. + + User configuration is + `merged to the context default_map as Click does `_. + + This allow user's config to only overrides defaults. Values sets from direct + command line parameters, environment variables or interactive prompts, takes + precedence over any values from the config file. + """''' + +[issue_157_3] +source='''"""Introspects current CLI and list its parameters and metadata. + + .. important:: + Click doesn't keep a list of all parsed arguments and their origin. + So we need to emulate here what's happening during CLI invokation. + But can't even to that because the raw, pre-parsed arguments are + not available anywhere. + """''' +expected='''"""Introspects current CLI and list its parameters and metadata. + + .. 
important:: + Click doesn't keep a list of all parsed arguments and their origin. + So we need to emulate here what's happening during CLI invokation. + But can't even to that because the raw, pre-parsed arguments are + not available anywhere. + """''' + +[issue_157_4] +source='''"""Search on local file system or remote URL files matching the provided pattern. + + ``pattern`` is considered as an URL only if it is parseable as such + and starts with ``http://`` or ``https://``. + + .. important:: + + This is a straight `copy of the functools.cache implementation + `_, + which is only `available in the standard library starting with Python v3.9 + `. + """''' +expected='''"""Search on local file system or remote URL files matching the provided + pattern. + + ``pattern`` is considered as an URL only if it is parseable as such + and starts with ``http://`` or ``https://``. + + .. important:: + + This is a straight `copy of the functools.cache implementation + `_, + which is only `available in the standard library starting with Python v3.9 + `. + """''' + +[issue_157_5] +source='''"""Locate and call the ``mpm`` CLI. + + The output must supports both `Xbar dialect + `_ + and `SwiftBar dialect `_. + """''' +expected='''"""Locate and call the ``mpm`` CLI. + + The output must supports both + `Xbar dialect `_ + and `SwiftBar dialect `_. + """''' + +[issue_157_6] +source='''"""Install one or more packages. + + Installation will proceed first with packages unambiguously tied to a manager. You can have an + influence on that with more precise package specifiers (like purl) and/or tighter selection of managers. + + For other untied packages, mpm will try to find the best manager to install it with. Their installation + will be attempted with each manager, in the order they were selected. If we have the certainty, by the way + of a search operation, that this package is not available from this manager, we'll skip the installation + and try the next available manager. 
+ """''' +expected='''"""Install one or more packages. + + Installation will proceed first with packages unambiguously tied to a manager. You + can have an influence on that with more precise package specifiers (like purl) + and/or tighter selection of managers. - attribute - """Attr.""" + For other untied packages, mpm will try to find the best manager to install it with. + Their installation will be attempted with each manager, in the order they were + selected. If we have the certainty, by the way of a search operation, that this + package is not available from this manager, we'll skip the installation and try the + next available manager. + """''' +[issue_157_11] +source='''"""Fetch parameters values from configuration file and merge them with the defaults. -def Class2: - """Class.""" + User configuration is `merged to the context default_map as Click does + `_. - attribute - """Attr.""" + This allow user's config to only overrides defaults. Values sets from direct + command line parameters, environment variables or interactive prompts, takes + precedence over any values from the config file. + """''' +expected='''"""Fetch parameters values from configuration file and merge them with the + defaults. + User configuration is + `merged to the context default_map as Click does `_. -def Class3: - """Class docstring. + This allow user's config to only overrides defaults. Values sets from direct + command line parameters, environment variables or interactive prompts, takes + precedence over any values from the config file. + """''' - With long description. - """ #noqa +[issue_159] +source='''"""Blah blah. - attribute - """Attr."""''' -outstring='''def Class1: - """Class.""" #noqa + This will normally be used with https://aaaaaaaa.bbb.ccccccccc.com/xxxxx/xxx_xxxxxxxxxxx to generate the xxx + """''' +expected='''"""Blah blah. 
- attribute - """Attr.""" + This will normally be used with + https://aaaaaaaa.bbb.ccccccccc.com/xxxxx/xxx_xxxxxxxxxxx + to generate the xxx + """''' +[issue_180] +source='''"""Django settings for webapp project. -def Class2: - """Class.""" + Generated by 'django-admin startproject' using Django 4.1.1. - attribute - """Attr.""" + For more information on this file, see + https://docs.djangoproject.com/en/4.1/topics/settings/ + For the full list of settings and their values, see + https://docs.djangoproject.com/en/4.1/ref/settings/ + """''' +expected='''"""Django settings for webapp project. + + Generated by 'django-admin startproject' using Django 4.1.1. -def Class3: - """Class docstring. + For more information on this file, see + https://docs.djangoproject.com/en/4.1/topics/settings/ - With long description. - """ #noqa + For the full list of settings and their values, see + https://docs.djangoproject.com/en/4.1/ref/settings/ + """''' - attribute - """Attr."""''' +[issue_189] +source='''"""This method doesn't do anything. + + https://example.com/this-is-just-a-long-url/designed-to-trigger/the-wrapping-of-the-description + """''' +expected='''"""This method doesn't do anything. + + https://example.com/this-is-just-a-long-url/designed-to-trigger/the-wrapping-of-the-description + """''' [issue_193] -instring='''""" +source='''""" eBay kinda suss """''' -outstring='''"""eBay kinda suss."""''' +expected='''"""eBay kinda suss."""''' + +[issue_199] +source='''""" + This is a short desription. + + Here is a link to the github issue + https://github.com/PyCQA/docformatter/issues/199 + + This is a long description. + """''' +expected='''"""This is a short desription. + + Here is a link to the github issue + https://github.com/PyCQA/docformatter/issues/199 + + This is a long description. + """''' + +[issue_210] +source='''"""Short description. + +This graphics format generates terminal escape codes that transfer +PNG data to a TTY using the `kitty graphics protocol`__. 
+ +__ https://sw.kovidgoyal.net/kitty/graphics-protocol/ +"""''' +expected='''"""Short description. + + This graphics format generates terminal escape codes that transfer + PNG data to a TTY using the `kitty graphics protocol`__. + + __ https://sw.kovidgoyal.net/kitty/graphics-protocol/ + """''' + +[issue_215] +source='''"""Create or return existing HTTP session. + + :return: Requests :class:`~requests.Session` object + """''' +expected='''"""Create or return existing HTTP session. + + :return: Requests :class:`~requests.Session` object + """''' + +[issue_217_222] +source='''"""Base for all Commands. + +:param logger: Logger for console and logfile. +:param console: Facilitates console interaction and input solicitation. +:param tools: Cache of tools populated by Commands as they are required. +:param apps: Dictionary of project's Apps keyed by app name. +:param base_path: Base directory for Briefcase project. +:param data_path: Base directory for Briefcase tools, support packages, etc. +:param is_clone: Flag that Command was triggered by the user's requested Command; + for instance, RunCommand can invoke UpdateCommand and/or BuildCommand. +"""''' +expected='''"""Base for all Commands. + + :param logger: Logger for console and logfile. + :param console: Facilitates console interaction and input solicitation. + :param tools: Cache of tools populated by Commands as they are required. + :param apps: Dictionary of project's Apps keyed by app name. + :param base_path: Base directory for Briefcase project. + :param data_path: Base directory for Briefcase tools, support packages, etc. + :param is_clone: Flag that Command was triggered by the user's requested Command; + for instance, RunCommand can invoke UpdateCommand and/or BuildCommand. + """''' + +[issue_218] +source='''"""Construct a candidate project URL from the bundle and app name. + +It's not a perfect guess, but it's better than having "https://example.com". + +:param bundle: The bundle identifier. 
+:param app_name: The app name. +:returns: The candidate project URL +"""''' +expected='''"""Construct a candidate project URL from the bundle and app name. -[issue_263] -[issue_263.sphinx] + It's not a perfect guess, but it's better than having + "https://example.com". + + :param bundle: The bundle identifier. + :param app_name: The app name. + :returns: The candidate project URL + """''' + +[issue_224] +source='''""" +Add trackers to a torrent. + +:raises NotFound404Error: + +:param torrent_hash: hash for torrent +:param urls: tracker URLs to add to torrent +:return: None +"""''' +expected='''"""Add trackers to a torrent. + + :raises NotFound404Error: + + :param torrent_hash: hash for torrent + :param urls: tracker URLs to add to torrent + :return: None + """''' + +[issue_228] +source='''"""Configure application requirements by writing a requirements.txt file. + + :param app: The app configuration + :param requires: The full list of requirements + :param requirements_path: The full path to a requirements.txt file that + will be written. + """''' +expected='''"""Configure application requirements by writing a requirements.txt file. + + :param app: The app configuration + :param requires: The full list of requirements + :param requirements_path: The full path to a requirements.txt file that will be + written. + """''' + +[issue_229] +source='''"""CC. + + :meth:`!X` + """''' +expected='''"""CC. + + :meth:`!X` + """''' + +[issue_229_2] +source='''"""CC. + + :math: `-` + """''' +expected='''"""CC. + + :math: `-` + """''' + +[issue_230] +source='''"""CC. + + :math:`-` + :param d: blabla + :param list(str) l: more blabla. + """''' +expected= '''"""CC. + + :math:`-` + :param d: blabla + :param list(str) l: more blabla. + """''' + +[issue_232] +source='''def function: + """ + :param x: X + :param y: Y + """''' +expected='''def function: + """ + :param x: X + :param y: Y + """''' + +[issue_234] +source=''' """CC. + + :math:`f(0) = 1`. 
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX + """''' +expected='''"""CC. + + :math:`f(0) = 1`. XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX + """''' + +[issue_235] +source='''"""CC. + + C. + + C, + :math:`[0, 1]`. +"""''' +expected='''"""CC. + + C. + + C, :math:`[0, 1]`. + """''' + +[issue_239] +source='''"""CC. + + C. + + C + c :math:`[0, 1]`. + """''' +expected='''"""CC. + + C. + + C + c :math:`[0, 1]`. + """''' + +[issue_239_sphinx] +source='''""" +Summary. + + :raises InvalidRequest400Error: + :raises NotFound404Error: + :raises Conflict409Error: + + :param param: asdf + """''' +expected='''"""Summary. + + :raises InvalidRequest400Error: + :raises NotFound404Error: + :raises Conflict409Error: + + :param param: asdf + """''' + +[issue_245] +source='''"""Some f. + :param a: Some param. + :raises my.package.MyReallySrsError: Bad things happened. + """''' +expected='''"""Some f. + + :param a: Some param. + :raises my.package.MyReallySrsError: Bad things happened. + """''' + +[issue_250] +source=''' """CC. + + c. + + c c :math:`[0, 1]`. + """''' +expected='''"""CC. + + c. + + c c :math:`[0, 1]`. + """''' + +[issue_253] +source='''""" + My test fixture. + + :param caplog: Pytest caplog fixture. + :yield: Until test complete, then run cleanup. + """''' +expected='''""" + My test fixture. + + :param caplog: Pytest caplog fixture. + :yield: Until test complete, then run cleanup. + """''' + +[issue_263_sphinx] # the `xx.\n\n` ensures there are a summary and a description sections # the `:param a:` creates a field # the `b`s create text that is long enough to trigger a line wrap without being so long that they count as code # the `s3://cccc.` is a url -instring='''"""xx. +source='''"""xx. :param a: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb s3://cccc. """''' -outstring='''"""xx. +expected='''"""xx. :param a: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb s3://cccc. """''' -[issue_263.epytext] -instring='''"""xx. 
+ +[issue_263_epytext] +source='''"""xx. @param a: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb s3://cccc. """''' -outstring='''"""xx. +expected='''"""xx. @param a: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb s3://cccc. """''' + +[issue_271] +source='''""" + My test fixture. + + :ivar id: A unique identifier for the element, automatically generated upon instantiation. + :vartype id: str + :ivar created: Timestamp when the element was created, defaults to the current time. + :vartype created: datetime + :cvar modified: Timestamp when the element was last modified, can be None if not modified. + :vartype modified: Optional[datetime] + :cvar in_project: List of projects this element is part of. Direct modification is restricted. + :vartype in_project: list[Project] + :param caplog: Pytest caplog fixture. + :yield: Until test complete, then run cleanup. + """''' +expected='''""" + My test fixture. + + :ivar id: A unique identifier for the element, automatically generated upon + instantiation. + :vartype id: str + :ivar created: Timestamp when the element was created, defaults to the current time. + :vartype created: datetime + :cvar modified: Timestamp when the element was last modified, can be None if not + modified. + :vartype modified: Optional[datetime] + :cvar in_project: List of projects this element is part of. Direct modification is + restricted. + :vartype in_project: list[Project] + :param caplog: Pytest caplog fixture. + :yield: Until test complete, then run cleanup. + """''' diff --git a/tests/_data/string_files/format_functions.toml b/tests/_data/string_files/format_functions.toml new file mode 100644 index 00000000..1dcf33cb --- /dev/null +++ b/tests/_data/string_files/format_functions.toml @@ -0,0 +1,333 @@ +# In this file, token lists have the following information: +# [type, string, start, end, line] +# for creating a TokenInfo() object. 
+ +[module_docstring_followed_by_string] +expected = 1 + +[module_docstring_followed_by_code] +expected = 1 + +[module_docstring_followed_by_comment_then_code] +expected = 1 + +[module_docstring_followed_by_comment_then_string] +expected = 1 + +[module_docstring_in_black] +expected = 2 + +[class_docstring_followed_by_statement] +source = ''' +class MyClass: + """Class docstring.""" + x = 42 +''' +expected = 1 + +[class_docstring_followed_by_def] +source = ''' +class MyClass: + """Class docstring.""" + + def method(self): + pass +''' +expected = 1 + +[class_docstring_with_decorator] +source = ''' +class A: + """Docstring.""" + + @classmethod + def foo(cls): pass +''' +expected = 1 + +[class_docstring_with_class_variable] +source = ''' +class A: + """Docstring.""" + version = "1.0" +''' +expected = 1 + +[function_with_expr] +source = ''' +def foo(): + """Docstring.""" + return 42 +''' +expected = 0 + +[function_with_inner_def] +source = ''' +def foo(): + """Docstring.""" + + def inner(): pass +''' +expected = 1 + +[function_with_inner_async_def] +source='''def foo(): + """ + Hello foo. + This is a docstring. + """ + + async def inner(): pass +''' +expected = 1 + +[function_with_decorator_and_def] +source = ''' +def foo(): + """Docstring.""" + + @staticmethod + def inner(): pass +''' +expected = 1 + +[function_with_decorator_and_async_def] +source = ''' +def foo(): + """Docstring.""" + + @log + async def inner(): pass +''' +expected = 1 + +[function_docstring_with_inner_class] +source = ''' +def foo(): + """Function docstring.""" + + class Bar: + pass +''' +expected = 1 + +[attribute_docstring_single_line] +source = '''class MyClass: + x = 1 + """"This is an attribute.""" +''' +expected = 1 + +[attribute_docstring_multi_line] +source = '''class MyClass: + x = 1 + """"This is an attribute. + + It has a multi-line docstring. 
+ """ +''' +expected = 1 + +[attribute_docstring_outside_class] +source = '''x = 1 +"""This is an attribute outside a class.""" + +class MyClass: + pass +''' +expected = 2 + +[attribute_docstring_inside_method] +source = '''class MyClass: + def method(self): + x = 1 + """This is an attribute inside a method.""" +''' +expected = 1 + +[attribute_docstring_with_comment] +source = '''class MyClass: + x = 1 + """This is an attribute.""" # This is a comment. +''' +expected = 1 + +[attribute_docstring_multiple_assignments] +source = '''class MyClass: + x = y = 2 + """This is an attribute with multiple assignments.""" +''' +expected = 1 + +[attribute_docstring_equiv_expr] +source = '''class MyClass: + x = 1 + """This is an attribute.""" + y = x + 1 +''' +expected = 1 + +[get_newlines_by_type_module_docstring] +source = '"""Module docstring."""' +expected = 1 + +[get_newlines_by_type_module_docstring_black] +source = '"""Module docstring."""' +expected = 2 + +[get_newlines_by_type_class_docstring] +source = ''' +class MyClass: + """Class docstring.""" + x = 42 +''' +expected = 1 + +[get_newlines_by_type_function_docstring] +source = ''' +def foo(): + """Docstring.""" + return 42 +''' +expected = 0 + +[get_newlines_by_type_attribute_docstring] +source = '''x = 1 +"""Docstring for x.""" +''' +expected = 1 + +[get_num_rows_columns] +token = [5, " ", [3, 10], [3, 40], ''' This is +the last line in +the docstring.""" +'''] +expected = [3, 17] + +[get_start_end_indices] +prev_token = [ + 3, '''"""Hello foo and this is a docstring.\n\n More stuff.\n """''', + [2, 4], [2, 7], + ''' """Hello foo and this is a docstring.\n\n More stuff.\n """\n'''] +token = [ + 4, "\n", [7, 7], [7, 8], + ''' """Hello foo and this is a docstring.\n\n More stuff.\n """\n'''] +expected = [[2, 7], [2, 8]] + +[do_remove_preceding_blank_lines_module] +source = '''#!/usr/bin/env python + + +"""This is a module docstring.""" +''' +expected = ["#!/usr/bin/env python", + "\n", + "\n", + '"""This is a module 
docstring."""', + "\n", + "", +] + +[do_remove_preceding_blank_lines_class] +source = ''' + class TestClass: + """This is a class docstring. + + :cvar test_int: a class attribute. + ..py.method: big_method() + """ + +''' +expected = [" ", + "class", + "TestClass", + ":", + "\n", + " ", + '''"""This is a class docstring. + + :cvar test_int: a class attribute. + ..py.method: big_method() + """''', + "\n", + "\n", + "", + "", + "", +] + +[do_remove_preceding_blank_lines_function] +source = ''' +def test_function(): + + + +"""This is a function docstring.""" +pass +''' +expected = ["def", + "test_function", + "(", + ")", + ":", + "\n", + '"""This is a function docstring."""', + "\n", + "pass", + "\n", + "" +] + +[do_remove_preceding_blank_lines_attribute] +source = ''' + CONST = 123 + + """Docstring for CONST.""" +''' +expected = [ + " ", + "CONST", + "=", + "123", + "\n", + '"""Docstring for CONST."""', + "\n", + "", + "", +] + +[get_unmatched_start_end_indices] +prev_token = [5, " ", [2, 0], [2, 4], ''' """This is a docstring.\n'''] +token = [ + 3, '''"""This is a docstring.\n\n\n That should be on less lines\n"""''', + [3, 4], [6, 7], + ''' """This is a docstring.\n\n\n That should be on less lines\n """'''] +expected = [[2, 4], [6, 7]] + +[do_update_token_indices] +tokens = [ + [1, 'def', [1, 0], [1, 3], 'def foo():\n'], + [1, 'foo', [1, 4], [1, 7], 'def foo():\n'], + [55, '(', [1, 7], [1, 8], 'def foo():\n'], + [55, ')', [1, 8], [1, 9], 'def foo():\n'], + [55, ':', [1, 9], [1, 10], 'def foo():\n'], + [4, '\n', [1, 10], [1, 11], 'def foo():\n'], + [5, ' ', [3, 0], [3, 4], ' """Hello foo."""\n'], + [3, '"""Hello foo."""', [3, 4], [5, 7], ' """Hello foo."""\n'], + [4, '\n', [5, 7], [5, 8], ' """Hello foo."""\n'], + [6, '', [6, 0], [6, 0], ''], + [0, '', [6, 0], [6, 0], ''] +] +expected = [ + [[1, 0], [1, 3]], + [[1, 4], [1, 7]], + [[1, 7], [1, 8]], + [[1, 8], [1, 9]], + [[1, 9], [1, 10]], + [[1, 10], [1, 11]], + [[2, 0], [2, 4]], + [[2, 4], [2, 7]], + [[2, 7], [2, 
8]], + [[3, 0], [3, 0]], + [[3, 0], [3, 0]] +] diff --git a/tests/_data/string_files/format_methods.toml b/tests/_data/string_files/format_methods.toml new file mode 100644 index 00000000..10435fcd --- /dev/null +++ b/tests/_data/string_files/format_methods.toml @@ -0,0 +1,105 @@ +# In this file, token lists have the following information: +# [type, string, start, end, line] +# for creating a TokenInfo() object. +[do_add_unformatted_docstring] +token = [ + 3, '''"""This is a docstring.\n\n\n That should be on less lines\n"""''', + [3, 4], [6, 7], + ''' """This is a docstring.\n\n\n That should be on less lines\n """'''] + +[do_add_formatted_docstring] +token = [ + 3, '''"""This is a docstring.\n"""''', + [3, 4], [6, 7], + ''' """This is a docstring.\n """'''] +next_token = [5, "\b", [2, 0], [2, 4], ''' """This is a docstring.\n'''] + +[do_format_oneline_docstring] +source = "This is a one-line docstring." +expected = '"""This is a one-line docstring."""' + +[do_format_oneline_docstring_that_ends_in_quote] +source ='"Hello"' +expected = '''""""Hello"."""''' + +[do_format_oneline_docstring_with_wrap] +source = "This is a long one-line summary that will need to be wrapped because we're going to pass the --wrap-summaries argument." +expected = ''' +"""This is a long one-line summary that will need to be wrapped + because we're going to pass the --wrap-summaries argument."""''' + +[do_format_oneline_docstring_with_quotes_newline] +source = "This is a long one-line summary that will have the closing quotes on a separate line because we're going to pass the --close-quotes-on-newline argument." +expected = ''' +"""This is a long one-line summary that will have the closing quotes on a + separate line because we're going to pass the --close-quotes-on-newline + argument. + """''' + +[do_format_oneline_docstring_make_multiline] +source = "This is one-line docstring and we're going to pass the --make-summary-multi-line argument to see what happens." 
+expected = ''' +""" + This is one-line docstring and we're going to pass the --make-summary- + multi-line argument to see what happens. + """''' + +[do_format_multiline_docstring] +source = [ + "This is the summary of a multiline docstring.", + "This is the long description part of the same multiline docstring."] +expected = '''"""This is the summary of a multiline docstring. + + This is the long description part of the same multiline docstring. + """''' + +[do_format_multiline_docstring_pre_summary_newline] +source = [ + "This is the summary of a multiline docstring.", + "This is the long description part of the same multiline docstring."] +expected = '''""" + This is the summary of a multiline docstring. + + This is the long description part of the same multiline docstring. + """''' + +[do_format_multiline_docstring_post_description_blank] +source = [ + "This is the summary of a multiline docstring.", + "This is the long description part of the same multiline docstring."] +expected = '''"""This is the summary of a multiline docstring. + + This is the long description part of the same multiline docstring. 
+ + """''' + +[do_rewrite_docstring_blocks] +tokens = [ + [1, "def", [1, 0], [1, 3], "def foo():\n"], + [1, "foo", [1, 4], [1, 7], "def foo():\n"], + [55, "(", [1, 7], [1, 8], "def foo():\n"], + [55, ")", [1, 8], [1, 9], "def foo():\n"], + [55, ":", [1, 9], [1, 10], "def foo():\n"], + [4, "\n", [1, 10], [1, 11], "def foo():\n"], + [5, " ", [3, 0], [3, 4], ''' """Hello foo."""\n'''], + [3, '"""Hello foo."""', [3, 4], [5, 7], ''' """Hello foo."""\n'''], + [4, "\n", [5, 7], [5, 8], ''' """Hello foo."""\n'''], + [6, "", [6, 0], [6, 0], ""], + [0, "", [6, 0], [6, 0], ""] +] +expected = [ + [1, "def", [1, 0], [1, 3], "def foo():\n"], + [1, "foo", [1, 4], [1, 7], "def foo():\n"], + [55, "(", [1, 7], [1, 8], "def foo():\n"], + [55, ")", [1, 8], [1, 9], "def foo():\n"], + [55, ":", [1, 9], [1, 10], "def foo():\n"], + [4, "\n", [1, 10], [1, 11], "def foo():\n"], + [5, " ", [2, 0], [2, 4], ''' """Hello foo.""" +'''], + [3, '"""Hello foo."""', [2, 4], [2, 7], ''' """Hello foo.""" +'''], + [4, "\n", [2, 7], [2, 8], ''' """Hello foo.""" +'''], + [6, "", [3, 0], [3, 0], ""], + [0, "", [3, 0], [3, 0], ""] +] diff --git a/tests/conftest.py b/tests/conftest.py index 762d2461..b06abfc0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -4,6 +4,7 @@ # tests.conftest.py is part of the docformatter project # # Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the diff --git a/tests/formatter/test_do_format_code.py b/tests/formatter/test_do_format_code.py index edd0457e..c8faaa0f 100644 --- a/tests/formatter/test_do_format_code.py +++ b/tests/formatter/test_do_format_code.py @@ -1,9 +1,10 @@ # pylint: skip-file # type: ignore # -# tests.test_do_format_code.py is part of the docformatter project +# tests.formatter.test_do_format_code.py is part of the docformatter project # # Copyright (C) 2012-2023 Steven 
Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -24,7 +25,7 @@ # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -"""Module for testing the Formattor._do_format_code() method.""" +"""Module for testing the Formattor _do_format_code method.""" # Standard Library Imports import contextlib @@ -44,404 +45,111 @@ # docformatter Package Imports from docformatter import Formatter - -class TestDoFormatCode: - """Class for testing _do_format_code() with no arguments.""" - - with open("tests/_data/string_files/do_format_code.toml", "rb") as f: - TEST_STRINGS = tomllib.load(f) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_do_format_code(self, test_args, args): - """Should place one-liner on single line.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["one_line"]["instring"] - outstring = self.TEST_STRINGS["one_line"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_do_format_code_with_module_docstring(self, test_args, args): - """Should format module docstrings.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["module_docstring"]["instring"] - outstring = self.TEST_STRINGS["module_docstring"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_strip_blank_line_after_module_variable( - self, - test_args, - args, - ): - """Strip newlines between module variable definition and docstring.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - 
- instring = self.TEST_STRINGS["newline_module_variable"]["instring"] - outstring = self.TEST_STRINGS["newline_module_variable"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_class_docstring(self, test_args, args): - """Format class docstring.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["class_docstring"]["instring"] - outstring = self.TEST_STRINGS["class_docstring"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_strip_blank_line_after_class_variable( - self, - test_args, - args, - ): - """Strip any newlines between a class variable definition and docstring. - - See requirement . - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["newline_class_variable"]["instring"] - outstring = self.TEST_STRINGS["newline_class_variable"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_do_format_code_keep_newlines_outside_docstring(self, test_args, args): - """Should keep newlines in code following docstring.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["newline_outside_docstring"]["instring"] - outstring = self.TEST_STRINGS["newline_outside_docstring"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_dominant_line_ending_style_preserved(self, test_args, args): - """Should retain carriage return line endings.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = 
self.TEST_STRINGS["preserve_line_ending"]["instring"] - outstring = self.TEST_STRINGS["preserve_line_ending"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_additional_empty_line_before_doc(self, test_args, args): - """Should remove empty line between function def and docstring. - - See issue #51. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_51"]["instring"] - outstring = self.TEST_STRINGS["issue_51"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_extra_newline_following_comment(self, test_args, args): - """Should remove extra newline following in-line comment. - - See issue #51. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_51_2"]["instring"] - outstring = self.TEST_STRINGS["issue_51_2"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_no_docstring(self, test_args, args): - """Should leave code as is if there is no docstring. - - See issue #97. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_97"]["instring"] - outstring = self.TEST_STRINGS["issue_97"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - instring = self.TEST_STRINGS["issue_97_2"]["instring"] - outstring = self.TEST_STRINGS["issue_97_2"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_class_docstring_remove_blank_line(self, test_args, args): - """Remove blank line before class docstring. 
- - See issue #139. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_139"]["instring"] - outstring = self.TEST_STRINGS["issue_139"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_class_docstring_keep_blank_line(self, test_args, args): - """Keep blank line after class definition if there is no docstring. - - See issue #139. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_139_2"]["instring"] - outstring = self.TEST_STRINGS["issue_139_2"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_strip_blank_line_after_method_docstring( - self, - test_args, - args, - ): - """Strip any newlines after a method docstring. - - See requirement PEP_257_4.4, issue #130. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_130"]["instring"] - outstring = self.TEST_STRINGS["issue_130"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_do_not_touch_function_no_docstring( - self, - test_args, - args, - ): - """Do not remove newlines in functions with no docstring. - - See issue #156. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_156"]["instring"] - outstring = self.TEST_STRINGS["issue_156"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_do_format_code_keep_newline_for_stub_functions(self, test_args, args): - """Should keep newline after docstring in stub functions. - - See issue #156 and issue #173. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_156_173"]["instring"] - outstring = self.TEST_STRINGS["issue_156_173"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_keep_newline_after_shebang( - self, +NO_ARGS = [""] + +with open("tests/_data/string_files/do_format_code.toml", "rb") as f: + TEST_STRINGS = tomllib.load(f) + + +@pytest.mark.integration +@pytest.mark.order(7) +@pytest.mark.parametrize( + "test_key, args", + [ + ("one_line", NO_ARGS), + ("module_docstring", NO_ARGS), + ("newline_module_variable", NO_ARGS), + ("class_docstring", NO_ARGS), + ("newline_class_variable", NO_ARGS), + ("newline_outside_docstring", NO_ARGS), + pytest.param( + "preserve_line_ending", + NO_ARGS, + marks=pytest.mark.skipif( + sys.platform != "win32", reason="Not running on Windows" + ), + ), + ("non_docstring", NO_ARGS), + ("tabbed_indentation", NO_ARGS), + ("mixed_indentation", NO_ARGS), + ("escaped_newlines", NO_ARGS), + ("code_comments", NO_ARGS), + ("inline_comment", NO_ARGS), + ("raw_lowercase", NO_ARGS), + ("raw_uppercase", NO_ARGS), + ("raw_lowercase_single", NO_ARGS), + ("raw_uppercase_single", NO_ARGS), + ("unicode_lowercase", NO_ARGS), + ("unicode_uppercase", NO_ARGS), + ("unicode_lowercase_single", NO_ARGS), + ("unicode_uppercase_single", NO_ARGS), + ("nested_triple", NO_ARGS), + 
("multiple_sentences", NO_ARGS), + ("multiple_sentences_same_line", NO_ARGS), + ("multiline_summary", NO_ARGS), + ("empty_lines", NO_ARGS), + ("class_empty_lines", NO_ARGS), + ("class_empty_lines_2", NO_ARGS), + ("method_empty_lines", NO_ARGS), + ("trailing_whitespace", NO_ARGS), + ("parameter_list", NO_ARGS), + ("single_quote", NO_ARGS), + ("double_quote", NO_ARGS), + ("nested_triple_quote", NO_ARGS), + ("first_line_assignment", NO_ARGS), + ("regular_strings", NO_ARGS), + ("syntax_error", NO_ARGS), + ("slash_r", NO_ARGS), + ("slash_r_slash_n", NO_ARGS), + ("strip_blank_lines", ["--black", ""]), + ("range_miss", ["--range", "1", "1", ""]), + ("range_hit", ["--range", "1", "2", ""]), + ("length_ignore", ["--docstring-length", "1", "1", ""]), + ("class_attribute_wrap", NO_ARGS), + ("issue_51", NO_ARGS), + ("issue_51_2", NO_ARGS), + ( + "issue_79", + NO_ARGS + + [ + "--wrap-summaries", + "100", + "--wrap-descriptions", + "100", + ], + ), + ("issue_97", NO_ARGS), + ("issue_97_2", NO_ARGS), + ("issue_130", NO_ARGS), + ("issue_139", NO_ARGS), + ("issue_139_2", NO_ARGS), + ("issue_156", NO_ARGS), + ("issue_156_2", NO_ARGS), + ("issue_156_173", NO_ARGS), + ("issue_157_7", ["--wrap-descriptions", "88", ""]), + ("issue_157_8", ["--wrap-descriptions", "88", ""]), + ("issue_157_9", ["--wrap-descriptions", "88", ""]), + ("issue_157_10", ["--wrap-descriptions", "88", ""]), + ("issue_176", NO_ARGS), + ("issue_176_black", NO_ARGS), + ("issue_187", NO_ARGS), + ("issue_203", NO_ARGS), + ("issue_243", NO_ARGS), + ], +) +def test_do_format_code(test_key, test_args, args): + uut = Formatter( test_args, - args, - ): - """Do not remove newlines following the shebang. - - See issue #187. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_187"]["instring"] - outstring = self.TEST_STRINGS["issue_187"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_keep_newline_after_import( - self, - test_args, - args, - ): - """Do not remove newlines following the import section. - - See issue #203. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_203"]["instring"] - outstring = self.TEST_STRINGS["issue_203"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_code_with_backtick_in_summary( - self, - test_args, - args, - ): - """Format docstring with summary containing backticks. - - See issue #243. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_243"]["instring"] - outstring = self.TEST_STRINGS["issue_243"]["outstring"] + sys.stderr, + sys.stdin, + sys.stdout, + ) + + source = TEST_STRINGS[test_key]["source"] + if test_key == "escaped_newlines" and sys.version_info >= (3, 13): + expected = TEST_STRINGS[test_key]["expected313"] + else: + expected = TEST_STRINGS[test_key]["expected"] - assert outstring == uut._do_format_code( - instring, - ) + result = uut._do_format_code(source) + assert result == expected, f"\nFailed {test_key}\nExpected {expected}\nGot {result}" diff --git a/tests/formatter/test_do_format_docstring.py b/tests/formatter/test_do_format_docstring.py index 5bf544b8..50765fdc 100644 --- a/tests/formatter/test_do_format_docstring.py +++ b/tests/formatter/test_do_format_docstring.py @@ -1,9 +1,10 @@ # pylint: skip-file # type: ignore # -# tests.test_format_docstring.py is part of the docformatter project +# 
tests.formatter.test_do_format_docstring.py is part of the docformatter project # # Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -24,11 +25,13 @@ # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -"""Module for testing the Formatter class.""" +"""Module for testing the Formatter _do_format_docstring method.""" # Standard Library Imports import contextlib +import itertools +import random import sys with contextlib.suppress(ImportError): @@ -43,318 +46,185 @@ import pytest # docformatter Package Imports -from docformatter import Formatter - -INDENTATION = " " - - -class TestFormatDocstring: - """Class for testing _do_format_docstring() with no arguments.""" - - with open("tests/_data/string_files/do_format_docstrings.toml", "rb") as f: - TEST_STRINGS = tomllib.load(f) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_one_line_docstring(self, test_args, args): - """Return one-line docstring.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["one_line"]["instring"] - outstring = self.TEST_STRINGS["one_line"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_with_summary_that_ends_in_quote(self, test_args, args): - """Return one-line docstring with period after quote.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["summary_end_quote"]["instring"] - outstring = self.TEST_STRINGS["summary_end_quote"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - 
@pytest.mark.unit - @pytest.mark.parametrize("args", [["--wrap-descriptions", "44", ""]]) - def test_format_docstring_with_bad_indentation(self, test_args, args): - """Add spaces to indentation when too few.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["bad_indentation"]["instring"] - outstring = self.TEST_STRINGS["bad_indentation"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_with_too_much_indentation(self, test_args, args): - """Remove spaces from indentation when too many.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["too_much_indentation"]["instring"] - outstring = self.TEST_STRINGS["too_much_indentation"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--wrap-descriptions", "52", ""]]) - def test_format_docstring_with_trailing_whitespace(self, test_args, args): - """Remove trailing white space.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["trailing_whitespace"]["instring"] - outstring = self.TEST_STRINGS["trailing_whitespace"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_with_empty_docstring(self, test_args, args): - """Do nothing with empty docstring.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["empty_docstring"]["instring"] - outstring = self.TEST_STRINGS["empty_docstring"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - 
@pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_with_no_period(self, test_args, args): - """Add period to end of one-line and summary line.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["no_summary_period"]["instring"] - outstring = self.TEST_STRINGS["no_summary_period"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_with_single_quotes(self, test_args, args): - """Replace single triple quotes with triple double quotes.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["single_quotes"]["instring"] - outstring = self.TEST_STRINGS["single_quotes"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_with_single_quotes_multi_line(self, test_args, args): - """Replace single triple quotes with triple double quotes.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["single_quotes_multiline"]["instring"] - outstring = self.TEST_STRINGS["single_quotes_multiline"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_leave_underlined_summaries_alone(self, test_args, args): - """Leave underlined summary lines as is.""" - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["skip_underlined_summary"]["instring"] - outstring = self.TEST_STRINGS["skip_underlined_summary"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - 
@pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_leave_blank_line_after_variable_def( - self, +from docformatter.format import Formatter + +# docformatter Local Imports +from .. import generate_random_docstring + +NO_ARGS = [""] +WRAP_DESC_72 = ["--wrap-descriptions", "72", ""] +WRAP_DESC_88 = ["--wrap-descriptions", "88", ""] +WRAP_BOTH_88 = ["--wrap-descriptions", "88", "--wrap-summaries", "88", ""] + +with open("tests/_data/string_files/do_format_docstrings.toml", "rb") as f: + TEST_STRINGS = tomllib.load(f) + + +@pytest.mark.integration +@pytest.mark.order(4) +@pytest.mark.parametrize( + "test_key, args", + [ + ("one_line", NO_ARGS), + ("summary_end_quote", NO_ARGS), + ("bad_indentation", ["--wrap-descriptions", "44", ""]), + ("too_much_indentation", NO_ARGS), + ("trailing_whitespace", ["--wrap-descriptions", "52", ""]), + ("empty_docstring", NO_ARGS), + ("no_summary_period", NO_ARGS), + ("single_quotes", NO_ARGS), + ("single_quotes_multiline", NO_ARGS), + pytest.param( + "skip_underlined_summary", + NO_ARGS, + marks=pytest.mark.skip( + reason="LEGACY: Underlined summaries should now be processed as " + "section headers." 
+ ), + ), + ("no_blank", NO_ARGS), + ("presummary_newline", ["--pre-summary-newline", ""]), + ("summary_multiline", ["--make-summary-multi-line", ""]), + ("presummary_space", ["--pre-summary-space", ""]), + ("quote_no_space_black", ["--black", ""]), + ("quote_space_black", ["--black", ""]), + ("quote_space_multiline_black", ["--black", ""]), + ("epytext", ["--style", "epytext"] + WRAP_BOTH_88), + ("epytext_numpy", ["--style", "numpy"] + WRAP_BOTH_88), + ("sphinx", ["--style", "sphinx"] + WRAP_BOTH_88), + ("sphinx_numpy", ["--style", "numpy"] + WRAP_BOTH_88), + ("numbered_list", WRAP_DESC_72), + ("parameter_dash", WRAP_DESC_72), + ("parameter_colon", ["--style", "numpy"] + WRAP_DESC_72), + ("many_short_columns", NO_ARGS), + ("inline", WRAP_DESC_72), + ("inline_short", WRAP_DESC_72), + ("inline_long", WRAP_DESC_72), + ("only_link", WRAP_DESC_72), + ("weird_punctuation", ["--wrap-summaries", "79", ""]), + ("description_wrap", WRAP_DESC_72), + ("ignore_doctest", WRAP_DESC_72), + ("ignore_summary_doctest", WRAP_DESC_72), + ("same_indentation_doctest", WRAP_DESC_72), + ( + "force_wrap", + ["--wrap-descriptions", "72", "--wrap-summaries", "50", "--force-wrap", ""], + ), + ("summary_wrap_tab", ["--wrap-summaries", "30", "--tab-width", "4", ""]), + ( + "one_line_wrap_newline", + ["--wrap-summaries", "69", "--close-quotes-on-newline", ""], + ), + ( + "one_line_no_wrap", + ["--wrap-summaries", "88", "--close-quotes-on-newline", ""], + ), + ("issue_75", WRAP_DESC_72), + ("issue_75_2", WRAP_DESC_72), + ("issue_75_3", WRAP_DESC_72), + ("issue_127", ["--wrap-descriptions", "120", "--wrap-summaries", "120", ""]), + ("issue_140", WRAP_DESC_72), + ("issue_140_2", WRAP_DESC_72), + ("issue_140_3", WRAP_DESC_72), + ("issue_145", WRAP_DESC_72), + ("issue_150", WRAP_DESC_72), + ("issue_157", NO_ARGS), + ("issue_157_url", WRAP_DESC_88), + ("issue_157_2", WRAP_DESC_88), + ("issue_157_3", WRAP_DESC_88), + ("issue_157_4", WRAP_DESC_88), + ("issue_157_5", WRAP_DESC_88), + ("issue_157_6", 
WRAP_DESC_88), + ("issue_157_11", WRAP_DESC_88), + ("issue_159", WRAP_BOTH_88), + ("issue_159", WRAP_BOTH_88), + ("issue_180", WRAP_BOTH_88), + ("issue_189", WRAP_DESC_72), + ("issue_193", ["--non-cap", "eBay", "iPad", "-c", ""]), + ("issue_199", NO_ARGS), + ("issue_210", NO_ARGS), + ("issue_218", NO_ARGS), + ("issue_230", ["--style", "sphinx"] + WRAP_BOTH_88), + ("issue_215", WRAP_BOTH_88), + ("issue_217_222", WRAP_BOTH_88), + ("issue_224", WRAP_BOTH_88), + ("issue_228", WRAP_BOTH_88), + ("issue_229", WRAP_BOTH_88), + ("issue_229_2", WRAP_BOTH_88), + ("issue_234", WRAP_BOTH_88), + ("issue_235", WRAP_BOTH_88), + ("issue_239", [""]), + ("issue_239_sphinx", WRAP_BOTH_88), + ("issue_245", WRAP_BOTH_88), + ("issue_250", WRAP_BOTH_88), + ( + "issue_253", + [ + "--wrap-descriptions", + "120", + "--wrap-summaries", + "120", + "--pre-summary-newline", + "--black", + "", + ], + ), + ("issue_263_sphinx", NO_ARGS), + ("issue_263_epytext", ["-s", "epytext"] + NO_ARGS), + ("issue_271", ["--pre-summary-newline"] + WRAP_BOTH_88), + ], +) +def test_do_format_docstring(test_key, test_args, args): + uut = Formatter( test_args, - args, - ): - """Leave blank lines after any variable beginning with 'def'. - - See issue #156. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_156"]["instring"] - outstring = self.TEST_STRINGS["issue_156"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_leave_directive_alone(self, test_args, args): - """Leave docstrings that have a reST directive in the summary alone. - - See issue #157. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_157"]["instring"] - outstring = self.TEST_STRINGS["issue_157"]["outstring"] + sys.stderr, + sys.stdin, + sys.stdout, + ) - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) + source = TEST_STRINGS[test_key]["source"] + expected = TEST_STRINGS[test_key]["expected"] - @pytest.mark.unit - @pytest.mark.parametrize("args", [[""]]) - def test_format_docstring_leave_blank_line_after_comment( - self, + if test_key == "summary_wrap_tab": + _indentation = "\t\t" + else: + _indentation = " " + result = uut._do_format_docstring( + _indentation, + source, + ) + assert result == expected, f"\nFailed {test_key}\nExpected {expected}\nGot {result}" + + +@pytest.mark.integration +@pytest.mark.parametrize("args", [[""]]) +def test_do_format_docstring_random_with_wrap( + test_args, + args, +): + uut = Formatter( test_args, - args, + sys.stderr, + sys.stdin, + sys.stdout, + ) + + # This function uses `random` so make sure each run of this test is + # repeatable. + random.seed(0) + + min_line_length = 50 + for max_length, num_indents in itertools.product( + range(min_line_length, 100), range(20) ): - """Leave blank lines after docstring followed by a comment. - - See issue #176. - """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_176"]["instring"] - outstring = self.TEST_STRINGS["issue_176"]["outstring"] - - assert outstring == uut._do_format_code( - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--non-cap", "eBay", "iPad", "-c", ""]]) - def test_format_docstring_with_non_cap_words(self, test_args, args): - """Capitalize words not found in the non_cap list. - - See issue #193. 
- """ - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_193"]["instring"] - outstring = self.TEST_STRINGS["issue_193"]["outstring"] - - assert outstring == uut._do_format_docstring( - INDENTATION, - instring, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("args", [["--style", "sphinx", ""], ["--style", "epytext", ""]]) - def test_do_not_double_process_urls(self, test_args, args): - """Do not double-process urls in fields - - See issue #263 - """ - style = args[1] - - uut = Formatter( - test_args, - sys.stderr, - sys.stdin, - sys.stdout, - ) - - instring = self.TEST_STRINGS["issue_263"][style]["instring"] - outstring = self.TEST_STRINGS["issue_263"][style]["outstring"] - - assert outstring == uut._do_format_docstring(INDENTATION, instring, ) + indentation = " " * num_indents + uut.args.wrap_summaries = max_length + formatted_text = indentation + uut._do_format_docstring( + indentation=indentation, + docstring=generate_random_docstring(max_word_length=min_line_length // 2), + ) + for line in formatted_text.split("\n"): + # It is not the formatter's fault if a word is too long to + # wrap. 
+ if len(line.split()) > 1: + assert len(line) <= max_length diff --git a/tests/formatter/test_format_functions.py b/tests/formatter/test_format_functions.py new file mode 100644 index 00000000..6456da79 --- /dev/null +++ b/tests/formatter/test_format_functions.py @@ -0,0 +1,310 @@ +# pylint: skip-file +# type: ignore +# +# tests.formatter.test_format_functions.py is part of the docformatter project +# +# Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+"""Module for testing formatting functions.""" + +# Standard Library Imports +import contextlib +import sys +import tokenize +from io import BytesIO, StringIO + +with contextlib.suppress(ImportError): + if sys.version_info >= (3, 11): + # Standard Library Imports + import tomllib + else: + # Third Party Imports + import tomli as tomllib + +# Third Party Imports +import pytest + +# docformatter Package Imports +from docformatter import format as _format + +with open("tests/_data/string_files/format_functions.toml", "rb") as f: + TEST_STRINGS = tomllib.load(f) + + +def _get_tokens(source): + return list(tokenize.tokenize(BytesIO(source.encode()).readline)) + + +def _get_docstring_token_and_index(tokens): + for i, tok in enumerate(tokens): + if tok.type == tokenize.STRING: + return i + raise ValueError("No docstring found in token stream.") + + +@pytest.mark.unit +@pytest.mark.parametrize( + "test_key, black", + [ + ("module_docstring_followed_by_string", False), + ("module_docstring_followed_by_code", False), + ("module_docstring_followed_by_comment_then_code", False), + ("module_docstring_followed_by_comment_then_string", False), + ("module_docstring_in_black", True), + ], +) +def test_module_docstring_newlines(test_key, black): + expected = TEST_STRINGS[test_key]["expected"] + + result = _format._get_module_docstring_newlines(black) + assert ( + result == expected + ), f"\nFailed {test_key}:\nExpected {expected}\nGot {result}" + + +@pytest.mark.unit +@pytest.mark.order(4) +@pytest.mark.parametrize( + "test_key, classifier", + [ + ( + "class_docstring_followed_by_statement", + _format._get_class_docstring_newlines, + ), + ("class_docstring_followed_by_def", _format._get_class_docstring_newlines), + ("class_docstring_with_decorator", _format._get_class_docstring_newlines), + ("class_docstring_with_class_variable", _format._get_class_docstring_newlines), + ("function_with_expr", _format._get_function_docstring_newlines), + ("function_with_inner_def", 
_format._get_function_docstring_newlines), + ("function_with_inner_async_def", _format._get_function_docstring_newlines), + ("function_with_decorator_and_def", _format._get_function_docstring_newlines), + ( + "function_with_decorator_and_async_def", + _format._get_function_docstring_newlines, + ), + ( + "function_docstring_with_inner_class", + _format._get_function_docstring_newlines, + ), + ("attribute_docstring_single_line", _format._get_attribute_docstring_newlines), + ("attribute_docstring_multi_line", _format._get_attribute_docstring_newlines), + ( + "attribute_docstring_outside_class", + _format._get_attribute_docstring_newlines, + ), + ( + "attribute_docstring_inside_method", + _format._get_attribute_docstring_newlines, + ), + ("attribute_docstring_with_comment", _format._get_attribute_docstring_newlines), + ( + "attribute_docstring_multiple_assignments", + _format._get_attribute_docstring_newlines, + ), + ("attribute_docstring_equiv_expr", _format._get_attribute_docstring_newlines), + ], +) +def test_get_docstring_newlines(test_key, classifier): + source = TEST_STRINGS[test_key]["source"] + expected = TEST_STRINGS[test_key]["expected"] + + tokens = _get_tokens(source) + index = _get_docstring_token_and_index(tokens) + + result = classifier(tokens, index) + assert ( + result == expected + ), f"\nFailed {test_key}:\nExpected {expected}\nGot {result}" + + +@pytest.mark.unit +@pytest.mark.parametrize( + "test_key", + [ + "get_num_rows_columns", + ], +) +def test_get_num_rows_columns(test_key): + token = tokenize.TokenInfo( + type=TEST_STRINGS[test_key]["token"][0], + string=TEST_STRINGS[test_key]["token"][1], + start=TEST_STRINGS[test_key]["token"][2], + end=TEST_STRINGS[test_key]["token"][3], + line=TEST_STRINGS[test_key]["token"][4], + ) + expected = TEST_STRINGS[test_key]["expected"] + + result = _format._get_num_rows_columns(token) + assert ( + result[0] == expected[0] + ), f"\nFailed {test_key}\nExpected {expected[0]} rows\nGot {result[0]} rows" + assert ( 
+ result[1] == expected[1] + ), f"\nFailed {test_key}\nExpected {expected[1]} columns\nGot {result[1]} columns" + + +@pytest.mark.unit +@pytest.mark.parametrize( + "test_key", + [ + "get_start_end_indices", + ], +) +def test_get_start_end_indices(test_key): + prev_token = tokenize.TokenInfo( + type=TEST_STRINGS[test_key]["prev_token"][0], + string=TEST_STRINGS[test_key]["prev_token"][1], + start=TEST_STRINGS[test_key]["prev_token"][2], + end=TEST_STRINGS[test_key]["prev_token"][3], + line=TEST_STRINGS[test_key]["prev_token"][4], + ) + token = tokenize.TokenInfo( + type=TEST_STRINGS[test_key]["token"][0], + string=TEST_STRINGS[test_key]["token"][1], + start=TEST_STRINGS[test_key]["token"][2], + end=TEST_STRINGS[test_key]["token"][3], + line=TEST_STRINGS[test_key]["token"][4], + ) + expected = TEST_STRINGS[test_key]["expected"] + + result = _format._get_start_end_indices(token, prev_token, 3, 17) + for i in 0, 1: + for j in 0, 1: + assert ( + result[i][j] == expected[i][j] + ), f"\nFailed {test_key}\nExpected {expected[i][j]}\nGot {result[i][j]}" + + +@pytest.mark.unit +@pytest.mark.parametrize( + "test_key, block", + [ + ("do_remove_preceding_blank_lines_module", [(0, 4, "module")]), + ("do_remove_preceding_blank_lines_class", [(0, 7, "class")]), + ("do_remove_preceding_blank_lines_function", [(0, 9, "function")]), + ("do_remove_preceding_blank_lines_attribute", [(1, 6, "attribute")]), + ], +) +def test_do_remove_preceding_blank_lines(test_key, block): + source = TEST_STRINGS[test_key]["source"] + expected = TEST_STRINGS[test_key]["expected"] + + tokens = list(tokenize.generate_tokens(StringIO(source, newline="").readline)) + + result = _format._do_remove_preceding_blank_lines(tokens, block) + for _idx in range(len(result)): + assert ( + result[_idx].string == expected[_idx] + ), f"\nFailed {test_key}\nExpected {expected[_idx]}\nGot {result[_idx].string}" + + +@pytest.mark.integration +@pytest.mark.order(5) +@pytest.mark.parametrize( + "test_key, black", + [ + 
("get_newlines_by_type_module_docstring", False), + ("get_newlines_by_type_module_docstring_black", True), + ("get_newlines_by_type_class_docstring", False), + ("get_newlines_by_type_function_docstring", False), + ("get_newlines_by_type_attribute_docstring", False), + ], +) +def test_get_newlines_by_type(test_key, black): + source = TEST_STRINGS[test_key]["source"] + expected = TEST_STRINGS[test_key]["expected"] + + tokens = _get_tokens(source) + index = _get_docstring_token_and_index(tokens) + + result = _format._get_newlines_by_type(tokens, index, black) + assert result == expected, f"\nFailed {test_key}\nExpected {expected}\nGot {result}" + + +@pytest.mark.integration +@pytest.mark.order(4) +@pytest.mark.parametrize( + "test_key", + [ + "get_unmatched_start_end_indices", + ], +) +def test_get_unmatched_start_end_indices(test_key): + prev_token = tokenize.TokenInfo( + type=TEST_STRINGS[test_key]["prev_token"][0], + string=TEST_STRINGS[test_key]["prev_token"][1], + start=TEST_STRINGS[test_key]["prev_token"][2], + end=TEST_STRINGS[test_key]["prev_token"][3], + line=TEST_STRINGS[test_key]["prev_token"][4], + ) + token = tokenize.TokenInfo( + type=TEST_STRINGS[test_key]["token"][0], + string=TEST_STRINGS[test_key]["token"][1], + start=TEST_STRINGS[test_key]["token"][2], + end=TEST_STRINGS[test_key]["token"][3], + line=TEST_STRINGS[test_key]["token"][4], + ) + expected = TEST_STRINGS[test_key]["expected"] + + result = _format._get_unmatched_start_end_indices(token, prev_token, 4) + for i in 0, 1: + for j in 0, 1: + assert ( + result[i][j] == expected[i][j] + ), f"\nFailed {test_key}\nExpected {expected[i][j]}\nGot {result[i][j]}" + + +@pytest.mark.integration +@pytest.mark.order(5) +@pytest.mark.parametrize( + "test_key", + [ + "do_update_token_indices", + ], +) +def test_do_update_token_indices(test_key): + tokens = [] + for token in TEST_STRINGS[test_key]["tokens"]: + tokens.append( + tokenize.TokenInfo( + type=token[0], + string=token[1], + start=token[2], + 
end=token[3], + line=token[4], + ) + ) + expected = TEST_STRINGS[test_key]["expected"] + + result = _format._do_update_token_indices(tokens) + for idx, _expected in enumerate(expected): + # We convert the start and end tuples to lists because we can't store tuples + # in a TOML file. + assert list(result[idx].start) == _expected[0], ( + f"\nFailed {test_key} start index\n" + f"Expected {expected[0]}\nGot {result[idx].start}" + ) + assert list(result[idx].end) == _expected[1], ( + f"\nFailed {test_key} end index\n" + f"Expected {expected[1]}\nGot {result[idx].end}" + ) diff --git a/tests/formatter/test_format_methods.py b/tests/formatter/test_format_methods.py new file mode 100644 index 00000000..c60042a7 --- /dev/null +++ b/tests/formatter/test_format_methods.py @@ -0,0 +1,296 @@ +# pylint: skip-file +# type: ignore +# +# tests.formatter.test_format_methods.py is part of the docformatter project +# +# Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +"""Module for testing various Formatter class methods.""" + +# Standard Library Imports +import contextlib +import sys +import tokenize +from io import BytesIO +from tokenize import TokenInfo + +with contextlib.suppress(ImportError): + if sys.version_info >= (3, 11): + # Standard Library Imports + import tomllib + else: + # Third Party Imports + import tomli as tomllib + +# Third Party Imports +import pytest + +# docformatter Package Imports +from docformatter.format import Formatter + +with open("tests/_data/string_files/format_methods.toml", "rb") as f: + TEST_STRINGS = tomllib.load(f) + + +def _get_tokens(source): + return list(tokenize.tokenize(BytesIO(source.encode()).readline)) + + +def _get_docstring_token_and_index(tokens): + for i, tok in enumerate(tokens): + if tok.type == tokenize.STRING: + return i + raise ValueError("No docstring found in token stream.") + + +@pytest.mark.unit +@pytest.mark.parametrize("args", [[""]]) +def test_do_add_blank_lines(args): + uut = Formatter( + args, + sys.stderr, + sys.stdin, + sys.stdout, + ) + uut._do_add_blank_lines(2, 2, 2) + + assert uut.new_tokens == [ + TokenInfo(type=4, string="\n", start=(2, 0), end=(2, 1), line="\n"), + TokenInfo(type=4, string="\n", start=(3, 0), end=(3, 1), line="\n"), + ] + + +@pytest.mark.unit +@pytest.mark.parametrize( + "test_key", + [ + "do_add_unformatted_docstring", + ], +) +@pytest.mark.parametrize("args", [[""]]) +def test_do_add_unformatted_docstring(test_key, args): + uut = Formatter( + args, + sys.stderr, + sys.stdin, + sys.stdout, + ) + token = tokenize.TokenInfo( + type=TEST_STRINGS[test_key]["token"][0], + string=TEST_STRINGS[test_key]["token"][1], + start=tuple(TEST_STRINGS[test_key]["token"][2]), + 
end=tuple(TEST_STRINGS[test_key]["token"][3]), + line=TEST_STRINGS[test_key]["token"][4], + ) + expected = [ + TokenInfo( + type=3, + string='"""This is a docstring.\\n\\n\\n That should be on less lines\\n"""', + start=(3, 4), + end=(6, 7), + line=' """This is a docstring.\\n\\n\\n That should be on less lines\\n"""', + ), + TokenInfo( + type=4, + string="\n", + start=(6, 7), + end=(6, 8), + line=' """This is a docstring.\\n\\n\\n That should be on less lines\\n"""', + ), + ] + + uut._do_add_unformatted_docstring(token, "function") + assert ( + uut.new_tokens == expected + ), f"\nFailed {test_key}\nExpected {expected}\nGot {uut.new_tokens}" + + +@pytest.mark.integration +@pytest.mark.order(5) +@pytest.mark.parametrize( + "test_key, args", + [ + ("do_add_formatted_docstring", [""]), + ], +) +def test_do_add_formatted_docstring(test_key, test_args, args): + uut = Formatter( + test_args, + sys.stderr, + sys.stdin, + sys.stdout, + ) + + token = tokenize.TokenInfo( + type=TEST_STRINGS[test_key]["token"][0], + string=TEST_STRINGS[test_key]["token"][1], + start=tuple(TEST_STRINGS[test_key]["token"][2]), + end=tuple(TEST_STRINGS[test_key]["token"][3]), + line=TEST_STRINGS[test_key]["token"][4], + ) + next_token = tokenize.TokenInfo( + type=TEST_STRINGS[test_key]["next_token"][0], + string=TEST_STRINGS[test_key]["next_token"][1], + start=tuple(TEST_STRINGS[test_key]["next_token"][2]), + end=tuple(TEST_STRINGS[test_key]["next_token"][3]), + line=TEST_STRINGS[test_key]["next_token"][4], + ) + expected = [ + tokenize.TokenInfo( + type=3, + string='"""This is a docstring.\\n."""', + start=(3, 4), + end=(6, 7), + line=' """This is a docstring.\\n."""\n', + ), + tokenize.TokenInfo( + type=4, + string="\n", + start=(6, 7), + end=(6, 8), + line=' """This is a docstring.\\n."""\n', + ), + tokenize.TokenInfo(type=4, string="\n", start=(7, 0), end=(7, 1), line="\n"), + ] + + uut._do_add_formatted_docstring(token, next_token, "function", 1) + assert ( + uut.new_tokens == expected + ), 
f"\nFailed {test_key}\nExpected {expected}\nGot {uut.new_tokens}" + + +@pytest.mark.integration +@pytest.mark.order(3) +@pytest.mark.parametrize( + "test_key, args", + [ + ("do_format_oneline_docstring", [""]), + ("do_format_oneline_docstring_that_ends_in_quote", [""]), + ("do_format_oneline_docstring_with_wrap", ["--wrap-summaries", "72", ""]), + ( + "do_format_oneline_docstring_with_quotes_newline", + ["--close-quotes-on-newline", ""], + ), + ( + "do_format_oneline_docstring_make_multiline", + ["--make-summary-multi-line", ""], + ), + ], +) +def test_format_one_line_docstring(test_key, test_args, args): + uut = Formatter( + test_args, + sys.stderr, + sys.stdin, + sys.stdout, + ) + + source = TEST_STRINGS[test_key]["source"] + expected = TEST_STRINGS[test_key]["expected"] + + result = uut._do_format_oneline_docstring( + " ", + source, + '"""', + ) + assert result == expected, f"\nFailed {test_key}\nExpected {expected}\nGot {result}" + + +@pytest.mark.integration +@pytest.mark.parametrize( + "test_key, args", + [ + ("do_format_multiline_docstring", [""]), + ( + "do_format_multiline_docstring_pre_summary_newline", + ["--pre-summary-newline", ""], + ), + ( + "do_format_multiline_docstring_post_description_blank", + ["--blank", ""], + ), + ], +) +def test_format_multiline_docstring(test_key, test_args, args): + uut = Formatter( + test_args, + sys.stderr, + sys.stdin, + sys.stdout, + ) + + source = TEST_STRINGS[test_key]["source"] + expected = TEST_STRINGS[test_key]["expected"] + + result = uut._do_format_multiline_docstring( + " ", + source[0], + source[1], + '"""', + ) + assert result == expected, f"\nFailed {test_key}\nExpected {expected}\nGot {result}" + + +@pytest.mark.integration +@pytest.mark.order(6) +@pytest.mark.parametrize( + "test_key, args", + [ + ("do_rewrite_docstring_blocks", [""]), + ], +) +def test_do_rewrite_docstring_blocks(test_key, test_args, args): + uut = Formatter( + test_args, + sys.stderr, + sys.stdin, + sys.stdout, + ) + + tokens = [] + for 
token in TEST_STRINGS[test_key]["tokens"]: + tokens.append( + tokenize.TokenInfo( + type=token[0], + string=token[1], + start=tuple(token[2]), + end=tuple(token[3]), + line=token[4], + ) + ) + expected = [] + for token in TEST_STRINGS[test_key]["expected"]: + expected.append( + tokenize.TokenInfo( + type=token[0], + string=token[1], + start=tuple(token[2]), + end=tuple(token[3]), + line=token[4], + ) + ) + + uut._do_rewrite_docstring_blocks(tokens) + assert ( + uut.new_tokens == expected + ), f"\nFailed {test_key}\nExpected {expected}\nGot {uut.new_tokens}" diff --git a/tests/test_syntax_functions.py b/tests/test_syntax_functions.py deleted file mode 100644 index 24d45a9d..00000000 --- a/tests/test_syntax_functions.py +++ /dev/null @@ -1,236 +0,0 @@ -# pylint: skip-file -# type: ignore -# -# tests.test_syntax_functions.py is part of the docformatter project -# -# Copyright (C) 2012-2023 Steven Myint -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
-"""Module for testing functions that deal with syntax. - -This module contains tests for syntax functions. Syntax functions are -those: - - do_clean_link() - do_find_directives() - do_find_links() - do_skip_link() -""" -import textwrap - -# Third Party Imports -import pytest - -# docformatter Package Imports -import docformatter -from docformatter import do_split_description - - -class TestURLHandlers: - """Class for testing the URL handling functions. - - Includes tests for: - - - do_clean_link() - - do_find_links() - - do_skip_link() - """ - - @pytest.mark.unit - def test_find_in_line_link(self): - """Should find link pattern in a text block.""" - assert [(53, 162)] == docformatter.do_find_links( - "The text file can be retrieved via the Chrome plugin `Get \ -Cookies.txt ` while browsing." - ) - assert [(95, 106), (110, 123)] == docformatter.do_find_links( - "``pattern`` is considered as an URL only if it is parseable as such\ - and starts with ``http://`` or ``https://``." - ) - - @pytest.mark.unit - def test_skip_link_with_manual_wrap(self): - """Should skip a link that has been manually wrapped by the user.""" - assert docformatter.do_skip_link( - "``pattern`` is considered as an URL only if it is parseable as such\ - and starts with ``http://`` or ``https://``.", - (95, 106), - ) - assert docformatter.do_skip_link( - "``pattern`` is considered as an URL only if it is parseable as such\ - and starts with ``http://`` or ``https://``.", - (110, 123), - ) - - @pytest.mark.unit - def test_do_clean_link(self): - """Should remove line breaks from links.""" - assert ( - " `Get Cookies.txt `" - ) == docformatter.do_clean_url( - "`Get \ -Cookies.txt `", - " ", - ) - - assert ( - " `custom types provided by Click `_." - ) == docformatter.do_clean_url( - "`custom types provided by Click\ - `_.", - " ", - ) - - -class TestreSTHandlers: - """Class for testing the reST directive handling functions. 
- - Includes tests for: - - - do_find_directives() - """ - - @pytest.mark.unit - def test_find_in_line_directives(self): - """Should find reST directieves in a text block.""" - assert docformatter.do_find_directives( - "These are some reST directives that need to be retained even if it means not wrapping the line they are found on.\ - Constructs and returns a :class:`QuadraticCurveTo `.\ - Register ``..click:example::`` and ``.. click:run::`` directives, augmented with ANSI coloring." - ) - - @pytest.mark.unit - def test_find_double_dot_directives(self): - """Should find reST directives preceeded by ..""" - assert docformatter.do_find_directives( - ".. _linspace API: https://numpy.org/doc/stable/reference/generated/numpy.linspace.html\ - .. _arange API: https://numpy.org/doc/stable/reference/generated/numpy.arange.html\ - .. _logspace API: https://numpy.org/doc/stable/reference/generated/numpy.logspace.html" - ) - - assert docformatter.do_find_directives( - "``pattern`` is considered as an URL only if it is parseable as such" - "and starts with ``http://`` or ``https://``." - "" - ".. important::" - "" - "This is a straight `copy of the functools.cache implementation" - "`_," - "hich is only `available in the standard library starting with Python v3.9" - "`." - ) - - @pytest.mark.unit - def test_find_double_backtick_directives(self): - """Should find reST directives preceeded by ``.""" - assert docformatter.do_find_directives( - "By default we choose to exclude:" - "" - "``Cc``" - " Since ``mailman`` apparently `sometimes trims list members" - " `_" - " from the ``Cc`` header to avoid sending duplicates. Which means that copies of mail" - " reflected back from the list server will have a different ``Cc`` to the copy saved by" - " the MUA at send-time." - "" - "``Bcc``" - " Because copies of the mail saved by the MUA at send-time will have ``Bcc``, but copies" - " reflected back from the list server won't." 
- "" - "``Reply-To``" - " Since a mail could be ``Cc``'d to two lists with different ``Reply-To`` munging" - "options set." - ) - - -class TestSplitDescription: - """Class for testing the function to process the description - - Includes tests for: - - do_split_description() - - """ - med_str = "m"*40 # long enough that 2 won't fit on a line - indent = " " - - def do_test(self, text): - return do_split_description(textwrap.dedent(text), self.indent, 72, "sphinx") - - def indent_all(self, strs): - return [self.indent + s for s in strs] - - @pytest.mark.unit - def test_split_description_url_outside_param(self): - assert self.do_test( - f"""\ - {self.med_str} https://{self.med_str} - :param a: {self.med_str} - """ - ) == self.indent_all([ - self.med_str, - f"https://{self.med_str}", - f":param a: {self.med_str}", - ]) - - @pytest.mark.unit - def test_split_description_single_url_in_param(self): - assert self.do_test( - f"""\ - {self.med_str} - :param a: {self.med_str} https://{self.med_str}a - """ - ) == self.indent_all([ - self.med_str, - f":param a: {self.med_str}", - self.indent + f"https://{self.med_str}a", - ]) - - @pytest.mark.unit - def test_split_description_single_url_in_multiple_params(self): - assert self.do_test( - f"""\ - {self.med_str} - :param a: {self.med_str} https://{self.med_str}a - :param b: {self.med_str} https://{self.med_str}b - """ - ) == self.indent_all([ - self.med_str, - f":param a: {self.med_str}", - self.indent + f"https://{self.med_str}a", - f":param b: {self.med_str}", - self.indent + f"https://{self.med_str}b", - ]) - - @pytest.mark.unit - def test_split_description_multiple_urls_in_param(self): - assert self.do_test( - f"""\ - {self.med_str} - :param a: {self.med_str} https://{self.med_str}0 https://{self.med_str}1 - """ - ) == self.indent_all([ - self.med_str, - f":param a: {self.med_str}", - self.indent + f"https://{self.med_str}0", - self.indent + f"https://{self.med_str}1", - ]) From b48500f828e15c3b267156a7de2f884a755a7929 Mon Sep 
17 00:00:00 2001 From: Doyle Rowland Date: Wed, 23 Jul 2025 00:15:27 -0400 Subject: [PATCH 12/21] test: update and add tests for encoding module --- tests/test_encoding_functions.py | 195 ++++++++++++------------------- 1 file changed, 76 insertions(+), 119 deletions(-) diff --git a/tests/test_encoding_functions.py b/tests/test_encoding_functions.py index d1748ed5..f16f4623 100644 --- a/tests/test_encoding_functions.py +++ b/tests/test_encoding_functions.py @@ -1,9 +1,10 @@ # pylint: skip-file # type: ignore # -# tests.test_encoding_functions.py is part of the docformatter project +# tests.test.encoding_functions.py is part of the docformatter project # # Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -24,48 +25,97 @@ # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -"""Module for testing functions used determine file encodings. 
- -Encoding functions are: - - - detect_encoding() - - find_newline() - - open_with_encoding() -""" +"""Module for testing functions used to determine file encodings.""" # Standard Library Imports +import contextlib import io import sys +with contextlib.suppress(ImportError): + if sys.version_info >= (3, 11): + # Standard Library Imports + import tomllib + else: + # Third Party Imports + import tomli as tomllib + # Third Party Imports import pytest # docformatter Package Imports from docformatter import Encoder -SYSTEM_ENCODING = sys.getdefaultencoding() +with open("tests/_data/string_files/encoding_functions.toml", "rb") as f: + TEST_STRINGS = tomllib.load(f) + + +@pytest.mark.unit +@pytest.mark.parametrize( + "test_key", + [ + "find_newline_only_cr", + "find_newline_only_lf", + "find_newline_only_crlf", + "find_newline_cr1_and_lf2", + "find_newline_cr1_and_crlf2", + "find_newline_should_default_to_lf_empty", + "find_newline_should_default_to_lf_blank", + "find_dominant_newline", + ], +) +def test_do_find_newline(test_key): + uut = Encoder() + + source = TEST_STRINGS[test_key]["instring"] + expected = TEST_STRINGS[test_key]["expected"] + + result = uut.do_find_newline(source) + assert result == expected, f"\nFailed {test_key}\nExpected {expected}\nGot {result}" @pytest.mark.usefixtures("temporary_file") -class TestDetectEncoding: - """Class for testing the detect_encoding() function.""" +class TestDoOpenWithEncoding: + """Class for testing the do_open_with_encoding function.""" @pytest.mark.unit @pytest.mark.parametrize("contents", ["# -*- coding: utf-8 -*-\n"]) - def test_detect_encoding_with_explicit_utf_8( - self, temporary_file, contents - ): - """Return utf-8 when explicitly set in file.""" + def test_do_open_with_utf_8_encoding(self, temporary_file, contents): + """Return TextIOWrapper object when opening file with encoding.""" + uut = Encoder() + + assert isinstance( + uut.do_open_with_encoding(temporary_file), + io.TextIOWrapper, + ) + + @pytest.mark.unit 
+ @pytest.mark.parametrize("contents", ["# -*- coding: utf-8 -*-\n"]) + def test_do_open_with_wrong_encoding(self, temporary_file, contents): + """Raise LookupError when passed unknown encoding.""" + uut = Encoder() + uut.encoding = "cr1252" + + with pytest.raises(LookupError): + uut.do_open_with_encoding(temporary_file) + + +@pytest.mark.usefixtures("temporary_file") +class TestDoDetectEncoding: + """Class for testing the detect_encoding() function.""" + + @pytest.mark.integration + @pytest.mark.parametrize("contents", ["# -*- coding: utf-8 -*-\n"]) + def test_do_detect_encoding_with_explicit_utf_8(self, temporary_file, contents): + """Return utf-8 when explicitly set in the file.""" uut = Encoder() uut.do_detect_encoding(temporary_file) assert "utf_8" == uut.encoding - @pytest.mark.unit - @pytest.mark.parametrize( - "contents", ["# Wow! docformatter is super-cool.\n"] - ) - def test_detect_encoding_with_non_explicit_setting( + @pytest.mark.integration + @pytest.mark.parametrize("contents", ["# Wow! 
docformatter is super-cool.\n"]) + def test_do_detect_encoding_with_non_explicit_setting( self, temporary_file, contents ): """Return default system encoding when encoding not explicitly set.""" @@ -74,119 +124,26 @@ def test_detect_encoding_with_non_explicit_setting( assert "ascii" == uut.encoding - @pytest.mark.unit + @pytest.mark.integration @pytest.mark.parametrize("contents", ["# -*- coding: blah -*-"]) - def test_detect_encoding_with_bad_encoding(self, temporary_file, contents): + def test_do_detect_encoding_with_bad_encoding(self, temporary_file, contents): """Default to latin-1 when unknown encoding detected.""" uut = Encoder() uut.do_detect_encoding(temporary_file) assert "ascii" == uut.encoding - @pytest.mark.unit + @pytest.mark.integration @pytest.mark.parametrize("contents", [""]) - def test_detect_encoding_with_undetectable_encoding(self, temporary_file): + def test_do_detect_encoding_with_undetectable_encoding(self, temporary_file): """Default to latin-1 when encoding detection fails.""" uut = Encoder() # Simulate a file with undetectable encoding with open(temporary_file, "wb") as file: # Binary content unlikely to have a detectable encoding - file.write(b"\xFF\xFE\xFD\xFC\x00\x00\x00\x00") + file.write(b"\xff\xfe\xfd\xfc\x00\x00\x00\x00") uut.do_detect_encoding(temporary_file) assert uut.encoding == uut.DEFAULT_ENCODING - - -class TestFindNewline: - """Class for testing the find_newline() function.""" - - @pytest.mark.unit - def test_find_newline_only_cr(self): - """Return carriage return as newline type.""" - uut = Encoder() - source = ["print 1\r", "print 2\r", "print3\r"] - - assert uut.CR == uut.do_find_newline(source) - - @pytest.mark.unit - def test_find_newline_only_lf(self): - """Return line feed as newline type.""" - uut = Encoder() - source = ["print 1\n", "print 2\n", "print3\n"] - - assert uut.LF == uut.do_find_newline(source) - - @pytest.mark.unit - def test_find_newline_only_crlf(self): - """Return carriage return, line feed as newline 
type.""" - uut = Encoder() - source = ["print 1\r\n", "print 2\r\n", "print3\r\n"] - - assert uut.CRLF == uut.do_find_newline(source) - - @pytest.mark.unit - def test_find_newline_cr1_and_lf2(self): - """Favor line feed over carriage return when both are found.""" - uut = Encoder() - source = ["print 1\n", "print 2\r", "print3\n"] - - assert uut.LF == uut.do_find_newline(source) - - @pytest.mark.unit - def test_find_newline_cr1_and_crlf2(self): - """Favor carriage return, line feed when mix of newline types.""" - uut = Encoder() - source = ["print 1\r\n", "print 2\r", "print3\r\n"] - - assert uut.CRLF == uut.do_find_newline(source) - - @pytest.mark.unit - def test_find_newline_should_default_to_lf(self): - """Default to line feed when no newline type found.""" - uut = Encoder() - - assert uut.LF == uut.do_find_newline([]) - assert uut.LF == uut.do_find_newline(["", ""]) - - @pytest.mark.unit - def test_find_dominant_newline(self): - """Should detect carriage return as the dominant line endings.""" - uut = Encoder() - - goes_in = '''\ -def foo():\r - """\r - Hello\r - foo. 
This is a docstring.\r - """\r -''' - assert uut.CRLF == uut.do_find_newline(goes_in.splitlines(True)) - - -@pytest.mark.usefixtures("temporary_file") -class TestOpenWithEncoding: - """Class for testing the open_with_encoding() function.""" - - @pytest.mark.unit - @pytest.mark.parametrize("contents", ["# -*- coding: utf-8 -*-\n"]) - def test_open_with_utf_8_encoding(self, temporary_file, contents): - """Return TextIOWrapper object when opening file with encoding.""" - uut = Encoder() - uut.do_detect_encoding(temporary_file) - - assert isinstance( - uut.do_open_with_encoding(temporary_file), - io.TextIOWrapper, - ) - - @pytest.mark.unit - @pytest.mark.parametrize("contents", ["# -*- coding: utf-8 -*-\n"]) - def test_open_with_wrong_encoding(self, temporary_file, contents): - """Raise LookupError when passed unknown encoding.""" - uut = Encoder() - uut.encoding = "cr1252" - - with pytest.raises(LookupError): - uut.do_open_with_encoding(temporary_file) From f6039117abd3af7d9bc69e18988ff7ccf029339a Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Wed, 23 Jul 2025 00:16:28 -0400 Subject: [PATCH 13/21] test: update and add tests for string module --- tests/test_string_functions.py | 804 +++++++++++++-------------------- 1 file changed, 313 insertions(+), 491 deletions(-) diff --git a/tests/test_string_functions.py b/tests/test_string_functions.py index 0e83d37b..a61bf3d1 100644 --- a/tests/test_string_functions.py +++ b/tests/test_string_functions.py @@ -4,6 +4,7 @@ # tests.test_string_functions.py is part of the docformatter project # # Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -24,502 +25,323 @@ # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
-"""Module for testing functions that manipulate text. - -This module contains tests for string functions. String functions are -those: - - reindent() - find_shortest_indentation() - normalize_line() - normalize_line_endings() - normalize_summary() - remove_section_headers() - split_first_sentence() - split_summary() - split_summary_and_description() - strip_leading_blank_lines() - strip_quotes() - strip_newlines() -""" +"""Module for testing functions that manipulate text.""" + +# Standard Library Imports +import contextlib +import sys + +with contextlib.suppress(ImportError): + if sys.version_info >= (3, 11): + # Standard Library Imports + import tomllib + else: + # Third Party Imports + import tomli as tomllib # Third Party Imports import pytest # docformatter Package Imports -import docformatter - - -class TestIndenters: - """Class for testing the indentation related function. - - Includes tests for: - - - reindent() - - find_shortest_indentation() - """ - - @pytest.mark.unit - def test_reindent(self): - """Should add four spaces to the beginning of each docstring line.""" - assert """\ - This should be dedented. - - 1. This too. - 2. And this. -""" == docformatter.reindent( - """\ - This should be dedented. - - 1. This too. - 2. And this. - """, - indentation=" ", - ) - - @pytest.mark.unit - def test_reindent_should_expand_tabs_to_indentation(self): - """Should convert tabs to indentation type (four spaces).""" - assert """\ - This should be dedented. - - 1. This too. - 2. And this. -""" == docformatter.reindent( - """\ - This should be dedented. - - 1. This too. - \t2. And this. - """, - indentation=" ", - ) - - @pytest.mark.unit - def test_reindent_with_no_indentation_expand_tabs(self): - """Should convert tabs to indentation type (four spaces).""" - assert """\ -The below should be indented with spaces: - - 1. This too. - 2. And this. -""" == docformatter.reindent( - """\ -The below should be indented with spaces: - -\t1. This too. -\t2. And this. 
- """, - indentation="", - ) - - @pytest.mark.unit - def test_reindent_should_maintain_indentation(self): - """Should make no changes with existing indentation same as type.""" - description = """\ - Parameters: - - - a - - b -""" - assert description == docformatter.reindent( - description, - indentation=" ", - ) - - @pytest.mark.unit - def test_reindent_tab_indentation(self): - """Should maintain tabs for the indentation.""" - assert """\ -\tThis should be indented with a tab. - -\tSo should this. -""" == docformatter.reindent( - """\ -\tThis should be indented with a tab. - -\tSo should this. - """, - indentation="\t", - ) - - @pytest.mark.unit - def testfind_shortest_indentation(self): - """Should find the shortest indentation to be one space.""" - assert " " == docformatter.find_shortest_indentation( - [" ", " b", " a"], - ) - - -class TestNormalizers: - """Class for testing the string normalizing functions. - - Includes tests for: - - - normalize_line() - - normalize_line_endings() - - normalize_summary() - """ - - @pytest.mark.unit - def test_normalize_summary(self): - """Add period and strip spaces to line.""" - assert "This is a sentence." == docformatter.normalize_summary( - "This is a sentence " - ) - - @pytest.mark.unit - def test_normalize_summary_multiline(self): - """Add period to line even with line return character.""" - assert "This \n\t is\na sentence." == docformatter.normalize_summary( - "This \n\t is\na sentence " - ) - - @pytest.mark.unit - def test_normalize_summary_with_different_punctuation(self): - """Do not add period for line ending in question mark.""" - summary = "This is a question?" - assert summary == docformatter.normalize_summary(summary) - - @pytest.mark.unit - def test_normalize_summary_formatted_as_title(self): - """Do not add period for markup title (line begins with #). - - See issue #56. 
- """ - summary = "# This is a title" - assert summary == docformatter.normalize_summary(summary) - - @pytest.mark.unit - def test_normalize_summary_capitalize_first_letter(self): - """Capitalize the first letter of the summary. - - See issue #76. See requirement docformatter_4.5.1. - """ - assert ( - "This is a summary that needs to be capped." - == docformatter.normalize_summary( - "this is a summary that needs to be capped" - ) +from docformatter.strings import ( + description_to_list, + do_clean_excess_whitespace, + do_find_shortest_indentation, + do_normalize_line, + do_normalize_line_endings, + do_normalize_summary, + do_reindent, + do_split_description, + do_split_first_sentence, + do_split_summary, + do_split_summary_and_description, + do_strip_docstring, + do_strip_leading_blank_lines, +) + +with open("tests/_data/string_files/string_functions.toml", "rb") as f: + TEST_STRINGS = tomllib.load(f) + + +@pytest.mark.unit +@pytest.mark.parametrize( + "test_key, indentation", + [ + ("do_reindent", " "), + ("do_reindent_should_expand_tabs_to_indentation", " "), + ("do_reindent_with_no_indentation_expand_tabs", ""), + ("do_reindent_should_maintain_indentation", " "), + ("do_reindent_tab_indentation", "\t"), + ], +) +def test_do_reindent(test_key, indentation): + """Test the do_reindent function.""" + source = TEST_STRINGS[test_key]["instring"] + expected = TEST_STRINGS[test_key]["expected"] + + result = do_reindent(source, indentation) + + assert result == expected, ( + f"\nFailed {test_key}:\nExpected {expected}" f"\nGot {result}" + ) + + +@pytest.mark.unit +def test_do_find_shortest_indentation(): + """Test the do_find_shorted_indentation function.""" + assert " " == do_find_shortest_indentation( + [" ", " b", " a"], + ) + + +@pytest.mark.unit +@pytest.mark.parametrize( + "test_key", + [ + "do_normalize_summary", + "do_normalize_summary_multiline", + "do_normalize_summary_question_mark", + "do_normalize_summary_exclamation_point", + 
"do_normalize_summary_with_title", + "do_normalize_summary_capitalize_first_letter", + "do_normalize_summary_with_proprer_noun", + "do_normalize_summary_capitalize_first_letter_with_period", + "do_normalize_summary_dont_capitalize_first_letter_if_variable", + ], +) +def test_do_normalize_summary(test_key): + """Test the do_normalize_summary function.""" + source = TEST_STRINGS[test_key]["instring"] + expected = TEST_STRINGS[test_key]["expected"] + + result = do_normalize_summary(source) + + assert result == expected, ( + f"\nFailed {test_key}:\nExpected {expected}" f"\nGot {result}" + ) + + +@pytest.mark.unit +@pytest.mark.parametrize("test_key", ["do_normalize_line"]) +def test_do_normalize_line(test_key): + """Test the do_normalize_line function.""" + source = TEST_STRINGS[test_key]["instring"] + expected = TEST_STRINGS[test_key]["expected"] + + result = do_normalize_line(source, "\n") + + assert result == expected, ( + f"\nFailed {test_key}:\nExpected {expected}" f"\nGot {result}" + ) + + +@pytest.mark.unit +@pytest.mark.parametrize( + "test_key, indentation, wrap_length", [("description_to_list", " ", 72)] +) +def test_description_to_list(test_key, indentation, wrap_length): + """Test the description_to_list function.""" + source = TEST_STRINGS[test_key]["instring"] + expected = TEST_STRINGS[test_key]["expected"] + + result = description_to_list(source, indentation, wrap_length) + + assert result == expected, ( + f"\nFailed {test_key}:\nExpected {expected}" f"\nGot {result}" + ) + + +@pytest.mark.unit +@pytest.mark.parametrize( + "test_key", + [ + "do_split_first_sentence", + "do_split_first_sentence_2", + "do_split_first_sentence_3", + ], +) +def test_do_split_first_sentence(test_key): + """Test the do_split_first_sentence function.""" + source = TEST_STRINGS[test_key]["instring"] + expected = TEST_STRINGS[test_key]["expected"] + + # We convert the tuple return to a list since we can't store a tuple in a TOML file. 
+ result = list(do_split_first_sentence(source)) + + assert result == expected, ( + f"\nFailed {test_key}:\nExpected {expected}" f"\nGot {result}" + ) + + +@pytest.mark.unit +@pytest.mark.parametrize( + "test_key", + [ + "do_split_summary", + "do_split_summary_2", + "do_split_multi_sentence_summary", + "do_split_multi_sentence_summary_2", + ], +) +def test_do_split_summary(test_key): + """Test the do_split_summary function.""" + source = TEST_STRINGS[test_key]["instring"] + expected = TEST_STRINGS[test_key]["expected"] + + # We convert the tuple returned to a list since we can't store a tuple in a TOML + # file. + result = do_split_summary(source) + + assert result == expected, ( + f"\nFailed {test_key}:\nExpected {expected}" f"\nGot {result}" + ) + + +@pytest.mark.unit +@pytest.mark.parametrize( + "test_key", + [ + "do_strip_docstring", + "do_strip_docstring_triple_single_quotes", + "do_strip_docstring_empty_string", + "do_strip_docstring_raw_string", + "do_strip_docstring_raw_string_2", + "do_strip_docstring_unicode_string", + "do_strip_docstring_unicode_string_2", + "do_strip_docstring_with_unknown", + "do_strip_docstring_with_single_quotes", + "do_strip_docstring_with_double_quotes", + ], +) +def test_do_strip_docstring(test_key): + """Test the do_strip_docstring function.""" + source = TEST_STRINGS[test_key]["instring"] + expected = TEST_STRINGS[test_key]["expected"] + raises = TEST_STRINGS[test_key].get("raises") + + if raises: + with pytest.raises(eval(raises)): + do_strip_docstring(source) + else: + # We convert the tuple returned to a list since we can't store a tuple in a TOML + # file. + result = list(do_strip_docstring(source)) + + assert result == expected, ( + f"\nFailed {test_key}:\nExpected {expected}" f"\nGot {result}" ) - assert "Don't lower case I'm." 
== docformatter.normalize_summary( - "don't lower case I'm" - ) - - @pytest.mark.unit - def test_normalize_summary_capitalize_first_letter_with_period(self): - """Capitalize the first letter of the summary even when ends in period. - - See issue #184. See requirement docformatter_4.5.1. - """ - assert ( - "This is a summary that needs to be capped." - == docformatter.normalize_summary( - "this is a summary that needs to be capped." - ) - ) - - @pytest.mark.unit - def test_normalize_summary_dont_capitalize_first_letter_if_variable(self): - """Capitalize the first word unless it looks like a variable.""" - assert ( - "num_iterations should not be capitalized in this summary." - == docformatter.normalize_summary( - "num_iterations should not be capitalized in this summary" - ) - ) - - -class TestSplitters: - """Class for testing the string splitting function. - - Includes tests for: - - - split_first_sentence() - - split_summary_and_description() - """ - - @pytest.mark.unit - def test_split_first_sentence(self): - """""" - assert ( - "This is a sentence.", - " More stuff. And more stuff. .!@#$%", - ) == docformatter.split_first_sentence( - "This is a sentence. More stuff. And more stuff. .!@#$%" - ) - - assert ( - "This e.g. sentence.", - " More stuff. And more stuff. .!@#$%", - ) == docformatter.split_first_sentence( - "This e.g. sentence. More stuff. And more stuff. .!@#$%" - ) - - assert ( - "This is the first:", - "\none\ntwo", - ) == docformatter.split_first_sentence("This is the first:\none\ntwo") - - @pytest.mark.unit - def test_split_one_sentence_summary(self): - """A single sentence summary should be returned as is. - - See issue #283. - """ - assert [ - "This is a sentence.", - "", - ] == docformatter.split_summary(["This is a sentence.", ""]) - - assert [ - "This e.g. a sentence.", - "", - ] == docformatter.split_summary(["This e.g. 
a sentence.", ""]) - - @pytest.mark.unit - def test_split_multi_sentence_summary(self): - """A multi-sentence summary should return only the first as the summary. - - See issue #283. - """ - assert [ - "This is a sentence.", - "", - "This is another.", - ] == docformatter.split_summary(["This is a sentence. This is another.", ""]) - - assert [ - "This e.g. a sentence.", - "", - "This is another.", - ] == docformatter.split_summary(["This e.g. a sentence. This is another.", ""]) - - @pytest.mark.unit - def test_split_summary_and_description(self): - """""" - assert ( - "This is the first.", - "This is the second. This is the third.", - ) == docformatter.split_summary_and_description( - "This is the first. This is the second. This is the third." - ) - - @pytest.mark.unit - def test_split_summary_and_description_complex(self): - """""" - assert ( - "This is the first", - "\nThis is the second. This is the third.", - ) == docformatter.split_summary_and_description( - "This is the first\n\nThis is the second. This is the third." - ) - - @pytest.mark.unit - def test_split_summary_and_description_more_complex(self): - """""" - assert ( - "This is the first.", - "This is the second. This is the third.", - ) == docformatter.split_summary_and_description( - "This is the first.\nThis is the second. This is the third." 
- ) - - @pytest.mark.unit - def test_split_summary_and_description_with_list(self): - """""" - assert ( - "This is the first", - "- one\n- two", - ) == docformatter.split_summary_and_description( - "This is the first\n- one\n- two" - ) - - @pytest.mark.unit - def test_split_summary_and_description_with_list_of_parameters(self): - """""" - assert ( - "This is the first", - "one - one\ntwo - two", - ) == docformatter.split_summary_and_description( - "This is the first\none - one\ntwo - two" - ) - - @pytest.mark.unit - def test_split_summary_and_description_with_capital(self): - """""" - assert ( - "This is the first\nWashington", - "", - ) == docformatter.split_summary_and_description("This is the first\nWashington") - - @pytest.mark.unit - def test_split_summary_and_description_with_list_on_other_line(self): - """""" - assert ( - "Test", - " test\n @blah", - ) == docformatter.split_summary_and_description( - """\ - Test - test - @blah -""" - ) - - @pytest.mark.unit - def test_split_summary_and_description_with_other_symbol(self): - """""" - assert ( - "This is the first", - "@ one\n@ two", - ) == docformatter.split_summary_and_description( - "This is the first\n@ one\n@ two" - ) - - @pytest.mark.unit - def test_split_summary_and_description_with_colon(self): - """""" - assert ( - "This is the first:", - "one\ntwo", - ) == docformatter.split_summary_and_description("This is the first:\none\ntwo") - - @pytest.mark.unit - def test_split_summary_and_description_with_exclamation(self): - """""" - assert ( - "This is the first!", - "one\ntwo", - ) == docformatter.split_summary_and_description("This is the first!\none\ntwo") - - @pytest.mark.unit - def test_split_summary_and_description_with_question_mark(self): - """""" - assert ( - "This is the first?", - "one\ntwo", - ) == docformatter.split_summary_and_description("This is the first?\none\ntwo") - - @pytest.mark.unit - def test_split_summary_and_description_with_quote(self): - """""" - assert ( - 'This is the 
first\n"one".', - "", - ) == docformatter.split_summary_and_description('This is the first\n"one".') - - assert ( - "This is the first\n'one'.", - "", - ) == docformatter.split_summary_and_description("This is the first\n'one'.") - - assert ( - "This is the first\n``one``.", - "", - ) == docformatter.split_summary_and_description("This is the first\n``one``.") - - @pytest.mark.unit - def test_split_summary_and_description_with_punctuation(self): - """""" - assert ( - ( - """\ -Try this and this and this and this and this and this and this at - https://example.com/""", - """ - Parameters - ---------- - email : string""", - ) - == docformatter.split_summary_and_description( - """\ - Try this and this and this and this and this and this and this at - https://example.com/ - - Parameters - ---------- - email : string -""" - ) - ) - - @pytest.mark.unit - def test_split_summary_and_description_without_punctuation(self): - """""" - assert ( - ( - """\ -Try this and this and this and this and this and this and this at - this other line""", - """ - Parameters - ---------- - email : string""", - ) - == docformatter.split_summary_and_description( - """\ - Try this and this and this and this and this and this and this at - this other line - - Parameters - ---------- - email : string -""" - ) - ) - @pytest.mark.unit - def test_split_summary_and_description_with_abbreviation(self): - """""" - for text in [ - "Test e.g. now", - "Test foo, bar, etc. now", - "Test i.e. now", - "Test Dr. now", - "Test Mr. now", - "Test Mrs. now", - "Test Ms. now", - ]: - assert (text, "") == docformatter.split_summary_and_description(text) - - @pytest.mark.unit - def test_split_summary_and_description_with_url(self): - """Retain URL on second line with summary.""" - text = '''\ -"""Sequence of package managers as defined by `XKCD #1654: Universal Install Script -`_. - -See the corresponding :issue:`implementation rationale in issue #10 <10>`. 
-"""\ -''' - assert ( - '"""Sequence of package managers as defined by `XKCD #1654: Universal Install Script\n' - "`_.", - "\nSee the corresponding :issue:`implementation rationale in issue #10 <10>`." - '\n"""', - ) == docformatter.split_summary_and_description(text) - - -class TestStrippers: - """Class for testing the string stripping functions. - - Includes tests for: - - - strip_leading_blank_lines() - - strip_quotes() - - strip_newlines() - - strip_docstring() - """ - - @pytest.mark.unit - def test_remove_section_header(self): - """Remove section header directives.""" - assert "foo\nbar\n" == docformatter.remove_section_header("----\nfoo\nbar\n") - - line = "foo\nbar\n" - assert line == docformatter.remove_section_header(line) - - line = " \nfoo\nbar\n" - assert line == docformatter.remove_section_header(line) +@pytest.mark.unit +@pytest.mark.parametrize( + "test_key", + [ + "do_strip_leading_blank_lines", + ], +) +def test_do_strip_leading_blank_lines(test_key): + """Test the do_strup_leading_blank_lines function.""" + source = TEST_STRINGS[test_key]["instring"] + expected = TEST_STRINGS[test_key]["expected"] + + result = do_strip_leading_blank_lines(source) + + assert result == expected, ( + f"\nFailed {test_key}:\nExpected {expected}" f"\nGot {result}" + ) + + +@pytest.mark.unit +@pytest.mark.parametrize( + "test_key, indentation", + [ + ("do_clean_excess_whitespace", " "), + ], +) +def test_do_clean_excess_whitespace(test_key, indentation): + """Test the do_clean_excess_whitespace function.""" + source = TEST_STRINGS[test_key]["instring"] + expected = TEST_STRINGS[test_key]["expected"] + + result = do_clean_excess_whitespace(source, indentation) + + assert result == expected, ( + f"\nFailed {test_key}:\nExpected {expected}" f"\nGot {result}" + ) + + +@pytest.mark.integration +@pytest.mark.order(5) +@pytest.mark.parametrize("test_key", ["do_normalize_line_endings"]) +def test_do_normalize_line_endings(test_key): + """Test the do_normalize_line_endings 
function.""" + source = TEST_STRINGS[test_key]["instring"] + expected = TEST_STRINGS[test_key]["expected"] + + result = do_normalize_line_endings(source, "\n") + + assert result == expected, ( + f"\nFailed {test_key}:\nExpected {expected}" f"\nGot {result}" + ) + + +@pytest.mark.integration +@pytest.mark.order(1) +@pytest.mark.parametrize( + "test_key, indentation, wrap_length, style", + [ + ("do_split_description_url_outside_param", " ", 72, "sphinx"), + ("do_split_description_single_url_in_param", " ", 72, "sphinx"), + ("do_split_description_single_url_in_multiple_params", " ", 72, "sphinx"), + ("do_split_description_multiple_urls_in_param", " ", 72, "sphinx"), + ], +) +def test_do_split_description(test_key, indentation, wrap_length, style): + """Test the do_split_description function.""" + source = TEST_STRINGS[test_key]["instring"] + expected = TEST_STRINGS[test_key]["expected"] + + result = do_split_description(source, indentation, wrap_length, style) + + assert result == expected, ( + f"\nFailed {test_key}:\nExpected {expected}" f"\nGot {result}" + ) + + +@pytest.mark.integration +@pytest.mark.order(3) +@pytest.mark.parametrize( + "test_key", + [ + "do_split_summary_and_description", + "do_split_summary_and_description_complex", + "do_split_summary_and_description_more_complex", + "do_split_summary_and_description_with_list", + "do_split_summary_and_description_with_list_of_parameters", + "do_split_summary_and_description_with_capital", + "do_split_summary_and_description_with_list_on_other_line", + "do_split_summary_and_description_with_other_symbol", + "do_split_summary_and_description_with_colon", + "do_split_summary_and_description_with_exclamation", + "do_split_summary_and_description_with_question_mark", + "do_split_summary_and_description_with_double_quote", + "do_split_summary_and_description_with_single_quote", + "do_split_summary_and_description_with_double_backtick", + "do_split_summary_and_description_with_punctuation", + 
"do_split_summary_and_description_without_punctuation", + "do_split_summary_and_description_with_abbreviation", + "do_split_summary_and_description_with_url", + ], +) +def test_do_split_summary_and_description(test_key): + """Test the do_split_summary_and_description function.""" + source = TEST_STRINGS[test_key]["instring"] + expected = TEST_STRINGS[test_key]["expected"] + + # We convert the tuple returned to a list since we can't store a tuple in a TOML + # file. + result = list(do_split_summary_and_description(source)) + + assert result == expected, ( + f"\nFailed {test_key}:\nExpected {expected}" f"\nGot {result}" + ) From 11bfcefde6c4b92a8fc1cdbd72da11f2439d6761 Mon Sep 17 00:00:00 2001 From: Doyle Rowland Date: Wed, 23 Jul 2025 00:18:25 -0400 Subject: [PATCH 14/21] test: update and add tests for utility module --- tests/test_utility_functions.py | 697 +++++--------------------------- 1 file changed, 94 insertions(+), 603 deletions(-) diff --git a/tests/test_utility_functions.py b/tests/test_utility_functions.py index e127f33a..71c6bf25 100644 --- a/tests/test_utility_functions.py +++ b/tests/test_utility_functions.py @@ -4,6 +4,7 @@ # tests.test_utility_functions.py is part of the docformatter project # # Copyright (C) 2012-2023 Steven Myint +# Copyright (C) 2023-2025 Doyle "weibullguy" Rowland # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -24,614 +25,104 @@ # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -"""Module for testing utility functions used when processing docstrings. +"""Module for testing utility functions used when processing docstrings.""" -This module contains tests for utility functions. 
Utility functions are: - - find_py_files() - - has_correct_length() - - is_in_range() - - is_probably_beginning_of_sentence() - - is_some_sort_of_list() - - is_some_sort_of_code() -""" +# Standard Library Imports +import contextlib +import sys -try: - from unittest.mock import patch -except ImportError: - from mock import patch +with contextlib.suppress(ImportError): + if sys.version_info >= (3, 11): + # Standard Library Imports + import tomllib + else: + # Third Party Imports + import tomli as tomllib # Third Party Imports import pytest # docformatter Package Imports -import docformatter - -REST_SECTION_REGEX = r"[=\-`:'\"~^_*+#<>]{4,}" - - -class TestFindPyFiles: - """Class for testing the find_py_files() function.""" - - @pytest.mark.unit - def test_is_hidden(self): - """Skip files that are .hidden.""" - assert docformatter.find_py_files("not_hidden", ".hidden_file.py") - - @pytest.mark.xfail( - reason="function only checks for python files in recursive mode." - ) - def test_non_recursive_ignore_non_py_files(self): - """Only process python (*.py) files.""" - sources = ["one.py", "two.py", "three.toml"] - - test_only_py = list(docformatter.find_py_files(sources, False)) - assert test_only_py == ["one.py", "two.py"] - - @pytest.mark.unit - def test_recursive_ignore_non_py_files(self): - """Only process python (*.py) files when recursing directories.""" - sources = {"/root"} - patch_data = [ - ("/root", [], ["one.py", "two.py", "three.toml"]), - ] - - with patch("os.walk", return_value=patch_data), patch( - "os.path.isdir", return_value=True - ): - test_only_py = list(docformatter.find_py_files(sources, True)) - assert test_only_py == ["/root/one.py", "/root/two.py"] - - @pytest.mark.unit - def test_is_excluded(self): - """Skip excluded *.py files.""" - sources = {"/root"} - patch_data = [ - ("/root", ["folder_one", "folder_two"], []), - ("/root/folder_one", ["folder_three"], ["one.py"]), - ("/root/folder_one/folder_three", [], ["three.py"]), - ("/root/folder_two", 
[], ["two.py"]), - ] - - with patch("os.walk", return_value=patch_data), patch( - "os.path.isdir", return_value=True - ): - test_exclude_one = list( - docformatter.find_py_files(sources, True, ["folder_one"]) - ) - assert test_exclude_one == ["/root/folder_two/two.py"] - test_exclude_two = list( - docformatter.find_py_files(sources, True, ["folder_two"]) - ) - assert test_exclude_two == [ - "/root/folder_one/one.py", - "/root/folder_one/folder_three/three.py", - ] - test_exclude_three = list( - docformatter.find_py_files(sources, True, ["folder_three"]) - ) - assert test_exclude_three == [ - "/root/folder_one/one.py", - "/root/folder_two/two.py", - ] - test_exclude_py = list(docformatter.find_py_files(sources, True, ".py")) - assert not test_exclude_py - test_exclude_two_and_three = list( - docformatter.find_py_files( - sources, True, ["folder_two", "folder_three"] - ) - ) - assert test_exclude_two_and_three == ["/root/folder_one/one.py"] - test_exclude_files = list( - docformatter.find_py_files(sources, True, ["one.py", "two.py"]) - ) - assert test_exclude_files == ["/root/folder_one/folder_three/three.py"] - - @pytest.mark.unit - def test_nothing_is_excluded(self): - """Include all *.py files found.""" - sources = {"/root"} - patch_data = [ - ("/root", ["folder_one", "folder_two"], []), - ("/root/folder_one", ["folder_three"], ["one.py"]), - ("/root/folder_one/folder_three", [], ["three.py"]), - ("/root/folder_two", [], ["two.py"]), - ] - - with patch("os.walk", return_value=patch_data), patch( - "os.path.isdir", return_value=True - ): - test_exclude_nothing = list(docformatter.find_py_files(sources, True, [])) - assert test_exclude_nothing == [ - "/root/folder_one/one.py", - "/root/folder_one/folder_three/three.py", - "/root/folder_two/two.py", - ] - test_exclude_nothing = list(docformatter.find_py_files(sources, True)) - assert test_exclude_nothing == [ - "/root/folder_one/one.py", - "/root/folder_one/folder_three/three.py", - "/root/folder_two/two.py", - ] - - 
-class TestHasCorrectLength: - """Class for testing the has_correct_length() function.""" - - @pytest.mark.unit - def test_has_correct_length_none(self): - """Return True when passed line_length=None.""" - assert docformatter.has_correct_length(None, 1, 9) - - @pytest.mark.unit - def test_has_correct_length(self): - """Return True if the line is within the line_length.""" - assert docformatter.has_correct_length([1, 3], 3, 5) - assert docformatter.has_correct_length([1, 1], 1, 1) - assert docformatter.has_correct_length([1, 10], 5, 10) - - @pytest.mark.unit - def test_not_correct_length(self): - """Return False if the line is outside the line_length.""" - assert not docformatter.has_correct_length([1, 1], 2, 9) - assert not docformatter.has_correct_length([10, 20], 2, 9) - - -class TestIsInRange: - """Class for testing the is_in_range() function.""" - - @pytest.mark.unit - def test_is_in_range_none(self): - """Return True when passed line_range=None.""" - assert docformatter.is_in_range(None, 1, 9) - - @pytest.mark.unit - def test_is_in_range(self): - """Return True if the line is within the line_range.""" - assert docformatter.is_in_range([1, 4], 3, 5) - assert docformatter.is_in_range([1, 4], 4, 10) - assert docformatter.is_in_range([2, 10], 1, 2) - - @pytest.mark.unit - def test_not_in_range(self): - """Return False if the line outside the line_range.""" - assert not docformatter.is_in_range([1, 1], 2, 9) - assert not docformatter.is_in_range([10, 20], 1, 9) - - -class TestIsProbablySentence: - """Class for testing the is_probably_beginning_of_senstence() function.""" - - @pytest.mark.unit - def test_is_probably_beginning_of_sentence(self): - """Ignore special characters as sentence starters.""" - assert docformatter.is_probably_beginning_of_sentence( - "- This is part of a list." - ) - - assert not docformatter.is_probably_beginning_of_sentence( - "(this just continues an existing sentence)." 
- ) - - @pytest.mark.unit - def test_is_probably_beginning_of_sentence_pydoc_ref(self): - """Ignore colon as sentence starter.""" - assert not docformatter.is_probably_beginning_of_sentence( - ":see:MyClass This is not the start of a sentence." - ) - - -class TestDoFindLinks: - """Class for testing the do_find_links() function.""" - - @pytest.mark.unit - def test_do_find_file_system_link(self): - """Identify afp://, nfs://, smb:// as a link.""" - text = "This is an Apple Filing Protocol URL pattern: afp://[[:][/[]]" - assert docformatter.do_find_links(text) == [(46, 86)] - text = "This is an Network File System URL pattern: nfs://server<:port>/" - assert docformatter.do_find_links(text) == [(44, 70)] - text = "This is an Samba URL pattern: smb://[@][:][/[]][?=[;=]]" - assert docformatter.do_find_links(text) == [(30, 111)] - - @pytest.mark.unit - def test_do_find_miscellaneous_link(self): - """Identify apt:, bitcoin:, chrome://, and jar: as a link.""" - text = "This is an apt URL pattern: apt:docformatter" - assert docformatter.do_find_links(text) == [(28, 44)] - text = "This is a bitcoin URL pattern: bitcoin:
[?[amount=][&][label=