author    Zach Smith <subsetpark@gmail.com>  2016-06-04 14:26:00 -0400
committer Zach Smith <subsetpark@gmail.com>  2016-06-04 14:26:00 -0400
commit    05e7a5caacb71b61c3c7b33cbd266a1f448e2f37
tree      3f3c0cbba4692fe1e0bd9c38cfcef4e55a004b4c
parent    e472761b5954ac1c51092e9f8bab397122140fe4
parent    83ffef5da6f5b2fa919fd1374b05fdc95875dfd5
download  pycco-05e7a5caacb71b61c3c7b33cbd266a1f448e2f37.tar.gz
Merge pull request #96 from goosemo/feature/Allow-recursing-directories
Feature/allow recursing directories
-rw-r--r--  pycco/main.py        31
-rw-r--r--  tests/test_pycco.py  35

2 files changed, 57 insertions(+), 9 deletions(-)
diff --git a/pycco/main.py b/pycco/main.py
index 6a3415e..416178d 100644
--- a/pycco/main.py
+++ b/pycco/main.py
@@ -244,7 +244,8 @@ def highlight(sections, language, preserve_paths=True, outdir=None):
     output = output.replace(highlight_start, "").replace(highlight_end, "")
     fragments = re.split(language["divider_html"], output)
     for i, section in enumerate(sections):
-        section["code_html"] = highlight_start + shift(fragments, "") + highlight_end
+        section["code_html"] = highlight_start + \
+            shift(fragments, "") + highlight_end
         try:
             docs_text = unicode(section["docs_text"])
         except UnicodeError:
@@ -280,7 +281,8 @@ def generate_html(source, sections, preserve_paths=True, outdir=None):
     csspath = path.relpath(path.join(outdir, "pycco.css"), path.split(dest)[0])

     for sect in sections:
-        sect["code_html"] = re.sub(r"\{\{", r"__DOUBLE_OPEN_STACHE__", sect["code_html"])
+        sect["code_html"] = re.sub(
+            r"\{\{", r"__DOUBLE_OPEN_STACHE__", sect["code_html"])

     rendered = pycco_template({
         "title": title,
@@ -364,7 +366,8 @@ for ext, l in languages.items():

     # The mirror of `divider_text` that we expect Pygments to return. We can split
     # on this to recover the original sections.
-    l["divider_html"] = re.compile(r'\n*<span class="c[1]?">' + l["symbol"] + 'DIVIDER</span>\n*')
+    l["divider_html"] = re.compile(
+        r'\n*<span class="c[1]?">' + l["symbol"] + 'DIVIDER</span>\n*')

     # Get the Pygments Lexer for this language.
     l["lexer"] = lexers.get_lexer_by_name(l["name"])
@@ -438,7 +441,8 @@ def remove_control_chars(s):
     # Sanitization regexp copied from
     # http://stackoverflow.com/questions/92438/stripping-non-printable-characters-from-a-string-in-python
     from pycco.compat import pycco_unichr
-    control_chars = ''.join(map(pycco_unichr, list(range(0, 32)) + list(range(127, 160))))
+    control_chars = ''.join(
+        map(pycco_unichr, list(range(0, 32)) + list(range(127, 160))))
     control_char_re = re.compile(u'[{}]'.format(re.escape(control_chars)))
     return control_char_re.sub('', s)

@@ -461,6 +465,23 @@ highlight_start = "<div class=\"highlight\"><pre>"
 highlight_end = "</pre></div>"


+def _flatten_sources(sources):
+    """
+    Iterate through the list of sources; when a directory is encountered,
+    walk its tree and collect any files found beneath it.
+    """
+    _sources = []
+
+    for source in sources:
+        if os.path.isdir(source):
+            for dirpath, _, filenames in os.walk(source):
+                _sources.extend([os.path.join(dirpath, f) for f in filenames])
+        else:
+            _sources.append(source)
+
+    return _sources
+
+
 def process(sources, preserve_paths=True, outdir=None, language=None, encoding="utf8", index=False):
     """For each source file passed as argument, generate the documentation."""

@@ -469,7 +490,7 @@ def process(sources, preserve_paths=True, outdir=None, language=None, encoding="utf8", index=False):

     # Make a copy of sources given on the command line. `main()` needs the
     # original list when monitoring for changed files.
-    sources = sorted(sources)
+    sources = sorted(_flatten_sources(sources))

     # Proceed to generating the documentation.
     if sources:
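
Taken together, the changes to main.py let process() accept directories as well as individual files. A minimal sketch of the new behavior, illustrative only: the docs_src/ tree, README.md, and the docs output directory below are made-up names, and it assumes pycco is importable as pycco.main.

    import pycco.main as p

    # Hypothetical layout: docs_src/ holds a.py plus lib/b.py.
    # Directories are walked recursively; plain files pass through untouched.
    flattened = p._flatten_sources(["docs_src", "README.md"])
    # -> ["docs_src/a.py", "docs_src/lib/b.py", "README.md"] (order may vary)

    # process() now applies the same expansion before sorting its sources:
    p.process(["docs_src"], outdir="docs")
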
diff --git a/tests/test_pycco.py b/tests/test_pycco.py
index 02ef008..6db8265 100644
--- a/tests/test_pycco.py
+++ b/tests/test_pycco.py
@@ -33,7 +33,8 @@ def test_shift(fragments, default):
 @given(text(), booleans(), text(min_size=1))
 @example("/foo", True, "0")
 def test_destination(filepath, preserve_paths, outdir):
-    dest = p.destination(filepath, preserve_paths=preserve_paths, outdir=outdir)
+    dest = p.destination(
+        filepath, preserve_paths=preserve_paths, outdir=outdir)
     assert dest.startswith(outdir)
     assert dest.endswith(".html")

@@ -65,12 +66,14 @@ def test_comment_with_only_cross_ref():
     source = '''# ==Link Target==\n\ndef test_link():\n    """[[testing.py#link-target]]"""\n    pass'''
     sections = p.parse(source, PYTHON)
     p.highlight(sections, PYTHON, outdir=tempfile.gettempdir())
-    assert sections[1]['docs_html'] == '<p><a href="testing.html#link-target">testing.py</a></p>'
+    assert sections[1][
+        'docs_html'] == '<p><a href="testing.html#link-target">testing.py</a></p>'


 @given(text(), text())
 def test_get_language_specify_language(source, code):
-    assert p.get_language(source, code, language="python") == p.languages['.py']
+    assert p.get_language(
+        source, code, language="python") == p.languages['.py']

     with pytest.raises(ValueError):
         p.get_language(source, code, language="non-existent")
@@ -99,7 +102,8 @@ def test_get_language_bad_code(code):

 @given(text(max_size=64))
 def test_ensure_directory(dir_name):
-    tempdir = os.path.join(tempfile.gettempdir(), str(int(time.time())), dir_name)
+    tempdir = os.path.join(tempfile.gettempdir(),
+                           str(int(time.time())), dir_name)

     # Use sanitization from function, but only for housekeeping. We
     # pass in the unsanitized string to the function.
@@ -161,3 +165,26 @@ def test_generate_index(path_lists, outdir_list):
     file_paths = [os.path.join(*path_list) for path_list in path_lists]
     outdir = os.path.join(*outdir_list)
     generate_index.generate_index(file_paths, outdir=outdir)
+
+
+def test_flatten_sources(tmpdir):
+    sources = [str(tmpdir)]
+    expected_sources = []
+
+    # Set up the base directory with a single file in it.
+    td = tmpdir.join("test.py")
+    td.write("#!/bin/env python")
+    expected_sources.append(str(td))
+
+    # Make some more directories, each with a file present.
+    for d in ["foo", "bar", "buzz"]:
+        dd = tmpdir.mkdir(d)
+        dummy_file = dd.join("test.py")
+        dummy_file.write("#!/bin/env python")
+        expected_sources.append(str(dummy_file))
+
+    # Get the flattened version of the base directory.
+    flattened = p._flatten_sources(sources)
+
+    # Make sure that the lists are the same.
+    assert sorted(expected_sources) == sorted(flattened)
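
The test relies on pytest's tmpdir fixture, but the same check can be reproduced stand-alone. A rough equivalent sketch (assumes Python 3 for os.makedirs(exist_ok=True) and pycco importable as pycco.main):

    import os
    import tempfile

    import pycco.main as p

    # Mirror the layout the test builds: one file in the base directory,
    # plus one file in each of three subdirectories.
    base = tempfile.mkdtemp()
    expected = []
    for sub in ("", "foo", "bar", "buzz"):
        d = os.path.join(base, sub)
        os.makedirs(d, exist_ok=True)
        f = os.path.join(d, "test.py")
        with open(f, "w") as fh:
            fh.write("#!/bin/env python")
        expected.append(f)

    # Directory arguments are expanded to every file beneath them.
    assert sorted(p._flatten_sources([base])) == sorted(expected)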