| author | Georg Brandl <georg@python.org> | 2011-01-03 15:55:56 +0100 |
|---|---|---|
| committer | Georg Brandl <georg@python.org> | 2011-01-03 15:55:56 +0100 |
| commit | 9a73cdb2531e41d53e1bfad00c8eb9fd30b73082 (patch) | |
| tree | 932d5122c258e20ec6dfb0bb58edfa458e6020f9 /tests | |
| parent | 15fc7d01c13610f5bbf8ddbcde280d675b45dd54 (diff) | |
| parent | 683fc2a17849fdab3683edb7f583b7132e1e40d4 (diff) | |
| download | sphinx-9a73cdb2531e41d53e1bfad00c8eb9fd30b73082.tar.gz | |
merge with 1.0
Diffstat (limited to 'tests')
37 files changed, 1141 insertions, 953 deletions
diff --git a/tests/etree13/ElementTree.py b/tests/etree13/ElementTree.py
index d3732504..f459c7f8 100644
--- a/tests/etree13/ElementTree.py
+++ b/tests/etree13/ElementTree.py
@@ -1425,12 +1425,16 @@ class XMLParser(object):
         err.position = value.lineno, value.offset
         raise err

-    def _fixtext(self, text):
-        # convert text string to ascii, if possible
-        try:
-            return text.encode("ascii")
-        except UnicodeError:
+    if sys.version_info >= (3, 0):
+        def _fixtext(self, text):
             return text
+    else:
+        def _fixtext(self, text):
+            # convert text string to ascii, if possible
+            try:
+                return text.encode("ascii")
+            except UnicodeError:
+                return text

     def _fixname(self, key):
         # expand qname, and convert name string to ascii, if possible
diff --git a/tests/path.py b/tests/path.py
index ceb895f5..8e9afeaa 100644
--- a/tests/path.py
+++ b/tests/path.py
@@ -1,953 +1,196 @@
-""" path.py - An object representing a path to a file or directory.
-
-Example:
-
-from path import path
-d = path('/home/guido/bin')
-for f in d.files('*.py'):
-    f.chmod(0755)
-
-This module requires Python 2.2 or later.
-
-
-URL: http://www.jorendorff.com/articles/python/path
-Author: Jason Orendorff <jason.orendorff\x40gmail\x2ecom> (and others - see the url!)
-Date: 9 Mar 2007
+#!/usr/bin/env python
+# coding: utf-8
 """
+    path
+    ~~~~
+
+    :copyright: Copyright 2010 by the Sphinx team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+import os
+import sys
+import shutil
+from codecs import open

-# TODO
-#   - Tree-walking functions don't avoid symlink loops. Matt Harrison
-#     sent me a patch for this.
-#   - Bug in write_text(). It doesn't support Universal newline mode.
-#   - Better error message in listdir() when self isn't a
-#     directory. (On Windows, the error message really sucks.)
-#   - Make sure everything has a good docstring.
-#   - Add methods for regex find and replace.
-#   - guess_content_type() method?
-#   - Perhaps support arguments to touch().
-
-from __future__ import generators
-
-import sys, warnings, os, fnmatch, glob, shutil, codecs
-
-__version__ = '2.2'
-__all__ = ['path']
-
-# Platform-specific support for path.owner
-if os.name == 'nt':
-    try:
-        import win32security
-    except ImportError:
-        win32security = None
-else:
-    try:
-        import pwd
-    except ImportError:
-        pwd = None
-
-# Pre-2.3 support. Are unicode filenames supported?
-_base = str
-_getcwd = os.getcwd
-try:
-    if os.path.supports_unicode_filenames:
-        _base = unicode
-        _getcwd = os.getcwdu
-except AttributeError:
-    pass
-
-# Pre-2.3 workaround for booleans
-try:
-    True, False
-except NameError:
-    True, False = 1, 0
-
-# Pre-2.3 workaround for basestring.
-try:
-    basestring
-except NameError:
-    basestring = (str, unicode)
-
-# Universal newline support
-_textmode = 'r'
-if hasattr(file, 'newlines'):
-    _textmode = 'U'
-
-
-class TreeWalkWarning(Warning):
-    pass
-
-class path(_base):
-    """ Represents a filesystem path.
-
-    For documentation on individual methods, consult their
-    counterparts in os.path.
-    """
-
-    # --- Special Python methods.

-    def __repr__(self):
-        return 'path(%s)' % _base.__repr__(self)
+FILESYSTEMENCODING = sys.getfilesystemencoding() or sys.getdefaultencoding()

-    # Adding a path and a string yields a path.
- def __add__(self, more): - try: - resultStr = _base.__add__(self, more) - except TypeError: #Python bug - resultStr = NotImplemented - if resultStr is NotImplemented: - return resultStr - return self.__class__(resultStr) - - def __radd__(self, other): - if isinstance(other, basestring): - return self.__class__(other.__add__(self)) - else: - return NotImplemented - # The / operator joins paths. - def __div__(self, rel): - """ fp.__div__(rel) == fp / rel == fp.joinpath(rel) +class path(str): + """ + Represents a path which behaves like a string. + """ + if sys.version_info < (3, 0): + def __new__(cls, s, encoding=FILESYSTEMENCODING, errors='strict'): + if isinstance(s, unicode): + s = s.encode(encoding, errors=errors) + return str.__new__(cls, s) + return str.__new__(cls, s) - Join two path components, adding a separator character if - needed. + @property + def parent(self): """ - return self.__class__(os.path.join(self, rel)) - - # Make the / operator work even when true division is enabled. - __truediv__ = __div__ - - def getcwd(cls): - """ Return the current working directory as a path object. """ - return cls(_getcwd()) - getcwd = classmethod(getcwd) - - - # --- Operations on path strings. - - isabs = os.path.isabs - def abspath(self): return self.__class__(os.path.abspath(self)) - def normcase(self): return self.__class__(os.path.normcase(self)) - def normpath(self): return self.__class__(os.path.normpath(self)) - def realpath(self): return self.__class__(os.path.realpath(self)) - def expanduser(self): return self.__class__(os.path.expanduser(self)) - def expandvars(self): return self.__class__(os.path.expandvars(self)) - def dirname(self): return self.__class__(os.path.dirname(self)) - basename = os.path.basename - - def expand(self): - """ Clean up a filename by calling expandvars(), - expanduser(), and normpath() on it. - - This is commonly everything needed to clean up a filename - read from a configuration file, for example. + The name of the directory the file or directory is in. """ - return self.expandvars().expanduser().normpath() - - def _get_namebase(self): - base, ext = os.path.splitext(self.name) - return base - - def _get_ext(self): - f, ext = os.path.splitext(_base(self)) - return ext - - def _get_drive(self): - drive, r = os.path.splitdrive(self) - return self.__class__(drive) - - parent = property( - dirname, None, None, - """ This path's parent directory, as a new path object. - - For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib') - """) - - name = property( - basename, None, None, - """ The name of this file or directory without the full path. - - For example, path('/usr/local/lib/libpython.so').name == 'libpython.so' - """) + return self.__class__(os.path.dirname(self)) - namebase = property( - _get_namebase, None, None, - """ The same as path.name, but with one file extension stripped off. - - For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz', - but path('/home/guido/python.tar.gz').namebase == 'python.tar' - """) - - ext = property( - _get_ext, None, None, - """ The file extension, for example '.py'. """) - - drive = property( - _get_drive, None, None, - """ The drive specifier, for example 'C:'. - This is always empty on systems that don't use drive specifiers. - """) - - def splitpath(self): - """ p.splitpath() -> Return (p.parent, p.name). """ - parent, child = os.path.split(self) - return self.__class__(parent), child - - def splitdrive(self): - """ p.splitdrive() -> Return (p.drive, <the rest of p>). 
- - Split the drive specifier from this path. If there is - no drive specifier, p.drive is empty, so the return value - is simply (path(''), p). This is always the case on Unix. + def abspath(self): """ - drive, rel = os.path.splitdrive(self) - return self.__class__(drive), rel - - def splitext(self): - """ p.splitext() -> Return (p.stripext(), p.ext). - - Split the filename extension from this path and return - the two parts. Either part may be empty. - - The extension is everything from '.' to the end of the - last path segment. This has the property that if - (a, b) == p.splitext(), then a + b == p. + Returns the absolute path. """ - filename, ext = os.path.splitext(self) - return self.__class__(filename), ext - - def stripext(self): - """ p.stripext() -> Remove one file extension from the path. + return self.__class__(os.path.abspath(self)) - For example, path('/home/guido/python.tar.gz').stripext() - returns path('/home/guido/python.tar'). + def isabs(self): """ - return self.splitext()[0] - - if hasattr(os.path, 'splitunc'): - def splitunc(self): - unc, rest = os.path.splitunc(self) - return self.__class__(unc), rest - - def _get_uncshare(self): - unc, r = os.path.splitunc(self) - return self.__class__(unc) - - uncshare = property( - _get_uncshare, None, None, - """ The UNC mount point for this path. - This is empty for paths on local drives. """) - - def joinpath(self, *args): - """ Join two or more path components, adding a separator - character (os.sep) if needed. Returns a new path - object. - """ - return self.__class__(os.path.join(self, *args)) - - def splitall(self): - r""" Return a list of the path components in this path. - - The first item in the list will be a path. Its value will be - either os.curdir, os.pardir, empty, or the root directory of - this path (for example, '/' or 'C:\\'). The other items in - the list will be strings. - - path.path.joinpath(*result) will yield the original path. - """ - parts = [] - loc = self - while loc != os.curdir and loc != os.pardir: - prev = loc - loc, child = prev.splitpath() - if loc == prev: - break - parts.append(child) - parts.append(loc) - parts.reverse() - return parts - - def relpath(self): - """ Return this path as a relative path, - based from the current working directory. - """ - cwd = self.__class__(os.getcwd()) - return cwd.relpathto(self) - - def relpathto(self, dest): - """ Return a relative path from self to dest. - - If there is no relative path from self to dest, for example if - they reside on different drives in Windows, then this returns - dest.abspath(). - """ - origin = self.abspath() - dest = self.__class__(dest).abspath() - - orig_list = origin.normcase().splitall() - # Don't normcase dest! We want to preserve the case. - dest_list = dest.splitall() - - if orig_list[0] != os.path.normcase(dest_list[0]): - # Can't get here from there. - return dest - - # Find the location where the two paths start to differ. - i = 0 - for start_seg, dest_seg in zip(orig_list, dest_list): - if start_seg != os.path.normcase(dest_seg): - break - i += 1 - - # Now i is the point where the two paths diverge. - # Need a certain number of "os.pardir"s to work up - # from the origin to the point of divergence. - segments = [os.pardir] * (len(orig_list) - i) - # Need to add the diverging part of dest_list. - segments += dest_list[i:] - if len(segments) == 0: - # If they happen to be identical, use os.curdir. 
- relpath = os.curdir - else: - relpath = os.path.join(*segments) - return self.__class__(relpath) - - # --- Listing, searching, walking, and matching - - def listdir(self, pattern=None): - """ D.listdir() -> List of items in this directory. - - Use D.files() or D.dirs() instead if you want a listing - of just files or just subdirectories. + Returns ``True`` if the path is absolute. + """ + return os.path.isabs(self) - The elements of the list are path objects. + def isdir(self): + """ + Returns ``True`` if the path is a directory. + """ + return os.path.isdir(self) - With the optional 'pattern' argument, this only lists - items whose names match the given pattern. + def isfile(self): """ - names = os.listdir(self) - if pattern is not None: - names = fnmatch.filter(names, pattern) - return [self / child for child in names] + Returns ``True`` if the path is a file. + """ + return os.path.isfile(self) - def dirs(self, pattern=None): - """ D.dirs() -> List of this directory's subdirectories. + def islink(self): + """ + Returns ``True`` if the path is a symbolic link. + """ + return os.path.islink(self) - The elements of the list are path objects. - This does not walk recursively into subdirectories - (but see path.walkdirs). + def ismount(self): + """ + Returns ``True`` if the path is a mount point. + """ + return os.path.ismount(self) - With the optional 'pattern' argument, this only lists - directories whose names match the given pattern. For - example, d.dirs('build-*'). + def rmtree(self, ignore_errors=False, onerror=None): """ - return [p for p in self.listdir(pattern) if p.isdir()] + Removes the file or directory and any files or directories it may + contain. - def files(self, pattern=None): - """ D.files() -> List of the files in this directory. + :param ignore_errors: + If ``True`` errors are silently ignored, otherwise an exception + is raised in case an error occurs. - The elements of the list are path objects. - This does not walk into subdirectories (see path.walkfiles). + :param onerror: + A callback which gets called with the arguments `func`, `path` and + `exc_info`. `func` is one of :func:`os.listdir`, :func:`os.remove` + or :func:`os.rmdir`. `path` is the argument to the function which + caused it to fail and `exc_info` is a tuple as returned by + :func:`sys.exc_info`. + """ + shutil.rmtree(self, ignore_errors=ignore_errors, onerror=onerror) - With the optional 'pattern' argument, this only lists files - whose names match the given pattern. For example, - d.files('*.pyc'). + def copytree(self, destination, symlinks=False): """ + Recursively copy a directory to the given `destination`. If the given + `destination` does not exist it will be created. - return [p for p in self.listdir(pattern) if p.isfile()] + :param symlinks: + If ``True`` symbolic links in the source tree result in symbolic + links in the destination tree otherwise the contents of the files + pointed to by the symbolic links are copied. + """ + shutil.copytree(self, destination, symlinks=symlinks) - def walk(self, pattern=None, errors='strict'): - """ D.walk() -> iterator over files and subdirs, recursively. + def movetree(self, destination): + """ + Recursively move the file or directory to the given `destination` + similar to the Unix "mv" command. - The iterator yields path objects naming each child item of - this directory and its descendants. This requires that - D.isdir(). + If the `destination` is a file it may be overwritten depending on the + :func:`os.rename` semantics. 
+ """ + shutil.move(self, destination) - This performs a depth-first traversal of the directory tree. - Each directory is returned just before all its children. + move = movetree - The errors= keyword argument controls behavior when an - error occurs. The default is 'strict', which causes an - exception. The other allowed values are 'warn', which - reports the error via warnings.warn(), and 'ignore'. + def unlink(self): """ - if errors not in ('strict', 'warn', 'ignore'): - raise ValueError("invalid errors parameter") - - try: - childList = self.listdir() - except Exception: - if errors == 'ignore': - return - elif errors == 'warn': - warnings.warn( - "Unable to list directory '%s': %s" - % (self, sys.exc_info()[1]), - TreeWalkWarning) - return - else: - raise - - for child in childList: - if pattern is None or child.fnmatch(pattern): - yield child - try: - isdir = child.isdir() - except Exception: - if errors == 'ignore': - isdir = False - elif errors == 'warn': - warnings.warn( - "Unable to access '%s': %s" - % (child, sys.exc_info()[1]), - TreeWalkWarning) - isdir = False - else: - raise - - if isdir: - for item in child.walk(pattern, errors): - yield item - - def walkdirs(self, pattern=None, errors='strict'): - """ D.walkdirs() -> iterator over subdirs, recursively. - - With the optional 'pattern' argument, this yields only - directories whose names match the given pattern. For - example, mydir.walkdirs('*test') yields only directories - with names ending in 'test'. - - The errors= keyword argument controls behavior when an - error occurs. The default is 'strict', which causes an - exception. The other allowed values are 'warn', which - reports the error via warnings.warn(), and 'ignore'. - """ - if errors not in ('strict', 'warn', 'ignore'): - raise ValueError("invalid errors parameter") + Removes a file. + """ + os.unlink(self) + def write_text(self, text, **kwargs): + """ + Writes the given `text` to the file. + """ + f = open(self, 'w', **kwargs) try: - dirs = self.dirs() - except Exception: - if errors == 'ignore': - return - elif errors == 'warn': - warnings.warn( - "Unable to list directory '%s': %s" - % (self, sys.exc_info()[1]), - TreeWalkWarning) - return - else: - raise - - for child in dirs: - if pattern is None or child.fnmatch(pattern): - yield child - for subsubdir in child.walkdirs(pattern, errors): - yield subsubdir - - def walkfiles(self, pattern=None, errors='strict'): - """ D.walkfiles() -> iterator over files in D, recursively. - - The optional argument, pattern, limits the results to files - with names that match the pattern. For example, - mydir.walkfiles('*.tmp') yields only files with the .tmp - extension. - """ - if errors not in ('strict', 'warn', 'ignore'): - raise ValueError("invalid errors parameter") + f.write(text) + finally: + f.close() + def text(self, **kwargs): + """ + Returns the text in the file. 
+ """ + f = open(self, mode='U', **kwargs) try: - childList = self.listdir() - except Exception: - if errors == 'ignore': - return - elif errors == 'warn': - warnings.warn( - "Unable to list directory '%s': %s" - % (self, sys.exc_info()[1]), - TreeWalkWarning) - return - else: - raise - - for child in childList: - try: - isfile = child.isfile() - isdir = not isfile and child.isdir() - except: - if errors == 'ignore': - continue - elif errors == 'warn': - warnings.warn( - "Unable to access '%s': %s" - % (self, sys.exc_info()[1]), - TreeWalkWarning) - continue - else: - raise - - if isfile: - if pattern is None or child.fnmatch(pattern): - yield child - elif isdir: - for f in child.walkfiles(pattern, errors): - yield f - - def fnmatch(self, pattern): - """ Return True if self.name matches the given pattern. - - pattern - A filename pattern with wildcards, - for example '*.py'. - """ - return fnmatch.fnmatch(self.name, pattern) - - def glob(self, pattern): - """ Return a list of path objects that match the pattern. - - pattern - a path relative to this directory, with wildcards. - - For example, path('/users').glob('*/bin/*') returns a list - of all the files users have in their bin directories. - """ - cls = self.__class__ - return [cls(s) for s in glob.glob(_base(self / pattern))] - - - # --- Reading or writing an entire file at once. - - def open(self, mode='r'): - """ Open this file. Return a file object. """ - return file(self, mode) + return f.read() + finally: + f.close() def bytes(self): - """ Open this file, read all bytes, return them as a string. """ - f = self.open('rb') + """ + Returns the bytes in the file. + """ + f = open(self, mode='rb') try: return f.read() finally: f.close() def write_bytes(self, bytes, append=False): - """ Open this file and write the given bytes to it. + """ + Writes the given `bytes` to the file. - Default behavior is to overwrite any existing file. - Call p.write_bytes(bytes, append=True) to append instead. + :param append: + If ``True`` given `bytes` are added at the end of the file. """ if append: mode = 'ab' else: mode = 'wb' - f = self.open(mode) + f = open(self, mode=mode) try: f.write(bytes) finally: f.close() - def text(self, encoding=None, errors='strict'): - r""" Open this file, read it in, return the content as a string. - - This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r' - are automatically translated to '\n'. - - Optional arguments: - - encoding - The Unicode encoding (or character set) of - the file. If present, the content of the file is - decoded and returned as a unicode object; otherwise - it is returned as an 8-bit str. - errors - How to handle Unicode errors; see help(str.decode) - for the options. Default is 'strict'. + def exists(self): """ - if encoding is None: - # 8-bit - f = self.open(_textmode) - try: - return f.read() - finally: - f.close() - else: - # Unicode - f = codecs.open(self, 'r', encoding, errors) - # (Note - Can't use 'U' mode here, since codecs.open - # doesn't support 'U' mode, even in Python 2.3.) - try: - t = f.read() - finally: - f.close() - return (t.replace(u'\r\n', u'\n') - .replace(u'\r\x85', u'\n') - .replace(u'\r', u'\n') - .replace(u'\x85', u'\n') - .replace(u'\u2028', u'\n')) - - def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False): - r""" Write the given text to this file. - - The default behavior is to overwrite any existing file; - to append instead, use the 'append=True' keyword argument. 
- - There are two differences between path.write_text() and - path.write_bytes(): newline handling and Unicode handling. - See below. - - Parameters: - - - text - str/unicode - The text to be written. - - - encoding - str - The Unicode encoding that will be used. - This is ignored if 'text' isn't a Unicode string. - - - errors - str - How to handle Unicode encoding errors. - Default is 'strict'. See help(unicode.encode) for the - options. This is ignored if 'text' isn't a Unicode - string. - - - linesep - keyword argument - str/unicode - The sequence of - characters to be used to mark end-of-line. The default is - os.linesep. You can also specify None; this means to - leave all newlines as they are in 'text'. - - - append - keyword argument - bool - Specifies what to do if - the file already exists (True: append to the end of it; - False: overwrite it.) The default is False. - - - --- Newline handling. - - write_text() converts all standard end-of-line sequences - ('\n', '\r', and '\r\n') to your platform's default end-of-line - sequence (see os.linesep; on Windows, for example, the - end-of-line marker is '\r\n'). - - If you don't like your platform's default, you can override it - using the 'linesep=' keyword argument. If you specifically want - write_text() to preserve the newlines as-is, use 'linesep=None'. - - This applies to Unicode text the same as to 8-bit text, except - there are three additional standard Unicode end-of-line sequences: - u'\x85', u'\r\x85', and u'\u2028'. - - (This is slightly different from when you open a file for - writing with fopen(filename, "w") in C or file(filename, 'w') - in Python.) - - - --- Unicode - - If 'text' isn't Unicode, then apart from newline handling, the - bytes are written verbatim to the file. The 'encoding' and - 'errors' arguments are not used and must be omitted. - - If 'text' is Unicode, it is first converted to bytes using the - specified 'encoding' (or the default encoding if 'encoding' - isn't specified). The 'errors' argument applies only to this - conversion. - - """ - if isinstance(text, unicode): - if linesep is not None: - # Convert all standard end-of-line sequences to - # ordinary newline characters. - text = (text.replace(u'\r\n', u'\n') - .replace(u'\r\x85', u'\n') - .replace(u'\r', u'\n') - .replace(u'\x85', u'\n') - .replace(u'\u2028', u'\n')) - text = text.replace(u'\n', linesep) - if encoding is None: - encoding = sys.getdefaultencoding() - bytes = text.encode(encoding, errors) - else: - # It is an error to specify an encoding if 'text' is - # an 8-bit string. - assert encoding is None - - if linesep is not None: - text = (text.replace('\r\n', '\n') - .replace('\r', '\n')) - bytes = text.replace('\n', linesep) - - self.write_bytes(bytes, append) - - def lines(self, encoding=None, errors='strict', retain=True): - r""" Open this file, read all lines, return them in a list. - - Optional arguments: - encoding - The Unicode encoding (or character set) of - the file. The default is None, meaning the content - of the file is read as 8-bit characters and returned - as a list of (non-Unicode) str objects. - errors - How to handle Unicode errors; see help(str.decode) - for the options. Default is 'strict' - retain - If true, retain newline characters; but all newline - character combinations ('\r', '\n', '\r\n') are - translated to '\n'. If false, newline characters are - stripped off. Default is True. - - This uses 'U' mode in Python 2.3 and later. 
- """ - if encoding is None and retain: - f = self.open(_textmode) - try: - return f.readlines() - finally: - f.close() - else: - return self.text(encoding, errors).splitlines(retain) - - def write_lines(self, lines, encoding=None, errors='strict', - linesep=os.linesep, append=False): - r""" Write the given lines of text to this file. - - By default this overwrites any existing file at this path. - - This puts a platform-specific newline sequence on every line. - See 'linesep' below. - - lines - A list of strings. - - encoding - A Unicode encoding to use. This applies only if - 'lines' contains any Unicode strings. - - errors - How to handle errors in Unicode encoding. This - also applies only to Unicode strings. - - linesep - The desired line-ending. This line-ending is - applied to every line. If a line already has any - standard line ending ('\r', '\n', '\r\n', u'\x85', - u'\r\x85', u'\u2028'), that will be stripped off and - this will be used instead. The default is os.linesep, - which is platform-dependent ('\r\n' on Windows, '\n' on - Unix, etc.) Specify None to write the lines as-is, - like file.writelines(). - - Use the keyword argument append=True to append lines to the - file. The default is to overwrite the file. Warning: - When you use this with Unicode data, if the encoding of the - existing data in the file is different from the encoding - you specify with the encoding= parameter, the result is - mixed-encoding data, which can really confuse someone trying - to read the file later. + Returns ``True`` if the path exist. """ - if append: - mode = 'ab' - else: - mode = 'wb' - f = self.open(mode) - try: - for line in lines: - isUnicode = isinstance(line, unicode) - if linesep is not None: - # Strip off any existing line-end and add the - # specified linesep string. - if isUnicode: - if line[-2:] in (u'\r\n', u'\x0d\x85'): - line = line[:-2] - elif line[-1:] in (u'\r', u'\n', - u'\x85', u'\u2028'): - line = line[:-1] - else: - if line[-2:] == '\r\n': - line = line[:-2] - elif line[-1:] in ('\r', '\n'): - line = line[:-1] - line += linesep - if isUnicode: - if encoding is None: - encoding = sys.getdefaultencoding() - line = line.encode(encoding, errors) - f.write(line) - finally: - f.close() - - # --- Methods for querying the filesystem. - - exists = os.path.exists - isdir = os.path.isdir - isfile = os.path.isfile - islink = os.path.islink - ismount = os.path.ismount - - if hasattr(os.path, 'samefile'): - samefile = os.path.samefile - - getatime = os.path.getatime - atime = property( - getatime, None, None, - """ Last access time of the file. """) - - getmtime = os.path.getmtime - mtime = property( - getmtime, None, None, - """ Last-modified time of the file. """) - - if hasattr(os.path, 'getctime'): - getctime = os.path.getctime - ctime = property( - getctime, None, None, - """ Creation time of the file. """) - - getsize = os.path.getsize - size = property( - getsize, None, None, - """ Size of the file, in bytes. """) - - if hasattr(os, 'access'): - def access(self, mode): - """ Return true if current user has access to this path. - - mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK - """ - return os.access(self, mode) - - def stat(self): - """ Perform a stat() system call on this path. """ - return os.stat(self) - - def lstat(self): - """ Like path.stat(), but do not follow symbolic links. """ - return os.lstat(self) - - def get_owner(self): - r""" Return the name of the owner of this file or directory. - - This follows symbolic links. 
- - On Windows, this returns a name of the form ur'DOMAIN\User Name'. - On Windows, a group can own a file or directory. - """ - if os.name == 'nt': - if win32security is None: - raise Exception("path.owner requires win32all to be installed") - desc = win32security.GetFileSecurity( - self, win32security.OWNER_SECURITY_INFORMATION) - sid = desc.GetSecurityDescriptorOwner() - account, domain, typecode = win32security.LookupAccountSid(None, sid) - return domain + u'\\' + account - else: - if pwd is None: - raise NotImplementedError("path.owner is not implemented on this platform.") - st = self.stat() - return pwd.getpwuid(st.st_uid).pw_name - - owner = property( - get_owner, None, None, - """ Name of the owner of this file or directory. """) - - if hasattr(os, 'statvfs'): - def statvfs(self): - """ Perform a statvfs() system call on this path. """ - return os.statvfs(self) - - if hasattr(os, 'pathconf'): - def pathconf(self, name): - return os.pathconf(self, name) - - - # --- Modifying operations on files and directories - - def utime(self, times): - """ Set the access and modified times of this file. """ - os.utime(self, times) - - def chmod(self, mode): - os.chmod(self, mode) + return os.path.exists(self) - if hasattr(os, 'chown'): - def chown(self, uid, gid): - os.chown(self, uid, gid) - - def rename(self, new): - os.rename(self, new) - - def renames(self, new): - os.renames(self, new) - - - # --- Create/delete operations on directories - - def mkdir(self, mode=0777): - os.mkdir(self, mode) + def lexists(self): + """ + Returns ``True`` if the path exists unless it is a broken symbolic + link. + """ + return os.path.lexists(self) def makedirs(self, mode=0777): + """ + Recursively create directories. + """ os.makedirs(self, mode) - def rmdir(self): - os.rmdir(self) - - def removedirs(self): - os.removedirs(self) - - - # --- Modifying operations on files - - def touch(self): - """ Set the access/modified times of this file to the current time. - Create the file if it does not exist. + def joinpath(self, *args): """ - fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666) - os.close(fd) - os.utime(self, None) - - def remove(self): - os.remove(self) - - def unlink(self): - os.unlink(self) - - - # --- Links - - if hasattr(os, 'link'): - def link(self, newpath): - """ Create a hard link at 'newpath', pointing to this file. """ - os.link(self, newpath) - - if hasattr(os, 'symlink'): - def symlink(self, newlink): - """ Create a symbolic link at 'newlink', pointing here. """ - os.symlink(self, newlink) - - if hasattr(os, 'readlink'): - def readlink(self): - """ Return the path to which this symbolic link points. - - The result may be an absolute or a relative path. - """ - return self.__class__(os.readlink(self)) - - def readlinkabs(self): - """ Return the path to which this symbolic link points. - - The result is always an absolute path. - """ - p = self.readlink() - if p.isabs(): - return p - else: - return (self.parent / p).abspath() - - - # --- High-level functions from shutil - - copyfile = shutil.copyfile - copymode = shutil.copymode - copystat = shutil.copystat - copy = shutil.copy - copy2 = shutil.copy2 - copytree = shutil.copytree - if hasattr(shutil, 'move'): - move = shutil.move - rmtree = shutil.rmtree - - - # --- Special stuff from os - - if hasattr(os, 'chroot'): - def chroot(self): - os.chroot(self) + Joins the path with the argument given and returns the result. 
+ """ + return self.__class__(os.path.join(self, *map(self.__class__, args))) - if hasattr(os, 'startfile'): - def startfile(self): - os.startfile(self) + __div__ = __truediv__ = joinpath + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, str.__repr__(self)) diff --git a/tests/root/bom.po b/tests/root/bom.po new file mode 100644 index 00000000..c6025eb1 --- /dev/null +++ b/tests/root/bom.po @@ -0,0 +1,12 @@ +#, fuzzy +msgid "" +msgstr "" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +msgid "File with UTF-8 BOM" +msgstr "Datei mit UTF-8" + +msgid "This file has a UTF-8 \"BOM\"." +msgstr "This file has umlauts: äöü." diff --git a/tests/root/conf.py b/tests/root/conf.py index a50029e5..b97ddfcc 100644 --- a/tests/root/conf.py +++ b/tests/root/conf.py @@ -22,7 +22,7 @@ copyright = '2010, Georg Brandl & Team' version = '0.6' release = '0.6alpha1' today_fmt = '%B %d, %Y' -#unused_docs = [] +# unused_docs = [] exclude_patterns = ['_build', '**/excluded.*'] keep_warnings = True pygments_style = 'sphinx' @@ -49,6 +49,11 @@ latex_documents = [ latex_additional_files = ['svgimg.svg'] +texinfo_documents = [ + ('contents', 'SphinxTests', 'Sphinx Tests', + 'Georg Brandl \\and someone else', 'Sphinx Testing', 'Miscellaneous'), +] + value_from_conf_py = 84 coverage_c_path = ['special/*.h'] diff --git a/tests/root/contents.txt b/tests/root/contents.txt index e052e04b..280953b4 100644 --- a/tests/root/contents.txt +++ b/tests/root/contents.txt @@ -26,6 +26,7 @@ Contents: extensions doctest extensions + versioning/index Python <http://python.org/> diff --git a/tests/root/doctest.txt b/tests/root/doctest.txt index 35cdd589..ba9a72c5 100644 --- a/tests/root/doctest.txt +++ b/tests/root/doctest.txt @@ -30,7 +30,7 @@ Special directives .. testcode:: - print 1+1 + print(1+1) .. testoutput:: @@ -50,30 +50,31 @@ Special directives .. testsetup:: * - from math import floor + def squared(x): + return x * x .. doctest:: - >>> floor(1.2) - 1.0 + >>> squared(2) + 4 .. testcode:: - print floor(1.2) + print(squared(2)) .. testoutput:: - 1.0 + 4 - >>> floor(1.2) - 1.0 + >>> squared(2) + 4 * options for testcode/testoutput blocks .. testcode:: :hide: - print 'Output text.' + print('Output text.') .. testoutput:: :hide: @@ -85,36 +86,38 @@ Special directives .. testsetup:: group1 - from math import ceil + def add(x, y): + return x + y - ``ceil`` is now known in "group1", but not in others. + + ``add`` is now known in "group1", but not in others. .. doctest:: group1 - >>> ceil(0.8) - 1.0 + >>> add(1, 1) + 2 .. doctest:: group2 - >>> ceil(0.8) + >>> add(1, 1) Traceback (most recent call last): ... - NameError: name 'ceil' is not defined + NameError: name 'add' is not defined Interleaving testcode/testoutput: .. testcode:: group1 - print ceil(0.8) + print(squared(3)) .. testcode:: group2 - print floor(0.8) + print(squared(4)) .. testoutput:: group1 - 1.0 + 9 .. testoutput:: group2 - 0.0 + 16 diff --git a/tests/root/literal.inc b/tests/root/literal.inc index d5b9890c..694f15ed 100644 --- a/tests/root/literal.inc +++ b/tests/root/literal.inc @@ -1,7 +1,7 @@ # Literally included file using Python highlighting # -*- coding: utf-8 -*- -foo = u"Including Unicode characters: üöä" +foo = "Including Unicode characters: üöä" class Foo: pass diff --git a/tests/root/markup.txt b/tests/root/markup.txt index da71cf86..fab6d78c 100644 --- a/tests/root/markup.txt +++ b/tests/root/markup.txt @@ -142,6 +142,8 @@ Adding \n to test unescaping. 
Test :abbr:`abbr (abbreviation)` and another :abbr:`abbr (abbreviation)`. +Testing the :index:`index` role, also available with +:index:`explicit <pair: title; explicit>` title. .. _with: diff --git a/tests/root/subdir.po b/tests/root/subdir.po new file mode 100644 index 00000000..f515f220 --- /dev/null +++ b/tests/root/subdir.po @@ -0,0 +1,9 @@ +#, fuzzy +msgid "" +msgstr "" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +msgid "Including in subdir" +msgstr "translation" diff --git a/tests/root/versioning/added.txt b/tests/root/versioning/added.txt new file mode 100644 index 00000000..22a70739 --- /dev/null +++ b/tests/root/versioning/added.txt @@ -0,0 +1,20 @@ +Versioning test text +==================== + +So the thing is I need some kind of text - not the lorem ipsum stuff, that +doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find +a good text for that under public domain so I thought the easiest solution is +to write one by myself. It's not really interesting, in fact it is *really* +boring. + +Anyway I need more than one paragraph, at least three for the original +document, I think, and another one for two different ones. + +So the previous paragraph was a bit short because I don't want to test this +only on long paragraphs, I hope it was short enough to cover most stuff. +Anyway I see this lacks ``some markup`` so I have to add a **little** bit. + +Woho another paragraph, if this test fails we really have a problem because +this means the algorithm itself fails and not the diffing algorithm which is +pretty much doomed anyway as it probably fails for some kind of language +respecting certain nodes anyway but we can't work around that anyway. diff --git a/tests/root/versioning/deleted.txt b/tests/root/versioning/deleted.txt new file mode 100644 index 00000000..a1a9c4c9 --- /dev/null +++ b/tests/root/versioning/deleted.txt @@ -0,0 +1,12 @@ +Versioning test text +==================== + +So the thing is I need some kind of text - not the lorem ipsum stuff, that +doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find +a good text for that under public domain so I thought the easiest solution is +to write one by myself. It's not really interesting, in fact it is *really* +boring. + +So the previous paragraph was a bit short because I don't want to test this +only on long paragraphs, I hope it was short enough to cover most stuff. +Anyway I see this lacks ``some markup`` so I have to add a **little** bit. diff --git a/tests/root/versioning/deleted_end.txt b/tests/root/versioning/deleted_end.txt new file mode 100644 index 00000000..f30e6300 --- /dev/null +++ b/tests/root/versioning/deleted_end.txt @@ -0,0 +1,11 @@ +Versioning test text +==================== + +So the thing is I need some kind of text - not the lorem ipsum stuff, that +doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find +a good text for that under public domain so I thought the easiest solution is +to write one by myself. It's not really interesting, in fact it is *really* +boring. + +Anyway I need more than one paragraph, at least three for the original +document, I think, and another one for two different ones. diff --git a/tests/root/versioning/index.txt b/tests/root/versioning/index.txt new file mode 100644 index 00000000..9d098f75 --- /dev/null +++ b/tests/root/versioning/index.txt @@ -0,0 +1,13 @@ +Versioning Stuff +================ + +.. 
toctree::
+
+   original
+   added
+   insert
+   deleted
+   deleted_end
+   modified
+   insert_beginning
+   insert_similar
diff --git a/tests/root/versioning/insert.txt b/tests/root/versioning/insert.txt
new file mode 100644
index 00000000..1c157cc9
--- /dev/null
+++ b/tests/root/versioning/insert.txt
@@ -0,0 +1,18 @@
+Versioning test text
+====================
+
+So the thing is I need some kind of text - not the lorem ipsum stuff, that
+doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find
+a good text for that under public domain so I thought the easiest solution is
+to write one by myself. It's not really interesting, in fact it is *really*
+boring.
+
+So this paragraph is just something I inserted in this document to test if our
+algorithm notices that this paragraph is not just a changed version.
+
+Anyway I need more than one paragraph, at least three for the original
+document, I think, and another one for two different ones.
+
+So the previous paragraph was a bit short because I don't want to test this
+only on long paragraphs, I hope it was short enough to cover most stuff.
+Anyway I see this lacks ``some markup`` so I have to add a **little** bit.
diff --git a/tests/root/versioning/insert_beginning.txt b/tests/root/versioning/insert_beginning.txt
new file mode 100644
index 00000000..57102a76
--- /dev/null
+++ b/tests/root/versioning/insert_beginning.txt
@@ -0,0 +1,18 @@
+Versioning test text
+====================
+
+Apperantly inserting a paragraph at the beginning of a document caused
+problems earlier so this document should be used to test that.
+
+So the thing is I need some kind of text - not the lorem ipsum stuff, that
+doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find
+a good text for that under public domain so I thought the easiest solution is
+to write one by myself. It's not really interesting, in fact it is *really*
+boring.
+
+Anyway I need more than one paragraph, at least three for the original
+document, I think, and another one for two different ones.
+
+So the previous paragraph was a bit short because I don't want to test this
+only on long paragraphs, I hope it was short enough to cover most stuff.
+Anyway I see this lacks ``some markup`` so I have to add a **little** bit.
diff --git a/tests/root/versioning/insert_similar.txt b/tests/root/versioning/insert_similar.txt
new file mode 100644
index 00000000..ee9b5305
--- /dev/null
+++ b/tests/root/versioning/insert_similar.txt
@@ -0,0 +1,17 @@
+Versioning test text
+====================
+
+So the thing is I need some kind of text - not the lorem ipsum stuff, that
+doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find
+a good text for that under public domain so I thought the easiest solution is
+to write one by myself. It's not really interesting, in fact it is *really*
+boring.
+
+Anyway I need more
+
+Anyway I need more than one paragraph, at least three for the original
+document, I think, and another one for two different ones.
+
+So the previous paragraph was a bit short because I don't want to test this
+only on long paragraphs, I hope it was short enough to cover most stuff.
+Anyway I see this lacks ``some markup`` so I have to add a **little** bit.
diff --git a/tests/root/versioning/modified.txt b/tests/root/versioning/modified.txt
new file mode 100644
index 00000000..49cdad93
--- /dev/null
+++ b/tests/root/versioning/modified.txt
@@ -0,0 +1,17 @@
+Versioning test text
+====================
+
+So the thing is I need some kind of text - not the lorem ipsum stuff, that
+doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find
+a good text for that under public domain so I thought the easiest solution is
+to write one by myself. Inserting something silly as a modification, btw. have
+you seen the typo below?. It's not really interesting, in fact it is *really*
+boring.
+
+Anyway I need more than one paragraph, at least three for the original
+document, I think, and another one for two different ones. So this is a small
+modification by adding something to this paragraph.
+
+So the previous paragraph was a bit short because I don't want to test this
+only on long paragraphs, I hoep it was short enough to cover most stuff.
+Anyway I see this lacks ``some markup`` so I have to add a **little** bit.
diff --git a/tests/root/versioning/original.txt b/tests/root/versioning/original.txt
new file mode 100644
index 00000000..b3fe0609
--- /dev/null
+++ b/tests/root/versioning/original.txt
@@ -0,0 +1,15 @@
+Versioning test text
+====================
+
+So the thing is I need some kind of text - not the lorem ipsum stuff, that
+doesn't work out that well - to test :mod:`sphinx.versioning`. I couldn't find
+a good text for that under public domain so I thought the easiest solution is
+to write one by myself. It's not really interesting, in fact it is *really*
+boring.
+
+Anyway I need more than one paragraph, at least three for the original
+document, I think, and another one for two different ones.
+
+So the previous paragraph was a bit short because I don't want to test this
+only on long paragraphs, I hope it was short enough to cover most stuff.
+Anyway I see this lacks ``some markup`` so I have to add a **little** bit.
diff --git a/tests/run.py b/tests/run.py
index 0cb41442..50567fbc 100755
--- a/tests/run.py
+++ b/tests/run.py
@@ -11,7 +11,17 @@
 """

 import sys
-from os import path
+from os import path, chdir, listdir
+
+if sys.version_info >= (3, 0):
+    print('Copying and converting sources to build/lib/tests...')
+    from distutils.util import copydir_run_2to3
+    testroot = path.dirname(__file__) or '.'
+    newroot = path.join(testroot, path.pardir, 'build')
+    newroot = path.join(newroot, listdir(newroot)[0], 'tests')
+    copydir_run_2to3(testroot, newroot)
+    # switch to the converted dir so nose tests the right tests
+    chdir(newroot)

 # always test the sphinx package from this directory
 sys.path.insert(0, path.join(path.dirname(__file__), path.pardir))
@@ -19,8 +29,8 @@ sys.path.insert(0, path.join(path.dirname(__file__), path.pardir))
 try:
     import nose
 except ImportError:
-    print "The nose package is needed to run the Sphinx test suite."
+    print("The nose package is needed to run the Sphinx test suite.")
     sys.exit(1)

-print "Running Sphinx test suite..."
+print("Running Sphinx test suite...")
 nose.main()
diff --git a/tests/test_application.py b/tests/test_application.py
index 3d287a57..d1154863 100644
--- a/tests/test_application.py
+++ b/tests/test_application.py
@@ -45,9 +45,11 @@ def test_output():
     app = TestApp(status=status, warning=warnings)
     try:
         status.truncate(0) # __init__ writes to status
+        status.seek(0)
         app.info("Nothing here...")
         assert status.getvalue() == "Nothing here...\n"
         status.truncate(0)
+        status.seek(0)
         app.info("Nothing here...", True)
         assert status.getvalue() == "Nothing here..."
diff --git a/tests/test_autosummary.py b/tests/test_autosummary.py
index 7e309367..20fb06e0 100644
--- a/tests/test_autosummary.py
+++ b/tests/test_autosummary.py
@@ -9,8 +9,6 @@
     :license: BSD, see LICENSE for details.
 """

-import string
-
 from util import *

 from sphinx.ext.autosummary import mangle_signature
@@ -27,7 +25,7 @@ def test_mangle_signature():
         (a, b, c='foobar()', d=123) :: (a, b[, c, d])
     """

-    TEST = [map(string.strip, x.split("::")) for x in TEST.split("\n")
+    TEST = [map(lambda x: x.strip(), x.split("::")) for x in TEST.split("\n")
            if '::' in x]
     for inp, outp in TEST:
         res = mangle_signature(inp).strip().replace(u"\u00a0", " ")
diff --git a/tests/test_build_gettext.py b/tests/test_build_gettext.py
new file mode 100644
index 00000000..ab68289e
--- /dev/null
+++ b/tests/test_build_gettext.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+"""
+    test_build_gettext
+    ~~~~~~~~~~~~~~~~~~
+
+    Test the build process with gettext builder with the test root.
+
+    :copyright: Copyright 2010 by the Sphinx team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import gettext
+import os
+from subprocess import Popen, PIPE
+
+from util import *
+from util import SkipTest
+
+
+def teardown_module():
+    (test_root / '_build').rmtree(True)
+
+
+@with_app(buildername='gettext')
+def test_all(app):
+    # Generic build; should fail only when the builder is horribly broken.
+    app.builder.build_all()
+
+
+@with_app(buildername='gettext')
+def test_build(app):
+    # Do messages end up in the correct location?
+ app.builder.build(['extapi', 'subdir/includes']) + # top-level documents end up in a message catalog + assert (app.outdir / 'extapi.pot').isfile() + # directory items are grouped into sections + assert (app.outdir / 'subdir.pot').isfile() + + +@with_app(buildername='gettext') +def test_gettext(app): + app.builder.build(['markup']) + + (app.outdir / 'en' / 'LC_MESSAGES').makedirs() + cwd = os.getcwd() + os.chdir(app.outdir) + try: + try: + p = Popen(['msginit', '--no-translator', '-i', 'markup.pot', + '--locale', 'en_US'], + stdout=PIPE, stderr=PIPE) + except OSError: + raise SkipTest # most likely msginit was not found + else: + stdout, stderr = p.communicate() + if p.returncode != 0: + print stdout + print stderr + assert False, 'msginit exited with return code %s' % \ + p.returncode + assert (app.outdir / 'en_US.po').isfile(), 'msginit failed' + try: + p = Popen(['msgfmt', 'en_US.po', '-o', + os.path.join('en', 'LC_MESSAGES', 'test_root.mo')], + stdout=PIPE, stderr=PIPE) + except OSError: + raise SkipTest # most likely msgfmt was not found + else: + stdout, stderr = p.communicate() + if p.returncode != 0: + print stdout + print stderr + assert False, 'msgfmt exited with return code %s' % \ + p.returncode + assert (app.outdir / 'en' / 'LC_MESSAGES' / 'test_root.mo').isfile(), \ + 'msgfmt failed' + finally: + os.chdir(cwd) + + _ = gettext.translation('test_root', app.outdir, languages=['en']).gettext + assert _("Testing various markup") == u"Testing various markup" diff --git a/tests/test_build_html.py b/tests/test_build_html.py index 80656c9a..df4d5ad5 100644 --- a/tests/test_build_html.py +++ b/tests/test_build_html.py @@ -12,6 +12,7 @@ import os import re import htmlentitydefs +import sys from StringIO import StringIO try: @@ -40,7 +41,7 @@ http://www.python.org/logo.png %(root)s/includes.txt:\\d*: \\(WARNING/2\\) Encoding 'utf-8-sig' used for \ reading included file u'.*?wrongenc.inc' seems to be wrong, try giving an \ :encoding: option\\n? -%(root)s/includes.txt:4: WARNING: download file not readable: nonexisting.png +%(root)s/includes.txt:4: WARNING: download file not readable: .*?nonexisting.png %(root)s/objects.txt:\\d*: WARNING: using old C markup; please migrate to \ new-style markup \(e.g. 
c:function instead of cfunction\), see \ http://sphinx.pocoo.org/domains.html @@ -53,6 +54,11 @@ HTML_WARNINGS = ENV_WARNINGS + """\ %(root)s/markup.txt:: WARNING: invalid pair index entry u'keyword; ' """ +if sys.version_info >= (3, 0): + ENV_WARNINGS = remove_unicode_literals(ENV_WARNINGS) + HTML_WARNINGS = remove_unicode_literals(HTML_WARNINGS) + + def tail_check(check): rex = re.compile(check) def checker(nodes): @@ -234,7 +240,7 @@ if pygments: (".//div[@class='inc-lines highlight-text']//pre", r'^class Foo:\n pass\nclass Bar:\n$'), (".//div[@class='inc-startend highlight-text']//pre", - ur'^foo = u"Including Unicode characters: üöä"\n$'), + ur'^foo = "Including Unicode characters: üöä"\n$'), (".//div[@class='inc-preappend highlight-text']//pre", r'(?m)^START CODE$'), (".//div[@class='inc-pyobj-dedent highlight-python']//span", diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py index 4405395a..6c1ccad9 100644 --- a/tests/test_build_latex.py +++ b/tests/test_build_latex.py @@ -32,6 +32,9 @@ None:None: WARNING: no matching candidate for image URI u'foo.\\*' WARNING: invalid pair index entry u'' """ +if sys.version_info >= (3, 0): + LATEX_WARNINGS = remove_unicode_literals(LATEX_WARNINGS) + @with_app(buildername='latex', warning=latex_warnfile, cleanenv=True) def test_latex(app): diff --git a/tests/test_build_texinfo.py b/tests/test_build_texinfo.py new file mode 100644 index 00000000..2b2c8efd --- /dev/null +++ b/tests/test_build_texinfo.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +""" + test_build_texinfo + ~~~~~~~~~~~~~~~~~~ + + Test the build process with Texinfo builder with the test root. + + :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import os +import re +import sys +from StringIO import StringIO +from subprocess import Popen, PIPE + +from util import * +from test_build_html import ENV_WARNINGS + + +def teardown_module(): + (test_root / '_build').rmtree(True) + + +texinfo_warnfile = StringIO() + +TEXINFO_WARNINGS = ENV_WARNINGS + +if sys.version_info >= (3, 0): + TEXINFO_WARNINGS = remove_unicode_literals(TEXINFO_WARNINGS) + + +@with_app(buildername='texinfo', warning=texinfo_warnfile, cleanenv=True) +def test_texinfo(app): + app.builder.build_all() + texinfo_warnings = texinfo_warnfile.getvalue().replace(os.sep, '/') + texinfo_warnings_exp = TEXINFO_WARNINGS % {'root': app.srcdir} + assert re.match(texinfo_warnings_exp + '$', texinfo_warnings), \ + 'Warnings don\'t match:\n' + \ + '--- Expected (regex):\n' + texinfo_warnings_exp + \ + '--- Got:\n' + texinfo_warnings + # now, try to run makeinfo over it + cwd = os.getcwd() + os.chdir(app.outdir) + try: + try: + p = Popen(['makeinfo', '--no-split', 'SphinxTests.texi'], + stdout=PIPE, stderr=PIPE) + except OSError: + pass # most likely makeinfo was not found + else: + stdout, stderr = p.communicate() + retcode = p.returncode + if retcode != 0: + print stdout + print stderr + del app.cleanup_trees[:] + assert False, 'makeinfo exited with return code %s' % retcode + finally: + os.chdir(cwd) diff --git a/tests/test_config.py b/tests/test_config.py index cb4e1105..b5f88a6f 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -9,6 +9,7 @@ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. 
""" +import sys from util import * @@ -84,11 +85,23 @@ def test_extension_values(app): @with_tempdir def test_errors_warnings(dir): # test the error for syntax errors in the config file - write_file(dir / 'conf.py', 'project = \n') + write_file(dir / 'conf.py', u'project = \n', 'ascii') raises_msg(ConfigError, 'conf.py', Config, dir, 'conf.py', {}, None) + # test the automatic conversion of 2.x only code in configs + write_file(dir / 'conf.py', u'# -*- coding: utf-8\n\n' + u'project = u"Jägermeister"\n', 'utf-8') + cfg = Config(dir, 'conf.py', {}, None) + cfg.init_values() + assert cfg.project == u'Jägermeister' + # test the warning for bytestrings with non-ascii content - write_file(dir / 'conf.py', '# -*- coding: latin-1\nproject = "foo\xe4"\n') + # bytestrings with non-ascii content are a syntax error in python3 so we + # skip the test there + if sys.version_info >= (3, 0): + return + write_file(dir / 'conf.py', u'# -*- coding: latin-1\nproject = "fooä"\n', + 'latin-1') cfg = Config(dir, 'conf.py', {}, None) warned = [False] def warn(msg): diff --git a/tests/test_coverage.py b/tests/test_coverage.py index 1262ebf5..cb831635 100644 --- a/tests/test_coverage.py +++ b/tests/test_coverage.py @@ -33,7 +33,7 @@ def test_build(app): assert 'api.h' in c_undoc assert ' * Py_SphinxTest' in c_undoc - undoc_py, undoc_c = pickle.loads((app.outdir / 'undoc.pickle').text()) + undoc_py, undoc_c = pickle.loads((app.outdir / 'undoc.pickle').bytes()) assert len(undoc_c) == 1 # the key is the full path to the header file, which isn't testable assert undoc_c.values()[0] == [('function', 'Py_SphinxTest')] diff --git a/tests/test_env.py b/tests/test_env.py index e91d8e33..a8d6bac2 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -8,6 +8,7 @@ :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ +import sys from util import * @@ -54,8 +55,10 @@ def test_images(): app._warning.reset() htmlbuilder = StandaloneHTMLBuilder(app) htmlbuilder.post_process_images(tree) - assert "no matching candidate for image URI u'foo.*'" in \ - app._warning.content[-1] + image_uri_message = "no matching candidate for image URI u'foo.*'" + if sys.version_info >= (3, 0): + image_uri_message = remove_unicode_literals(image_uri_message) + assert image_uri_message in app._warning.content[-1] assert set(htmlbuilder.images.keys()) == \ set(['subdir/img.png', 'img.png', 'subdir/simg.png', 'svgimg.svg']) assert set(htmlbuilder.images.values()) == \ @@ -64,8 +67,7 @@ def test_images(): app._warning.reset() latexbuilder = LaTeXBuilder(app) latexbuilder.post_process_images(tree) - assert "no matching candidate for image URI u'foo.*'" in \ - app._warning.content[-1] + assert image_uri_message in app._warning.content[-1] assert set(latexbuilder.images.keys()) == \ set(['subdir/img.png', 'subdir/simg.png', 'img.png', 'img.pdf', 'svgimg.pdf']) diff --git a/tests/test_intersphinx.py b/tests/test_intersphinx.py index 3b50cc78..990e35bd 100644 --- a/tests/test_intersphinx.py +++ b/tests/test_intersphinx.py @@ -11,7 +11,10 @@ import zlib import posixpath -from cStringIO import StringIO +try: + from io import BytesIO +except ImportError: + from cStringIO import StringIO as BytesIO from docutils import nodes @@ -28,23 +31,23 @@ inventory_v1 = '''\ # Version: 1.0 module mod foo.html module.cls class foo.html -''' +'''.encode('utf-8') inventory_v2 = '''\ # Sphinx inventory version 2 # Project: foo # Version: 2.0 # The remainder of this file is compressed with zlib. 
-''' + zlib.compress('''\ +'''.encode('utf-8') + zlib.compress('''\ module1 py:module 0 foo.html#module-module1 Long Module desc module2 py:module 0 foo.html#module-$ - module1.func py:function 1 sub/foo.html#$ - CFunc c:function 2 cfunc.html#CFunc - -''') +'''.encode('utf-8')) def test_read_inventory_v1(): - f = StringIO(inventory_v1) + f = BytesIO(inventory_v1) f.readline() invdata = read_inventory_v1(f, '/util', posixpath.join) assert invdata['py:module']['module'] == \ @@ -54,12 +57,12 @@ def test_read_inventory_v1(): def test_read_inventory_v2(): - f = StringIO(inventory_v2) + f = BytesIO(inventory_v2) f.readline() invdata1 = read_inventory_v2(f, '/util', posixpath.join) # try again with a small buffer size to test the chunking algorithm - f = StringIO(inventory_v2) + f = BytesIO(inventory_v2) f.readline() invdata2 = read_inventory_v2(f, '/util', posixpath.join, bufsize=5) diff --git a/tests/test_intl.py b/tests/test_intl.py new file mode 100644 index 00000000..9459a1b7 --- /dev/null +++ b/tests/test_intl.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +""" + test_intl + ~~~~~~~~~ + + Test message patching for internationalization purposes. Runs the text + builder in the test root. + + :copyright: Copyright 2010 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from subprocess import Popen, PIPE + +from util import * +from util import SkipTest + + +def setup_module(): + (test_root / 'xx' / 'LC_MESSAGES').makedirs() + # Compile all required catalogs into binary format (*.mo). + for catalog in 'bom', 'subdir': + try: + p = Popen(['msgfmt', test_root / '%s.po' % catalog, '-o', + test_root / 'xx' / 'LC_MESSAGES' / '%s.mo' % catalog], + stdout=PIPE, stderr=PIPE) + except OSError: + raise SkipTest # most likely msgfmt was not found + else: + stdout, stderr = p.communicate() + if p.returncode != 0: + print stdout + print stderr + assert False, 'msgfmt exited with return code %s' % p.returncode + assert (test_root / 'xx' / 'LC_MESSAGES' / ('%s.mo' % catalog) + ).isfile(), 'msgfmt failed' + + +def teardown_module(): + (test_root / '_build').rmtree(True) + (test_root / 'xx').rmtree(True) + + +@with_app(buildername='text', + confoverrides={'language': 'xx', 'locale_dirs': ['.']}) +def test_simple(app): + app.builder.build(['bom']) + result = (app.outdir / 'bom.txt').text(encoding='utf-8') + expect = (u"\nDatei mit UTF-8" + u"\n***************\n" # underline matches new translation + u"\nThis file has umlauts: äöü.\n") + assert result == expect + + +@with_app(buildername='text', + confoverrides={'language': 'xx', 'locale_dirs': ['.']}) +def test_subdir(app): + app.builder.build(['subdir/includes']) + result = (app.outdir / 'subdir' / 'includes.txt').text(encoding='utf-8') + assert result.startswith(u"\ntranslation\n***********\n\n") diff --git a/tests/test_markup.py b/tests/test_markup.py index 31817df6..092113bb 100644 --- a/tests/test_markup.py +++ b/tests/test_markup.py @@ -17,6 +17,7 @@ from docutils import frontend, utils, nodes from docutils.parsers import rst from sphinx.util import texescape +from sphinx.util.pycompat import b from sphinx.writers.html import HTMLWriter, SmartyPantsHTMLTranslator from sphinx.writers.latex import LaTeXWriter, LaTeXTranslator @@ -50,7 +51,7 @@ class ForgivingLaTeXTranslator(LaTeXTranslator, ForgivingTranslator): def verify_re(rst, html_expected, latex_expected): - document = utils.new_document('test data', settings) + document = utils.new_document(b('test data'), settings) document['file'] = 'dummy' parser.parse(rst, document) 
for msg in document.traverse(nodes.system_message): diff --git a/tests/test_quickstart.py b/tests/test_quickstart.py index cb40d27c..3e942744 100644 --- a/tests/test_quickstart.py +++ b/tests/test_quickstart.py @@ -36,8 +36,13 @@ def mock_raw_input(answers, needanswer=False): return '' return raw_input +try: + real_raw_input = raw_input +except NameError: + real_raw_input = input + def teardown_module(): - qs.raw_input = __builtin__.raw_input + qs.term_input = real_raw_input qs.TERM_ENCODING = getattr(sys.stdin, 'encoding', None) coloron() @@ -51,7 +56,7 @@ def test_do_prompt(): 'Q5': 'no', 'Q6': 'foo', } - qs.raw_input = mock_raw_input(answers) + qs.term_input = mock_raw_input(answers) try: qs.do_prompt(d, 'k1', 'Q1') except AssertionError: @@ -79,13 +84,18 @@ def test_quickstart_defaults(tempdir): 'Author name': 'Georg Brandl', 'Project version': '0.1', } - qs.raw_input = mock_raw_input(answers) + qs.term_input = mock_raw_input(answers) qs.inner_main([]) conffile = tempdir / 'conf.py' assert conffile.isfile() ns = {} - execfile(conffile, ns) + f = open(conffile, 'U') + try: + code = compile(f.read(), conffile, 'exec') + finally: + f.close() + exec code in ns assert ns['extensions'] == [] assert ns['templates_path'] == ['_templates'] assert ns['source_suffix'] == '.rst' @@ -112,8 +122,8 @@ def test_quickstart_all_answers(tempdir): 'Root path': tempdir, 'Separate source and build': 'y', 'Name prefix for templates': '.', - 'Project name': 'STASI\xe2\x84\xa2', - 'Author name': 'Wolfgang Sch\xc3\xa4uble & G\'Beckstein', + 'Project name': u'STASI™'.encode('utf-8'), + 'Author name': u'Wolfgang Schäuble & G\'Beckstein'.encode('utf-8'), 'Project version': '2.0', 'Project release': '2.0.1', 'Source file suffix': '.txt', @@ -131,14 +141,19 @@ def test_quickstart_all_answers(tempdir): 'Create Windows command file': 'no', 'Do you want to use the epub builder': 'yes', } - qs.raw_input = mock_raw_input(answers, needanswer=True) + qs.term_input = mock_raw_input(answers, needanswer=True) qs.TERM_ENCODING = 'utf-8' qs.inner_main([]) conffile = tempdir / 'source' / 'conf.py' assert conffile.isfile() ns = {} - execfile(conffile, ns) + f = open(conffile, 'U') + try: + code = compile(f.read(), conffile, 'exec') + finally: + f.close() + exec code in ns assert ns['extensions'] == ['sphinx.ext.autodoc', 'sphinx.ext.doctest'] assert ns['templates_path'] == ['.templates'] assert ns['source_suffix'] == '.txt' @@ -156,6 +171,10 @@ def test_quickstart_all_answers(tempdir): assert ns['man_pages'] == [ ('contents', 'stasi', u'STASI™ Documentation', [u'Wolfgang Schäuble & G\'Beckstein'], 1)] + assert ns['texinfo_documents'] == [ + ('contents', 'STASI', u'STASI™ Documentation', + u'Wolfgang Schäuble & G\'Beckstein', 'STASI', + 'One line description of project.', 'Miscellaneous'),] assert (tempdir / 'build').isdir() assert (tempdir / 'source' / '.static').isdir() diff --git a/tests/test_search.py b/tests/test_search.py index 0b5b158b..c0750366 100644 --- a/tests/test_search.py +++ b/tests/test_search.py @@ -13,6 +13,7 @@ from docutils import frontend, utils from docutils.parsers import rst from sphinx.search import IndexBuilder +from sphinx.util.pycompat import b settings = parser = None @@ -31,7 +32,7 @@ test that non-comments are indexed: fermion ''' def test_wordcollector(): - doc = utils.new_document('test data', settings) + doc = utils.new_document(b('test data'), settings) doc['file'] = 'dummy' parser.parse(FILE_CONTENTS, doc) diff --git a/tests/test_searchadapters.py b/tests/test_searchadapters.py new file mode 
100644 index 00000000..cf5accb9 --- /dev/null +++ b/tests/test_searchadapters.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +""" + test_searchadapters + ~~~~~~~~~~~~~~~~~~~ + + Test the Web Support Package search adapters. + + :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import os, sys +from StringIO import StringIO + +from nose import SkipTest + +from sphinx.websupport import WebSupport + +from test_websupport import sqlalchemy_missing +from util import * + + +def clear_builddir(): + (test_root / 'websupport').rmtree(True) + + +def teardown_module(): + (test_root / 'generated').rmtree(True) + clear_builddir() + + +def search_adapter_helper(adapter): + clear_builddir() + + settings = {'builddir': os.path.join(test_root, 'websupport'), + 'status': StringIO(), + 'warning': StringIO()} + settings.update({'srcdir': test_root, + 'search': adapter}) + support = WebSupport(**settings) + support.build() + + s = support.search + + # Test the adapters query method. A search for "Epigraph" should return + # one result. + results = s.query(u'Epigraph') + assert len(results) == 1, \ + '%s search adapter returned %s search result(s), should have been 1'\ + % (adapter, len(results)) + + # Make sure documents are properly updated by the search adapter. + s.init_indexing(changed=['markup']) + s.add_document(u'markup', u'title', u'SomeLongRandomWord') + s.finish_indexing() + # Now a search for "Epigraph" should return zero results. + results = s.query(u'Epigraph') + assert len(results) == 0, \ + '%s search adapter returned %s search result(s), should have been 0'\ + % (adapter, len(results)) + # A search for "SomeLongRandomWord" should return one result. + results = s.query(u'SomeLongRandomWord') + assert len(results) == 1, \ + '%s search adapter returned %s search result(s), should have been 1'\ + % (adapter, len(results)) + # Make sure it works through the WebSupport API + html = support.get_search_results(u'SomeLongRandomWord') + + +@skip_unless_importable('xapian', 'needs xapian bindings installed') +@skip_if(sqlalchemy_missing, 'needs sqlalchemy') +def test_xapian(): + search_adapter_helper('xapian') + + +@skip_unless_importable('whoosh', 'needs whoosh package installed') +@skip_if(sqlalchemy_missing, 'needs sqlalchemy') +def test_whoosh(): + search_adapter_helper('whoosh') diff --git a/tests/test_versioning.py b/tests/test_versioning.py new file mode 100644 index 00000000..923da203 --- /dev/null +++ b/tests/test_versioning.py @@ -0,0 +1,114 @@ +# -*- coding: utf-8 -*- +""" + test_versioning + ~~~~~~~~~~~~~~~ + + Test the versioning implementation. + + :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" +import pickle + +from util import * + +from docutils.statemachine import ViewList +from docutils.parsers.rst.directives.html import MetaBody + +from sphinx import addnodes +from sphinx.versioning import add_uids, merge_doctrees, get_ratio + +def setup_module(): + global app, original, original_uids + app = TestApp() + app.builder.env.app = app + app.connect('doctree-resolved', on_doctree_resolved) + app.build() + original = doctrees['versioning/original'] + original_uids = [n.uid for n in add_uids(original, is_paragraph)] + +def teardown_module(): + app.cleanup() + (test_root / '_build').rmtree(True) + +doctrees = {} + +def on_doctree_resolved(app, doctree, docname): + doctrees[docname] = doctree + +def is_paragraph(node): + return node.__class__.__name__ == 'paragraph' + +def test_get_ratio(): + assert get_ratio('', 'a') + assert get_ratio('a', '') + +def test_add_uids(): + assert len(original_uids) == 3 + +def test_picklablility(): + # we have to modify the doctree so we can pickle it + copy = original.copy() + copy.reporter = None + copy.transformer = None + copy.settings.warning_stream = None + copy.settings.env = None + copy.settings.record_dependencies = None + for metanode in copy.traverse(MetaBody.meta): + metanode.__class__ = addnodes.meta + loaded = pickle.loads(pickle.dumps(copy, pickle.HIGHEST_PROTOCOL)) + assert all(getattr(n, 'uid', False) for n in loaded.traverse(is_paragraph)) + +def test_modified(): + modified = doctrees['versioning/modified'] + new_nodes = list(merge_doctrees(original, modified, is_paragraph)) + uids = [n.uid for n in modified.traverse(is_paragraph)] + assert not new_nodes + assert original_uids == uids + +def test_added(): + added = doctrees['versioning/added'] + new_nodes = list(merge_doctrees(original, added, is_paragraph)) + uids = [n.uid for n in added.traverse(is_paragraph)] + assert len(new_nodes) == 1 + assert original_uids == uids[:-1] + +def test_deleted(): + deleted = doctrees['versioning/deleted'] + new_nodes = list(merge_doctrees(original, deleted, is_paragraph)) + uids = [n.uid for n in deleted.traverse(is_paragraph)] + assert not new_nodes + assert original_uids[::2] == uids + +def test_deleted_end(): + deleted_end = doctrees['versioning/deleted_end'] + new_nodes = list(merge_doctrees(original, deleted_end, is_paragraph)) + uids = [n.uid for n in deleted_end.traverse(is_paragraph)] + assert not new_nodes + assert original_uids[:-1] == uids + +def test_insert(): + insert = doctrees['versioning/insert'] + new_nodes = list(merge_doctrees(original, insert, is_paragraph)) + uids = [n.uid for n in insert.traverse(is_paragraph)] + assert len(new_nodes) == 1 + assert original_uids[0] == uids[0] + assert original_uids[1:] == uids[2:] + +def test_insert_beginning(): + insert_beginning = doctrees['versioning/insert_beginning'] + new_nodes = list(merge_doctrees(original, insert_beginning, is_paragraph)) + uids = [n.uid for n in insert_beginning.traverse(is_paragraph)] + assert len(new_nodes) == 1 + assert len(uids) == 4 + assert original_uids == uids[1:] + assert original_uids[0] != uids[0] + +def test_insert_similar(): + insert_similar = doctrees['versioning/insert_similar'] + new_nodes = list(merge_doctrees(original, insert_similar, is_paragraph)) + uids = [n.uid for n in insert_similar.traverse(is_paragraph)] + assert len(new_nodes) == 1 + assert new_nodes[0].rawsource == u'Anyway I need more' + assert original_uids[0] == uids[0] + assert original_uids[1:] == uids[2:] diff --git a/tests/test_websupport.py b/tests/test_websupport.py new file 
mode 100644 index 00000000..c0933436 --- /dev/null +++ b/tests/test_websupport.py @@ -0,0 +1,279 @@ +# -*- coding: utf-8 -*- +""" + test_websupport + ~~~~~~~~~~~~~~~ + + Test the Web Support Package + + :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import os +from StringIO import StringIO + +try: + from functools import wraps +except ImportError: + # functools is new in 2.5 + wraps = lambda f: (lambda w: w) + +from nose import SkipTest + +from sphinx.websupport import WebSupport +from sphinx.websupport.errors import * +from sphinx.websupport.storage import StorageBackend +from sphinx.websupport.storage.differ import CombinedHtmlDiff +try: + from sphinx.websupport.storage.sqlalchemystorage import Session, \ + SQLAlchemyStorage, Comment, CommentVote + from sphinx.websupport.storage.sqlalchemy_db import Node + sqlalchemy_missing = False +except ImportError: + sqlalchemy_missing = True + +from util import * + + +default_settings = {'builddir': os.path.join(test_root, 'websupport'), + 'status': StringIO(), + 'warning': StringIO()} + +def teardown_module(): + (test_root / 'generated').rmtree(True) + (test_root / 'websupport').rmtree(True) + + +def with_support(*args, **kwargs): + """Make a WebSupport object and pass it the test.""" + settings = default_settings.copy() + settings.update(kwargs) + + def generator(func): + @wraps(func) + def new_func(*args2, **kwargs2): + support = WebSupport(**settings) + func(support, *args2, **kwargs2) + return new_func + return generator + + +class NullStorage(StorageBackend): + pass + + +@with_support(storage=NullStorage()) +def test_no_srcdir(support): + """Make sure the correct exception is raised if srcdir is not given.""" + raises(RuntimeError, support.build) + + +@skip_if(sqlalchemy_missing, 'needs sqlalchemy') +@with_support(srcdir=test_root) +def test_build(support): + support.build() + + +@skip_if(sqlalchemy_missing, 'needs sqlalchemy') +@with_support() +def test_get_document(support): + raises(DocumentNotFoundError, support.get_document, 'nonexisting') + + contents = support.get_document('contents') + assert contents['title'] and contents['body'] \ + and contents['sidebar'] and contents['relbar'] + + +@skip_if(sqlalchemy_missing, 'needs sqlalchemy') +@with_support() +def test_comments(support): + session = Session() + nodes = session.query(Node).all() + first_node = nodes[0] + second_node = nodes[1] + + # Create a displayed comment and a non displayed comment. + comment = support.add_comment('First test comment', + node_id=first_node.id, + username='user_one') + hidden_comment = support.add_comment('Hidden comment', + node_id=first_node.id, + displayed=False) + # Make sure that comments can't be added to a comment where + # displayed == False, since it could break the algorithm that + # converts a nodes comments to a tree. + raises(CommentNotAllowedError, support.add_comment, 'Not allowed', + parent_id=str(hidden_comment['id'])) + # Add a displayed and not displayed child to the displayed comment. + support.add_comment('Child test comment', parent_id=str(comment['id']), + username='user_one') + support.add_comment('Hidden child test comment', + parent_id=str(comment['id']), displayed=False) + # Add a comment to another node to make sure it isn't returned later. + support.add_comment('Second test comment', + node_id=second_node.id, + username='user_two') + + # Access the comments as a moderator. 
+ data = support.get_data(first_node.id, moderator=True) + comments = data['comments'] + children = comments[0]['children'] + assert len(comments) == 2 + assert comments[1]['text'] == 'Hidden comment' + assert len(children) == 2 + assert children[1]['text'] == 'Hidden child test comment' + + # Access the comments without being a moderator. + data = support.get_data(first_node.id) + comments = data['comments'] + children = comments[0]['children'] + assert len(comments) == 1 + assert comments[0]['text'] == 'First test comment' + assert len(children) == 1 + assert children[0]['text'] == 'Child test comment' + + +@skip_if(sqlalchemy_missing, 'needs sqlalchemy') +@with_support() +def test_voting(support): + session = Session() + nodes = session.query(Node).all() + node = nodes[0] + + comment = support.get_data(node.id)['comments'][0] + + def check_rating(val): + data = support.get_data(node.id) + comment = data['comments'][0] + assert comment['rating'] == val, '%s != %s' % (comment['rating'], val) + + support.process_vote(comment['id'], 'user_one', '1') + support.process_vote(comment['id'], 'user_two', '1') + support.process_vote(comment['id'], 'user_three', '1') + check_rating(3) + support.process_vote(comment['id'], 'user_one', '-1') + check_rating(1) + support.process_vote(comment['id'], 'user_one', '0') + check_rating(2) + + # Make sure a vote with value > 1 or < -1 can't be cast. + raises(ValueError, support.process_vote, comment['id'], 'user_one', '2') + raises(ValueError, support.process_vote, comment['id'], 'user_one', '-2') + + # Make sure past voting data is associated with comments when they are + # fetched. + data = support.get_data(str(node.id), username='user_two') + comment = data['comments'][0] + assert comment['vote'] == 1, '%s != 1' % comment['vote'] + + +@skip_if(sqlalchemy_missing, 'needs sqlalchemy') +@with_support() +def test_proposals(support): + session = Session() + node = session.query(Node).first() + + data = support.get_data(node.id) + + source = data['source'] + proposal = source[:5] + source[10:15] + 'asdf' + source[15:] + + comment = support.add_comment('Proposal comment', + node_id=node.id, + proposal=proposal) + + +@skip_if(sqlalchemy_missing, 'needs sqlalchemy') +@with_support() +def test_user_delete_comments(support): + def get_comment(): + session = Session() + node = session.query(Node).first() + session.close() + return support.get_data(node.id)['comments'][0] + + comment = get_comment() + assert comment['username'] == 'user_one' + # Make sure other normal users can't delete someone elses comments. + raises(UserNotAuthorizedError, support.delete_comment, + comment['id'], username='user_two') + # Now delete the comment using the correct username. 
+ support.delete_comment(comment['id'], username='user_one') + comment = get_comment() + assert comment['username'] == '[deleted]' + assert comment['text'] == '[deleted]' + + +@skip_if(sqlalchemy_missing, 'needs sqlalchemy') +@with_support() +def test_moderator_delete_comments(support): + def get_comment(): + session = Session() + node = session.query(Node).first() + session.close() + return support.get_data(node.id, moderator=True)['comments'][1] + + comment = get_comment() + support.delete_comment(comment['id'], username='user_two', + moderator=True) + comment = get_comment() + assert comment['username'] == '[deleted]' + assert comment['text'] == '[deleted]' + + +@skip_if(sqlalchemy_missing, 'needs sqlalchemy') +@with_support() +def test_update_username(support): + support.update_username('user_two', 'new_user_two') + session = Session() + comments = session.query(Comment).\ + filter(Comment.username == 'user_two').all() + assert len(comments) == 0 + votes = session.query(CommentVote).\ + filter(CommentVote.username == 'user_two') + assert len(comments) == 0 + comments = session.query(Comment).\ + filter(Comment.username == 'new_user_two').all() + assert len(comments) == 1 + votes = session.query(CommentVote).\ + filter(CommentVote.username == 'new_user_two') + assert len(comments) == 1 + + +called = False +def moderation_callback(comment): + global called + called = True + + +@skip_if(sqlalchemy_missing, 'needs sqlalchemy') +@with_support(moderation_callback=moderation_callback) +def test_moderation(support): + session = Session() + nodes = session.query(Node).all() + node = nodes[7] + session.close() + accepted = support.add_comment('Accepted Comment', node_id=node.id, + displayed=False) + rejected = support.add_comment('Rejected comment', node_id=node.id, + displayed=False) + # Make sure the moderation_callback is called. + assert called == True + # Make sure the user must be a moderator. + raises(UserNotAuthorizedError, support.accept_comment, accepted['id']) + raises(UserNotAuthorizedError, support.reject_comment, accepted['id']) + support.accept_comment(accepted['id'], moderator=True) + support.reject_comment(rejected['id'], moderator=True) + comments = support.get_data(node.id)['comments'] + assert len(comments) == 1 + comments = support.get_data(node.id, moderator=True)['comments'] + assert len(comments) == 1 + + +def test_differ(): + source = 'Lorem ipsum dolor sit amet,\nconsectetur adipisicing elit,\n' \ + 'sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.' + prop = 'Lorem dolor sit amet,\nconsectetur nihil adipisicing elit,\n' \ + 'sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.' 
+    differ = CombinedHtmlDiff(source, prop)
+    differ.make_html()
diff --git a/tests/util.py b/tests/util.py
index 1b24af0e..d56f3464 100644
--- a/tests/util.py
+++ b/tests/util.py
@@ -11,6 +11,8 @@ import sys
 import StringIO
 import tempfile
 import shutil
+import re
+from codecs import open

 try:
     from functools import wraps
@@ -23,15 +25,15 @@
 from sphinx.ext.autodoc import AutoDirective

 from path import path

-from nose import tools
+from nose import tools, SkipTest


 __all__ = [
-    'test_root',
-    'raises', 'raises_msg', 'Struct',
+    'test_root', 'raises', 'raises_msg',
+    'skip_if', 'skip_unless', 'skip_unless_importable', 'Struct',
     'ListOutput', 'TestApp', 'with_app', 'gen_with_app',
     'path', 'with_tempdir', 'write_file',
-    'sprint',
+    'sprint', 'remove_unicode_literals',
 ]

@@ -69,6 +71,30 @@ def raises_msg(exc, msg, func, *args, **kwds):
         raise AssertionError('%s did not raise %s' %
                              (func.__name__, _excstr(exc)))

+def skip_if(condition, msg=None):
+    """Decorator to skip test if condition is true."""
+    def deco(test):
+        @tools.make_decorator(test)
+        def skipper(*args, **kwds):
+            if condition:
+                raise SkipTest(msg or 'conditional skip')
+            return test(*args, **kwds)
+        return skipper
+    return deco
+
+def skip_unless(condition, msg=None):
+    """Decorator to skip test if condition is false."""
+    return skip_if(not condition, msg)
+
+def skip_unless_importable(module, msg=None):
+    """Decorator to skip test if module is not importable."""
+    try:
+        __import__(module)
+    except ImportError:
+        return skip_if(True, msg)
+    else:
+        return skip_if(False, msg)
+

 class Struct(object):
     def __init__(self, **kwds):
@@ -191,11 +217,21 @@ def with_tempdir(func):
     return new_func


-def write_file(name, contents):
-    f = open(str(name), 'wb')
+def write_file(name, contents, encoding=None):
+    if encoding is None:
+        mode = 'wb'
+        if isinstance(contents, unicode):
+            contents = contents.encode('ascii')
+    else:
+        mode = 'w'
+    f = open(str(name), 'wb', encoding=encoding)
     f.write(contents)
     f.close()


 def sprint(*args):
     sys.stderr.write(' '.join(map(str, args)) + '\n')
+
+_unicode_literals_re = re.compile(r'u(".*?")|u(\'.*?\')')
+def remove_unicode_literals(s):
+    return _unicode_literals_re.sub(lambda x: x.group(1) or x.group(2), s)
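
A note on the test_coverage.py hunk above: pickle.loads() only accepts a bytes object on Python 3, which is why the test now reads undoc.pickle with path.bytes() instead of path.text(). A minimal sketch of the same round trip (the dictionary content here is invented for illustration):

import pickle

# Dump and reload a small structure; loads() needs bytes on Python 3,
# so pickled files have to be read in binary mode.
data = pickle.dumps({'function': ['Py_SphinxTest']})
assert isinstance(data, bytes)
assert pickle.loads(data) == {'function': ['Py_SphinxTest']}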
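
The test_env.py and util.py hunks work around the differing string reprs: Python 2 prints u'foo.*' where Python 3 prints 'foo.*'. The new helper strips the u prefix from an expected message so one assertion works on both interpreters. A standalone usage sketch (the warning text is copied from the test; the rest is illustrative):

import re
import sys

# Standalone copy of the helper added to tests/util.py.
_unicode_literals_re = re.compile(r'u(".*?")|u(\'.*?\')')

def remove_unicode_literals(s):
    return _unicode_literals_re.sub(lambda x: x.group(1) or x.group(2), s)

expected = "no matching candidate for image URI u'foo.*'"
if sys.version_info >= (3, 0):
    expected = remove_unicode_literals(expected)
# On Python 3 expected is now "no matching candidate for image URI 'foo.*'".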
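
In test_intersphinx.py the inventories are now byte strings parsed from BytesIO, since a version-2 inventory is a short plain-text header followed by a zlib-compressed payload and has to be handled as binary data on Python 3. A small sketch of that layout, assuming a made-up project name and entry:

import zlib
try:
    from io import BytesIO
except ImportError:
    from cStringIO import StringIO as BytesIO

# Header is plain text, the payload is zlib-compressed; everything is kept as
# bytes so it can be read back through BytesIO on Python 3 as well.
inventory = ('# Sphinx inventory version 2\n'
             '# Project: demo\n'
             '# Version: 1.0\n'
             '# The remainder of this file is compressed with zlib.\n'
             ).encode('utf-8') + zlib.compress(
             'demo py:module 0 demo.html#module-demo -\n'.encode('utf-8'))

f = BytesIO(inventory)
assert f.readline().rstrip() == '# Sphinx inventory version 2'.encode('utf-8')
# The rest of the stream is what the test hands to read_inventory_v2().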
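
test_intl.py compiles its .po catalogs into binary .mo files with the external msgfmt tool and skips the whole module when the binary is missing. A hedged sketch of that pattern, with placeholder file names:

from subprocess import Popen, PIPE

def compile_catalog(po_path, mo_path):
    # Returns True when msgfmt produced the .mo file, False when it failed,
    # and None when the msgfmt binary is not installed (caller should skip).
    try:
        p = Popen(['msgfmt', po_path, '-o', mo_path], stdout=PIPE, stderr=PIPE)
    except OSError:
        return None
    stdout, stderr = p.communicate()
    return p.returncode == 0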
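
The raw_input shim in test_quickstart.py exists because Python 3 renamed raw_input() to input(); sphinx-quickstart therefore reads answers through its own qs.term_input hook, which the test replaces with a mock. A minimal version of that shim:

try:
    term_input = raw_input           # Python 2: read a line from stdin
except NameError:
    term_input = input               # Python 3: raw_input was renamed to input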
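
test_quickstart.py also replaces execfile(), which no longer exists on Python 3, with an explicit read/compile/exec of the generated conf.py. The same pattern as a small helper; the path and the commented usage are placeholders, and exec is written in its call form so the sketch stays valid on both 2.6+ and 3.x:

def load_conf(path):
    # Read conf.py with universal newlines (mirroring the test), compile it
    # against its real file name so tracebacks point at conf.py, and execute
    # it into a fresh namespace dict.
    namespace = {}
    f = open(path, 'U')
    try:
        code = compile(f.read(), path, 'exec')
    finally:
        f.close()
    exec(code, namespace)
    return namespace

# ns = load_conf('conf.py')              # hypothetical usage
# assert ns['source_suffix'] == '.rst'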
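
Finally, the skip_if/skip_unless/skip_unless_importable decorators added to tests/util.py let individual tests declare external requirements (xapian, whoosh, sqlalchemy) instead of failing outright. A usage sketch, assuming it runs under nose with the tests/ directory on sys.path; the module and test names are invented:

from util import skip_if, skip_unless_importable

@skip_unless_importable('whoosh', 'needs whoosh package installed')
def test_whoosh_backend():
    import whoosh                    # safe: the decorator already checked
    assert whoosh

@skip_if(True, 'demonstrates an unconditional skip')
def test_never_runs():
    raise AssertionError('not reached')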
