author    ianb <devnull@localhost>  2005-04-22 03:18:20 +0000
committer ianb <devnull@localhost>  2005-04-22 03:18:20 +0000
commit    36593a8d40cd1417d2927a78cdbe621090b6e8a5 (patch)
tree      74dc42b93bb512593869fd57f8be99c198d8c108 /paste
parent    7752a3102fc8aa68ee7e349495bc2e9345c77c00 (diff)
download  paste-36593a8d40cd1417d2927a78cdbe621090b6e8a5.tar.gz

Renamed package itself
Diffstat (limited to 'paste')
-rw-r--r--  paste/3rd-party/README.txt | 14
-rw-r--r--  paste/3rd-party/new_python/python/UserDict.py | 164
-rw-r--r--  paste/3rd-party/new_python/python/__init__.py | 1
-rw-r--r--  paste/3rd-party/new_python/python/doctest.py | 2665
-rw-r--r--  paste/3rd-party/new_python/python/string.py | 531
-rw-r--r--  paste/__init__.py | 1
-rw-r--r--  paste/app_setup.py | 410
-rw-r--r--  paste/app_templates/__init__.py | 1
-rw-r--r--  paste/app_templates/webkit_zpt/__init__.py | 1
-rw-r--r--  paste/app_templates/webkit_zpt/command.py | 43
-rw-r--r--  paste/app_templates/webkit_zpt/description.txt | 4
-rw-r--r--  paste/app_templates/webkit_zpt/servlet_template/templates/+servlet_name+.pt | 7
-rw-r--r--  paste/app_templates/webkit_zpt/servlet_template/web/+servlet_name+.py | 7
-rw-r--r--  paste/app_templates/webkit_zpt/template/__init__.py | 5
-rw-r--r--  paste/app_templates/webkit_zpt/template/server.conf | 19
-rw-r--r--  paste/app_templates/webkit_zpt/template/sitepage.py | 33
-rw-r--r--  paste/app_templates/webkit_zpt/template/templates/generic_error.pt | 7
-rw-r--r--  paste/app_templates/webkit_zpt/template/templates/index.pt | 22
-rw-r--r--  paste/app_templates/webkit_zpt/template/templates/standard_template.pt | 29
-rw-r--r--  paste/app_templates/webkit_zpt/template/web/__init__.py | 6
-rw-r--r--  paste/app_templates/webkit_zpt/template/web/index.py | 9
-rw-r--r--  paste/app_templates/webkit_zpt/template/web/static/stylesheet.css | 13
-rw-r--r--  paste/cgiserver.py | 113
-rw-r--r--  paste/cgitb_catcher.py | 95
-rw-r--r--  paste/configmiddleware.py | 7
-rw-r--r--  paste/default_config.conf | 1
-rw-r--r--  paste/echo.py | 59
-rw-r--r--  paste/error_middleware.py | 132
-rw-r--r--  paste/exceptions/__init__.py | 1
-rw-r--r--  paste/exceptions/collector.py | 458
-rw-r--r--  paste/exceptions/formatter.py | 126
-rw-r--r--  paste/exceptions/reporter.py | 122
-rw-r--r--  paste/exceptions/serial_number_generator.py | 114
-rw-r--r--  paste/exceptions/tests/__init__.py | 1
-rw-r--r--  paste/exceptions/tests/test_formatter.py | 79
-rw-r--r--  paste/exceptions/tests/test_reporter.py | 46
-rw-r--r--  paste/gzipper.py | 65
-rw-r--r--  paste/httpexceptions.py | 233
-rw-r--r--  paste/lint.py | 259
-rw-r--r--  paste/login.py | 251
-rw-r--r--  paste/pycgiwrapper.py | 182
-rw-r--r--  paste/pyconfig.py | 168
-rw-r--r--  paste/recursive.py | 110
-rw-r--r--  paste/reloader.py | 111
-rw-r--r--  paste/scgiserver.py | 148
-rwxr-xr-x  paste/server.py | 269
-rw-r--r--  paste/server_script_template.py | 19
-rw-r--r--  paste/session.py | 156
-rw-r--r--  paste/tests/__init__.py | 1
-rwxr-xr-x  paste/tests/doctest_webapp.py | 403
-rwxr-xr-x  paste/tests/echotest.py | 109
-rw-r--r--  paste/tests/fixture.py | 307
-rw-r--r--  paste/tests/pyconfig_data/context.py | 8
-rw-r--r--  paste/tests/pyconfig_data/deriv.conf | 3
-rw-r--r--  paste/tests/pyconfig_data/nest1.conf | 3
-rw-r--r--  paste/tests/pyconfig_data/nest2.conf | 2
-rw-r--r--  paste/tests/pyconfig_data/one.py | 2
-rw-r--r--  paste/tests/test_authentication.py | 76
-rw-r--r--  paste/tests/test_error_middleware.py | 63
-rw-r--r--  paste/tests/test_pyconfig.py | 46
-rw-r--r--  paste/tests/test_urlparser.py | 83
-rw-r--r--  paste/tests/urlparser_data/__init__.py | 1
-rw-r--r--  paste/tests/urlparser_data/deep/index.html | 1
-rw-r--r--  paste/tests/urlparser_data/deep/sub/Main.txt | 1
-rw-r--r--  paste/tests/urlparser_data/find_file/index.txt | 1
-rw-r--r--  paste/tests/urlparser_data/find_file/test2.html | 1
-rw-r--r--  paste/tests/urlparser_data/hook/__init__.py | 10
-rw-r--r--  paste/tests/urlparser_data/hook/app.py | 5
-rw-r--r--  paste/tests/urlparser_data/hook/index.py | 4
-rw-r--r--  paste/tests/urlparser_data/not_found/__init__.py | 1
-rw-r--r--  paste/tests/urlparser_data/not_found/recur/__init__.py | 9
-rw-r--r--  paste/tests/urlparser_data/not_found/recur/isfound.txt | 1
-rw-r--r--  paste/tests/urlparser_data/not_found/simple/__init__.py | 3
-rw-r--r--  paste/tests/urlparser_data/not_found/simple/found.txt | 1
-rw-r--r--  paste/tests/urlparser_data/not_found/user/__init__.py | 12
-rw-r--r--  paste/tests/urlparser_data/not_found/user/list.py | 3
-rw-r--r--  paste/tests/urlparser_data/python/__init__.py | 1
-rw-r--r--  paste/tests/urlparser_data/python/simpleapp.py | 6
-rw-r--r--  paste/tests/urlparser_data/python/stream.py | 7
-rw-r--r--  paste/tests/urlparser_data/python/sub/__init__.py | 1
-rw-r--r--  paste/tests/urlparser_data/python/sub/simpleapp.py | 6
-rw-r--r--  paste/threaded.py | 436
-rw-r--r--  paste/twisted_wsgi.py | 247
-rw-r--r--  paste/urlparser.py | 329
-rw-r--r--  paste/util/__init__.py | 1
-rw-r--r--  paste/util/classinstance.py | 34
-rw-r--r--  paste/util/filemixin.py | 50
-rw-r--r--  paste/util/thirdparty.py | 42
-rw-r--r--  paste/util/threadedprint.py | 213
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/CSVJoiner.py | 21
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/CSVParser.py | 265
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/DBPool.py | 128
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/DataTable.py | 804
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/DateInterval.py | 70
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/DictForArgs.py | 209
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/Error.py | 50
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/Funcs.py | 325
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/M2PickleRPC.py | 67
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/MixIn.py | 70
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/NamedValueAccess.py | 570
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/ParamFactory.py | 22
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/PickleCache.py | 220
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/PickleRPC.py | 428
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/PropertiesObject.py | 155
-rw-r--r--  paste/webkit/FakeWebware/MiscUtils/__init__.py | 68
-rw-r--r--  paste/webkit/FakeWebware/README.txt | 7
-rw-r--r--  paste/webkit/FakeWebware/WebKit/HTTPServlet.py | 1
-rw-r--r--  paste/webkit/FakeWebware/WebKit/Page.py | 1
-rw-r--r--  paste/webkit/FakeWebware/WebKit/__init__.py | 1
-rw-r--r--  paste/webkit/FakeWebware/WebUtils/Funcs.py | 148
-rw-r--r--  paste/webkit/FakeWebware/WebUtils/__init__.py | 1
-rw-r--r--  paste/webkit/FakeWebware/__init__.py | 1
-rw-r--r--  paste/webkit/__init__.py | 1
-rw-r--r--  paste/webkit/examples/EchoServlet.py | 57
-rw-r--r--  paste/webkit/examples/__init__.py | 1
-rw-r--r--  paste/webkit/test_wkfixture.py | 66
-rw-r--r--  paste/webkit/wkapplication.py | 18
-rw-r--r--  paste/webkit/wkcommon.py | 187
-rw-r--r--  paste/webkit/wkrequest.py | 344
-rw-r--r--  paste/webkit/wkresponse.py | 267
-rw-r--r--  paste/webkit/wkservlet.py | 430
-rw-r--r--  paste/webkit/wksession.py | 47
-rw-r--r--  paste/webkit/wktransaction.py | 96
-rw-r--r--  paste/webkit/wsgiwebkit.py | 67
-rw-r--r--  paste/wsgilib.py | 252
125 files changed, 15315 insertions, 0 deletions
diff --git a/paste/3rd-party/README.txt b/paste/3rd-party/README.txt
new file mode 100644
index 0000000..0526e70
--- /dev/null
+++ b/paste/3rd-party/README.txt
@@ -0,0 +1,14 @@
+This directory contains packages useful to Paste users, who may not
+feel like installing those packages. The module util.thirdparty has
+functions for pulling these modules into the path while still
+respecting any packages the user has installed on their own.
+
+To use this, create a directory package_name-files, and then install
+the package into that directory, probably like::
+
+ cd PackageName
+ python setup.py install \
+ --install-lib=path/to/3rd-party/package_name-files
+
+These files should *not* go into the repository! But they should go
+into the installation package.
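
The util.thirdparty helpers mentioned in this README live in paste/util/thirdparty.py
(see the diffstat above) and are not reproduced in this hunk. As a rough illustration
of the mechanism the README describes (prefer a copy the user installed themselves,
otherwise put the bundled package_name-files directory on sys.path), a minimal sketch
might look like the following; the add_package name and the details here are
hypothetical, not the actual paste.util.thirdparty API::

    import os
    import sys

    def add_package(package_name):
        # Hypothetical sketch only; the real helpers are in paste/util/thirdparty.py.
        # If the user already has the package installed, use that copy.
        try:
            __import__(package_name)
            return
        except ImportError:
            pass
        # Otherwise fall back to the bundled copy under 3rd-party/.
        here = os.path.dirname(os.path.abspath(__file__))
        bundled = os.path.join(here, '3rd-party', '%s-files' % package_name)
        if os.path.isdir(bundled) and bundled not in sys.path:
            sys.path.insert(0, bundled)
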
diff --git a/paste/3rd-party/new_python/python/UserDict.py b/paste/3rd-party/new_python/python/UserDict.py
new file mode 100644
index 0000000..35f86fc
--- /dev/null
+++ b/paste/3rd-party/new_python/python/UserDict.py
@@ -0,0 +1,164 @@
+"""A more or less complete user-defined wrapper around dictionary objects."""
+
+class UserDict:
+ def __init__(self, dict=None, **kwargs):
+ self.data = {}
+ if dict is not None:
+ if not hasattr(dict,'keys'):
+ dict = type({})(dict) # make mapping from a sequence
+ self.update(dict)
+ if len(kwargs):
+ self.update(kwargs)
+ def __repr__(self): return repr(self.data)
+ def __cmp__(self, dict):
+ if isinstance(dict, UserDict):
+ return cmp(self.data, dict.data)
+ else:
+ return cmp(self.data, dict)
+ def __len__(self): return len(self.data)
+ def __getitem__(self, key): return self.data[key]
+ def __setitem__(self, key, item): self.data[key] = item
+ def __delitem__(self, key): del self.data[key]
+ def clear(self): self.data.clear()
+ def copy(self):
+ if self.__class__ is UserDict:
+ return UserDict(self.data)
+ import copy
+ data = self.data
+ try:
+ self.data = {}
+ c = copy.copy(self)
+ finally:
+ self.data = data
+ c.update(self)
+ return c
+ def keys(self): return self.data.keys()
+ def items(self): return self.data.items()
+ def iteritems(self): return self.data.iteritems()
+ def iterkeys(self): return self.data.iterkeys()
+ def itervalues(self): return self.data.itervalues()
+ def values(self): return self.data.values()
+ def has_key(self, key): return self.data.has_key(key)
+ def update(self, dict):
+ if isinstance(dict, UserDict):
+ self.data.update(dict.data)
+ elif isinstance(dict, type(self.data)):
+ self.data.update(dict)
+ else:
+ for k, v in dict.items():
+ self[k] = v
+ def get(self, key, failobj=None):
+ if not self.has_key(key):
+ return failobj
+ return self[key]
+ def setdefault(self, key, failobj=None):
+ if not self.has_key(key):
+ self[key] = failobj
+ return self[key]
+ def pop(self, key, *args):
+ return self.data.pop(key, *args)
+ def popitem(self):
+ return self.data.popitem()
+ def __contains__(self, key):
+ return key in self.data
+ def fromkeys(cls, iterable, value=None):
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+ fromkeys = classmethod(fromkeys)
+
+class IterableUserDict(UserDict):
+ def __iter__(self):
+ return iter(self.data)
+
+class DictMixin:
+ # Mixin defining all dictionary methods for classes that already have
+ # a minimum dictionary interface including getitem, setitem, delitem,
+ # and keys. Without knowledge of the subclass constructor, the mixin
+ # does not define __init__() or copy(). In addition to the four base
+ # methods, progressively more efficiency comes with defining
+ # __contains__(), __iter__(), and iteritems().
+
+ # second level definitions support higher levels
+ def __iter__(self):
+ for k in self.keys():
+ yield k
+ def has_key(self, key):
+ try:
+ value = self[key]
+ except KeyError:
+ return False
+ return True
+ def __contains__(self, key):
+ return self.has_key(key)
+
+ # third level takes advantage of second level definitions
+ def iteritems(self):
+ for k in self:
+ yield (k, self[k])
+ def iterkeys(self):
+ return self.__iter__()
+
+ # fourth level uses definitions from lower levels
+ def itervalues(self):
+ for _, v in self.iteritems():
+ yield v
+ def values(self):
+ return [v for _, v in self.iteritems()]
+ def items(self):
+ return list(self.iteritems())
+ def clear(self):
+ for key in self.keys():
+ del self[key]
+ def setdefault(self, key, default):
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+ def pop(self, key, *args):
+ if len(args) > 1:
+ raise TypeError, "pop expected at most 2 arguments, got "\
+ + repr(1 + len(args))
+ try:
+ value = self[key]
+ except KeyError:
+ if args:
+ return args[0]
+ raise
+ del self[key]
+ return value
+ def popitem(self):
+ try:
+ k, v = self.iteritems().next()
+ except StopIteration:
+ raise KeyError, 'container is empty'
+ del self[k]
+ return (k, v)
+ def update(self, other):
+ # Make progressively weaker assumptions about "other"
+ if hasattr(other, 'iteritems'): # iteritems saves memory and lookups
+ for k, v in other.iteritems():
+ self[k] = v
+ elif hasattr(other, '__iter__'): # iter saves memory
+ for k in other:
+ self[k] = other[k]
+ else:
+ for k in other.keys():
+ self[k] = other[k]
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+ def __repr__(self):
+ return repr(dict(self.iteritems()))
+ def __cmp__(self, other):
+ if other is None:
+ return 1
+ if isinstance(other, DictMixin):
+ other = dict(other.iteritems())
+ return cmp(dict(self.iteritems()), other)
+ def __len__(self):
+ return len(self.keys())
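
As the header comment in DictMixin above says, the mixin supplies the full mapping
interface to any class that defines the four primitives (__getitem__, __setitem__,
__delitem__, and keys). For illustration, a toy class providing only those four
methods picks up get, items, pop, __contains__, and the rest for free (this assumes
the UserDict module above, or the Python 2 standard library copy, is importable)::

    from UserDict import DictMixin

    class SeqDict(DictMixin):
        # A toy mapping stored as two parallel lists.
        def __init__(self):
            self._keys, self._values = [], []
        def __getitem__(self, key):
            try:
                return self._values[self._keys.index(key)]
            except ValueError:
                raise KeyError(key)
        def __setitem__(self, key, value):
            try:
                self._values[self._keys.index(key)] = value
            except ValueError:
                self._keys.append(key)
                self._values.append(value)
        def __delitem__(self, key):
            try:
                i = self._keys.index(key)
            except ValueError:
                raise KeyError(key)
            del self._keys[i]
            del self._values[i]
        def keys(self):
            return list(self._keys)

    d = SeqDict()
    d['a'] = 1
    d['b'] = 2
    assert d.get('a') == 1                            # get() comes from DictMixin
    assert 'b' in d                                   # __contains__ via has_key()
    assert sorted(d.items()) == [('a', 1), ('b', 2)]  # items() via iteritems()/__iter__()
    assert d.pop('a') == 1 and 'a' not in d           # pop() built on the primitives
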
diff --git a/paste/3rd-party/new_python/python/__init__.py b/paste/3rd-party/new_python/python/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/3rd-party/new_python/python/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/3rd-party/new_python/python/doctest.py b/paste/3rd-party/new_python/python/doctest.py
new file mode 100644
index 0000000..0a13d77
--- /dev/null
+++ b/paste/3rd-party/new_python/python/doctest.py
@@ -0,0 +1,2665 @@
+# Module doctest.
+# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
+# Major enhancements and refactoring by:
+# Jim Fulton
+# Edward Loper
+
+# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
+
+r"""Module doctest -- a framework for running examples in docstrings.
+
+In simplest use, end each module M to be tested with:
+
+def _test():
+ import doctest
+ doctest.testmod()
+
+if __name__ == "__main__":
+ _test()
+
+Then running the module as a script will cause the examples in the
+docstrings to get executed and verified:
+
+python M.py
+
+This won't display anything unless an example fails, in which case the
+failing example(s) and the cause(s) of the failure(s) are printed to stdout
+(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
+line of output is "Test failed.".
+
+Run it with the -v switch instead:
+
+python M.py -v
+
+and a detailed report of all examples tried is printed to stdout, along
+with assorted summaries at the end.
+
+You can force verbose mode by passing "verbose=True" to testmod, or prohibit
+it by passing "verbose=False". In either of those cases, sys.argv is not
+examined by testmod.
+
+There are a variety of other ways to run doctests, including integration
+with the unittest framework, and support for running non-Python text
+files containing doctests. There are also many ways to override parts
+of doctest's default behaviors. See the Library Reference Manual for
+details.
+"""
+
+__docformat__ = 'reStructuredText en'
+
+__all__ = [
+ # 0, Option Flags
+ 'register_optionflag',
+ 'DONT_ACCEPT_TRUE_FOR_1',
+ 'DONT_ACCEPT_BLANKLINE',
+ 'NORMALIZE_WHITESPACE',
+ 'ELLIPSIS',
+ 'IGNORE_EXCEPTION_DETAIL',
+ 'COMPARISON_FLAGS',
+ 'REPORT_UDIFF',
+ 'REPORT_CDIFF',
+ 'REPORT_NDIFF',
+ 'REPORT_ONLY_FIRST_FAILURE',
+ 'REPORTING_FLAGS',
+ # 1. Utility Functions
+ 'is_private',
+ # 2. Example & DocTest
+ 'Example',
+ 'DocTest',
+ # 3. Doctest Parser
+ 'DocTestParser',
+ # 4. Doctest Finder
+ 'DocTestFinder',
+ # 5. Doctest Runner
+ 'DocTestRunner',
+ 'OutputChecker',
+ 'DocTestFailure',
+ 'UnexpectedException',
+ 'DebugRunner',
+ # 6. Test Functions
+ 'testmod',
+ 'testfile',
+ 'run_docstring_examples',
+ # 7. Tester
+ 'Tester',
+ # 8. Unittest Support
+ 'DocTestSuite',
+ 'DocFileSuite',
+ 'set_unittest_reportflags',
+ # 9. Debugging Support
+ 'script_from_examples',
+ 'testsource',
+ 'debug_src',
+ 'debug',
+]
+
+import __future__
+
+import sys, traceback, inspect, linecache, os, re, types
+import unittest, difflib, pdb, tempfile
+import warnings
+from StringIO import StringIO
+
+# Don't whine about the deprecated is_private function in this
+# module's tests.
+warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
+ __name__, 0)
+
+# There are 4 basic classes:
+# - Example: a <source, want> pair, plus an intra-docstring line number.
+# - DocTest: a collection of examples, parsed from a docstring, plus
+# info about where the docstring came from (name, filename, lineno).
+# - DocTestFinder: extracts DocTests from a given object's docstring and
+# its contained objects' docstrings.
+# - DocTestRunner: runs DocTest cases, and accumulates statistics.
+#
+# So the basic picture is:
+#
+# list of:
+# +------+ +---------+ +-------+
+# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
+# +------+ +---------+ +-------+
+# | Example |
+# | ... |
+# | Example |
+# +---------+
+
+# Option constants.
+
+OPTIONFLAGS_BY_NAME = {}
+def register_optionflag(name):
+ flag = 1 << len(OPTIONFLAGS_BY_NAME)
+ OPTIONFLAGS_BY_NAME[name] = flag
+ return flag
+
+DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
+DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
+NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
+ELLIPSIS = register_optionflag('ELLIPSIS')
+IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
+
+COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
+ DONT_ACCEPT_BLANKLINE |
+ NORMALIZE_WHITESPACE |
+ ELLIPSIS |
+ IGNORE_EXCEPTION_DETAIL)
+
+REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
+REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
+REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
+REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
+
+REPORTING_FLAGS = (REPORT_UDIFF |
+ REPORT_CDIFF |
+ REPORT_NDIFF |
+ REPORT_ONLY_FIRST_FAILURE)
+
+# Special string markers for use in `want` strings:
+BLANKLINE_MARKER = '<BLANKLINE>'
+ELLIPSIS_MARKER = '...'
+
+######################################################################
+## Table of Contents
+######################################################################
+# 1. Utility Functions
+# 2. Example & DocTest -- store test cases
+# 3. DocTest Parser -- extracts examples from strings
+# 4. DocTest Finder -- extracts test cases from objects
+# 5. DocTest Runner -- runs test cases
+# 6. Test Functions -- convenient wrappers for testing
+# 7. Tester Class -- for backwards compatibility
+# 8. Unittest Support
+# 9. Debugging Support
+# 10. Example Usage
+
+######################################################################
+## 1. Utility Functions
+######################################################################
+
+def is_private(prefix, base):
+ """prefix, base -> true iff name prefix + "." + base is "private".
+
+ Prefix may be an empty string, and base does not contain a period.
+ Prefix is ignored (although functions you write conforming to this
+ protocol may make use of it).
+ Return true iff base begins with an (at least one) underscore, but
+ does not both begin and end with (at least) two underscores.
+
+ >>> is_private("a.b", "my_func")
+ False
+ >>> is_private("____", "_my_func")
+ True
+ >>> is_private("someclass", "__init__")
+ False
+ >>> is_private("sometypo", "__init_")
+ True
+ >>> is_private("x.y.z", "_")
+ True
+ >>> is_private("_x.y.z", "__")
+ False
+ >>> is_private("", "") # senseless but consistent
+ False
+ """
+ warnings.warn("is_private is deprecated; it wasn't useful; "
+ "examine DocTestFinder.find() lists instead",
+ DeprecationWarning, stacklevel=2)
+ return base[:1] == "_" and not base[:2] == "__" == base[-2:]
+
+def _extract_future_flags(globs):
+ """
+ Return the compiler-flags associated with the future features that
+ have been imported into the given namespace (globs).
+ """
+ flags = 0
+ for fname in __future__.all_feature_names:
+ feature = globs.get(fname, None)
+ if feature is getattr(__future__, fname):
+ flags |= feature.compiler_flag
+ return flags
+
+def _normalize_module(module, depth=2):
+ """
+ Return the module specified by `module`. In particular:
+ - If `module` is a module, then return module.
+ - If `module` is a string, then import and return the
+ module with that name.
+ - If `module` is None, then return the calling module.
+ The calling module is assumed to be the module of
+ the stack frame at the given depth in the call stack.
+ """
+ if inspect.ismodule(module):
+ return module
+ elif isinstance(module, (str, unicode)):
+ return __import__(module, globals(), locals(), ["*"])
+ elif module is None:
+ return sys.modules[sys._getframe(depth).f_globals['__name__']]
+ else:
+ raise TypeError("Expected a module, string, or None")
+
+def _indent(s, indent=4):
+ """
+ Add the given number of space characters to the beginning every
+ non-blank line in `s`, and return the result.
+ """
+ # This regexp matches the start of non-blank lines:
+ return re.sub('(?m)^(?!$)', indent*' ', s)
+
+def _exception_traceback(exc_info):
+ """
+ Return a string containing a traceback message for the given
+ exc_info tuple (as returned by sys.exc_info()).
+ """
+ # Get a traceback message.
+ excout = StringIO()
+ exc_type, exc_val, exc_tb = exc_info
+ traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
+ return excout.getvalue()
+
+# Override some StringIO methods.
+class _SpoofOut(StringIO):
+ def getvalue(self):
+ result = StringIO.getvalue(self)
+ # If anything at all was written, make sure there's a trailing
+ # newline. There's no way for the expected output to indicate
+ # that a trailing newline is missing.
+ if result and not result.endswith("\n"):
+ result += "\n"
+ # Prevent softspace from screwing up the next test case, in
+ # case they used print with a trailing comma in an example.
+ if hasattr(self, "softspace"):
+ del self.softspace
+ return result
+
+ def truncate(self, size=None):
+ StringIO.truncate(self, size)
+ if hasattr(self, "softspace"):
+ del self.softspace
+
+# Worst-case linear-time ellipsis matching.
+def _ellipsis_match(want, got):
+ """
+ Essentially the only subtle case:
+ >>> _ellipsis_match('aa...aa', 'aaa')
+ False
+ """
+ if ELLIPSIS_MARKER not in want:
+ return want == got
+
+ # Find "the real" strings.
+ ws = want.split(ELLIPSIS_MARKER)
+ assert len(ws) >= 2
+
+ # Deal with exact matches possibly needed at one or both ends.
+ startpos, endpos = 0, len(got)
+ w = ws[0]
+ if w: # starts with exact match
+ if got.startswith(w):
+ startpos = len(w)
+ del ws[0]
+ else:
+ return False
+ w = ws[-1]
+ if w: # ends with exact match
+ if got.endswith(w):
+ endpos -= len(w)
+ del ws[-1]
+ else:
+ return False
+
+ if startpos > endpos:
+ # Exact end matches required more characters than we have, as in
+ # _ellipsis_match('aa...aa', 'aaa')
+ return False
+
+ # For the rest, we only need to find the leftmost non-overlapping
+ # match for each piece. If there's no overall match that way alone,
+ # there's no overall match period.
+ for w in ws:
+ # w may be '' at times, if there are consecutive ellipses, or
+ # due to an ellipsis at the start or end of `want`. That's OK.
+ # Search for an empty string succeeds, and doesn't change startpos.
+ startpos = got.find(w, startpos, endpos)
+ if startpos < 0:
+ return False
+ startpos += len(w)
+
+ return True
+
+def _comment_line(line):
+ "Return a commented form of the given line"
+ line = line.rstrip()
+ if line:
+ return '# '+line
+ else:
+ return '#'
+
+class _OutputRedirectingPdb(pdb.Pdb):
+ """
+ A specialized version of the python debugger that redirects stdout
+ to a given stream when interacting with the user. Stdout is *not*
+ redirected when traced code is executed.
+ """
+ def __init__(self, out):
+ self.__out = out
+ pdb.Pdb.__init__(self)
+
+ def trace_dispatch(self, *args):
+ # Redirect stdout to the given stream.
+ save_stdout = sys.stdout
+ sys.stdout = self.__out
+ # Call Pdb's trace dispatch method.
+ try:
+ return pdb.Pdb.trace_dispatch(self, *args)
+ finally:
+ sys.stdout = save_stdout
+
+# [XX] Normalize with respect to os.path.pardir?
+def _module_relative_path(module, path):
+ if not inspect.ismodule(module):
+ raise TypeError, 'Expected a module: %r' % module
+ if path.startswith('/'):
+ raise ValueError, 'Module-relative files may not have absolute paths'
+
+ # Find the base directory for the path.
+ if hasattr(module, '__file__'):
+ # A normal module/package
+ basedir = os.path.split(module.__file__)[0]
+ elif module.__name__ == '__main__':
+ # An interactive session.
+ if len(sys.argv)>0 and sys.argv[0] != '':
+ basedir = os.path.split(sys.argv[0])[0]
+ else:
+ basedir = os.curdir
+ else:
+ # A module w/o __file__ (this includes builtins)
+ raise ValueError("Can't resolve paths relative to the module " +
+ module + " (it has no __file__)")
+
+ # Combine the base directory and the path.
+ return os.path.join(basedir, *(path.split('/')))
+
+######################################################################
+## 2. Example & DocTest
+######################################################################
+## - An "example" is a <source, want> pair, where "source" is a
+## fragment of source code, and "want" is the expected output for
+## "source." The Example class also includes information about
+## where the example was extracted from.
+##
+## - A "doctest" is a collection of examples, typically extracted from
+## a string (such as an object's docstring). The DocTest class also
+## includes information about where the string was extracted from.
+
+class Example:
+ """
+ A single doctest example, consisting of source code and expected
+ output. `Example` defines the following attributes:
+
+ - source: A single Python statement, always ending with a newline.
+ The constructor adds a newline if needed.
+
+ - want: The expected output from running the source code (either
+ from stdout, or a traceback in case of exception). `want` ends
+ with a newline unless it's empty, in which case it's an empty
+ string. The constructor adds a newline if needed.
+
+ - exc_msg: The exception message generated by the example, if
+ the example is expected to generate an exception; or `None` if
+ it is not expected to generate an exception. This exception
+ message is compared against the return value of
+ `traceback.format_exception_only()`. `exc_msg` ends with a
+ newline unless it's `None`. The constructor adds a newline
+ if needed.
+
+ - lineno: The line number within the DocTest string containing
+ this Example where the Example begins. This line number is
+ zero-based, with respect to the beginning of the DocTest.
+
+ - indent: The example's indentation in the DocTest string.
+      I.e., the number of space characters that precede the
+ example's first prompt.
+
+ - options: A dictionary mapping from option flags to True or
+ False, which is used to override default options for this
+ example. Any option flags not contained in this dictionary
+ are left at their default value (as specified by the
+ DocTestRunner's optionflags). By default, no options are set.
+ """
+ def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
+ options=None):
+ # Normalize inputs.
+ if not source.endswith('\n'):
+ source += '\n'
+ if want and not want.endswith('\n'):
+ want += '\n'
+ if exc_msg is not None and not exc_msg.endswith('\n'):
+ exc_msg += '\n'
+ # Store properties.
+ self.source = source
+ self.want = want
+ self.lineno = lineno
+ self.indent = indent
+ if options is None: options = {}
+ self.options = options
+ self.exc_msg = exc_msg
+
+class DocTest:
+ """
+ A collection of doctest examples that should be run in a single
+ namespace. Each `DocTest` defines the following attributes:
+
+ - examples: the list of examples.
+
+ - globs: The namespace (aka globals) that the examples should
+ be run in.
+
+ - name: A name identifying the DocTest (typically, the name of
+ the object whose docstring this DocTest was extracted from).
+
+ - filename: The name of the file that this DocTest was extracted
+ from, or `None` if the filename is unknown.
+
+ - lineno: The line number within filename where this DocTest
+ begins, or `None` if the line number is unavailable. This
+ line number is zero-based, with respect to the beginning of
+ the file.
+
+ - docstring: The string that the examples were extracted from,
+ or `None` if the string is unavailable.
+ """
+ def __init__(self, examples, globs, name, filename, lineno, docstring):
+ """
+ Create a new DocTest containing the given examples. The
+ DocTest's globals are initialized with a copy of `globs`.
+ """
+ assert not isinstance(examples, basestring), \
+ "DocTest no longer accepts str; use DocTestParser instead"
+ self.examples = examples
+ self.docstring = docstring
+ self.globs = globs.copy()
+ self.name = name
+ self.filename = filename
+ self.lineno = lineno
+
+ def __repr__(self):
+ if len(self.examples) == 0:
+ examples = 'no examples'
+ elif len(self.examples) == 1:
+ examples = '1 example'
+ else:
+ examples = '%d examples' % len(self.examples)
+ return ('<DocTest %s from %s:%s (%s)>' %
+ (self.name, self.filename, self.lineno, examples))
+
+
+ # This lets us sort tests by name:
+ def __cmp__(self, other):
+ if not isinstance(other, DocTest):
+ return -1
+ return cmp((self.name, self.filename, self.lineno, id(self)),
+ (other.name, other.filename, other.lineno, id(other)))
+
+######################################################################
+## 3. DocTestParser
+######################################################################
+
+class DocTestParser:
+ """
+ A class used to parse strings containing doctest examples.
+ """
+ # This regular expression is used to find doctest examples in a
+ # string. It defines three groups: `source` is the source code
+ # (including leading indentation and prompts); `indent` is the
+ # indentation of the first (PS1) line of the source code; and
+ # `want` is the expected output (including leading indentation).
+ _EXAMPLE_RE = re.compile(r'''
+ # Source consists of a PS1 line followed by zero or more PS2 lines.
+ (?P<source>
+ (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
+ (?:\n [ ]* \.\.\. .*)*) # PS2 lines
+ \n?
+ # Want consists of any non-blank lines that do not start with PS1.
+ (?P<want> (?:(?![ ]*$) # Not a blank line
+ (?![ ]*>>>) # Not a line starting with PS1
+ .*$\n? # But any other line
+ )*)
+ ''', re.MULTILINE | re.VERBOSE)
+
+ # A regular expression for handling `want` strings that contain
+ # expected exceptions. It divides `want` into three pieces:
+ # - the traceback header line (`hdr`)
+ # - the traceback stack (`stack`)
+ # - the exception message (`msg`), as generated by
+ # traceback.format_exception_only()
+ # `msg` may have multiple lines. We assume/require that the
+ # exception message is the first non-indented line starting with a word
+ # character following the traceback header line.
+ _EXCEPTION_RE = re.compile(r"""
+ # Grab the traceback header. Different versions of Python have
+ # said different things on the first traceback line.
+ ^(?P<hdr> Traceback\ \(
+ (?: most\ recent\ call\ last
+ | innermost\ last
+ ) \) :
+ )
+ \s* $ # toss trailing whitespace on the header.
+ (?P<stack> .*?) # don't blink: absorb stuff until...
+ ^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
+ """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+
+ # A callable returning a true value iff its argument is a blank line
+ # or contains a single comment.
+ _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
+
+ def parse(self, string, name='<string>'):
+ """
+ Divide the given string into examples and intervening text,
+ and return them as a list of alternating Examples and strings.
+ Line numbers for the Examples are 0-based. The optional
+ argument `name` is a name identifying this string, and is only
+ used for error messages.
+ """
+ string = string.expandtabs()
+ # If all lines begin with the same indentation, then strip it.
+ min_indent = self._min_indent(string)
+ if min_indent > 0:
+ string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+ output = []
+ charno, lineno = 0, 0
+ # Find all doctest examples in the string:
+ for m in self._EXAMPLE_RE.finditer(string):
+ # Add the pre-example text to `output`.
+ output.append(string[charno:m.start()])
+ # Update lineno (lines before this example)
+ lineno += string.count('\n', charno, m.start())
+ # Extract info from the regexp match.
+ (source, options, want, exc_msg) = \
+ self._parse_example(m, name, lineno)
+ # Create an Example, and add it to the list.
+ if not self._IS_BLANK_OR_COMMENT(source):
+ output.append( Example(source, want, exc_msg,
+ lineno=lineno,
+ indent=min_indent+len(m.group('indent')),
+ options=options) )
+ # Update lineno (lines inside this example)
+ lineno += string.count('\n', m.start(), m.end())
+ # Update charno.
+ charno = m.end()
+ # Add any remaining post-example text to `output`.
+ output.append(string[charno:])
+ return output
+
+ def get_doctest(self, string, globs, name, filename, lineno):
+ """
+ Extract all doctest examples from the given string, and
+ collect them into a `DocTest` object.
+
+ `globs`, `name`, `filename`, and `lineno` are attributes for
+ the new `DocTest` object. See the documentation for `DocTest`
+ for more information.
+ """
+ return DocTest(self.get_examples(string, name), globs,
+ name, filename, lineno, string)
+
+ def get_examples(self, string, name='<string>'):
+ """
+ Extract all doctest examples from the given string, and return
+ them as a list of `Example` objects. Line numbers are
+ 0-based, because it's most common in doctests that nothing
+ interesting appears on the same line as opening triple-quote,
+ and so the first interesting line is called \"line 1\" then.
+
+ The optional argument `name` is a name identifying this
+ string, and is only used for error messages.
+ """
+ return [x for x in self.parse(string, name)
+ if isinstance(x, Example)]
+
+ def _parse_example(self, m, name, lineno):
+ """
+ Given a regular expression match from `_EXAMPLE_RE` (`m`),
+ return a pair `(source, want)`, where `source` is the matched
+ example's source code (with prompts and indentation stripped);
+ and `want` is the example's expected output (with indentation
+ stripped).
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+ """
+ # Get the example's indentation level.
+ indent = len(m.group('indent'))
+
+ # Divide source into lines; check that they're properly
+ # indented; and then strip their indentation & prompts.
+ source_lines = m.group('source').split('\n')
+ self._check_prompt_blank(source_lines, indent, name, lineno)
+ self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
+ source = '\n'.join([sl[indent+4:] for sl in source_lines])
+
+ # Divide want into lines; check that it's properly indented; and
+ # then strip the indentation. Spaces before the last newline should
+ # be preserved, so plain rstrip() isn't good enough.
+ want = m.group('want')
+ want_lines = want.split('\n')
+ if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
+ del want_lines[-1] # forget final newline & spaces after it
+ self._check_prefix(want_lines, ' '*indent, name,
+ lineno + len(source_lines))
+ want = '\n'.join([wl[indent:] for wl in want_lines])
+
+ # If `want` contains a traceback message, then extract it.
+ m = self._EXCEPTION_RE.match(want)
+ if m:
+ exc_msg = m.group('msg')
+ else:
+ exc_msg = None
+
+ # Extract options from the source.
+ options = self._find_options(source, name, lineno)
+
+ return source, options, want, exc_msg
+
+ # This regular expression looks for option directives in the
+ # source code of an example. Option directives are comments
+ # starting with "doctest:". Warning: this may give false
+ # positives for string-literals that contain the string
+ # "#doctest:". Eliminating these false positives would require
+ # actually parsing the string; but we limit them by ignoring any
+ # line containing "#doctest:" that is *followed* by a quote mark.
+ _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
+ re.MULTILINE)
+
+ def _find_options(self, source, name, lineno):
+ """
+ Return a dictionary containing option overrides extracted from
+ option directives in the given source string.
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+ """
+ options = {}
+ # (note: with the current regexp, this will match at most once:)
+ for m in self._OPTION_DIRECTIVE_RE.finditer(source):
+ option_strings = m.group(1).replace(',', ' ').split()
+ for option in option_strings:
+ if (option[0] not in '+-' or
+ option[1:] not in OPTIONFLAGS_BY_NAME):
+ raise ValueError('line %r of the doctest for %s '
+ 'has an invalid option: %r' %
+ (lineno+1, name, option))
+ flag = OPTIONFLAGS_BY_NAME[option[1:]]
+ options[flag] = (option[0] == '+')
+ if options and self._IS_BLANK_OR_COMMENT(source):
+ raise ValueError('line %r of the doctest for %s has an option '
+ 'directive on a line with no example: %r' %
+ (lineno, name, source))
+ return options
+
+ # This regular expression finds the indentation of every non-blank
+ # line in a string.
+ _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
+
+ def _min_indent(self, s):
+ "Return the minimum indentation of any non-blank line in `s`"
+ indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
+ if len(indents) > 0:
+ return min(indents)
+ else:
+ return 0
+
+ def _check_prompt_blank(self, lines, indent, name, lineno):
+ """
+ Given the lines of a source string (including prompts and
+ leading indentation), check to make sure that every prompt is
+ followed by a space character. If any line is not followed by
+ a space character, then raise ValueError.
+ """
+ for i, line in enumerate(lines):
+ if len(line) >= indent+4 and line[indent+3] != ' ':
+ raise ValueError('line %r of the docstring for %s '
+ 'lacks blank after %s: %r' %
+ (lineno+i+1, name,
+ line[indent:indent+3], line))
+
+ def _check_prefix(self, lines, prefix, name, lineno):
+ """
+ Check that every line in the given list starts with the given
+ prefix; if any line does not, then raise a ValueError.
+ """
+ for i, line in enumerate(lines):
+ if line and not line.startswith(prefix):
+ raise ValueError('line %r of the docstring for %s has '
+ 'inconsistent leading whitespace: %r' %
+ (lineno+i+1, name, line))
+
+
+######################################################################
+## 4. DocTest Finder
+######################################################################
+
+class DocTestFinder:
+ """
+ A class used to extract the DocTests that are relevant to a given
+ object, from its docstring and the docstrings of its contained
+ objects. Doctests can currently be extracted from the following
+ object types: modules, functions, classes, methods, staticmethods,
+ classmethods, and properties.
+ """
+
+ def __init__(self, verbose=False, parser=DocTestParser(),
+ recurse=True, _namefilter=None, exclude_empty=True):
+ """
+ Create a new doctest finder.
+
+ The optional argument `parser` specifies a class or
+ function that should be used to create new DocTest objects (or
+ objects that implement the same interface as DocTest). The
+ signature for this factory function should match the signature
+ of the DocTest constructor.
+
+ If the optional argument `recurse` is false, then `find` will
+ only examine the given object, and not any contained objects.
+
+ If the optional argument `exclude_empty` is false, then `find`
+ will include tests for objects with empty docstrings.
+ """
+ self._parser = parser
+ self._verbose = verbose
+ self._recurse = recurse
+ self._exclude_empty = exclude_empty
+ # _namefilter is undocumented, and exists only for temporary backward-
+ # compatibility support of testmod's deprecated isprivate mess.
+ self._namefilter = _namefilter
+
+ def find(self, obj, name=None, module=None, globs=None,
+ extraglobs=None):
+ """
+ Return a list of the DocTests that are defined by the given
+ object's docstring, or by any of its contained objects'
+ docstrings.
+
+ The optional parameter `module` is the module that contains
+ the given object. If the module is not specified or is None, then
+ the test finder will attempt to automatically determine the
+ correct module. The object's module is used:
+
+ - As a default namespace, if `globs` is not specified.
+ - To prevent the DocTestFinder from extracting DocTests
+ from objects that are imported from other modules.
+ - To find the name of the file containing the object.
+ - To help find the line number of the object within its
+ file.
+
+ Contained objects whose module does not match `module` are ignored.
+
+ If `module` is False, no attempt to find the module will be made.
+ This is obscure, of use mostly in tests: if `module` is False, or
+ is None but cannot be found automatically, then all objects are
+ considered to belong to the (non-existent) module, so all contained
+ objects will (recursively) be searched for doctests.
+
+ The globals for each DocTest is formed by combining `globs`
+ and `extraglobs` (bindings in `extraglobs` override bindings
+ in `globs`). A new copy of the globals dictionary is created
+ for each DocTest. If `globs` is not specified, then it
+ defaults to the module's `__dict__`, if specified, or {}
+ otherwise. If `extraglobs` is not specified, then it defaults
+ to {}.
+
+ """
+ # If name was not specified, then extract it from the object.
+ if name is None:
+ name = getattr(obj, '__name__', None)
+ if name is None:
+ raise ValueError("DocTestFinder.find: name must be given "
+ "when obj.__name__ doesn't exist: %r" %
+ (type(obj),))
+
+ # Find the module that contains the given object (if obj is
+ # a module, then module=obj.). Note: this may fail, in which
+ # case module will be None.
+ if module is False:
+ module = None
+ elif module is None:
+ module = inspect.getmodule(obj)
+
+ # Read the module's source code. This is used by
+ # DocTestFinder._find_lineno to find the line number for a
+ # given object's docstring.
+ try:
+ file = inspect.getsourcefile(obj) or inspect.getfile(obj)
+ source_lines = linecache.getlines(file)
+ if not source_lines:
+ source_lines = None
+ except TypeError:
+ source_lines = None
+
+ # Initialize globals, and merge in extraglobs.
+ if globs is None:
+ if module is None:
+ globs = {}
+ else:
+ globs = module.__dict__.copy()
+ else:
+ globs = globs.copy()
+ if extraglobs is not None:
+ globs.update(extraglobs)
+
+        # Recursively explore `obj`, extracting DocTests.
+ tests = []
+ self._find(tests, obj, name, module, source_lines, globs, {})
+ return tests
+
+ def _filter(self, obj, prefix, base):
+ """
+ Return true if the given object should not be examined.
+ """
+ return (self._namefilter is not None and
+ self._namefilter(prefix, base))
+
+ def _from_module(self, module, object):
+ """
+ Return true if the given object is defined in the given
+ module.
+ """
+ if module is None:
+ return True
+ elif inspect.isfunction(object):
+ return module.__dict__ is object.func_globals
+ elif inspect.isclass(object):
+ return module.__name__ == object.__module__
+ elif inspect.getmodule(object) is not None:
+ return module is inspect.getmodule(object)
+ elif hasattr(object, '__module__'):
+ return module.__name__ == object.__module__
+ elif isinstance(object, property):
+ return True # [XX] no way not be sure.
+ else:
+ raise ValueError("object must be a class or function")
+
+ def _find(self, tests, obj, name, module, source_lines, globs, seen):
+ """
+ Find tests for the given object and any contained objects, and
+ add them to `tests`.
+ """
+ if self._verbose:
+ print 'Finding tests in %s' % name
+
+ # If we've already processed this object, then ignore it.
+ if id(obj) in seen:
+ return
+ seen[id(obj)] = 1
+
+ # Find a test for this object, and add it to the list of tests.
+ test = self._get_test(obj, name, module, globs, source_lines)
+ if test is not None:
+ tests.append(test)
+
+ # Look for tests in a module's contained objects.
+ if inspect.ismodule(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ # Check if this contained object should be ignored.
+ if self._filter(val, name, valname):
+ continue
+ valname = '%s.%s' % (name, valname)
+ # Recurse to functions & classes.
+ if ((inspect.isfunction(val) or inspect.isclass(val)) and
+ self._from_module(module, val)):
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a module's __test__ dictionary.
+ if inspect.ismodule(obj) and self._recurse:
+ for valname, val in getattr(obj, '__test__', {}).items():
+ if not isinstance(valname, basestring):
+ raise ValueError("DocTestFinder.find: __test__ keys "
+ "must be strings: %r" %
+ (type(valname),))
+ if not (inspect.isfunction(val) or inspect.isclass(val) or
+ inspect.ismethod(val) or inspect.ismodule(val) or
+ isinstance(val, basestring)):
+ raise ValueError("DocTestFinder.find: __test__ values "
+ "must be strings, functions, methods, "
+ "classes, or modules: %r" %
+ (type(val),))
+ valname = '%s.__test__.%s' % (name, valname)
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a class's contained objects.
+ if inspect.isclass(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ # Check if this contained object should be ignored.
+ if self._filter(val, name, valname):
+ continue
+ # Special handling for staticmethod/classmethod.
+ if isinstance(val, staticmethod):
+ val = getattr(obj, valname)
+ if isinstance(val, classmethod):
+ val = getattr(obj, valname).im_func
+
+ # Recurse to methods, properties, and nested classes.
+ if ((inspect.isfunction(val) or inspect.isclass(val) or
+ isinstance(val, property)) and
+ self._from_module(module, val)):
+ valname = '%s.%s' % (name, valname)
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ def _get_test(self, obj, name, module, globs, source_lines):
+ """
+ Return a DocTest for the given object, if it defines a docstring;
+ otherwise, return None.
+ """
+ # Extract the object's docstring. If it doesn't have one,
+ # then return None (no test for this object).
+ if isinstance(obj, basestring):
+ docstring = obj
+ else:
+ try:
+ if obj.__doc__ is None:
+ docstring = ''
+ else:
+ docstring = obj.__doc__
+ if not isinstance(docstring, basestring):
+ docstring = str(docstring)
+ except (TypeError, AttributeError):
+ docstring = ''
+
+ # Find the docstring's location in the file.
+ lineno = self._find_lineno(obj, source_lines)
+
+ # Don't bother if the docstring is empty.
+ if self._exclude_empty and not docstring:
+ return None
+
+ # Return a DocTest for this object.
+ if module is None:
+ filename = None
+ else:
+ filename = getattr(module, '__file__', module.__name__)
+ if filename[-4:] in (".pyc", ".pyo"):
+ filename = filename[:-1]
+ return self._parser.get_doctest(docstring, globs, name,
+ filename, lineno)
+
+ def _find_lineno(self, obj, source_lines):
+ """
+ Return a line number of the given object's docstring. Note:
+ this method assumes that the object has a docstring.
+ """
+ lineno = None
+
+ # Find the line number for modules.
+ if inspect.ismodule(obj):
+ lineno = 0
+
+ # Find the line number for classes.
+ # Note: this could be fooled if a class is defined multiple
+ # times in a single file.
+ if inspect.isclass(obj):
+ if source_lines is None:
+ return None
+ pat = re.compile(r'^\s*class\s*%s\b' %
+ getattr(obj, '__name__', '-'))
+ for i, line in enumerate(source_lines):
+ if pat.match(line):
+ lineno = i
+ break
+
+ # Find the line number for functions & methods.
+ if inspect.ismethod(obj): obj = obj.im_func
+ if inspect.isfunction(obj): obj = obj.func_code
+ if inspect.istraceback(obj): obj = obj.tb_frame
+ if inspect.isframe(obj): obj = obj.f_code
+ if inspect.iscode(obj):
+ lineno = getattr(obj, 'co_firstlineno', None)-1
+
+ # Find the line number where the docstring starts. Assume
+ # that it's the first line that begins with a quote mark.
+ # Note: this could be fooled by a multiline function
+ # signature, where a continuation line begins with a quote
+ # mark.
+ if lineno is not None:
+ if source_lines is None:
+ return lineno+1
+ pat = re.compile('(^|.*:)\s*\w*("|\')')
+ for lineno in range(lineno, len(source_lines)):
+ if pat.match(source_lines[lineno]):
+ return lineno
+
+ # We couldn't find the line number.
+ return None
+
+######################################################################
+## 5. DocTest Runner
+######################################################################
+
+class DocTestRunner:
+ """
+ A class used to run DocTest test cases, and accumulate statistics.
+ The `run` method is used to process a single DocTest case. It
+ returns a tuple `(f, t)`, where `t` is the number of test cases
+ tried, and `f` is the number of test cases that failed.
+
+ >>> tests = DocTestFinder().find(_TestClass)
+ >>> runner = DocTestRunner(verbose=False)
+ >>> for test in tests:
+ ... print runner.run(test)
+ (0, 2)
+ (0, 1)
+ (0, 2)
+ (0, 2)
+
+ The `summarize` method prints a summary of all the test cases that
+ have been run by the runner, and returns an aggregated `(f, t)`
+ tuple:
+
+ >>> runner.summarize(verbose=1)
+ 4 items passed all tests:
+ 2 tests in _TestClass
+ 2 tests in _TestClass.__init__
+ 2 tests in _TestClass.get
+ 1 tests in _TestClass.square
+ 7 tests in 4 items.
+ 7 passed and 0 failed.
+ Test passed.
+ (0, 7)
+
+ The aggregated number of tried examples and failed examples is
+ also available via the `tries` and `failures` attributes:
+
+ >>> runner.tries
+ 7
+ >>> runner.failures
+ 0
+
+ The comparison between expected outputs and actual outputs is done
+ by an `OutputChecker`. This comparison may be customized with a
+ number of option flags; see the documentation for `testmod` for
+ more information. If the option flags are insufficient, then the
+ comparison may also be customized by passing a subclass of
+ `OutputChecker` to the constructor.
+
+ The test runner's display output can be controlled in two ways.
+    First, an output function (`out`) can be passed to
+ `TestRunner.run`; this function will be called with strings that
+ should be displayed. It defaults to `sys.stdout.write`. If
+ capturing the output is not sufficient, then the display output
+ can be also customized by subclassing DocTestRunner, and
+ overriding the methods `report_start`, `report_success`,
+ `report_unexpected_exception`, and `report_failure`.
+ """
+ # This divider string is used to separate failure messages, and to
+ # separate sections of the summary.
+ DIVIDER = "*" * 70
+
+ def __init__(self, checker=None, verbose=None, optionflags=0):
+ """
+ Create a new test runner.
+
+ Optional keyword arg `checker` is the `OutputChecker` that
+ should be used to compare the expected outputs and actual
+ outputs of doctest examples.
+
+ Optional keyword arg 'verbose' prints lots of stuff if true,
+ only failures if false; by default, it's true iff '-v' is in
+ sys.argv.
+
+ Optional argument `optionflags` can be used to control how the
+ test runner compares expected output to actual output, and how
+ it displays failures. See the documentation for `testmod` for
+ more information.
+ """
+ self._checker = checker or OutputChecker()
+ if verbose is None:
+ verbose = '-v' in sys.argv
+ self._verbose = verbose
+ self.optionflags = optionflags
+ self.original_optionflags = optionflags
+
+ # Keep track of the examples we've run.
+ self.tries = 0
+ self.failures = 0
+ self._name2ft = {}
+
+ # Create a fake output target for capturing doctest output.
+ self._fakeout = _SpoofOut()
+
+ #/////////////////////////////////////////////////////////////////
+ # Reporting methods
+ #/////////////////////////////////////////////////////////////////
+
+ def report_start(self, out, test, example):
+ """
+ Report that the test runner is about to process the given
+ example. (Only displays a message if verbose=True)
+ """
+ if self._verbose:
+ if example.want:
+ out('Trying:\n' + _indent(example.source) +
+ 'Expecting:\n' + _indent(example.want))
+ else:
+ out('Trying:\n' + _indent(example.source) +
+ 'Expecting nothing\n')
+
+ def report_success(self, out, test, example, got):
+ """
+ Report that the given example ran successfully. (Only
+ displays a message if verbose=True)
+ """
+ if self._verbose:
+ out("ok\n")
+
+ def report_failure(self, out, test, example, got):
+ """
+ Report that the given example failed.
+ """
+ out(self._failure_header(test, example) +
+ self._checker.output_difference(example, got, self.optionflags))
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ """
+ Report that the given example raised an unexpected exception.
+ """
+ out(self._failure_header(test, example) +
+ 'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
+
+ def _failure_header(self, test, example):
+ out = [self.DIVIDER]
+ if test.filename:
+ if test.lineno is not None and example.lineno is not None:
+ lineno = test.lineno + example.lineno + 1
+ else:
+ lineno = '?'
+ out.append('File "%s", line %s, in %s' %
+ (test.filename, lineno, test.name))
+ else:
+ out.append('Line %s, in %s' % (example.lineno+1, test.name))
+ out.append('Failed example:')
+ source = example.source
+ out.append(_indent(source))
+ return '\n'.join(out)
+
+ #/////////////////////////////////////////////////////////////////
+ # DocTest Running
+ #/////////////////////////////////////////////////////////////////
+
+ def __run(self, test, compileflags, out):
+ """
+ Run the examples in `test`. Write the outcome of each example
+ with one of the `DocTestRunner.report_*` methods, using the
+ writer function `out`. `compileflags` is the set of compiler
+ flags that should be used to execute examples. Return a tuple
+ `(f, t)`, where `t` is the number of examples tried, and `f`
+ is the number of examples that failed. The examples are run
+ in the namespace `test.globs`.
+ """
+ # Keep track of the number of failures and tries.
+ failures = tries = 0
+
+ # Save the option flags (since option directives can be used
+ # to modify them).
+ original_optionflags = self.optionflags
+
+ SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
+
+ check = self._checker.check_output
+
+ # Process each example.
+ for examplenum, example in enumerate(test.examples):
+
+            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
+ # reporting after the first failure.
+ quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
+ failures > 0)
+
+ # Merge in the example's options.
+ self.optionflags = original_optionflags
+ if example.options:
+ for (optionflag, val) in example.options.items():
+ if val:
+ self.optionflags |= optionflag
+ else:
+ self.optionflags &= ~optionflag
+
+ # Record that we started this example.
+ tries += 1
+ if not quiet:
+ self.report_start(out, test, example)
+
+ # Use a special filename for compile(), so we can retrieve
+ # the source code during interactive debugging (see
+ # __patched_linecache_getlines).
+ filename = '<doctest %s[%d]>' % (test.name, examplenum)
+
+ # Run the example in the given context (globs), and record
+ # any exception that gets raised. (But don't intercept
+ # keyboard interrupts.)
+ try:
+ # Don't blink! This is where the user's code gets run.
+ exec compile(example.source, filename, "single",
+ compileflags, 1) in test.globs
+ self.debugger.set_continue() # ==== Example Finished ====
+ exception = None
+ except KeyboardInterrupt:
+ raise
+ except:
+ exception = sys.exc_info()
+ self.debugger.set_continue() # ==== Example Finished ====
+
+ got = self._fakeout.getvalue() # the actual output
+ self._fakeout.truncate(0)
+ outcome = FAILURE # guilty until proved innocent or insane
+
+ # If the example executed without raising any exceptions,
+ # verify its output.
+ if exception is None:
+ if check(example.want, got, self.optionflags):
+ outcome = SUCCESS
+
+ # The example raised an exception: check if it was expected.
+ else:
+ exc_info = sys.exc_info()
+ exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
+ if not quiet:
+ got += _exception_traceback(exc_info)
+
+ # If `example.exc_msg` is None, then we weren't expecting
+ # an exception.
+ if example.exc_msg is None:
+ outcome = BOOM
+
+ # We expected an exception: see whether it matches.
+ elif check(example.exc_msg, exc_msg, self.optionflags):
+ outcome = SUCCESS
+
+ # Another chance if they didn't care about the detail.
+ elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
+ m1 = re.match(r'[^:]*:', example.exc_msg)
+ m2 = re.match(r'[^:]*:', exc_msg)
+ if m1 and m2 and check(m1.group(0), m2.group(0),
+ self.optionflags):
+ outcome = SUCCESS
+
+ # Report the outcome.
+ if outcome is SUCCESS:
+ if not quiet:
+ self.report_success(out, test, example, got)
+ elif outcome is FAILURE:
+ if not quiet:
+ self.report_failure(out, test, example, got)
+ failures += 1
+ elif outcome is BOOM:
+ if not quiet:
+ self.report_unexpected_exception(out, test, example,
+ exc_info)
+ failures += 1
+ else:
+ assert False, ("unknown outcome", outcome)
+
+ # Restore the option flags (in case they were modified)
+ self.optionflags = original_optionflags
+
+ # Record and return the number of failures and tries.
+ self.__record_outcome(test, failures, tries)
+ return failures, tries
+
+ def __record_outcome(self, test, f, t):
+ """
+ Record the fact that the given DocTest (`test`) generated `f`
+ failures out of `t` tried examples.
+ """
+ f2, t2 = self._name2ft.get(test.name, (0,0))
+ self._name2ft[test.name] = (f+f2, t+t2)
+ self.failures += f
+ self.tries += t
+
+ __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
+ r'(?P<name>[\w\.]+)'
+ r'\[(?P<examplenum>\d+)\]>$')
+ def __patched_linecache_getlines(self, filename):
+ m = self.__LINECACHE_FILENAME_RE.match(filename)
+ if m and m.group('name') == self.test.name:
+ example = self.test.examples[int(m.group('examplenum'))]
+ return example.source.splitlines(True)
+ else:
+ return self.save_linecache_getlines(filename)
+
+ def run(self, test, compileflags=None, out=None, clear_globs=True):
+ """
+ Run the examples in `test`, and display the results using the
+ writer function `out`.
+
+ The examples are run in the namespace `test.globs`. If
+ `clear_globs` is true (the default), then this namespace will
+ be cleared after the test runs, to help with garbage
+ collection. If you would like to examine the namespace after
+ the test completes, then use `clear_globs=False`.
+
+ `compileflags` gives the set of flags that should be used by
+ the Python compiler when running the examples. If not
+ specified, then it will default to the set of future-import
+ flags that apply to `globs`.
+
+ The output of each example is checked using
+ `DocTestRunner.check_output`, and the results are formatted by
+ the `DocTestRunner.report_*` methods.
+ """
+ self.test = test
+
+ if compileflags is None:
+ compileflags = _extract_future_flags(test.globs)
+
+ save_stdout = sys.stdout
+ if out is None:
+ out = save_stdout.write
+ sys.stdout = self._fakeout
+
+ # Patch pdb.set_trace to restore sys.stdout during interactive
+ # debugging (so it's not still redirected to self._fakeout).
+ # Note that the interactive output will go to *our*
+ # save_stdout, even if that's not the real sys.stdout; this
+ # allows us to write test cases for the set_trace behavior.
+ save_set_trace = pdb.set_trace
+ self.debugger = _OutputRedirectingPdb(save_stdout)
+ self.debugger.reset()
+ pdb.set_trace = self.debugger.set_trace
+
+ # Patch linecache.getlines, so we can see the example's source
+ # when we're inside the debugger.
+ self.save_linecache_getlines = linecache.getlines
+ linecache.getlines = self.__patched_linecache_getlines
+
+ try:
+ return self.__run(test, compileflags, out)
+ finally:
+ sys.stdout = save_stdout
+ pdb.set_trace = save_set_trace
+ linecache.getlines = self.save_linecache_getlines
+ if clear_globs:
+ test.globs.clear()
+
+ #/////////////////////////////////////////////////////////////////
+ # Summarization
+ #/////////////////////////////////////////////////////////////////
+ def summarize(self, verbose=None):
+ """
+ Print a summary of all the test cases that have been run by
+ this DocTestRunner, and return a tuple `(f, t)`, where `f` is
+ the total number of failed examples, and `t` is the total
+ number of tried examples.
+
+ The optional `verbose` argument controls how detailed the
+ summary is. If the verbosity is not specified, then the
+ DocTestRunner's verbosity is used.
+ """
+ if verbose is None:
+ verbose = self._verbose
+ notests = []
+ passed = []
+ failed = []
+ totalt = totalf = 0
+ for x in self._name2ft.items():
+ name, (f, t) = x
+ assert f <= t
+ totalt += t
+ totalf += f
+ if t == 0:
+ notests.append(name)
+ elif f == 0:
+ passed.append( (name, t) )
+ else:
+ failed.append(x)
+ if verbose:
+ if notests:
+ print len(notests), "items had no tests:"
+ notests.sort()
+ for thing in notests:
+ print " ", thing
+ if passed:
+ print len(passed), "items passed all tests:"
+ passed.sort()
+ for thing, count in passed:
+ print " %3d tests in %s" % (count, thing)
+ if failed:
+ print self.DIVIDER
+ print len(failed), "items had failures:"
+ failed.sort()
+ for thing, (f, t) in failed:
+ print " %3d of %3d in %s" % (f, t, thing)
+ if verbose:
+ print totalt, "tests in", len(self._name2ft), "items."
+ print totalt - totalf, "passed and", totalf, "failed."
+ if totalf:
+ print "***Test Failed***", totalf, "failures."
+ elif verbose:
+ print "Test passed."
+ return totalf, totalt
+
+ #/////////////////////////////////////////////////////////////////
+ # Backward compatibility cruft to maintain doctest.master.
+ #/////////////////////////////////////////////////////////////////
+ def merge(self, other):
+ d = self._name2ft
+ for name, (f, t) in other._name2ft.items():
+ if name in d:
+ print "*** DocTestRunner.merge: '" + name + "' in both" \
+ " testers; summing outcomes."
+ f2, t2 = d[name]
+ f = f + f2
+ t = t + t2
+ d[name] = f, t
+
+class OutputChecker:
+ """
+ A class used to check whether the actual output from a doctest
+ example matches the expected output. `OutputChecker` defines two
+ methods: `check_output`, which compares a given pair of outputs,
+ and returns true if they match; and `output_difference`, which
+ returns a string describing the differences between two outputs.
+ """
+ def check_output(self, want, got, optionflags):
+ """
+ Return True iff the actual output from an example (`got`)
+ matches the expected output (`want`). These strings are
+ always considered to match if they are identical; but
+ depending on what option flags the test runner is using,
+ several non-exact match types are also possible. See the
+ documentation for `DocTestRunner` for more information about
+ option flags.
+ """
+ # Handle the common case first, for efficiency:
+ # if they're string-identical, always return true.
+ if got == want:
+ return True
+
+ # The values True and False replaced 1 and 0 as the return
+ # value for boolean comparisons in Python 2.3.
+ if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
+ if (got,want) == ("True\n", "1\n"):
+ return True
+ if (got,want) == ("False\n", "0\n"):
+ return True
+
+ # <BLANKLINE> can be used as a special sequence to signify a
+ # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
+ if not (optionflags & DONT_ACCEPT_BLANKLINE):
+ # Replace <BLANKLINE> in want with a blank line.
+ want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
+ '', want)
+ # If a line in got contains only spaces, then remove the
+ # spaces.
+ got = re.sub('(?m)^\s*?$', '', got)
+ if got == want:
+ return True
+
+ # This flag causes doctest to ignore any differences in the
+ # contents of whitespace strings. Note that this can be used
+ # in conjunction with the ELLIPSIS flag.
+ if optionflags & NORMALIZE_WHITESPACE:
+ got = ' '.join(got.split())
+ want = ' '.join(want.split())
+ if got == want:
+ return True
+
+ # The ELLIPSIS flag says to let the sequence "..." in `want`
+ # match any substring in `got`.
+ if optionflags & ELLIPSIS:
+ if _ellipsis_match(want, got):
+ return True
+
+ # We didn't find any match; return false.
+ return False
+
+ # Should we do a fancy diff?
+ def _do_a_fancy_diff(self, want, got, optionflags):
+ # Not unless they asked for a fancy diff.
+ if not optionflags & (REPORT_UDIFF |
+ REPORT_CDIFF |
+ REPORT_NDIFF):
+ return False
+
+ # If expected output uses ellipsis, a meaningful fancy diff is
+ # too hard ... or maybe not. In two real-life failures Tim saw,
+ # a diff was a major help anyway, so this is commented out.
+ # [todo] _ellipsis_match() knows which pieces do and don't match,
+ # and could be the basis for a kick-ass diff in this case.
+ ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
+ ## return False
+
+ # ndiff does intraline difference marking, so can be useful even
+ # for 1-line differences.
+ if optionflags & REPORT_NDIFF:
+ return True
+
+ # The other diff types need at least a few lines to be helpful.
+ return want.count('\n') > 2 and got.count('\n') > 2
+
+ def output_difference(self, example, got, optionflags):
+ """
+ Return a string describing the differences between the
+ expected output for a given example (`example`) and the actual
+ output (`got`). `optionflags` is the set of option flags used
+ to compare `want` and `got`.
+ """
+ want = example.want
+ # If <BLANKLINE>s are being used, then replace blank lines
+ # with <BLANKLINE> in the actual output string.
+ if not (optionflags & DONT_ACCEPT_BLANKLINE):
+ got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
+
+ # Check if we should use diff.
+ if self._do_a_fancy_diff(want, got, optionflags):
+ # Split want & got into lines.
+ want_lines = want.splitlines(True) # True == keep line ends
+ got_lines = got.splitlines(True)
+ # Use difflib to find their differences.
+ if optionflags & REPORT_UDIFF:
+ diff = difflib.unified_diff(want_lines, got_lines, n=2)
+ diff = list(diff)[2:] # strip the diff header
+ kind = 'unified diff with -expected +actual'
+ elif optionflags & REPORT_CDIFF:
+ diff = difflib.context_diff(want_lines, got_lines, n=2)
+ diff = list(diff)[2:] # strip the diff header
+ kind = 'context diff with expected followed by actual'
+ elif optionflags & REPORT_NDIFF:
+ engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
+ diff = list(engine.compare(want_lines, got_lines))
+ kind = 'ndiff with -expected +actual'
+ else:
+ assert 0, 'Bad diff option'
+ # Remove trailing whitespace on diff output.
+ diff = [line.rstrip() + '\n' for line in diff]
+ return 'Differences (%s):\n' % kind + _indent(''.join(diff))
+
+ # If we're not using diff, then simply list the expected
+ # output followed by the actual output.
+ if want and got:
+ return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
+ elif want:
+ return 'Expected:\n%sGot nothing\n' % _indent(want)
+ elif got:
+ return 'Expected nothing\nGot:\n%s' % _indent(got)
+ else:
+ return 'Expected nothing\nGot nothing\n'
+
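+ # A minimal sketch (kept as comments so the module has no import-time side
+ # effects) of how check_output() above responds to option flags; the literal
+ # strings are illustrative only:
+ #
+ #     checker = OutputChecker()
+ #     checker.check_output('1 2 3\n', '1  2  3\n', 0)
+ #     # -> False: the whitespace differs and no flags are set
+ #     checker.check_output('1 2 3\n', '1  2  3\n', NORMALIZE_WHITESPACE)
+ #     # -> True: runs of whitespace are collapsed before comparing
+ #     checker.check_output('[0, 1, ..., 9]\n', '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n', ELLIPSIS)
+ #     # -> True: '...' in the expected output matches any substring
+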
+class DocTestFailure(Exception):
+ """A DocTest example has failed in debugging mode.
+
+ The exception instance has variables:
+
+ - test: the DocTest object being run
+
+ - example: the Example object that failed
+
+ - got: the actual output
+ """
+ def __init__(self, test, example, got):
+ self.test = test
+ self.example = example
+ self.got = got
+
+ def __str__(self):
+ return str(self.test)
+
+class UnexpectedException(Exception):
+ """A DocTest example has encountered an unexpected exception
+
+ The exception instance has variables:
+
+ - test: the DocTest object being run
+
+ - example: the Example object that failed
+
+ - exc_info: the exception info
+ """
+ def __init__(self, test, example, exc_info):
+ self.test = test
+ self.example = example
+ self.exc_info = exc_info
+
+ def __str__(self):
+ return str(self.test)
+
+class DebugRunner(DocTestRunner):
+ r"""Run doc tests but raise an exception as soon as there is a failure.
+
+ If an unexpected exception occurs, an UnexpectedException is raised.
+ It contains the test, the example, and the original exception:
+
+ >>> runner = DebugRunner(verbose=False)
+ >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
+ ... {}, 'foo', 'foo.py', 0)
+ >>> try:
+ ... runner.run(test)
+ ... except UnexpectedException, failure:
+ ... pass
+
+ >>> failure.test is test
+ True
+
+ >>> failure.example.want
+ '42\n'
+
+ >>> exc_info = failure.exc_info
+ >>> raise exc_info[0], exc_info[1], exc_info[2]
+ Traceback (most recent call last):
+ ...
+ KeyError
+
+ We wrap the original exception to give the calling application
+ access to the test and example information.
+
+ If the output doesn't match, then a DocTestFailure is raised:
+
+ >>> test = DocTestParser().get_doctest('''
+ ... >>> x = 1
+ ... >>> x
+ ... 2
+ ... ''', {}, 'foo', 'foo.py', 0)
+
+ >>> try:
+ ... runner.run(test)
+ ... except DocTestFailure, failure:
+ ... pass
+
+ DocTestFailure objects provide access to the test:
+
+ >>> failure.test is test
+ True
+
+ As well as to the example:
+
+ >>> failure.example.want
+ '2\n'
+
+ and the actual output:
+
+ >>> failure.got
+ '1\n'
+
+ If a failure or error occurs, the globals are left intact:
+
+ >>> del test.globs['__builtins__']
+ >>> test.globs
+ {'x': 1}
+
+ >>> test = DocTestParser().get_doctest('''
+ ... >>> x = 2
+ ... >>> raise KeyError
+ ... ''', {}, 'foo', 'foo.py', 0)
+
+ >>> runner.run(test)
+ Traceback (most recent call last):
+ ...
+ UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
+
+ >>> del test.globs['__builtins__']
+ >>> test.globs
+ {'x': 2}
+
+ But the globals are cleared if there is no error:
+
+ >>> test = DocTestParser().get_doctest('''
+ ... >>> x = 2
+ ... ''', {}, 'foo', 'foo.py', 0)
+
+ >>> runner.run(test)
+ (0, 1)
+
+ >>> test.globs
+ {}
+
+ """
+
+ def run(self, test, compileflags=None, out=None, clear_globs=True):
+ r = DocTestRunner.run(self, test, compileflags, out, False)
+ if clear_globs:
+ test.globs.clear()
+ return r
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ raise UnexpectedException(test, example, exc_info)
+
+ def report_failure(self, out, test, example, got):
+ raise DocTestFailure(test, example, got)
+
+######################################################################
+## 6. Test Functions
+######################################################################
+# These should be backwards compatible.
+
+# For backward compatibility, a global instance of a DocTestRunner
+# class, updated by testmod.
+master = None
+
+def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
+ report=True, optionflags=0, extraglobs=None,
+ raise_on_error=False, exclude_empty=False):
+ """m=None, name=None, globs=None, verbose=None, isprivate=None,
+ report=True, optionflags=0, extraglobs=None, raise_on_error=False,
+ exclude_empty=False
+
+ Test examples in docstrings in functions and classes reachable
+ from module m (or the current module if m is not supplied), starting
+ with m.__doc__. Unless isprivate is specified, private names
+ are not skipped.
+
+ Also test examples reachable from dict m.__test__ if it exists and is
+ not None. m.__test__ maps names to functions, classes and strings;
+ function and class docstrings are tested even if the name is private;
+ strings are tested directly, as if they were docstrings.
+
+ Return (#failures, #tests).
+
+ See doctest.__doc__ for an overview.
+
+ Optional keyword arg "name" gives the name of the module; by default
+ use m.__name__.
+
+ Optional keyword arg "globs" gives a dict to be used as the globals
+ when executing examples; by default, use m.__dict__. A copy of this
+ dict is actually used for each docstring, so that each docstring's
+ examples start with a clean slate.
+
+ Optional keyword arg "extraglobs" gives a dictionary that should be
+ merged into the globals that are used to execute examples. By
+ default, no extra globals are used. This is new in 2.4.
+
+ Optional keyword arg "verbose" prints lots of stuff if true, prints
+ only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+ Optional keyword arg "report" prints a summary at the end when true,
+ else prints nothing at the end. In verbose mode, the summary is
+ detailed, else very brief (in fact, empty if all tests passed).
+
+ Optional keyword arg "optionflags" or's together module constants,
+ and defaults to 0. This is new in 2.3. Possible values (see the
+ docs for details):
+
+ DONT_ACCEPT_TRUE_FOR_1
+ DONT_ACCEPT_BLANKLINE
+ NORMALIZE_WHITESPACE
+ ELLIPSIS
+ IGNORE_EXCEPTION_DETAIL
+ REPORT_UDIFF
+ REPORT_CDIFF
+ REPORT_NDIFF
+ REPORT_ONLY_FIRST_FAILURE
+
+ Optional keyword arg "raise_on_error" raises an exception on the
+ first unexpected exception or failure. This allows failures to be
+ post-mortem debugged.
+
+ Deprecated in Python 2.4:
+ Optional keyword arg "isprivate" specifies a function used to
+ determine whether a name is private. The default function is
+ treat all functions as public. Optionally, "isprivate" can be
+ set to doctest.is_private to skip over functions marked as private
+ using the underscore naming convention; see its docs for details.
+
+ Advanced tomfoolery: testmod runs methods of a local instance of
+ class doctest.Tester, then merges the results into (or creates)
+ global Tester instance doctest.master. Methods of doctest.master
+ can be called directly too, if you want to do something unusual.
+ Passing report=0 to testmod is especially useful then, to delay
+ displaying a summary. Invoke doctest.master.summarize(verbose)
+ when you're done fiddling.
+ """
+ global master
+
+ if isprivate is not None:
+ warnings.warn("the isprivate argument is deprecated; "
+ "examine DocTestFinder.find() lists instead",
+ DeprecationWarning)
+
+ # If no module was given, then use __main__.
+ if m is None:
+ # DWA - m will still be None if this wasn't invoked from the command
+ # line, in which case the following TypeError is about as good an error
+ # as we should expect
+ m = sys.modules.get('__main__')
+
+ # Check that we were actually given a module.
+ if not inspect.ismodule(m):
+ raise TypeError("testmod: module required; %r" % (m,))
+
+ # If no name was given, then use the module's name.
+ if name is None:
+ name = m.__name__
+
+ # Find, parse, and run all tests in the given module.
+ finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
+
+ if raise_on_error:
+ runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+ else:
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+ for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
+ runner.run(test)
+
+ if report:
+ runner.summarize()
+
+ if master is None:
+ master = runner
+ else:
+ master.merge(runner)
+
+ return runner.failures, runner.tries
+
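+ # A minimal usage sketch for testmod(), left as a comment so that importing
+ # this module stays side-effect free; `mymodule` is a placeholder for a
+ # module containing doctests:
+ #
+ #     import mymodule
+ #     failures, tries = testmod(mymodule, verbose=False)
+ #     if failures:
+ #         raise SystemExit(1)
+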
+def testfile(filename, module_relative=True, name=None, package=None,
+ globs=None, verbose=None, report=True, optionflags=0,
+ extraglobs=None, raise_on_error=False, parser=DocTestParser()):
+ """
+ Test examples in the given file. Return (#failures, #tests).
+
+ Optional keyword arg "module_relative" specifies how filenames
+ should be interpreted:
+
+ - If "module_relative" is True (the default), then "filename"
+ specifies a module-relative path. By default, this path is
+ relative to the calling module's directory; but if the
+ "package" argument is specified, then it is relative to that
+ package. To ensure os-independence, "filename" should use
+ "/" characters to separate path segments, and should not
+ be an absolute path (i.e., it may not begin with "/").
+
+ - If "module_relative" is False, then "filename" specifies an
+ os-specific path. The path may be absolute or relative (to
+ the current working directory).
+
+ Optional keyword arg "name" gives the name of the test; by default
+ use the file's basename.
+
+ Optional keyword argument "package" is a Python package or the
+ name of a Python package whose directory should be used as the
+ base directory for a module relative filename. If no package is
+ specified, then the calling module's directory is used as the base
+ directory for module relative filenames. It is an error to
+ specify "package" if "module_relative" is False.
+
+ Optional keyword arg "globs" gives a dict to be used as the globals
+ when executing examples; by default, use {}. A copy of this dict
+ is actually used for each docstring, so that each docstring's
+ examples start with a clean slate.
+
+ Optional keyword arg "extraglobs" gives a dictionary that should be
+ merged into the globals that are used to execute examples. By
+ default, no extra globals are used.
+
+ Optional keyword arg "verbose" prints lots of stuff if true, prints
+ only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+ Optional keyword arg "report" prints a summary at the end when true,
+ else prints nothing at the end. In verbose mode, the summary is
+ detailed, else very brief (in fact, empty if all tests passed).
+
+ Optional keyword arg "optionflags" or's together module constants,
+ and defaults to 0. Possible values (see the docs for details):
+
+ DONT_ACCEPT_TRUE_FOR_1
+ DONT_ACCEPT_BLANKLINE
+ NORMALIZE_WHITESPACE
+ ELLIPSIS
+ IGNORE_EXCEPTION_DETAIL
+ REPORT_UDIFF
+ REPORT_CDIFF
+ REPORT_NDIFF
+ REPORT_ONLY_FIRST_FAILURE
+
+ Optional keyword arg "raise_on_error" raises an exception on the
+ first unexpected exception or failure. This allows failures to be
+ post-mortem debugged.
+
+ Optional keyword arg "parser" specifies a DocTestParser (or
+ subclass) that should be used to extract tests from the files.
+
+ Advanced tomfoolery: testfile runs methods of a local instance of
+ class doctest.Tester, then merges the results into (or creates)
+ global Tester instance doctest.master. Methods of doctest.master
+ can be called directly too, if you want to do something unusual.
+ Passing report=0 to testmod is especially useful then, to delay
+ displaying a summary. Invoke doctest.master.summarize(verbose)
+ when you're done fiddling.
+ """
+ global master
+
+ if package and not module_relative:
+ raise ValueError("Package may only be specified for module-"
+ "relative paths.")
+
+ # Relativize the path
+ if module_relative:
+ package = _normalize_module(package)
+ filename = _module_relative_path(package, filename)
+
+ # If no name was given, then use the file's name.
+ if name is None:
+ name = os.path.basename(filename)
+
+ # Assemble the globals.
+ if globs is None:
+ globs = {}
+ else:
+ globs = globs.copy()
+ if extraglobs is not None:
+ globs.update(extraglobs)
+
+ if raise_on_error:
+ runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+ else:
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+ # Read the file, convert it to a test, and run it.
+ s = open(filename).read()
+ test = parser.get_doctest(s, globs, name, filename, 0)
+ runner.run(test)
+
+ if report:
+ runner.summarize()
+
+ if master is None:
+ master = runner
+ else:
+ master.merge(runner)
+
+ return runner.failures, runner.tries
+
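+ # A comparable sketch for testfile(); the path 'docs/example.txt' is a
+ # placeholder, resolved relative to the calling module's directory by
+ # default:
+ #
+ #     failures, tries = testfile('docs/example.txt', optionflags=ELLIPSIS)
+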
+def run_docstring_examples(f, globs, verbose=False, name="NoName",
+ compileflags=None, optionflags=0):
+ """
+ Test examples in the given object's docstring (`f`), using `globs`
+ as globals. Optional argument `name` is used in failure messages.
+ If the optional argument `verbose` is true, then generate output
+ even if there are no failures.
+
+ `compileflags` gives the set of flags that should be used by the
+ Python compiler when running the examples. If not specified, then
+ it will default to the set of future-import flags that apply to
+ `globs`.
+
+ Optional keyword arg `optionflags` specifies options for the
+ testing and output. See the documentation for `testmod` for more
+ information.
+ """
+ # Find, parse, and run all tests in the given module.
+ finder = DocTestFinder(verbose=verbose, recurse=False)
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+ for test in finder.find(f, name, globs=globs):
+ runner.run(test, compileflags=compileflags)
+
+######################################################################
+## 7. Tester
+######################################################################
+# This is provided only for backwards compatibility. It's not
+# actually used in any way.
+
+class Tester:
+ def __init__(self, mod=None, globs=None, verbose=None,
+ isprivate=None, optionflags=0):
+
+ warnings.warn("class Tester is deprecated; "
+ "use class doctest.DocTestRunner instead",
+ DeprecationWarning, stacklevel=2)
+ if mod is None and globs is None:
+ raise TypeError("Tester.__init__: must specify mod or globs")
+ if mod is not None and not inspect.ismodule(mod):
+ raise TypeError("Tester.__init__: mod must be a module; %r" %
+ (mod,))
+ if globs is None:
+ globs = mod.__dict__
+ self.globs = globs
+
+ self.verbose = verbose
+ self.isprivate = isprivate
+ self.optionflags = optionflags
+ self.testfinder = DocTestFinder(_namefilter=isprivate)
+ self.testrunner = DocTestRunner(verbose=verbose,
+ optionflags=optionflags)
+
+ def runstring(self, s, name):
+ test = DocTestParser().get_doctest(s, self.globs, name, None, None)
+ if self.verbose:
+ print "Running string", name
+ (f,t) = self.testrunner.run(test)
+ if self.verbose:
+ print f, "of", t, "examples failed in string", name
+ return (f,t)
+
+ def rundoc(self, object, name=None, module=None):
+ f = t = 0
+ tests = self.testfinder.find(object, name, module=module,
+ globs=self.globs)
+ for test in tests:
+ (f2, t2) = self.testrunner.run(test)
+ (f,t) = (f+f2, t+t2)
+ return (f,t)
+
+ def rundict(self, d, name, module=None):
+ import new
+ m = new.module(name)
+ m.__dict__.update(d)
+ if module is None:
+ module = False
+ return self.rundoc(m, name, module)
+
+ def run__test__(self, d, name):
+ import new
+ m = new.module(name)
+ m.__test__ = d
+ return self.rundoc(m, name)
+
+ def summarize(self, verbose=None):
+ return self.testrunner.summarize(verbose)
+
+ def merge(self, other):
+ self.testrunner.merge(other.testrunner)
+
+######################################################################
+## 8. Unittest Support
+######################################################################
+
+_unittest_reportflags = 0
+
+def set_unittest_reportflags(flags):
+ """Sets the unittest option flags.
+
+ The old flag is returned so that a runner could restore the old
+ value if it wished to:
+
+ >>> old = _unittest_reportflags
+ >>> set_unittest_reportflags(REPORT_NDIFF |
+ ... REPORT_ONLY_FIRST_FAILURE) == old
+ True
+
+ >>> import doctest
+ >>> doctest._unittest_reportflags == (REPORT_NDIFF |
+ ... REPORT_ONLY_FIRST_FAILURE)
+ True
+
+ Only reporting flags can be set:
+
+ >>> set_unittest_reportflags(ELLIPSIS)
+ Traceback (most recent call last):
+ ...
+ ValueError: ('Only reporting flags allowed', 8)
+
+ >>> set_unittest_reportflags(old) == (REPORT_NDIFF |
+ ... REPORT_ONLY_FIRST_FAILURE)
+ True
+ """
+ global _unittest_reportflags
+
+ if (flags & REPORTING_FLAGS) != flags:
+ raise ValueError("Only reporting flags allowed", flags)
+ old = _unittest_reportflags
+ _unittest_reportflags = flags
+ return old
+
+
+class DocTestCase(unittest.TestCase):
+
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None):
+
+ unittest.TestCase.__init__(self)
+ self._dt_optionflags = optionflags
+ self._dt_checker = checker
+ self._dt_test = test
+ self._dt_setUp = setUp
+ self._dt_tearDown = tearDown
+
+ def setUp(self):
+ test = self._dt_test
+
+ if self._dt_setUp is not None:
+ self._dt_setUp(test)
+
+ def tearDown(self):
+ test = self._dt_test
+
+ if self._dt_tearDown is not None:
+ self._dt_tearDown(test)
+
+ test.globs.clear()
+
+ def runTest(self):
+ test = self._dt_test
+ old = sys.stdout
+ new = StringIO()
+ optionflags = self._dt_optionflags
+
+ if not (optionflags & REPORTING_FLAGS):
+ # The option flags don't include any reporting flags,
+ # so add the default reporting flags
+ optionflags |= _unittest_reportflags
+
+ runner = DocTestRunner(optionflags=optionflags,
+ checker=self._dt_checker, verbose=False)
+
+ try:
+ runner.DIVIDER = "-"*70
+ failures, tries = runner.run(
+ test, out=new.write, clear_globs=False)
+ finally:
+ sys.stdout = old
+
+ if failures:
+ raise self.failureException(self.format_failure(new.getvalue()))
+
+ def format_failure(self, err):
+ test = self._dt_test
+ if test.lineno is None:
+ lineno = 'unknown line number'
+ else:
+ lineno = '%s' % test.lineno
+ lname = '.'.join(test.name.split('.')[-1:])
+ return ('Failed doctest test for %s\n'
+ ' File "%s", line %s, in %s\n\n%s'
+ % (test.name, test.filename, lineno, lname, err)
+ )
+
+ def debug(self):
+ r"""Run the test case without results and without catching exceptions
+
+ The unit test framework includes a debug method on test cases
+ and test suites to support post-mortem debugging. The test code
+ is run in such a way that errors are not caught. This way a
+ caller can catch the errors and initiate post-mortem debugging.
+
+ The DocTestCase provides a debug method that raises
+ UnexpectedException errors if there is an unexpected
+ exception:
+
+ >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
+ ... {}, 'foo', 'foo.py', 0)
+ >>> case = DocTestCase(test)
+ >>> try:
+ ... case.debug()
+ ... except UnexpectedException, failure:
+ ... pass
+
+ The UnexpectedException contains the test, the example, and
+ the original exception:
+
+ >>> failure.test is test
+ True
+
+ >>> failure.example.want
+ '42\n'
+
+ >>> exc_info = failure.exc_info
+ >>> raise exc_info[0], exc_info[1], exc_info[2]
+ Traceback (most recent call last):
+ ...
+ KeyError
+
+ If the output doesn't match, then a DocTestFailure is raised:
+
+ >>> test = DocTestParser().get_doctest('''
+ ... >>> x = 1
+ ... >>> x
+ ... 2
+ ... ''', {}, 'foo', 'foo.py', 0)
+ >>> case = DocTestCase(test)
+
+ >>> try:
+ ... case.debug()
+ ... except DocTestFailure, failure:
+ ... pass
+
+ DocTestFailure objects provide access to the test:
+
+ >>> failure.test is test
+ True
+
+ As well as to the example:
+
+ >>> failure.example.want
+ '2\n'
+
+ and the actual output:
+
+ >>> failure.got
+ '1\n'
+
+ """
+
+ self.setUp()
+ runner = DebugRunner(optionflags=self._dt_optionflags,
+ checker=self._dt_checker, verbose=False)
+ runner.run(self._dt_test)
+ self.tearDown()
+
+ def id(self):
+ return self._dt_test.name
+
+ def __repr__(self):
+ name = self._dt_test.name.split('.')
+ return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
+
+ __str__ = __repr__
+
+ def shortDescription(self):
+ return "Doctest: " + self._dt_test.name
+
+def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
+ **options):
+ """
+ Convert doctest tests for a module to a unittest test suite.
+
+ This converts each documentation string in a module that
+ contains doctest tests to a unittest test case. If any of the
+ tests in a doc string fail, then the test case fails. An exception
+ is raised showing the name of the file containing the test and a
+ (sometimes approximate) line number.
+
+ The `module` argument provides the module to be tested. The argument
+ can be either a module or a module name.
+
+ If no argument is given, the calling module is used.
+
+ A number of options may be provided as keyword arguments:
+
+ setUp
+ A set-up function. This is called before running the
+ tests in each file. The setUp function will be passed a DocTest
+ object. The setUp function can access the test globals as the
+ globs attribute of the test passed.
+
+ tearDown
+ A tear-down function. This is called after running the
+ tests in each file. The tearDown function will be passed a DocTest
+ object. The tearDown function can access the test globals as the
+ globs attribute of the test passed.
+
+ globs
+ A dictionary containing initial global variables for the tests.
+
+ optionflags
+ A set of doctest option flags expressed as an integer.
+ """
+
+ if test_finder is None:
+ test_finder = DocTestFinder()
+
+ module = _normalize_module(module)
+ tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
+ if globs is None:
+ globs = module.__dict__
+ if not tests:
+ # Why do we want to do this? Because it reveals a bug that might
+ # otherwise be hidden.
+ raise ValueError(module, "has no tests")
+
+ tests.sort()
+ suite = unittest.TestSuite()
+ for test in tests:
+ if len(test.examples) == 0:
+ continue
+ if not test.filename:
+ filename = module.__file__
+ if filename[-4:] in (".pyc", ".pyo"):
+ filename = filename[:-1]
+ test.filename = filename
+ suite.addTest(DocTestCase(test, **options))
+
+ return suite
+
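+ # A minimal sketch of wiring DocTestSuite into unittest, kept as comments;
+ # `mymodule` is a placeholder module assumed to contain doctests:
+ #
+ #     import mymodule
+ #     suite = DocTestSuite(mymodule)
+ #     unittest.TextTestRunner(verbosity=2).run(suite)
+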
+class DocFileCase(DocTestCase):
+
+ def id(self):
+ return '_'.join(self._dt_test.name.split('.'))
+
+ def __repr__(self):
+ return self._dt_test.filename
+ __str__ = __repr__
+
+ def format_failure(self, err):
+ return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
+ % (self._dt_test.name, self._dt_test.filename, err)
+ )
+
+def DocFileTest(path, module_relative=True, package=None,
+ globs=None, parser=DocTestParser(), **options):
+ if globs is None:
+ globs = {}
+
+ if package and not module_relative:
+ raise ValueError("Package may only be specified for module-"
+ "relative paths.")
+
+ # Relativize the path.
+ if module_relative:
+ package = _normalize_module(package)
+ path = _module_relative_path(package, path)
+
+ # Find the file and read it.
+ name = os.path.basename(path)
+ doc = open(path).read()
+
+ # Convert it to a test, and wrap it in a DocFileCase.
+ test = parser.get_doctest(doc, globs, name, path, 0)
+ return DocFileCase(test, **options)
+
+def DocFileSuite(*paths, **kw):
+ """A unittest suite for one or more doctest files.
+
+ The path to each doctest file is given as a string; the
+ interpretation of that string depends on the keyword argument
+ "module_relative".
+
+ A number of options may be provided as keyword arguments:
+
+ module_relative
+ If "module_relative" is True, then the given file paths are
+ interpreted as os-independent module-relative paths. By
+ default, these paths are relative to the calling module's
+ directory; but if the "package" argument is specified, then
+ they are relative to that package. To ensure os-independence,
+ "filename" should use "/" characters to separate path
+ segments, and may not be an absolute path (i.e., it may not
+ begin with "/").
+
+ If "module_relative" is False, then the given file paths are
+ interpreted as os-specific paths. These paths may be absolute
+ or relative (to the current working directory).
+
+ package
+ A Python package or the name of a Python package whose directory
+ should be used as the base directory for module relative paths.
+ If "package" is not specified, then the calling module's
+ directory is used as the base directory for module relative
+ filenames. It is an error to specify "package" if
+ "module_relative" is False.
+
+ setUp
+ A set-up function. This is called before running the
+ tests in each file. The setUp function will be passed a DocTest
+ object. The setUp function can access the test globals as the
+ globs attribute of the test passed.
+
+ tearDown
+ A tear-down function. This is called after running the
+ tests in each file. The tearDown function will be passed a DocTest
+ object. The tearDown function can access the test globals as the
+ globs attribute of the test passed.
+
+ globs
+ A dictionary containing initial global variables for the tests.
+
+ optionflags
+ A set of doctest option flags expressed as an integer.
+
+ parser
+ A DocTestParser (or subclass) that should be used to extract
+ tests from the files.
+ """
+ suite = unittest.TestSuite()
+
+ # We do this here so that _normalize_module is called at the right
+ # level. If it were called in DocFileTest, then this function
+ # would be the caller and we might guess the package incorrectly.
+ if kw.get('module_relative', True):
+ kw['package'] = _normalize_module(kw.get('package'))
+
+ for path in paths:
+ suite.addTest(DocFileTest(path, **kw))
+
+ return suite
+
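+ # The file-based variant follows the same pattern; the paths below are
+ # placeholders and are treated as module-relative unless
+ # module_relative=False is passed:
+ #
+ #     suite = DocFileSuite('docs/intro.txt', 'docs/advanced.txt',
+ #                          optionflags=NORMALIZE_WHITESPACE)
+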
+######################################################################
+## 9. Debugging Support
+######################################################################
+
+def script_from_examples(s):
+ r"""Extract script from text with examples.
+
+ Converts text with examples to a Python script. Example input is
+ converted to regular code. Example output and all other words
+ are converted to comments:
+
+ >>> text = '''
+ ... Here are examples of simple math.
+ ...
+ ... Python has super accurate integer addition
+ ...
+ ... >>> 2 + 2
+ ... 5
+ ...
+ ... And very friendly error messages:
+ ...
+ ... >>> 1/0
+ ... To Infinity
+ ... And
+ ... Beyond
+ ...
+ ... You can use logic if you want:
+ ...
+ ... >>> if 0:
+ ... ... blah
+ ... ... blah
+ ... ...
+ ...
+ ... Ho hum
+ ... '''
+
+ >>> print script_from_examples(text)
+ # Here are examples of simple math.
+ #
+ # Python has super accurate integer addition
+ #
+ 2 + 2
+ # Expected:
+ ## 5
+ #
+ # And very friendly error messages:
+ #
+ 1/0
+ # Expected:
+ ## To Infinity
+ ## And
+ ## Beyond
+ #
+ # You can use logic if you want:
+ #
+ if 0:
+ blah
+ blah
+ #
+ # Ho hum
+ """
+ output = []
+ for piece in DocTestParser().parse(s):
+ if isinstance(piece, Example):
+ # Add the example's source code (strip trailing NL)
+ output.append(piece.source[:-1])
+ # Add the expected output:
+ want = piece.want
+ if want:
+ output.append('# Expected:')
+ output += ['## '+l for l in want.split('\n')[:-1]]
+ else:
+ # Add non-example text.
+ output += [_comment_line(l)
+ for l in piece.split('\n')[:-1]]
+
+ # Trim junk on both ends.
+ while output and output[-1] == '#':
+ output.pop()
+ while output and output[0] == '#':
+ output.pop(0)
+ # Combine the output, and return it.
+ return '\n'.join(output)
+
+def testsource(module, name):
+ """Extract the test sources from a doctest docstring as a script.
+
+ Provide the module (or dotted name of the module) containing the
+ test to be debugged and the name (within the module) of the object
+ with the doc string with tests to be debugged.
+ """
+ module = _normalize_module(module)
+ tests = DocTestFinder().find(module)
+ test = [t for t in tests if t.name == name]
+ if not test:
+ raise ValueError(name, "not found in tests")
+ test = test[0]
+ testsrc = script_from_examples(test.docstring)
+ return testsrc
+
+def debug_src(src, pm=False, globs=None):
+ """Debug a single doctest docstring, in argument `src`'"""
+ testsrc = script_from_examples(src)
+ debug_script(testsrc, pm, globs)
+
+def debug_script(src, pm=False, globs=None):
+ "Debug a test script. `src` is the script, as a string."
+ import pdb
+
+ # Note that tempfile.NamedTemporaryFile() cannot be used. As the
+ # docs say, a file so created cannot be opened by name a second time
+ # on modern Windows boxes, and execfile() needs to open it.
+ srcfilename = tempfile.mktemp(".py", "doctestdebug")
+ f = open(srcfilename, 'w')
+ f.write(src)
+ f.close()
+
+ try:
+ if globs:
+ globs = globs.copy()
+ else:
+ globs = {}
+
+ if pm:
+ try:
+ execfile(srcfilename, globs, globs)
+ except:
+ print sys.exc_info()[1]
+ pdb.post_mortem(sys.exc_info()[2])
+ else:
+ # Note that %r is vital here. '%s' instead can, e.g., cause
+ # backslashes to get treated as metacharacters on Windows.
+ pdb.run("execfile(%r)" % srcfilename, globs, globs)
+
+ finally:
+ os.remove(srcfilename)
+
+def debug(module, name, pm=False):
+ """Debug a single doctest docstring.
+
+ Provide the module (or dotted name of the module) containing the
+ test to be debugged and the name (within the module) of the object
+ with the docstring with tests to be debugged.
+ """
+ module = _normalize_module(module)
+ testsrc = testsource(module, name)
+ debug_script(testsrc, pm, module.__dict__)
+
+######################################################################
+## 10. Example Usage
+######################################################################
+class _TestClass:
+ """
+ A pointless class, for sanity-checking of docstring testing.
+
+ Methods:
+ square()
+ get()
+
+ >>> _TestClass(13).get() + _TestClass(-12).get()
+ 1
+ >>> hex(_TestClass(13).square().get())
+ '0xa9'
+ """
+
+ def __init__(self, val):
+ """val -> _TestClass object with associated value val.
+
+ >>> t = _TestClass(123)
+ >>> print t.get()
+ 123
+ """
+
+ self.val = val
+
+ def square(self):
+ """square() -> square TestClass's associated value
+
+ >>> _TestClass(13).square().get()
+ 169
+ """
+
+ self.val = self.val ** 2
+ return self
+
+ def get(self):
+ """get() -> return TestClass's associated value.
+
+ >>> x = _TestClass(-42)
+ >>> print x.get()
+ -42
+ """
+
+ return self.val
+
+__test__ = {"_TestClass": _TestClass,
+ "string": r"""
+ Example of a string object, searched as-is.
+ >>> x = 1; y = 2
+ >>> x + y, x * y
+ (3, 2)
+ """,
+
+ "bool-int equivalence": r"""
+ In 2.2, boolean expressions displayed
+ 0 or 1. By default, we still accept
+ them. This can be disabled by passing
+ DONT_ACCEPT_TRUE_FOR_1 to the new
+ optionflags argument.
+ >>> 4 == 4
+ 1
+ >>> 4 == 4
+ True
+ >>> 4 > 4
+ 0
+ >>> 4 > 4
+ False
+ """,
+
+ "blank lines": r"""
+ Blank lines can be marked with <BLANKLINE>:
+ >>> print 'foo\n\nbar\n'
+ foo
+ <BLANKLINE>
+ bar
+ <BLANKLINE>
+ """,
+
+ "ellipsis": r"""
+ If the ellipsis flag is used, then '...' can be used to
+ elide substrings in the desired output:
+ >>> print range(1000) #doctest: +ELLIPSIS
+ [0, 1, 2, ..., 999]
+ """,
+
+ "whitespace normalization": r"""
+ If the whitespace normalization flag is used, then
+ differences in whitespace are ignored.
+ >>> print range(30) #doctest: +NORMALIZE_WHITESPACE
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29]
+ """,
+ }
+
+def _test():
+ r = unittest.TextTestRunner()
+ r.run(DocTestSuite())
+
+if __name__ == "__main__":
+ _test()
diff --git a/paste/3rd-party/new_python/python/string.py b/paste/3rd-party/new_python/python/string.py
new file mode 100644
index 0000000..7c0e001
--- /dev/null
+++ b/paste/3rd-party/new_python/python/string.py
@@ -0,0 +1,531 @@
+"""A collection of string operations (most are no longer used).
+
+Warning: most of the code you see here isn't normally used nowadays.
+Beginning with Python 1.6, many of these functions are implemented as
+methods on the standard string object. They used to be implemented by
+a built-in module called strop, but strop is now obsolete itself.
+
+Public module variables:
+
+whitespace -- a string containing all characters considered whitespace
+lowercase -- a string containing all characters considered lowercase letters
+uppercase -- a string containing all characters considered uppercase letters
+letters -- a string containing all characters considered letters
+digits -- a string containing all characters considered decimal digits
+hexdigits -- a string containing all characters considered hexadecimal digits
+octdigits -- a string containing all characters considered octal digits
+punctuation -- a string containing all characters considered punctuation
+printable -- a string containing all characters considered printable
+
+"""
+
+# Some strings for ctype-style character classification
+whitespace = ' \t\n\r\v\f'
+lowercase = 'abcdefghijklmnopqrstuvwxyz'
+uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+letters = lowercase + uppercase
+ascii_lowercase = lowercase
+ascii_uppercase = uppercase
+ascii_letters = ascii_lowercase + ascii_uppercase
+digits = '0123456789'
+hexdigits = digits + 'abcdef' + 'ABCDEF'
+octdigits = '01234567'
+punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
+printable = digits + letters + punctuation + whitespace
+
+# Case conversion helpers
+# Use str to convert Unicode literal in case of -U
+# Note that Cookie.py bogusly uses _idmap :(
+l = map(chr, xrange(256))
+_idmap = str('').join(l)
+del l
+
+# Functions which aren't available as string methods.
+
+# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
+# See also regsub.capwords().
+def capwords(s, sep=None):
+ """capwords(s, [sep]) -> string
+
+ Split the argument into words using split, capitalize each
+ word using capitalize, and join the capitalized words using
+ join. Note that this replaces runs of whitespace characters by
+ a single space.
+
+ """
+ return (sep or ' ').join([x.capitalize() for x in s.split(sep)])
+
+
+# Construct a translation string
+_idmapL = None
+def maketrans(fromstr, tostr):
+ """maketrans(frm, to) -> string
+
+ Return a translation table (a string of 256 bytes long)
+ suitable for use in string.translate. The strings frm and to
+ must be of the same length.
+
+ """
+ if len(fromstr) != len(tostr):
+ raise ValueError, "maketrans arguments must have same length"
+ global _idmapL
+ if not _idmapL:
+ _idmapL = map(None, _idmap)
+ L = _idmapL[:]
+ fromstr = map(ord, fromstr)
+ for i in range(len(fromstr)):
+ L[fromstr[i]] = tostr[i]
+ return ''.join(L)
+
+
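+ # A small illustrative sketch of maketrans() used together with translate(),
+ # left commented out:
+ #
+ #     table = maketrans('abc', 'xyz')
+ #     translate('cabbage', table)      # -> 'zxyyxge'
+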
+
+####################################################################
+import re as _re
+
+class _multimap:
+ """Helper class for combining multiple mappings.
+
+ Used by .{safe_,}substitute() to combine the mapping and keyword
+ arguments.
+ """
+ def __init__(self, primary, secondary):
+ self._primary = primary
+ self._secondary = secondary
+
+ def __getitem__(self, key):
+ try:
+ return self._primary[key]
+ except KeyError:
+ return self._secondary[key]
+
+
+class _TemplateMetaclass(type):
+ pattern = r"""
+ %(delim)s(?:
+ (?P<escaped>%(delim)s) | # Escape sequence of two delimiters
+ (?P<named>%(id)s) | # delimiter and a Python identifier
+ {(?P<braced>%(id)s)} | # delimiter and a braced identifier
+ (?P<invalid>) # Other ill-formed delimiter exprs
+ )
+ """
+
+ def __init__(cls, name, bases, dct):
+ super(_TemplateMetaclass, cls).__init__(name, bases, dct)
+ if 'pattern' in dct:
+ pattern = cls.pattern
+ else:
+ pattern = _TemplateMetaclass.pattern % {
+ 'delim' : _re.escape(cls.delimiter),
+ 'id' : cls.idpattern,
+ }
+ cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
+
+
+class Template:
+ """A string class for supporting $-substitutions."""
+ __metaclass__ = _TemplateMetaclass
+
+ delimiter = '$'
+ idpattern = r'[_a-z][_a-z0-9]*'
+
+ def __init__(self, template):
+ self.template = template
+
+ # Search for $$, $identifier, ${identifier}, and any bare $'s
+
+ def _invalid(self, mo):
+ i = mo.start('invalid')
+ lines = self.template[:i].splitlines(True)
+ if not lines:
+ colno = 1
+ lineno = 1
+ else:
+ colno = i - len(''.join(lines[:-1]))
+ lineno = len(lines)
+ raise ValueError('Invalid placeholder in string: line %d, col %d' %
+ (lineno, colno))
+
+ def substitute(self, *args, **kws):
+ if len(args) > 1:
+ raise TypeError('Too many positional arguments')
+ if not args:
+ mapping = kws
+ elif kws:
+ mapping = _multimap(kws, args[0])
+ else:
+ mapping = args[0]
+ # Helper function for .sub()
+ def convert(mo):
+ # Check the most common path first.
+ named = mo.group('named') or mo.group('braced')
+ if named is not None:
+ val = mapping[named]
+ # We use this idiom instead of str() because the latter will
+ # fail if val is a Unicode containing non-ASCII characters.
+ return '%s' % val
+ if mo.group('escaped') is not None:
+ return self.delimiter
+ if mo.group('invalid') is not None:
+ self._invalid(mo)
+ raise ValueError('Unrecognized named group in pattern',
+ self.pattern)
+ return self.pattern.sub(convert, self.template)
+
+ def safe_substitute(self, *args, **kws):
+ if len(args) > 1:
+ raise TypeError('Too many positional arguments')
+ if not args:
+ mapping = kws
+ elif kws:
+ mapping = _multimap(kws, args[0])
+ else:
+ mapping = args[0]
+ # Helper function for .sub()
+ def convert(mo):
+ named = mo.group('named')
+ if named is not None:
+ try:
+ # We use this idiom instead of str() because the latter
+ # will fail if val is a Unicode containing non-ASCII
+ return '%s' % mapping[named]
+ except KeyError:
+ return self.delimiter + named
+ braced = mo.group('braced')
+ if braced is not None:
+ try:
+ return '%s' % mapping[braced]
+ except KeyError:
+ return self.delimiter + '{' + braced + '}'
+ if mo.group('escaped') is not None:
+ return self.delimiter
+ if mo.group('invalid') is not None:
+ return self.delimiter
+ raise ValueError('Unrecognized named group in pattern',
+ self.pattern)
+ return self.pattern.sub(convert, self.template)
+
+
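+ # A brief sketch of Template in use (commented out so that importing the
+ # module stays side-effect free):
+ #
+ #     t = Template('$who owes me $$$amount')
+ #     t.substitute(who='Tim', amount=100)    # -> 'Tim owes me $100'
+ #     t.safe_substitute(who='Tim')           # -> 'Tim owes me $$amount'
+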
+
+####################################################################
+# NOTE: Everything below here is deprecated. Use string methods instead.
+# This stuff will go away in Python 3.0.
+
+# Backward compatible names for exceptions
+index_error = ValueError
+atoi_error = ValueError
+atof_error = ValueError
+atol_error = ValueError
+
+# convert UPPER CASE letters to lower case
+def lower(s):
+ """lower(s) -> string
+
+ Return a copy of the string s converted to lowercase.
+
+ """
+ return s.lower()
+
+# Convert lower case letters to UPPER CASE
+def upper(s):
+ """upper(s) -> string
+
+ Return a copy of the string s converted to uppercase.
+
+ """
+ return s.upper()
+
+# Swap lower case letters and UPPER CASE
+def swapcase(s):
+ """swapcase(s) -> string
+
+ Return a copy of the string s with upper case characters
+ converted to lowercase and vice versa.
+
+ """
+ return s.swapcase()
+
+# Strip leading and trailing tabs and spaces
+def strip(s, chars=None):
+ """strip(s [,chars]) -> string
+
+ Return a copy of the string s with leading and trailing
+ whitespace removed.
+ If chars is given and not None, remove characters in chars instead.
+ If chars is unicode, S will be converted to unicode before stripping.
+
+ """
+ return s.strip(chars)
+
+# Strip leading tabs and spaces
+def lstrip(s, chars=None):
+ """lstrip(s [,chars]) -> string
+
+ Return a copy of the string s with leading whitespace removed.
+ If chars is given and not None, remove characters in chars instead.
+
+ """
+ return s.lstrip(chars)
+
+# Strip trailing tabs and spaces
+def rstrip(s, chars=None):
+ """rstrip(s [,chars]) -> string
+
+ Return a copy of the string s with trailing whitespace removed.
+ If chars is given and not None, remove characters in chars instead.
+
+ """
+ return s.rstrip(chars)
+
+
+# Split a string into a list of space/tab-separated words
+def split(s, sep=None, maxsplit=-1):
+ """split(s [,sep [,maxsplit]]) -> list of strings
+
+ Return a list of the words in the string s, using sep as the
+ delimiter string. If maxsplit is given, splits at no more than
+ maxsplit places (resulting in at most maxsplit+1 words). If sep
+ is not specified or is None, any whitespace string is a separator.
+
+ (split and splitfields are synonymous)
+
+ """
+ return s.split(sep, maxsplit)
+splitfields = split
+
+# Split a string into a list of space/tab-separated words
+def rsplit(s, sep=None, maxsplit=-1):
+ """rsplit(s [,sep [,maxsplit]]) -> list of strings
+
+ Return a list of the words in the string s, using sep as the
+ delimiter string, starting at the end of the string and working
+ to the front. If maxsplit is given, at most maxsplit splits are
+ done. If sep is not specified or is None, any whitespace string
+ is a separator.
+ """
+ return s.rsplit(sep, maxsplit)
+
+# Join fields with optional separator
+def join(words, sep = ' '):
+ """join(list [,sep]) -> string
+
+ Return a string composed of the words in list, with
+ intervening occurrences of sep. The default separator is a
+ single space.
+
+ (joinfields and join are synonymous)
+
+ """
+ return sep.join(words)
+joinfields = join
+
+# Find substring, raise exception if not found
+def index(s, *args):
+ """index(s, sub [,start [,end]]) -> int
+
+ Like find but raises ValueError when the substring is not found.
+
+ """
+ return s.index(*args)
+
+# Find last substring, raise exception if not found
+def rindex(s, *args):
+ """rindex(s, sub [,start [,end]]) -> int
+
+ Like rfind but raises ValueError when the substring is not found.
+
+ """
+ return s.rindex(*args)
+
+# Count non-overlapping occurrences of substring
+def count(s, *args):
+ """count(s, sub[, start[,end]]) -> int
+
+ Return the number of occurrences of substring sub in string
+ s[start:end]. Optional arguments start and end are
+ interpreted as in slice notation.
+
+ """
+ return s.count(*args)
+
+# Find substring, return -1 if not found
+def find(s, *args):
+ """find(s, sub [,start [,end]]) -> in
+
+ Return the lowest index in s where substring sub is found,
+ such that sub is contained within s[start:end]. Optional
+ arguments start and end are interpreted as in slice notation.
+
+ Return -1 on failure.
+
+ """
+ return s.find(*args)
+
+# Find last substring, return -1 if not found
+def rfind(s, *args):
+ """rfind(s, sub [,start [,end]]) -> int
+
+ Return the highest index in s where substring sub is found,
+ such that sub is contained within s[start:end]. Optional
+ arguments start and end are interpreted as in slice notation.
+
+ Return -1 on failure.
+
+ """
+ return s.rfind(*args)
+
+# for a bit of speed
+_float = float
+_int = int
+_long = long
+
+# Convert string to float
+def atof(s):
+ """atof(s) -> float
+
+ Return the floating point number represented by the string s.
+
+ """
+ return _float(s)
+
+
+# Convert string to integer
+def atoi(s , base=10):
+ """atoi(s [,base]) -> int
+
+ Return the integer represented by the string s in the given
+ base, which defaults to 10. The string s must consist of one
+ or more digits, possibly preceded by a sign. If base is 0, it
+ is chosen from the leading characters of s, 0 for octal, 0x or
+ 0X for hexadecimal. If base is 16, a preceding 0x or 0X is
+ accepted.
+
+ """
+ return _int(s, base)
+
+
+# Convert string to long integer
+def atol(s, base=10):
+ """atol(s [,base]) -> long
+
+ Return the long integer represented by the string s in the
+ given base, which defaults to 10. The string s must consist
+ of one or more digits, possibly preceded by a sign. If base
+ is 0, it is chosen from the leading characters of s, 0 for
+ octal, 0x or 0X for hexadecimal. If base is 16, a preceding
+ 0x or 0X is accepted. A trailing L or l is not accepted,
+ unless base is 0.
+
+ """
+ return _long(s, base)
+
+
+# Left-justify a string
+def ljust(s, width, *args):
+ """ljust(s, width[, fillchar]) -> string
+
+ Return a left-justified version of s, in a field of the
+ specified width, padded with spaces as needed. The string is
+ never truncated. If specified the fillchar is used instead of spaces.
+
+ """
+ return s.ljust(width, *args)
+
+# Right-justify a string
+def rjust(s, width, *args):
+ """rjust(s, width[, fillchar]) -> string
+
+ Return a right-justified version of s, in a field of the
+ specified width, padded with spaces as needed. The string is
+ never truncated. If specified the fillchar is used instead of spaces.
+
+ """
+ return s.rjust(width, *args)
+
+# Center a string
+def center(s, width, *args):
+ """center(s, width[, fillchar]) -> string
+
+ Return a centered version of s, in a field of the specified
+ width, padded with spaces as needed. The string is never
+ truncated. If specified the fillchar is used instead of spaces.
+
+ """
+ return s.center(width, *args)
+
+# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
+# Decadent feature: the argument may be a string or a number
+# (Use of this is deprecated; it should be a string as with ljust c.s.)
+def zfill(x, width):
+ """zfill(x, width) -> string
+
+ Pad a numeric string x with zeros on the left, to fill a field
+ of the specified width. The string x is never truncated.
+
+ """
+ if not isinstance(x, basestring):
+ x = repr(x)
+ return x.zfill(width)
+
+# Expand tabs in a string.
+# Doesn't take non-printing chars into account, but does understand \n.
+def expandtabs(s, tabsize=8):
+ """expandtabs(s [,tabsize]) -> string
+
+ Return a copy of the string s with all tab characters replaced
+ by the appropriate number of spaces, depending on the current
+ column, and the tabsize (default 8).
+
+ """
+ return s.expandtabs(tabsize)
+
+# Character translation through look-up table.
+def translate(s, table, deletions=""):
+ """translate(s,table [,deletions]) -> string
+
+ Return a copy of the string s, where all characters occurring
+ in the optional argument deletions are removed, and the
+ remaining characters have been mapped through the given
+ translation table, which must be a string of length 256. The
+ deletions argument is not allowed for Unicode strings.
+
+ """
+ if deletions:
+ return s.translate(table, deletions)
+ else:
+ # Add s[:0] so that if s is Unicode and table is an 8-bit string,
+ # table is converted to Unicode. This means that table *cannot*
+ # be a dictionary -- for that feature, use u.translate() directly.
+ return s.translate(table + s[:0])
+
+# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
+def capitalize(s):
+ """capitalize(s) -> string
+
+ Return a copy of the string s with only its first character
+ capitalized.
+
+ """
+ return s.capitalize()
+
+# Substring replacement (global)
+def replace(s, old, new, maxsplit=-1):
+ """replace (str, old, new[, maxsplit]) -> string
+
+ Return a copy of string str with all occurrences of substring
+ old replaced by new. If the optional argument maxsplit is
+ given, only the first maxsplit occurrences are replaced.
+
+ """
+ return s.replace(old, new, maxsplit)
+
+
+# Try importing optional built-in module "strop" -- if it exists,
+# it redefines some string operations that are 100-1000 times faster.
+# It also defines values for whitespace, lowercase and uppercase
+# that match <ctype.h>'s definitions.
+
+try:
+ from strop import maketrans, lowercase, uppercase, whitespace
+ letters = lowercase + uppercase
+except ImportError:
+ pass # Use the original versions
diff --git a/paste/__init__.py b/paste/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/app_setup.py b/paste/app_setup.py
new file mode 100644
index 0000000..3d84463
--- /dev/null
+++ b/paste/app_setup.py
@@ -0,0 +1,410 @@
+#!/usr/bin/env python
+
+import optparse
+import fnmatch
+import os
+import sys
+from cStringIO import StringIO
+import re
+from paste.util.thirdparty import load_new_module
+string = load_new_module('string', (2, 4))
+
+from paste import pyconfig
+from paste import urlparser
+
+class InvalidCommand(Exception):
+ pass
+
+def find_template_info(args):
+ """
+ Given command-line arguments, this finds the app template
+ (paste.app_templates.<template_name>.command). It looks for a
+ -t or --template option (but ignores all other options), and if
+ none then looks in server.conf for a template_name option.
+
+ Returns server_conf_fn, template_name, template_dir, module
+ """
+ template_name = None
+ template_name, rest = find_template_option(args)
+ server_conf_fn = None
+ if template_name:
+ next_template_name, rest = find_template_option(rest)
+ if next_template_name:
+ raise InvalidCommand(
+ 'You cannot give two templates on the commandline '
+ '(first I found %r, then %r)'
+ % (template_name, next_template_name))
+ else:
+ server_conf_fn, template_name = find_template_config(args)
+ if not template_name:
+ raise InvalidCommand(
+ 'No template given (provide --template=name or run this command '
+ 'from a directory containing server.conf)')
+ if not re.search(r'^[a-zA-Z_][a-zA-Z0-9_]*$', template_name):
+ raise InvalidCommand(
+ 'The template name %r is invalid; template names can contain only '
+ 'letters, numbers, and _.' % template_name)
+ try:
+ template_mod = load_template(template_name)
+ except ImportError, e:
+ raise InvalidCommand(
+ 'No template exists by the name %r (%s)' % (template_name, e))
+ return (server_conf_fn, template_name, os.path.dirname(template_mod.__file__), template_mod)
+
+def find_template_option(args):
+ copy = args[:]
+ while copy:
+ if copy[0] == '--':
+ return None, copy
+ if copy[0] == '-t' or copy[0] == '--template':
+ if not copy[1:]:
+ raise InvalidCommand(
+ '%s needs to be followed with a template name' % copy[0])
+ return copy[1], copy[2:]
+ if copy[0].startswith('-t'):
+ return copy[0][2:], copy[1:]
+ if copy[0].startswith('--template='):
+ return copy[0][len('--template='):], copy[1:]
+ copy.pop(0)
+ return None, []
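+
+# An illustrative sketch of the option spellings accepted above (the
+# argument lists are made up for the example):
+def _find_template_option_example():
+    assert find_template_option(['-t', 'webkit_zpt', 'create']) == \
+           ('webkit_zpt', ['create'])
+    assert find_template_option(['-twebkit_zpt', 'create']) == \
+           ('webkit_zpt', ['create'])
+    assert find_template_option(['--template=webkit_zpt', 'create']) == \
+           ('webkit_zpt', ['create'])
+    assert find_template_option(['create']) == (None, [])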
+
+def find_template_config(args):
+ conf_fn = os.path.join(os.getcwd(), 'server.conf')
+ if not os.path.exists(conf_fn):
+ return None, None
+ conf = pyconfig.Config()
+ conf.load(conf_fn)
+ return conf_fn, conf.get('app_template')
+
+def load_template(template_name):
+ base = os.path.join(os.path.dirname(__file__), 'app_templates')
+ full_name = 'paste.app_templates.%s.command' % template_name
+ errors = StringIO()
+ mod = urlparser.load_module_from_name(
+ None, os.path.join(base, template_name, 'command'),
+ full_name, errors)
+ if mod is None:
+ raise InvalidCommand(
+ 'Cannot load module: %s' % errors.getvalue())
+ return mod
+
+def run(args):
+ try:
+ server_conf_fn, name, dir, mod = find_template_info(args)
+ except InvalidCommand, e:
+ print str(e)
+ return 2
+ return mod.run(args, name, dir, mod, server_conf_fn)
+
+class CommandRunner(object):
+
+ def __init__(self):
+ self.commands = {}
+ self.command_aliases = {}
+ self.register_standard_commands()
+
+ def run(self, argv, template_name, template_dir, template_module,
+ server_conf_fn):
+ self.server_conf_fn = server_conf_fn
+ invoked_as = argv[0]
+ args = argv[1:]
+ for i in range(len(args)):
+ if not args[i].startswith('-'):
+ # this must be a command
+ command = args[i].lower()
+ del args[i]
+ break
+ else:
+ # no command found
+ self.invalid('No COMMAND given (try "%s help")'
+ % os.path.basename(invoked_as))
+ real_command = self.command_aliases.get(command, command)
+ if real_command not in self.commands.keys():
+ self.invalid('COMMAND %s unknown' % command)
+ runner = self.commands[real_command](
+ invoked_as, command, args, self,
+ template_name, template_dir, template_module)
+ runner.run()
+
+ def register(self, command):
+ name = command.name
+ self.commands[name] = command
+ for alias in command.aliases:
+ self.command_aliases[alias] = name
+
+ def invalid(self, msg, code=2):
+ print msg
+ sys.exit(code)
+
+ def register_standard_commands(self):
+ # @@: these commands shouldn't require a template
+ self.register(CommandHelp)
+ self.register(CommandList)
+
+############################################################
+## Command framework
+############################################################
+
+def standard_parser(verbose=True, simulate=True, interactive=False):
+ parser = optparse.OptionParser()
+ if verbose:
+ parser.add_option('-v', '--verbose',
+ help='Be verbose (multiple times for more verbosity)',
+ action='count',
+ dest='verbose',
+ default=0)
+ if simulate:
+ parser.add_option('-n', '--simulate',
+ help="Don't actually do anything (implies -v)",
+ action='store_true',
+ dest='simulate')
+ if interactive:
+ parser.add_option('-i', '--interactive',
+ help="Ask before doing anything (use twice to be more careful)",
+ action="count",
+ dest="interactive",
+ default=0)
+ parser.add_option('-t', '--template',
+ help='Use this template',
+ metavar='NAME',
+ dest='template_name')
+ return parser
+
+class Command(object):
+
+ min_args = 0
+ min_args_error = 'You must provide at least %(min_args)s arguments'
+ max_args = 0
+ max_args_error = 'You must provide no more than %(max_args)s arguments'
+ aliases = ()
+ required_args = []
+ description = None
+
+ def __init__(self, invoked_as, command_name, args, runner,
+ template_name, template_dir, template_module):
+ self.invoked_as = invoked_as
+ self.command_name = command_name
+ self.raw_args = args
+ self.runner = runner
+ self.template_name = template_name
+ self.template_dir = template_dir
+ self.template_module = template_module
+
+ def run(self):
+ self.parser.usage = "%%prog [options]\n%s" % self.summary
+ self.parser.prog = '%s %s' % (
+ os.path.basename(self.invoked_as),
+ self.command_name)
+ if self.description:
+ self.parser.description = self.description
+ self.options, self.args = self.parser.parse_args(self.raw_args)
+ if (getattr(self.options, 'simulate', False)
+ and not self.options.verbose):
+ self.options.verbose = 1
+ if self.min_args is not None and len(self.args) < self.min_args:
+ self.runner.invalid(
+ self.min_args_error % {'min_args': self.min_args,
+ 'actual_args': len(self.args)})
+ if self.max_args is not None and len(self.args) > self.max_args:
+ self.runner.invalid(
+ self.max_args_error % {'max_args': self.max_args,
+ 'actual_args': len(self.args)})
+ for var_name, option_name in self.required_args:
+ if not getattr(self.options, var_name, None):
+ self.runner.invalid(
+ 'You must provide the option %s' % option_name)
+ self.command()
+
+ def ask(self, prompt, safe=False, default=True):
+ if self.options.interactive >= 2:
+ default = safe
+ if default:
+ prompt += ' [Y/n]? '
+ else:
+ prompt += ' [y/N]? '
+ while 1:
+ response = raw_input(prompt).strip()
+ if not response.strip():
+ return default
+ if response and response[0].lower() in ('y', 'n'):
+ return response[0].lower() == 'y'
+ print 'Y or N please'
+
+ def _get_prog_name(self):
+ return os.path.basename(self.invoked_as)
+ prog_name = property(_get_prog_name)
+
+############################################################
+## Standard commands
+############################################################
+
+class CommandList(Command):
+
+ name = 'list'
+ summary = 'Show available templates'
+
+ parser = standard_parser(simulate=False)
+
+ max_args = 1
+
+ def command(self):
+ any = False
+ app_template_dir = os.path.join(os.path.dirname(__file__), 'app_templates')
+ for name in os.listdir(app_template_dir):
+ dir = os.path.join(app_template_dir, name)
+ if not os.path.exists(os.path.join(dir, 'description.txt')):
+ if self.options.verbose >= 2:
+ print 'Skipping %s (no description.txt)' % dir
+ continue
+ if self.args and not fnmatch.fnmatch(name, self.args[0]):
+ continue
+ if not self.options.verbose:
+ print '%s: %s\n' % (name, self.template_description(dir).splitlines()[0])
+ else:
+ print '%s: %s\n' % (name, self.template_description(dir))
+ # @@: for verbosity >= 2 we should give lots of metadata
+ any = True
+ if not any:
+ print 'No application templates found'
+
+ def template_description(self, dir):
+ f = open(os.path.join(dir, 'description.txt'))
+ content = f.read().strip()
+ f.close()
+ return content
+
+class CommandHelp(Command):
+
+ name = 'help'
+ summary = 'Show help'
+
+ parser = standard_parser(verbose=False)
+
+ max_args = 1
+
+ def command(self):
+ if self.args:
+ self.runner.run([self.invoked_as, self.args[0], '-h'],
+ self.template_name, self.template_dir,
+ self.template_module,
+ self.runner.server_conf_fn)
+ else:
+ print 'Available commands:'
+ print ' (use "%s help COMMAND" or "%s COMMAND -h" ' % (
+ self.prog_name, self.prog_name)
+ print ' for more information)'
+ items = self.runner.commands.items()
+ items.sort()
+ max_len = max([len(cn) for cn, c in items])
+ for command_name, command in items:
+ print '%s:%s %s' % (command_name,
+ ' '*(max_len-len(command_name)),
+ command.summary)
+ if command.aliases:
+ print '%s (Aliases: %s)' % (
+ ' '*max_len, ', '.join(command.aliases))
+
+############################################################
+## Optional helper commands
+############################################################
+
+class CommandCreate(Command):
+
+ name = 'create'
+ summary = 'Create application from template'
+
+ max_args = 1
+ min_args = 1
+
+ parser = standard_parser()
+
+ default_options = {
+ 'server': 'wsgiutils',
+ 'verbose': True,
+ 'reload': True,
+ 'debug': True,
+ }
+
+ def command(self):
+ self.output_dir = self.args[0]
+ self.create(self.output_dir)
+ if self.options.verbose:
+ print 'Now do:'
+ print ' cd %s' % self.output_dir
+ print ' wsgi-server'
+
+ def create(self, output_dir):
+ file_dir = os.path.join(self.template_dir, 'template')
+ if not os.path.exists(file_dir):
+ raise OSError(
+ 'No %s directory, I don\'t know what to do next' % file_dir)
+ template_options = self.default_options.copy()
+ template_options.update(self.options.__dict__)
+ template_options['app_name'] = os.path.basename(output_dir)
+ template_options['base_dir'] = output_dir
+ template_options['absolute_base_dir'] = os.path.abspath(output_dir)
+ template_options['absolute_parent'] = os.path.dirname(
+ os.path.abspath(output_dir))
+ template_options['template_name'] = self.template_name
+ self.copy_dir(file_dir, output_dir, template_options, self.options.verbose,
+ self.options.simulate)
+
+ def copy_dir(self, *args, **kw):
+ copy_dir(*args, **kw)
+
+def copy_dir(source, dest, vars, verbosity, simulate):
+ names = os.listdir(source)
+ names.sort()
+ if not os.path.exists(dest):
+ if verbosity >= 1:
+ print 'Creating %s/' % dest
+ if not simulate:
+ os.makedirs(dest)
+ elif verbosity >= 2:
+ print 'Directory %s exists' % dest
+ for name in names:
+ full = os.path.join(source, name)
+ if name.startswith('.'):
+ if verbosity >= 2:
+ print 'Skipping hidden file %s' % full
+ continue
+ dest_full = os.path.join(dest, _substitute_filename(name, vars))
+ if os.path.isdir(full):
+ if verbosity:
+ print 'Recursing into %s' % full
+ copy_dir(full, dest_full, vars, verbosity, simulate)
+ continue
+ f = open(full, 'rb')
+ content = f.read()
+ f.close()
+ content = _substitute_content(content, vars)
+ if verbosity:
+ print 'Copying %s to %s' % (full, dest_full)
+ f = open(dest_full, 'wb')
+ f.write(content)
+ f.close()
+
+def _substitute_filename(fn, vars):
+ for var, value in vars.items():
+ fn = fn.replace('+%s+' % var, str(value))
+ return fn
+
+def _substitute_content(content, vars):
+ tmpl = string.Template(content)
+ return tmpl.substitute(TypeMapper(vars))
+
+class TypeMapper(dict):
+
+ def __getitem__(self, item):
+ if item.startswith('str_'):
+ return repr(str(self[item[4:]]))
+ elif item.startswith('bool_'):
+ if self[item[5:]]:
+ return 'True'
+ else:
+ return 'False'
+ else:
+ return dict.__getitem__(self, item)
+
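+# An illustrative sketch of the +var+ and $str_var / $bool_var substitution
+# conventions used by the app templates (the variable values below are made
+# up for the example; they are not defined by this module):
+def _substitution_example():
+    vars = {'app_name': 'myapp', 'debug': 1}
+    # +app_name+ in a filename becomes the literal value:
+    assert _substitute_filename('+app_name+.conf', vars) == 'myapp.conf'
+    # $str_app_name renders as a quoted Python string and $bool_debug as
+    # True/False, via TypeMapper:
+    assert _substitute_content('app_name = $str_app_name', vars) == \
+           "app_name = 'myapp'"
+    assert _substitute_content('debug = $bool_debug', vars) == 'debug = True'
+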
+if __name__ == '__main__':
+ run(sys.argv)
diff --git a/paste/app_templates/__init__.py b/paste/app_templates/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/app_templates/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/app_templates/webkit_zpt/__init__.py b/paste/app_templates/webkit_zpt/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/app_templates/webkit_zpt/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/app_templates/webkit_zpt/command.py b/paste/app_templates/webkit_zpt/command.py
new file mode 100644
index 0000000..dff1b60
--- /dev/null
+++ b/paste/app_templates/webkit_zpt/command.py
@@ -0,0 +1,43 @@
+import os
+from paste import app_setup
+from paste import pyconfig
+
+the_runner = app_setup.CommandRunner()
+the_runner.register(app_setup.CommandCreate)
+
+class CommandServlet(app_setup.Command):
+
+ name = 'servlet'
+ summary = 'Create a new servlet and template'
+ max_args = 1
+ min_args = 1
+
+ parser = app_setup.standard_parser()
+
+ def command(self):
+ servlet_fn = os.path.splitext(self.args[0])[0]
+ config = {}
+ if '/' in servlet_fn or '\\' in servlet_fn:
+ servlet_name = os.path.basename(servlet_fn)
+ else:
+ servlet_name = servlet_fn
+ if self.runner.server_conf_fn:
+ output_dir = os.path.dirname(self.runner.server_conf_fn)
+ config = pyconfig.Config()
+ config.load(self.runner.server_conf_fn)
+ else:
+ output_dir = os.getcwd()
+ source_dir = os.path.join(self.template_dir, 'servlet_template')
+ template_options = config.copy()
+ template_options.update(self.options.__dict__)
+ template_options.update({
+ 'servlet_name': servlet_name,
+ 'servlet_fn': servlet_fn,
+ })
+ app_setup.copy_dir(
+ source_dir, output_dir, template_options,
+ self.options.verbose, self.options.simulate)
+
+the_runner.register(CommandServlet)
+
+run = the_runner.run
diff --git a/paste/app_templates/webkit_zpt/description.txt b/paste/app_templates/webkit_zpt/description.txt
new file mode 100644
index 0000000..5385771
--- /dev/null
+++ b/paste/app_templates/webkit_zpt/description.txt
@@ -0,0 +1,4 @@
+WebKit and Zope Page Template framework
+
+This sets up a basic Webware/WebKit application, using Zope Page
+Templates, Webware Components, and ZPTKit.
diff --git a/paste/app_templates/webkit_zpt/servlet_template/templates/+servlet_name+.pt b/paste/app_templates/webkit_zpt/servlet_template/templates/+servlet_name+.pt
new file mode 100644
index 0000000..e2825e5
--- /dev/null
+++ b/paste/app_templates/webkit_zpt/servlet_template/templates/+servlet_name+.pt
@@ -0,0 +1,7 @@
+<html metal:use-macro="here/standard_template.pt/macros/page">
+<metal:body fill-slot="body">
+
+
+
+</metal:body>
+</html>
\ No newline at end of file
diff --git a/paste/app_templates/webkit_zpt/servlet_template/web/+servlet_name+.py b/paste/app_templates/webkit_zpt/servlet_template/web/+servlet_name+.py
new file mode 100644
index 0000000..52ae6f9
--- /dev/null
+++ b/paste/app_templates/webkit_zpt/servlet_template/web/+servlet_name+.py
@@ -0,0 +1,7 @@
+from $app_name.sitepage import SitePage
+
+class $servlet_name(SitePage):
+
+ def setup(self):
+ self.options.title = $str_servlet_name
+
diff --git a/paste/app_templates/webkit_zpt/template/__init__.py b/paste/app_templates/webkit_zpt/template/__init__.py
new file mode 100644
index 0000000..0454673
--- /dev/null
+++ b/paste/app_templates/webkit_zpt/template/__init__.py
@@ -0,0 +1,5 @@
+from paste.util.thirdparty import add_package
+
+add_package('ZopePageTemplates')
+add_package('Component')
+add_package('ZPTKit')
diff --git a/paste/app_templates/webkit_zpt/template/server.conf b/paste/app_templates/webkit_zpt/template/server.conf
new file mode 100644
index 0000000..5935785
--- /dev/null
+++ b/paste/app_templates/webkit_zpt/template/server.conf
@@ -0,0 +1,19 @@
+# -*- python -*- Note: this file is in Python syntax
+
+import os
+
+app_template = $str_template_name
+app_name = $str_app_name
+webkit_dir = os.path.join($str_absolute_base_dir, 'web')
+sys_path = [$str_absolute_parent]
+
+## Server options:
+
+verbose = $bool_verbose
+# The name of the server-type to start:
+server = $str_server
+# If true, files will be regularly polled and the server restarted
+# if files are modified:
+reload = $bool_reload
+# If true, tracebacks will be shown in the browser:
+debug = $bool_debug
diff --git a/paste/app_templates/webkit_zpt/template/sitepage.py b/paste/app_templates/webkit_zpt/template/sitepage.py
new file mode 100644
index 0000000..7ab073c
--- /dev/null
+++ b/paste/app_templates/webkit_zpt/template/sitepage.py
@@ -0,0 +1,33 @@
+import os
+from Component import CPage
+from Component.notify import NotifyComponent
+from ZPTKit import ZPTComponent
+
+class SitePage(CPage):
+
+ components = [
+ ZPTComponent([os.path.join(os.path.dirname(__file__),
+ 'templates')]),
+ NotifyComponent()]
+
+ def title(self):
+ return self.options.get('title', CPage.title(self))
+
+ def awake(self, trans):
+ CPage.awake(self, trans)
+ self.baseURL = self.request().environ()['$app_name.base_url']
+ self.baseStaticURL = self.baseURL + '/static'
+ self.setup()
+
+ def setup(self):
+ pass
+
+ def sleep(self, trans):
+ self.teardown()
+ CPage.sleep(self, trans)
+
+ def teardown(self):
+ pass
+
+ def writeHTML(self):
+ self.writeTemplate()
diff --git a/paste/app_templates/webkit_zpt/template/templates/generic_error.pt b/paste/app_templates/webkit_zpt/template/templates/generic_error.pt
new file mode 100644
index 0000000..7b32820
--- /dev/null
+++ b/paste/app_templates/webkit_zpt/template/templates/generic_error.pt
@@ -0,0 +1,7 @@
+<html metal:use-macro="here/standard_template.pt/macros/page">
+<metal:body fill-slot="body">
+
+<tal:block replace="structure options/error_message" />
+
+</metal:body>
+</html>
\ No newline at end of file
diff --git a/paste/app_templates/webkit_zpt/template/templates/index.pt b/paste/app_templates/webkit_zpt/template/templates/index.pt
new file mode 100644
index 0000000..ff16ada
--- /dev/null
+++ b/paste/app_templates/webkit_zpt/template/templates/index.pt
@@ -0,0 +1,22 @@
+<html metal:use-macro="here/standard_template.pt/macros/page">
+<metal:body fill-slot="body">
+
+<p>
+Congratulations, you have set up your new application instance.
+This page serves as an example; feel free to overwrite it.
+</p>
+
+<p>
+These are all the environmental variables defined:
+</p>
+
+<table border=1>
+ <tr tal:repeat="var options/vars">
+ <td tal:content="python: var[0]">Var Name</td>
+ <td><tt tal:content="python: var[1]">Var value</tt></td>
+ </tr>
+</table>
+
+
+</metal:body>
+</html>
\ No newline at end of file
diff --git a/paste/app_templates/webkit_zpt/template/templates/standard_template.pt b/paste/app_templates/webkit_zpt/template/templates/standard_template.pt
new file mode 100644
index 0000000..4e75a5c
--- /dev/null
+++ b/paste/app_templates/webkit_zpt/template/templates/standard_template.pt
@@ -0,0 +1,29 @@
+<metal:tpl define-macro="page">
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"
+ "http://www.w3.org/TR/REC-html40/loose.dtd">
+<html lang="en-US">
+<head>
+<title tal:content="servlet/title">title</title>
+<link rel="stylesheet" type="text/css"
+ tal:attributes="href python: servlet.baseStaticURL + '/stylesheet.css'">
+
+<metal:slot define-slot="extra_head"></metal:slot>
+
+</head>
+<body>
+
+<h1><tal:mark replace="servlet/title"/></h1>
+
+<span tal:replace="structure servlet/messageText">
+This is where the notification messages go.
+</span>
+
+<div id="content">
+<metal:tpl define-slot="body">
+[This page has not customized its "body" slot]
+</metal:tpl>
+</div>
+
+</body>
+</html>
+</metal:tpl>
diff --git a/paste/app_templates/webkit_zpt/template/web/__init__.py b/paste/app_templates/webkit_zpt/template/web/__init__.py
new file mode 100644
index 0000000..1454f37
--- /dev/null
+++ b/paste/app_templates/webkit_zpt/template/web/__init__.py
@@ -0,0 +1,6 @@
+import os
+from paste import wsgilib
+
+def urlparser_hook(environ):
+ if not environ.has_key('${app_name}.base_url'):
+ environ['${app_name}.base_url'] = environ['SCRIPT_NAME']
diff --git a/paste/app_templates/webkit_zpt/template/web/index.py b/paste/app_templates/webkit_zpt/template/web/index.py
new file mode 100644
index 0000000..e095943
--- /dev/null
+++ b/paste/app_templates/webkit_zpt/template/web/index.py
@@ -0,0 +1,9 @@
+from $app_name.sitepage import SitePage
+
+class index(SitePage):
+
+ def setup(self):
+ self.options.vars = self.request().environ().items()
+ self.options.vars.sort()
+ self.options.title = 'Welcome to your new app'
+
diff --git a/paste/app_templates/webkit_zpt/template/web/static/stylesheet.css b/paste/app_templates/webkit_zpt/template/web/static/stylesheet.css
new file mode 100644
index 0000000..d70e9ec
--- /dev/null
+++ b/paste/app_templates/webkit_zpt/template/web/static/stylesheet.css
@@ -0,0 +1,13 @@
+body {
+ font-family: Helvetica, Arial, sans-serif;
+}
+
+pre {
+ overflow: auto;
+}
+
+div.notifyMessage {
+ border: 2px solid black;
+ background-color: #007700;
+ color: #ffffff;
+}
diff --git a/paste/cgiserver.py b/paste/cgiserver.py
new file mode 100644
index 0000000..83d257c
--- /dev/null
+++ b/paste/cgiserver.py
@@ -0,0 +1,113 @@
+"""
+cgi WSGI server
+===============
+
+Usage
+-----
+
+The CGI script is the configuration and glue for this server.
+Typically you will write a CGI script like::
+
+ #!/usr/bin/env python
+ from paste.cgiserver import run_with_cgi
+ # Assuming app is your WSGI application object...
+ from myapplication import app
+ run_with_cgi(app)
+
+"""
+
+import os, sys
+
+def run_with_cgi(application,
+ use_cgitb=True,
+ redirect_stdout=False):
+ stdout = sys.stdout
+
+ if use_cgitb:
+ import cgitb
+ cgitb.enable()
+
+ environ = dict(os.environ)
+ environ['wsgi.input'] = sys.stdin
+ environ['wsgi.errors'] = sys.stderr
+ environ['wsgi.version'] = (1, 0)
+ environ['wsgi.multithread'] = False
+ environ['wsgi.multiprocess'] = True
+ environ['wsgi.run_once'] = True
+
+ if os.environ.get('HTTPS', 'off').lower() in ('on', '1'):
+ environ['wsgi.url_scheme'] = 'https'
+ else:
+ environ['wsgi.url_scheme'] = 'http'
+
+ if redirect_stdout:
+ sys.stdout = sys.stderr
+
+ headers_set = []
+ headers_sent = []
+ result = None
+
+ def write(data):
+ assert headers_set, "write() before start_response()"
+
+ if not headers_sent:
+ # Before the first output, send the stored headers
+ status, response_headers = headers_sent[:] = headers_set
+
+ # See if Content-Length is given.
+ found = False
+ for name, value in response_headers:
+ if name.lower() == 'content-length':
+ found = True
+ break
+
+ # If not given, try to deduce it if the iterator implements
+ # __len__ and is of length 1. (data will be result[0] in this
+ # case.)
+ if not found and result is not None:
+ try:
+ if len(result) == 1:
+ response_headers.append(('Content-Length',
+ str(len(data))))
+ except:
+ pass
+
+ stdout.write('Status: %s\r\n' % status)
+ for header in response_headers:
+ stdout.write('%s: %s\r\n' % header)
+ stdout.write('\r\n')
+
+ stdout.write(data)
+ stdout.flush()
+
+ def start_response(status, response_headers, exc_info=None):
+ if exc_info:
+ try:
+ if headers_sent:
+ # Re-raise original exception if headers sent
+ raise exc_info[0], exc_info[1], exc_info[2]
+ finally:
+ exc_info = None # avoid dangling circular ref
+ else:
+ assert not headers_set, "Headers already set!"
+
+ headers_set[:] = [status, response_headers]
+ return write
+
+ result = application(environ, start_response)
+ try:
+ for data in result:
+ if data: # don't send headers until body appears
+ write(data)
+ if not headers_sent:
+ write('') # send headers now if body was empty
+ finally:
+ if hasattr(result, 'close'):
+ result.close()
+
+if __name__ == '__main__':
+ def myapp(environ, start_response):
+ start_response('200 OK', [('Content-Type', 'text/plain')])
+ return ['Hello World!\n']
+
+ run_with_cgi(myapp)
diff --git a/paste/cgitb_catcher.py b/paste/cgitb_catcher.py
new file mode 100644
index 0000000..b8f4915
--- /dev/null
+++ b/paste/cgitb_catcher.py
@@ -0,0 +1,95 @@
+"""
+WSGI middleware
+
+Captures any exceptions and prints a pretty report. See the cgitb
+documentation for more:
+ http://python.org/doc/current/lib/module-cgitb.html
+"""
+
+import cgitb
+from cStringIO import StringIO
+import sys
+import traceback
+
+class DummyFile(object):
+ pass
+
+def middleware(application, **kw):
+
+ def start_application(environ, start_response):
+ started = []
+
+ def detect_start_response(status, headers):
+ started.append(start_response(status, headers))
+ return started[0]
+
+ try:
+ app_iter = application(environ, detect_start_response)
+ return catching_iter(app_iter)
+ except:
+ if not started:
+ write = start_response('500 Internal Server Error',
+ [('content-type', 'text/html')])
+ else:
+ write = started[0]
+ dummy_file = StringIO()
+ hook = cgitb.Hook(**kw)
+ hook.file = dummy_file
+ hook(*sys.exc_info())
+ return [dummy_file.getvalue()]
+
+ def catching_iter(iter):
+ if not iter:
+ raise StopIteration
+ try:
+ for v in iter:
+ yield v
+ except:
+ exc = sys.exc_info()
+ dummy_file = StringIO()
+ hook = cgitb.Hook(**kw)
+ hook.file = dummy_file
+ hook(*exc)
+ yield dummy_file.getvalue()
+
+ return start_application
+
+def simple_middleware(application, **kw):
+
+ def start_application(environ, start_response):
+ started = []
+
+ def detect_start_response(status, headers):
+ started.append(start_response(status, headers))
+ return started[0]
+
+ try:
+ app_iter = application(environ, detect_start_response)
+ return catching_iter(app_iter)
+ except:
+ if not started:
+ write = start_response('500 Internal Server Error',
+ [('content-type', 'text/html')])
+ else:
+ write = started[0]
+
+ out = StringIO()
+ traceback.print_exc(file=out)
+ return ['<html><body><pre>%s</pre></body></html>'
+ % out.getvalue()]
+
+ def catching_iter(iter):
+ if not iter:
+ raise StopIteration
+ try:
+ for v in iter:
+ yield v
+ except:
+ exc = sys.exc_info()
+ dummy_file = StringIO()
+ traceback.print_exc(file=dummy_file)
+ yield dummy_file.getvalue()
+
+ return start_application
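+
+# A minimal wiring sketch (illustrative only; 'demo_app' below is a made-up
+# WSGI application, not part of this module). Keyword arguments given to
+# middleware() are passed straight through to cgitb.Hook.
+def _cgitb_example():
+    def demo_app(environ, start_response):
+        start_response('200 OK', [('Content-type', 'text/plain')])
+        raise RuntimeError('boom')
+    # Serve 'wrapped' instead of 'demo_app'; the RuntimeError is rendered
+    # as a cgitb HTML report instead of propagating to the server.
+    wrapped = middleware(demo_app)
+    return wrapped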
diff --git a/paste/configmiddleware.py b/paste/configmiddleware.py
new file mode 100644
index 0000000..20226f6
--- /dev/null
+++ b/paste/configmiddleware.py
@@ -0,0 +1,7 @@
+def config_middleware(app, config):
+
+ def replacement_app(environ, start_response):
+ environ['paste.config'] = config.copy()
+ return app(environ, start_response)
+
+ return replacement_app
diff --git a/paste/default_config.conf b/paste/default_config.conf
new file mode 100644
index 0000000..4403c5a
--- /dev/null
+++ b/paste/default_config.conf
@@ -0,0 +1 @@
+index_names = ['index', 'Index', 'main', 'Main']
diff --git a/paste/echo.py b/paste/echo.py
new file mode 100644
index 0000000..1a1c91f
--- /dev/null
+++ b/paste/echo.py
@@ -0,0 +1,59 @@
+r"""\
+WSGI application
+
+Does things as requested. Takes variables:
+
+header.header-name=value, like
+ header.location=http://yahoo.com
+
+error=code, like
+ error=301 (temporary redirect)
+ error=assert (assertion error)
+
+environ=true,
+ display all the environmental variables, like
+ key=str(value)\n
+
+message=string
+ display string
+"""
+
+import cgi
+import httpexceptions
+
+def application(environ, start_response):
+ form = cgi.FieldStorage(fp=environ['wsgi.input'],
+ environ=environ,
+ keep_blank_values=True)
+ headers = {}
+ for key in form.keys():
+ if key.startswith('header.'):
+ headers[key[len('header.'):]] = form[key].value
+
+ if form.getvalue('error') and form['error'].value != 'iter':
+ if form['error'].value == 'assert':
+ assert 0, "I am asserting zero!"
+ raise httpexceptions.get_exception(int(form['error'].value))(
+ headers=headers)
+
+ if form.getvalue('environ'):
+ write = start_response('200 OK', [('Content-type', 'text/plain')])
+ items = environ.items()
+ items.sort()
+ return ['%s=%s\n' % (name, value)
+ for name, value in items]
+
+ if form.has_key('message'):
+ write = start_response('200 OK', [('Content-type', 'text/plain')])
+ write(form['message'].value)
+ return []
+
+ if form.getvalue('error') == 'iter':
+ return BadIter()
+
+ write = start_response('200 OK', [('Content-type', 'text/html')])
+ return ['hello world!']
+
+class BadIter(object):
+ def __iter__(self):
+ assert 0, "I am asserting zero in the iterator!"
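+
+# An illustrative, hand-rolled invocation (a sketch only; the environ below
+# contains just what this module happens to read, not a complete WSGI
+# environ):
+def _echo_example():
+    from StringIO import StringIO
+    environ = {
+        'REQUEST_METHOD': 'GET',
+        'QUERY_STRING': 'message=hello',
+        'wsgi.input': StringIO(''),
+    }
+    body = []
+    def start_response(status, headers):
+        return body.append
+    application(environ, start_response)
+    return ''.join(body)  # 'hello'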
diff --git a/paste/error_middleware.py b/paste/error_middleware.py
new file mode 100644
index 0000000..e2f6958
--- /dev/null
+++ b/paste/error_middleware.py
@@ -0,0 +1,132 @@
+import sys
+import traceback
+import cgi
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+from paste.exceptions import formatter, collector, reporter
+
+class ErrorMiddleware(object):
+
+ def __init__(self, application, show_exceptions=True,
+ email_exceptions_to=[], smtp_server='localhost'):
+ self.application = application
+ self.show_exceptions = show_exceptions
+ self.email_exceptions_to = email_exceptions_to
+ self.smtp_server = smtp_server
+
+ def __call__(self, environ, start_response):
+ # We want to be careful about not sending headers twice,
+ # and the content type that the app has committed to (if there
+ # is an exception in the iterator body of the response)
+ started = []
+
+ def detect_start_response(status, headers):
+ started.append(True)
+ return start_response(status, headers)
+
+ try:
+ app_iter = self.application(environ, detect_start_response)
+ return self.catching_iter(app_iter, environ)
+ except:
+ if not started:
+ start_response('500 Internal Server Error',
+ [('content-type', 'text/html')])
+ # @@: it would be nice to deal with bad content types here
+ response = self.exception_handler(sys.exc_info(), environ)
+ return [response]
+
+ def catching_iter(self, iter, environ):
+ if not iter:
+ raise StopIteration
+ error_on_close = False
+ try:
+ for v in iter:
+ yield v
+ if hasattr(iter, 'close'):
+ error_on_close = True
+ iter.close()
+ except:
+ response = self.exception_handler(sys.exc_info(), environ)
+ if not error_on_close and hasattr(iter, 'close'):
+ try:
+ iter.close()
+ except:
+ close_response = self.exception_handler(
+ sys.exc_info(), environ)
+ response += (
+ '<hr noshade>Error in .close():<br>%s'
+ % close_response)
+ yield response
+
+ def exception_handler(self, exc_info, environ):
+ reported = False
+ exc_data = collector.collect_exception(*exc_info)
+ conf = environ.get('paste.config', {})
+ extra_data = ''
+ if conf.get('error_email'):
+ rep = reporter.EmailReporter(
+ to_addresses=conf['error_email'],
+ from_address=conf.get('error_email_from', 'errors@localhost'),
+ smtp_server=conf.get('smtp_server', 'localhost'),
+ subject_prefix=conf.get('error_subject_prefix', ''))
+ extra_data += self.send_report(rep, exc_data)
+ reported = True
+ if conf.get('error_log'):
+ rep = reporter.LogReporter(
+ filename=conf['error_log'])
+ extra_data += self.send_report(rep, exc_data)
+ # Well, this isn't really true, is it?
+ reported = True
+ if conf.get('show_exceptions_in_error_log', True):
+ rep = reporter.FileReporter(
+ file=environ['wsgi.errors'])
+ extra_data += self.send_report(rep, exc_data)
+ # Well, this isn't really true, is it?
+ reported = True
+ if conf.get('debug', False):
+ html = self.error_template(
+ formatter.format_html(exc_data), extra_data)
+ reported = True
+ else:
+ html = self.error_template(
+ '''
+ An error occurred. See the error logs for more information.
+ (Turn debug on to display exception reports here)
+ ''', '')
+ if not reported:
+ stderr = environ['wsgi.errors']
+ err_report = formatter.format_text(exc_data, show_hidden_frames=True)
+ err_report += '\n' + '-'*60 + '\n'
+ stderr.write(err_report)
+ return html
+
+ def error_template(self, exception, extra):
+ return '''
+ <html>
+ <head>
+ <title>Server Error</title>
+ </head>
+ <body>
+ <h1>Server Error</h1>
+ %s
+ %s
+ </body>
+ </html>''' % (exception, extra)
+
+ def send_report(self, reporter, exc_data):
+ try:
+ reporter.report(exc_data)
+ except:
+ output = StringIO()
+ traceback.print_exc(file=output)
+ return """
+ <p>Additionally an error occurred while sending the %s report:
+
+ <pre>%s</pre>
+ </p>""" % (
+ cgi.escape(str(reporter)), output.getvalue())
+ else:
+ return ''
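+
+# A minimal wiring sketch (illustrative only; 'demo_app' and the config
+# values are made up). The 'paste.config' dictionary read by
+# exception_handler() is normally placed in the environ by
+# paste.configmiddleware.config_middleware:
+def _error_middleware_example():
+    from paste.configmiddleware import config_middleware
+    def demo_app(environ, start_response):
+        start_response('200 OK', [('Content-type', 'text/plain')])
+        raise ValueError('demo failure')
+    app = ErrorMiddleware(demo_app)
+    app = config_middleware(app, {
+        'debug': True,             # show the HTML traceback in the response
+        'error_log': 'error.log',  # also append text reports to this file
+    })
+    return app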
diff --git a/paste/exceptions/__init__.py b/paste/exceptions/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/exceptions/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/exceptions/collector.py b/paste/exceptions/collector.py
new file mode 100644
index 0000000..7d8eaf3
--- /dev/null
+++ b/paste/exceptions/collector.py
@@ -0,0 +1,458 @@
+##############################################################################
+#
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+## Originally zExceptions.ExceptionFormatter from Zope;
+## Modified by Ian Bicking, Imaginary Landscape, 2005
+"""
+An exception collector that finds traceback information plus
+supplements
+"""
+
+import sys
+import cgi
+import traceback
+import time
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+import linecache
+import serial_number_generator
+
+DEBUG_EXCEPTION_FORMATTER = True
+DEBUG_IDENT_PREFIX = 'E-'
+
+class ExceptionCollector:
+
+ """
+ Produces a data structure that can be used by formatters to
+ display exception reports.
+
+ Magic variables:
+
+ If you define one of these variables in your local scope, you can
+ add information to tracebacks that happen in that context. This
+ allows applications to add all sorts of extra information about
+ the context of the error, including URLs, environmental variables,
+ users, hostnames, etc. These are the variables we look for:
+
+ ``__traceback_supplement__``:
+ You can define this locally or globally (unlike all the other
+ variables, which must be defined locally).
+
+ ``__traceback_supplement__`` is a tuple of ``(factory, arg1,
+ arg2...)``. When there is an exception, ``factory(arg1, arg2,
+ ...)`` is called, and the resulting object is inspected for
+ supplemental information.
+
+ ``__traceback_info__``:
+ This information is added to the traceback, usually fairly
+ literally.
+
+ ``__traceback_hide__``:
+ If set and true, this indicates that the frame should be
+ hidden from abbreviated tracebacks. This way you can hide
+ some of the complexity of the larger framework and let the
+ user focus on their own errors.
+
+ ``__traceback_stop__``:
+ If set and true, then all frames before this should be hidden
+ (as though they had ``__traceback_hide__`` set). This way
+ you can, for instance, hide an entire server.
+
+ ``__traceback_start__``:
+ Start up the traceback again if ``__traceback_stop__`` has
+ come into play.
+
+ ``__traceback_reporter__``:
+ This should be a reporter object (see the reporter module),
+ or a list/tuple of reporter objects. All reporters found this
+ way will be given the exception, innermost first.
+
+ The actual interpretation of these values is largely up to the
+ reporters and formatters.
+
+ collect_exception(*sys.exc_info()) will return an object with
+ several attributes:
+
+ ``frames``:
+ A list of frames
+ ``exception_formatted``:
+ The formatted exception, generally a full traceback
+ ``exception_type``:
+ The string type of the exception, like ``'ValueError'``
+ ``exception_value``:
+ The string value of the exception, like ``'x not in list'``
+ ``identification_code``:
+ A hash of the exception data meant to identify the general
+ exception, so that it shares this code with other exceptions
+ that derive from the same problem. The code is a hash of
+ all the module names and function names in the traceback,
+ plus exception_type. This should be shown to users so they
+ can refer to the exception later. (@@: should it include a
+ portion that allows identification of the specific instance
+ of the exception as well?)
+
+ The list of frames goes innermost last. Each frame has these
+ attributes; some values may be None if they could not be
+ determined.
+
+ ``modname``:
+ the name of the module
+ ``filename``:
+ the filename of the module
+ ``lineno``:
+ the line of the error
+ ``revision``:
+ the contents of __version__ or __revision__
+ ``name``:
+ the function name
+ ``supplement``:
+ an object created from ``__traceback_supplement__``
+ ``supplement_exception``:
+ a simple traceback of any exception ``__traceback_supplement__``
+ created
+ ``traceback_info``:
+ the str() of any ``__traceback_info__`` variable found in the local
+ scope (@@: should it str()-ify it or not?)
+ ``traceback_hide``:
+ the value of any ``__traceback_hide__`` variable
+ ``traceback_stop``:
+ the value of any ``__traceback_stop__`` variable
+ ``traceback_start``:
+ the value of any ``__traceback_start__`` variable
+ ``traceback_log``:
+ the value of any ``__traceback_log__`` variable
+
+
+ ``__traceback_supplement__`` is thrown away, but a fixed
+ set of attributes are captured; each of these attributes is
+ optional.
+
+ ``object``:
+ the name of the object being visited
+ ``source_url``:
+ the original URL requested
+ ``line``:
+ the line of source being executed (for interpreters, like ZPT)
+ ``column``:
+ the column of source being executed
+ ``expression``:
+ the expression being evaluated (also for interpreters)
+ ``warnings``:
+ a list of (string) warnings to be displayed
+ ``getInfo``:
+ a function/method that takes no arguments, and returns a string
+ describing any extra information
+
+ These are used to create an object with attributes of the same
+ names (``getInfo`` becomes a string attribute, not a method).
+ ``__traceback_supplement__`` implementations should be careful to
+ produce values that are relatively static and unlikely to cause
+ further errors in the reporting system -- any complex
+ introspection should go in ``getInfo()`` and should ultimately
+ return a string.
+
+ Note that all attributes are optional, and under certain
+ circumstances may be None or may not exist at all -- the collector
+ can only do a best effort, but must avoid creating any exceptions
+ itself.
+
+ Formatters may want to use ``__traceback_hide__`` as a hint to
+ hide frames that are part of the 'framework' or underlying system;
+ any frames that precede ``__traceback_stop__`` should be treated
+ similarly. Completely hiding these frames may be confusing, but
+ it allows an abbreviated view of the exception that may highlight
+ problems (it is advised that a complete traceback also be
+ generated). If the last frame has one of these variables set, you
+ should probably ignore the variables entirely, as it means there
+ is an unexpected error in the framework.
+
+ TODO:
+
+ More attributes in __traceback_supplement__? Maybe an attribute
+ that gives a list of local variables that should also be
+ collected? Also, attributes that would be explicitly meant for
+ the entire request, not just a single frame. Right now some of
+ the fixed set of attributes (e.g., source_url) are meant for this
+ use, but there's no explicit way for the supplement to indicate
+ new values, e.g., logged-in user, HTTP referrer, environment, etc.
+ Also, the attributes that do exist are Zope/Web oriented.
+
+ More information on frames? cgitb, for instance, produces
+ extensive information on local variables. There exists the
+ possibility that getting this information may cause side effects,
+ which can make debugging more difficult; but it also provides
+ fodder for post-mortem debugging. However, the collector is not
+ meant to be configurable, but to capture everything it can and let
+ the formatters be configurable. Maybe this would have to be a
+ configuration value, or maybe it could be indicated by another
+ magical variable (which would probably mean 'show all local
+ variables below this frame')
+ """
+
+ show_revisions = 0
+
+ def __init__(self, limit=None):
+ self.limit = limit
+
+ def getLimit(self):
+ limit = self.limit
+ if limit is None:
+ limit = getattr(sys, 'tracebacklimit', None)
+ return limit
+
+ def getRevision(self, globals):
+ if not self.show_revisions:
+ return None
+ revision = globals.get('__revision__', None)
+ if revision is None:
+ # Incorrect but commonly used spelling
+ revision = globals.get('__version__', None)
+
+ if revision is not None:
+ try:
+ revision = str(revision).strip()
+ except:
+ revision = '???'
+ return revision
+
+ def collectSupplement(self, supplement, tb):
+ result = {}
+
+ for name in ('object', 'source_url', 'line', 'column',
+ 'expression', 'warnings'):
+ result[name] = getattr(supplement, name, None)
+
+ func = getattr(supplement, 'getInfo', None)
+ if func:
+ result['info'] = func()
+ else:
+ result['info'] = None
+ return SupplementaryData(**result)
+
+ def collectLine(self, tb):
+ f = tb.tb_frame
+ lineno = tb.tb_lineno
+ co = f.f_code
+ filename = co.co_filename
+ name = co.co_name
+ locals = f.f_locals
+ globals = f.f_globals
+
+ data = {}
+ data['modname'] = globals.get('__name__', None)
+ data['filename'] = filename
+ data['lineno'] = lineno
+ data['revision'] = self.getRevision(globals)
+ data['name'] = name
+
+ # Output a traceback supplement, if any.
+ if locals.has_key('__traceback_supplement__'):
+ # Use the supplement defined in the function.
+ tbs = locals['__traceback_supplement__']
+ elif globals.has_key('__traceback_supplement__'):
+ # Use the supplement defined in the module.
+ # This is used by Scripts (Python).
+ tbs = globals['__traceback_supplement__']
+ else:
+ tbs = None
+ if tbs is not None:
+ factory = tbs[0]
+ args = tbs[1:]
+ try:
+ supp = factory(*args)
+ data['supplement'] = self.collectSupplement(supp, tb)
+ except:
+ if DEBUG_EXCEPTION_FORMATTER:
+ out = StringIO()
+ traceback.print_exc(file=out)
+ text = out.getvalue()
+ data['supplement_exception'] = text
+ # else just swallow the exception.
+
+ try:
+ tbi = locals.get('__traceback_info__', None)
+ if tbi is not None:
+ data['traceback_info'] = str(tbi)
+ except:
+ pass
+
+ marker = []
+ for name in ('__traceback_hide__', '__traceback_stop__',
+ '__traceback_start__', '__traceback_log__'):
+ try:
+ tbh = locals.get(name, marker)
+ if tbh is not marker:
+ data[name[2:-2]] = tbh
+ except:
+ pass
+
+ return data
+
+ def collectExceptionOnly(self, etype, value):
+ return traceback.format_exception_only(etype, value)
+
+ def collectException(self, etype, value, tb, limit=None):
+ # The next line provides a way to detect recursion.
+ __exception_formatter__ = 1
+ frames = []
+ ident_data = []
+ if limit is None:
+ limit = self.getLimit()
+ n = 0
+ while tb is not None and (limit is None or n < limit):
+ if tb.tb_frame.f_locals.get('__exception_formatter__'):
+ # Stop recursion.
+ break
+ data = self.collectLine(tb)
+ frame = ExceptionFrame(**data)
+ frames.append(frame)
+ ident_data.append(frame.modname or '?')
+ ident_data.append(frame.name or '?')
+ tb = tb.tb_next
+ n = n + 1
+ ident_data.append(str(etype))
+ ident = serial_number_generator.hash_identifier(
+ ' '.join(ident_data), length=5, upper=True,
+ prefix=DEBUG_IDENT_PREFIX)
+
+ result = CollectedException(
+ frames=frames,
+ exception_formatted=self.collectExceptionOnly(etype, value),
+ exception_type=str(etype),
+ exception_value=str(value),
+ identification_code=ident,
+ date=time.localtime())
+ return result
+
+limit = 200
+
+class Bunch:
+
+ """
+ A generic container
+ """
+
+ def __init__(self, **attrs):
+ for name, value in attrs.items():
+ setattr(self, name, value)
+
+ def __repr__(self):
+ name = '<%s ' % self.__class__.__name__
+ name += ' '.join(['%s=%r' % (name, str(value)[:30])
+ for name, value in self.__dict__.items()
+ if not name.startswith('_')])
+ return name + '>'
+
+class CollectedException(Bunch):
+ """
+ This is the result of collecting the exception; it contains copies
+ of data of interest.
+ """
+ # A list of frames (ExceptionFrame instances), innermost last:
+ frames = []
+ # The result of traceback.format_exception_only; this looks
+ # like a normal traceback you'd see in the interactive interpreter
+ exception_formatted = None
+ # The *string* representation of the type of the exception
+ # (@@: should we give the actual class? -- we can't keep the
+ # actual exception around, but the class should be safe)
+ # Something like 'ValueError'
+ exception_type = None
+ # The string representation of the exception, from ``str(e)``.
+ exception_value = None
+ # An identifier which should more-or-less classify this particular
+ # exception, including where in the code it happened.
+ identification_code = None
+ # The date, as time.localtime() returns:
+ date = None
+
+class SupplementaryData(Bunch):
+ """
+ The result of __traceback_supplement__. We don't keep the
+ supplement object around, for fear of GC problems and whatnot.
+ (@@: Maybe I'm being too superstitious about copying only specific
+ information over)
+ """
+
+ # These attributes are copied from the object, or left as None
+ # if the object doesn't have these attributes:
+ object = None
+ source_url = None
+ line = None
+ column = None
+ expression = None
+ warnings = None
+ # This is the *return value* of supplement.getInfo():
+ info = None
+
+class ExceptionFrame(Bunch):
+ """
+ This represents one frame of the exception. Each frame is a
+ context in the call stack, typically represented by a line
+ number and module name in the traceback.
+ """
+
+ # The name of the module; can be None, especially when the code
+ # isn't associated with a module.
+ modname = None
+ # The filename (@@: when no filename, is it None or '?'?)
+ filename = None
+ # Line number
+ lineno = None
+ # The value of __revision__ or __version__ -- but only if
+ # show_revisions = True (by default it is false). (@@: Why not
+ # collect this?)
+ revision = None
+ # The name of the function with the error (@@: None or '?' when
+ # unknown?)
+ name = None
+ # A SupplementaryData object, if __traceback_supplement__ was found
+ # (and produced no errors)
+ supplement = None
+ # If accessing __traceback_supplement__ causes any error, the
+ # plain-text traceback is stored here
+ supplement_exception = None
+ # The str() of any __traceback_info__ value found
+ traceback_info = None
+ # The value of __traceback_hide__ and __traceback_stop__ variables:
+ traceback_hide = False
+ traceback_stop = False
+
+ def get_source_line(self):
+ """
+ Return the source of the current line of this frame. You
+ probably want to .strip() it as well, as it is likely to have
+ leading whitespace.
+ """
+ if not self.filename or not self.lineno:
+ return None
+ return linecache.getline(self.filename, self.lineno)
+
+if hasattr(sys, 'tracebacklimit'):
+ limit = min(limit, sys.tracebacklimit)
+
+col = ExceptionCollector()
+
+def collect_exception(t, v, tb, limit=None):
+ """
+ Use like:
+
+ try:
+ blah blah
+ except:
+ exc_data = collect_exception(*sys.exc_info())
+ """
+ return col.collectException(t, v, tb, limit=limit)
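+
+# A minimal usage sketch (illustrative only; 'fragile' is a made-up
+# function). It shows the most common magic variable and the shape of the
+# collected data; formatting is normally left to paste.exceptions.formatter.
+def _collector_example():
+    def fragile(x):
+        __traceback_info__ = 'x=%r' % x
+        return 1 / x
+    try:
+        fragile(0)
+    except:
+        exc_data = collect_exception(*sys.exc_info())
+        # Frames go outermost first, innermost last; each frame records
+        # module, file, line, and any __traceback_info__ in that scope.
+        last = exc_data.frames[-1]
+        return (exc_data.exception_type,       # string form of the type
+                exc_data.identification_code,  # 'E-' plus a short hash
+                last.traceback_info)           # "x=0"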
diff --git a/paste/exceptions/formatter.py b/paste/exceptions/formatter.py
new file mode 100644
index 0000000..e71030d
--- /dev/null
+++ b/paste/exceptions/formatter.py
@@ -0,0 +1,126 @@
+"""
+Formatters for the exception data that comes from ExceptionCollector.
+"""
+
+import cgi
+import serial_number_generator
+
+def html_quote(s):
+ return cgi.escape(s, True)
+
+class AbstractFormatter:
+
+ general_data_order = ['object', 'source_url']
+
+ def __init__(self, show_hidden_frames=False,
+ trim_source_paths=()):
+ self.show_hidden_frames = show_hidden_frames
+ self.trim_source_paths = trim_source_paths
+
+ def format_collected_data(self, exc_data):
+ general_data = {}
+ lines = []
+ show_hidden_frames = self.show_hidden_frames
+ last = exc_data.frames[-1]
+ if last.traceback_hide or last.traceback_stop:
+ # If the last frame was supposed to have been hidden,
+ # there's clearly a problem in the hidden portion of
+ # the framework itself
+ show_hidden_frames = True
+ for frame in exc_data.frames:
+ if frame.traceback_hide and not show_hidden_frames:
+ continue
+ sup = frame.supplement
+ if sup:
+ if sup.object:
+ general_data['object'] = self.format_sup_object(
+ sup.object)
+ if sup.source_url:
+ general_data['source_url'] = self.format_sup_url(
+ sup.source_url)
+ if sup.line:
+ lines.append(self.format_sup_line_pos(sup.line, sup.column))
+ if sup.expression:
+ lines.append(self.format_sup_expression(sup.expression))
+ if sup.warnings:
+ for warning in sup.warnings:
+ lines.append(self.format_sup_warning(warning))
+ if sup.info:
+ lines.extend(self.format_sup_info(sup.info))
+ filename = frame.filename
+ if filename and self.trim_source_paths:
+ for path, repl in self.trim_source_paths:
+ if filename.startswith(path):
+ filename = repl + filename[len(path):]
+ break
+ lines.append(self.format_source_line(
+ filename or '?',
+ frame.lineno or '?',
+ frame.name or '?'))
+ source = frame.get_source_line()
+ if source:
+ lines.append(self.format_source(source))
+ exc_info = self.format_exception_info(
+ exc_data.exception_type,
+ exc_data.exception_value)
+ general_data = general_data.items()
+ general_data.sort(
+ lambda a, b, self=self:
+ cmp(self.general_data_order.index(a[0]),
+ self.general_data_order.index(b[0])))
+ return self.format_combine(general_data, lines, exc_info)
+
+class TextFormatter(AbstractFormatter):
+
+ def quote(self, s):
+ return s
+ def emphasize(self, s):
+ return s
+ def format_sup_object(self, name):
+ return 'In object: %s' % self.quote(name)
+ def format_sup_url(self, url):
+ return 'URL: %s' % self.quote(url)
+ def format_sup_line_pos(self, line, column):
+ if column:
+ return 'Line %i, Column %i' % (line, column)
+ else:
+ return 'Line %i' % line
+ def format_sup_expression(self, expr):
+ return 'In expression: %s' % self.quote(expr)
+ def format_sup_warning(self, warning):
+ return 'Warning: %s' % self.quote(warning)
+ def format_sup_info(self, info):
+ return [self.quote(info)]
+ def format_source_line(self, filename, lineno, name):
+ return 'File %r, line %s in %s' % (filename, lineno, name)
+ def format_source(self, source_line):
+ return ' ' + self.quote(source_line.strip())
+ def format_exception_info(self, etype, evalue):
+ return self.emphasize(
+ '%s: %s' % (self.quote(etype), self.quote(evalue)))
+ def format_combine(self, general_data, lines, exc_info):
+ lines[:0] = [value for name, value in general_data]
+ lines.append(exc_info)
+ return self.format_combine_lines(lines)
+ def format_combine_lines(self, lines):
+ return '\n'.join(lines)
+
+class HTMLFormatter(TextFormatter):
+
+ def quote(self, s):
+ return html_quote(s)
+ def emphasize(self, s):
+ return '<b>%s</b>' % s
+ def format_sup_url(self, url):
+ return 'URL: <a href="%s">%s</a>' % (url, url)
+ def format_combine_lines(self, lines):
+ return '<br>\n'.join(lines)
+ def format_source_line(self, filename, lineno, name):
+ return 'File %r, line %s in <tt>%s</tt>' % (filename, lineno, name)
+ def format_source(self, source_line):
+ return '&nbsp;&nbsp;<tt>%s</tt>' % self.quote(source_line.strip())
+
+def format_html(exc_data, **ops):
+ return HTMLFormatter(**ops).format_collected_data(exc_data)
+def format_text(exc_data, **ops):
+ return TextFormatter(**ops).format_collected_data(exc_data)
diff --git a/paste/exceptions/reporter.py b/paste/exceptions/reporter.py
new file mode 100644
index 0000000..8568147
--- /dev/null
+++ b/paste/exceptions/reporter.py
@@ -0,0 +1,122 @@
+from email.MIMEText import MIMEText
+from email.MIMEMultipart import MIMEMultipart
+import smtplib
+import time
+import formatter
+
+class Reporter:
+
+ def __init__(self, **conf):
+ for name, value in conf.items():
+ if not hasattr(self, name):
+ raise TypeError(
+ "The keyword argument %s was not expected"
+ % name)
+ setattr(self, name, value)
+ self.check_params()
+
+ def check_params(self):
+ pass
+
+ def format_date(self, exc_data):
+ return time.strftime('%c', exc_data.date)
+
+ def format_html(self, exc_data, **kw):
+ return formatter.format_html(exc_data, **kw)
+
+ def format_text(self, exc_data, **kw):
+ return formatter.format_text(exc_data, **kw)
+
+class EmailReporter(Reporter):
+
+ to_addresses = None
+ from_address = None
+ smtp_server = 'localhost'
+ subject_prefix = ''
+
+ def report(self, exc_data):
+ msg = self.assemble_email(exc_data)
+ server = smtplib.SMTP(self.smtp_server)
+ server.sendmail(self.from_address,
+ self.to_addresses, str(msg))
+ server.quit()
+
+ def check_params(self):
+ if not self.to_addresses:
+ raise ValueError("You must set to_addresses")
+ if not self.from_address:
+ raise ValueError("You must set from_address")
+ if isinstance(self.to_addresses, (str, unicode)):
+ self.to_addresses = [self.to_addresses]
+
+ def assemble_email(self, exc_data):
+ short_html_version = self.format_html(
+ exc_data, show_hidden_frames=False)
+ long_html_version = self.format_html(
+ exc_data, show_hidden_frames=True)
+ text_version = self.format_text(
+ exc_data, show_hidden_frames=False)
+ msg = MIMEMultipart()
+ msg.set_type('multipart/alternative')
+ msg.preamble = msg.epilogue = ''
+ text_msg = MIMEText(text_version)
+ text_msg.set_type('text/plain')
+ text_msg.set_param('charset', 'ASCII')
+ msg.attach(text_msg)
+ html_msg = MIMEText(short_html_version)
+ html_msg.set_type('text/html')
+ # @@: Correct character set?
+ html_msg.set_param('charset', 'UTF-8')
+ html_long = MIMEText(long_html_version)
+ html_long.set_type('text/html')
+ html_long.set_param('charset', 'UTF-8')
+ msg.attach(html_msg)
+ msg.attach(html_long)
+ msg['Subject'] = '%s%s: %s' % (
+ self.subject_prefix, exc_data.exception_type,
+ exc_data.exception_value)
+ msg['From'] = self.from_address
+ msg['To'] = ', '.join(self.to_addresses)
+ return msg
+
+class LogReporter(Reporter):
+
+ filename = None
+ show_hidden_frames = True
+
+ def check_params(self):
+ assert self.filename is not None, (
+ "You must give a filename")
+
+ def report(self, exc_data):
+ text = self.format_text(
+ exc_data, show_hidden_frames=self.show_hidden_frames)
+ f = open(self.filename, 'a')
+ try:
+ f.write(text + '\n' + '-'*60 + '\n')
+ finally:
+ f.close()
+
+class FileReporter(Reporter):
+
+ file = None
+ show_hidden_frames = True
+
+ def check_params(self):
+ assert self.file is not None, (
+ "You must give a file object")
+
+ def report(self, exc_data):
+ text = self.format_text(
+ exc_data, show_hidden_frames=self.show_hidden_frames)
+ print text
+ self.file.write(text + '\n' + '-'*60 + '\n')
+
+class WSGIAppReporter(Reporter):
+
+ def __init__(self, exc_data):
+ self.exc_data = exc_data
+
+ def __call__(self, environ, start_response):
+ start_response('500 Server Error', [('Content-type', 'text/html')])
+ return [formatter.format_html(self.exc_data)]
diff --git a/paste/exceptions/serial_number_generator.py b/paste/exceptions/serial_number_generator.py
new file mode 100644
index 0000000..0389b3a
--- /dev/null
+++ b/paste/exceptions/serial_number_generator.py
@@ -0,0 +1,114 @@
+"""
+Creates a human-readable identifier, using digits and letters while
+avoiding ambiguous ones. hash_identifier can be used
+to create compact representations that are unique for a certain string
+(or concatenation of strings)
+"""
+
+import md5
+
+good_characters = "023456789abcdefghjkmnpqrtuvwxyz"
+
+base = len(good_characters)
+
+def make_identifier(number):
+ """
+ Encodes a number as an identifier.
+ """
+ if not isinstance(number, (int, long)):
+ raise ValueError(
+ "You can only make identifiers out of integers (not %r)"
+ % number)
+ if number < 0:
+ raise ValueError(
+ "You cannot make identifiers out of negative numbers: %r"
+ % number)
+ result = []
+ while number:
+ next = number % base
+ result.append(good_characters[next])
+ # Note, this depends on integer rounding of results:
+ number = number / base
+ return ''.join(result)
+
+def hash_identifier(s, length, pad=True, hasher=md5, prefix='',
+ group=None, upper=False):
+ """
+ Hashes the string (with the given hashing module), then turns that
+ hash into an identifier of the given length (using modulo to
+ reduce the length of the identifier). If ``pad`` is False, then
+ the minimum-length identifier will be used; otherwise the
+ identifier will be padded with 0's as necessary.
+
+ ``prefix`` will be added last, and does not count towards the
+ target length. ``group`` will group the characters with ``-`` in
+ the given lengths, and also does not count towards the target
+ length. E.g., ``group=4`` will cause an identifier like
+ ``a5f3-hgk3-asdf``. Grouping occurs before the prefix.
+ """
+ if length > 26 and hasher is md5:
+ raise ValueError, (
+ "md5 cannot create hashes longer than 26 characters in "
+ "length (you gave %s)" % length)
+ if isinstance(s, unicode):
+ s = s.encode('utf-8')
+ h = hasher.new(str(s))
+ bin_hash = h.digest()
+ modulo = base ** length
+ number = 0
+ for c in list(bin_hash):
+ number = (number * 256 + ord(c)) % modulo
+ ident = make_identifier(number)
+ if pad:
+ ident = good_characters[0]*(length-len(ident)) + ident
+ if group:
+ parts = []
+ while ident:
+ parts.insert(0, ident[-group:])
+ ident = ident[:-group]
+ ident = '-'.join(parts)
+ if upper:
+ ident = ident.upper()
+ return prefix + ident
+
+# doctest tests:
+__test__ = {
+ 'make_identifier': """
+ >>> make_identifier(0)
+ ''
+ >>> make_identifier(1000)
+ '922'
+ >>> make_identifier(-100)
+ Traceback (most recent call last):
+ ...
+ ValueError: You cannot make identifiers out of negative numbers: -100
+ >>> make_identifier('test')
+ Traceback (most recent call last):
+ ...
+ ValueError: You can only make identifiers out of integers (not 'test')
+ >>> make_identifier(1000000000000)
+ '5bqderb62'
+ """,
+ 'hash_identifier': """
+ >>> hash_identifier(0, 5)
+ 'fg35w'
+ >>> hash_identifier(0, 10)
+ 'fg35w4t7yv'
+ >>> hash_identifier('this is a test of a long string', 5)
+ 'qpvbe'
+ >>> hash_identifier(0, 26)
+ 'fg35w4t7yvwr8rxpr3g06xj7cf'
+ >>> hash_identifier(0, 30)
+ Traceback (most recent call last):
+ ...
+ ValueError: md5 cannot create hashes longer than 26 characters in length (you gave 30)
+ >>> hash_identifier(0, 10, group=4)
+ 'fg-35w4-t7yv'
+ >>> hash_identifier(0, 10, group=4, upper=True, prefix='M-')
+ 'M-FG-35W4-T7YV'
+ """}
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
+
diff --git a/paste/exceptions/tests/__init__.py b/paste/exceptions/tests/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/exceptions/tests/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/exceptions/tests/test_formatter.py b/paste/exceptions/tests/test_formatter.py
new file mode 100644
index 0000000..6a9b255
--- /dev/null
+++ b/paste/exceptions/tests/test_formatter.py
@@ -0,0 +1,79 @@
+from paste.exceptions import formatter
+from paste.exceptions import collector
+import sys
+import os
+
+class Mock(object):
+ def __init__(self, **kw):
+ for name, value in kw.items():
+ setattr(self, name, value)
+
+class Supplement(Mock):
+
+ object = 'test_object'
+ source_url = 'http://whatever.com'
+ info = 'This is some supplemental information'
+ args = ()
+ def getInfo(self):
+ return self.info
+
+ def __call__(self, *args):
+ self.args = args
+ return self
+
+class BadSupplement(Supplement):
+
+ def getInfo(self):
+ raise ValueError("This supplemental info is buggy")
+
+def call_error(sup):
+ 1 + 2
+ __traceback_supplement__ = (sup, ())
+ assert 0, "I am an error"
+
+def raise_error(sup='default'):
+ if sup == 'default':
+ sup = Supplement()
+ for i in range(10):
+ __traceback_info__ = i
+ if i == 5:
+ call_error(sup=sup)
+
+def format(type='html', **ops):
+ data = collector.collect_exception(*sys.exc_info())
+ report = getattr(formatter, 'format_' + type)(data, **ops)
+ return report
+
+formats = ('html', 'text')
+
+def test_exercise():
+ for f in formats:
+ try:
+ raise_error()
+ except:
+ format(f)
+
+def test_content():
+ for f in formats:
+ try:
+ raise_error()
+ except:
+ result = format(f)
+ print result
+ assert 'test_object' in result
+ assert 'http://whatever.com' in result
+ assert 'This is some supplemental information' in result
+ assert 'raise_error' in result
+ assert 'call_error' in result
+ assert '5' in result
+ assert 'test_content' in result
+
+def test_trim():
+ current = os.path.abspath(os.getcwd())
+ for f in formats:
+ try:
+ raise_error()
+ except:
+ result = format(f, trim_source_paths=[(current, '.')])
+ assert current not in result
+ assert '/test_formatter.py' in result
diff --git a/paste/exceptions/tests/test_reporter.py b/paste/exceptions/tests/test_reporter.py
new file mode 100644
index 0000000..da6c470
--- /dev/null
+++ b/paste/exceptions/tests/test_reporter.py
@@ -0,0 +1,46 @@
+import sys
+import os
+from paste.exceptions.reporter import *
+from paste.exceptions import collector
+
+def setup_file(fn, content=None):
+ fn = os.path.join(os.path.dirname(__file__), 'reporter_output', fn)
+ if os.path.exists(fn):
+ os.unlink(fn)
+ if content is not None:
+ f = open(fn, 'wb')
+ f.write(content)
+ f.close()
+ return fn
+
+def test_logger():
+ fn = setup_file('test_logger.log')
+ rep = LogReporter(
+ filename=fn,
+ show_hidden_frames=False)
+ try:
+ int('a')
+ except:
+ exc_data = collector.collect_exception(*sys.exc_info())
+ else:
+ assert 0
+ rep.report(exc_data)
+ content = open(fn).read()
+ assert len(content.splitlines()) == 4
+ assert 'ValueError' in content
+ assert 'int(): a' in content
+ assert 'test_reporter.py' in content
+ assert 'test_logger' in content
+
+ try:
+ 1 / 0
+ except:
+ exc_data = collector.collect_exception(*sys.exc_info())
+ else:
+ assert 0
+ rep.report(exc_data)
+ content = open(fn).read()
+ print content
+ assert len(content.splitlines()) == 8
+ assert 'ZeroDivisionError' in content
+
diff --git a/paste/gzipper.py b/paste/gzipper.py
new file mode 100644
index 0000000..45fae50
--- /dev/null
+++ b/paste/gzipper.py
@@ -0,0 +1,65 @@
+"""
+WSGI middleware
+
+Gzip-encodes the response.
+"""
+
+import gzip
+from cStringIO import StringIO
+import wsgilib
+
+class GzipOutput(object):
+ pass
+
+class middleware(object):
+
+ def __init__(self, application, compress_level=5):
+ self.application = application
+ self.compress_level = compress_level
+
+ def __call__(self, environ, start_response):
+        if 'gzip' not in environ.get('HTTP_ACCEPT_ENCODING', ''):
+ # nothing for us to do, so this middleware will
+ # be a no-op:
+ return self.application(environ, start_response)
+ response = GzipResponse(start_response, self.compress_level)
+ app_iter = self.application(environ,
+ response.gzip_start_response)
+ try:
+ if app_iter:
+ response.finish_response(app_iter)
+ finally:
+ response.close()
+        # WSGI applications must return an iterable even when all of the
+        # output went through the write() callable:
+        return []
+
+class GzipResponse(object):
+
+ def __init__(self, start_response, compress_level):
+ self.start_response = start_response
+ self.compress_level = compress_level
+ self.gzip_fileobj = None
+
+ def gzip_start_response(self, status, headers):
+ # This isn't part of the spec yet:
+ if wsgilib.has_header(headers, 'content-encoding'):
+ # we won't double-encode
+ return self.start_response(status, headers)
+
+ headers.append(('content-encoding', 'gzip'))
+ raw_writer = self.start_response(status, headers)
+ dummy_fileobj = GzipOutput()
+ dummy_fileobj.write = raw_writer
+ self.gzip_fileobj = gzip.GzipFile('', 'wb', self.compress_level,
+ dummy_fileobj)
+ return self.gzip_fileobj.write
+
+ def finish_response(self, app_iter):
+ try:
+ for s in app_iter:
+ self.gzip_fileobj.write(s)
+ finally:
+ if hasattr(app_iter, 'close'):
+ app_iter.close()
+
+ def close(self):
+ self.gzip_fileobj.close()
diff --git a/paste/httpexceptions.py b/paste/httpexceptions.py
new file mode 100644
index 0000000..0396f43
--- /dev/null
+++ b/paste/httpexceptions.py
@@ -0,0 +1,233 @@
+"""
+WSGI middleware
+
+Processes Python exceptions that relate to HTTP exceptions. This
+defines a set of extensions, all subclasses of HTTPException, and a
+middleware (`middleware`) that catches these exceptions and turns them
+into proper responses.
+"""
+
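+# A usage sketch (hypothetical application): code wrapped by the
+# ``middleware`` function at the bottom of this module can raise these
+# exceptions instead of building error responses by hand:
+#
+#     from paste import httpexceptions
+#
+#     def app(environ, start_response):
+#         if environ.get('PATH_INFO', '/') != '/':
+#             raise httpexceptions.HTTPNotFound()
+#         start_response('200 OK', [('Content-type', 'text/plain')])
+#         return ['hello']
+#
+#     app = httpexceptions.middleware(app)
+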
+import types
+
+class HTTPException(Exception):
+ code = None
+ title = None
+ message = None
+ # @@: not currently used:
+ required_headers = ()
+ def __init__(self, headers=None, message=None):
+ self.headers = headers
+ if message is not None:
+ self.message = message
+
+ def html(self, environ):
+ message = self.message
+ args = environ.copy()
+        if self.headers:
+            args.update(self.headers)
+ message = message % args
+ return ('<html><head><title>%(title)s</title></head>\n'
+ '<body>\n'
+ '<h1>%(title)s</h1>\n'
+ '<p>%(message)s</p>\n'
+ '<hr noshade>\n'
+ '<div align="right">WSGI server</div>\n'
+ '</body></html>\n'
+ % {'title': self.title,
+ 'code': self.code,
+ 'message': message})
+
+ def __repr__(self):
+ return '<%s %s; code=%s>' % (self.__class__.__name__,
+ self.title, self.code)
+
+class _HTTPMove(HTTPException):
+ required_headers = ('location',)
+ message = ('The resource has been moved to <a href="%(location)s">'
+ '%(location)s</a>; you should be redirected automatically.')
+
+class HTTPMovedPermanently(_HTTPMove):
+ code = 301
+ title = 'Moved Permanently'
+
+class HTTPFound(_HTTPMove):
+ code = 302
+ title = 'Found'
+
+# This one is safe after a POST (the redirected location will be
+# retrieved with GET):
+class HTTPSeeOther(_HTTPMove):
+ code = 303
+ title = 'See Other'
+
+class HTTPNotModified(HTTPException):
+ # @@: but not always (HTTP section 14.18.1)...?
+ required_headers = ('date',)
+ code = 304
+ title = 'Not Modified'
+ message = ''
+ # @@: should include date header, optionally other headers
+
+class HTTPUseProxy(_HTTPMove):
+ # @@: OK, not a move, but looks a little like one
+ code = 305
+ title = 'Use Proxy'
+ message = ('This resource must be accessed through the proxy located '
+ 'at <a href="%(location)s">%(location)s</a>')
+
+class HTTPTemporaryRedirect(_HTTPMove):
+ code = 307
+ title = 'Temporary Redirect'
+
+class HTTPBadRequest(HTTPException):
+ code = 400
+ title = 'Bad Request'
+    message = ('The server could not understand your request.')
+
+class HTTPUnauthorized(HTTPException):
+ required_headers = ('WWW-Authenticate',)
+ code = 401
+ title = 'Unauthorized'
+ # @@: should require WWW-Authenticate header
+ message = ('Authorization is required to access this resource; '
+ 'you must login.')
+
+class HTTPForbidden(HTTPException):
+ code = 403
+ title = 'Forbidden'
+ message = ('Access was denied to this resource.')
+
+class HTTPNotFound(HTTPException):
+ code = 404
+ title = 'Not Found'
+ message = ('The resource could not be found.')
+
+class HTTPMethodNotAllowed(HTTPException):
+ required_headers = ('allowed',)
+ code = 405
+ title = 'Method Not Allowed'
+ message = ('The method %(REQUEST_METHOD)s is not allowed for this '
+ 'resource.')
+
+class HTTPNotAcceptable(HTTPException):
+ code = 406
+ title = 'Not Acceptable'
+    message = ('A resource that was acceptable to your browser (content '
+               'of type %(HTTP_ACCEPT)s) could not be generated.')
+
+class HTTPConflict(HTTPException):
+ code = 409
+ title = 'Conflict'
+ message = ('There was a conflict when trying to complete your '
+ 'request.')
+
+class HTTPGone(HTTPException):
+ code = 410
+ title = 'Gone'
+    message = ('This resource is no longer available. No forwarding '
+               'address is available.')
+
+class HTTPLengthRequired(HTTPException):
+ code = 411
+ title = 'Length Required'
+ message = ('Content-Length header required.')
+
+class HTTPPreconditionFailed(HTTPException):
+ code = 412
+ title = 'Precondition Failed'
+ message = ('Request precondition failed.')
+
+class HTTPRequestEntityTooLarge(HTTPException):
+ code = 413
+ title = 'Request Entity Too Large'
+ message = ('The body of your request was too large for this server.')
+
+class HTTPRequestURITooLong(HTTPException):
+ code = 414
+ title = 'Request-URI Too Long'
+ message = ('The request URI was too long for this server.')
+
+class HTTPUnsupportedMediaType(HTTPException):
+ code = 415
+ title = 'Unsupported Media Type'
+ message = ('The request media type %(CONTENT_TYPE)s is not '
+ 'supported by this server.')
+
+class HTTPRequestRangeNotSatisfiable(HTTPException):
+ code = 416
+ title = 'Request Range Not Satisfiable'
+ message = ('The Range requested is not available.')
+
+class HTTPExpectationFailed(HTTPException):
+ code = 417
+ title = 'Expectation Failed'
+ message = ('Expectation failed.')
+
+class HTTPServerError(HTTPException):
+ code = 500
+ title = 'Internal Server Error'
+ message = ('An internal server error occurred.')
+
+class HTTPNotImplemented(HTTPException):
+    code = 501
+ title = 'Not Implemented'
+ message = ('The request method %(REQUEST_METHOD)s is not implemented '
+ 'for this server.')
+
+class HTTPBadGateway(HTTPException):
+ code = 502
+ title = 'Bad Gateway'
+ message = ('Bad gateway.')
+
+class HTTPServiceUnavailable(HTTPException):
+ code = 503
+ title = 'Service Unavailable'
+ message = ('The server is currently unavailable. Please try again '
+ 'at a later time.')
+
+class HTTPGatewayTimeout(HTTPException):
+ code = 504
+ title = 'Gateway Timeout'
+ message = ('The gateway has timed out.')
+
+class HTTPHttpVersionNotSupported(HTTPException):
+ code = 505
+ title = 'HTTP Version Not Supported'
+ message = ('The HTTP version is not supported.')
+
+_exceptions = {}
+for name, value in globals().items():
+ if (isinstance(value, (type, types.ClassType)) and
+ issubclass(value, HTTPException) and
+ value.code):
+ _exceptions[value.code] = value
+def get_exception(code):
+ return _exceptions[code]
+
+############################################################
+## Middleware implementation:
+############################################################
+
+def middleware(application):
+
+ def start_application(environ, start_response):
+ app_started = []
+ def checked_start_response(status, headers):
+ app_started.append(None)
+ return start_response(status, headers)
+
+        try:
+            return application(environ, checked_start_response)
+ except HTTPException, e:
+ if app_started:
+ # They've already started the response, so we can't
+ # do the right thing anymore.
+ raise
+ headers = {'content-type': 'text/html'}
+ if e.headers:
+ headers.update(e.headers)
+ write = start_response('%s %s' % (e.code, e.title),
+ headers.items())
+ return [e.html(environ)]
+
+ return start_application
diff --git a/paste/lint.py b/paste/lint.py
new file mode 100644
index 0000000..0eac637
--- /dev/null
+++ b/paste/lint.py
@@ -0,0 +1,259 @@
+"""
+A lint of sorts; an anal middleware that checks for WSGI compliance
+both in the server and the application (but otherwise does not affect
+the request; it just looks at the communication).
+"""
+
+import re
+import sys
+from types import *
+
+header_re = re.compile(r'^[a-zA-Z][a-zA-Z0-9\-_]*$')
+bad_header_value_re = re.compile(r'[\000-\037]')
+
+def middleware(application):
+ def lint_app(*args, **kw):
+ assert len(args) == 2, "Two arguments required"
+ assert not kw, "No keyword arguments allowed"
+ environ, start_response = args
+
+ check_environ(environ)
+
+ # We use this to check if the application returns without
+ # calling start_response:
+ start_response_started = []
+
+ def start_response_wrapper(*args, **kw):
+ assert len(args) == 2 or len(args) == 3, "Invalid number of arguments: %s" % args
+ assert not kw, "No keyword arguments allowed"
+ status = args[0]
+ headers = args[1]
+ if len(args) == 3:
+ exc_info = args[2]
+ else:
+ exc_info = None
+
+ check_status(status)
+ check_headers(headers)
+ check_content_type(status, headers)
+ check_exc_info(exc_info)
+
+ start_response_started.append(None)
+ return WriteWrapper(start_response(*args))
+
+ environ['wsgi.input'] = InputWrapper(environ['wsgi.input'])
+ environ['wsgi.errors'] = ErrorWrapper(environ['wsgi.errors'])
+
+ iterator = application(environ, start_response_wrapper)
+ assert start_response_started, (
+ "The application returned, but did not call start_response()")
+ assert iterator is not None and iterator != False, \
+ "The application must return an iterator, if only an empty list"
+
+ check_iterator(iterator)
+
+ return IteratorWrapper(iterator)
+
+ return lint_app
+
+class InputWrapper:
+
+ def __init__(self, wsgi_input):
+ self.input = wsgi_input
+
+ def read(self, *args):
+ assert len(args) <= 1
+ v = self.input.read(*args)
+ assert type(v) is type("")
+ return v
+
+ def readline(self):
+ v = self.input.readline()
+ assert type(v) is type("")
+ return v
+
+ def readlines(self, *args):
+ assert len(args) <= 1
+ lines = self.input.readlines(*args)
+ assert type(lines) is type([])
+ for line in lines:
+ assert type(line) is type("")
+ return lines
+
+ def __iter__(self):
+ while 1:
+ line = self.readline()
+ if not line:
+ return
+ yield line
+
+ def close(self):
+ assert 0, "input.close() must not be called"
+
+class ErrorWrapper:
+
+ def __init__(self, wsgi_errors):
+ self.errors = wsgi_errors
+
+ def write(self, s):
+ assert type(s) is type("")
+ self.errors.write(s)
+
+ def flush(self):
+ self.errors.flush()
+
+ def writelines(self, seq):
+ for line in seq:
+ self.write(line)
+
+ def close(self):
+ assert 0, "errors.close() must not be called"
+
+class WriteWrapper:
+
+ def __init__(self, wsgi_writer):
+ self.writer = wsgi_writer
+
+ def __call__(self, s):
+ assert type(s) is type("")
+ self.writer(s)
+
+class PartialIteratorWrapper:
+
+ def __init__(self, wsgi_iterator):
+        self.iterator = wsgi_iterator
+
+ def __iter__(self):
+ # We want to make sure __iter__ is called
+ return IteratorWrapper(self.iterator)
+
+class IteratorWrapper:
+
+ def __init__(self, wsgi_iterator):
+ self.original_iterator = wsgi_iterator
+ self.iterator = iter(wsgi_iterator)
+ self.closed = False
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ assert not self.closed, \
+ "Iterator read after closed"
+ return self.iterator.next()
+
+ def close(self):
+ self.closed = True
+ if hasattr(self.original_iterator, 'close'):
+ self.original_iterator.close()
+
+ def __del__(self):
+ if not self.closed:
+ sys.stderr.write("Iterator garbage collected without being closed")
+ assert self.closed, \
+ "Iterator garbage collected without being closed"
+
+def check_environ(environ):
+ assert type(environ) is DictType, \
+ "Environment is not of the right type: %r (environment: %r)" % (type(environ), environ)
+
+ for key in ['REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
+ 'wsgi.version', 'wsgi.input', 'wsgi.errors',
+ 'wsgi.multithread', 'wsgi.multiprocess',
+ 'wsgi.run_once']:
+ assert environ.has_key(key), \
+ "Environment missing required key: %r" % key
+
+ for key in environ.keys():
+ if '.' in key:
+ # Extension, we don't care about its type
+ continue
+        assert type(environ[key]) is StringType, \
+            "Environmental variable %s is not a string: %s (value: %r)" % (key, type(environ[key]), environ[key])
+
+ assert type(environ['wsgi.version']) is TupleType, \
+ "wsgi.version should be a tuple (%r)" % environ['wsgi.version']
+ assert environ['wsgi.url_scheme'] in ('http', 'https'), \
+ "wsgi.url_scheme unknown: %r" % environ['wsgi.url_scheme']
+
+ check_input(environ['wsgi.input'])
+ check_errors(environ['wsgi.errors'])
+
+ # @@: these need filling out:
+ assert environ['REQUEST_METHOD'] in ('GET', 'HEAD', 'POST'), "Unknown REQUEST_METHOD: %r" % environ['REQUEST_METHOD']
+
+ assert (not environ.get('SCRIPT_NAME')
+ or environ['SCRIPT_NAME'].startswith('/')), \
+ "SCRIPT_NAME doesn't start with /: %r" % environ['SCRIPT_NAME']
+ assert (not environ.get('PATH_INFO')
+ or environ['PATH_INFO'].startswith('/')), \
+ "PATH_INFO doesn't start with /: %s" % environ['PATH_INFO']
+ if environ.get('CONTENT_LENGTH'):
+ assert int(environ['CONTENT_LENGTH']) >= 0, "Invalid CONTENT_LENGTH: %r" % environ['CONTENT_LENGTH']
+
+ if not environ.get('SCRIPT_NAME'):
+        assert environ.has_key('PATH_INFO'), \
+            "One of SCRIPT_NAME or PATH_INFO is required (PATH_INFO should at least be '/' if SCRIPT_NAME is empty)"
+ assert environ.get('SCRIPT_NAME') != '/', \
+ "SCRIPT_NAME cannot be '/'; it should instead be '', and PATH_INFO should be '/'"
+
+def check_input(wsgi_input):
+ for attr in ['read', 'readline', 'readlines', '__iter__']:
+ assert hasattr(wsgi_input, attr), \
+ "wsgi.input (%r) doesn't have the attribute %s" % (wsgi_input, attr)
+
+def check_errors(wsgi_errors):
+ for attr in ['flush', 'write', 'writelines']:
+        assert hasattr(wsgi_errors, attr), \
+            "wsgi.errors (%r) doesn't have the attribute %s" % (wsgi_errors, attr)
+
+def check_status(status):
+ assert type(status) is StringType, \
+ "Status must be a string (not %r)" % status
+ # Implicitly check that we can turn it into an integer:
+ status_int = int(status.split(None, 1)[0])
+ assert status_int >= 100, "Status code is invalid: %r" % status_int
+
+def check_headers(headers):
+ assert type(headers) is ListType, \
+ "Headers (%r) must be of type list: %r" % (headers, type(headers))
+ header_names = {}
+ for item in headers:
+ assert type(item) is TupleType, \
+ "Individual headers (%r) must be of type tuple: %r" % (item, type(item))
+ assert len(item) == 2
+ name, value = item
+ assert name.lower() != 'status', \
+ "The Status header cannot be used; it conflicts with CGI script, and HTTP status is not given through headers (value: %r)." % value
+ header_names[name.lower()] = None
+ assert '\n' not in name and ':' not in name, \
+ "Header names may not contain ':' or '\\n': %r" % name
+ assert header_re.search(name), "Bad header name: %r" % name
+ assert not name.endswith('-') and not name.endswith('_'), \
+ "Names may not end in '-' or '_': %r" % name
+ assert not bad_header_value_re.search(value), \
+ "Bad header value: %r (bad char: %r)" % (value, bad_header_value_re.search(value).group(0))
+
+def check_content_type(status, headers):
+ code = int(status.split(None, 1)[0])
+ if code == 204:
+ # 204 No Content is the only code where there's no body,
+ # and so it doesn't need a content-type header.
+ # @@: Not 100% sure this is the only case where a content-type
+ # header can be left out
+ return
+ for name, value in headers:
+ if name.lower() == 'content-type':
+ return
+ assert 0, "No Content-Type header found in headers (%s)" % headers
+
+def check_exc_info(exc_info):
+ assert not exc_info or type(exc_info) is type(()), "exc_info (%r) is not a tuple: %r" % (exc_info, type(exc_info))
+ # More exc_info checks?
+
+def check_iterator(iterator):
+ # Technically a string is legal, which is why it's a really bad
+ # idea, because it may cause the response to be returned
+ # character-by-character
+ assert not isinstance(iterator, str), \
+ "You should not return a string as your application iterator, instead return a single-item list containing that string."
diff --git a/paste/login.py b/paste/login.py
new file mode 100644
index 0000000..7e6fcac
--- /dev/null
+++ b/paste/login.py
@@ -0,0 +1,251 @@
+"""
+Login/authentication middleware
+
+NOT YET FINISHED
+"""
+
+import wsgilib
+import sha
+
+def middleware(
+ application,
+ http_login=False,
+ http_realm='Secure Website',
+ http_overwrite_realm=True,
+ http_and_cookie=True,
+ cookie_prefix='',
+ login_page='_login/login_form',
+ logout_page='_login/logout_form',
+ secret=None,
+ authenticator=None,
+ ):
+ """
+ Configuration:
+
+ http_login:
+ If true, then we'll prefer HTTP Basic logins, passing a 401 to
+ the user. If false, we'll use form logins with Cookie
+ authentication.
+ http_realm:
+ The realm to use. If http_overwrite_realm is true then we will
+ force this to be the realm (even if the application supplies
+ its own realm).
+ http_and_cookie:
+ If true, we'll give the user a login cookie even if they use
+ HTTP. Then we don't have to throw a 401 on every page to get
+ them to re-login.
+ cookie_prefix:
+ Used before all cookie names; like a domain.
+ login_page:
+ If using cookie login and we get a 401, we'll turn it into a
+ 200 and do an internal redirect to this page (using recursive).
+ logout_page:
+ Ditto the logout (logout will at some point be triggered with
+ another key we add to the environment).
+ secret:
+ We use this for signing cookies. We'll generate it automatically
+ if it's not provided explicitly (set it explicitly to be sure
+ it is stable).
+ authenticator:
+ When we do HTTP logins we need to tell if they are using the
+ correct login immediately. See the Authenticator object for
+ the framework of an implementation.
+
+ When you require a login, return a 401 error. When a login has
+ occurred, the logged-in username will be in REMOTE_USER. When the
+ user is logged in, but denied access, use a 403 error (not a 401).
+ It might be useful to have another middleware that wraps an application
+ and returns a 401 error, based on parsing the URL.
+
+ Currently, the login form, if used, is rendered at the URL requested
+ by the user, instead of issuing an HTTP redirect. This will require
+ some attention to caching issues, but allows forms to be POSTed without
+ losing data after the login (as long as the login page contains the
+ appropriate hidden fields.)
+
+ Also, the cookie is not deleted on an unsuccessful login attempt.
+
+ The cookie is issued with path '/' and no expiration date. This
+ should probably be overridable.
+
+ Environment variables used:
+ paste.login.signer:
+ signer, created from UsernameSigner class
+ paste.login._dologin:
+ user name to be logged in, either from HTTP auth
+        or from form submission (XXX form login not yet implemented)
+ paste.login._doredirect:
+ login page to which to redirect
+ paste.login._loginredirect:
+        set to True iff _doredirect is set and login_page is
+        relative; otherwise undefined. (Used where?)
+ """
+
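+    # Usage sketch (cookie/form login; the secret value is illustrative):
+    #
+    #     app = middleware(app, secret='some-stable-secret',
+    #                      login_page='_login/login_form')
+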
+ if http_login:
+ assert authenticator, (
+ "You must provide an authenticator argument if you "
+ "are using http_login")
+ if secret is None:
+ secret = create_secret()
+ cookie_name = cookie_prefix + '_login_auth'
+
+ signer = UsernameSigner(secret)
+
+ def login_application(environ, start_response):
+ orig_script_name = environ['SCRIPT_NAME']
+ orig_path_info = environ['PATH_INFO']
+ cookies = wsgilib.get_cookies(environ)
+ cookie = cookies.get(cookie_name)
+ username = None
+ environ['paste.login.signer'] = signer
+ if cookie and cookie.value:
+ username = signer.check_signature(
+ cookie.value, environ['wsgi.errors'])
+ authenticatee = (
+ environ.get('HTTP_AUTHORIZATION') or
+ environ.get('HTTP_CGI_AUTHORIZATION'))
+ if (not username
+ and authenticator
+ and authenticatee):
+ username = authenticator().check_basic_auth(authenticatee)
+ if http_and_cookie:
+ environ['paste.login._dologin'] = username
+ if username:
+ environ['REMOTE_USER'] = username
+
+        def login_start_response(status, headers):
+            if environ.get('paste.login._dologin'):
+                # Sign whatever username was requested, whether it came
+                # from the HTTP auth check above or from the application
+                # via signer.login_user():
+                cookie = SimpleCookie(
+                    cookie_name,
+                    signer.make_signature(environ['paste.login._dologin']),
+                    '/')
+ headers.append(('Set-Cookie', str(cookie)))
+ del environ['paste.login._dologin']
+ status_int = int(status.split(None, 1)[0].strip())
+ if status_int == 401 and http_login:
+ if (http_overwrite_realm
+ or not wsgilib.has_header(headers, 'www-authenticate')):
+ headers.append(('WWW-Authenticate', 'Basic realm="%s"' % http_realm))
+ elif status_int == 401:
+ status = '200 OK'
+ if login_page.startswith('/'):
+ assert environ.has_key('paste.recursive.include'), (
+ "You must use the recursive middleware to "
+ "use a non-relative page for the login_page")
+ environ['paste.login._doredirect'] = login_page
+ return garbage_writer
+ return start_response(status, headers)
+
+ app_iter = application(environ, login_start_response)
+
+ if environ.get('paste.login._doredirect'):
+ page_name = environ['paste.login._doredirect']
+ del environ['paste.login._doredirect']
+ eat_app_iter(app_iter)
+ if login_page.startswith('/'):
+ app_iter = environ['paste.recursive.forward'](
+ login_page[1:])
+ else:
+                # Don't use recursive, since the login page is
+                # internal to this application:
+ new_environ = environ.copy()
+ new_environ['SCRIPT_NAME'] = orig_script_name
+ new_environ['PATH_INFO'] = '/' + login_page
+ new_environ['paste.login._loginredirect'] = True
+ app_iter = login_application(new_environ, start_response)
+ return app_iter
+
+ return login_application
+
+
+def encodestrip (s):
+ return s.encode('base64').strip ('\n')
+
+class UsernameSigner(object):
+
+ def __init__(self, secret):
+ self.secret = secret
+ def digest (self, username):
+ return sha.new(self.secret+username).digest()
+
+ def __call__(self, username):
+ return encodestrip (self.digest(username))
+
+ def check_signature(self, b64value, errors):
+ value = b64value.decode ('base64')
+ if ' ' not in value:
+ errors.write('Badly formatted cookie: %r\n' % value)
+ return None
+ signature, username = value.split(' ', 1)
+ sig_hash = self.digest(username)
+ if sig_hash == signature:
+ return username
+ errors.write('Bad signature: %r\n' % value)
+ return None
+
+ def make_signature (self, username):
+ return encodestrip (self.digest(username) + " " + username)
+
+ def login_user(self, username, environ):
+ """
+ Adds a username so that the login middleware will later set
+ the user to be logged in (with a cookie).
+ """
+ environ['paste.login._dologin'] = username
+
+class SimpleCookie(object):
+ def __init__ (self, cookie_name, signed_val, path):
+ self.cookie_name = cookie_name
+ self.signed_val = signed_val
+ self.path = '/'
+ def __str__ (self):
+ return "%s=%s; Path=%s" % (self.cookie_name,
+ self.signed_val, self.path)
+
+class Authenticator(object):
+
+ """
+ This is the basic framework for an authenticating object.
+ """
+
+ def check_basic_auth(self, auth):
+ """Returns either the authenticated username or, if unauthorized,
+ None."""
+ assert auth.lower().startswith('basic ')
+ type, auth = auth.split()
+ auth = auth.strip().decode('base64')
+        username, password = auth.split(':', 1)
+ if self.check_auth (username, password):
+ return username
+ return None
+
+ def check_auth(self, username, password):
+ raise NotImplementedError
+
+
+########################################
+## Utility functions
+########################################
+
+def create_secret():
+ # @@: obviously not a good secret generator: should be randomized
+ # somehow, and maybe store the secret somewhere for later use.
+ return 'secret'
+
+def garbage_writer(s):
+ """
+ When we don't care about the written output.
+ """
+ pass
+
+def eat_app_iter(app_iter):
+ """
+ When we don't care about the iterated output.
+ """
+ try:
+ for s in app_iter:
+ pass
+ finally:
+ if hasattr(app_iter, 'close'):
+ app_iter.close()
+
+
diff --git a/paste/pycgiwrapper.py b/paste/pycgiwrapper.py
new file mode 100644
index 0000000..eca2102
--- /dev/null
+++ b/paste/pycgiwrapper.py
@@ -0,0 +1,182 @@
+"""
+WSGI middleware
+
+Wraps a Python CGI script. Can handle multi-threading for basic CGI
+scripts. May effect other parts of the system that use the cgi module
+(though it attempts not to). Doesn't handle other kinds of CGI
+scripts, which would actually require spawning a separate process.
+"""
+# @@: still untested
+
+import cgi
+import sys
+try:
+ import threading
+ import thread
+except ImportError:
+ threading = None
+threadedprint = None
+from cStringIO import StringIO
+import os
+import rfc822
+import imp
+from UserDict import DictMixin
+
+_cgi_hook_installed = False
+_stdout_hook_installed = False
+_environs = {}
+_real_environ = os.environ
+
+class CGIWrapper(object):
+
+ if threading:
+ threading_lock = threading.Lock()
+
+ def __init__(self, cgi_filename):
+ self.cgi_filename = cgi_filename
+
+ def __call__(self, environ, start_response):
+ if environ['wsgi.multithread']:
+ output = self.threaded_std(environ['wsgi.input'])
+ else:
+ output = self.non_threaded_std(environ['wsgi.input'])
+ self.install_cgi_hook()
+ name = contextName()
+ try:
+ _environs[name] = environ
+ self.run_script()
+ finally:
+ if _environs.has_key(name):
+ del _environs[name]
+ if environ['wsgi.multithread']:
+ self.remove_threaded_std()
+ else:
+ self.remove_non_threaded_std()
+ parseable = StringIO(output.getvalue())
+ message = rfc822.Message(parseable)
+ body = parseable.read()
+ #sys.__stdout__.write('Content-type: text/html\n')
+ #sys.__stdout__.flush()
+
+ #sys.__stdout__.write(str(message) + body)
+ #sys.__stdout__.flush()
+ status = message.getheader('status', None)
+ if status is None:
+ status = '200 OK'
+ else:
+ del message['status']
+ headers = message.items()
+ writer = start_response(status, headers)
+ return [body]
+
+ suffix_info = [t for t in imp.get_suffixes() if t[0] == '.py'][0]
+
+ def run_script(self):
+ f = open(self.cgi_filename, self.suffix_info[1])
+ try:
+ mod = imp.load_module('__main__', f, self.cgi_filename,
+ self.suffix_info)
+ except SystemExit:
+ pass
+ f.close()
+
+ def threaded_std(self, input):
+ self.install_threading()
+ output = StringIO()
+ threadedprint.register(output)
+ threadedprint.registerInput(input)
+
+ def remove_threaded_std(self):
+ threadedprint.deregister()
+
+ def non_threaded_std(self, input):
+ output = StringIO()
+ sys.stdout = output
+ sys.stdin = input
+ return output
+
+ def remove_non_threaded_std(self):
+ sys.stdout = sys.__stdout__
+ sys.stdin = sys.__stdin__
+
+ def install_threading(self):
+        """
+        Installs an alternate version of sys.stdout
+        """
+        global threadedprint, _stdout_hook_installed
+ if _stdout_hook_installed:
+ return
+ self.threading_lock.acquire()
+ try:
+ if _stdout_hook_installed:
+ return
+ from util import threadedprint
+ threadedprint.install(
+ default=sys.stdout)
+ _stdout_hook_installed = True
+ finally:
+ self.threading_lock.release()
+
+ def install_cgi_hook(self):
+ global _cgi_hook_installed
+ if _cgi_hook_installed:
+ return
+ if threading:
+ self.threading_lock.acquire()
+ try:
+ if _cgi_hook_installed:
+ return
+ cgi.FieldStorage = FieldStorageWrapper
+ os.environ = EnvironWrapper()
+ _cgi_hook_installed = True
+ finally:
+ if threading:
+ self.threading_lock.release()
+
+def contextName():
+ if not threading:
+ return None
+ else:
+ return thread.get_ident()
+
+_real_FieldStorage = cgi.FieldStorage
+
+class FieldStorageWrapper(_real_FieldStorage):
+
+ def __init__(self, fp=None, headers=None, outerboundary="",
+ environ=os.environ, keep_blank_values=0, strict_parsing=0):
+ if fp is None:
+ # @@: Should I look for sys.stdin too?
+ # Or should I be replacing sys.stdin entirely?
+ fp = _environs[contextName()]['wsgi.input']
+ if environ is os.environ:
+ environ = _environs[contextName()]
+ _real_FieldStorage.__init__(
+ self,
+ fp=fp, headers=headers,
+ outerboundary=outerboundary, environ=environ,
+ keep_blank_values=keep_blank_values,
+ strict_parsing=strict_parsing)
+
+class EnvironWrapper(DictMixin):
+
+ def __getitem__(self, key):
+ try:
+ d = _environs[contextName()]
+ except KeyError:
+ return _real_environ[key]
+ else:
+ return d[key]
+
+ def keys(self):
+ try:
+ return _environs[contextName()].keys()
+ except KeyError:
+ return _real_environ.keys()
+
+ def copy(self):
+ try:
+ return _environs[contextName()].copy()
+ except KeyError:
+ return _real_environ.copy()
+
diff --git a/paste/pyconfig.py b/paste/pyconfig.py
new file mode 100644
index 0000000..c3715a7
--- /dev/null
+++ b/paste/pyconfig.py
@@ -0,0 +1,168 @@
+"""
+Python-syntax configuration loader and abstractor
+
+Usage::
+
+ conf = Config()
+ conf.load('file1.py')
+ conf.load('file2.py')
+
+Loads files as Python files, gets all global variables as configuration
+keys. You can load multiple files, which will overwrite previous
+values (but will not delete previous values). You can use attribute
+or dictionary access to get values.
+"""
+
+import types
+import os
+from paste.util import thirdparty
+UserDict = thirdparty.load_new_module('UserDict', (2, 3))
+
+def load(filename):
+ conf = Config()
+ conf.load(filename)
+ return conf
+
+class NoContext:
+ pass
+
+class BadCommandLine(Exception):
+ pass
+
+class Config(UserDict.DictMixin):
+
+ def __init__(self):
+ self.namespaces = []
+
+ def __getitem__(self, attr):
+ for space in self.namespaces:
+ if space.has_key(attr):
+ return space[attr]
+ raise KeyError(
+ "Configuration key %r not found" % attr)
+
+ def __setitem__(self, attr, value):
+ self.namespaces[0][attr] = value
+
+ def keys(self):
+ keys = {}
+ for ns in self.namespaces:
+ for key in ns.keys():
+ keys[key] = None
+ return keys.keys()
+
+ def copy(self):
+ namespaces = [d.copy() for d in self.namespaces]
+ new = self.__class__()
+ new.namespaces = namespaces
+ return new
+
+ def load(self, filename, default=False):
+ f = open(filename, 'rb')
+ content = f.read()
+ f.close()
+ namespace = {}
+ for key in self:
+ namespace[key] = self[key]
+ orig = namespace.copy()
+ namespace['__file__'] = os.path.abspath(filename)
+ exec content in namespace
+ for name in namespace.keys():
+ if (hasattr(__builtins__, name)
+ or name.startswith('_')):
+ del namespace[name]
+ continue
+ if orig.has_key(name) and namespace[name] is orig[name]:
+ del namespace[name]
+ continue
+ if isinstance(namespace[name], types.ModuleType):
+ del namespace[name]
+ continue
+ self.load_dict(namespace, default)
+
+ def load_dict(self, d, default=False):
+ if default:
+ self.namespaces.insert(default, d)
+ else:
+ self.namespaces.insert(0, d)
+
+ def load_commandline(self, items, bool_options, aliases={}, default=False):
+ """
+ Loads options from the command line. bool_options take no arguments,
+ everything else is supposed to take arguments. aliases is a mapping
+ of arguments to other arguments. All -'s are turned to _, like
+ --config-file=... becomes config_file. Any extra arguments are
+ returned as a list.
+ """
+ options = {}
+ args = []
+ while items:
+ if items[0] == '--':
+ args.extend(items[1:])
+ break
+ elif items[0].startswith('--'):
+ name = items[0][2:]
+ value = None
+ if '=' in name:
+ name, value = name.split('=', 1)
+ name = aliases.get(name, name)
+ if (name in bool_options
+ or name.replace('-', '_') in bool_options):
+ if value is not None:
+ raise BadCommandLine(
+ "%s does not take any arguments"
+ % items[0])
+ options[name] = True
+ items.pop(0)
+ continue
+ if value is None:
+ if len(items) <= 1:
+ raise BadCommandLine(
+ "%s takes an argument, but no argument given"
+ % items[0])
+ value = items[1]
+ items.pop(0)
+ items.pop(0)
+ value = self.convert_commandline(value)
+ options[name] = value
+ elif items[0].startswith('-'):
+ orig = items[0]
+ name = items[0][1:]
+ items.pop(0)
+ if '=' in name:
+ raise BadCommandLine(
+ "Single-character options may not have arguments (%r)"
+ % orig)
+ for i in range(len(name)):
+ op_name = aliases.get(name[i], name[i])
+ if op_name in bool_options:
+ options[op_name] = True
+ else:
+ if i != len(name)-1:
+ raise BadCommandLine(
+ "-%s takes an argument, it cannot be followed "
+ "by other options (in %s)"
+ % (name[i], orig))
+ if not items:
+ raise BadCommandLine(
+ "-%s takes an argument, but no argument given"
+ % name[i])
+ value = self.convert_commandline(items[0])
+ items.pop(0)
+ options[op_name] = value
+ break
+ else:
+ args.append(items[0])
+ items.pop(0)
+ for key in options.keys():
+ options[key.replace('-', '_')] = options[key]
+ self.load_dict(options, default)
+ return args
+
+ def convert_commandline(self, value):
+ try:
+ return int(value)
+ except ValueError:
+ pass
+ return value
+
diff --git a/paste/recursive.py b/paste/recursive.py
new file mode 100644
index 0000000..bfe8206
--- /dev/null
+++ b/paste/recursive.py
@@ -0,0 +1,110 @@
+"""
+A WSGI middleware that allows for recursive and forwarded calls.
+All these calls go to the same 'application', but presumably that
+application acts differently with different URLs. The forwarded
+URLs must be relative to this container.
+
+The forwarder is available through
+``environ['paste.recursive.forward'](path, extra_environ=None)``,
+the second argument is a dictionary of values to be added to the
+request, overwriting any keys. The forward will call start_response;
+thus you must *not* call it after you have sent any output to the
+server. Also, it will return an iterator that must be returned up the
+stack. You may need to use exceptions to guarantee that this iterator
+will be passed back through the application.
+
+The includer is available through
+``environ['paste.recursive.include'](path, extra_environ=None)``.
+It is like forwarder, except it completes the request and returns a
+response object. The response object has three public attributes:
+status, headers, and body. The status is a string, headers is a list
+of (header_name, header_value) tuples, and the body is a string.
+"""
+
+from cStringIO import StringIO
+
+class RecursiveMiddleware(object):
+
+ def __init__(self, application):
+ self.application = application
+
+ def __call__(self, environ, start_response):
+ environ['paste.recursive.forward'] = Forwarder(
+ self.application, environ, start_response)
+ environ['paste.recursive.include'] = Includer(
+ self.application, environ, start_response)
+ return self.application(environ, start_response)
+
+class Recursive(object):
+
+ def __init__(self, application, environ, start_response):
+ self.application = application
+ self.original_environ = environ.copy()
+ self.previous_environ = environ
+ self.start_response = start_response
+
+ def __call__(self, path, new_environ=None):
+ environ = self.original_environ.copy()
+ if new_environ:
+ environ.update(new_environ)
+ environ['paste.recursive.previous_environ'] = self.previous_environ
+ base_path = self.original_environ.get('SCRIPT_NAME')
+ if path.startswith('/'):
+ assert path.startswith(base_path), "You can only forward requests to resources under the path %r (not %r)" % (base_path, path)
+ path = path[len(base_path)+1:]
+ assert not path.startswith('/')
+ path_info = '/' + path
+ environ['PATH_INFO'] = path_info
+ return self.activate(environ)
+
+class Forwarder(Recursive):
+
+ def activate(self, environ):
+ environ['wsgi.errors'].write('Forwarding to %r\n' % (environ['SCRIPT_NAME'] + environ['PATH_INFO']))
+ return self.application(environ, self.start_response)
+
+class Includer(Recursive):
+
+ def activate(self, environ):
+ environ['wsgi.errors'].write('Including %r\n' % (environ['SCRIPT_NAME'] + environ['PATH_INFO']))
+        response = IncludedResponse()
+ def start_response(status, headers):
+ response.status = status
+ response.headers = headers
+ return response.write
+ app_iter = self.application(environ, start_response)
+ try:
+ for s in app_iter:
+ response.write(s)
+ finally:
+ if hasattr(app_iter, 'close'):
+ app_iter.close()
+ response.close()
+ return response
+
+class IncludedResponse(object):
+
+ def __init__(self):
+ self.headers = None
+ self.status = None
+ self.output = StringIO()
+ self.str = None
+
+ def close(self):
+ self.str = self.output.getvalue()
+ self.output.close()
+ self.output = None
+
+    def write(self, s):
+        assert self.output is not None, "This response has already been closed and no further data can be written."
+        self.output.write(s)
+
+ def __str__(self):
+ return self.body
+
+ def body__get(self):
+ if self.str is None:
+ return self.output.getvalue()
+ else:
+ return self.str
+ body = property(body__get)
diff --git a/paste/reloader.py b/paste/reloader.py
new file mode 100644
index 0000000..4753f90
--- /dev/null
+++ b/paste/reloader.py
@@ -0,0 +1,111 @@
+"""
+Use this like::
+
+ import reloader
+ reloader.install()
+
+Then make sure your server is installed with a shell script like::
+
+ err=3
+ while test "$err" -eq 3 ; do
+ python server.py
+ err="$?"
+ done
+
+or restart in Python (server.py does this). Use the watch_file(filename)
+function to cause a reload/restart for other non-Python files (e.g.,
+configuration files).
+"""
+
+import os
+import sys
+import time
+import threading
+import atexit
+from paste.util.classinstance import classinstancemethod
+
+def install(poll_interval=1, raise_keyboard_interrupt=True):
+ mon = Monitor(poll_interval=poll_interval,
+ raise_keyboard_interrupt=raise_keyboard_interrupt)
+ t = threading.Thread(target=mon.periodic_reload)
+ t.start()
+
+class Monitor:
+
+ instances = []
+
+ def __init__(self, poll_interval, raise_keyboard_interrupt):
+ self.module_mtimes = {}
+ atexit.register(self.atexit)
+ self.keep_running = True
+ self.poll_interval = poll_interval
+ self.raise_keyboard_interrupt = raise_keyboard_interrupt
+ self.extra_files = []
+ self.instances.append(self)
+
+ def atexit(self):
+ self.keep_running = False
+        if self.raise_keyboard_interrupt:
+            # This exception is somehow magic, because it reaches
+            # more threads and situations (like socket.accept)
+            # than a mere SystemExit does.
+ raise KeyboardInterrupt("Exiting process")
+
+ def periodic_reload(self):
+ while 1:
+ if not self.keep_running:
+ break
+ if not self.check_reload():
+ os._exit(3)
+ break
+ time.sleep(self.poll_interval)
+
+ def check_reload(self):
+ filenames = self.extra_files[:]
+ for name, module in sys.modules.items():
+ try:
+ filenames.append(module.__file__)
+ except AttributeError:
+ continue
+ for filename in filenames:
+ mtime = os.stat(filename).st_mtime
+ if filename.endswith('.pyc') and os.path.exists(filename[:-1]):
+ mtime = max(os.stat(filename[:-1]).st_mtime, mtime)
+ if not self.module_mtimes.has_key(filename):
+ self.module_mtimes[filename] = mtime
+ elif self.module_mtimes[filename] < mtime:
+ print >> sys.stderr, (
+ "%s changed; reloading..." % filename)
+ return False
+ return True
+
+ def watch_file(self, cls, filename):
+ filename = os.path.abspath(filename)
+ if self is None:
+ for instance in cls.instances:
+ instance.watch_file(filename)
+ else:
+ self.extra_files.append(filename)
+
+ watch_file = classinstancemethod(watch_file)
+
+watch_file = Monitor.watch_file
+
+def awake_select(addrs):
+ """
+    When a process is blocked in select() on its sockets it will not
+    quit. By connecting to the addresses locally we move the code out
+    of the select, where it can terminate properly.
+
+ (For now, KeyboardInterrupt seems to terminate these selects
+ properly)
+ """
+
+    import socket  # imported here because only this helper needs it
+    for addr in addrs:
+        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ sock.connect(addr)
+ sock.close()
+ except:
+ pass
+
diff --git a/paste/scgiserver.py b/paste/scgiserver.py
new file mode 100644
index 0000000..15fbef2
--- /dev/null
+++ b/paste/scgiserver.py
@@ -0,0 +1,148 @@
+#! /usr/bin/env python
+"""
+SCGI-->WSGI application proxy, "SWAP".
+
+(Originally written by Titus Brown.)
+
+This lets an SCGI front-end like mod_scgi be used to execute WSGI
+application objects. To use it, subclass the SWAP class like so:
+
+
+ class TestAppHandler(swap.SWAP):
+ def __init__(self, *args, **kwargs):
+ self.prefix = '/canal'
+ self.app_obj = TestAppClass
+ swap.SWAP.__init__(self, *args, **kwargs)
+
+where 'TestAppClass' is the application object from WSGI and '/canal'
+is the prefix for what is served by the SCGI Web-server-side process.
+
+Then execute the SCGI handler "as usual" by doing something like this
+
+ scgi_server.SCGIServer(TestAppHandler, port=4000).serve()
+
+and point mod_scgi (or whatever your SCGI front end is) at port 4000.
+
+Kudos to the WSGI folk for writing a nice PEP & the Quixote folk for
+writing a nice extensible SCGI server for Python!
+"""
+
+import sys
+import time
+import os
+from scgi import scgi_server
+
+def debug(msg):
+ timestamp = time.strftime("%Y-%m-%d %H:%M:%S",
+ time.localtime(time.time()))
+ sys.stderr.write("[%s] %s\n" % (timestamp, msg))
+
+class SWAP(scgi_server.SCGIHandler):
+ """
+ SCGI->WSGI application proxy: let an SCGI server execute WSGI
+ application objects.
+ """
+ app_obj = None
+ prefix = None
+
+ def __init__(self, *args, **kwargs):
+ assert self.app_obj, "must set app_obj"
+ assert self.prefix, "must set prefix"
+ args = (self,) + args
+ scgi_server.SCGIHandler.__init__(*args, **kwargs)
+
+ def handle_connection(self, conn):
+ """
+ Handle an individual connection.
+ """
+ input = conn.makefile("r")
+ output = conn.makefile("w")
+
+ environ = self.read_env(input)
+ environ['wsgi.input'] = input
+ environ['wsgi.errors'] = sys.stderr
+ environ['wsgi.version'] = (1,0)
+ environ['wsgi.multithread'] = False
+ environ['wsgi.multiprocess'] = True
+ environ['wsgi.run_once'] = False
+
+ # dunno how SCGI does HTTPS signalling; can't test it myself... @CTB
+ if environ.get('HTTPS','off') in ('on','1'):
+ environ['wsgi.url_scheme'] = 'https'
+ else:
+ environ['wsgi.url_scheme'] = 'http'
+
+ ## SCGI does some weird environ manglement. We need to set
+ ## SCRIPT_NAME from 'prefix' and then set PATH_INFO from
+ ## REQUEST_URI.
+
+ prefix = self.prefix
+ path = environ['REQUEST_URI'][len(prefix):]
+
+ environ['SCRIPT_NAME'] = prefix
+ environ['PATH_INFO'] = path
+
+ headers_set = []
+ headers_sent = []
+ chunks = []
+ def write(data):
+ chunks.append(data)
+
+ def start_response(status,response_headers,exc_info=None):
+ if exc_info:
+ try:
+ if headers_sent:
+ # Re-raise original exception if headers sent
+ raise exc_info[0], exc_info[1], exc_info[2]
+ finally:
+ exc_info = None # avoid dangling circular ref
+ elif headers_set:
+ raise AssertionError("Headers already set!")
+
+ headers_set[:] = [status,response_headers]
+ return write
+
+ ###
+
+ result = self.app_obj(environ, start_response)
+ try:
+ for data in result:
+ chunks.append(data)
+
+ # Before the first output, send the stored headers
+ if not headers_set:
+ # Error -- the app never called start_response
+ status = '500 Server Error'
+ response_headers = [('Content-type', 'text/html')]
+ chunks = ["XXX start_response never called"]
+ else:
+ status, response_headers = headers_sent[:] = headers_set
+
+ output.write('Status: %s\r\n' % status)
+ for header in response_headers:
+ output.write('%s: %s\r\n' % header)
+ output.write('\r\n')
+
+ for data in chunks:
+ output.write(data)
+ finally:
+ if hasattr(result,'close'):
+ result.close()
+
+ # SCGI backends use connection closing to signal 'fini'.
+ try:
+ input.close()
+ output.close()
+ conn.close()
+ except IOError, err:
+ debug("IOError while closing connection ignored: %s" % err)
+
+
+def serve_application (application, prefix, port):
+ class SCGIAppHandler(SWAP):
+ def __init__ (self, *args, **kwargs):
+ self.prefix = prefix
+ self.app_obj = application
+ SWAP.__init__(self, *args, **kwargs)
+
+ scgi_server.SCGIServer(SCGIAppHandler, port=port).serve()
diff --git a/paste/server.py b/paste/server.py
new file mode 100755
index 0000000..878a301
--- /dev/null
+++ b/paste/server.py
@@ -0,0 +1,269 @@
+#!/usr/bin/env python
+"""
+A generic Paste server, useable for multiple backends
+"""
+
+help_message = """\
+usage: %(program)s [OPTIONS] servername
+Runs a server with the given options. Available servers are listed under --server below.
+
+OPTIONS
+-f FILENAME
+--config-file=FILENAME
+ The configuration file (default: no configuration).
+-h Help
+--server=NAME
+ Name is one of:
+ twisted:
+ Runs an HTTP server. Use --port for the port (default: 8080),
+ and --host for the interface (default: all interfaces).
+ wsgiutils:
+ Runs an HTTP server. Use --port and --host.
+ cgi:
+ Creates a CGI script -- outputs the script to stdout.
+--webkit-dir=PATH
+ Serves Webware servlets (or other applications) out of PATH
+--debug -D
+ Turn on debugging (shows errors in the browser)
+"""
+
+import sys
+import os
+from paste import reloader
+from paste import wsgilib
+
+# This way you can run this out of a checkout, and we'll fix up
+# the path...
+try:
+ here = os.path.normpath(os.path.abspath(__file__))
+except NameError:
+ here = os.path.normpath(os.path.abspath(sys.argv[0]))
+try:
+ import paste
+except ImportError:
+ sys.path.append(os.path.dirname(os.path.dirname(here)))
+ import paste
+paste_path = os.path.normpath(
+ os.path.dirname(os.path.abspath(paste.__file__)))
+
+if os.path.dirname(here) != paste_path:
+ sys.stderr.write(
+ 'Warning: server.py is running out of %s, but paste is loaded '
+ 'out of %s\n' % (here, paste_path))
+
+from paste.pyconfig import Config
+from paste.configmiddleware import config_middleware
+from paste.webkit import wsgiwebkit
+from paste.util import thirdparty
+
+servers = {}
+
+default_ops = {
+ 'port': 8080,
+ 'host': 'localhost',
+ 'verbose': False,
+ 'quiet': False,
+ 'reload': False,
+ }
+
+reloader_environ_key = 'WSGI_RELOADER_SHOULD_RUN'
+
+default_config_fn = os.path.join(os.path.dirname(__file__),
+ 'default_config.conf')
+
+def load_commandline(args, allow_reload=True):
+ conf = Config()
+ # We use conf.verbose early, so we set it now:
+ conf.load_dict(default_ops, default=True)
+ args = conf.load_commandline(
+ args, bool_options=['help', 'verbose', 'reload', 'debug', 'quiet',
+ 'no_verbose'],
+ aliases={'h': 'help', 'v': 'verbose', 'f': 'config_file',
+ 'D': 'debug', 'q': 'quiet'})
+ if conf.get('help'):
+ print help()
+ return None, 0
+ if conf.get('no_verbose'):
+ conf['verbose'] = False
+ load_conf(conf, default_config_fn, True)
+ reloader.watch_file(default_config_fn)
+ if not conf.get('no_server_conf') and os.path.exists('server.conf'):
+ load_conf(conf, 'server.conf', True)
+ reloader.watch_file('server.conf')
+ if conf.get('config_file'):
+ load_conf(conf, conf['config_file'], True)
+ reloader.watch_file(conf['config_file'])
+ if conf['quiet']:
+ conf['verbose'] = False
+ server = conf.get('server')
+ if not server:
+ server_ops = servers.keys()
+ server_ops.sort()
+ print "Missing --server=name, one of: %s" % ', '.join(server_ops)
+ return None, 0
+ if conf['reload'] and allow_reload:
+ if os.environ.get(reloader_environ_key):
+ if conf['verbose']:
+ print "Running reloading file monitor"
+ reloader.install(conf.get('reload_interval', 1), False)
+ else:
+ try:
+ return restart_with_reloader(conf)
+ except KeyboardInterrupt:
+ return None, 0
+ if conf.get('sys_path'):
+ update_sys_path(conf['sys_path'], conf['verbose'])
+ app = make_app(conf)
+ return conf, app
+
+def run_commandline(args):
+ conf, app = load_commandline(args)
+ if conf is None:
+ return app
+ return run_server(conf, app)
+
+def run_server(conf, app):
+ server = servers[conf['server']]
+ if conf['verbose']:
+ print "Starting server."
+ try:
+ server(conf, app)
+ except KeyboardInterrupt:
+ # This is an okay error
+ pass
+ return 0
+
+
+def load_conf(conf, filename, default=False):
+ if isinstance(filename, (list, tuple)):
+ for fn in filename:
+ load_conf(conf, fn, default=default)
+ return
+ if os.path.exists(filename):
+ if conf['verbose']:
+ print 'Loading configuration from %s' % filename
+ conf.load(filename, default=default)
+
+def update_sys_path(paths, verbose):
+ if isinstance(paths, (str, unicode)):
+ paths = [paths]
+ for path in paths:
+ path = os.path.abspath(path)
+ if path not in sys.path:
+ if verbose:
+ print 'Adding %s to path' % path
+ sys.path.append(path)
+
+def help():
+ program = sys.argv[0]
+ return help_message % {'program': program}
+
+def twisted_serve(conf, app):
+ from paste.twisted_wsgi import serve_application
+ serve_application(
+ app, port=int(conf.get('port', 8080)))
+
+servers['twisted'] = twisted_serve
+
+def scgi_serve(conf, app):
+ thirdparty.add_package('scgi')
+ from paste.scgiserver import serve_application
+ prefix = conf.get('scgi_prefix', '/')
+ serve_application(app, prefix, port=int(conf.get('port', 4000)))
+
+servers['scgi'] = scgi_serve
+
+def wsgiutils_serve(conf, app):
+ thirdparty.add_package('wsgiutils')
+ from wsgiutils import wsgiServer
+ server = wsgiServer.WSGIServer(
+ (conf.get('host', 'localhost'),
+ int(conf.get('port', 8080))), {'': app})
+ server.serve_forever()
+
+servers['wsgiutils'] = wsgiutils_serve
+
+def cgi_serve(conf, app):
+ replacements = {}
+ replacements['default_config_fn'] = os.path.abspath(default_config_fn)
+
+ # Ideally, other_conf should be any options that came from the
+ # command-line.
+ # @@: This assumes too much about the ordering of namespaces.
+ other_conf = dict(conf.namespaces[-2])
+ # Not a good idea to let 'verbose' through, but this doesn't really
+ # stop any sourced configs from setting it either...
+ if other_conf.has_key('verbose'):
+ del other_conf['verbose']
+ replacements['other_conf'] = other_conf
+
+ template_fn = os.path.join(os.path.dirname(__file__),
+ 'server_script_template.py')
+ template = open(template_fn).read()
+ for name, value in replacements.items():
+ template = template.replace('@@' + name + '@@', repr(value))
+
+ print "#!%s" % sys.executable
+ print template
+ print "if __name__ == '__main__':"
+ print " from paste.cgiserver import run_with_cgi"
+ print " run_with_cgi(app)"
+
+servers['cgi'] = cgi_serve
+
+def console_server(conf, app):
+ url = conf.get('url', '/')
+ query_string = ''
+ if '?' in url:
+ url, query_string = url.split('?', 1)
+ quiet = conf.get('quiet', False)
+ status, headers, content, errors = wsgilib.raw_interactive(
+ app, url, QUERY_STRING=query_string)
+ any_header = False
+ if not quiet or int(status.split()[0]) != 200:
+ print 'Status:', status
+ any_header = True
+ for header, value in headers:
+ if quiet and (
+ header.lower() in ('content-type', 'content-length')
+ or (header.lower() == 'set-cookie'
+ and value.startswith('_SID_'))):
+ continue
+ print '%s: %s' % (header, value)
+ any_header = True
+ if any_header:
+ print
+ if conf.get('compact', False):
+ # Remove empty lines
+ content = '\n'.join([l for l in content.splitlines()
+ if l.strip()])
+ print content
+ if errors:
+ sys.stderr.write('-'*25 + ' Errors ' + '-'*25 + '\n')
+ sys.stderr.write(errors + '\n')
+
+servers['console'] = console_server
+
+def make_app(conf):
+ if conf.get('webkit_dir'):
+ app = wsgiwebkit.webkit(conf['webkit_dir'], use_lint=conf.get('lint'))
+ else:
+ print "You must provide --webkit-dir"
+ sys.exit(2)
+ return config_middleware(app, conf)
+
+def restart_with_reloader(conf):
+ if conf['verbose']:
+ print "Restarting process with reloading on"
+ while 1:
+ args = [sys.executable] + sys.argv
+ new_environ = os.environ.copy()
+ new_environ[reloader_environ_key] = 'true'
+ exit_code = os.spawnve(os.P_WAIT, sys.executable,
+ args, new_environ)
+ if exit_code != 3:
+ return None, exit_code
+ print "Exit code 3; restarting server"
+
+if __name__ == '__main__':
+ sys.exit(run_commandline(sys.argv[1:]))
diff --git a/paste/server_script_template.py b/paste/server_script_template.py
new file mode 100644
index 0000000..504e371
--- /dev/null
+++ b/paste/server_script_template.py
@@ -0,0 +1,19 @@
+import os
+from paste import server
+from paste.pyconfig import Config
+from paste.configmiddleware import config_middleware
+from paste.webkit import wsgiwebkit
+
+conf = Config()
+conf.load_dict(server.default_ops, default=True)
+conf.load_dict(@@other_conf@@)
+server.load_conf(conf, @@default_config_fn@@)
+if not conf.get('no_server_conf') and os.path.exists('server.conf'):
+ server.load_conf(conf, 'server.conf')
+if conf.get('config_file'):
+ server.load_conf(conf, conf['config_file'])
+if conf.get('sys_path'):
+ server.update_sys_path(conf['sys_path'], conf['verbose'])
+
+app = wsgiwebkit.webkit(conf['webkit_dir'], use_lint=conf.get('lint'))
+app = config_middleware(app, conf)
diff --git a/paste/session.py b/paste/session.py
new file mode 100644
index 0000000..72ba8d3
--- /dev/null
+++ b/paste/session.py
@@ -0,0 +1,156 @@
+"""
+Creates a session object; then in your application, use::
+
+ environ['paste.session.factory']()
+
+This will return a dictionary. The contents of this dictionary will
+be saved to disk when the request is completed. The session will be
+created when you first fetch the session dictionary, and a cookie will
+be sent in that case. There's currently no way to use sessions without
+cookies, and there's no way to delete a session except to clear its
+data.
+
+@@: This doesn't do any locking, and may cause problems when a single
+session is accessed concurrently. Also, it loads and saves the
+session for each request, with no caching. Also, sessions aren't
+expired.
+"""
+
+from Cookie import SimpleCookie
+import time
+import random
+import os
+import md5
+try:
+ import cPickle
+except ImportError:
+ import pickle as cPickle
+import wsgilib
+
+class SessionMiddleware(object):
+
+ def __init__(self, application, **factory_kw):
+ self.application = application
+ self.factory_kw = factory_kw
+
+ def __call__(self, environ, start_response):
+ session_factory = SessionFactory(environ, **self.factory_kw)
+ environ['paste.session.factory'] = session_factory
+
+ def session_start_response(status, headers):
+ if not session_factory.created:
+ return start_response(status, headers)
+ headers.append(session_factory.set_cookie_header())
+ return start_response(status, headers)
+
+ app_iter = self.application(environ, session_start_response)
+ if session_factory.used:
+ return wsgilib.add_close(app_iter, session_factory.close)
+ else:
+ return app_iter
+
+class SessionFactory(object):
+
+ def __init__(self, environ, cookie_name='_SID_',
+ session_class=None, **session_class_kw):
+ self.created = False
+ self.used = False
+ self.environ = environ
+ self.cookie_name = cookie_name
+ self.session = None
+ self.session_class = session_class or FileSession
+ self.session_class_kw = session_class_kw
+
+ def __call__(self):
+ self.used = True
+ if self.session is not None:
+ return self.session.data()
+ cookies = wsgilib.get_cookies(self.environ)
+ session = None
+ if cookies.has_key(self.cookie_name):
+ self.sid = cookies[self.cookie_name].value
+ try:
+ session = self.session_class(self.sid, create=False,
+ **self.session_class_kw)
+ except KeyError:
+ # Invalid SID
+ pass
+ if session is None:
+ self.created = True
+ self.sid = self.make_sid()
+ session = self.session_class(self.sid, create=True,
+ **self.session_class_kw)
+ self.session = session
+ return session.data()
+
+ def make_sid(self):
+ # @@: need better algorithm
+ return (''.join(['%02d' % x for x in time.localtime(time.time())[:6]])
+ + '-' + self.unique_id())
+
+ def unique_id(self, for_object=None):
+ """
+ Generates an opaque identifier string that is practically
+ guaranteed to be unique. If an object is passed, then its
+ id() is incorporated into the generation. Relies on md5 and
+ returns a 32 character long string.
+ """
+ r = [time.time(), random.random(), os.times()]
+ if for_object is not None:
+ r.append(id(for_object))
+ md5_hash = md5.new(str(r))
+ try:
+ return md5_hash.hexdigest()
+ except AttributeError:
+ # Older versions of Python didn't have hexdigest, so we'll
+ # do it manually
+ hexdigest = []
+ for char in md5_hash.digest():
+ hexdigest.append('%02x' % ord(char))
+ return ''.join(hexdigest)
+
+ def set_cookie_header(self):
+ c = SimpleCookie()
+ c[self.cookie_name] = self.sid
+ c[self.cookie_name]['path'] = '/'
+ name, value = str(c).split(': ', 1)
+ return (name, value)
+
+ def close(self):
+ if self.session is not None:
+ self.session.close()
+
+class FileSession(object):
+
+ def __init__(self, sid, create=False, session_file_path='/tmp'):
+ self.session_file_path = session_file_path
+ self.sid = sid
+ if not create:
+ if not os.path.exists(self.filename()):
+ raise KeyError
+ self._data = None
+
+ def filename(self):
+ return os.path.join(self.session_file_path, self.sid)
+
+ def data(self):
+ if self._data is not None:
+ return self._data
+ if os.path.exists(self.filename()):
+ f = open(self.filename(), 'rb')
+ self._data = cPickle.load(f)
+ f.close()
+ else:
+ self._data = {}
+ return self._data
+
+ def close(self):
+ if self._data is not None:
+ filename = self.filename()
+ if not self._data:
+ if os.path.exists(filename):
+ os.unlink(filename)
+ else:
+ f = open(self.filename(), 'wb')
+ cPickle.dump(self._data, f)
+ f.close()
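
The module docstring above describes the intended use; here is a minimal sketch
of an application wired up with this middleware (the 'counter' key is
illustrative, while 'paste.session.factory' and session_file_path are defined by
the module above):

    from paste.session import SessionMiddleware

    def count_app(environ, start_response):
        # Fetching the dictionary creates the session (and sets the cookie)
        # on first use; changes are pickled back to disk when the request ends.
        session = environ['paste.session.factory']()
        session['counter'] = session.get('counter', 0) + 1
        start_response('200 OK', [('Content-type', 'text/plain')])
        return ['Visit number %s' % session['counter']]

    application = SessionMiddleware(count_app, session_file_path='/tmp')
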
diff --git a/paste/tests/__init__.py b/paste/tests/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/tests/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/tests/doctest_webapp.py b/paste/tests/doctest_webapp.py
new file mode 100755
index 0000000..b0b786c
--- /dev/null
+++ b/paste/tests/doctest_webapp.py
@@ -0,0 +1,403 @@
+#!/usr/bin/env python2.4
+"""
+These are functions for use when doctest-testing a document.
+"""
+
+import subprocess
+import doctest
+import os
+import sys
+import shutil
+import re
+import cgi
+import rfc822
+from cStringIO import StringIO
+from paste import server
+from paste import wsgilib
+from paste.util.thirdparty import add_package
+add_package('PySourceColor')
+import PySourceColor
+
+
+here = os.path.abspath(__file__)
+paste_parent = os.path.dirname(
+ os.path.dirname(os.path.dirname(here)))
+
+def run(command):
+ """
+ Runs the string command, prints any output.
+ """
+ proc = subprocess.Popen(command, shell=True,
+ stderr=subprocess.STDOUT,
+ stdout=subprocess.PIPE, env=_make_env())
+ data = proc.stdout.read()
+ proc.wait()
+ while data.endswith('\n') or data.endswith('\r'):
+ data = data[:-1]
+ if data:
+ data = '\n'.join(
+ [l for l in data.splitlines() if l])
+ print data
+
+def _make_env():
+ env = os.environ.copy()
+ env['PATH'] = (env.get('PATH', '')
+ + ':'
+ + os.path.join(paste_parent, 'scripts')
+ + ':'
+ + os.path.join(paste_parent, 'paste', '3rd-party',
+ 'sqlobject-files', 'scripts'))
+ env['PYTHONPATH'] = (env.get('PYTHONPATH', '')
+ + ':'
+ + paste_parent)
+ return env
+
+def clear_dir(dir):
+ """
+ Clears (deletes) the given directory
+ """
+ shutil.rmtree(dir, True)
+
+def ls(dir=None, recurse=False, indent=0):
+ """
+ Show a directory listing
+ """
+ dir = dir or os.getcwd()
+ fns = os.listdir(dir)
+ fns.sort()
+ for fn in fns:
+ full = os.path.join(dir, fn)
+ if os.path.isdir(full):
+ fn = fn + '/'
+ print ' '*indent + fn
+ if os.path.isdir(full) and recurse:
+ ls(dir=full, recurse=True, indent=indent+2)
+
+def make_app(dir):
+ os.chdir(dir)
+ sys.path.append(os.path.dirname(dir))
+ conf, app = server.load_commandline((), allow_reload=False)
+ assert conf is not None, (
+ "server.load_commandline requested exit with code %r"
+ % app)
+ return app
+
+default_app = None
+default_url = None
+
+def set_default_app(app, url):
+ global default_app
+ global default_url
+ default_app = app
+ default_url = url
+
+def resource_filename(fn):
+ """
+ Returns the filename of the resource -- generally in the directory
+ resources/DocumentName/fn
+ """
+ return os.path.join(
+ os.path.dirname(sys.testing_document_filename),
+ 'resources',
+ os.path.splitext(os.path.basename(sys.testing_document_filename))[0],
+ fn)
+
+def show(path_info, example_name):
+ fn = resource_filename(example_name + '.html')
+ out = StringIO()
+ assert default_app is not None, (
+ "No default_app set")
+ url = default_url + path_info
+ out.write('<span class="doctest-url"><a href="%s">%s</a></span><br>\n'
+ % (url, url))
+ out.write('<div class="doctest-example">\n')
+ proc = subprocess.Popen(
+ ['wsgi-server', '--server=console', '--no-verbose',
+ '--url=' + path_info],
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ env=_make_env())
+ stdout, errors = proc.communicate()
+ stdout = StringIO(stdout)
+ headers = rfc822.Message(stdout)
+ content = stdout.read()
+ for header, value in headers.items():
+ if header.lower() == 'status' and int(value.split()[0]) == 200:
+ continue
+ if header.lower() in ('content-type', 'content-length'):
+ continue
+ if (header.lower() == 'set-cookie'
+ and value.startswith('_SID_')):
+ continue
+ out.write('<span class="doctest-header">%s: %s</span><br>\n'
+ % (header, value))
+ lines = [l for l in content.splitlines() if l.strip()]
+ for line in lines:
+ out.write(line + '\n')
+ if errors:
+ out.write('<pre class="doctest-errors">%s</pre>'
+ % errors)
+ out.write('</div>\n')
+ result = out.getvalue()
+ if not os.path.exists(fn):
+ f = open(fn, 'wb')
+ f.write(result)
+ f.close()
+ else:
+ f = open(fn, 'rb')
+ expected = f.read()
+ f.close()
+ if not html_matches(expected, result):
+ print 'Pages did not match. Expected from %s:' % fn
+ print '-'*60
+ print expected
+ print '='*60
+ print 'Actual output:'
+ print '-'*60
+ print result
+
+def html_matches(pattern, text):
+ # @@: Comparison is currently disabled; this always reports a match,
+ # so the regex-based check below is never reached.
+ return True
+ regex = re.escape(pattern)
+ regex = regex.replace(r'\.\.\.', '.*')
+ regex = re.sub(r'0x[0-9a-f]+', '.*', regex)
+ regex = '^%s$' % regex
+ return re.search(regex, text)
+
+def create_file(path, version, data):
+ if data.startswith('\n'):
+ data = data[1:]
+ lines = data.splitlines()
+ new_lines = []
+ for line in lines:
+ if line.rstrip() == '.':
+ new_lines.append('')
+ else:
+ new_lines.append(line)
+ data = '\n'.join(new_lines) + '\n'
+ write_data(path, data)
+ show_file(path, version)
+
+def show_file(path, version):
+ ext = os.path.splitext(path)[1]
+ f = open(path, 'rb')
+ data = f.read()
+ f.close()
+ if ext == '.py':
+ html = ('<div class="source-code">%s</div>'
+ % PySourceColor.str2html(data, PySourceColor.dark))
+ else:
+ html = '<pre class="source-code">%s</pre>' % cgi.escape(data, 1)
+ html = '<span class="source-filename">%s</span><br>%s' % (
+ path, html)
+ write_data(resource_filename('%s.%s.gen.html' % (path, version)),
+ html)
+
+def call_source_highlight(input, format):
+ proc = subprocess.Popen(['source-highlight', '--out-format=html',
+ '--no-doc', '--css=none',
+ '--src-lang=%s' % format], shell=False,
+ stdout=subprocess.PIPE)
+ stdout, stderr = proc.communicate(input)
+ result = stdout
+ proc.wait()
+ return result
+
+
+def write_data(path, data):
+ dir = os.path.dirname(os.path.abspath(path))
+ if not os.path.exists(dir):
+ os.makedirs(dir)
+ f = open(path, 'wb')
+ f.write(data)
+ f.close()
+
+
+def change_file(path, changes):
+ f = open(path, 'rb')
+ lines = f.readlines()
+ f.close()
+ for change_type, line, text in changes:
+ if change_type == 'insert':
+ lines[line:line] = [text]
+ elif change_type == 'delete':
+ lines[line:text] = []
+ else:
+ assert 0, (
+ "Unknown change_type: %r" % change_type)
+ f = open(path, 'wb')
+ f.write(''.join(lines))
+ f.close()
+
+class LongFormDocTestParser(doctest.DocTestParser):
+
+ """
+ This parser recognizes some reST comments as commands, without
+ prompts or expected output, like:
+
+ .. run:
+
+ do_this(...
+ ...)
+ """
+
+ _EXAMPLE_RE = re.compile(r"""
+ # Source consists of a PS1 line followed by zero or more PS2 lines.
+ (?: (?P<source>
+ (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
+ (?:\n [ ]* \.\.\. .*)*) # PS2 lines
+ \n?
+ # Want consists of any non-blank lines that do not start with PS1.
+ (?P<want> (?:(?![ ]*$) # Not a blank line
+ (?![ ]*>>>) # Not a line starting with PS1
+ .*$\n? # But any other line
+ )*))
+ |
+ (?: # This is for longer commands that are prefixed with a reST
+ # comment like '.. run:' (two colons makes that a directive).
+ # These commands cannot have any output.
+
+ (?:^\.\.[ ]*(?P<run>run):[ ]*\n) # Leading command/command
+ (?:[ ]*\n)? # Blank line following
+ (?P<runsource>
+ (?:(?P<runindent> [ ]+)[^ ].*$)
+ (?:\n [ ]+ .*)*)
+ )
+ |
+ (?: # This is for shell commands
+
+ (?P<shellsource>
+ (?:^(?P<shellindent> [ ]*) [$] .*) # Shell line
+ (?:\n [ ]* [>] .*)*) # Continuation
+ \n?
+ # Want consists of any non-blank lines that do not start with $
+ (?P<shellwant> (?:(?![ ]*$)
+ (?![ ]*[$]$)
+ .*$\n?
+ )*))
+ """, re.MULTILINE | re.VERBOSE)
+
+ def _parse_example(self, m, name, lineno):
+ r"""
+ Given a regular expression match from `_EXAMPLE_RE` (`m`),
+ return a pair `(source, want)`, where `source` is the matched
+ example's source code (with prompts and indentation stripped);
+ and `want` is the example's expected output (with indentation
+ stripped).
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+
+ >>> def parseit(s):
+ ... p = LongFormDocTestParser()
+ ... return p._parse_example(p._EXAMPLE_RE.search(s), '<string>', 1)
+ >>> parseit('>>> 1\n1')
+ ('1', {}, '1', None)
+ >>> parseit('>>> (1\n... +1)\n2')
+ ('(1\n+1)', {}, '2', None)
+ >>> parseit('.. run:\n\n test1\n test2\n')
+ ('test1\ntest2', {}, '', None)
+ """
+ # Get the example's indentation level.
+ runner = m.group('run') or ''
+ indent = len(m.group('%sindent' % runner))
+
+ # Divide source into lines; check that they're properly
+ # indented; and then strip their indentation & prompts.
+ source_lines = m.group('%ssource' % runner).split('\n')
+ if runner:
+ self._check_prefix(source_lines[1:], ' '*indent, name, lineno)
+ else:
+ self._check_prompt_blank(source_lines, indent, name, lineno)
+ self._check_prefix(source_lines[2:], ' '*indent + '.', name, lineno)
+ if runner:
+ source = '\n'.join([sl[indent:] for sl in source_lines])
+ else:
+ source = '\n'.join([sl[indent+4:] for sl in source_lines])
+
+ if runner:
+ want = ''
+ exc_msg = None
+ else:
+ # Divide want into lines; check that it's properly indented; and
+ # then strip the indentation. Spaces before the last newline should
+ # be preserved, so plain rstrip() isn't good enough.
+ want = m.group('want')
+ want_lines = want.split('\n')
+ if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
+ del want_lines[-1] # forget final newline & spaces after it
+ self._check_prefix(want_lines, ' '*indent, name,
+ lineno + len(source_lines))
+ want = '\n'.join([wl[indent:] for wl in want_lines])
+
+ # If `want` contains a traceback message, then extract it.
+ m = self._EXCEPTION_RE.match(want)
+ if m:
+ exc_msg = m.group('msg')
+ else:
+ exc_msg = None
+
+ # Extract options from the source.
+ options = self._find_options(source, name, lineno)
+
+ return source, options, want, exc_msg
+
+
+ def parse(self, string, name='<string>'):
+ """
+ Divide the given string into examples and intervening text,
+ and return them as a list of alternating Examples and strings.
+ Line numbers for the Examples are 0-based. The optional
+ argument `name` is a name identifying this string, and is only
+ used for error messages.
+ """
+ string = string.expandtabs()
+ # If all lines begin with the same indentation, then strip it.
+ min_indent = self._min_indent(string)
+ if min_indent > 0:
+ string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+ output = []
+ charno, lineno = 0, 0
+ # Find all doctest examples in the string:
+ for m in self._EXAMPLE_RE.finditer(string):
+ # Add the pre-example text to `output`.
+ output.append(string[charno:m.start()])
+ # Update lineno (lines before this example)
+ lineno += string.count('\n', charno, m.start())
+ # Extract info from the regexp match.
+ (source, options, want, exc_msg) = \
+ self._parse_example(m, name, lineno)
+ # Create an Example, and add it to the list.
+ if not self._IS_BLANK_OR_COMMENT(source):
+ # @@: Erg, this is the only line I need to change...
+ output.append( doctest.Example(source, want, exc_msg,
+ lineno=lineno,
+ indent=min_indent+len(m.group('indent') or m.group('runindent')),
+ options=options) )
+ # Update lineno (lines inside this example)
+ lineno += string.count('\n', m.start(), m.end())
+ # Update charno.
+ charno = m.end()
+ # Add any remaining post-example text to `output`.
+ output.append(string[charno:])
+ return output
+
+
+
+if __name__ == '__main__':
+ import sys
+ if sys.argv[1:] and sys.argv[1] == 'doctest':
+ doctest.testmod()
+ sys.exit()
+ if not paste_parent in sys.path:
+ sys.path.append(paste_parent)
+ for fn in sys.argv[1:]:
+ fn = os.path.abspath(fn)
+ # @@: OK, ick; but this module gets loaded twice
+ sys.testing_document_filename = fn
+ doctest.testfile(fn, module_relative=False,
+ parser=LongFormDocTestParser())
+ new = os.path.splitext(fn)[0] + '.html'
+ assert new != fn
+ os.system('rest2html %s > %s' % (fn, new))
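
A short sketch of driving LongFormDocTestParser directly, assuming the module's
import-time dependencies (notably the bundled PySourceColor) are available; the
reST text and the setup_something call are illustrative:

    import doctest
    from paste.tests.doctest_webapp import LongFormDocTestParser

    text = """\
    Ordinary interactive examples still work:

        >>> 1 + 1
        2

    .. run:

        setup_something(
            long_argument)
    """

    parser = LongFormDocTestParser()
    for chunk in parser.parse(text):
        if isinstance(chunk, doctest.Example):
            print repr(chunk.source)
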
diff --git a/paste/tests/echotest.py b/paste/tests/echotest.py
new file mode 100755
index 0000000..885d571
--- /dev/null
+++ b/paste/tests/echotest.py
@@ -0,0 +1,109 @@
+"""
+Tests a WSGI stack, using urllib. Queries the echo application.
+"""
+
+import unittest
+import urlparse
+import urllib
+import os
+import sys
+
+class EchoTest(unittest.TestCase):
+
+ def url(self):
+ if not os.environ.get('ECHO_URL'):
+ print 'You must set $ECHO_URL'
+ sys.exit(1)
+ url = URL(os.environ['ECHO_URL'])
+ return url
+
+class TestEnviron(EchoTest):
+
+ def setUp(self):
+ self.page = self.url().fetch(environ='true')
+ self.environ = parse_environ(self.page)
+
+ def testRequiredKeys(self):
+ environ = self.environ
+ url = self.url()
+ required_keys = 'REQUEST_METHOD SCRIPT_NAME PATH_INFO QUERY_STRING SERVER_NAME SERVER_PORT wsgi.errors wsgi.input wsgi.multiprocess wsgi.multithread wsgi.version'
+ for key in required_keys.split():
+ assert environ.has_key(key), "Key %r missing from %r" % (key, environ)
+ self.assertEqual(environ['PATH_INFO'], '')
+ self.assertEqual(environ['SCRIPT_NAME'], url.path)
+ self.assertEqual(environ['REQUEST_METHOD'], 'GET')
+ self.assertEqual(environ['QUERY_STRING'], 'environ=true')
+ self.assertEqual(environ['SERVER_PORT'], str(url.port))
+ self.assertEqual(environ['SERVER_NAME'], url.host)
+ self.assertEqual(environ['HTTP_HOST'], url.location)
+ assert environ['HTTP_USER_AGENT'].startswith('Python-urllib/'), \
+ "HTTP_USER_AGENT should start with 'Python-urllib/': %r" % environ['HTTP_USER_AGENT']
+
+ def testPathInfo(self):
+ sub = self.url() / ''
+ environ = parse_environ(sub.fetch(environ='true'))
+ self.assertEqual(environ['PATH_INFO'], '/')
+ self.assertEqual(environ['SCRIPT_NAME'], self.url().path)
+ sub = self.url() / 'test'
+ environ = parse_environ(sub.fetch(environ='true'))
+ self.assertEqual(environ['PATH_INFO'], '/test')
+ self.assertEqual(environ['SCRIPT_NAME'], self.url().path)
+
+ def test_message(self):
+ data = self.url().fetch(message='test')
+ self.assertEqual(data, 'test')
+ data = self.url().fetch(message='')
+ self.assertEqual(data, '')
+
+############################################################
+## Utility functions
+############################################################
+
+def parse_environ(page):
+ """
+ Parses the environment that echo prints (not perfect, but good
+ enough).
+ """
+ environ = {}
+ for line in page.splitlines():
+ if '=' not in line:
+ # ignore second line of long lines
+ continue
+ name, value = line.split('=', 1)
+ environ[name] = value
+ return environ
+
+class URL:
+
+ def __init__(self, url_string):
+ self.url_string = url_string
+ (self.scheme, self.location, self.path, self.query,
+ self.fragment) = urlparse.urlsplit(url_string)
+ if ':' in self.location:
+ self.host, self.port = self.location.split(':', 1)
+ else:
+ self.host = self.location
+ if self.scheme == 'http':
+ self.port = '80'
+ elif self.scheme == 'https':
+ self.port = '443'
+ else:
+ assert 0, "Unknown scheme: %r" % scheme
+ self.port = int(self.port)
+
+ def fetch(self, **kw):
+ query = '&'.join(['%s=%s' % (urllib.quote(k), urllib.quote(v))
+ for k, v in kw.items()])
+ url = self.url_string
+ if query:
+ url += '?' + query
+ f = urllib.urlopen(url)
+ page = f.read()
+ f.close()
+ return page
+
+ def __div__(self, path_part):
+ return self.__class__(self.url_string + '/' + path_part)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/paste/tests/fixture.py b/paste/tests/fixture.py
new file mode 100644
index 0000000..b69c88c
--- /dev/null
+++ b/paste/tests/fixture.py
@@ -0,0 +1,307 @@
+import sys
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+import types
+import re
+import smtplib
+from py.test.collect import Module, PyCollector
+from paste.util import thirdparty
+doctest = thirdparty.load_new_module('doctest', (2, 4))
+from paste import wsgilib
+from paste import lint
+
+class NoDefault:
+ pass
+
+class Dummy(object):
+
+ def __init__(self, **kw):
+ for name, value in kw.items():
+ if name.startswith('method_'):
+ name = name[len('method_'):]
+ value = DummyMethod(value)
+ setattr(self, name, value)
+
+class DummyMethod(object):
+
+ def __init__(self, return_value):
+ self.return_value = return_value
+
+ def __call__(self, *args, **kw):
+ return self.return_value
+
+class ParamCollector(PyCollector):
+
+ def collect_function(self, extpy):
+ if not extpy.check(func=1, basestarts='test_'):
+ return
+ func = extpy.resolve()
+ if hasattr(func, 'params'):
+ params = func.params
+ for i, param in enumerate(params):
+ item = self.Item(extpy, *param)
+ item.name = item.name + '.%i' % i
+ yield item
+ else:
+ yield self.Item(extpy)
+
+class DoctestCollector(PyCollector):
+
+ def __init__(self, extpy_or_module):
+ if isinstance(extpy_or_module, types.ModuleType):
+ self.module = extpy_or_module
+ self.extpy = None
+ else:
+ self.extpy = extpy_or_module
+ self.module = self.extpy.getpymodule()
+
+ def __call__(self, extpy):
+ # we throw it away, because this has been set up to explicitly
+ # check another module; maybe this isn't clean
+ if self.extpy is None:
+ self.extpy = extpy
+ return self
+
+ def __iter__(self):
+ finder = doctest.DocTestFinder()
+ tests = finder.find(self.module)
+ for t in tests:
+ yield DoctestItem(self.extpy, t)
+
+class DoctestItem(DoctestCollector.Item):
+
+ def __init__(self, extpy, doctestitem, *args):
+ self.extpy = extpy
+ self.doctestitem = doctestitem
+ self.name = extpy.basename
+ self.args = args
+
+ def execute(self, driver):
+ runner = doctest.DocTestRunner()
+ driver.setup_path(self.extpy)
+ target, teardown = driver.setup_method(self.extpy)
+ try:
+ (failed, tried), run_output = capture_stdout(runner.run, self.doctestitem)
+ if failed:
+ raise self.Failed(msg=run_output, tbindex=-2)
+
+ finally:
+ if teardown:
+ teardown(target)
+
+def capture_stdout(func, *args, **kw):
+ newstdout = StringIO()
+ oldstdout = sys.stdout
+ sys.stdout = newstdout
+ try:
+ result = func(*args, **kw)
+ finally:
+ sys.stdout = oldstdout
+ return result, newstdout.getvalue()
+
+def assert_error(func, *args, **kw):
+ kw.setdefault('error', Exception)
+ kw.setdefault('text_re', None)
+ error = kw.pop('error')
+ text_re = kw.pop('text_re')
+ if text_re and isinstance(text_re, str):
+ real_text_re = re.compile(text_re, re.S)
+ else:
+ real_text_re = text_re
+ try:
+ value = func(*args, **kw)
+ except error, e:
+ if real_text_re and not real_text_re.search(str(e)):
+ assert False, (
+ "Exception did not match pattern; exception:\n %r;\n"
+ "pattern:\n %r"
+ % (str(e), text_re))
+ except Exception, e:
+ assert False, (
+ "Exception type %s should have been raised; got %s instead (%s)"
+ % (error, e.__class__, e))
+ else:
+ assert False, (
+ "Exception was expected, instead successfully returned %r"
+ % (value))
+
+def sorted(l):
+ l = list(l)
+ l.sort()
+ return l
+
+
+def fake_request(application, path_info='', use_lint=True, **environ):
+ """
+ Runs the application in a fake environment, returning a response object
+ """
+ if use_lint:
+ application = lint.middleware(application)
+ status, headers, body, errors = wsgilib.raw_interactive(
+ application, path_info, **environ)
+ res = FakeResponse(status, headers, body, errors)
+ if res.errors:
+ print 'Errors:'
+ print res.errors
+ return res
+
+class FakeResponse(object):
+
+ def __init__(self, status, headers, body, errors):
+ self.status = status
+ self.headers = headers
+ self.body = body
+ self.errors = errors
+
+ def status_int__get(self):
+ return int(self.status.split()[0])
+ status_int = property(status_int__get)
+
+ def all_ok(self):
+ """
+ Asserts that there were no errors and the status was 200 OK
+ """
+ assert not self.errors, (
+ "Response had errors: %s" % self.errors)
+ assert self.status_int == 200, (
+ "Response did not return 200 OK: %r" % self.status)
+
+ def header(self, name, default=NoDefault):
+ """
+ Returns the named header; an error if there is not exactly one
+ matching header (unless you give a default -- always an error if
+ there is more than one header)
+ """
+ found = None
+ for cur_name, value in self.headers:
+ if cur_name.lower() == name.lower():
+ assert not found, (
+ "Ambiguous header: %s matches %r and %r"
+ % (name, found, value))
+ found = value
+ if found is None:
+ if default is NoDefault:
+ raise KeyError(
+ "No header found: %r (from %s)"
+ % (name, ', '.join([n for n, v in self.headers])))
+ else:
+ return default
+ return found
+
+ def all_headers(self, name):
+ """
+ Gets all headers, returns as a list
+ """
+ found = []
+ for cur_name, value in self.headers:
+ if cur_name.lower() == name.lower():
+ found.append(value)
+ return found
+
+ def __contains__(self, s):
+ return self.body.find(s) != -1
+
+ def __repr__(self):
+ return '<Response %s %r>' % (self.status, self.body[:20])
+
+ def __str__(self):
+ return 'Response: %s\n%s\n%s' % (
+ self.status,
+ '\n'.join(['%s: %s' % (n, v) for n, v in self.headers]),
+ self.body)
+
+class Dummy_smtplib(object):
+
+ existing = None
+
+ def __init__(self, server):
+ assert not self.existing, (
+ "smtplib.SMTP() called again before Dummy_smtplib.existing.reset() "
+ "called.")
+ self.server = server
+ self.open = True
+ self.__class__.existing = self
+
+ def quit(self):
+ assert self.open, (
+ "Called %s.quit() twice" % self)
+ self.open = False
+
+ def sendmail(self, from_address, to_addresses, msg):
+ self.from_address = from_address
+ self.to_addresses = to_addresses
+ self.message = msg
+
+ def install(cls):
+ smtplib.SMTP = cls
+
+ install = classmethod(install)
+
+ def reset(self):
+ assert not self.open, (
+ "SMTP connection not quit")
+ self.__class__.existing = None
+
+class FakeFilesystem(object):
+
+ def __init__(self):
+ self.files = {}
+
+ def make_file(self, filename, content):
+ self.files[filename] = content
+
+ def open(self, filename, mode='r'):
+ if not self.files.has_key(filename):
+ raise IOError("[FakeFS] No such file or directory: %r" % filename)
+ return ReaderFile(FakeFile(filename, self.files[filename]))
+
+
+class FakeFile(object):
+
+ def __init__(self, filename, content=None):
+ self.filename = filename
+ self.content = content
+
+ def open(self, mode):
+ if mode == 'r' or mode == 'rb':
+ if self.content is None:
+ raise IOError("[FakeFS] No such file or directory: %r" % filename)
+ return ReaderFile(self)
+ elif mode == 'w' or mode == 'wb':
+ return WriterFile(self)
+ else:
+ assert 0, "Mode %r not yet implemented" % mode
+
+class ReaderFile(object):
+
+ def __init__(self, file):
+ self.file = file
+ self.stream = StringIO(self.file.content)
+ self.open = True
+
+ def read(self, *args):
+ return self.stream.read(*args)
+
+ def close(self):
+ assert self.open, (
+ "Closing open file")
+ self.open = False
+
+class WriterFile(object):
+
+ def __init__(self, file):
+ self.file = file
+ self.stream = StringIO()
+ self.open = True
+
+ def write(self, arg):
+ self.stream.write(arg)
+
+ def close(self):
+ assert self.open, (
+ "Closing an open file")
+ self.open = False
+
+
+
+
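
The fake_request()/FakeResponse pair above is what the test modules below are
built on; a minimal sketch of the idiom (the application is illustrative, and
importing the fixture module requires py.test and the bundled doctest):

    from paste.tests.fixture import fake_request

    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-type', 'text/plain')])
        return ['hello']

    res = fake_request(hello_app, '/')
    res.all_ok()                        # no errors and a 200 status
    assert 'hello' in res               # __contains__ searches the body
    assert res.header('content-type') == 'text/plain'
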
diff --git a/paste/tests/pyconfig_data/context.py b/paste/tests/pyconfig_data/context.py
new file mode 100644
index 0000000..73fa626
--- /dev/null
+++ b/paste/tests/pyconfig_data/context.py
@@ -0,0 +1,8 @@
+def simplehook():
+ return 'calc'
+simplehook.config_hook = True
+
+def complexhook(context):
+ return context
+complexhook.config_hook = True
+
diff --git a/paste/tests/pyconfig_data/deriv.conf b/paste/tests/pyconfig_data/deriv.conf
new file mode 100644
index 0000000..c7cdf3c
--- /dev/null
+++ b/paste/tests/pyconfig_data/deriv.conf
@@ -0,0 +1,3 @@
+import os
+
+test1 = test1 + '+another'
diff --git a/paste/tests/pyconfig_data/nest1.conf b/paste/tests/pyconfig_data/nest1.conf
new file mode 100644
index 0000000..4eb06c6
--- /dev/null
+++ b/paste/tests/pyconfig_data/nest1.conf
@@ -0,0 +1,3 @@
+a = 1
+b = 'shadow'
+
diff --git a/paste/tests/pyconfig_data/nest2.conf b/paste/tests/pyconfig_data/nest2.conf
new file mode 100644
index 0000000..c70b217
--- /dev/null
+++ b/paste/tests/pyconfig_data/nest2.conf
@@ -0,0 +1,2 @@
+b = 2
+c = 3
diff --git a/paste/tests/pyconfig_data/one.py b/paste/tests/pyconfig_data/one.py
new file mode 100644
index 0000000..e03cf8b
--- /dev/null
+++ b/paste/tests/pyconfig_data/one.py
@@ -0,0 +1,2 @@
+name1 = 'n1'
+name2 = 'n2'
diff --git a/paste/tests/test_authentication.py b/paste/tests/test_authentication.py
new file mode 100644
index 0000000..f26e5a4
--- /dev/null
+++ b/paste/tests/test_authentication.py
@@ -0,0 +1,76 @@
+from paste import wsgilib
+from paste import login
+from fixture import *
+
+from_cmdline = 0
+
+def application(environ, start_response):
+ if environ.has_key('REMOTE_USER'):
+ start_response('200 OK', [('Content-type', 'text/plain')])
+ return ['Logged in: ' + environ['REMOTE_USER']]
+ else:
+ start_response('401 Unauthorized',
+ [('Content-type', 'text/plain')])
+ return ['Not logged in.']
+
+class AuthTest(login.Authenticator):
+ def check_auth(self, username, password):
+ return username == password
+
+def report(res):
+ if from_cmdline:
+ print res
+
+# @@ this should be part of a test fixture, I think
+def mk_basic_auth_app(**kw):
+ kw['http_login'] = True
+ kw['authenticator'] = AuthTest
+ app = login.middleware(application, **kw)
+ return app
+
+def test_basicauth_noauth():
+ res = fake_request(mk_basic_auth_app(), '/')
+ assert res.status_int == 401
+ report(res)
+
+def run_userpass(user, password):
+ userpass = user + ':' + password
+ env = {'HTTP_AUTHORIZATION' : 'Basic ' + userpass.encode('base64')}
+ return fake_request(mk_basic_auth_app(), '/', **env)
+
+def test_basicauth_okuser():
+ res = run_userpass('test', 'test') # should succeed
+ assert res.status_int == 200
+ report(res)
+
+def test_basicauth_baduser():
+ res = run_userpass('test', 'badpass') # should fail
+ assert res.status_int == 401
+ report(res)
+
+def test_basicauth_cookie():
+ res = run_userpass('test', 'test') # should succeed
+ assert res.status_int == 200
+ report(res)
+ cookie_val = res.header('SET-COOKIE')
+ print "cookie value", cookie_val
+ app = mk_basic_auth_app()
+ env = {'HTTP_COOKIE': cookie_val}
+ res = fake_request(mk_basic_auth_app(), '/', **env)
+ report(res)
+ assert res.status_int == 200
+
+ # ensure that secret is actually used
+ res = fake_request(mk_basic_auth_app(secret='bogus'),
+ '/', **env)
+ report(res)
+ assert res.status_int == 401
+
+if __name__ == '__main__':
+ from_cmdline = 1
+ test_basicauth_noauth()
+ test_basicauth_okuser()
+ test_basicauth_baduser()
+ test_basicauth_cookie()
+
+
diff --git a/paste/tests/test_error_middleware.py b/paste/tests/test_error_middleware.py
new file mode 100644
index 0000000..f7731ae
--- /dev/null
+++ b/paste/tests/test_error_middleware.py
@@ -0,0 +1,63 @@
+from fixture import *
+from paste.error_middleware import ErrorMiddleware
+from paste import lint
+
+def do_request(app, expect_status=500):
+ res = fake_request(ErrorMiddleware(lint.middleware(app)),
+ **{'paste.config': {'debug': True}})
+ assert res.status_int == expect_status
+ return res
+
+def bad_app():
+ "No argument list!"
+ return None
+
+def start_response_app(environ, start_response):
+ "raise error before start_response"
+ raise ValueError("hi")
+
+def after_start_response_app(environ, start_response):
+ start_response("200 OK", [('Content-type', 'text/plain')])
+ raise ValueError('error2')
+
+def iter_app(environ, start_response):
+ start_response("200 OK", [('Content-type', 'text/plain')])
+ return yielder(['this', ' is ', ' a', None])
+
+def yielder(args):
+ for arg in args:
+ if arg is None:
+ raise ValueError("None raises error")
+ yield arg
+
+def test_makes_exception():
+ res = do_request(bad_app)
+ print res
+ assert '<html' in res
+ assert 'bad_app() takes no arguments (2 given' in res
+ assert 'iterator = application(environ, start_response_wrapper)' in res
+ assert 'lint.py' in res
+ assert 'error_middleware.py' in res
+
+def test_start_res():
+ res = do_request(start_response_app)
+ print res
+ assert 'ValueError: hi' in res
+ assert 'test_error_middleware.py' in res
+ assert 'line 17 in <tt>start_response_app</tt>' in res
+
+def test_after_start():
+ res = do_request(after_start_response_app, 200)
+ print res
+ assert 'ValueError: error2' in res
+ assert 'line 21' in res
+
+def test_iter_app():
+ res = do_request(iter_app, 200)
+ print res
+ assert 'None raises error' in res
+ assert 'yielder' in res
+
+
+
+
diff --git a/paste/tests/test_pyconfig.py b/paste/tests/test_pyconfig.py
new file mode 100644
index 0000000..1b93d02
--- /dev/null
+++ b/paste/tests/test_pyconfig.py
@@ -0,0 +1,46 @@
+import os
+from paste import pyconfig
+from py.test import raises
+
+def path(name):
+ return os.path.join(os.path.dirname(os.path.abspath(__file__)),
+ 'pyconfig_data', name)
+
+def test_load():
+ conf = pyconfig.load(path('one.py'))
+ assert conf['name1'] == 'n1'
+ assert conf['name2'] == "n2"
+ raises(KeyError, "conf['name3']")
+
+def test_nest():
+ conf = pyconfig.load(path('nest1.conf'))
+ conf.load(path('nest2.conf'))
+ assert conf['a'] == 1
+ assert conf['b'] == 2
+ assert conf['c'] == 3
+
+def test_derivative():
+ conf = pyconfig.Config()
+ conf.load_dict({'test1': 'a'})
+ assert conf['test1'] == 'a'
+ conf.load(path('deriv.conf'))
+ assert conf['test1'] == 'a+another'
+ conf = pyconfig.Config()
+ conf.load_dict({'test1': 'b'})
+ conf.load(path('deriv.conf'))
+ assert conf['test1'] == 'b+another'
+ assert not conf.has_key('os')
+
+def test_command():
+ conf = pyconfig.load(path('one.py'))
+ extra = conf.load_commandline(
+ ['-h', '--host', 'localhost', '--port=8080', 'arg1', '-f', 'arg2'],
+ bool_options=['help', 'verbose'],
+ aliases={'f': 'config_file', 'h': 'help', 'v': 'verbose'})
+ assert extra == ['arg1']
+ assert conf['name1'] == 'n1'
+ assert conf['host'] == 'localhost'
+ assert conf['port'] == 8080
+ assert conf['config_file'] == 'arg2'
+ raises(KeyError, "conf['h']")
+ raises(KeyError, "conf['f']")
diff --git a/paste/tests/test_urlparser.py b/paste/tests/test_urlparser.py
new file mode 100644
index 0000000..b41ecfa
--- /dev/null
+++ b/paste/tests/test_urlparser.py
@@ -0,0 +1,83 @@
+from paste.urlparser import *
+from fixture import fake_request
+
+
+def path(name):
+ return os.path.join(os.path.dirname(os.path.abspath(__file__)),
+ 'urlparser_data', name)
+
+def make_parser(name):
+ return URLParser(path(name), name, {'index_names': ['index', 'Main']})
+
+def test_find_file():
+ p = make_parser('find_file')
+ res = fake_request(p, '/')
+ assert 'index1' in res
+ assert res.header('content-type') == 'text/plain'
+ res = fake_request(p, '/index')
+ assert 'index1' in res
+ assert res.header('content-type') == 'text/plain'
+ res = fake_request(p, '/index.txt')
+ assert 'index1' in res
+ assert res.header('content-type') == 'text/plain'
+ res = fake_request(p, '/test2.html')
+ assert 'test2' in res
+ assert res.header('content-type') == 'text/html'
+
+def test_deep():
+ p = make_parser('deep')
+ res = fake_request(p, '/')
+ assert 'index2' in res
+ res = fake_request(p, '/sub')
+ assert res.status_int == 301
+ print res
+ assert res.header('location') == 'http://localhost/sub/'
+ assert 'href="http://localhost/sub/"' in res
+ res = fake_request(p, '/sub/')
+ assert 'index3' in res
+
+def test_python():
+ p = make_parser('python')
+ res = fake_request(p, '/simpleapp')
+ res.all_ok()
+ assert 'test1' in res
+ assert res.header('test-header') == 'TEST!'
+ assert res.header('content-type') == 'text/html'
+ res = fake_request(p, '/stream')
+ res.all_ok()
+ assert 'test2' in res
+ res = fake_request(p, '/sub/simpleapp')
+ res.all_ok()
+ assert 'subsimple' in res
+
+def test_hook():
+ p = make_parser('hook')
+ res = fake_request(p, '/bob/app')
+ res.all_ok()
+ assert 'user: bob' in res
+ res = fake_request(p, '/tim/')
+ res.all_ok()
+ assert 'index: tim' in res
+
+def test_not_found_hook():
+ p = make_parser('not_found')
+ res = fake_request(p, '/simple/notfound')
+ assert res.status_int == 200
+ assert 'not found' in res
+ res = fake_request(p, '/simple/found')
+ res.all_ok()
+ assert 'is found' in res
+ res = fake_request(p, '/recur/__notfound')
+ assert res.status_int == 404
+ # @@: It's unfortunate that the original path doesn't actually show up
+ assert '/recur/notfound' in res
+ res = fake_request(p, '/recur/__isfound')
+ assert res.status_int == 200
+ assert 'is found' in res
+ res = fake_request(p, '/user/list')
+ res.all_ok()
+ assert 'user: None' in res
+ res = fake_request(p, '/user/bob/list')
+ assert res.status_int == 200
+ assert 'user: bob' in res
+
diff --git a/paste/tests/urlparser_data/__init__.py b/paste/tests/urlparser_data/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/tests/urlparser_data/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/tests/urlparser_data/deep/index.html b/paste/tests/urlparser_data/deep/index.html
new file mode 100644
index 0000000..8913442
--- /dev/null
+++ b/paste/tests/urlparser_data/deep/index.html
@@ -0,0 +1 @@
+index2
diff --git a/paste/tests/urlparser_data/deep/sub/Main.txt b/paste/tests/urlparser_data/deep/sub/Main.txt
new file mode 100644
index 0000000..ec42756
--- /dev/null
+++ b/paste/tests/urlparser_data/deep/sub/Main.txt
@@ -0,0 +1 @@
+index3
diff --git a/paste/tests/urlparser_data/find_file/index.txt b/paste/tests/urlparser_data/find_file/index.txt
new file mode 100644
index 0000000..6be29bc
--- /dev/null
+++ b/paste/tests/urlparser_data/find_file/index.txt
@@ -0,0 +1 @@
+index1
diff --git a/paste/tests/urlparser_data/find_file/test2.html b/paste/tests/urlparser_data/find_file/test2.html
new file mode 100644
index 0000000..180cf83
--- /dev/null
+++ b/paste/tests/urlparser_data/find_file/test2.html
@@ -0,0 +1 @@
+test2
diff --git a/paste/tests/urlparser_data/hook/__init__.py b/paste/tests/urlparser_data/hook/__init__.py
new file mode 100644
index 0000000..9b1055c
--- /dev/null
+++ b/paste/tests/urlparser_data/hook/__init__.py
@@ -0,0 +1,10 @@
+from paste import wsgilib
+
+def urlparser_hook(environ):
+ first, rest = wsgilib.path_info_split(environ.get('PATH_INFO', ''))
+ if not first:
+ # No username
+ return
+ environ['app.user'] = first
+ environ['SCRIPT_NAME'] += '/' + first
+ environ['PATH_INFO'] = rest
diff --git a/paste/tests/urlparser_data/hook/app.py b/paste/tests/urlparser_data/hook/app.py
new file mode 100644
index 0000000..d2714e5
--- /dev/null
+++ b/paste/tests/urlparser_data/hook/app.py
@@ -0,0 +1,5 @@
+def application(environ, start_response):
+ start_response('200 OK', [('Content-type', 'text/html')])
+ return ['user: %s' % environ['app.user']]
+
+
diff --git a/paste/tests/urlparser_data/hook/index.py b/paste/tests/urlparser_data/hook/index.py
new file mode 100644
index 0000000..49e89f0
--- /dev/null
+++ b/paste/tests/urlparser_data/hook/index.py
@@ -0,0 +1,4 @@
+def application(environ, start_response):
+ start_response('200 OK', [('Content-type', 'text/html')])
+ return ['index: %s' % environ['app.user']]
+
diff --git a/paste/tests/urlparser_data/not_found/__init__.py b/paste/tests/urlparser_data/not_found/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/tests/urlparser_data/not_found/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/tests/urlparser_data/not_found/recur/__init__.py b/paste/tests/urlparser_data/not_found/recur/__init__.py
new file mode 100644
index 0000000..48205a5
--- /dev/null
+++ b/paste/tests/urlparser_data/not_found/recur/__init__.py
@@ -0,0 +1,9 @@
+def not_found_hook(environ, start_response):
+ urlparser = environ['paste.urlparser.not_found_parser']
+ path = environ.get('PATH_INFO', '')
+ if not path:
+ return urlparser.not_found(environ, start_response)
+ # Strip off leading _'s
+ path = '/' + path.lstrip('/').lstrip('_')
+ environ['PATH_INFO'] = path
+ return urlparser(environ, start_response)
diff --git a/paste/tests/urlparser_data/not_found/recur/isfound.txt b/paste/tests/urlparser_data/not_found/recur/isfound.txt
new file mode 100644
index 0000000..c8b8fab
--- /dev/null
+++ b/paste/tests/urlparser_data/not_found/recur/isfound.txt
@@ -0,0 +1 @@
+is found
diff --git a/paste/tests/urlparser_data/not_found/simple/__init__.py b/paste/tests/urlparser_data/not_found/simple/__init__.py
new file mode 100644
index 0000000..f1e7faa
--- /dev/null
+++ b/paste/tests/urlparser_data/not_found/simple/__init__.py
@@ -0,0 +1,3 @@
+def not_found_hook(environ, start_response):
+ start_response('200 OK', [('Content-type', 'text/plain')])
+ return ['not found']
diff --git a/paste/tests/urlparser_data/not_found/simple/found.txt b/paste/tests/urlparser_data/not_found/simple/found.txt
new file mode 100644
index 0000000..c8b8fab
--- /dev/null
+++ b/paste/tests/urlparser_data/not_found/simple/found.txt
@@ -0,0 +1 @@
+is found
diff --git a/paste/tests/urlparser_data/not_found/user/__init__.py b/paste/tests/urlparser_data/not_found/user/__init__.py
new file mode 100644
index 0000000..c47f88e
--- /dev/null
+++ b/paste/tests/urlparser_data/not_found/user/__init__.py
@@ -0,0 +1,12 @@
+from paste import wsgilib
+
+def not_found_hook(environ, start_response):
+ urlparser = environ['paste.urlparser.not_found_parser']
+ first, rest = wsgilib.path_info_split(environ.get('PATH_INFO', ''))
+ if not first:
+ # No username
+ return
+ environ['app.user'] = first
+ environ['SCRIPT_NAME'] += '/' + first
+ environ['PATH_INFO'] = rest
+ return urlparser(environ, start_response)
diff --git a/paste/tests/urlparser_data/not_found/user/list.py b/paste/tests/urlparser_data/not_found/user/list.py
new file mode 100644
index 0000000..f6228f0
--- /dev/null
+++ b/paste/tests/urlparser_data/not_found/user/list.py
@@ -0,0 +1,3 @@
+def application(environ, start_response):
+ start_response('200 OK', [('Content-type', 'text/plain')])
+ return ['user: %s' % environ.get('app.user')]
diff --git a/paste/tests/urlparser_data/python/__init__.py b/paste/tests/urlparser_data/python/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/tests/urlparser_data/python/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/tests/urlparser_data/python/simpleapp.py b/paste/tests/urlparser_data/python/simpleapp.py
new file mode 100644
index 0000000..cbef9f1
--- /dev/null
+++ b/paste/tests/urlparser_data/python/simpleapp.py
@@ -0,0 +1,6 @@
+def application(environ, start_response):
+ start_response('200 OK', [('Content-type', 'text/html'),
+ ('test-header', 'TEST!')])
+ return ['test1']
+
+
diff --git a/paste/tests/urlparser_data/python/stream.py b/paste/tests/urlparser_data/python/stream.py
new file mode 100644
index 0000000..121b4d1
--- /dev/null
+++ b/paste/tests/urlparser_data/python/stream.py
@@ -0,0 +1,7 @@
+def stream():
+ def app(environ, start_response):
+ writer = start_response('200 OK', [('Content-type', 'text/html')])
+ writer('te')
+ writer('st')
+ return ['2']
+ return app
diff --git a/paste/tests/urlparser_data/python/sub/__init__.py b/paste/tests/urlparser_data/python/sub/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/tests/urlparser_data/python/sub/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/tests/urlparser_data/python/sub/simpleapp.py b/paste/tests/urlparser_data/python/sub/simpleapp.py
new file mode 100644
index 0000000..fd90966
--- /dev/null
+++ b/paste/tests/urlparser_data/python/sub/simpleapp.py
@@ -0,0 +1,6 @@
+def application(environ, start_response):
+ start_response('200 OK', [('Content-type', 'text/html'),
+ ('test-header', 'TEST!')])
+ return ['subsimple']
+
+
diff --git a/paste/threaded.py b/paste/threaded.py
new file mode 100644
index 0000000..6abbd32
--- /dev/null
+++ b/paste/threaded.py
@@ -0,0 +1,436 @@
+# Note, this is totally incomplete and untested
+import threading
+import marshal
+import Queue
+import select
+import socket
+import errno
+import logging
+import atexit
+import BaseHTTPServer
+import mimetools
+from cStringIO import StringIO
+
+__version__ = '0.1'
+
+logger = logging.getLogger('wsgiserver.threaded')
+errorLog = logging.getLogger('wsgiserver.apperrors')
+
+intLength = len(marshal.dumps(int(1)))
+
+server = None
+
+class NotEnoughDataError(Exception):
+ pass
+
+class ProtocolError(Exception):
+ pass
+
+class ThreadedWSGIServer(object):
+
+ def __init__(self, application):
+ self.application = application
+ threadCount = self.setting('StartServerThreads')
+ self._maxServerThreads = self.setting('MaxServerThreads')
+ self._minServerThreads = self.setting('MinServerThreads')
+ self._threadPool = []
+ self._threadCount = 0
+ self._threadUseCounter = []
+ self._requestQueue = Queue.Queue(self._maxServerThreads * 2)
+ self._addr = {}
+ # @@: Should load persistently
+ self._requestID = 0
+
+ logger.info('Creating %i threads' % threadCount)
+ for i in range(threadCount):
+ self.spawnThread()
+
+ #self.recordPID() @@: ?
+
+ self._socketHandlers = {}
+ self._handlerCache = {}
+ self._sockets = {}
+
+ self.addSocketHandlers()
+ self.running = True
+ atexit.register(self.awakeSelect)
+ atexit.register(self.shutdown)
+ self.readyForRequests()
+
+ def addSocketHandler(self, handlerClass, serverAddress=None):
+ """
+ Adds a socket handler for `serverAddress`, which is typically
+ a tuple ``(host, port)``.
+ """
+ if serverAddress is None:
+ serverAddress = self.address(handlerClass.defaultServerAddress())
+ self._socketHandlers[serverAddress] = handlerClass
+ self._handlerCache[serverAddress] = []
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.bind(serverAddress)
+ sock.listen(1024)
+ self._sockets[serverAddress] = sock
+ logger.info('Listening on: %s' % serverAddress)
+
+ def readyForRequests(self):
+ logger.info('Ready for requests')
+
+ def spawnThread(self):
+ """
+ Create a new worker thread; threads run the `threadLoop`
+ method.
+ """
+ t = threading.Thread(target=self.threadLoop)
+ t.processing = False
+ self._threadPool.append(t)
+ self._threadCount += 1
+ t.start()
+ logger.info('New thread spawned, threadcount=%s' %
+ self._threadCount)
+
+ def absorbThread(self, count=1):
+ """
+ Absorb a thread.
+ """
+ for i in range(count):
+ self._requestQueue.put(None)
+ self._threadCount -= 1
+ for t in self._threadPool:
+ if not t.isAlive():
+ t.join()
+ self._threadPool.remove(t)
+ logger.info('Thread absorbed, threadcount=%s' %
+ len(self._threadPool))
+
+ def threadLoop(self):
+ self.initThread()
+ t = threading.currentThread()
+ t.processing = False
+ try:
+ while 1:
+ try:
+ handler = self._requestQueue.get()
+ # None means time to quit
+ if handler is None:
+ break
+ t.processing = True
+ try:
+ handler.handleRequest()
+ except:
+ logger.exception('Error handling request')
+ handler.close()
+ t.processing = False
+ except Queue.Empty:
+ pass
+ finally:
+ self.delThread()
+
+ def initThread(self):
+ pass
+
+ def delThread(self):
+ pass
+
+ def awakeSelect(self):
+ """
+ The ``select()`` in `run` is blocking, so when
+ we shut down we have to make a connection to unblock it.
+ This method does that; it is registered with ``atexit``.
+ """
+
+ for addr in self._sockets.keys():
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ sock.connect(addr)
+ sock.close()
+ except:
+ pass
+
+ def shutdown(self):
+ self.running = False
+
+ def run(self, timeout=1):
+ while 1:
+ if not self.running:
+ return
+
+ try:
+ input, output, exc = select.select(
+ self._sockets.values(), [], [], timeout)
+ except select.error, v:
+ if v[0] == errno.EINTR or v[0] == 0:
+ break
+ else:
+ raise
+
+ for sock in input:
+ self._requestID += 1
+ client, addr = sock.accept()
+ serverAddress = sock.getsockname()
+ try:
+ handler = self._handlerCache[serverAddress].pop()
+ except IndexError:
+ handler = self._socketHandlers[serverAddress](self, serverAddress)
+ handler.activate(client, self._requestID)
+ self._requestQueue.put(handler)
+
+class Handler(object):
+
+ def __init__(self, server, serverAddress):
+ self.server = server
+ self.serverAddress = serverAddress
+
+ def activate(self, sock, requestID):
+ self.requestID = requestID
+ self.sock = sock
+
+ def close(self):
+ self.sock = None
+ self.server._handlerCache[self.serverAddress].append(self)
+
+ def handleRequest(self):
+ raise NotImplementedError
+
+class ModWebKitHandler(Handler):
+
+ protocolName = 'webkit'
+
+ def receiveDict(self):
+ """
+ Utility function to receive a marshalled dictionary from
+ the socket. Returns None if the request was empty.
+ """
+ chunk = ''
+ missing = intLength
+ while missing > 0:
+ block = self.sock.recv(missing)
+ if not block:
+ self.sock.close()
+ if len(chunk) == 0:
+ # We probably awakened due to awakeSelect being called.
+ return None
+ else:
+ # We got a partial request -- something went wrong.
+ raise NotEnoughDataError, 'received only %d of %d bytes when receiving dictLength' % (len(chunk), intLength)
+ chunk += block
+ missing = intLength - len(chunk)
+ try:
+ dictLength = marshal.loads(chunk)
+ except ValueError:
+ logger.warn('bad marshal data for webkit adapter interface; '
+ 'you can only connect to %s via an adapter, like '
+ 'mod_webkit or wkcgi, not with a browser'
+ % self.serverAddress[1])
+ raise
+ if type(dictLength) != type(1):
+ self.sock.close()
+ raise ProtocolError, "Invalid AppServer protocol"
+ chunk = ''
+ missing = dictLength
+ while missing > 0:
+ block = self.sock.recv(missing)
+ if not block:
+ self.sock.close()
+ raise NotEnoughDataError, 'received only %d of %d bytes when receiving dict' % (len(chunk), dictLength)
+ chunk += block
+ missing = dictLength - len(chunk)
+ return marshal.loads(chunk)
+
+ def defaultServerAddress(cls):
+ return ('127.0.0.1', 8086)
+ defaultServerAddress = classmethod(defaultServerAddress)
+
+ def handleRequest(self):
+ data = []
+ environ = self.receiveDict()
+ if not environ:
+ return
+ if environ.get('REQUEST_URI'):
+ requestURI = environ['REQUEST_URI']
+ else:
+ requestURI = environ.get('SCRIPT_NAME', '') + environ.get('PATH_INFO', '')
+ query = environ.get('QUERY_STRING')
+ if query:
+ requestURI += '?' + query
+ environ['wsgi.input'] = self.sock.makefile('rb', 8012)
+ environ['wsgi.errors'] = LoggingError(errorLog)
+ environ['wsgi.version'] = '1.0'
+ environ['wsgi.multithread'] = True
+ environ['wsgi.multiprocess'] = False
+ output = WebKitStreamOut(self.sock)
+
+ def start(status, headers):
+ output.write('Status: %s\n' % status)
+ for header, value in headers:
+ assert '\n' not in value and '\r' not in value, \
+ "Headers cannot contain newlines (%s: %r)" \
+ % (header, value)
+ assert ':' not in header, \
+ "Headers should not container ':' (%r)" % header
+ output.write('%s: %s\n' % (key, value))
+ return output.write
+
+ try:
+ result = self.server.application(environ, start)
+ if result:
+ try:
+ for data in result:
+ output.write(data)
+ finally:
+ if hasattr(result, 'close'):
+ result.close()
+ except:
+ errorLog.exception('Error in WSGI application')
+
+ output.close()
+ try:
+ self.sock.shutdown(1)
+ self.sock.close()
+ except:
+ # @@: Why the except:?
+ pass
+
+class LoggingError(object):
+
+ def __init__(self, logger):
+ self.logger = logger
+
+ def flush(self):
+ pass
+
+ def write(self, s):
+ self.logger.error(s)
+
+ def writelines(self, seq):
+ for s in seq:
+ self.write(s)
+
+class WebKitStreamOut(object):
+
+ def __init__(self, sock):
+ self.sock = sock
+
+ def write(self, s):
+ self.sock.send(s)
+
+############################################################
+## HTTP
+############################################################
+
+
+class BaseHTTPHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+ """Handles incoming requests. Recreated with every request.
+ Abstract base class.
+ """
+
+ ## This sends certain CGI variables. These are some that
+ ## should be sent, but aren't:
+ ## SERVER_ADDR
+ ## SERVER_PORT
+ ## SERVER_SOFTWARE
+ ## SERVER_NAME
+ ## HTTP_CONNECTION
+ ## SERVER_PROTOCOL
+ ## HTTP_KEEP_ALIVE
+
+ ## These I don't think are needed:
+ ## DOCUMENT_ROOT
+ ## PATH_TRANSLATED
+ ## GATEWAY_INTERFACE
+ ## PATH
+ ## SERVER_SIGNATURE
+ ## SCRIPT_FILENAME (?)
+ ## SERVER_ADMIN (?)
+
+ server_version = 'WSGIServer/%s' % __version__
+
+ def handleRequest(self):
+ """
+ Actually performs the request, creating the environment and
+ calling self.doTransaction(env, myInput) to perform the
+ response.
+ """
+ self.server_version = 'Webware/0.1'
+ env = {}
+ if self.headers.has_key('Content-Type'):
+ env['CONTENT_TYPE'] = self.headers['Content-Type']
+ del self.headers['Content-Type']
+ self.headersToEnviron(self.headers, env)
+ env['REMOTE_ADDR'], env['REMOTE_PORT'] = map(str, self.client_address)
+ env['REQUEST_METHOD'] = self.command
+ path = self.path
+ if path.find('?') != -1:
+ # @@: should REQUEST_URI include QUERY_STRING?
+ env['REQUEST_URI'], env['QUERY_STRING'] = path.split('?', 1)
+ else:
+ env['REQUEST_URI'] = path
+ env['QUERY_STRING'] = ''
+ env['PATH_INFO'] = env['REQUEST_URI']
+ env['SCRIPT_NAME'] = ''
+ myInput = ''
+ if self.headers.has_key('Content-Length'):
+ myInput = self.rfile.read(int(self.headers['Content-Length']))
+ self.doTransaction(env, myInput)
+
+ do_GET = do_POST = do_HEAD = handleRequest
+ # These methods are used in WebDAV requests:
+ do_OPTIONS = do_PUT = do_DELETE = handleRequest
+ do_MKCOL = do_COPY = do_MOVE = handleRequest
+ do_PROPFIND = handleRequest
+
+ def headersToEnviron(self, headers, env):
+ """Use a simple heuristic to convert all the headers to
+ environmental variables..."""
+ for header, value in headers.items():
+ env['HTTP_%s' % (header.upper().replace('-', '_'))] = value
+ return env
+
+ def processResponse(self, data):
+ """
+ Takes a string (like what a CGI script would print) and
+ sends the actual HTTP response (response code, headers, body).
+ """
+ s = StringIO(data)
+ headers = mimetools.Message(s)
+ self.sendStatus(headers)
+ self.sendHeaders(headers)
+ self.sendBody(s)
+
+ def sendStatus(self, status):
+ status = str(status)
+ pos = status.find(' ')
+ if pos == -1:
+ code = int(status)
+ message = ''
+ else:
+ code = int(status[:pos])
+ message = status[pos:].strip()
+ self.send_response(code, message)
+
+ def sendHeaders(self, headers):
+ for header, value in headers.items():
+ self.send_header(header, value)
+ self.end_headers()
+
+ def sendBody(self, bodyFile):
+ self.wfile.write(bodyFile.read())
+ bodyFile.close()
+
+ def log_message(self, format, *args):
+ self.server.logMessage(format % args)
+
+ def log_request(self, *args, **kw):
+ pass
+
+
+class HTTPHandler(Handler):
+
+ protocolName = 'http'
+
+ def defaultServerAddress(cls):
+ # @@: 127.0.0.1 isn't very useful
+ return ('127.0.0.1', 80)
+ defaultServerAddress = classmethod(defaultServerAddress)
+
+ def handleRequest(self):
+ baseHandler = BaseHTTPHandler(req, None, self)
+
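
ModWebKitHandler.receiveDict() above defines the adapter wire format: a
marshalled integer giving the byte length of a marshalled dictionary, followed
by that dictionary. A sketch of the sending (adapter) side of that exchange
(function and variable names are illustrative):

    import marshal
    import socket

    def send_environ(addr, environ):
        # Mirrors what receiveDict() reads: first the marshalled length of the
        # marshalled dict (a fixed-size int), then the marshalled dict itself.
        payload = marshal.dumps(environ)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(addr)
        sock.sendall(marshal.dumps(len(payload)))
        sock.sendall(payload)
        return sock
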
diff --git a/paste/twisted_wsgi.py b/paste/twisted_wsgi.py
new file mode 100644
index 0000000..022180e
--- /dev/null
+++ b/paste/twisted_wsgi.py
@@ -0,0 +1,247 @@
+# By Peter Hunt
+# The canonical location for this file: http://st0rm.hopto.org:8080/wsgi/
+
+# new twisted.wsgi resource which uses the wsgiref library, available
+# at http://cvs.eby-sarna.com/wsgiref/
+
+from wsgiref import handlers
+
+from twisted.web import resource, server, static
+from twisted.internet import reactor
+from twisted.python import log
+from twisted import copyright
+
+import string
+import sys
+import os
+import time
+import urllib
+import types
+# TODO: sendfile()!
+
+class WSGIResource(resource.Resource):
+ isLeaf = True
+ def __init__(self, application, async=False):
+ """
+ application - WSGI application to host
+ async - is the application guaranteed to NOT block?
+ """
+ resource.Resource.__init__(self)
+ self.application = application
+ self.async = async
+ def render(self, request):
+ TwistedHandler(self.application, request, self.async)
+ return server.NOT_DONE_YET
+
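+# Example (sketch): hosting a WSGI application with WSGIResource under
+# twisted.web's standard Site/reactor API (my_wsgi_app stands for any WSGI
+# application callable):
+#
+#     from twisted.web import server
+#     from twisted.internet import reactor
+#
+#     site = server.Site(WSGIResource(my_wsgi_app))
+#     reactor.listenTCP(8080, site)
+#     reactor.run()
+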
+class LogWrapper:
+ def write(self, msg):
+ log.err(msg)
+ def flush(self):
+ pass
+
+log_wrapper = LogWrapper()
+
+class TwistedHandler(handlers.BaseHandler):
+ origin_server = True
+ server_software = server.version
+ def __init__(self, application, request, async=False):
+ self.request = request
+ if async:
+ self.run_async(application)
+ else:
+ reactor.callInThread(self.run, application)
+ def async_finish_response(self):
+ """Reads the next block of data yielded from the application generator, similar to finish_response except for async apps"""
+ if not self.result_is_file() and not self.sendfile():
+ if isinstance(self.result, types.GeneratorType):
+ while True:
+ try:
+ data = self.result.next()
+ if len(data) == 0:
+ break # till next time folks!
+ else:
+ self._write(data)
+ except StopIteration:
+ self.close() # we're done
+ break
+ else:
+ self.finish_response()
+ else:
+ self.close()
+ def _resume(self):
+ reactor.callLater(0, self.async_finish_response)
+ def run_async(self, application):
+ try:
+ self.setup_environ()
+ self.environ["twisted.wsgi.resume"] = self._resume
+ self.result = application(self.environ, self.start_response)
+ self.async_finish_response()
+ except:
+ import traceback
+ traceback.print_exc()
+ try:
+ self.handle_error()
+ except:
+ # If we get an error handling an error, just give up already!
+ self.close()
+ raise # ...and let the actual server figure it out.
+ def run(self, application):
+ """Invoke the application synchronously"""
+ # Note to self: don't move the close()! Asynchronous servers shouldn't
+ # call close() from finish_response(), so if you close() anywhere but
+ # the double-error branch here, you'll break asynchronous servers by
+ # prematurely closing. Async servers must return from 'run()' without
+ # closing if there might still be output to iterate over.
+ try:
+ self.setup_environ()
+ self.result = application(self.environ, self.start_response)
+ self.finish_response()
+ except:
+ try:
+ self.handle_error()
+ except:
+ # If we get an error handling an error, just give up already!
+ self.close()
+ raise # ...and let the actual server figure it out.
+ def close(self):
+ handlers.BaseHandler.close(self)
+ self.request.finish()
+ def send_headers(self):
+ """OVERRIDE ME!"""
+ self.cleanup_headers()
+ self.headers_sent = True
+ if self.client_is_modern():
+ self.send_preamble()
+ for (h,v) in self.headers.items():
+ self.request.setHeader(h, v)
+ def send_preamble(self):
+ """Transmit version/status/date/server, via self._write()"""
+ if self.client_is_modern():
+ code,message = self.status.split(" ",1)
+ self.request.setResponseCode(int(code), message)
+ if not self.headers.has_key('Date'):
+ self.headers.add_header('Date', time.asctime(time.gmtime(time.time())))
+ if self.server_software and not self.headers.has_key('Server'):
+ self.headers.add_header('Server', self.server_software)
+ def _write(self,data):
+ """Override in subclass to buffer data for send to client
+
+ It's okay if this method actually transmits the data; BaseHandler
+ just separates write and flush operations for greater efficiency
+ when the underlying system actually has such a distinction.
+ """
+ self.request.write(data)
+
+ def _flush(self):
+ """Override in subclass to force sending of recent '_write()' calls
+
+ It's okay if this method is a no-op (i.e., if '_write()' actually
+        sends the data).
+ """
+ # no-op
+ #self.request.flush()
+
+ def get_stdin(self):
+ """Override in subclass to return suitable 'wsgi.input'"""
+ self.request.content.seek(0)
+ return self.request.content
+
+ def get_stderr(self):
+ """Override in subclass to return suitable 'wsgi.errors'"""
+ return log_wrapper
+
+ def add_cgi_vars(self):
+ """Override in subclass to insert CGI variables in 'self.environ'"""
+ script_name = "/"+string.join(self.request.prepath, '/')
+ serverName = string.split(self.request.getRequestHostname(), ':')[0]
+ if float(copyright.version[:3]) >= 1.3:
+ port = str(self.request.getHost().port)
+ else:
+ port = str(self.request.getHost()[2])
+
+ env = {"SERVER_SOFTWARE": server.version,
+ "SERVER_NAME": serverName,
+ "GATEWAY_INTERFACE": "CGI/1.1",
+ "SERVER_PROTOCOL": self.request.clientproto,
+ "SERVER_PORT": port,
+ "REQUEST_METHOD": self.request.method,
+ "SCRIPT_NAME": script_name, # XXX
+ "SCRIPT_FILENAME": "[wsgi application]",
+ "REQUEST_URI": self.request.uri,
+ "SCRIPT_URI": self.request.uri,
+ "SCRIPT_URL": self.request.path
+ }
+
+ client = self.request.getClient()
+ if client is not None:
+ env['REMOTE_HOST'] = client
+ ip = self.request.getClientIP()
+ if ip is not None:
+ env['REMOTE_ADDR'] = ip
+ pp = self.request.postpath
+ if pp:
+ env["PATH_INFO"] = "/"+string.join(pp, '/')
+
+ qindex = string.find(self.request.uri, '?')
+ if qindex != -1:
+ qs = env['QUERY_STRING'] = self.request.uri[qindex+1:]
+ if '=' in qs:
+ qargs = []
+ else:
+ qargs = [urllib.unquote(x) for x in qs.split('+')]
+ else:
+ env['QUERY_STRING'] = ''
+ qargs = []
+
+        # Propagate HTTP headers
+ for title, header in self.request.getAllHeaders().items():
+ envname = string.upper(string.replace(title, '-', '_'))
+ if title not in ('content-type', 'content-length'):
+ envname = "HTTP_" + envname
+ env[envname] = header
+        # Propagate our environment
+ # dont need to do this since we're updating old environ
+ #for key, value in os.environ.items():
+ # if not env.has_key(key):
+ # env[key] = value
+ self.environ.update(env)
+
+# simple little delayed processing app
+from twisted.internet import defer
+
+def blocking_call():
+ d = defer.Deferred()
+ reactor.callLater(2, d.callback, None)
+ return d
+
+def phase2(result, environ):
+ environ["thetime"] = time.time()
+ environ["twisted.wsgi.resume"]()
+
+def blocking_async_app(environ, start_response):
+ write = start_response("200 OK", [("Content-type","text/plain")])
+ yield "the time right now is " + `time.time()` + "\n"
+ blocking_call().addCallback(phase2, environ)
+ yield ""
+ yield "the time now is " + `environ["thetime"]`
+
+def serve_application(application, port=8080, async=False):
+ resource = WSGIResource(application, async=async)
+ reactor.listenTCP(port, server.Site(resource))
+ reactor.run()
+
+if __name__ == "__main__":
+ import sys
+ import optparse
+ from paste.webkit.wsgiwebkit import webkit
+ parser = optparse.OptionParser()
+ parser.add_option('-p', '--port', dest='port',
+ default=8080, type='int',
+ help="Port to serve on (default 8080)")
+ options, args = parser.parse_args()
+ if not len(args) == 1:
+ print "You must give one path, which is the root of your application"
+ sys.exit(2)
+ app = webkit(args[0])
+ serve_application(app, port=options.port)
+
diff --git a/paste/urlparser.py b/paste/urlparser.py
new file mode 100644
index 0000000..3ce1394
--- /dev/null
+++ b/paste/urlparser.py
@@ -0,0 +1,329 @@
+"""
+WSGI middleware
+
+Application dispatching, based on URL. An instance of `URLParser` is
+an application that loads and delegates to other applications. It
+looks for files in its directory that match the first part of
+PATH_INFO; these may have an extension, but are not required to have
+one, in which case the available files are searched to find the
+appropriate file. If it is ambiguous, a 404 is returned and an error
+logged.
+
+Each URLParser has a set of options, which can be local to that
+URLParser. Also, there are default options:
+
+``index_names``:
+    A list of names (sans extension) to try for the index file.
+
+``hide_extensions``:
+ A list of extensions (with leading ``.``) that should not ever
+ be served.
+
+``ignore_extensions``:
+ Extensions that will be ignored when searching for a file. If
+ the extension is given explicitly, files with these extensions
+ will still be served.
+
+``constructors``:
+ A dictionary of extensions as keys, and application constructors
+ as values. Also the key ``dir`` for directories, and ``*`` when
+ no other constructor is found.
+
+ Each constructor is called like ``constructor(environ, filename)``
+ and should return an application or ``None``.
+
+By default there is a constructor for .py files that loads the module,
+and looks for an attribute ``application``, which is a ready
+application object, or an attribute that matches the module name,
+which is a factory for building applications, and is called with no
+arguments.
+
+URLParser will also look in __init__.py for special overrides. Currently
+the only override is urlparser_hook(environ), which can modify the
+environment; its return value is ignored. You can use this, for example,
+to manipulate SCRIPT_NAME/PATH_INFO (try to keep them consistent with the
+original URL -- but consuming PATH_INFO and moving that to SCRIPT_NAME
+is ok).
+"""
+
+import os
+import sys
+import imp
+import wsgilib
+
+class NoDefault:
+ pass
+
+class URLParser(object):
+
+ default_options = {
+ 'index_names': ['index', 'Index', 'main', 'Main'],
+ 'hide_extensions': ['.pyc', '.bak', '.py~'],
+ 'ignore_extensions': [],
+ 'constructors': {},
+ }
+
+ parsers_by_directory = {}
+
+ # This is lazily initialized
+ init_module = NoDefault
+
+ def __init__(self, directory, base_python_name, add_options=None):
+ if os.path.sep != '/':
+ directory = directory.replace(os.path.sep, '/')
+ self.directory = directory
+ self.add_options = add_options
+ self.base_python_name = base_python_name
+
+ def __call__(self, environ, start_response):
+ environ['paste.urlparser.base_python_name'] = self.base_python_name
+ if self.add_options:
+ if environ.has_key('paste.urlparser.options'):
+ environ['paste.urlparser.options'].update(self.add_options)
+ else:
+ environ['paste.urlparser.options'] = self.add_options.copy()
+ if self.init_module is NoDefault:
+ self.init_module = self.find_init_module(environ)
+ path_info = environ.get('PATH_INFO', '')
+ if not path_info:
+ return self.add_slash(environ, start_response)
+ if (self.init_module
+ and getattr(self.init_module, 'urlparser_hook', None)):
+ self.init_module.urlparser_hook(environ)
+ name, rest_of_path = wsgilib.path_info_split(environ['PATH_INFO'])
+ orig_path_info = environ['PATH_INFO']
+ orig_script_name = environ['SCRIPT_NAME']
+ environ['PATH_INFO'] = rest_of_path
+ if name is not None:
+ environ['SCRIPT_NAME'] = environ.get('SCRIPT_NAME', '') + '/' + name
+ if not name:
+ names = self.option(environ, 'index_names') or []
+ for index_name in names:
+ filename = self.find_file(environ, index_name)
+ if filename:
+ break
+ else:
+ # None of the index files found
+ filename = None
+ else:
+ filename = self.find_file(environ, name)
+ if filename is None:
+ application = None
+ else:
+ application = self.get_application(environ, filename)
+ if not application:
+ if (self.init_module
+ and getattr(self.init_module, 'not_found_hook', None)
+ and environ.get('paste.urlparser.not_found_parser') is not self):
+ not_found_hook = self.init_module.not_found_hook
+ environ['paste.urlparser.not_found_parser'] = self
+ environ['PATH_INFO'] = orig_path_info
+ environ['SCRIPT_NAME'] = orig_script_name
+ return not_found_hook(environ, start_response)
+ if filename is None:
+ if not name:
+ desc = 'one of %s' % ', '.join(
+ self.option(environ, 'index_names') or
+ ['(no index_names defined)'])
+ else:
+ desc = name
+
+ return self.not_found(
+ environ, start_response,
+ 'Tried to load %s from directory %s'
+ % (desc, self.directory))
+ else:
+ environ['wsgi.errors'].write(
+ 'Found resource %s, but could not construct application\n'
+ % filename)
+ return self.not_found(
+ environ, start_response,
+ 'Tried to load %s from directory %s'
+ % (filename, self.directory))
+ return application(environ, start_response)
+
+ def not_found(self, environ, start_response, debug_message=None):
+ status, headers, body = wsgilib.error_response(
+ environ,
+ '404 Not Found',
+ 'The resource at %s could not be found'
+ % wsgilib.construct_url(environ),
+ debug_message=debug_message)
+ start_response(status, headers)
+ return [body]
+
+ def option(self, environ, name):
+ return environ.get('paste.urlparser.options', {}).get(
+ name, self.default_options.get(name))
+
+ def add_slash(self, environ, start_response):
+ """
+ This happens when you try to get to a directory
+ without a trailing /
+ """
+ url = wsgilib.construct_url(environ, with_query_string=False)
+ url += '/'
+ if environ.get('QUERY_STRING'):
+ url += '?' + environ['QUERY_STRING']
+ status = '301 Moved Permanently'
+ status, headers, body = wsgilib.error_response(
+ environ,
+ status,
+ '''
+ <p>The resource has moved to <a href="%s">%s</a>. You should be redirected automatically.</p>''' % (url, url))
+ start_response(status, headers + [('Location', url)])
+ return [body]
+
+ def find_file(self, environ, base_filename):
+        possible = []
+        # Cache a few option lookups to reduce function call overhead
+ ignore_extensions = self.option(environ, 'ignore_extensions')
+ hide_extensions = self.option(environ, 'hide_extensions')
+ for filename in os.listdir(self.directory):
+ base, ext = os.path.splitext(filename)
+ full_filename = os.path.join(self.directory, filename)
+ if (ext in hide_extensions
+ or not base):
+ continue
+ if filename == base_filename:
+ possible.append(full_filename)
+ continue
+ if ext in ignore_extensions:
+ continue
+ if base == base_filename:
+ possible.append(full_filename)
+ if not possible:
+ #environ['wsgi.errors'].write(
+ # 'No file found matching %r in %s\n'
+ # % (base_filename, self.directory))
+ return None
+ if len(possible) > 1:
+ environ['wsgi.errors'].write(
+ 'Ambiguous URL: %s; matches files %s\n'
+ % (wsgilib.construct_url(environ),
+ ', '.join(possible)))
+ return None
+ return possible[0]
+
+ def get_application(self, environ, filename):
+ constructors = self.option(environ, 'constructors')
+ if os.path.isdir(filename):
+ t = 'dir'
+ else:
+ t = os.path.splitext(filename)[1]
+ constructor = constructors.get(t, constructors.get('*'))
+ if constructor is None:
+ #environ['wsgi.errors'].write(
+ # 'No constructor found for %s\n' % t)
+ return constructor
+ app = constructor(environ, filename)
+ if app is None:
+ #environ['wsgi.errors'].write(
+ # 'Constructor %s return None for %s\n' %
+ # (constructor, filename))
+ pass
+ return app
+
+ def register_constructor(cls, extension, constructor):
+ d = cls.default_options['constructors']
+ assert not d.has_key(extension), (
+ "A constructor already exists for the extension %r (%r) "
+ "when attemption to register constructor %r"
+ % (extension, d[extension], constructor))
+ d[extension] = constructor
+ register_constructor = classmethod(register_constructor)
+
+ def get_parser(cls, directory, base_python_name):
+ try:
+ return cls.parsers_by_directory[(directory, base_python_name)]
+ except KeyError:
+ parser = cls(directory, base_python_name)
+ cls.parsers_by_directory[(directory, base_python_name)] = parser
+ return parser
+ get_parser = classmethod(get_parser)
+
+ def find_init_module(self, environ):
+ filename = os.path.join(self.directory, '__init__.py')
+ if not os.path.exists(filename):
+ return None
+ return load_module(environ, filename)
+
+ def __repr__(self):
+ return '<%s directory=%r; module=%s at %s>' % (
+ self.__class__.__name__,
+ self.directory,
+ self.base_python_name,
+ hex(abs(id(self))))
+
+def make_directory(environ, filename):
+ base_python_name = environ['paste.urlparser.base_python_name']
+ if base_python_name:
+ base_python_name += "." + os.path.basename(filename)
+ else:
+ base_python_name = os.path.basename(filename)
+ return URLParser.get_parser(filename, base_python_name)
+
+URLParser.register_constructor('dir', make_directory)
+
+def make_unknown(environ, filename):
+ return wsgilib.send_file(filename)
+
+URLParser.register_constructor('*', make_unknown)
+
+def load_module(environ, filename):
+ base_python_name = environ['paste.urlparser.base_python_name']
+ module_name = os.path.splitext(os.path.basename(filename))[0]
+ if base_python_name:
+ module_name = base_python_name + '.' + module_name
+ return load_module_from_name(environ, filename, module_name,
+ environ['wsgi.errors'])
+
+def load_module_from_name(environ, filename, module_name, errors):
+ if sys.modules.has_key(module_name):
+ return sys.modules[module_name]
+ init_filename = os.path.join(os.path.dirname(filename), '__init__.py')
+ if not os.path.exists(init_filename):
+ try:
+ f = open(init_filename, 'w')
+ except (OSError, IOError), e:
+ errors.write(
+ 'Cannot write __init__.py file into directory %s (%s)\n'
+ % (os.path.dirname(filename), e))
+ return None
+ f.write('#\n')
+ f.close()
+ fp = None
+ if sys.modules.has_key(module_name):
+ return sys.modules[module_name]
+ if '.' in module_name:
+ parent_name = '.'.join(module_name.split('.')[:-1])
+ base_name = module_name.split('.')[-1]
+ parent = load_module_from_name(environ, os.path.dirname(filename),
+ parent_name, errors)
+ else:
+ base_name = module_name
+ fp = None
+ try:
+ fp, pathname, stuff = imp.find_module(
+ base_name, [os.path.dirname(filename)])
+ module = imp.load_module(module_name, fp, pathname, stuff)
+ finally:
+ if fp is not None:
+ fp.close()
+ return module
+
+def make_py(environ, filename):
+ module = load_module(environ, filename)
+ if not module:
+ return None
+ if hasattr(module, 'application') and module.application:
+ return module.application
+ base_name = module.__name__.split('.')[-1]
+ if hasattr(module, base_name):
+ return getattr(module, base_name)()
+ environ['wsgi.errors'].write(
+ "Cound not find application or %s in %s\n"
+ % (base_name, module))
+ return None
+
+URLParser.register_constructor('.py', make_py)
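+
+if __name__ == '__main__':
+    # Illustrative example (editor's sketch, not part of the original module):
+    # register an extra constructor so .txt files are served as static files
+    # (mirroring make_unknown above), then build a parser rooted at the
+    # current directory.  Any WSGI server can host the resulting 'app'.
+    def make_txt(environ, filename):
+        return wsgilib.send_file(filename)
+    URLParser.register_constructor('.txt', make_txt)
+    app = URLParser.get_parser(os.getcwd(), base_python_name='')
+    print app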
diff --git a/paste/util/__init__.py b/paste/util/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/util/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/util/classinstance.py b/paste/util/classinstance.py
new file mode 100644
index 0000000..ef81c60
--- /dev/null
+++ b/paste/util/classinstance.py
@@ -0,0 +1,34 @@
+class classinstancemethod(object):
+ """
+ Acts like a class method when called from a class, like an
+ instance method when called by an instance. The method should
+ take two arguments, 'self' and 'cls'; one of these will be None
+ depending on how the method was called.
+ """
+
+ def __init__(self, func):
+ self.func = func
+
+ def __get__(self, obj, type=None):
+ return _methodwrapper(self.func, obj=obj, type=type)
+
+class _methodwrapper(object):
+
+ def __init__(self, func, obj, type):
+ self.func = func
+ self.obj = obj
+ self.type = type
+
+ def __call__(self, *args, **kw):
+ assert not kw.has_key('self') and not kw.has_key('cls'), (
+ "You cannot use 'self' or 'cls' arguments to a "
+ "classinstancemethod")
+ return self.func(*((self.obj, self.type) + args), **kw)
+
+ def __repr__(self):
+ if self.obj is None:
+ return ('<bound class method %s.%s>'
+ % (self.type.__name__, self.func.func_name))
+ else:
+ return ('<bound method %s.%s of %r>'
+ % (self.type.__name__, self.func.func_name, self.obj))
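+
+if __name__ == '__main__':
+    # Illustrative example (editor's sketch, not part of the original file):
+    # the wrapped function receives both 'self' and 'cls'; whichever does
+    # not apply is None.
+    class Greeter(object):
+        def greet(self, cls):
+            if self is None:
+                return 'hello from the class %s' % cls.__name__
+            return 'hello from an instance of %s' % cls.__name__
+        greet = classinstancemethod(greet)
+    print Greeter.greet()      # called on the class: self is None
+    print Greeter().greet()    # called on an instance: cls is still Greeter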
diff --git a/paste/util/filemixin.py b/paste/util/filemixin.py
new file mode 100644
index 0000000..b28a9cc
--- /dev/null
+++ b/paste/util/filemixin.py
@@ -0,0 +1,50 @@
+class FileMixin:
+
+ """
+ Used to provide auxiliary methods to objects simulating files.
+ Objects must implement write, and read if they are input files.
+ Also they should implement close.
+
+ Other methods you may wish to override:
+ * flush()
+ * seek(offset[, whence])
+ * tell()
+ * truncate([size])
+
+ Attributes you may wish to provide:
+ * closed
+ * encoding (you should also respect that in write())
+ * mode
+ * newlines (hard to support)
+ * softspace
+ """
+
+ def flush(self):
+ pass
+
+ def next(self):
+ return self.readline()
+
+ def readline(self, size=None):
+ # @@: This is a lame implementation; but a buffer would probably
+ # be necessary for a better implementation
+ output = []
+ while 1:
+ next = self.read(1)
+ if not next:
+ return ''.join(output)
+ output.append(next)
+ if size and size > 0 and len(output) >= size:
+ return ''.join(output)
+ if next == '\n':
+ # @@: also \r?
+ return ''.join(output)
+
+ def xreadlines(self):
+ return self
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+
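+if __name__ == '__main__':
+    # Illustrative example (editor's sketch, not part of the original file):
+    # a minimal input file object that only implements read(); FileMixin
+    # supplies readline(), next(), xreadlines(), etc.
+    class StringReader(FileMixin):
+        def __init__(self, data):
+            self.data = data
+            self.pos = 0
+        def read(self, size=None):
+            if size is None:
+                size = len(self.data) - self.pos
+            chunk = self.data[self.pos:self.pos + size]
+            self.pos += len(chunk)
+            return chunk
+    f = StringReader('one\ntwo\n')
+    print repr(f.readline())    # 'one\n'
+    print repr(f.readline())    # 'two\n'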
diff --git a/paste/util/thirdparty.py b/paste/util/thirdparty.py
new file mode 100644
index 0000000..78eeadc
--- /dev/null
+++ b/paste/util/thirdparty.py
@@ -0,0 +1,42 @@
+import sys
+import os
+
+third_party = os.path.join(
+ os.path.dirname(os.path.dirname(__file__)),
+ '3rd-party')
+
+new_python_path = os.path.join(third_party, 'new_python')
+
+def load_new_module(module_name, python_version):
+ """
+ Modules in the standard library that have been improved can be
+ loaded with this command. python_version is a sys.version_info
+ tuple, and if you need a newer version then we'll look in
+ ../3rd-party/new_python/python/module.py; otherwise it'll return
+ the normal module. E.g.:
+
+ doctest = load_new_module('doctest', (2, 4))
+ """
+ if python_version > sys.version_info:
+ if new_python_path not in sys.path:
+ sys.path.append(new_python_path)
+ exec "import python.%s as generic_module" % module_name
+ else:
+ exec "import %s as generic_module" % module_name
+ return generic_module
+
+def add_package(package_name):
+ """
+ If package_name has not been installed, we add the appropriate
+ path from ../3rd-party/package_name-files
+
+ *After* calling this function you can import the package on your
+ own, either from the package the user installed, or from the
+ package we distribute.
+ """
+ try:
+ exec "import %s" % package_name
+ except ImportError:
+ path = os.path.join(third_party, '%s-files' % package_name)
+ if os.path.exists(path):
+ sys.path.append(path)
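+
+if __name__ == '__main__':
+    # Illustrative example (editor's sketch, not part of the original file):
+    # fall back to a bundled copy of a package when it is not installed
+    # ('wsgiref' here is just an example name), and pick up the bundled
+    # doctest on Pythons older than 2.4, as described in the docstrings above.
+    add_package('wsgiref')
+    doctest = load_new_module('doctest', (2, 4))
+    print doctest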
diff --git a/paste/util/threadedprint.py b/paste/util/threadedprint.py
new file mode 100644
index 0000000..a9ff7fd
--- /dev/null
+++ b/paste/util/threadedprint.py
@@ -0,0 +1,213 @@
+"""
+threadedprint.py
+================
+
+:author: Ian Bicking
+:date: 12 Jul 2004
+
+Multi-threaded printing; allows the output produced via print to be
+separated according to the thread.
+
+To use this, you must install the catcher, like::
+
+ threadedprint.install()
+
+The installation optionally takes one of three parameters:
+
+default
+ The default destination for print statements (e.g., ``sys.stdout``).
+factory
+ A function that will produce the stream for a thread, given the
+ thread's name.
+paramwriter
+ Instead of writing to a file-like stream, this function will be
+ called like ``paramwriter(thread_name, text)`` for every write.
+
+The thread name is the value returned by
+``threading.currentThread().getName()``, a string (typically something
+like Thread-N).
+
+You can also submit file-like objects for specific threads, which will
+override any of these parameters. To do this, call ``register(stream,
+[threadName])``. ``threadName`` is optional, and if not provided the
+stream will be registered for the current thread.
+
+If no specific stream is registered for a thread, and no default has
+been provided, then an error will occur when anything is written to
+``sys.stdout`` (or printed).
+
+Note: the stream's ``write`` method will be called in the thread the
+text came from, so you should consider thread safety, especially if
+multiple threads share the same writer.
+
+Note: if you want access to the original standard out, use
+``sys.__stdout__``.
+
+You may also uninstall this, via::
+
+ threadedprint.uninstall()
+
+TODO
+----
+
+* Something with ``sys.stderr``.
+* Some default handlers. Maybe something that hooks into `logging`.
+* Possibly cache the results of ``factory`` calls. This would be a
+ semantic change.
+
+"""
+
+import threading
+import sys
+import filemixin
+
+class PrintCatcher(filemixin.FileMixin):
+
+ def __init__(self, default=None, factory=None, paramwriter=None):
+ assert len(filter(lambda x: x is not None,
+ [default, factory, paramwriter])) <= 1, \
+ "You can only provide one of default, factory, or paramwriter"
+ if default:
+ self._defaultfunc = self._writedefault
+ elif factory:
+ self._defaultfunc = self._writefactory
+ elif paramwriter:
+ self._defaultfunc = self._writeparam
+ else:
+ self._defaultfunc = self._writeerror
+ self._default = default
+ self._factory = factory
+ self._paramwriter = paramwriter
+ self._catchers = {}
+
+ def write(self, v, currentThread=threading.currentThread):
+ name = currentThread().getName()
+ catchers = self._catchers
+ if not catchers.has_key(name):
+ self._defaultfunc(name, v)
+ else:
+ catcher = catchers[name]
+ catcher.write(v)
+
+ def _writedefault(self, name, v):
+ self._default.write(v)
+
+ def _writefactory(self, name, v):
+ self._factory(name).write(v)
+
+ def _writeparam(self, name, v):
+ self._paramwriter(name, v)
+
+ def _writeerror(self, name, v):
+ assert 0, "There is no PrintCatcher output stream for the thread %r" % name
+
+ def register(self, catcher, name=None,
+ currentThread=threading.currentThread):
+ if name is None:
+ name = currentThread().getName()
+ self._catchers[name] = catcher
+
+ def deregister(self, catcher, name=None,
+ currentThread=threading.currentThread):
+ if name is None:
+ name = currentThread().getName()
+ assert self._catchers.has_key(name), "There is no PrintCatcher catcher for the thread %r" % name
+ del self._catchers[name]
+
+_printcatcher = None
+_oldstdout = None
+
+def install(**kw):
+ global _printcatcher, _oldstdout, register, deregister
+ if not _printcatcher:
+ _oldstdout = sys.stdout
+ _printcatcher = sys.stdout = PrintCatcher(**kw)
+ register = _printcatcher.register
+ deregister = _printcatcher.deregister
+
+def uninstall():
+ global _printcatcher, _oldstdout, register, deregister
+ if _printcatcher:
+ sys.stdout = _oldstdout
+ _printcatcher = _oldstdout = None
+ register = not_installed_error
+ deregister = not_installed_error
+
+def not_installed_error(*args, **kw):
+ assert 0, "threadedprint has not yet been installed (call threadedprint.install())"
+
+register = deregister = not_installed_error
+
+class StdinCatcher(filemixin.FileMixin):
+
+ def __init__(self, default=None, factory=None, paramwriter=None):
+ assert len(filter(lambda x: x is not None,
+ [default, factory, paramwriter])) <= 1, \
+ "You can only provide one of default, factory, or paramwriter"
+ if default:
+ self._defaultfunc = self._readdefault
+ elif factory:
+ self._defaultfunc = self._readfactory
+ elif paramwriter:
+ self._defaultfunc = self._readparam
+ else:
+ self._defaultfunc = self._readerror
+ self._default = default
+ self._factory = factory
+ self._paramwriter = paramwriter
+ self._catchers = {}
+
+ def read(self, size=None, currentThread=threading.currentThread):
+ name = currentThread().getName()
+ catchers = self._catchers
+ if not catchers.has_key(name):
+            return self._defaultfunc(name, size)
+        else:
+            catcher = catchers[name]
+            return catcher.read(size)
+
+    def _readdefault(self, name, size):
+        return self._default.read(size)
+
+    def _readfactory(self, name, size):
+        return self._factory(name).read(size)
+
+    def _readparam(self, name, size):
+        return self._paramwriter(name, size)
+
+ def _readerror(self, name, size):
+ assert 0, "There is no StdinCatcher output stream for the thread %r" % name
+
+ def register(self, catcher, name=None,
+ currentThread=threading.currentThread):
+ if name is None:
+            name = currentThread().getName()
+ self._catchers[name] = catcher
+
+ def deregister(self, catcher, name=None,
+ currentThread=threading.currentThread):
+ if name is None:
+ name = currentThread().getName()
+ assert self._catchers.has_key(name), "There is no StdinCatcher catcher for the thread %r" % name
+ del self._catchers[name]
+
+_stdincatcher = None
+_oldstdin = None
+
+def install_stdin(**kw):
+ global _stdincatcher, _oldstdin, register_stdin, deregister_stdin
+ if not _stdincatcher:
+ _oldstdin = sys.stdin
+ _stdincatcher = sys.stdin = StdinCatcher(**kw)
+ register_stdin = _stdincatcher.register
+ deregister_stdin = _stdincatcher.deregister
+
+def uninstall_stdin():
+    global _stdincatcher, _oldstdin, register_stdin, deregister_stdin
+ if _stdincatcher:
+ sys.stdin = _oldstdin
+ _stdincatcher = _oldstdin = None
+ register_stdin = deregister_stdin = not_installed_error_stdin
+
+def not_installed_error_stdin(*args, **kw):
+ assert 0, "threadedprint has not yet been installed for stdin (call threadedprint.install_stdin())"
diff --git a/paste/webkit/FakeWebware/MiscUtils/CSVJoiner.py b/paste/webkit/FakeWebware/MiscUtils/CSVJoiner.py
new file mode 100644
index 0000000..2bc13a9
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/CSVJoiner.py
@@ -0,0 +1,21 @@
+import types
+
+
+def joinCSVFields(fields):
+ """
+    Returns a CSV record (e.g. a string) from a sequence of fields.
+    Fields containing commas (,) or double quotes (") are quoted,
+    and double quotes are escaped (""). The terminating newline is
+ NOT included.
+ """
+ newFields = []
+ for field in fields:
+ assert type(field) is types.StringType
+ if field.find('"')!=-1:
+ newField = '"' + field.replace('"', '""') + '"'
+ elif field.find(',')!=-1 or field.find('\n')!=-1 or field.find('\r')!=-1:
+ newField = '"' + field + '"'
+ else:
+ newField = field
+ newFields.append(newField)
+ return ','.join(newFields)
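+
+if __name__ == '__main__':
+    # Illustrative example (editor's sketch, not part of the original file):
+    # fields with commas or quotes get quoted, and embedded quotes doubled.
+    print joinCSVFields(['plain', 'has, comma', 'has "quote"'])
+    # -> plain,"has, comma","has ""quote"""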
diff --git a/paste/webkit/FakeWebware/MiscUtils/CSVParser.py b/paste/webkit/FakeWebware/MiscUtils/CSVParser.py
new file mode 100644
index 0000000..047b3b9
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/CSVParser.py
@@ -0,0 +1,265 @@
+# The states of the parser
+StartRecord = 0
+StartField = 1
+InField = 2
+QuoteInField = 3
+InQuotedField = 4
+QuoteInQuotedField = 5
+EndQuotedField = 6
+
+# State handlers can return Finished to terminate parsing early
+Finished = 10
+
+
+class ParseError(Exception):
+ pass
+
+
+class CSVParser:
+ """
+ Parses CSV files including all subtleties such as:
+ * commas in fields
+ * double quotes in fields
+ * embedded newlines in fields
+ - Examples of programs that produce such beasts include
+ MySQL and Excel
+
+ For a higher-level, friendlier CSV class with many conveniences,
+ see DataTable (which uses this class for its parsing).
+
+ Example:
+ records = []
+ parse = CSVParser().parse
+ for line in lines:
+ results = parse(line)
+ if results is not None:
+ records.append(results)
+
+ CREDIT
+
+ The algorithm was taken directly from the open source Python
+ C-extension, csv:
+ http://www.object-craft.com.au/projects/csv/
+
+ It would be nice to use the csv module when present, since it is
+ substantially faster. Before that can be done, it needs to support
+ allowComments and stripWhitespace, and pass the TestCSVParser.py
+ test suite.
+ """
+
+ def __init__(self, allowComments=1, stripWhitespace=1, fieldSep=',', autoReset=1, doubleQuote=1):
+ """
+ @@ document
+ """
+ # settings
+ self._allowComments = allowComments
+ self._stripWhitespace = stripWhitespace
+ self._doubleQuote = doubleQuote
+ self._fieldSep = fieldSep
+ self._autoReset = autoReset
+
+ # Other
+ self._state = StartRecord
+ self._fields = []
+ self._hadParseError = 0
+ self._field = [] # a list of chars for the cur field
+ self.addChar = self._field.append
+
+ # The handlers for the various states
+ self._handlers = [
+ self.startRecord,
+ self.startField,
+ self.inField,
+ self.quoteInField,
+ self.inQuotedField,
+ self.quoteInQuotedField,
+ self.endQuotedField,
+ ]
+
+
+ ## Parse ##
+
+ def parse(self, line):
+ """
+ Parse the single line and return a list or string fields, or
+ None if the CSV record contains embedded newlines and the
+ record is not yet complete.
+ """
+ if self._autoReset and self._hadParseError:
+ self.reset()
+ handlers = self._handlers
+
+ i = 0
+ lineLen = len(line)
+ while i<lineLen:
+ c = line[i]
+ if c=='\r':
+ i += 1
+ if i==lineLen:
+ break # mac end of line
+ c = line[i]
+ if c=='\n':
+ i += 1
+ if i==lineLen:
+ break # dos end of line
+
+ self._hadParseError = 1
+ raise ParseError('Newline inside string')
+
+ elif c=='\n':
+ i += 1
+ if i==lineLen:
+ break # unix end of line
+
+ self._hadParseError = 1
+ raise ParseError('Newline inside string')
+
+ else:
+ if handlers[self._state](c)==Finished: # process a character
+ break
+
+ i += 1
+
+ handlers[self._state]('\0') # signal the end of the input
+
+ if self._state==StartRecord:
+ fields = self._fields
+ self._fields = []
+ if self._stripWhitespace:
+ fields = [field.strip() for field in fields]
+ return fields
+ else:
+ return None # indicates multi-line record; eg not finished
+
+
+ ## Reset ##
+
+ def reset(self):
+ """
+ Resets the parser to a fresh state in order to recover from
+ exceptions. But if autoReset is true (the default), this is
+ done automatically.
+ """
+ self._fields = []
+ self._state = StartRecord
+        self._hadParseError = 0
+
+
+ ## State Handlers ##
+
+ def startRecord(self, c):
+ if c!='\0': # not empty line
+ if c=='#' and self._allowComments:
+ return Finished
+ else:
+ self._state = StartField
+ self.startField(c)
+
+ def startField(self, c):
+ if c=='"':
+ self._state = InQuotedField # start quoted field
+ elif c==self._fieldSep:
+ self.saveField() # save empty field
+ elif c==' ' and self._stripWhitespace:
+ pass # skip over preceding whitespace
+ elif c=='\0':
+ self.saveField() # save empty field
+ self._state = StartRecord
+ else:
+ self.addChar(c) # begin new unquoted field
+ self._state = InField
+
+ def inField(self, c):
+ # in unquoted field
+ if c==self._fieldSep:
+ self.saveField()
+ self._state = StartField
+ elif c=='\0':
+ self.saveField() # end of line
+ self._state = StartRecord
+ elif c=='"' and self._doubleQuote:
+ self._state = QuoteInField
+ else:
+ self.addChar(c) # normal character
+
+ def quoteInField(self, c):
+ self.addChar('"')
+ if c=='"':
+ self._state = InField # save "" as "
+ elif c=='\0':
+ self.saveField() # end of line
+ self._state = StartRecord
+ elif c==self._fieldSep:
+ self.saveField()
+ self._state = StartField
+ else:
+ self.addChar(c) # normal character
+ self._state = InField
+
+ def inQuotedField(self, c):
+ if c=='"':
+ if self._doubleQuote:
+ self._state = QuoteInQuotedField
+ else:
+ self.saveField() # end of field
+ self._state = EndQuotedField
+ elif c=='\0':
+ self.addChar('\n') # end of line
+ else:
+ self.addChar(c) # normal character
+
+ def quoteInQuotedField(self, c):
+ if c=='"':
+ self.addChar('"') # save "" as "
+ self._state = InQuotedField
+ elif c==self._fieldSep:
+ self.saveField()
+ self._state = StartField
+ elif c==' ' and self._stripWhitespace:
+ pass # skip it
+ elif c=='\0':
+ self.saveField() # end of line
+ self._state = StartRecord
+ else:
+ self._hadParseError = 1 # illegal
+ raise ParseError, '%s expected after "' % self._fieldSep
+
+ def endQuotedField(self, c):
+ if c==self._fieldSep: # seen closing " on quoted field
+ self._state = StartField # wait for new field
+ elif c=='\0':
+ self._state = StartRecord # end of line
+ else:
+ self._hadParseError = 1
+ raise ParseError, '%s expected after "' % self._fieldSep
+
+ def saveField(self):
+ self._fields.append(''.join(self._field))
+ self._field = []
+ self.addChar = self._field.append
+
+
+# Call the global function parse() if you like the default settings of the CSVParser
+_parser = CSVParser()
+parse = _parser.parse
+
+
+import types
+def joinCSVFields(fields):
+ """
+    Returns a CSV record (e.g. a string) from a sequence of fields.
+    Fields containing commas (,) or double quotes (") are quoted
+ and double quotes are escaped (""). The terminating newline is
+ NOT included.
+ """
+ newFields = []
+ for field in fields:
+ assert type(field) is types.StringType
+ if field.find('"')!=-1:
+ newField = '"' + field.replace('"', '""') + '"'
+ elif field.find(',')!=-1:
+ newField = '"' + field + '"'
+ else:
+ newField = field
+ newFields.append(newField)
+ return ','.join(newFields)
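+
+if __name__ == '__main__':
+    # Illustrative example (editor's sketch, not part of the original file):
+    # a quoted field with an embedded newline spans two lines; parse()
+    # returns None until the record is complete, as described above.
+    p = CSVParser()
+    print p.parse('a,"start of a')       # None -- record not finished yet
+    print p.parse('quoted field",b')     # ['a', 'start of a\nquoted field', 'b']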
diff --git a/paste/webkit/FakeWebware/MiscUtils/DBPool.py b/paste/webkit/FakeWebware/MiscUtils/DBPool.py
new file mode 100644
index 0000000..522f268
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/DBPool.py
@@ -0,0 +1,128 @@
+"""
+DBPool.py
+
+Implements a pool of cached connections to a database. This should result in
+a speedup for persistent apps. The pool of connections is threadsafe
+regardless of whether the DB API module in question has a
+threadsafety of 1 or 2.
+
+For more information on the DB API, see:
+ http://www.python.org/topics/database/DatabaseAPI-2.0.html
+
+The idea behind DBPool is that it's completely seamless, so once you have
+established your connection, use it just as you would any other DB-API
+compliant module. For example:
+
+ dbPool = DBPool(MySQLdb, 5, host=xxx, user=xxx, ...)
+ db = dbPool.getConnection()
+
+Now use "db" exactly as if it were a MySQLdb connection. It's really
+just a proxy class.
+
+db.close() will return the connection to the pool, not actually
+close it. This is so your existing code works nicely.
+
+
+FUTURE
+
+* If in the presence of WebKit, register ourselves as a Can.
+
+
+CREDIT
+
+* Contributed by Dan Green
+* thread safety bug found by Tom Schwaller
+* Fixes by Geoff Talvola (thread safety in _threadsafe_getConnection()).
+* Clean up by Chuck Esterbrook.
+* Fix unthreadsafe functions which were leaking, Jay Love
+* Eli Green's webware-discuss comments were lifted for additional docs.
+"""
+
+
+import threading
+
+
+class DBPoolError(Exception): pass
+class UnsupportedError(DBPoolError): pass
+
+
+class PooledConnection:
+ """ A wrapper for database connections to help with DBPool. You don't normally deal with this class directly, but use DBPool to get new connections. """
+
+ def __init__(self, pool, con):
+ self._con = con
+ self._pool = pool
+
+ def close(self):
+ if self._con is not None:
+ self._pool.returnConnection(self)
+ self._con = None
+
+ def __getattr__(self, name):
+ return getattr(self._con, name)
+
+ def __del__(self):
+ self.close()
+
+
+class DBPool:
+
+ def __init__(self, dbModule, maxConnections, *args, **kwargs):
+ if dbModule.threadsafety==0:
+ raise UnsupportedError, "Database module does not support any level of threading."
+ elif dbModule.threadsafety==1:
+ from Queue import Queue
+ self._queue = Queue(maxConnections)
+ self.addConnection = self._unthreadsafe_addConnection
+ self.getConnection = self._unthreadsafe_getConnection
+ self.returnConnection = self._unthreadsafe_returnConnection
+ elif dbModule.threadsafety>=2:
+ self._lock = threading.Lock()
+ self._nextCon = 0
+ self._connections = []
+ self.addConnection = self._threadsafe_addConnection
+ self.getConnection = self._threadsafe_getConnection
+ self.returnConnection = self._threadsafe_returnConnection
+
+ # @@ 2000-12-04 ce: Should we really make all the connections now?
+ # Couldn't we do this on demand?
+ for i in range(maxConnections):
+ con = apply(dbModule.connect, args, kwargs)
+ self.addConnection(con)
+
+
+ # threadsafe/unthreadsafe refers to the database _module_, not THIS class..
+ # this class is definitely threadsafe (um. that is, I hope so - Dan)
+
+ def _threadsafe_addConnection(self, con):
+ self._connections.append(con)
+
+
+ def _threadsafe_getConnection(self):
+ self._lock.acquire()
+ try:
+ con = PooledConnection(self, self._connections[self._nextCon])
+ self._nextCon = self._nextCon + 1
+ if self._nextCon >= len(self._connections):
+ self._nextCon = 0
+ return con
+ finally:
+ self._lock.release()
+
+ def _threadsafe_returnConnection(self, con):
+ return
+
+ # These functions are used with DB modules that have connection level threadsafety, like PostgreSQL.
+ #
+ def _unthreadsafe_addConnection(self, con):
+ self._queue.put(con)
+
+ def _unthreadsafe_getConnection(self):
+ return PooledConnection(self, self._queue.get())
+
+ def _unthreadsafe_returnConnection(self, conpool):
+ """
+ This should never be called explicitly outside of this module.
+ """
+ self.addConnection(conpool._con)
+
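+if __name__ == '__main__':
+    # Illustrative example (editor's sketch, not part of the original file):
+    # DBPool works with any DB-API module (MySQLdb is the documented case);
+    # a tiny stub module stands in here so the example is self-contained.
+    class StubConnection:
+        def cursor(self):
+            return 'stub cursor'
+        def close(self):
+            pass
+    class StubDBModule:
+        threadsafety = 2
+        def connect(*args, **kwargs):
+            return StubConnection()
+        connect = staticmethod(connect)
+    pool = DBPool(StubDBModule, 3)
+    db = pool.getConnection()
+    print db.cursor()   # attribute access is proxied to the real connection
+    db.close()          # returns the connection to the pool instead of closing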
diff --git a/paste/webkit/FakeWebware/MiscUtils/DataTable.py b/paste/webkit/FakeWebware/MiscUtils/DataTable.py
new file mode 100644
index 0000000..1c92522
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/DataTable.py
@@ -0,0 +1,804 @@
+"""
+DataTable.py
+
+
+INTRODUCTION
+
+This class is useful for representing a table of data arranged by named
+columns, where each row in the table can be thought of as a record:
+
+ name phoneNumber
+ ------ -----------
+ Chuck 893-3498
+ Bill 893-0439
+ John 893-5901
+
+This data often comes from delimited text files which typically
+have well defined columns or fields with several rows each of which can
+be thought of as a record.
+
+Using a DataTable can be as easy as using lists and dictionaries:
+
+ table = DataTable('users.csv')
+ for row in table:
+ print row['name'], row['phoneNumber']
+
+Or even:
+
+ table = DataTable('users.csv')
+ for row in table:
+ print '%(name)s %(phoneNumber)s' % row
+
+The above print statement relies on the fact that rows can be treated
+like dictionaries, using the column headings as keys.
+
+You can also treat a row like an array:
+
+ table = DataTable('something.tabbed', delimiter='\t')
+ for row in table:
+ for item in row:
+ print item,
+ print
+
+
+COLUMNS
+
+Column headings can have a type specification like so:
+ name, age:int, zip:int
+
+Possible types include string, int, float and datetime. However,
+datetime is not well supported right now.
+
+String is assumed if no type is specified but you can set that
+assumption when you create the table:
+
+ table = DataTable(headings, defaultType='float')
+
+Using types like int and float will cause DataTable to actually
+convert the string values (perhaps read from a file) to these types
+so that you can use them in natural operations. For example:
+
+ if row['age']>120:
+ self.flagData(row, 'age looks high')
+
+As you can see, each row can be accessed as a dictionary with keys
+according the column headings. Names are case sensitive.
+
+
+ADDING ROWS
+
+Like Python lists, data tables have an append() method. You can append
+TableRecords, or you pass a dictionary, list or object, in which case a
+TableRecord is created based on given values. See the method docs below
+for more details.
+
+
+FILES
+
+By default, the files that DataTable reads from are expected to be
+comma-separated value files.
+
+Limited comments are supported: A comment is any line whose very first
+character is a #. This allows you to easily comment out lines in your
+data files without having to remove them.
+
+Whitespace around field values is stripped.
+
+You can control all this behavior through the arguments found in the
+initializer and the various readFoo() methods:
+
+ ...delimiter=',', allowComments=1, stripWhite=1
+
+For example:
+
+ table = DataTable('foo.tabbed', delimiter='\t', allowComments=0, stripWhite=0)
+
+You should access these parameters by their name since additional ones
+could appear in the future, thereby changing the order.
+
+If you are creating these text files, we recommend the
+comma-separated-value format, or CSV. This format is better defined
+than the tab delimited format, and can easily be edited and manipulated
+by popular spreadsheets and databases.
+
+
+MICROSOFT EXCEL
+
+On Microsoft Windows systems with Excel and the win32all package
+(http://starship.python.net/crew/mhammond/), DataTable will use Excel
+(via COM) to read ".xls" files.
+
+from MiscUtils import DataTable
+assert DataTable.canReadExcel()
+table = DataTable.DataTable('foo.xls')
+
+Consistent with its CSV processing, DataTable will ignore any row
+whose first cell is '#' and strip whitespace surrounding strings.
+
+
+TABLES FROM SCRATCH
+
+Here's an example that constructs a table from scratch:
+
+ table = DataTable(['name', 'age:int'])
+ table.append(['John', 80])
+ table.append({'name': 'John', 'age': 80})
+ print table
+
+
+QUERIES
+
+A simple query mechanism is supported for equality of fields:
+
+ matches = table.recordsEqualTo({'uid': 5})
+ if matches:
+ for match in matches:
+ print match
+ else:
+ print 'No matches.'
+
+
+COMMON USES
+
+* Programs can keep configuration and other data in simple comma-
+separated text files and use DataTable to access them. For example, a
+web site could read it's sidebar links from such a file, thereby
+allowing people who don't know Python (or even HTML) to edit these
+links without having to understand other implementation parts of the
+site.
+
+* Servers can use DataTable to read and write log files.
+
+
+FROM THE COMMAND LINE
+
+The only purpose in invoking DataTable from the command line is to see
+if it will read a file:
+
+> python DataTable.py foo.csv
+
+The data table is printed to stdout.
+
+
+CACHING
+
+DataTable uses "pickle caching" so that it can read .csv files faster
+on subsequent loads. You can disable this across the board with:
+ from MiscUtils.DataTable import DataTable
+ DataTable.usePickleCache = 0
+
+Or per instance by passing "usePickleCache=0" to the constructor.
+
+See the docstring of PickleCache.py for more information.
+
+
+MORE DOCS
+
+Some of the methods in this module have worthwhile doc strings to look
+at. See below.
+
+
+TO DO
+
+* Allow callback parameter or setting for parsing CSV records.
+* Perhaps TableRecord should inherit UserList and UserDict and override methods as appropriate...?
+* Better support for datetime.
+* _types and BlankValues aren't really packaged, advertised or
+ documented for customization by the user of this module.
+* DataTable:
+ * Parameterize the TextColumn class.
+ * Parameterize the TableRecord class.
+ * More list-like methods such as insert()
+ * writeFileNamed() is flawed: it doesn't write the table column
+ type
+ * Should it inherit from UserList?
+* Add error checking that a column name is not a number (which could
+ cause problems).
+* Look for various @@ tags through out the code.
+
+"""
+
+
+import os, string, sys
+from CSVParser import CSVParser
+from string import join, replace, split, strip
+from types import *
+
+try:
+ StringTypes
+except NameError:
+ StringTypes = StringType
+
+try:
+ from MiscUtils import NoDefault
+except ImportError:
+ class NoDefault:
+ pass
+
+try:
+ from mx.DateTime import DateTimeType, DateTimeFrom
+except ImportError:
+ pass
+
+
+## Types ##
+
+DateTimeType = "<custom-type 'datetime'>"
+ObjectType = "<type 'Object'>"
+
+_types = {
+ 'string': StringType,
+ 'int': IntType,
+ 'long': LongType,
+ 'float': FloatType,
+ 'datetime': DateTimeType,
+ 'object': ObjectType,
+}
+
+
+## Functions ##
+
+def canReadExcel():
+ try:
+ from win32com.client import Dispatch
+ Dispatch("Excel.Application")
+ except:
+ return False
+ else:
+ return True
+
+
+## Classes ##
+
+
+class DataTableError(Exception):
+ pass
+
+
+class TableColumn:
+ """
+ A table column represents a column of the table including name and
+ type.
+
+ It does not contain the actual values of the column. These are
+ stored individually in the rows.
+ """
+
+ def __init__(self, spec):
+
+ # spec is a string such as 'name' or 'name:type'
+ fields = split(spec, ':')
+ if len(fields)>2:
+ raise DataTableError, 'Invalid column spec %s' % repr(spec)
+ self._name = fields[0]
+
+ if len(fields)==1:
+ self._type = None
+ else:
+ self.setType(fields[1])
+
+ def name(self):
+ return self._name
+
+ def type(self):
+ return self._type
+
+ def setType(self, type):
+ """ Sets the type (by a string containing the name) of the heading. Usually invoked by DataTable to set the default type for columns whose types were not specified. """
+ if type==None:
+ self._type = None
+ else:
+ try:
+ self._type = _types[type]
+ except:
+ raise DataTableError, 'Unknown type %s' % repr(type)
+
+ def __repr__(self):
+ return '<%s %s with %s at %x>' % (
+ self.__class__.__name__, repr(self._name), repr(self._type), id(self))
+
+ def __str__(self):
+ return self._name
+
+
+ ## Utilities ##
+
+ def valueForRawValue(self, rawValue):
+ """ The rawValue is typically a string or value already of the appropriate type. TableRecord invokes this method to ensure that values (especially strings that come from files) are the correct types (e.g., ints are ints and floats are floats). """
+ # @@ 2000-07-23 ce: an if-else ladder? perhaps these should be dispatched messages or a class hier
+ if self._type is StringType:
+ value = str(rawValue)
+ elif self._type is IntType:
+ if rawValue=='':
+ value = 0
+ else:
+ value = int(rawValue)
+ elif self._type is LongType:
+ if rawValue=='':
+ value = 0
+ else:
+ value = long(rawValue)
+ elif self._type is FloatType:
+ if rawValue=='':
+ value = 0.0
+ else:
+ value = float(rawValue)
+ elif self._type is DateTimeType:
+ value = DateTimeFrom(rawValue)
+ elif self._type is ObjectType:
+ value = rawValue
+ else:
+ # no type set, leave values as they are
+ value = rawValue
+ return value
+
+
+class DataTable:
+ """
+ See the doc string for this module.
+ """
+
+ usePickleCache = 1
+
+
+ ## Init ##
+
+ def __init__(self, filenameOrHeadings=None, delimiter=',', allowComments=1, stripWhite=1, defaultType=None, usePickleCache=None):
+ if usePickleCache is None:
+ self.usePickleCache = self.usePickleCache # grab the class-level attr
+ else:
+ self.usePickleCache = usePickleCache
+ if defaultType and not _types.has_key(defaultType):
+ raise DataTableError, 'Unknown type for default type: %s' % repr(defaultType)
+ self._defaultType = defaultType
+ self._filename = None
+ self._headings = []
+ self._rows = []
+ if filenameOrHeadings:
+ if type(filenameOrHeadings) is StringType:
+ self.readFileNamed(filenameOrHeadings, delimiter, allowComments, stripWhite)
+ else:
+ self.setHeadings(filenameOrHeadings)
+
+
+ ## File I/O ##
+
+ def readFileNamed(self, filename, delimiter=',', allowComments=1, stripWhite=1):
+ self._filename = filename
+ data = None
+ if self.usePickleCache:
+ from PickleCache import readPickleCache, writePickleCache
+ data = readPickleCache(filename, pickleVersion=1, source='MiscUtils.DataTable')
+ if data is None:
+ if self._filename.endswith('.xls'):
+ self.readExcel()
+ else:
+ file = open(self._filename, 'r')
+ self.readFile(file, delimiter, allowComments, stripWhite)
+ file.close()
+ if self.usePickleCache:
+ writePickleCache(self, filename, pickleVersion=1, source='MiscUtils.DataTable')
+ else:
+ self.__dict__ = data.__dict__
+ return self
+
+ def readFile(self, file, delimiter=',', allowComments=1, stripWhite=1):
+ return self.readLines(file.readlines(), delimiter, allowComments, stripWhite)
+
+ def readString(self, string, delimiter=',', allowComments=1, stripWhite=1):
+ return self.readLines(split(string, '\n'), delimiter, allowComments, stripWhite)
+
+ def readLines(self, lines, delimiter=',', allowComments=1, stripWhite=1):
+ if self._defaultType is None:
+ self._defaultType = 'string'
+ haveReadHeadings = 0
+ parse = CSVParser(fieldSep=delimiter, allowComments=allowComments, stripWhitespace=stripWhite).parse
+ values = ''
+ for line in lines:
+ # process a row, either headings or data
+ values = parse(line)
+ if values:
+ if haveReadHeadings:
+ row = TableRecord(self, values)
+ self._rows.append(row)
+ else:
+ self.setHeadings(values)
+ haveReadHeadings = 1
+ if values is None:
+ raise DataTableError, "Unfinished multiline record."
+ return self
+
+ def canReadExcel(self):
+ return canReadExcel()
+
+ def readExcel(self):
+ maxBlankRows = 10
+ numRowsToReadPerCall = 20
+
+ from win32com.client import Dispatch
+ xl = Dispatch("Excel.Application")
+ wb = xl.Workbooks.Open(os.path.abspath(self._filename))
+ try:
+ sh = wb.Worksheets(1)
+ sh.Cells(1, 1)
+
+ # determine max column
+ numCols = 1
+ while 1:
+ if sh.Cells(1, numCols).Value in [None, '']:
+ numCols -= 1
+ break
+ numCols += 1
+ if numCols<=0:
+ return
+
+ def strip(x):
+ try:
+ return x.strip()
+ except:
+ return x
+
+ # read rows of data
+ maxCol = chr(ord('A') + numCols - 1)
+ haveReadHeadings = 0
+ rowNum = 1
+ numBlankRows = 0
+ valuesBuffer = {} # keyed by row number
+ while 1:
+ try:
+ # grab a single row
+ values = valuesBuffer[rowNum]
+ except KeyError:
+ # woops. read buffer is out of fresh rows
+ valuesRows = sh.Range('A%i:%s%i' % (rowNum, maxCol, rowNum+numRowsToReadPerCall-1)).Value
+ valuesBuffer.clear()
+ j = rowNum
+ for valuesRow in valuesRows:
+ valuesBuffer[j] = valuesRow
+ j += 1
+ values = valuesBuffer[rowNum]
+
+ # non-"buffered" version, one row at a time:
+ # values = sh.Range('A%i:%s%i' % (rowNum, maxCol, rowNum)).Value[0]
+
+ values = [strip(v) for v in values]
+ nonEmpty = [v for v in values if v]
+ if nonEmpty:
+ if values[0] not in ('#', u'#'):
+ if haveReadHeadings:
+ row = TableRecord(self, values)
+ self._rows.append(row)
+ else:
+ self.setHeadings(values)
+ haveReadHeadings = 1
+ numBlankRows = 0
+ else:
+ numBlankRows += 1
+ if numBlankRows>maxBlankRows:
+ # consider end of spreadsheet
+ break
+ rowNum += 1
+ finally:
+ wb.Close()
+
+ def save(self):
+ self.writeFileNamed(self._filename)
+
+ def writeFileNamed(self, filename):
+ file = open(filename, 'w')
+ self.writeFile(file)
+ file.close()
+
+ def writeFile(self, file):
+ """
+ @@ 2000-07-20 ce: This doesn't write the column types (like :int) back out.
+        @@ 2000-07-21 ce: It's notable that a blank numeric value gets read as zero and written out that way. Also, None values are written as blanks.
+ """
+
+ # write headings
+ file.write(join(map(lambda h: str(h), self._headings), ','))
+ file.write('\n')
+
+ def ValueWritingMapper(item):
+ # So that None gets written as a blank and everything else as a string
+ if item is None:
+ return ''
+ else:
+ return str(item)
+
+ # write rows
+ for row in self._rows:
+ file.write(join(map(ValueWritingMapper, row), ','))
+ file.write('\n')
+
+ def commit(self):
+ if self._changed:
+ self.save()
+ self._changed = 0
+
+
+ ## Headings ##
+
+ def heading(self, index):
+        if type(index) is StringType:
+            index = self._nameToIndexMap[index]
+ return self._headings[index]
+
+ def hasHeading(self, name):
+ return self._nameToIndexMap.has_key(name)
+
+ def numHeadings(self):
+ return len(self._headings)
+
+ def headings(self):
+ return self._headings
+
+ def setHeadings(self, headings):
+ """ Headings can be a list of strings (like ['name', 'age:int']) or a list of TableColumns or None. """
+ if not headings:
+ self._headings = []
+ elif isinstance(headings[0], StringTypes):
+ self._headings = map(lambda h: TableColumn(h), headings)
+ elif isinstance(headings[0], TableColumn):
+ self._headings = list(headings)
+ for heading in self._headings:
+ if heading.type() is None:
+ heading.setType(self._defaultType)
+ self.createNameToIndexMap()
+
+
+ ## Row access (list like) ##
+
+ def __len__(self):
+ return len(self._rows)
+
+ def __getitem__(self, index):
+ return self._rows[index]
+
+ def append(self, object):
+ """ If object is not a TableRecord, then one is created, passing the object to initialize the TableRecord. Therefore, object can be a TableRecord, list, dictionary or object. See TableRecord for details. """
+
+ if not isinstance(object, TableRecord):
+ object = TableRecord(self, object)
+ self._rows.append(object)
+ self._changed = 1
+
+
+ ## Queries ##
+
+ def recordsEqualTo(self, dict):
+ records = []
+ keys = dict.keys()
+ for record in self._rows:
+ matches = 1
+ for key in keys:
+ if record[key]!=dict[key]:
+ matches = 0
+ break
+ if matches:
+ records.append(record)
+ return records
+
+
+ ## As a string ##
+
+ def __repr__(self):
+ # Initial info
+ s = ['DataTable: %s\n%d rows\n' % (self._filename, len(self._rows))]
+
+ # Headings
+ s.append(' ')
+ s.append(join(map(lambda h: str(h), self._headings), ', '))
+ s.append('\n')
+
+ # Records
+ i = 0
+ for row in self._rows:
+ s.append('%3d. ' % i)
+ s.append(join(map(lambda r: str(r), row), ', '))
+ s.append('\n')
+ i = i + 1
+ return join(s, '')
+
+
+ ## As a dictionary ##
+
+ def dictKeyedBy(self, key):
+ """ Returns a dictionary containing the contents of the table indexed by the particular key. This is useful for tables that have a column which represents a unique key (such as a name, serial number, etc.). """
+ dict = {}
+ for row in self:
+ dict[row[key]] = row
+ return dict
+
+
+ ## Misc access ##
+
+ def filename(self):
+ return self._filename
+
+ def nameToIndexMap(self):
+ """ Table rows keep a reference to this map in order to speed up index-by-names (as in row['name']). """
+ return self._nameToIndexMap
+
+
+ ## Self utilities ##
+
+ def createNameToIndexMap(self):
+ """
+ Invoked by self to create the nameToIndexMap after the table's
+ headings have been read/initialized.
+ """
+ map = {}
+ for i in range(len(self._headings)):
+ map[self._headings[i].name()] = i
+ self._nameToIndexMap = map
+
+
+# @@ 2000-07-20 ce: perhaps for each type we could specify a function to convert from string values to the values of the type
+
+BlankValues = {
+ StringType: '',
+ IntType: 0,
+ FloatType: 0.0,
+ DateTimeType: '',
+ None: None,
+}
+
+
+class TableRecord:
+
+ ## Init ##
+
+ def __init__(self, table, values=None):
+ """
+        Dispatches control to one of the other init methods based on the type of values. Values can be one of four things:
+ 1. A TableRecord
+ 2. A list
+ 3. A dictionary
+ 4. Any object responding to hasValueForKey() and valueForKey().
+ """
+ self._headings = table.headings()
+ self._nameToIndexMap = table.nameToIndexMap()
+ # @@ 2000-07-20 ce: Take out the headings arg to the init method since we have an attribute for that
+
+ if values is not None:
+ valuesType = type(values)
+ if valuesType is ListType or valuesType is TupleType:
+ # @@ 2000-07-20 ce: check for required attributes instead
+ self.initFromSequence(values)
+ elif valuesType is DictType:
+ self.initFromDict(values)
+ elif valuesType is InstanceType:
+            self.initFromObject(values)
+ else:
+ raise DataTableError, 'Unknown type for values %s.' % valuesType
+
+ def initFromSequence(self, values):
+ if len(self._headings)<len(values):
+ raise DataTableError, ('There are more values than headings.\nheadings(%d, %s)\nvalues(%d, %s)' % (len(self._headings), self._headings, len(values), values))
+ self._values = []
+ numHeadings = len(self._headings)
+ numValues = len(values)
+ assert numValues<=numHeadings
+ for i in range(numHeadings):
+ heading = self._headings[i]
+ if i>=numValues:
+ self._values.append(BlankValues[heading.type()])
+ else:
+ self._values.append(heading.valueForRawValue(values[i]))
+
+ def initFromDict(self, dict):
+ self._values = []
+ for heading in self._headings:
+ name = heading.name()
+ if dict.has_key(name):
+ self._values.append(heading.valueForRawValue(dict[name]))
+ else:
+ self._values.append(BlankValues[heading.type()])
+
+ def initFromObject(self, object):
+ """
+        The object is expected to respond to hasValueForKey(name) and
+ valueForKey(name) for each of the headings in the table. It's
+ alright if the object returns 0 for hasValueForKey(). In that
+ case, a "blank" value is assumed (such as zero or an empty
+ string). If hasValueForKey() returns 1, then valueForKey() must
+ return a value.
+ """
+ self._values = []
+ for heading in self._headings:
+ name = heading.name()
+ if object.hasValueForKey(name):
+ self._values.append(heading.valueForRawValue(object.valueForKey(name)))
+ else:
+ self._values.append(BlankValues[heading.type()])
+
+
+ ## Accessing like a sequence or dictionary ##
+
+ def __len__(self):
+ return len(self._values)
+
+ def __getitem__(self, key):
+ if isinstance(key, StringTypes):
+ key = self._nameToIndexMap[key]
+ try:
+ return self._values[key]
+ except TypeError:
+ raise TypeError, 'key=%r, key type=%r, self._values=%r' % (key, type(key), self._values)
+
+ def __setitem__(self, key, value):
+ if type(key) is StringType:
+ key = self._nameToIndexMap[key]
+ self._values[key] = value
+
+ def __repr__(self):
+ return '%s' % self._values
+
+ def get(self, key, default=None):
+ index = self._nameToIndexMap.get(key, None)
+ if index is None:
+ return default
+ else:
+ return self._values[index]
+
+ def has_key(self, key):
+ return self._nameToIndexMap.has_key(key)
+
+ def keys(self):
+ return self._nameToIndexMap.keys()
+
+ def values(self):
+ return self._values
+
+ def items(self):
+ items = []
+ for key in self.keys():
+ items.append((key, self[key]))
+ return items
+
+
+ ## Additional access ##
+
+ def asList(self):
+ """
+        Returns a sequence whose values are the same as the record's,
+        in the order defined by the table.
+ """
+ # It just so happens that our implementation already has this
+ return self._values[:]
+
+ def asDict(self):
+ """ Returns a dictionary whose key-values match the table record. """
+ dict = {}
+ nameToIndexMap = self._nameToIndexMap
+ for key in nameToIndexMap.keys():
+ dict[key] = self._values[nameToIndexMap[key]]
+ return dict
+
+
+ ## valueForFoo() family ##
+
+ def valueForKey(self, key, default=NoDefault):
+ if default is NoDefault:
+ return self[key]
+ else:
+ return self.get(key, default)
+
+ def valueForAttr(self, attr, default=NoDefault):
+ return self.valueForKey(attr['Name'], default)
+
+
+
+def main(args=None):
+ if args is None:
+ args = sys.argv
+ for arg in args[1:]:
+ dt = DataTable(arg)
+ print '*** %s ***' % arg
+ print dt
+ print
+
+
+if __name__=='__main__':
+ main()
diff --git a/paste/webkit/FakeWebware/MiscUtils/DateInterval.py b/paste/webkit/FakeWebware/MiscUtils/DateInterval.py
new file mode 100644
index 0000000..9081ef1
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/DateInterval.py
@@ -0,0 +1,70 @@
+"""
+DateInterval.py
+
+Convert interval strings (in the form of 1w2d, etc) to
+seconds, and back again. Month and year conversions are approximate
+(a month is treated as 30 days and a year as 365 days, so leap years
+are not accounted for).
+
+Accepts (y)ear, (b) for month, (w)eek, (d)ay, (h)our, (m)inute, (s)econd.
+
+Exports only timeEncode and timeDecode functions.
+"""
+
+import re
+
+second = 1
+minute = second*60
+hour = minute*60
+day = hour*24
+week = day*7
+month = day*30
+year = day*365
+timeValues = {
+ 'y': year,
+ 'b': month,
+ 'w': week,
+ 'd': day,
+ 'h': hour,
+ 'm': minute,
+ 's': second,
+ }
+timeOrdered = timeValues.items()
+timeOrdered.sort(lambda a, b: -cmp(a[1], b[1]))
+
+def timeEncode(seconds):
+ """Encodes a number of seconds (representing a time interval)
+    into a form like 2d1h3s (largest units first)."""
+ s = ''
+ for char, amount in timeOrdered:
+ if seconds >= amount:
+ i, seconds = divmod(seconds, amount)
+ s += '%i%s' % (i, char)
+ return s
+
+_timeRE = re.compile(r'[0-9]+[a-zA-Z]')
+def timeDecode(s):
+ """Decodes a number in the format 1h4d3m (1 hour, 3 days, 3 minutes)
+ into a number of seconds"""
+ time = 0
+ for match in allMatches(s, _timeRE):
+ char = match.group(0)[-1].lower()
+ if not timeValues.has_key(char):
+ # @@: should signal error
+ continue
+ time += int(match.group(0)[:-1]) * timeValues[char]
+ return time
+
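+# Illustrative round trip (editor's note, not part of the original file);
+# 90061 seconds is 1 day, 1 hour, 1 minute and 1 second:
+#
+#     timeEncode(90061)       # -> '1d1h1m1s'
+#     timeDecode('1d1h1m1s')  # -> 90061
+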
+# @@-sgd 2002-12-23 - this function does not belong in this module, find a better place.
+def allMatches(source, regex):
+ """Return a list of matches for regex in source
+ """
+ pos = 0
+ end = len(source)
+ rv = []
+ match = regex.search(source, pos)
+ while match:
+ rv.append(match)
+ match = regex.search(source, match.end() )
+ return rv
+
+__all__ = ['timeEncode', 'timeDecode']
diff --git a/paste/webkit/FakeWebware/MiscUtils/DictForArgs.py b/paste/webkit/FakeWebware/MiscUtils/DictForArgs.py
new file mode 100644
index 0000000..85cc545
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/DictForArgs.py
@@ -0,0 +1,209 @@
+"""
+DictForArgs.py
+
+
+See the doc string for the DictForArgs() function.
+
+Also, there is a test suite in Testing/TestDictForArgs.py
+"""
+
+
+import re, string
+
+
+class DictForArgsError(Exception):
+ pass
+
+def _SyntaxError(s):
+ raise DictForArgsError, 'Syntax error: %s' % repr(s)
+
+def DictForArgs(s):
+ """
+ Takes an input such as:
+ x=3
+ name="foo"
+ first='john' last='doe'
+ required border=3
+
+ And returns a dictionary representing the same. For keys that aren't
+ given an explicit value (such as 'required' above), the value is '1'.
+
+ All values are interpreted as strings. If you want ints and floats,
+ you'll have to convert them yourself.
+
+ This syntax is equivalent to what you find in HTML and close to other
+    markup languages such as XML.
+
+ Returns {} for an empty string.
+
+ The informal grammar is:
+ (NAME [=NAME|STRING])*
+
+ Will raise DictForArgsError if the string is invalid.
+
+ See also: PyDictForArgs() and ExpandDictWithExtras() in this module
+ """
+
+ s = string.strip(s)
+
+ # Tokenize
+
+ # @@ 2001-09-29 ce: push these outside for better performance
+ nameRE = re.compile(r'\w+')
+ equalsRE = re.compile(r'\=')
+ stringRE = re.compile(r'''
+ "[^"]+"|
+ '[^']+'|
+ \S+''', re.VERBOSE) #'
+ whiteRE = re.compile(r'\s+')
+ REs = [nameRE, equalsRE, stringRE, whiteRE]
+
+ verbose = 0
+ matches = []
+ start = 0
+ sLen = len(s)
+
+ if verbose:
+ print '>> DictForArgs(%s)' % repr(s)
+ print '>> sLen:', sLen
+ while start<sLen:
+ for regEx in REs:
+ if verbose: print '>> try:', regEx
+ match = regEx.match(s, start)
+ if verbose: print '>> match:', match
+ if match is not None:
+ if match.re is not whiteRE:
+ matches.append(match)
+ start = match.end()
+ if verbose: print '>> new start:', start
+ break
+ else:
+ _SyntaxError(s)
+
+ if verbose:
+ names = []
+ for match in matches:
+ if match.re is nameRE:
+ name = 'name'
+ elif match.re is equalsRE:
+ name = 'equals'
+ elif match.re is stringRE:
+ name = 'string'
+ elif match.re is whiteRE:
+ name = 'white'
+ names.append(name)
+ #print '>> match =', name, match
+ print '>> names =', names
+
+
+ # Process tokens
+
+ # At this point we have a list of all the tokens (as re.Match objects)
+ # We need to process these into a dictionary.
+
+ dict = {}
+ matchesLen = len(matches)
+ i = 0
+ while i<matchesLen:
+ match = matches[i]
+ if i+1<matchesLen:
+ peekMatch = matches[i+1]
+ else:
+ peekMatch = None
+ if match.re is nameRE:
+ if peekMatch is not None:
+ if peekMatch.re is nameRE:
+ # We have a name without an explicit value
+ dict[match.group()] = '1'
+ i = i + 1
+ continue
+ if peekMatch.re is equalsRE:
+ if i+2<matchesLen:
+ target = matches[i+2]
+ if target.re is nameRE or target.re is stringRE:
+ value = target.group()
+ if value[0]=="'" or value[0]=='"':
+ value = value[1:-1]
+ #value = "'''%s'''" % value[1:-1]
+ #value = eval(value)
+ dict[match.group()] = value
+ i = i + 3
+ continue
+ else:
+ dict[match.group()] = '1'
+ i = i + 1
+ continue
+ _SyntaxError(s)
+
+
+ if verbose: print
+
+ return dict
+
+
+from string import letters
+
+def PyDictForArgs(s):
+ """
+ Takes an input such as:
+ x=3
+ name="foo"
+ first='john'; last='doe'
+ list=[1, 2, 3]; name='foo'
+
+ And returns a dictionary representing the same.
+
+ All values are interpreted as Python expressions. Any error in these
+ expressions will raise the appropriate Python exception. This syntax
+ allows much more power than DictForArgs() since you can include
+ lists, dictionaries, actual ints and floats, etc.
+
+ This could also open the door to hacking your software if the input
+ comes from a tainted source such as an HTML form or an unprotected
+ configuration file.
+
+ Returns {} for an empty string.
+
+ See also: DictForArgs() and ExpandDictWithExtras() in this module
+ """
+ if s:
+ s = s.strip()
+ if not s:
+ return {}
+
+ # special case: just a name
+ # meaning: name=1
+ # example: isAbstract
+ if s.find(' ')==-1 and s.find('=')==-1 and s[0] in letters:
+ s += '=1'
+
+ results = {}
+ exec s in results
+
+ del results['__builtins__']
+ return results
+
+
+def ExpandDictWithExtras(dict, key='Extras', delKey=1, dictForArgs=DictForArgs):
+ """
+ Returns a dictionary with the 'Extras' column expanded by DictForArgs(). For example, given:
+ { 'Name': 'foo', 'Extras': 'x=1 y=2' }
+ The return value is:
+ { 'Name': 'foo', 'x': '1', 'y': '2' }
+    The key argument controls what key in the dictionary is used to hold the extra arguments. The delKey argument controls whether that key and its corresponding value are removed from the returned dictionary (they are removed when delKey is true).
+ The same dictionary may be returned if there is no extras key.
+ The most typical use of this function is to pass a row from a DataTable that was initialized from a CSV file (e.g., a spreadsheet or tabular file). FormKit and MiddleKit both use CSV files and allow for an Extras column to specify attributes that occur infrequently.
+ """
+
+ if dict.has_key(key):
+ newDict = {}
+ # We use the following for loop rather than newDict.update()
+ # so that the dict arg can be dictionary-like.
+ for k, v in dict.items():
+ newDict[k] = v
+ if delKey:
+ del newDict[key]
+ newDict.update(dictForArgs(dict[key]))
+ return newDict
+ else:
+ return dict
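+
+# Illustrative results (editor's note, not part of the original file;
+# dictionary key order may vary):
+#
+#     DictForArgs('first="john" required')  # -> {'first': 'john', 'required': '1'}
+#     PyDictForArgs('x=3; name="foo"')      # -> {'x': 3, 'name': 'foo'}
+#     ExpandDictWithExtras({'Name': 'foo', 'Extras': 'x=1 y=2'})
+#                                           # -> {'Name': 'foo', 'x': '1', 'y': '2'}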
diff --git a/paste/webkit/FakeWebware/MiscUtils/Error.py b/paste/webkit/FakeWebware/MiscUtils/Error.py
new file mode 100644
index 0000000..b26f3e1
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/Error.py
@@ -0,0 +1,50 @@
+from UserDict import UserDict
+
+
+class Error(UserDict):
+ """
+ An error is a dictionary-like object, containing a specific user-readable error message and an object associated with it. Since Error inherits UserDict, other informative values can be arbitrarily attached to errors. For this reason, subclassing Error is rare.
+
+ Example:
+ err = Error(user, 'Invalid password.')
+ err['time'] = time.time()
+ err['attempts'] = attempts
+
+ The object and message can be accessed via methods:
+ print err.object()
+ print err.message()
+
+ When creating errors, you can pass None for both the object and the message. You can also pass additional values, which are then included in the error:
+ >>> err = Error(None, 'Too bad.', timestamp=time.time())
+ >>> err.keys()
+ ['timestamp']
+
+ Or include the values as a dictionary, instead of keyword arguments:
+ >>> info = {'timestamp': time.time()}
+ >>> err = Error(None, 'Too bad.', info)
+
+ Or you could even do both if you needed to.
+ """
+
+ def __init__(self, object, message, valueDict={}, **valueArgs):
+ """ Initializes an error with the object the error occurred for, and the user-readable error message. The message should be self sufficient such that if printed by itself, the user would understand it. """
+ UserDict.__init__(self)
+ self._object = object
+ self._message = message
+ self.update(valueDict)
+ self.update(valueArgs)
+
+ def object(self):
+ return self._object
+
+ def message(self):
+ return self._message
+
+ def __repr__(self):
+ return 'ERROR(object=%s; message=%s; data=%s)' % (repr(self._object), repr(self._message), repr(self.data))
+
+ def __str__(self):
+ return 'ERROR: %s' % self._message
+
+ def __nonzero__(self):
+ return 1
diff --git a/paste/webkit/FakeWebware/MiscUtils/Funcs.py b/paste/webkit/FakeWebware/MiscUtils/Funcs.py
new file mode 100644
index 0000000..236f925
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/Funcs.py
@@ -0,0 +1,325 @@
+"""
+Funcs.py
+
+Funcs.py, a member of MiscUtils, holds functions that don't fit anywhere else.
+"""
+
+import md5, os, random, string, time, sys, tempfile
+True, False = 1==1, 1==0
+
+
+def commas(number):
+ """ Returns the given number as a string with commas to separate the thousands positions. The number can be a float, int, long or string. Returns None for None. """
+ if number is None:
+ return None
+ if not number:
+ return str(number)
+ number = list(str(number))
+ if '.' in number:
+ i = number.index('.')
+ else:
+ i = len(number)
+ while 1:
+ i = i-3
+ if i<=0 or number[i-1]=='-':
+ break
+ number[i:i] = [',']
+ return string.join(number, '')
+
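+# Illustrative results (editor's note, not part of the original file):
+#
+#     commas(1234567.89)  # -> '1,234,567.89'
+#     commas(-1234)       # -> '-1,234'
+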
+
+def charWrap(s, width, hanging=0):
+ """ Returns a new version of the string word wrapped with the given width and hanging indent. The font is assumed to be monospaced.
+    This can be useful for including text between <pre> </pre> tags, since <pre> will not word wrap and lengthy lines will increase the width of a web page.
+ It can also be used to help delineate the entries in log-style output by passing hanging=4.
+ """
+ import string
+ if not s:
+ return s
+ assert hanging<width
+ hanging = ' ' * hanging
+ lines = string.split(s, '\n')
+ i = 0
+ while i<len(lines):
+ s = lines[i]
+ while len(s)>width:
+ t = s[width:]
+ s = s[:width]
+ lines[i] = s
+ i = i + 1
+ lines.insert(i, None)
+ s = hanging + t
+ else:
+ lines[i] = s
+ i = i + 1
+ return string.join(lines, '\n')
+
+# Python 2.3 contains mktemp and mkstemp, both of which accept a
+# directory argument. Earlier versions of Python only contained
+# mktemp which didn't accept a directory argument. So we have to
+# implement our own versions here.
+if sys.version_info >= (2, 3, None, None):
+ # Just use the Python 2.3 built-in versions.
+ from tempfile import mktemp, mkstemp
+else:
+ def mktemp(suffix="", dir=None):
+ """
+ User-callable function to return a unique temporary file name.
+
+ Duplicated from Python's own tempfile with the optional "dir"
+ argument added. This allows customization of the directory, without
+ having to take over the module level variable, tempdir.
+ """
+ if not dir: dir = tempfile.gettempdir()
+ pre = tempfile.gettempprefix()
+ while 1:
+ i = tempfile._counter.get_next()
+ file = os.path.join(dir, pre + str(i) + suffix)
+ if not os.path.exists(file):
+ return file
+
+ def mkstemp(suffix="", dir=None):
+ """
+ User-callable function to return a tuple containing:
+ - a os-level file handle for the temp file, open for read/write
+ - the absolute path of that file
+
+ Note that this version of the function is not as secure as the
+ version included in Python 2.3.
+ """
+ path = mktemp(suffix, dir)
+ return os.open(path, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0600), path
+
+def wordWrap(s, width=78):
+ """
+ Returns a version of the string word wrapped to the given width.
+ Respects existing newlines in the string.
+
+ Taken from:
+ http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/148061
+ """
+ return reduce(
+ lambda line, word, width=width: "%s%s%s" % (
+ line,
+ ' \n'[(len(line[line.rfind('\n')+1:]) + len(word) >= width)],
+ word
+ ),
+ s.split(' ')
+ )
+
+
+def dateForEmail(now=None):
+ """ Returns a properly formatted date/time string for email messages """
+ if now is None:
+ now = time.localtime(time.time())
+ if now[8]==1:
+ offset = -time.altzone / 60
+ else:
+ offset = -time.timezone / 60
+ if offset<0:
+ plusminus = '-'
+ else:
+ plusminus = '+'
+ return time.strftime('%a, %d %b %Y %H:%M:%S ', now) + plusminus + '%02d%02d' % (abs(offset/60), abs(offset%60))
+
+
+def hostName():
+ """
+    Returns the host name, which is taken first from the os environment and, failing that, from the 'hostname' executable. May return None if neither attempt succeeded.
+ The environment keys checked are HOST and HOSTNAME both upper and lower case.
+ """
+ for name in ['HOST', 'HOSTNAME', 'host', 'hostname']:
+ hostName = os.environ.get(name, None)
+ if hostName:
+ break
+ if not hostName:
+ hostName = string.strip(os.popen('hostname').read())
+ if not hostName:
+ hostName = None
+ else:
+ hostName = string.lower(hostName)
+ return hostName
+
+
+_localIP = None
+
+def localIP(remote=('www.yahoo.com', 80), useCache=1):
+ """
+ Gets the "public" address of the local machine, i.e. that address
+ which is connected to the general Internet.
+
+ This function connects to a remote HTTP server the first time it is
+ invoked (or every time it is invoked with useCache=0). If that is
+ not acceptable, pass remote=None, but be warned that the result is
+ less likely to be externally visible.
+
+ Getting your local ip is actually quite complex. If this function
+ is not serving your needs then you probably need to think deeply
+ about what you really want and how your network is really set up.
+ Search comp.lang.python for "local ip" for more information.
+ http://groups.google.com/groups?q=%22local+ip%22+group:comp.lang.python.*
+ """
+ global _localIP
+ if useCache and _localIP:
+ return _localIP
+ import socket
+ if remote:
+ # code from Donn Cave on comp.lang.python
+
+ # My notes:
+ # Q: Why not use this? socket.gethostbyname(socket.gethostname())
+ # A: On some machines, it returns '127.0.0.1' - not what we had in mind.
+ #
+ # Q: Why not use this? socket.gethostbyname_ex(socket.gethostname())[2]
+ # A: Because some machines have more than one IP (think "VPN", etc.) and
+ # there is no easy way to tell which one is the externally visible IP.
+
+ try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect(remote)
+ ip, port = s.getsockname()
+ s.close()
+ _localIP = ip
+ return _localIP
+ except socket.error:
+ # oh, well. we'll use the local method
+ pass
+
+ addresses = socket.gethostbyname_ex(socket.gethostname())[2]
+ for address in addresses:
+ if address!='127.0.0.1':
+ if useCache:
+ _localIP = address
+ return address
+ if useCache:
+ _localIP = addresses[0]
+ return _localIP
+
+
+def safeDescription(x, what='what'):
+ """
+ Returns the repr() of x and its class (or type) for help in
+ debugging. A major benefit here is that exceptions from
+ repr() are consumed. This is important in places like
+ "assert" where you don't want to lose the assertion
+ exception in your attempt to get more information.
+
+ Example use:
+ assert isinstance(foo, Foo), safeDescription(foo)
+ print "foo:", safeDescription(foo) # won't raise exceptions
+
+ # better output format:
+ assert isinstance(foo, Foo), safeDescription(foo, 'foo')
+ print safeDescription(foo, 'foo')
+ """
+ try:
+ xRepr = repr(x)
+ except Exception, e:
+ xRepr = _descExc('x', e)
+ if hasattr(x, '__class__'):
+ try:
+ cRepr = repr(x.__class__)
+ except Exception, e:
+ cRepr = _descExc('x.__class__', e)
+ return '%s=%s class=%s' % (what, xRepr, cRepr)
+ else:
+ try:
+ cRepr = repr(type(x))
+ except Exception, e:
+ cRepr = _descExc('type(x)', e)
+ return '%s=%s type=%s' % (what, xRepr, cRepr)
+
+def _descExc(reprOfWhat, e):
+ """
+ Returns a description of an exception. This is a private function
+ for use by safeDescription().
+ """
+ try:
+ return '(exception from repr(%s): %s: %s)' % (reprOfWhat, e.__class__, e)
+ except:
+ return '(exception from repr(%s))' % reprOfWhat
+
+def timestamp(numSecs=None):
+ """
+ Returns a dictionary whose keys give different versions of the timestamp:
+ 'numSecs': the number of seconds
+ 'tuple': (year, month, day, hour, min, sec)
+ 'pretty': 'YYYY-MM-DD HH:MM:SS'
+ 'condensed': 'YYYYMMDDHHMMSS'
+ 'dashed': 'YYYY-MM-DD-HH-MM-SS'
+    The focus is on the year, month, day, hour, minute and second, with no additional information such as timezone or day of year. This form of timestamp is often ideal for print statements, logs and filenames.
+ If the current number of seconds is not passed, then the current time is taken.
+ The 'pretty' format is ideal for print statements, while the 'condensed' and 'dashed' formats are generally more appropriate for filenames.
+ """
+ if numSecs is None:
+ numSecs = time.time()
+ tuple = time.localtime(numSecs)[:6]
+ pretty = '%4i-%02i-%02i %02i:%02i:%02i' % tuple
+ condensed = '%4i%02i%02i%02i%02i%02i' % tuple
+ dashed = '%4i-%02i-%02i-%02i-%02i-%02i' % tuple
+ return locals()
+
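+# Illustrative use (editor's note, not part of the original file); the actual
+# values depend on the current local time:
+#
+#     stamp = timestamp()
+#     stamp['pretty']     # e.g. '2005-04-22 03:18:20'
+#     stamp['condensed']  # e.g. '20050422031820'
+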
+
+def uniqueId(forObject=None):
+ """
+    Generates an opaque identifier string that is practically guaranteed to be unique.
+    If an object is passed, then its id() is incorporated into the generation.
+    Relies on md5 and returns a 32-character string.
+ """
+ r = [time.time(), random.random(), os.times()]
+ if forObject is not None:
+ r.append(id(forObject))
+ md5object = md5.new(str(r))
+ try:
+ return md5object.hexdigest()
+ except AttributeError:
+ # Older versions of Python didn't have hexdigest, so we'll do it manually
+ hexdigest = []
+ for char in md5object.digest():
+ hexdigest.append('%02x' % ord(char))
+ return string.join(hexdigest, '')
+
+
+def valueForString(s):
+ """
+ For a given string, returns the most appropriate Pythonic value
+ such as None, a long, an int, a list, etc. If none of those
+ make sense, then returns the string as-is.
+
+ "None", "True" and "False" are case-insensitive because there is
+ already too much case sensitivity in computing, damn it!
+ """
+ if not s:
+ return s
+ try:
+ return int(s)
+ except ValueError:
+ pass
+ try:
+ return long(s)
+ except ValueError:
+ pass
+ try:
+ return float(s)
+ except ValueError:
+ pass
+ t = s.lower()
+ if t=='none':
+ return None
+ if t=='true':
+ return True
+ if t=='false':
+ return False
+ if s[0] in '[({"\'':
+ return eval(s)
+ return s
+
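+# Illustrative results (editor's note, not part of the original file):
+#
+#     valueForString('3')       # -> 3
+#     valueForString('2.5')     # -> 2.5
+#     valueForString('None')    # -> None
+#     valueForString('[1, 2]')  # -> [1, 2]
+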
+
+### Deprecated
+
+def Commas(number):
+ print 'DEPRECATED: MiscUtils.Funcs.Commas() on 02/23/01 in ver 0.5. Use commas() instead.'
+ return commas(number)
+
+def CharWrap(s, width, hanging=0):
+ print 'DEPRECATED: MiscUtils.Funcs.CharWrap() on 02/23/01 in ver 0.5. Use charWrap() instead.'
+ return charWrap(s, width, hanging)
diff --git a/paste/webkit/FakeWebware/MiscUtils/M2PickleRPC.py b/paste/webkit/FakeWebware/MiscUtils/M2PickleRPC.py
new file mode 100644
index 0000000..2976f7f
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/M2PickleRPC.py
@@ -0,0 +1,67 @@
+"""
+M2Crypto-enhanced transport for PickleRPC
+
+This lets you use M2Crypto for SSL encryption.
+
+Based on m2xmlrpclib.py which is
+ Copyright (c) 1999-2002 Ng Pheng Siong. All rights reserved
+"""
+
+from PickleRPC import Transport
+import base64, string, sys
+from M2Crypto import SSL, httpslib, m2urllib
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+__version__ = 1 # version of M2PickleRPC
+
+class M2Transport(Transport):
+ user_agent = "M2PickleRPC.py/%s - %s" % (__version__, Transport.user_agent)
+
+ def __init__(self, ssl_context=None):
+ if ssl_context is None:
+ self.ssl_ctx=SSL.Context('sslv23')
+ else:
+ self.ssl_ctx=ssl_context
+
+ def make_connection(self, host):
+ _host, _port = m2urllib.splitport(host)
+ if sys.version[0] == '2':
+ return httpslib.HTTPS(_host, int(_port), ssl_context=self.ssl_ctx)
+ elif sys.version[:3] == '1.5':
+ return httpslib.HTTPS(self.ssl_ctx, _host, int(_port))
+ else:
+ raise RuntimeError, 'unsupported Python version'
+
+ # @@ workarounds below are necessary because M2Crypto seems to
+ # return from fileobject.read() early! So we have to call it
+ # over and over to get the full data.
+
+ def parse_response(self, f):
+ """
+        Work around the M2Crypto issue mentioned above
+ """
+ sio = StringIO()
+ while 1:
+ chunk = f.read()
+ if not chunk:
+ break
+ sio.write(chunk)
+ sio.seek(0)
+ return Transport.parse_response(self, sio)
+
+ def parse_response_gzip(self, f):
+ """
+        Work around the M2Crypto issue mentioned above
+ """
+ sio = StringIO()
+ while 1:
+ chunk = f.read()
+ if not chunk:
+ break
+ sio.write(chunk)
+ sio.seek(0)
+ return Transport.parse_response_gzip(self, sio)
+
diff --git a/paste/webkit/FakeWebware/MiscUtils/MixIn.py b/paste/webkit/FakeWebware/MiscUtils/MixIn.py
new file mode 100644
index 0000000..26dde1d
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/MixIn.py
@@ -0,0 +1,70 @@
+from types import MethodType
+import sys
+
+if hasattr(sys, 'version_info') and sys.version_info[0]>=2:
+ def MixIn(pyClass, mixInClass, makeAncestor=0, mixInSuperMethods=0):
+ """
+ Mixes in the attributes of the mixInClass into the pyClass. These attributes are typically methods (but don't have to be). Note that private attributes, denoted by a double underscore, are not mixed in. Collisions are resolved by the mixInClass' attribute overwriting the pyClass'. This gives mix-ins the power to override the behavior of the pyClass.
+
+ After using MixIn(), instances of the pyClass will respond to the messages of the mixInClass.
+
+ An assertion fails if you try to mix in a class with itself.
+
+        The pyClass will be given a new attribute mixInsForCLASSNAME which is a list of all mixInClasses that have ever been installed, in the order they were installed. You may find this useful for inspection and debugging.
+
+        You are advised to install your mix-ins at the start-up of your program, prior to the creation of any objects. This approach will result in fewer headaches. But like most things in Python, you're free to do whatever you're willing to live with. :-)
+
+ There is a bitchin' article in the Linux Journal, April 2001, "Using Mix-ins with Python" by Chuck Esterbrook which gives a thorough treatment of this topic.
+
+        An example that resides in Webware is MiddleKit.Core.ModelUser.py, which installs mix-ins for SQL adapters. Search for "MixIn(".
+
+ If makeAncestor is 1, then a different technique is employed: the mixInClass is made the first base class of the pyClass. You probably don't need to use this and if you do, be aware that your mix-in can no longer override attributes/methods in pyClass.
+
+ If mixInSuperMethods is 1, then support will be enabled for you to be able to call the original or
+ "parent" method from the mixed-in method. This is done like so:
+
+ class MyMixInClass:
+ def foo(self):
+ MyMixInClass.mixInSuperFoo(self) # call the original method
+ # now do whatever you want
+
+ This function only exists if you are using Python 2.0 or later. Python 1.5.2 has a problem where functions (as in aMethod.im_func) are tied to their class, when in fact, they should be totally generic with only the methods being tied to their class. Apparently this was fixed in Py 2.0.
+ """
+ assert mixInClass is not pyClass, 'mixInClass = %r, pyClass = %r' % (mixInClass, pyClass)
+ if makeAncestor:
+ if mixInClass not in pyClass.__bases__:
+ pyClass.__bases__ = (mixInClass,) + pyClass.__bases__
+ else:
+ # Recursively traverse the mix-in ancestor classes in order
+ # to support inheritance
+ baseClasses = list(mixInClass.__bases__)
+ baseClasses.reverse()
+ for baseClass in baseClasses:
+ MixIn(pyClass, baseClass)
+
+ # Track the mix-ins made for a particular class
+ attrName = 'mixInsFor'+pyClass.__name__
+ mixIns = getattr(pyClass, attrName, None)
+ if mixIns is None:
+ mixIns = []
+ setattr(pyClass, attrName, mixIns)
+
+ # Make sure we haven't done this before
+ # Er, woops. Turns out we like to mix-in more than once sometimes.
+ #assert not mixInClass in mixIns, 'pyClass = %r, mixInClass = %r, mixIns = %r' % (pyClass, mixInClass, mixIns)
+
+ # Record our deed for future inspection
+ mixIns.append(mixInClass)
+
+ # Install the mix-in methods into the class
+ for name in dir(mixInClass):
+ if not name.startswith('__'): # skip private members
+ member = getattr(mixInClass, name)
+
+ if type(member) is MethodType and mixInSuperMethods:
+ if hasattr(pyClass, name):
+ origmember = getattr(pyClass, name)
+ setattr(mixInClass, 'mixInSuper' + name[0].upper() + name[1:], origmember)
+ if type(member) is MethodType:
+ member = member.im_func
+ setattr(pyClass, name, member)
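+
+# Illustrative use with made-up classes (editor's note, not part of the
+# original file):
+#
+#     class Greeter:
+#         def greet(self):
+#             return 'hi'
+#
+#     class LoudMixIn:
+#         def greet(self):
+#             return 'HI!'
+#
+#     MixIn(Greeter, LoudMixIn)
+#     Greeter().greet()  # -> 'HI!'; the mixed-in method overrides the original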
diff --git a/paste/webkit/FakeWebware/MiscUtils/NamedValueAccess.py b/paste/webkit/FakeWebware/MiscUtils/NamedValueAccess.py
new file mode 100644
index 0000000..62df004
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/NamedValueAccess.py
@@ -0,0 +1,570 @@
+"""
+NamedValueAccess provides functions, a mix-in class and a wrapper class
+all for accessing Python objects by named attributes. You can use whichever
+of the three approaches best suits your needs and style.
+
+
+NOTES
+
+If Python provided a root class 'Object' in the same tradition as other
+OOP languages such as Smalltalk, Objective-C and Java, then we could
+dispense with the global functions and simply stick with the mix-in.
+
+
+TO DO
+
+* The mix-in's valueForKey() could be slightly out of alignment with the
+ function, since they have different implementations. However, the test
+ cases pass for both right now.
+
+* Should the valueForKey() function provide for caching of bindings in
+  the same manner as the mix-in does?
+
+ If not, should the mix-in allow an option to *not* cache bindings?
+
+* hasValueForKey() function? (We already have a method in the mix-in)
+
+* valuesForNames() in the mix-in:
+ * Change parameter 'keys' to 'names'
+ * Use NoDefault instead of None in the parameters
+ * Revisit doc string and test cases
+
+* Docs: More improvements to doc strings.
+
+* Testing: increase coverage
+
+* Rename? class NamedValueAccess+ible:
+
+* Benchmarking: Set this up in a new file:
+ Testing/BenchNamedValueAccess.py
+  so we can experiment with caching vs. not and other techniques.
+
+
+PAST DESIGN DECISIONS
+
+* Only if a name binds to a method is it invoked. Another approach is
+ to invoke any value that is __call__able, but that is unPythonic: If
+ obj.foo is a class or a function then obj.foo gives that class or
+  function, not the result of invoking it. Method invocation is the only
+ convenience we provide, because that's one of the major points of
+ providing this.
+
+
+CREDIT
+
+Chuck Esterbrook <echuck@mindspring.com>
+Tavis Rudd <tavis@calrudd.com>
+"""
+
+
+import types
+import string, sys
+from time import time
+from MiscUtils import NoDefault
+
+
+# if technique is zero, use bound methods in the _kvGetBindings cache, otherwise use unbound
+# @@ 2000-05-31 ce: after additional testing we can probably drop the technique=0 allowance
+technique = 1
+
+
+## Exceptions ##
+
+class NamedValueAccessError(LookupError): pass
+class ValueForKeyError(NamedValueAccessError): pass
+
+
+class NamedValueAccess:
+ """
+    This class is intended to be an ancestor class such that you can say:
+ from NamedValueAccess import *
+ age = someObj.valueForName("age")
+ name = someObj.valueForName("info.fields.name")
+
+ This can be useful in setups where you wish to textually refer to the objects
+ in a program, such as an HTML template processed in the context of an
+ object-oriented framework.
+
+ Keys can be matched to either methods or ivars and with or without underscores.
+
+ valueForName() can also traverse bona fide dictionaries (DictType).
+
+ You can safely import * from this module. Only the NamedValueAccess class is exported
+ (other than typical things like string and sys).
+
+ There is no __init__() method and never will be.
+
+ You can run the test suite by running this module as a program.
+
+ You'll see the terms 'key' and 'name' in the class and its documentation. A 'key'
+ is a single identifier such as 'foo'. A name could be key, or a qualified key,
+ such as 'foo.bar.boo'. Names are generally more convenient and powerful, while
+ key-oriented methods are more efficient and provide the atomic functionality that
+ name-oriented methods are built upon. From a usage point of view, you normally
+ just use the 'name' methods and forget about the 'key'.
+
+ @@ 2000-05-21 ce: This class causes problems when used in WebKit for logging.
+ Perhaps circular references?
+ Involving self?
+ Having to do with methods bound to their objects?
+
+ @@ 2000-03-03 ce: document ivars
+
+ @@ 2000-04-24 ce: Some classes like UserDict need to use getitem()
+ instead of getattr() and don't need to deal with _bindingForGetKey().
+
+ @@ 2000-05-31 ce: Rename this class to NamedValues, NamedValueAccess, ValuesByName
+
+ @@ This class probably needs to be in MiscUtils, as it's being used in that way
+ while MiddleKit was intended for "enterprise/business objects".
+ """
+
+ #
+ # Accessing values by key
+ #
+ def hasValueForKey(self, key):
+ """ Returns true if the key is available, although that does not
+ guarantee that there will not be errors caused by retrieving the key. """
+
+ return self._bindingForGetKey(key)!=None
+
+
+ def valueForKey(self, key, default=NoDefault):
+ """ Suppose key is 'foo'. This method returns the value with the following precedence:
+ 1. Methods before non-methods
+ 2. Public attributes before private attributes
+
+ More specifically, this method then returns one of the following:
+ * self.foo()
+ * self._foo()
+ * self.foo
+ * self._foo
+
+ ...or default, if it was specified,
+        otherwise invokes and returns the result of valueForUnknownKey().
+        Note that valueForUnknownKey() normally raises an exception.
+
+ See valueForName() which is a more advanced version of this method that allows
+ multiple, qualified keys.
+ """
+
+ binding = self._bindingForGetKey(key)
+
+ if not binding:
+ if default is NoDefault:
+ return self.valueForUnknownKey(key, default)
+ else:
+ return default
+
+ if type(binding) is types.MethodType:
+# @@ 2000-05-07 ce: come to a decision on exception handling for key errors
+# try:
+ if technique:
+ result = binding(self)
+ else:
+ result = binding()
+# except:
+ # @@ 2000-02-18: Improve next line with exception info
+# raise NamedValueAccessError, 'Caught exception while accessing key (%s). Exception is %s' % (key, sys.exc_info())
+ return result
+ else:
+ return getattr(self, binding)
+
+ def hasValueForName(self, keysString):
+ try:
+ value = self.valueForName(keysString)
+ except NamedValueAccessError:
+ return 0
+ return 1
+
+ def valueForName(self, keysString, default=None):
+ """ Returns the value for the given keysString. This is the more advanced version of
+ valueForKey(), which can only handle single names. This method can handle
+ 'foo', 'foo1.foo2', 'a.b.c.d', etc. It will traverse dictionaries if needed. """
+ keys = string.split(keysString, '.')
+ return self.valueForKeySequence(keys, default)
+
+ def valueForKeySequence(self, listOfKeys, default=None):
+ # @@ 2000-02-18: document
+ return _valueForKeySequence(self, listOfKeys, default)
+
+ def valuesForNames(self, keys, default=None, defaults=None, forgive=0, includeNames=0):
+ """ Returns a list of values that match the given keys, each of which is passed
+ through valueForName() and so could be of the form 'a.b.c'.
+ keys is a sequence. default is any kind of object. defaults is a sequence.
+        forgive and includeNames are flags.
+ If default is not None, then it is substituted when a key is not found.
+        Otherwise, if defaults is not None, then its corresponding/parallel value
+ for the current key is substituted when a key is not found.
+ Otherwise, if forgive=1, then unknown keys simply don't produce any values.
+ Otherwise, if default and defaults are None, and forgive=0, then the unknown
+ keys will probably raise an exception through self.valueForUnknownKey() although
+ that method can always return a final, default value.
+        If keys is None, then None is returned. If keys is an empty list, then an
+        empty list is returned.
+ Often these last four arguments are specified by key.
+ Examples:
+ names = ['origin.x', 'origin.y', 'size.width', 'size.height']
+ obj.valuesForNames(names)
+ obj.valuesForNames(names, default=0.0)
+ obj.valuesForNames(names, defaults=[0.0, 0.0, 100.0, 100.0])
+ obj.valuesForNames(names, forgive=0)
+ @@ 2000-03-04 ce: includeNames is only supported when forgive=1.
+ It should be supported for the other cases.
+ It should be documented.
+ It should be included in the test cases.
+ """
+
+ if keys is None:
+ return None
+        if len(keys) == 0:
+ return []
+ results = []
+
+ if default is not None:
+ results = map(lambda key, myself=self, mydefault=default: myself.valueForName(key, mydefault), keys)
+ elif defaults is not None:
+            if len(keys) != len(defaults):
+ raise NamedValueAccessError, 'Keys and defaults have mismatching lengths (%d and %d).' % (len(keys), len(defaults))
+ results = map(lambda key, default, myself=self: myself.valueForName(key, default), keys, defaults)
+ elif forgive:
+ results = []
+ uniqueObject = 'uni' + 'que'
+ for key in keys:
+ value = self.valueForName(key, uniqueObject)
+ if value is not uniqueObject:
+ if includeNames:
+ results.append((key, value))
+ else:
+ results.append(value)
+ else:
+ # no defaults, no forgiveness
+ results = map(lambda key, myself=self: myself.valueForName(key), keys)
+ return results
+
+ def setValueForKey(self, key, value):
+        # @@ 2000-02-18: naming might be weird here with args reversed
+ """ Suppose key is 'foo'. This method sets the value with the following precedence:
+ 1. Public attributes before private attributes
+ 2. Methods before non-methods
+
+ More specifically, this method then uses one of the following:
+ @@ 2000-03-04 ce: fill in
+
+ ...or invokes handleUnknownSetKey().
+ """
+ raise NotImplementedError # @@ 2000-03-04 ce
+
+ def resetKeyBindings(self):
+ # @@ 2000-02-18 document this method
+ if hasattr(self, '_kvGetBindings'):
+ self._kvGetBindings = {}
+
+
+ #
+ # Errors
+ #
+ def valueForUnknownKey(self, key, default):
+ raise NamedValueAccessError, key
+
+ #def handleUnknownSetKey(self, key):
+ # raise NamedValueAccessError, key
+
+
+ #
+ # Private
+ #
+ def _bindingForGetKey(self, key):
+ """ Bindings are cached.
+ Bindings are methods or strings.
+ """
+
+ # Make _kvGetBindings dictionary if we don't have one
+ if not hasattr(self, '_kvGetBindings'):
+ self._kvGetBindings = {}
+
+ # Return the binding if we already have one
+ if self._kvGetBindings.has_key(key):
+ return self._kvGetBindings[key]
+
+ # No binding, so we have to look for the key
+
+ found = None # set to what we find
+
+ # Try plain old key
+ if hasattr(self, key):
+ found = getattr(self, key)
+ #print '0: found = ', found, type(found)
+ if type(found) is not types.MethodType:
+ found = key
+ elif technique:
+ found = getattr(self.__class__, key)
+ self._kvGetBindings[key] = found
+ #print '1: found = ', found, type(found)
+
+ # Try _key only if we didn't find a method called key
+ if type(found) is not types.MethodType:
+ underKey = '_' + key
+ if hasattr(self, underKey):
+ underAttr = getattr(self, underKey)
+ if found==None:
+ if type(underAttr) is types.MethodType:
+ if technique:
+ value = getattr(self.__class__, underKey)
+ else:
+ value = underAttr
+ else:
+ value = underKey
+ found = self._kvGetBindings[key] = value
+ else:
+ if type(underAttr) is types.MethodType:
+ if technique:
+ underAttr = getattr(self.__class__, underKey)
+ found = self._kvGetBindings[key] = underAttr
+
+ #print '2: found = ', found, type(found)
+
+ return found
+
+
+class NamedValueAccessWrapper(NamedValueAccess):
+ """
+ This provides a wrapper around an existing object which will respond
+ to the methods of NamedValueAccess. By using the wrapper, you can
+ stick with objects and methods such as obj.valueForName('x.y') (as
+ opposed to functions like valueForName()) and refrain from modifying
+ the existing class hierarchy with NamedValueAccess.
+
+ Example:
+ wrapper = NamedValueAccessWrapper(obj)
+ print wrapper.valueForName('manager.name')
+ """
+
+ def __init__(self, object):
+ self._object = object
+
+ def hasValueForKey(self, key):
+ try:
+            value = self.valueForKey(key)
+ except NamedValueAccessError:
+ return 0
+ else:
+ return 1
+
+ def valueForKey(self, key, default=NoDefault):
+        return valueForKey(self._object, key, default)
+
+ def valueForName(self, key, default=NoDefault):
+        return valueForName(self._object, key, default)
+
+
+
+#
+# Private
+#
+
+def _valueForKeySequence(obj, listOfKeys, default=None):
+ """ This is a recursive function used to implement NamedValueAccess.valueForKeySequence.
+ Besides supporting inheritors of NamedValueAccess, this function also supports
+ dictionaries, which is why it's not found in the class.
+ """
+
+ # @@ 2000-02-18: Optimize by specifying index instead of making new list
+ if type(obj) is types.DictType:
+ try:
+ value = obj[listOfKeys[0]]
+ except: # @@ 2000-03-03 ce: this exception should be more specific. probably nameerror or indexerror
+ if default is None:
+ raise NamedValueAccessError, 'Unknown key (%s) in dictionary.' % listOfKeys[0]
+ else:
+ return default
+ else:
+ value = obj.valueForKey(listOfKeys[0], default)
+ if len(listOfKeys)>1:
+ return _valueForKeySequence(value, listOfKeys[1:], default)
+ else:
+ return value
+
+
+def _dict_valueForKey(obj, key, default=NoDefault):
+ """
+ Returns the value for a given key of the dictionary-like object.
+ This is a private, custom function built in support of valueForKey().
+ """
+ try:
+ value = obj[key]
+ except AttributeError, e:
+ # We attempt to pass only on exceptions caused
+ # by obj not responding to __getitem__. Any
+ # other exceptions generated get raised up.
+ substring = "instance has no attribute '__getitem__'"
+ if e.args[0][-len(substring):]==substring:
+        if default is NoDefault:
+            raise ValueForKeyError, key
+        else:
+            return default
+ else:
+ raise
+ except KeyError, e:
+ if e.args[0]==key:
+ if default is NoDefault:
+ raise ValueForKeyError, key
+ else:
+ return default
+ else:
+ # If we get here, then the KeyError is deeper in the
+ # implementation of obj[key]
+ raise
+ else:
+ return value
+
+
+def valueForKey(obj, key, default=NoDefault):
+ """
+ Returns the value of the object named by the given key.
+
+ Suppose key is 'foo'. This method returns the value with the
+ following precedence:
+ 1. Methods before non-methods
+ 2. Attributes before keys (__getitem__)
+ 3. Public things before private things
+ (private being denoted by a preceding underscore)
+
+ More specifically, this method returns one of the following:
+ * obj.valueForKey(key) # only if the method exists
+ * obj.foo()
+ * obj._foo()
+ * obj.foo
+ * obj._foo
+ * obj['foo']
+ * obj.valueForUnknownKey(key)
+ * default # only if specified
+
+ If all of these fail, a ValueForKeyError is raised.
+
+
+ NOTES
+
+ * If the object provides a valueForKey() method, that method will be
+ invoked to do the work.
+
+ * valueForKey() works on dictionaries and dictionary-like objects.
+
+ * valueForUnknownKey() provides a hook by which objects can
+ delegate or chain their keyed value access to other objects.
+ The key and default arguments are passed to it and it should
+   generally respect the typical treatment of the default
+ argument as found throughout Webware and described in the Style
+ Guidelines.
+
+ * See valueForName() which is a more advanced version of this
+ function that allows multiple, qualified keys.
+ """
+
+ # We only accept strings for keys
+ assert type(key) is types.StringType
+
+ # Use obj.valueForKey() if it is available
+ valueForKeyMeth = getattr(obj, 'valueForKey', None)
+ if valueForKeyMeth:
+ return valueForKeyMeth(key, default)
+
+ attr = None
+ method = None
+ value = None
+ unknown = 0
+ if type(obj) is types.DictType:
+ if default is NoDefault:
+ try:
+ return obj[key]
+ except KeyError:
+ raise ValueForKeyError, key
+ else:
+ return obj.get(key, default)
+ else:
+ try:
+ klass = obj.__class__
+ except AttributeError:
+ raise AttributeError, '__class__ obj type=%r, obj=%r' % (type(obj), obj)
+ method = getattr(klass, key, None)
+ if not method:
+ underKey = '_' + key
+ method = getattr(klass, underKey, None)
+ if not method:
+ attr = getattr(obj, key, NoDefault)
+ if attr is NoDefault:
+ attr = getattr(obj, underKey, NoDefault)
+ if attr is NoDefault:
+ getitem = getattr(obj.__class__, '__getitem__', None)
+ if getitem:
+ try:
+ value = getitem(obj, key)
+ except KeyError:
+ unknown = 1
+
+# if value is not NoDefault:
+# return value
+ if not unknown:
+ if method:
+ return method(obj)
+ if attr is not NoDefault:
+ return attr
+
+ # Use obj.valueForUnknownKey() if it is available
+ valueForUnknownKey = getattr(obj, 'valueForUnknownKey', None)
+ if valueForUnknownKey:
+ return valueForUnknownKey(key, default)
+
+ if default!=NoDefault:
+ return default
+ else:
+ raise ValueForKeyError, key
+
+
+def valueForName(obj, name, default=NoDefault):
+ """
+ Returns the value of the object that is named. The name can use
+ dotted notation to traverse through a network/graph of objects.
+ Since this function relies on valueForKey() for each individual
+ component of the name, you should be familiar with the semantics
+ of that notation.
+
+ Example: valueForName(obj, 'department.manager.salary')
+ """
+
+ names = string.split(name, '.')
+ for name in names:
+ obj = valueForKey(obj, name, default)
+ if obj is default:
+ return obj
+ # 2001-04-19 ce: I suppose the above technique could result in
+ # the default being returned prematurely if it was part of the
+ # chain of names. Well, that's just the way it goes for now.
+ return obj
+
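+# Illustrative use with a made-up class (editor's note, not part of the
+# original file):
+#
+#     class Person:
+#         def __init__(self):
+#             self._name = 'Chuck'
+#         def name(self):
+#             return self._name
+#
+#     valueForKey(Person(), 'name')                  # -> 'Chuck' (the method wins)
+#     valueForName({'boss': Person()}, 'boss.name')  # -> 'Chuck'
+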
+
+# Beef up UserDict with the NamedValueAccess base class and custom versions of
+# hasValueForKey() and valueForKey(). This all means that UserDict's (such as
+# os.environ) are key/value accessible.
+# @@ 2000-05-07 ce: CGIWrapper.py duplicates this.
+def _enhanceUserDict():
+ from UserDict import UserDict
+ if not NamedValueAccess in UserDict.__bases__:
+ UserDict.__bases__ = UserDict.__bases__ + (NamedValueAccess,)
+
+ def _UserDict_hasValueForKey(self, key):
+ return self.has_key(key)
+
+ def _UserDict_valueForKey(self, key, default=NoDefault):
+ if default is NoDefault:
+ if self.has_key(key):
+ return self[key]
+ else:
+ raise ValueForKeyError, key
+ else:
+ return self.get(key, default)
+
+ setattr(UserDict, 'hasValueForKey', _UserDict_hasValueForKey)
+ setattr(UserDict, 'valueForKey', _UserDict_valueForKey)
+
+_enhanceUserDict()
diff --git a/paste/webkit/FakeWebware/MiscUtils/ParamFactory.py b/paste/webkit/FakeWebware/MiscUtils/ParamFactory.py
new file mode 100644
index 0000000..4b3f2f3
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/ParamFactory.py
@@ -0,0 +1,22 @@
+from threading import Lock
+
+class ParamFactory:
+ def __init__(self, klass, **extraMethods):
+ self.lock = Lock()
+ self.cache = {}
+ self.klass = klass
+ for name, func in extraMethods.items():
+ setattr(self, name, func)
+ def __call__(self, *args):
+ self.lock.acquire()
+ if not self.cache.has_key(args):
+ value = self.klass(*args)
+ self.cache[args] = value
+ self.lock.release()
+ return value
+ else:
+ self.lock.release()
+ return self.cache[args]
+ def allInstances(self):
+ return self.cache.values()
+
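+# Illustrative use with a made-up class (editor's note, not part of the
+# original file); the factory caches one instance per argument tuple:
+#
+#     class Point:
+#         def __init__(self, x, y):
+#             self.x, self.y = x, y
+#
+#     PointFactory = ParamFactory(Point)
+#     PointFactory(1, 2) is PointFactory(1, 2)  # evaluates true; same cached instance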
diff --git a/paste/webkit/FakeWebware/MiscUtils/PickleCache.py b/paste/webkit/FakeWebware/MiscUtils/PickleCache.py
new file mode 100644
index 0000000..e826b89
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/PickleCache.py
@@ -0,0 +1,220 @@
+"""
+PickleCache provides tools for keeping fast-loading cached versions of
+files so that subsequent loads are faster. This is similar to how Python
+silently caches .pyc files next to .py files.
+
+The typical scenario is that you have a type of text file that gets
+"translated" to Pythonic data (dictionaries, tuples, instances, ints,
+etc.). By caching the Python data on disk in pickle format, you can
+avoid the expensive translation on subsequent reads of the file.
+
+Two real life cases are MiscUtils.DataTable, which loads and represents
+comma-separated files, and MiddleKit which has an object model file.
+So for examples on using this module, load up the following files and
+search for "Pickle":
+ Webware/MiscUtils/DataTable.py
+ MiddleKit/Core/Model.py
+
+The cached file is named the same as the original file with
+'.pickle.cache' suffixed. The utility of '.pickle' is to denote the file
+format and the utility of '.cache' is to provide '*.cache' as a simple
+pattern that can be removed, ignored by backup scripts, etc.
+
+The treatment of the cached file is silent and friendly just like
+Python's approach to .pyc files. If it cannot be read or written for
+various reasons (cache is out of date, permissions are bad, wrong python
+version, etc.), then it will be silently ignored.
+
+
+GRANULARITY
+
+In constructing the test suite, I discovered that if the source file is
+newly written less than 1 second after the cached file, then the fact
+that the source file is newer will not be detected and the cache will
+still be used. I believe this is a limitation of the granularity of
+os.path.getmtime(). If anyone knows of a more granular solution, please
+let me know.
+
+This would only be a problem in programmatic situations where the source
+file was rapidly being written and read. I think that's fairly rare.
+
+
+PYTHON VERSION
+
+These operations do nothing if you don't have Python 2.2 or greater.
+
+
+SEE ALSO
+ http://www.python.org/doc/current/lib/module-pickle.html
+
+- wordwrap bar --------------------------------------------------------
+"""
+
+verbose = 0
+
+
+import os, sys, time
+from types import DictType
+from pprint import pprint
+try:
+ from cPickle import load, dump
+except ImportError:
+ from pickle import load, dump
+
+havePython22OrGreater = sys.version_info[0]>2 or (sys.version_info[0]==2 and sys.version_info[1]>=2)
+
+
+s ="""
+def readPickleCache(filename, pickleVersion=1, source=None, verbose=None):
+ return _reader.read(filename, pickleVersion, source, verbose)
+
+def writePickleCache(data, filename, pickleVersion=1, source=None, verbose=None):
+ return _writer.write(data, filename, pickleVersion, source, verbose)
+"""
+
+
+class PickleCache:
+ """
+ Just a simple abstract base class for PickleCacheReader and
+ PickleCacheWriter.
+ """
+ verbose = verbose
+
+ def picklePath(self, filename):
+ return filename + '.pickle.cache'
+
+
+class PickleCacheReader(PickleCache):
+
+ def read(self, filename, pickleVersion=1, source=None, verbose=None):
+ """
+        Returns the data from the pickle cache version of the filename, if it can be read. Otherwise returns None, which also indicates that writePickleCache() should be subsequently called after the original file is read.
+ """
+ if verbose is None:
+ v = self.verbose
+ else:
+ v = verbose
+ if v: print '>> PickleCacheReader.read() - verbose is on'
+ assert filename
+
+ if not os.path.exists(filename):
+ #if v: print 'cannot find %r' % filename
+ open(filename) # to get a properly constructed IOError
+
+ if not havePython22OrGreater:
+ #if v: print 'Python version is too old for this. Returning None.'
+ return None
+
+ didReadPickle = 0
+ shouldDeletePickle = 0
+
+ data = None
+
+ picklePath = self.picklePath(filename)
+ if os.path.exists(picklePath):
+ if os.path.getmtime(picklePath)<os.path.getmtime(filename):
+ #if v: print 'cache is out of date'
+ shouldDeletePickle = 1
+ else:
+ try:
+ #if v: print 'about to open for read %r' % picklePath
+ file = open(picklePath)
+ except IOError, e:
+ #if v: print 'cannot open cache file: %s: %s' % (e.__class__.__name__, e)
+ pass
+ else:
+ try:
+ #if v: print 'about to load'
+ dict = load(file)
+ except EOFError:
+ #if v: print 'EOFError - not loading'
+ shouldDeletePickle = 1
+ else:
+ file.close()
+ #if v: print 'finished reading'
+ assert isinstance(dict, DictType), 'type=%r dict=%r' % (type(dict), dict)
+ for key in ('source', 'data', 'pickle version', 'python version'):
+ assert dict.has_key(key), key
+ if source and dict['source']!=source:
+ #if v: print 'not from required source (%s): %s' % (source, dict['source'])
+ shouldDeletePickle = 1
+ elif dict['pickle version']!=pickleVersion:
+ #if v: print 'pickle version (%i) does not match expected (%i)' % (dict['pickle version'], pickleVersion)
+ shouldDeletePickle = 1
+ elif dict['python version']!=sys.version_info:
+ #if v: print 'python version %s does not match current %s' % (dict['python version'], sys.version_info)
+ shouldDeletePickle = 1
+ else:
+ #if v: print 'all tests pass. accepting data'
+ if v>1:
+ print 'display full dict:'
+ pprint(dict)
+ data = dict['data']
+ didReadPickle = 1
+
+ # delete the pickle file if suggested by previous conditions
+ if shouldDeletePickle:
+ try:
+ #if v: print 'attempting to remove pickle cache file'
+ os.remove(picklePath)
+ except OSError, e:
+ if v: print 'failed to remove: %s: %s' % (e.__class__.__name__, e)
+ pass
+
+ if v: print 'done reading data'; print
+
+ return data
+
+
+class PickleCacheWriter(PickleCache):
+
+ writeSleepInterval = 0.1
+
+ def write(self, data, filename, pickleVersion=1, source=None, verbose=None):
+ if verbose is None:
+ v = self.verbose
+ else:
+ v = verbose
+ if v: print '>> PickleCacheWriter.write() - verbose is on'
+ assert filename
+ sourceTimestamp = os.path.getmtime(filename)
+
+ if not havePython22OrGreater:
+ #if v: print 'Python version is too old for this. Returning None.'
+ return None
+
+ picklePath = self.picklePath(filename)
+ dict = {
+ 'source': source,
+ 'python version': sys.version_info,
+ 'pickle version': pickleVersion,
+ 'data': data,
+ }
+ if v>1:
+ print 'display full dict:'
+ pprint(dict)
+ try:
+ if v: print 'about to open for write %r' % picklePath
+ file = open(picklePath, 'w')
+ except IOError, e:
+ if v: print 'error. not writing. %s: %s' % (e.__class__.__name__, e)
+ else:
+ while 1:
+ dump(dict, file, 1) # 1 = binary format
+ file.close()
+ # make sure the cache has a newer timestamp, otherwise the cache will
+ # just get ignored and rewritten next time.
+ if os.path.getmtime(picklePath)==sourceTimestamp:
+ if v: print 'timestamps are identical. sleeping %0.2f seconds' % self.writeSleepInterval
+ time.sleep(self.writeSleepInterval)
+ file = open(picklePath, 'w')
+ else:
+ break
+
+ if v: print 'done writing data'; print
+
+
+# define module level convenience functions, readPickleCache and writePickleCache:
+
+_reader = PickleCacheReader(); readPickleCache = _reader.read
+_writer = PickleCacheWriter(); writePickleCache = _writer.write
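+
+# Illustrative call pattern (editor's note, not part of the original file;
+# 'model.csv' and expensiveParse() are hypothetical):
+#
+#     data = readPickleCache('model.csv', pickleVersion=1, source='example')
+#     if data is None:
+#         data = expensiveParse('model.csv')  # the costly translation step
+#         writePickleCache(data, 'model.csv', pickleVersion=1, source='example')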
diff --git a/paste/webkit/FakeWebware/MiscUtils/PickleRPC.py b/paste/webkit/FakeWebware/MiscUtils/PickleRPC.py
new file mode 100644
index 0000000..c74f651
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/PickleRPC.py
@@ -0,0 +1,428 @@
+"""
+PickleRPC provides a Server object for connection to Pickle-RPC servers
+for the purpose of making requests and receiving the responses.
+
+ >>> from MiscUtils.PickleRPC import Server
+ >>> server = Server('http://localhost/cgi-bin/WebKit.cgi/Examples/PickleRPCExample')
+ >>> server.multiply(10,20)
+ 200
+ >>> server.add(10,20)
+ 30
+
+
+See also: Server, WebKit.PickleRPCServlet, WebKit.Examples.PickleRPCExample
+
+
+UNDER THE HOOD
+
+Requests look like this:
+ {
+ 'version': 1, # default
+ 'action': 'call', # default
+ 'methodName': 'NAME',
+ 'args': (A, B, ...), # default = (,)
+ 'keywords': {'A': A, 'B': B, ...} # default = {}
+ }
+
+Only 'methodName' is required since that is the only key without a
+default value.
+
+Responses look like this:
+ {
+ 'timeReceived': N,
+        'timeResponded': M,
+ 'value': V,
+ 'exception': E,
+ 'requestError': E,
+ }
+
+TimeReceived is the time the initial request was received.
+TimeResponded is the time at which the response was finished, as
+close to transmission as possible. The times are expressed as
+number of seconds since the Epoch, e.g., time.time().
+
+Value is whatever the method happened to return.
+
+Exception may be the string 'occurred' to indicate that an exception
+occurred, the specific exception (such as "KeyError: foo"), or the
+entire traceback (as a string), at the discretion of the server.
+It will always be a non-empty string if it is present.
+
+RequestError is an exception such as "Missing method
+in request." (with no traceback) that indicates a problem with the
+actual request received by the Pickle-RPC server.
+
+Value, exception and requestError are mutually exclusive.
+
+
+SECURITY
+
+Pickle RPC uses the SafeUnpickler class (in this module) to
+prevent unpickling of unauthorized classes. By default, it
+doesn't allow _any_ classes to be unpickled. You can override
+allowedGlobals() or findGlobal() in a subclass as needed to
+allow specific class instances to be unpickled.
+
+Note that both Transport in this module and PickleRPCServlet in
+WebKit are derived from SafeUnpickler.
+
+
+CREDIT
+
+The implementation of this module was taken directly from Python 2.2's
+xmlrpclib and then transformed from XML-orientation to Pickle-orientation.
+
+The zlib compression was adapted from code by Skip Montanaro that I found
+here: http://manatee.mojam.com/~skip/python/
+"""
+
+
+__version__ = 1 # version of PickleRPC protocol
+
+import types
+
+try:
+ from cPickle import dumps, Unpickler, UnpicklingError
+except ImportError:
+ from pickle import dumps, Unpickler, UnpicklingError
+
+try:
+ import zlib
+except ImportError:
+ zlib = None
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+class Error(Exception):
+ """
+ The abstract exception/error class for all PickleRPC errors.
+ """
+ pass
+
+
+class ResponseError(Error):
+ """
+ These are unhandled exceptions raised when the server was computing
+ a response. These will indicate errors such as:
+ * exception in the actual target method on the server
+ * malformed responses
+ * non "200 OK" status code responses
+ """
+ pass
+
+
+# Sometimes xmlrpclib is installed as a package, sometimes not. So we'll
+# make sure it works either way.
+try:
+ from xmlrpclib.xmlrpclib import ProtocolError as _PE
+except ImportError:
+ from xmlrpclib import ProtocolError as _PE
+# @@ 2002-01-31 ce: should this be caught somewhere for special handling? Perhaps in XMLRPCServlet?
+
+class ProtocolError(ResponseError, _PE):
+ pass
+
+
+class RequestError(Error):
+ """
+ These are errors originally raised by the server complaining about
+ malformed requests.
+ """
+ pass
+
+
+class InvalidContentTypeError(ResponseError):
+
+ def __init__(self, headers, content):
+ Exception.__init__(self) #, headers, content)
+ self.headers = headers
+ self.content = content
+
+ def __repr__(self):
+ content = self.content
+ return '%s: Content type is not text/x-python-pickled-dict\nheaders = %s\ncontent =\n%s' % (
+ self.__class__.__name__, self.headers, content)
+
+ __str__ = __repr__
+
+
+class SafeUnpickler:
+ """
+ For security reasons, we don't want to allow just anyone to unpickle
+ anything. That can cause arbitrary code to be executed.
+ So this SafeUnpickler base class is used to control
+ what can be unpickled. By default it doesn't let you unpickle
+    any class instances at all, but you can create a subclass that
+ overrides allowedGlobals().
+
+ Note that the PickleRPCServlet class in WebKit is derived from this class
+ and uses its load() and loads() methods to do all unpickling.
+ """
+ def allowedGlobals(self):
+ """
+ Must return a list of (moduleName, klassName) tuples for all
+ classes that you want to allow to be unpickled.
+
+ Example:
+ return [('mx.DateTime', '_DT')]
+ allows mx.DateTime instances to be unpickled.
+ """
+ return []
+
+ def findGlobal(self, module, klass):
+ if (module, klass) not in self.allowedGlobals():
+ raise UnpicklingError, 'For security reasons, you can\'t unpickle objects from module %s with type %s' % (module, klass)
+ globals = {}
+ exec 'from %s import %s as theClass' % (module, klass) in globals
+ return globals['theClass']
+
+ def load(self, file):
+ safeUnpickler = Unpickler(file)
+ safeUnpickler.find_global = self.findGlobal
+ return safeUnpickler.load()
+
+ def loads(self, str):
+ return self.load(StringIO(str))
+
+
+# @@ 2002-01-31 ce: Could we reduce code duplication and automatically
+# inherit future improvements by actually importing and using the
+# xmlrpclib classes below either as base classes or mix-ins?
+
+
+class Server:
+    """uri [,options] -> a logical connection to a Pickle-RPC server
+
+ uri is the connection point on the server, given as
+ scheme://host/target.
+
+ The standard implementation always supports the "http" scheme. If
+ SSL socket support is available (Python 2.0), it also supports
+ "https".
+
+ If the target part and the slash preceding it are both omitted,
+ "/PickleRPC" is assumed.
+
+ See the module doc string for more information.
+ """
+
+ def __init__(self, uri, transport=None, verbose=0, binary=1, compressRequest=1, acceptCompressedResponse=1):
+ # establish a "logical" server connection
+
+ # get the url
+ import urllib
+ type, uri = urllib.splittype(uri)
+ if type not in ("http", "https"):
+ raise IOError, "unsupported Pickle-RPC protocol"
+ self.__host, self.__handler = urllib.splithost(uri)
+ if not self.__handler:
+ self.__handler = "/PickleRPC"
+
+ if transport is None:
+ if type == "https":
+ transport = SafeTransport()
+ else:
+ transport = Transport()
+ self.__transport = transport
+
+ self.__verbose = verbose
+ self.__binary = binary
+ self.__compressRequest = compressRequest
+ self.__acceptCompressedResponse = acceptCompressedResponse
+
+ def _request(self, methodName, args, keywords):
+ """
+ Call a method on the remote server.
+ """
+ request = {
+ 'version': 1,
+ 'action': 'call',
+ 'methodName': methodName,
+ 'args': args,
+ 'keywords': keywords,
+ }
+ if self.__binary:
+ request = dumps(request, 1)
+ else:
+ request = dumps(request)
+ if zlib is not None and self.__compressRequest and len(request) > 1000:
+ request = zlib.compress(request, 1)
+ compressed = 1
+ else:
+ compressed = 0
+
+ response = self.__transport.request(
+ self.__host,
+ self.__handler,
+ request,
+ verbose=self.__verbose,
+ binary=self.__binary,
+ compressed=compressed,
+ acceptCompressedResponse=self.__acceptCompressedResponse
+ )
+
+ return response
+
+ def __requestValue(self, methodName, args, keywords):
+ dict = self._request(methodName, args, keywords)
+ if dict.has_key('value'):
+ return dict['value']
+ elif dict.has_key('exception'):
+ raise ResponseError, dict['exception']
+ elif dict.has_key('requestError'):
+ raise RequestError, dict['requestError']
+ else:
+            raise RequestError, 'Response does not have a value, exception or requestError.'
+
+ def __repr__(self):
+ return "<%s for %s%s>" % (self.__class__.__name__, self.__host, self.__handler)
+
+ __str__ = __repr__
+
+ def __getattr__(self, name):
+ # magic method dispatcher
+ return _Method(self.__requestValue, name)
+
+    ## note: to call a remote object with a non-standard name, use
+    ## result = getattr(server, "strange-python-name")(args)
+
+
+ServerProxy = Server # be like xmlrpclib for those who might guess or expect it
+
+
+
+class _Method:
+ """
+ Some magic to bind a Pickle-RPC method to an RPC server.
+ Supports "nested" methods (e.g. examples.getStateName).
+ """
+
+ def __init__(self, send, name):
+ self.__send = send
+ self.__name = name
+
+ def __getattr__(self, name):
+ return _Method(self.__send, "%s.%s" % (self.__name, name))
+
+ def __call__(self, *args, **keywords): # note that keywords are supported
+ return self.__send(self.__name, args, keywords)
+
+
+class Transport(SafeUnpickler):
+ """
+ Handles an HTTP transaction to a Pickle-RPC server.
+ """
+
+ # client identifier (may be overridden)
+ user_agent = "PickleRPC/%s (by http://webware.sf.net/)" % __version__
+
+ def request(self, host, handler, request_body, verbose=0, binary=0, compressed=0,
+ acceptCompressedResponse=0):
+ # issue Pickle-RPC request
+
+ h = self.make_connection(host)
+ if verbose:
+ h.set_debuglevel(1)
+
+ self.send_request(h, handler, request_body)
+ self.send_host(h, host)
+ self.send_user_agent(h)
+ self.send_content(h, request_body, binary, compressed, acceptCompressedResponse)
+
+ errcode, errmsg, headers = h.getreply()
+
+ if errcode != 200:
+ raise ProtocolError(
+ host + handler,
+ errcode, errmsg,
+ headers
+ )
+
+ self.verbose = verbose
+
+ if h.headers['content-type'] not in ['text/x-python-pickled-dict', 'application/x-python-binary-pickled-dict']:
+ headers = h.headers.headers
+ content = h.getfile().read()
+ raise InvalidContentTypeError(headers, content)
+
+ try:
+ content_encoding = headers["content-encoding"]
+ if content_encoding and content_encoding == "x-gzip":
+ return self.parse_response_gzip(h.getfile())
+ elif content_encoding:
+ raise ProtocolError(host + handler,
+ 500,
+ "Unknown encoding type: %s" %
+ content_encoding,
+ headers)
+ else:
+ return self.parse_response(h.getfile())
+ except KeyError:
+ return self.parse_response(h.getfile())
+
+ def make_connection(self, host):
+ # create a HTTP connection object from a host descriptor
+ import httplib
+ return httplib.HTTP(host)
+
+ def send_request(self, connection, handler, request_body):
+ connection.putrequest("POST", handler)
+
+ def send_host(self, connection, host):
+ connection.putheader("Host", host)
+
+ def send_user_agent(self, connection):
+ connection.putheader("User-Agent", self.user_agent)
+
+ def send_content(self, connection, request_body, binary=0, compressed=0,
+ acceptCompressedResponse=0):
+ if binary:
+ connection.putheader("Content-Type", "application/x-python-binary-pickled-dict")
+ else:
+ connection.putheader("Content-Type", "text/x-python-pickled-dict")
+ connection.putheader("Content-Length", str(len(request_body)))
+ if compressed:
+ connection.putheader("Content-Encoding", "x-gzip")
+ if zlib is not None and acceptCompressedResponse:
+ connection.putheader("Accept-Encoding", "gzip")
+ connection.endheaders()
+ if request_body:
+ connection.send(request_body)
+
+ def parse_response(self, f):
+ return self.load(f)
+
+ def parse_response_gzip(self, f):
+ # read response from input file, decompress it, and parse it
+ # @@ gat: could this be made more memory-efficient?
+ return self.loads(zlib.decompress(f.read()))
+
+class SafeTransport(Transport):
+ """
+ Handles an HTTPS transaction to a Pickle-RPC server.
+ """
+
+ def make_connection(self, host):
+ # create a HTTPS connection object from a host descriptor
+ # host may be a string, or a (host, x509-dict) tuple
+ import httplib
+ if isinstance(host, types.TupleType):
+ host, x509 = host
+ else:
+ x509 = {}
+ try:
+ HTTPS = httplib.HTTPS
+ except AttributeError:
+ raise NotImplementedError,\
+ "your version of httplib doesn't support HTTPS"
+ else:
+ return apply(HTTPS, (host, None), x509)
+
+ def send_host(self, connection, host):
+ if isinstance(host, types.TupleType):
+ host, x509 = host
+ connection.putheader("Host", host)
+
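+
+if __name__ == '__main__':
+    # Hypothetical demonstration of the SafeUnpickler behaviour described in
+    # the module docstring; the _Probe class below is invented purely for this
+    # example and is not part of the PickleRPC protocol.
+    class _Probe:
+        pass
+    print SafeUnpickler().loads(dumps({'value': 42}))   # plain data unpickles fine
+    try:
+        SafeUnpickler().loads(dumps(_Probe()))          # instances are refused
+    except UnpicklingError, e:
+        print 'refused as expected:', e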
diff --git a/paste/webkit/FakeWebware/MiscUtils/PropertiesObject.py b/paste/webkit/FakeWebware/MiscUtils/PropertiesObject.py
new file mode 100644
index 0000000..d32f1a3
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/PropertiesObject.py
@@ -0,0 +1,155 @@
+from UserDict import UserDict
+import os, string, sys, types
+
+class WillNotRunError(Exception): pass
+
+
+class PropertiesObject(UserDict):
+ """
+ A PropertiesObject represents, in a dictionary-like fashion, the values found in a Properties.py file. That file is always included with a Webware component to advertise its name, version, status, etc. Note that a Webware component is a Python package that follows additional conventions. Also, the top level Webware directory contains a Properties.py.
+
+ Component properties are often used for:
+ * generation of documentation
+ * runtime examination of components, especially prior to loading
+
+ PropertiesObject provides additional keys:
+ * filename - the filename from which the properties were read
+ * versionString - a nicely printable string of the version
+ * requiredPyVersionString - like versionString but for requiredPyVersion instead
+ * willRun - 1 if the component will run. So far that means having the right Python version.
+    * willNotRunReason - defined only if willRun is 0; contains a readable error message
+
+ Using a PropertiesObject is better than investigating the Properties.py file directly, because the rules for determining derived keys and any future convenience methods will all be provided here.
+
+ Usage example:
+ from MiscUtils.PropertiesObject import PropertiesObject
+ props = PropertiesObject(filename)
+ for item in props.items():
+ print '%s: %s' % item
+
+ Note: We don't normally suffix a class name with "Object" as we have with this class, however, the name Properties.py is already used in our containing package and all other packages.
+ """
+
+
+ ## Init and reading ##
+
+ def __init__(self, filename=None):
+ UserDict.__init__(self)
+ if filename:
+ self.readFileNamed(filename)
+
+ def loadValues(self, dict):
+ self.update(dict)
+ self.cleanPrivateItems()
+
+
+ def readFileNamed(self, filename):
+ self['filename'] = filename
+ results = {}
+ exec open(filename) in results
+ # @@ 2001-01-20 ce: try "...in self"
+ self.update(results)
+ self.cleanPrivateItems()
+ self.createDerivedItems()
+
+
+ ## Self utility ##
+
+ def cleanPrivateItems(self):
+ """ Removes items whose keys start with a double underscore, such as __builtins__. """
+ for key in self.keys():
+ if key[:2]=='__':
+ del self[key]
+
+ def createDerivedItems(self):
+ self.createVersionString()
+ self.createRequiredPyVersionString()
+ self.createWillRun()
+
+ def _versionString(self, version):
+ """ For a sequence containing version information such as (2, 0, 0, 'pre'), this returns a printable string such as '2.0-pre'. The micro version number is only excluded from the string if it is zero. """
+ ver = map(lambda x: str(x), version)
+        if ver[2]=='0': # i.e., if the micro version is 0
+ numbers = ver[:2]
+ else:
+ numbers = ver[:3]
+ rest = ver[3:]
+ numbers = string.join(numbers, '.')
+ rest = string.join(rest, '-')
+ if rest:
+ return numbers + rest
+ else:
+ return numbers
+
+ def createVersionString(self):
+ self['versionString'] = self._versionString(self['version'])
+
+ def createRequiredPyVersionString(self):
+ self['requiredPyVersionString'] = self._versionString(self['requiredPyVersion'])
+
+ def createWillRun(self):
+ self['willRun'] = 0
+ try:
+ # Invoke each of the checkFoo() methods
+ for key in self.willRunKeys():
+ methodName = 'check' + string.upper(key[0]) + key[1:]
+ method = getattr(self, methodName)
+ method()
+ except WillNotRunError, msg:
+ self['willNotRunReason'] = msg
+ return
+ self['willRun'] = 1 # we passed all the tests
+
+ def willRunKeys(self):
+ """ Returns a list of keys whose values should be examined in order to determine if the component will run. Used by createWillRun(). """
+ return ['requiredPyVersion', 'requiredOpSys', 'deniedOpSys', 'willRunFunc']
+
+ def checkRequiredPyVersion(self):
+ pyVer = getattr(sys, 'version_info', None)
+ if not pyVer:
+            # Prior to Python 2.0 there was no version_info,
+            # so we parse it out of sys.version, which is a string
+ pyVer = string.split(sys.version)[0]
+ pyVer = string.split(pyVer, '.')
+ pyVer = map(lambda x: int(x), pyVer)
+ if tuple(pyVer)<tuple(self['requiredPyVersion']):
+ raise WillNotRunError, 'Required python ver is %s, but actual ver is %s.' % (self['requiredPyVersion'], pyVer)
+
+ def checkRequiredOpSys(self):
+ requiredOpSys = self.get('requiredOpSys', None)
+ if requiredOpSys:
+ # We accept a string or list of strings
+ if type(requiredOpSys)==types.StringType:
+ requiredOpSys = [requiredOpSys]
+ if not os.name in requiredOpSys:
+ raise WillNotRunError, 'Required op sys is %s, but actual op sys is %s.' % (requiredOpSys, os.name)
+
+ def checkDeniedOpSys(self):
+ deniedOpSys = self.get('deniedOpSys', None)
+ if deniedOpSys:
+ # We accept a string or list of strings
+ if type(deniedOpSys)==types.StringType:
+ deniedOpSys = [deniedOpSys]
+ if os.name in deniedOpSys:
+ raise WillNotRunError, 'Will not run on op sys %s and actual op sys is %s.' % (deniedOpSys, os.name)
+
+ def checkRequiredSoftware(self):
+ """ Not implemented. No op right now. """
+ # Check required software
+ # @@ 2001-01-24 ce: TBD
+ # Issues include:
+ # - order of dependencies
+ # - circular dependencies
+ # - examining Properties and willRun of dependencies
+ reqSoft = self.get('requiredSoftware', None)
+ if reqSoft:
+ for soft in reqSoft:
+ # type, name, version
+ pass
+
+ def checkWillRunFunc(self):
+ willRunFunc = self.get('willRunFunc', None)
+ if willRunFunc:
+ whyNotMsg = willRunFunc()
+ if whyNotMsg:
+ raise WillNotRunError, whyNotMsg
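+
+
+if __name__ == '__main__':
+    # Hypothetical example of the Properties.py format this class reads; the
+    # component name and version numbers below are invented for illustration.
+    import tempfile
+    fd, name = tempfile.mkstemp()
+    os.write(fd, "name = 'ExampleComponent'\n"
+                 "version = (0, 1, 0)\n"
+                 "requiredPyVersion = (2, 2, 0)\n")
+    os.close(fd)
+    props = PropertiesObject(name)
+    for item in props.items():
+        print '%s: %s' % item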
diff --git a/paste/webkit/FakeWebware/MiscUtils/__init__.py b/paste/webkit/FakeWebware/MiscUtils/__init__.py
new file mode 100644
index 0000000..0f39efb
--- /dev/null
+++ b/paste/webkit/FakeWebware/MiscUtils/__init__.py
@@ -0,0 +1,68 @@
+# MiscUtils component
+# Webware for Python
+# See Docs/index.html
+
+__all__ = ['Configurable', 'DBPool', 'DataTable', 'DictForArgs', 'Error', 'Funcs', 'MixIn', 'NamedValueAccess', 'PropertiesObject', 'unittest']
+
+
+try:
+ AbstractError # Python might build this in some day.
+except NameError:
+ class AbstractError(NotImplementedError):
+ """
+ This exception is raised by abstract methods in abstract classes. It
+        is a special case of NotImplementedError that indicates the
+        implementation will never be provided at that location;
+        instead, a subclass should provide it.
+
+ Typical usage:
+
+ from MiscUtils import AbstractError
+
+ class Foo:
+ def bar(self):
+ raise AbstractError, self.__class__
+
+        Note that adding the self.__class__ makes the resulting exception
+ *much* more useful.
+ """
+ pass
+
+# @@ 2002-11-10 ce: SubclassResponsibilityError is deprecated in favor of AbstractError, post 0.7
+SubclassResponsibilityError = AbstractError
+
+
+class NoDefault:
+ """
+ This provides a singleton "thing" which can be used to initialize
+ the "default=" arguments for different retrieval methods. For
+ example:
+
+ from MiscUtils import NoDefault
+ def bar(self, name, default=NoDefault):
+ if default is NoDefault:
+ return self._bars[name] # will raise exception for invalid key
+ else:
+ return self._bars.get(name, default)
+
+ The value None does not suffice for "default=" because it does not
+ indicate whether or not a value was passed.
+
+ Consistently using this singleton is valuable due to subclassing
+ situations:
+
+ def bar(self, name, default=NoDefault):
+ if someCondition:
+ return self.specialBar(name)
+ else:
+ return SuperClass.bar(name, default)
+
+ It's also useful if one method that uses "default=NoDefault" relies
+ on another object and method to which it must pass the default.
+ (This is similar to the subclassing situation.)
+ """
+ pass
+
+
+def InstallInWebKit(appServer):
+ pass
diff --git a/paste/webkit/FakeWebware/README.txt b/paste/webkit/FakeWebware/README.txt
new file mode 100644
index 0000000..d6a7943
--- /dev/null
+++ b/paste/webkit/FakeWebware/README.txt
@@ -0,0 +1,7 @@
+This directory can be added to sys.path so that all your Webware
+imports will still work, but will import WSGIWebKit versions of the
+objects.
+
+Items will be added here on an as-needed basis. I don't want to bring
+every public object from Webware into this setup; in part because many
+of them are not currently implemented.
diff --git a/paste/webkit/FakeWebware/WebKit/HTTPServlet.py b/paste/webkit/FakeWebware/WebKit/HTTPServlet.py
new file mode 100644
index 0000000..104d2ec
--- /dev/null
+++ b/paste/webkit/FakeWebware/WebKit/HTTPServlet.py
@@ -0,0 +1 @@
+from paste.webkit.wkservlet import HTTPServlet
diff --git a/paste/webkit/FakeWebware/WebKit/Page.py b/paste/webkit/FakeWebware/WebKit/Page.py
new file mode 100644
index 0000000..2bb01ed
--- /dev/null
+++ b/paste/webkit/FakeWebware/WebKit/Page.py
@@ -0,0 +1 @@
+from paste.webkit.wkservlet import Page
diff --git a/paste/webkit/FakeWebware/WebKit/__init__.py b/paste/webkit/FakeWebware/WebKit/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/webkit/FakeWebware/WebKit/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/webkit/FakeWebware/WebUtils/Funcs.py b/paste/webkit/FakeWebware/WebUtils/Funcs.py
new file mode 100644
index 0000000..d467257
--- /dev/null
+++ b/paste/webkit/FakeWebware/WebUtils/Funcs.py
@@ -0,0 +1,148 @@
+"""
+WebUtils.Funcs provides some basic functions that are useful in HTML and web development.
+
+You can safely import * from WebUtils.Funcs if you like.
+
+
+TO DO
+
+* Document the 'codes' arg of htmlEncode/Decode.
+
+"""
+
+import string
+
+
+htmlCodes = [
+ ['&', '&amp;'],
+ ['<', '&lt;'],
+ ['>', '&gt;'],
+ ['"', '&quot;'],
+# ['\n', '<br>']
+]
+
+htmlCodesReversed = htmlCodes[:]
+htmlCodesReversed.reverse()
+
+
+def htmlEncode(s, codes=htmlCodes):
+ """ Returns the HTML encoded version of the given string. This is useful to display a plain ASCII text string on a web page."""
+ for code in codes:
+ s = string.replace(s, code[0], code[1])
+ return s
+
+def htmlDecode(s, codes=htmlCodesReversed):
+ """ Returns the ASCII decoded version of the given HTML string. This does NOT remove normal HTML tags like <p>. It is the inverse of htmlEncode(). """
+ for code in codes:
+ s = string.replace(s, code[1], code[0])
+ return s
+
+
+
+_urlEncode = {}
+for i in range(256):
+ _urlEncode[chr(i)] = '%%%02x' % i
+for c in string.letters + string.digits + '_,.-/':
+ _urlEncode[c] = c
+_urlEncode[' '] = '+'
+
+def urlEncode(s):
+ """ Returns the encoded version of the given string, safe for using as a URL. """
+ return string.join(map(lambda c: _urlEncode[c], list(s)), '')
+
+def urlDecode(s):
+    """ Returns the decoded version of the given string. Note that invalid URLs will throw exceptions. For example, a URL whose % coding is incorrect. """
+ mychr = chr
+ atoi = string.atoi
+ parts = string.split(string.replace(s, '+', ' '), '%')
+ for i in range(1, len(parts)):
+ part = parts[i]
+ parts[i] = mychr(atoi(part[:2], 16)) + part[2:]
+ return string.join(parts, '')
+
+
+def htmlForDict(dict, addSpace=None, filterValueCallBack=None, maxValueLength=None):
+ """ Returns an HTML string with a <table> where each row is a key-value pair. """
+ keys = dict.keys()
+ keys.sort()
+ # A really great (er, bad) example of hardcoding. :-)
+ html = ['<table width=100% border=0 cellpadding=2 cellspacing=2>']
+ for key in keys:
+ value = dict[key]
+ if addSpace!=None and addSpace.has_key(key):
+ target = addSpace[key]
+ value = string.join(string.split(value, target), '%s '%target)
+ if filterValueCallBack:
+ value = filterValueCallBack(value, key, dict)
+ value = str(value)
+ if maxValueLength and len(value) > maxValueLength:
+ value = value[:maxValueLength] + '...'
+ html.append('<tr bgcolor=#F0F0F0> <td> %s </td> <td> %s &nbsp;</td> </tr>\n' % (htmlEncode(str(key)), htmlEncode(value)))
+ html.append('</table>')
+ return string.join(html, '')
+
+
+def requestURI(dict):
+ """ Returns the request URI for a given CGI-style dictionary. Uses REQUEST_URI if available, otherwise constructs and returns it from SCRIPT_NAME, PATH_INFO and QUERY_STRING. """
+ uri = dict.get('REQUEST_URI', None)
+ if uri==None:
+ uri = dict.get('SCRIPT_NAME', '') + dict.get('PATH_INFO', '')
+ query = dict.get('QUERY_STRING', '')
+ if query!='':
+ uri = uri + '?' + query
+ return uri
+
+def normURL(path):
+ """Normalizes a URL path, like os.path.normpath, but acts on
+    a URL independent of the operating system environment.
+ """
+ if not path:
+ return
+
+ initialslash = path[0] == '/'
+ lastslash = path[-1] == '/'
+ comps = string.split(path, '/')
+
+ newcomps = []
+ for comp in comps:
+ if comp in ('','.'):
+ continue
+ if comp != '..':
+ newcomps.append(comp)
+ elif newcomps:
+ newcomps.pop()
+ path = string.join(newcomps, '/')
+ if path and lastslash:
+ path = path + '/'
+ if initialslash:
+ path = '/' + path
+ return path
+
+### Deprecated
+
+HTMLCodes = htmlCodes
+HTMLCodesReversed = htmlCodesReversed
+
+def HTMLEncode(s):
+ print 'DEPRECATED: WebUtils.Funcs.HTMLEncode() on 02/24/01 in ver 0.3. Use htmlEncode() instead.'
+ return htmlEncode(s)
+
+def HTMLDecode(s):
+ print 'DEPRECATED: WebUtils.Funcs.HTMLDecode() on 02/24/01 in ver 0.3. Use htmlDecode() instead.'
+ return htmlDecode(s)
+
+def URLEncode(s):
+ print 'DEPRECATED: WebUtils.Funcs.URLEncode() on 02/24/01 in ver 0.3. Use urlEncode() instead.'
+ return urlEncode(s)
+
+def URLDecode(s):
+ print 'DEPRECATED: WebUtils.Funcs.URLDecode() on 02/24/01 in ver 0.3. Use urlDecode() instead.'
+ return urlDecode(s)
+
+def HTMLForDictionary(dict, addSpace=None):
+ print 'DEPRECATED: WebUtils.Funcs.HTMLForDictionary() on 02/24/01 in ver 0.3. Use htmlForDict() instead.'
+ return htmlForDict(dict, addSpace)
+
+def RequestURI(dict):
+ print 'DEPRECATED: WebUtils.Funcs.RequestURI() on 02/24/01 in ver 0.3. Use requestURI() instead.'
+ return requestURI(dict)
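+
+
+if __name__ == '__main__':
+    # Hypothetical quick checks of the helpers above; all inputs are invented.
+    print htmlEncode('<a href="x">&')      # &lt;a href=&quot;x&quot;&gt;&amp;
+    print urlEncode('hello world/x,y')     # spaces become '+', safe chars pass through
+    print urlDecode('hello+world%2Fx')     # hello world/x
+    print normURL('/a/./b/../c/')          # /a/c/
+    print requestURI({'SCRIPT_NAME': '/app', 'PATH_INFO': '/page',
+                      'QUERY_STRING': 'x=1'})
+    print htmlForDict({'color': 'blue', 'size': 3})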
diff --git a/paste/webkit/FakeWebware/WebUtils/__init__.py b/paste/webkit/FakeWebware/WebUtils/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/webkit/FakeWebware/WebUtils/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/webkit/FakeWebware/__init__.py b/paste/webkit/FakeWebware/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/webkit/FakeWebware/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/webkit/__init__.py b/paste/webkit/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/webkit/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/webkit/examples/EchoServlet.py b/paste/webkit/examples/EchoServlet.py
new file mode 100644
index 0000000..6dc30c6
--- /dev/null
+++ b/paste/webkit/examples/EchoServlet.py
@@ -0,0 +1,57 @@
+r"""\
+Paste/WebKit application
+
+Does things as requested. Takes variables:
+
+header.header-name=value, like
+ header.location=http://yahoo.com
+
+error=code, like
+    error=301 (moved permanently)
+ error=assert (assertion error)
+
+environ=true,
+    display all the environment variables, like
+ key=str(value)\n
+
+message=string
+ display string
+"""
+
+# Special WSGI version of WebKit:
+from paste.webkit.wkservlet import Page
+from paste import httpexceptions
+
+class EchoServlet(Page):
+
+ def writeHTML(self):
+ req = self.request()
+ headers = {}
+ for key, value in req.fields().items():
+ if key.startswith('header.'):
+ name = key[len('header.'):]
+ self.response().setHeader(name, value)
+ # @@: I shouldn't have to do this:
+ headers[name] = value
+
+ error = req.field('error', None)
+ if error and error != 'iter':
+ if error == 'assert':
+ assert 0, "I am asserting zero!"
+ raise httpexceptions.get_exception(
+ int(error), headers=headers)
+
+ if req.field('environ', None):
+ items = req.environ().items()
+ items.sort()
+ self.response().setHeader('content-type', 'text/plain')
+ for name, value in items:
+ self.write('%s=%s\n' % (name, value))
+ return
+
+ if req.hasField('message'):
+ self.response().setHeader('content-type', 'text/plain')
+ self.write(req.field('message'))
+ return
+
+ self.write('hello world!')
diff --git a/paste/webkit/examples/__init__.py b/paste/webkit/examples/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/paste/webkit/examples/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/paste/webkit/test_wkfixture.py b/paste/webkit/test_wkfixture.py
new file mode 100644
index 0000000..790a099
--- /dev/null
+++ b/paste/webkit/test_wkfixture.py
@@ -0,0 +1,66 @@
+"""
+Test fixture for Paste/WebKit testing.
+
+Maybe look at paste.tests.fixture as an alternative to this
+"""
+
+from cStringIO import StringIO
+from paste import recursive
+from paste import session
+from paste import httpexceptions
+from paste import lint
+from paste import wsgilib
+
+_default_environ = {
+ 'SCRIPT_NAME': '',
+ 'SERVER_NAME': 'localhost',
+ 'SERVER_PORT': '80',
+ 'REQUEST_METHOD': 'GET',
+ 'HTTP_HOST': 'localhost:80',
+ 'CONTENT_LENGTH': '0',
+ 'wsgi.input': StringIO(''),
+ 'wsgi.version': (1, 0),
+ 'wsgi.multithread': False,
+ 'wsgi.multiprocess': False,
+ 'wsgi.run_once': False,
+}
+
+def stack(application):
+ """
+ Creates a WebKit stack, except with a fixed application and
+ no URLParser.
+ """
+ return recursive.middleware(
+ lint.middleware(
+ httpexceptions.middleware(
+ lint.middleware(
+ session.middleware(
+ lint.middleware(application))))))
+
+def in_request(application, **kw):
+ """
+    Used to wrap a function in an application invocation, used like::
+
+ @in_request(my_app, path_info='/whatever',
+ wsgi__input='post-data',
+ REQUEST_METHOD='POST')
+ def test_this(output, **kw):
+ assert 'I got post-data' in output
+
+    The wrapped function receives several keyword arguments, and in
+    the future there may be more, so a **kw argument is required so that
+    extra arguments can be ignored.
+ """
+ def decorator(func):
+ def replacement_func(*inner_args, **inner_kw):
+ # Note, *inner_args and **inner_kw are intended for use
+ # with @param or other py.test parameterizers
+ app = stack(application)
+ status, headers, output, errors = wsgilib.raw_interactive(
+ app, **kw)
+ return func(status=status, headers=headers, output=output,
+ errors=errors, application=application,
+ *inner_args,
+ **inner_kw)
+ return replacement_func
+ return decorator
diff --git a/paste/webkit/wkapplication.py b/paste/webkit/wkapplication.py
new file mode 100644
index 0000000..1c016a4
--- /dev/null
+++ b/paste/webkit/wkapplication.py
@@ -0,0 +1,18 @@
+"""
+A mostly dummy class to simulate the Webware Application object.
+"""
+
+from wkcommon import NoDefault
+
+class Application(object):
+
+ def __init__(self, transaction):
+ self._transaction = transaction
+
+ def forward(self, trans, url, context=None):
+ assert context is None, "Contexts are not supported"
+ trans.forward(url)
+
+ def setting(self, setting, default=NoDefault):
+ assert default is not NoDefault, "No settings are defined"
+ return default
diff --git a/paste/webkit/wkcommon.py b/paste/webkit/wkcommon.py
new file mode 100644
index 0000000..be25502
--- /dev/null
+++ b/paste/webkit/wkcommon.py
@@ -0,0 +1,187 @@
+import cgi
+import urllib
+import warnings
+import inspect
+import Cookie as CookieEngine
+
+__all__ = ['NoDefault', 'htmlEncode', 'htmlDecode',
+ 'urlEncode', 'urlDecode',
+ ]
+
+try:
+ from MiscUtils import NoDefault
+except ImportError:
+ class NoDefault:
+ pass
+
+def htmlEncode(s):
+ return cgi.escape(s, 1)
+
+def htmlDecode(s):
+ for char, code in [('&', '&amp;'),
+ ('<', '&lt;'),
+ ('>', '&gt;'),
+ ('"', '&quot;')]:
+ s = s.replace(code, char)
+ return s
+
+urlDecode = urllib.unquote
+urlEncode = urllib.quote
+
+def requestURI(dict):
+ """
+ Returns the request URI for a given CGI-style dictionary. Uses
+ REQUEST_URI if available, otherwise constructs and returns it from
+ SCRIPT_NAME, PATH_INFO and QUERY_STRING.
+ """
+ uri = dict.get('REQUEST_URI', None)
+ if uri is None:
+ uri = dict.get('SCRIPT_NAME', '') + dict.get('PATH_INFO', '')
+ query = dict.get('QUERY_STRING', '')
+ if query:
+ uri = uri + '?' + query
+ return uri
+
+def deprecated(msg=None):
+ if not msg:
+ frame = inspect.stack()[1]
+ methodName = frame[3]
+ msg = 'The use of %s is deprecated' % methodName
+ warnings.warn(msg, DeprecationWarning, stacklevel=3)
+
+
+
+class Cookie:
+ """
+ Cookie is used to create cookies that have additional
+ attributes beyond their value.
+
+ Note that web browsers don't typically send any information
+    with the cookie other than its value. Therefore
+ `HTTPRequest.cookie` simply returns a value such as an
+ integer or a string.
+
+ When the server sends cookies back to the browser, it can send
+ a cookie that simply has a value, or the cookie can be
+ accompanied by various attributes (domain, path, max-age, ...)
+ as described in `RFC 2109`_. Therefore, in HTTPResponse,
+ `setCookie` can take either an instance of the Cookie class,
+ as defined in this module, or a value.
+
+ Note that Cookies values get pickled (see the `pickle` module),
+ so you can set and get cookies that are integers, lists,
+ dictionaries, etc.
+
+ .. _`RFC 2109`: ftp://ftp.isi.edu/in-notes/rfc2109.txt
+ """
+
+ ## Future
+ ##
+ ## * This class should provide error checking in the setFoo()
+ ## methods. Or maybe our internal Cookie implementation
+ ## already does that?
+ ## * This implementation is probably not as efficient as it
+ ## should be, [a] it works and [b] the interface is stable.
+ ## We can optimize later.
+
+ def __init__(self, name, value):
+ """
+ Create a cookie -- properties other than `name` and
+ `value` are set with methods.
+ """
+
+ self._cookies = CookieEngine.SimpleCookie()
+ self._name = name
+ self._value = value
+ self._cookies[name] = value
+ self._cookie = self._cookies[name]
+
+ """
+ **Accessors**
+ """
+
+ def comment(self):
+ return self._cookie['comment']
+
+ def domain(self):
+ return self._cookie['domain']
+
+ def maxAge(self):
+ return self._cookie['max-age']
+
+ def expires(self):
+ return self._cookie['expires']
+
+ def name(self):
+ return self._name
+
+ def path(self):
+ return self._cookie['path']
+
+ def isSecure(self):
+ return self._cookie['secure']
+
+ def value(self):
+ return self._value
+
+ def version(self):
+ return self._cookie['version']
+
+
+ """
+ **Setters**
+ """
+
+ def setComment(self, comment):
+ self._cookie['comment'] = comment
+
+ def setDomain(self, domain):
+ self._cookie['domain'] = domain
+
+ def setExpires(self, expires):
+ self._cookie['expires'] = expires
+
+ def setMaxAge(self, maxAge):
+ self._cookie['max-age'] = maxAge
+
+ def setPath(self, path):
+ self._cookie['path'] = path
+
+ def setSecure(self, bool):
+ self._cookie['secure'] = bool
+
+ def setValue(self, value):
+ self._value = value
+ self._cookies[self._name] = value
+
+ def setVersion(self, version):
+ self._cookie['version'] = version
+
+
+ """
+ **Misc**
+ """
+
+ def delete(self):
+ """
+ When sent, this should delete the cookie from the user's
+ browser, by making it empty, expiring it in the past,
+ and setting its max-age to 0. One of these will delete
+ the cookie for any browser (which one actually works
+ depends on the browser).
+ """
+
+ self._value = ''
+ self._cookie['expires'] = "Mon, 01-Jan-1900 00:00:00 GMT"
+ self._cookie['max-age'] = 0
+ self._cookie['path'] = '/'
+
+
+ def headerValue(self):
+ """
+ Returns a string with the value that should be
+ used in the HTTP headers. """
+
+ items = self._cookies.items()
+ assert(len(items)==1)
+ return items[0][1].OutputString()
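+
+
+if __name__ == '__main__':
+    # Hypothetical illustration of the Cookie wrapper defined above; the
+    # cookie name, value and attributes are invented.
+    c = Cookie('session_id', 'abc123')
+    c.setPath('/')
+    c.setMaxAge(3600)
+    print c.headerValue()   # e.g. session_id=abc123; Max-Age=3600; Path=/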
diff --git a/paste/webkit/wkrequest.py b/paste/webkit/wkrequest.py
new file mode 100644
index 0000000..6b1785e
--- /dev/null
+++ b/paste/webkit/wkrequest.py
@@ -0,0 +1,344 @@
+"""
+A Webware HTTPRequest object, implemented based on the WSGI request
+environment dictionary.
+"""
+
+import os   # used by serverSideContextPath()
+import time
+import traceback
+import cgi
+from wkcommon import NoDefault, requestURI, deprecated
+from Cookie import SimpleCookie as Cookie
+
+class HTTPRequest(object):
+
+ def __init__(self, transaction, environ):
+ self._environ = environ
+ self._transaction = transaction
+ if environ.has_key('webkit.time'):
+ self._time = environ['webkit.time']
+ else:
+ self._time = time.time()
+ self._input = environ['wsgi.input']
+ self._setupPath()
+ self._setupFields()
+ self._setupCookies()
+ self._pathInfo = None
+ self._serverRootPath = ""
+ # @@: I'm leaving out automatic path sessions
+ self._sessionExpired = False
+
+ def _setupPath(self):
+ self._environ['PATH_INFO'] = self._environ.get('PATH_INFO', '')
+ if not self._environ.has_key('REQUEST_URI'):
+ self._environ['REQUEST_URI'] = requestURI(self._environ)
+ # @@: Not necessarily true for WSGI:
+ self._adapterName = self._environ.get('SCRIPT_NAME')
+
+ def _setupFields(self):
+ self._fieldStorage = cgi.FieldStorage(
+ self._input,
+ environ=self._environ,
+ keep_blank_values=True,
+ strict_parsing=False)
+ try:
+ keys = self._fieldStorage.keys()
+ except TypeError:
+ # Maybe an XML-RPC request
+ keys = []
+ dict = {}
+ for key in keys:
+ value = self._fieldStorage[key]
+ if not isinstance(value, list):
+ if not value.filename:
+ # Turn the MiniFieldStorage into a string:
+ value = value.value
+ else:
+ value = [v.value for v in value]
+ dict[key] = value
+ if self._environ['REQUEST_METHOD'].upper() == 'POST':
+ # Then we must also parse GET variables
+ self._getFields = cgi.parse_qs(
+ self._environ.get('QUERY_STRING', ''),
+ keep_blank_values=True,
+ strict_parsing=False)
+ for name, value in self._getFields.items():
+ if not dict.has_key(name):
+ if isinstance(value, list) and len(value) == 1:
+ # parse_qs always returns a list of lists,
+ # while FieldStorage only uses lists for
+ # keys that actually repeat; this fixes that.
+ value = value[0]
+ dict[name] = value
+ self._fields = dict
+
+ def _setupCookies(self):
+ cookies = Cookie()
+ if self._environ.has_key('HTTP_COOKIE'):
+ try:
+ cookies.load(self._environ['HTTP_COOKIE'])
+ except:
+ traceback.print_exc(file=self._environ['wsgi.errors'])
+ dict = {}
+ for key in cookies.keys():
+ dict[key] = cookies[key].value
+ self._cookies = dict
+
+ def protocol(self):
+ return 'HTTP/1.0'
+
+ def time(self):
+ return self._time
+
+ def timeStamp(self):
+ return time.asctime(time.localtime(self.time()))
+
+ ## Transactions ##
+
+ def transaction(self):
+ return self._transaction
+
+ def setTransaction(self, trans):
+ self._transaction = trans
+
+ ## Values ##
+
+ def value(self, name, default=NoDefault):
+ if self._fields.has_key(name):
+ return self._fields[name]
+ else:
+ return self.cookie(name, default)
+
+ def hasValue(self, name):
+ return self._fields.has_key(name) or self._cookies.has_key(name)
+
+ def extraURLPath(self):
+ return self._environ.get('PATH_INFO', '')
+
+ ## Fields ##
+
+
+ def fieldStorage(self):
+ return self._fieldStorage
+
+ def field(self, name, default=NoDefault):
+ if default is NoDefault:
+ return self._fields[name]
+ else:
+ return self._fields.get(name, default)
+
+ def hasField(self, name):
+ return self._fields.has_key(name)
+
+ def fields(self):
+ return self._fields
+
+ def setField(self, name, value):
+ self._fields[name] = value
+
+ def delField(self, name):
+ del self._fields[name]
+
+ ## Cookies ##
+
+ def cookie(self, name, default=NoDefault):
+ """ Returns the value of the specified cookie. """
+ if default is NoDefault:
+ return self._cookies[name]
+ else:
+ return self._cookies.get(name, default)
+
+ def hasCookie(self, name):
+ return self._cookies.has_key(name)
+
+ def cookies(self):
+ """
+ Returns a dictionary-style object of all Cookie objects the
+ client sent with this request."""
+ return self._cookies
+
+ ## Variables passed by server ##
+ def serverDictionary(self):
+ """
+ Returns a dictionary with the data the web server gave us,
+ like HTTP_HOST or HTTP_USER_AGENT. """
+ return self._environ
+
+ ## Sessions ##
+
+ def session(self):
+ """ Returns the session associated with this request, either
+ as specified by sessionId() or newly created. This is a
+ convenience for transaction.session() """
+ return self._transaction.session()
+
+ def isSessionExpired(self):
+ """ Returns bool: whether or not this request originally
+ contained an expired session ID. Only works if the
+ Application.config setting "IgnoreInvalidSession" is set to 1;
+ otherwise you get a canned error page on an invalid session,
+ so your servlet never gets processed. """
+ return self._sessionExpired
+
+ def setSessionExpired(self, sessionExpired):
+ self._sessionExpired = sessionExpired
+
+ ## Authentication ##
+
+ def remoteUser(self):
+        """ Returns the authenticated user, taken from the CGI
+        variable REMOTE_USER (raises KeyError if it is not set). """
+ # @@ 2000-03-26 ce: maybe belongs in section below. clean up docs
+ return self._environ['REMOTE_USER']
+
+ ## Remote info ##
+
+ def remoteAddress(self):
+ """ Returns a string containing the Internet Protocol (IP)
+ address of the client that sent the request. """
+ return self._environ['REMOTE_ADDR']
+
+ def remoteName(self):
+ """ Returns the fully qualified name of the client that sent
+ the request, or the IP address of the client if the name
+ cannot be determined. """
+ env = self._environ
+ return env.get('REMOTE_NAME', env['REMOTE_ADDR'])
+
+ ## Path ##
+
+ def urlPath(self):
+ raise NotImplementedError
+
+ def originalURLPath(self):
+ environ = self._environ.get('recursive.previous_environ', self._environ)
+ url = environ.get("SCRIPT_NAME", '') + environ.get('PATH_INFO', '')
+ #self._environ['wsgi.errors'].write('Original URL: %r (from %r)\n' % (url, environ))
+ return url
+
+ def urlPathDir(self):
+ raise NotImplementedError
+
+ def getstate(self):
+ raise NotImplementedError
+
+ def setURLPath(self, path):
+ raise NotImplementedError
+
+ def serverSidePath(self, path=None):
+ raise NotImplementedError
+
+ def serverSideContextPath(self, path=None):
+ base = self._environ['paste.config']['webkit_dir']
+ if path:
+ return os.path.join(base, path)
+ else:
+ return base
+
+ def contextName(self):
+ return ''
+
+ def servletURI(self):
+ """This is the URI of the servlet, without any query strings or extra path info"""
+ # @@: should be implemented
+ raise NotImplementedError
+
+ def uriWebKitRoot(self):
+ raise NotImplementedError
+
+ def fsPath(self):
+ raise NotImplementedError
+
+ def serverURL(self):
+ raise NotImplementedError
+
+ def serverURLDir(self):
+ raise NotImplementedError
+
+ def siteRoot(self):
+ raise NotImplementedError
+
+ def siteRootFromCurrentServlet(self):
+ raise NotImplementedError
+
+ def servletPathFromSiteRoot(self):
+ raise NotImplementedError
+
+ ## Special ##
+
+ def adapterName(self):
+ """
+ Returns the name of the adapter as it appears in the URL.
+ Example: '/WebKit.cgi'
+ This is useful in special cases when you are constructing URLs. See Testing/Main.py for an example use.
+ """
+ deprecated()
+ return '/'.join(self._environ['SCRIPT_NAME'].split('/')[:-1])
+
+ def rawRequest(self):
+ raise NotImplementedError
+
+ def environ(self):
+ return self._environ
+
+ def rawInput(self, rewind=0):
+ """
+ This gives you a file-like object for the data that was
+ sent with the request (e.g., the body of a POST request,
+        or the document uploaded in a PUT request).
+
+ The file might not be rewound to the beginning if there
+ was valid, form-encoded POST data. Pass rewind=1 if
+ you want to be sure you get the entire body of the request.
+ """
+ fs = self.fieldStorage()
+ if rewind:
+ fs.file.seek(0)
+ return fs.file
+
+ ## Information ##
+
+ # @@ 2000-05-10: See FUTURE section of class doc string
+
+ def servletPath(self):
+ raise NotImplementedError
+
+ def contextPath(self):
+ raise NotImplementedError
+
+ def pathInfo(self):
+ raise NotImplementedError
+
+ def pathTranslated(self):
+ raise NotImplementedError
+
+ def queryString(self):
+ """
+ Returns the query string portion of the URL for this
+ request. Taken from the CGI variable QUERY_STRING. """
+ return self._environ.get('QUERY_STRING', '')
+
+ def uri(self):
+ """
+ Returns the request URI, which is the entire URL except
+ for the query string. """
+ return self._environ['REQUEST_URI']
+
+ def method(self):
+ """
+ Returns the HTTP request method (in all uppercase), typically
+ from the set GET, POST, PUT, DELETE, OPTIONS and TRACE."""
+ return self._environ['REQUEST_METHOD'].upper()
+
+ def sessionId(self):
+ """ Returns a string with the session id specified by the
+ client, or None if there isn't one. """
+ sid = self.value('_SID_', None)
+ return sid
+
+ ## Inspection ##
+
+ def info(self):
+ raise NotImplementedError
+
+ def htmlInfo(self):
+ raise NotImplementedError
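+
+
+if __name__ == '__main__':
+    # Hypothetical smoke test: build an HTTPRequest from a minimal WSGI-style
+    # environ.  All values below are invented; the transaction argument is not
+    # used by the methods exercised here.
+    from StringIO import StringIO
+    environ = {
+        'REQUEST_METHOD': 'GET',
+        'SCRIPT_NAME': '/app',
+        'PATH_INFO': '/page',
+        'QUERY_STRING': 'name=world&flag=1',
+        'HTTP_COOKIE': '_SID_=abc123',
+        'wsgi.input': StringIO(''),
+        'wsgi.errors': StringIO(),
+    }
+    req = HTTPRequest(None, environ)
+    print req.fields()      # e.g. {'name': 'world', 'flag': '1'}
+    print req.cookies()     # {'_SID_': 'abc123'}
+    print req.uri()         # /app/page?name=world&flag=1
+    print req.sessionId()   # abc123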
diff --git a/paste/webkit/wkresponse.py b/paste/webkit/wkresponse.py
new file mode 100644
index 0000000..0cf0c2a
--- /dev/null
+++ b/paste/webkit/wkresponse.py
@@ -0,0 +1,267 @@
+"""
+A Webware HTTPResponse object.
+"""
+
+import time
+from types import StringType, IntType, LongType, FloatType, TupleType
+from wkcommon import NoDefault, Cookie
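+
+# setCookie() below also references TimeTupleType and timeDecode(); in Webware
+# these come from the time-tuple type and from MiscUtils.DateInterval.  Minimal
+# stand-ins are sketched here as assumptions, since DateInterval is not part of
+# this FakeWebware tree.
+TimeTupleType = type(time.gmtime(0))
+try:
+    from MiscUtils.DateInterval import timeDecode
+except ImportError:
+    def timeDecode(s):
+        # crude fallback: treat the string as a plain number of seconds
+        return int(s)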
+
+class HTTPResponse(object):
+
+ def __init__(self, transaction, environ, start_response):
+ self._transaction = transaction
+ self._environ = environ
+ self._start_response = start_response
+ self._writer = None
+ self._committed = False
+ self._autoFlush = False
+ self.reset()
+
+ def endTime(self):
+ return self._endTime
+
+ def recordEndTime(self):
+ """
+ Stores the current time as the end time of the response. This
+ should be invoked at the end of deliver(). It may also be
+ invoked by the application for those responses that never
+ deliver due to an error."""
+ self._endTime = time.time()
+
+ ## Headers ##
+
+ def header(self, name, default=NoDefault):
+ """ Returns the value of the specified header. """
+ if default is NoDefault:
+ return self._headers[name.lower()]
+ else:
+ return self._headers.get(name.lower(), default)
+
+ def hasHeader(self, name):
+ return self._headers.has_key(name.lower())
+
+ def setHeader(self, name, value):
+ """
+ Sets a specific header by name.
+ """
+ assert self._committed==0, "Headers have already been sent"
+ self._headers[name.lower()] = value
+
+ def headers(self):
+ """
+ Returns a dictionary-style object of all Header objects
+ contained by this request. """
+ return self._headers
+
+ def clearHeaders(self):
+ """
+ Clears all the headers. You might consider a
+ setHeader('Content-type', 'text/html') or something similar
+ after this."""
+ assert self._committed==0
+ self._headers = {}
+
+ ## Cookies ##
+
+ def cookie(self, name):
+ """ Returns the value of the specified cookie. """
+ return self._cookies[name]
+
+ def hasCookie(self, name):
+ """
+ Returns true if the specified cookie is present.
+ """
+ return self._cookies.has_key(name)
+
+ def setCookie(self, name, value, path='/', expires='ONCLOSE',
+ secure=False):
+ """
+        Set a cookie. You can also set the path (which defaults to /)
+        and when it expires. It can expire:
+ 'NOW': this is the same as trying to delete it, but it
+ doesn't really seem to work in IE
+ 'ONCLOSE': the default behavior for cookies (expires when
+ the browser closes)
+ 'NEVER': some time in the far, far future.
+ integer: a timestamp value
+ tuple: a tuple, as created by the time module
+ """
+ cookie = Cookie(name, value)
+ if expires == 'ONCLOSE' or not expires:
+ pass # this is already default behavior
+ elif expires == 'NOW' or expires == 'NEVER':
+ t = time.gmtime(time.time())
+ if expires == 'NEVER':
+ t = (t[0] + 10,) + t[1:]
+ t = time.strftime("%a, %d-%b-%Y %H:%M:%S GMT", t)
+ cookie.setExpires(t)
+ else:
+ t = expires
+ if type(t) is StringType and t and t[0] == '+':
+ interval = timeDecode(t[1:])
+ t = time.time() + interval
+ if type(t) in (IntType, LongType,FloatType):
+ t = time.gmtime(t)
+ if type(t) in (TupleType, TimeTupleType):
+ t = time.strftime("%a, %d-%b-%Y %H:%M:%S GMT", t)
+ cookie.setExpires(t)
+ if path:
+ cookie.setPath(path)
+ if secure:
+ cookie.setSecure(secure)
+ self.addCookie(cookie)
+
+ def addCookie(self, cookie):
+ """
+ Adds a cookie that will be sent with this response.
+ cookie is a Cookie object instance. See WebKit.Cookie.
+ """
+ assert self._committed==0
+ assert isinstance(cookie, Cookie)
+ self._cookies[cookie.name()] = cookie
+
+ def delCookie(self, name):
+ """
+ Deletes a cookie at the browser. To do so, one has
+ to create and send to the browser a cookie with
+ parameters that will cause the browser to delete it.
+ """
+ if self._cookies.has_key(name):
+ self._cookies[name].delete()
+ else:
+ cookie = Cookie(name, None)
+ cookie.delete()
+ self.addCookie(cookie)
+
+ def cookies(self):
+ """
+ Returns a dictionary-style object of all Cookie objects that will be sent
+ with this response.
+ """
+ return self._cookies
+
+ def clearCookies(self):
+ """ Clears all the cookies. """
+ assert self._committed==0
+ self._cookies = {}
+
+ ## Status ##
+
+ def setStatus(self, code, msg=''):
+ """ Set the status code of the response, such as 200, 'OK'. """
+ assert self._committed==0, "Headers already sent."
+ self.setHeader('Status', str(code) + ' ' + msg)
+
+ ## Special responses ##
+
+ def sendError(self, code, msg=''):
+ """
+ Sets the status code to the specified code and message.
+ """
+ assert self._committed==0, "Response already partially sent"
+ self.setStatus(code, msg)
+
+ def sendRedirect(self, url):
+ """
+ This method sets the headers and content for the redirect, but
+ does NOT change the cookies. Use clearCookies() as
+ appropriate.
+
+ @@ 2002-03-21 ce: I thought cookies were ignored by user
+ agents if a redirect occurred. We should verify and update
+ code or docs as appropriate.
+ """
+ # ftp://ftp.isi.edu/in-notes/rfc2616.txt
+ # Sections: 10.3.3 and others
+
+ assert self._committed==0, "Headers already sent"
+
+ self.setHeader('Status', '302 Redirect')
+ self.setHeader('Location', url)
+ self.setHeader('Content-type', 'text/html')
+
+ self.write('<html> <body> This page has been redirected to '
+ '<a href="%s">%s</a>. </body> </html>' % (url, url))
+
+ ## Output ##
+
+ def write(self, charstr=None):
+ """
+ Write charstr to the response stream.
+ """
+ if not charstr:
+ return
+ if self._autoFlush:
+ assert self._committed
+ self._writer(charstr)
+ else:
+ self._output.append(charstr)
+
+ def flush(self, autoFlush=True):
+ """
+ Send all accumulated response data now. Commits the response
+ headers and tells the underlying stream to flush. if
+ autoFlush is true, the responseStream will flush itself
+ automatically from now on.
+ """
+ if not self._committed:
+ self.commit()
+ if self._output:
+ self._writer(''.join(self._output))
+ self._autoFlush = autoFlush
+
+ def isCommitted(self):
+ """
+        Has the response already been partially or completely sent? If
+ this returns true, no new headers/cookies can be added to the
+ response.
+ """
+ return self._committed
+
+ def deliver(self):
+ """
+ The final step in the processing cycle.
+ Not used for much with responseStreams added.
+ """
+ self.recordEndTime()
+ if not self._committed: self.commit()
+
+ def commit(self):
+ """
+        Write out all headers to the response stream, and tell the
+ underlying response stream it can start sending data.
+ """
+ status = self._headers['status']
+ del self._headers['status']
+ headers = self._headers.items()
+ for cookie in self._cookies.values():
+ headers.append(('Set-Cookie', cookie.headerValue()))
+ self._writer = self._start_response(status, headers)
+ self._committed = True
+
+ def wsgiIterator(self):
+ return self._output
+
+ def recordSession(self):
+ raise NotImplementedError
+
+ def reset(self):
+ """
+ Resets the response (such as headers, cookies and contents).
+ """
+
+ assert self._committed == 0
+ self._headers = {}
+ self.setHeader('Content-type','text/html')
+ self.setHeader('Status', '200 OK')
+ self._cookies = {}
+ self._output = []
+
+ def rawResponse(self):
+ raise NotImplementedError
+
+ def size(self):
+ raise NotImplementedError
+
+ def mergeTextHeaders(self, headerstr):
+ raise NotImplementedError
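+
+
+if __name__ == '__main__':
+    # Hypothetical smoke test of the response object above, driven with a stub
+    # start_response callable; the transaction argument is not used by the
+    # methods exercised here.
+    def _writer(data):
+        print data
+    def _start_response(status, headers):
+        print status, headers
+        return _writer
+    resp = HTTPResponse(None, {}, _start_response)
+    resp.setHeader('Content-type', 'text/plain')
+    resp.setCookie('visited', '1')
+    resp.write('hello world')
+    resp.flush()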
diff --git a/paste/webkit/wkservlet.py b/paste/webkit/wkservlet.py
new file mode 100644
index 0000000..2e37dba
--- /dev/null
+++ b/paste/webkit/wkservlet.py
@@ -0,0 +1,430 @@
+"""
+This implements all of the Webware servlets (Servlet, HTTPServlet, and
+Page), as WSGI applications. The servlets themselves are
+applications, and __call__ is provided to do this.
+"""
+
+import wkcommon
+from wktransaction import Transaction
+try:
+    # AbstractError (used by Servlet.respond) normally comes from Webware's
+    # MiscUtils, which the bundled FakeWebware tree provides; fall back to
+    # NotImplementedError if it isn't importable.
+    from MiscUtils import AbstractError
+except ImportError:
+    AbstractError = NotImplementedError
+
+class Servlet(object):
+
+ # This is nested in Servlet so that transactions can access it as
+ # an attribute, instead of having to import this module. (If they
+ # had to import this module, there would be a circular import)
+ # @@: Why not just put this in wktransaction?
+ class ReturnIterException(Exception):
+ def __init__(self, app_iter):
+ self.app_iter = app_iter
+
+ def __call__(self, environ, start_response):
+ """
+ The core WSGI method, and the core of the servlet execution.
+ """
+ trans = Transaction(environ, start_response)
+ trans.setServlet(self)
+ try:
+ trans.runTransaction()
+ trans.response().deliver()
+ return trans.response().wsgiIterator()
+ except self.ReturnIterException, e:
+ return e.app_iter
+ except self.EndResponse:
+ trans.response().deliver()
+ return trans.response().wsgiIterator()
+
+ ## Access ##
+
+ def name(self):
+ """
+        Returns the name, which is simply the name of the
+ class. Subclasses should *not* override this method. It is
+ used for logging and debugging. """
+ return self.__class__.__name__
+
+
+ def awake(self, trans):
+ """
+ This message is sent to all objects that participate in the
+ request-response cycle in a top-down fashion, prior to
+ respond(). Subclasses must invoke super.
+ """
+ self._transaction = trans
+
+ def respond(self, trans):
+ raise AbstractError, self.__class__
+
+ def sleep(self, trans):
+ pass
+
+ ## Abilities ##
+
+ def canBeThreaded(self):
+ """ Returns 0 or 1 to indicate if the servlet can be
+ multithreaded. This value should not change during the
+ lifetime of the object. The default implementation returns
+ 0. Note: This is not currently used. """
+ return 0
+
+ def canBeReused(self):
+ """ Returns 0 or 1 to indicate if a single servlet instance
+        can be reused. The default is 1, but subclasses can override
+        to return 0. Keep in mind that performance may be seriously
+        degraded if instances can't be reused. Also, there are no known
+        good reasons not to reuse an instance. Remember the awake()
+ and sleep() methods are invoked for every transaction. But
+ just in case, your servlet can refuse to be reused. """
+ return 1
+
+class HTTPServlet(Servlet):
+
+ def __init__(self):
+ Servlet.__init__(self)
+ self._methodForRequestType = {} # a cache; see respond()
+
+ ## From WebKit.HTTPServlet ##
+
+ def respond(self, trans):
+ """
+ Invokes the appropriate respondToSomething() method
+ depending on the type of request (e.g., GET, POST, PUT,
+ ...). """
+ httpMethodName = trans.request().method()
+ method = self._methodForRequestType.get(httpMethodName, None)
+ if not method:
+ methName = 'respondTo' + httpMethodName.capitalize()
+ method = getattr(self, methName, self.notImplemented)
+ self._methodForRequestType[httpMethodName] = method
+ method(trans)
+
+ def notImplemented(self, trans):
+ trans.response().setHeader('Status', '501 Not Implemented')
+
+ def respondToHead(self, trans):
+ """
+ A correct but inefficient implementation.
+ Should at least provide Last-Modified and Content-Length.
+ """
+ res = trans.response()
+ w = res.write
+ res.write = lambda *args: None
+ self.respondToGet(trans)
+ res.write = w
+
+class Page(HTTPServlet):
+
+ class EndResponse(Exception):
+ pass
+
+ ## Server side filesystem ##
+
+ def serverSidePath(self, path=None):
+ raise NotImplementedError
+
+ ## From WebKit.Page ##
+
+ def awake(self, transaction):
+ self._transaction = transaction
+ self._response = transaction.response()
+ self._request = transaction.request()
+ self._session = None # don't create unless needed
+ assert self._transaction is not None
+ assert self._response is not None
+ assert self._request is not None
+
+ def respondToGet(self, transaction):
+ """ Invokes _respond() to handle the transaction. """
+ self._respond(transaction)
+
+ def respondToPost(self, transaction):
+ """ Invokes _respond() to handle the transaction. """
+ self._respond(transaction)
+
+ def _respond(self, transaction):
+ """
+ Handles actions if an _action_ field is defined, otherwise
+ invokes writeHTML().
+ Invoked by both respondToGet() and respondToPost().
+ """
+ req = transaction.request()
+
+ # Check for actions
+ for action in self.actions():
+ if req.hasField('_action_%s' % action) or \
+ req.field('_action_', None) == action or \
+ (req.hasField('_action_%s.x' % action) and \
+ req.hasField('_action_%s.y' % action)):
+ if self._actionSet().has_key(action):
+ self.handleAction(action)
+ return
+
+ self.writeHTML()
+
+ def sleep(self, transaction):
+ self._session = None
+ self._request = None
+ self._response = None
+ self._transaction = None
+
+ ## Access ##
+
+ def application(self):
+ return self.transaction().application()
+
+ def transaction(self):
+ return self._transaction
+
+ def request(self):
+ return self._request
+
+ def response(self):
+ return self._response
+
+ def session(self):
+ if not self._session:
+ self._session = self._transaction.session()
+ return self._session
+
+
+ ## Generating results ##
+
+ def title(self):
+ """ Subclasses often override this method to provide a custom title. This title should be absent of HTML tags. This implementation returns the name of the class, which is sometimes appropriate and at least informative. """
+ return self.__class__.__name__
+
+ def htTitle(self):
+ """ Return self.title(). Subclasses sometimes override this to provide an HTML enhanced version of the title. This is the method that should be used when including the page title in the actual page contents. """
+ return self.title()
+
+ def htBodyArgs(self):
+ """
+ Returns the arguments used for the HTML <body> tag. Invoked by
+ writeBody().
+
+ With the prevalence of stylesheets (CSS), you can probably skip
+ this particular HTML feature.
+ """
+ return 'color=black bgcolor=white'
+
+ def writeHTML(self):
+ """
+ Writes all the HTML for the page.
+
+ Subclasses may override this method (which is invoked by
+ respondToGet() and respondToPost()) or more commonly its
+ constituent methods, writeDocType(), writeHead() and
+ writeBody().
+ """
+ self.writeDocType()
+ self.writeln('<html>')
+ self.writeHead()
+ self.writeBody()
+ self.writeln('</html>')
+
+    def writeDocType(self):
+        """
+        Invoked by writeHTML() to write the <!DOCTYPE ...> tag.
+
+        @@ sgd-2003-01-29 - restored the 4.01 transitional as per
+        discussions on the mailing list for the 0.8 release. (An
+        earlier implementation wrote no doctype at all, because some
+        versions of Mozilla acted strangely with 4.01 Transitional.)
+
+        Subclasses may override to specify something else.
+
+        You can find out more about doc types by searching for
+        DOCTYPE on the web, or visiting:
+        http://www.htmlhelp.com/tools/validator/doctype.html
+        """
+        self.writeln('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">')
+
+ def writeHead(self):
+ """
+ Writes the <head> portion of the page by writing the
+ <head>...</head> tags and invoking writeHeadParts() in between.
+ """
+ wr = self.writeln
+ wr('<head>')
+ self.writeHeadParts()
+ wr('</head>')
+
+ def writeHeadParts(self):
+ """
+ Writes the parts inside the <head>...</head> tags. Invokes
+ writeTitle() and writeStyleSheet(). Subclasses can override this
+ to add additional items and should invoke super.
+ """
+ self.writeTitle()
+ self.writeStyleSheet()
+
+ def writeTitle(self):
+ """
+ Writes the <title> portion of the page. Uses title().
+ """
+ self.writeln('\t<title>%s</title>' % self.title())
+
+ def writeStyleSheet(self):
+ """
+        Writes the style sheet for the page. This default
+        implementation does nothing; subclasses should override it
+        if necessary. A typical implementation is:
+ self.writeln('\t<link rel=stylesheet href=StyleSheet.css type=text/css>')
+ """
+ pass
+
+ def writeBody(self):
+ """
+ Writes the <body> portion of the page by writing the
+ <body>...</body> (making use of self.htBodyArgs()) and invoking
+ self.writeBodyParts() in between.
+ """
+ wr = self.writeln
+ bodyArgs = self.htBodyArgs()
+ if bodyArgs:
+ wr('<body %s>' % bodyArgs)
+ else:
+ wr('<body>')
+ self.writeBodyParts()
+ wr('</body>')
+
+ def writeBodyParts(self):
+ """
+        Invokes writeContent(). Subclasses should override this
+        method only to provide additional page parts, such as a
+        header, sidebar, or footer, that individual pages don't
+        normally have to worry about writing.
+
+ For writing page-specific content, subclasses should override
+ writeContent() instead.
+
+ See SidebarPage for an example override of this method.
+
+ Invoked by writeBody().
+ """
+ self.writeContent()
+
+ def writeContent(self):
+ """
+ Writes the unique, central content for the page.
+
+ Subclasses should override this method (not invoking super) to
+ write their unique page content.
+
+ Invoked by writeBodyParts().
+ """
+ self.writeln('<p> This page has not yet customized its content. </p>')
+
+
+ ## Writing ##
+
+ def write(self, *args):
+ for arg in args:
+ self._response.write(str(arg))
+
+ def writeln(self, *args):
+ for arg in args:
+ self._response.write(str(arg))
+ self._response.write('\n')
+
+
+ ## Threading ##
+
+ def canBeThreaded(self):
+ """ Returns 0 because of the ivars we set up in awake(). """
+ return 0
+
+
+ ## Actions ##
+
+ def handleAction(self, action):
+ """
+ Invoked by `_respond` when a legitimate action has
+ been found in a form. Invokes `preAction`, the actual
+ action method and `postAction`.
+
+ Subclasses rarely override this method.
+ """
+ self.preAction(action)
+ getattr(self, action)()
+ self.postAction(action)
+
+ def actions(self):
+ return []
+
+ def preAction(self, actionName):
+ raise NotImplementedError
+
+ def postAction(self, actionName):
+ raise NotImplementedError
+
+ def methodNameForAction(self, name):
+ raise NotImplementedError
+
+ ## Convenience ##
+
+ def htmlEncode(self, s):
+ return wkcommon.htmlEncode(s)
+
+ def htmlDecode(self, s):
+ return wkcommon.htmlDecode(s)
+
+ def urlEncode(self, s):
+ return wkcommon.urlEncode(s)
+
+ def urlDecode(self, s):
+ return wkcommon.urlDecode(s)
+
+ def forward(self, URL):
+ self.application().forward(self.transaction(), URL)
+
+ def includeURL(self, URL):
+ raise NotImplementedError
+
+ def callMethodOfServlet(self, URL, method, *args, **kwargs):
+ raise NotImplementedError
+
+ def endResponse(self):
+ raise self.EndResponse()
+
+ def sendRedirectAndEnd(self, url):
+ """
+ Sends a redirect back to the client and ends the response. This
+ is a very popular pattern.
+ """
+ self.response().sendRedirect(url)
+ self.endResponse()
+
+
+ ## Self utility ##
+
+ def sessionEncode(self, url=None):
+ """
+ Utility function to access session.sessionEncode.
+ Takes a url and adds the session ID as a parameter. This is for cases where
+ you don't know if the client will accepts cookies.
+ """
+ if url == None:
+ url = self.request().uri()
+ return self.session().sessionEncode(url)
+
+
+ ## Private utility ##
+
+ def _actionSet(self):
+ """ Returns a dictionary whose keys are the names returned by actions(). The dictionary is used for a quick set-membership-test in self._respond. Subclasses don't generally override this method or invoke it. """
+ if not hasattr(self, '_actionDict'):
+ self._actionDict = {}
+ for action in self.actions():
+ self._actionDict[action] = 1
+ return self._actionDict
+
+
+ ## Validate HTML output (developer debugging) ##
+
+ def validateHTML(self, closingTags='</body></html>'):
+ raise NotImplementedError
+
+
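+# A minimal sketch (illustrative only; the class name and strings are
+# invented) of how a servlet is typically written against this Page
+# class: override title() and writeContent(), and let writeHTML()
+# assemble the rest of the page.
+#
+#     class HelloPage(Page):
+#
+#         def title(self):
+#             return 'Hello'
+#
+#         def writeContent(self):
+#             name = self.request().field('name', 'world')
+#             self.writeln('<p>Hello, %s!</p>' % self.htmlEncode(name))
+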
diff --git a/paste/webkit/wksession.py b/paste/webkit/wksession.py
new file mode 100644
index 0000000..1cbdd71
--- /dev/null
+++ b/paste/webkit/wksession.py
@@ -0,0 +1,47 @@
+"""
+The WebKit session object; an interface surrounding a persistent
+dictionary.
+"""
+
+from wkcommon import NoDefault
+
+class Session:
+
+ def __init__(self, dict):
+ self._values = dict
+
+ def invalidate(self):
+ self._values.clear()
+
+ def value(self, name, default=NoDefault):
+ if default is NoDefault:
+ return self._values[name]
+ else:
+ return self._values.get(name, default)
+
+ def hasValue(self, name):
+ return self._values.has_key(name)
+
+ def setValue(self, name, value):
+ self._values[name] = value
+
+ def delValue(self, name):
+ del self._values[name]
+
+ def values(self):
+ return self._values
+
+ def setTimeout(self, timeout):
+ # @@: This should really do something
+ pass
+
+ def __getitem__(self, name):
+ return self.value(name)
+
+ def __setitem__(self, name, value):
+ self.setValue(name, value)
+
+ def __delitem__(self, name):
+ self.delValue(name)
+
+
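+# A small usage sketch (illustrative only); in practice the backing
+# dictionary comes from paste.session via the transaction rather than
+# being built by hand:
+#
+#     session = Session({})
+#     session.setValue('user', 'bob')
+#     session['count'] = 1
+#     session.value('user')           # 'bob'
+#     session.value('missing', None)  # None
+#     session.invalidate()            # clears all values
+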
diff --git a/paste/webkit/wktransaction.py b/paste/webkit/wktransaction.py
new file mode 100644
index 0000000..af73531
--- /dev/null
+++ b/paste/webkit/wktransaction.py
@@ -0,0 +1,96 @@
+"""
+The Webware transaction object. Responsible for creating the request
+and response objects, and managing some parts of the request cycle.
+"""
+
+from wkrequest import HTTPRequest
+from wkresponse import HTTPResponse
+from wksession import Session
+from wkapplication import Application
+
+class Transaction(object):
+
+ def __init__(self, environ, start_response):
+ self._environ = environ
+ self._start_response = start_response
+ self._request = HTTPRequest(self, environ)
+ self._response = HTTPResponse(self, environ, start_response)
+ self._session = None
+ self._application = None
+
+ def application(self):
+ if self._application is None:
+ self._application = Application(self)
+ return self._application
+
+ def request(self):
+ return self._request
+
+ def response(self):
+ return self._response
+
+ def setResponse(self, response):
+ assert 0, "The response cannot be set"
+
+ def hasSession(self):
+ return self._session is not None
+
+ def session(self):
+ if not self._session:
+ self._session = Session(self.request().environ()['paste.session.factory']())
+ return self._session
+
+ def setSession(self, session):
+ self._session = session
+
+ def servlet(self):
+ return self._servlet
+
+ def setServlet(self, servlet):
+ self._servlet = servlet
+
+ def duration(self):
+ return self.response().endTime() - self.request().time()
+
+ def errorOccurred(self):
+ assert 0, "Not tracked"
+
+ def setErrorOccurred(self, flag):
+ assert 0, "Not tracked"
+
+ def awake(self):
+ if self._session:
+ self._session.awake(self)
+ self._servlet.awake(self)
+
+ def respond(self):
+ self._servlet.respond(self)
+
+ def sleep(self):
+ self._servlet.sleep(self)
+
+ def die(self):
+ # In WebKit this looks for any instance variables with a
+ # resetKeyBindings method, but I'm not sure why
+ pass
+
+ def writeExceptionReport(self, handler):
+ assert 0, "Not implemented"
+
+ def runTransaction(self):
+ try:
+ self.awake()
+ self.respond()
+ finally:
+ self.sleep()
+
+ def forward(self, url):
+ assert self._environ.has_key('paste.recursive.forward'), \
+ "Forwarding is not supported (use the recursive middleware)"
+ if url.startswith('/'):
+ # Webware considers absolute paths to still be based off
+ # of the Webware root; but recursive does not.
+ url = url[1:]
+ app_iter = self._environ['paste.recursive.forward'](url)
+ raise self._servlet.ReturnIterException(app_iter)
+
diff --git a/paste/webkit/wsgiwebkit.py b/paste/webkit/wsgiwebkit.py
new file mode 100644
index 0000000..7989702
--- /dev/null
+++ b/paste/webkit/wsgiwebkit.py
@@ -0,0 +1,67 @@
+"""
+wsgiwebkit constructs the WSGI stack of middleware needed to serve
+WebKit content; it does not itself include a server.
+
+Use ``some_server(webkit('/path/to/servlets/'))``
+"""
+
+import sys
+import os
+from paste import urlparser
+from paste import session
+from paste import recursive
+from paste import httpexceptions
+from paste import lint
+from paste import error_middleware
+
+def webkit(directory, install_fake_webware=True, use_lint=False):
+ if install_fake_webware:
+ _install_fake_webware()
+ app = urlparser.URLParser(directory, os.path.basename(directory))
+ if use_lint:
+ app = lint.middleware(app)
+ app = session.SessionMiddleware(app)
+ if use_lint:
+ app = lint.middleware(app)
+ app = httpexceptions.middleware(app)
+ if use_lint:
+ app = lint.middleware(app)
+ app = recursive.RecursiveMiddleware(app)
+ if use_lint:
+ app = lint.middleware(app)
+ app = error_middleware.ErrorMiddleware(app)
+    # Skip lint around the error middleware, since it doesn't modify
+    # its output much at all
+ return app
+
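+# A usage sketch (illustrative; the servlet directory and URL are
+# invented) exercising the stack with wsgilib.interactive, much as the
+# __main__ block below does:
+#
+#     import wsgilib
+#     app = webkit('/path/to/servlets/')
+#     print wsgilib.interactive(app, '/index')
+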
+def install_fake_webware():
+ fake_webware_dir = os.path.join(os.path.dirname(__file__),
+ 'FakeWebware')
+ if fake_webware_dir in sys.path:
+ return
+ sys.path.insert(0, fake_webware_dir)
+_install_fake_webware = install_fake_webware
+
+if __name__ == '__main__':
+ if os.environ.has_key('SERVER_NAME'):
+ import cgiserver
+ cgiserver.run_with_cgi(webkit(os.path.dirname(__file__)),
+ use_cgitb=True,
+ redirect_stdout=True)
+ else:
+        import wsgilib
+        if '-h' in sys.argv or '--help' in sys.argv or not sys.argv[1:]:
+            print 'Usage: %s URL_PATH' % sys.argv[0]
+            print 'Run like: CLIENT_DIR=XXX %s URL_PATH' % sys.argv[0]
+            print 'to put the root of the URL_PATH in XXX'
+ sys.exit()
+ root = os.environ.get('CLIENT_DIR', os.path.dirname(__file__))
+ application = webkit(root)
+ def prapp(url, app=None):
+ if app is None:
+ app = application
+ print wsgilib.interactive(app, url)
+ prapp(sys.argv[1])
diff --git a/paste/wsgilib.py b/paste/wsgilib.py
new file mode 100644
index 0000000..4e37095
--- /dev/null
+++ b/paste/wsgilib.py
@@ -0,0 +1,252 @@
+from Cookie import SimpleCookie
+from cStringIO import StringIO
+import mimetypes
+import os
+
+def get_cookies(environ):
+ """
+ Gets a cookie object (which is a dictionary-like object) from the
+ request environment; caches this value in case get_cookies is
+ called again for the same request.
+ """
+ header = environ.get('HTTP_COOKIE', '')
+ if environ.has_key('paste.cookies'):
+ cookies, check_header = environ['paste.cookies']
+ if check_header == header:
+ return cookies
+ cookies = SimpleCookie()
+ cookies.load(header)
+ environ['paste.cookies'] = (cookies, header)
+ return cookies
+
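+# Example (sketch): reading a cookie from a WSGI environ; the cookie
+# name 'user' is invented for the illustration:
+#
+#     cookies = get_cookies(environ)
+#     if cookies.has_key('user'):
+#         user = cookies['user'].value
+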
+class add_close:
+ """
+    An iterable that iterates over app_iterable and, when closed,
+    closes app_iterable (if possible) and then calls close_func.
+ """
+
+ def __init__(self, app_iterable, close_func):
+ self.app_iterable = app_iterable
+ self.app_iter = iter(app_iterable)
+ self.close_func = close_func
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ return self.app_iter.next()
+
+ def close(self):
+ if hasattr(self.app_iterable, 'close'):
+ self.app_iterable.close()
+ self.close_func()
+
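+# Example (sketch): ensure a cleanup function runs when the server
+# closes the response iterable; close_session here is hypothetical:
+#
+#     app_iter = application(environ, start_response)
+#     return add_close(app_iter, close_session)
+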
+def raw_interactive(application, path_info='', **environ):
+ """
+ Runs the application in a fake environment.
+ """
+ errors = StringIO()
+ basic_environ = {
+ 'PATH_INFO': str(path_info),
+ 'SCRIPT_NAME': '',
+ 'SERVER_NAME': 'localhost',
+ 'SERVER_PORT': '80',
+ 'REQUEST_METHOD': 'GET',
+ 'HTTP_HOST': 'localhost:80',
+ 'CONTENT_LENGTH': '0',
+ 'wsgi.input': StringIO(''),
+ 'wsgi.errors': errors,
+ 'wsgi.version': (1, 0),
+ 'wsgi.multithread': False,
+ 'wsgi.multiprocess': False,
+ 'wsgi.run_once': False,
+ 'wsgi.url_scheme': 'http',
+ }
+ for name, value in environ.items():
+ name = name.replace('__', '.')
+ basic_environ[name] = value
+ if isinstance(basic_environ['wsgi.input'], str):
+ basic_environ['wsgi.input'] = StringIO(basic_environ['wsgi.input'])
+ output = StringIO()
+ data = {}
+    def start_response(status, headers, exc_info=None):
+ data['status'] = status
+ data['headers'] = headers
+ return output.write
+ app_iter = application(basic_environ, start_response)
+ try:
+ try:
+ for s in app_iter:
+ output.write(s)
+ except TypeError, e:
+ # Typically "iteration over non-sequence", so we want
+ # to give better debugging information...
+ e.args = ((e.args[0] + ' iterable: %r' % app_iter),) + e.args[1:]
+ raise
+ finally:
+ if hasattr(app_iter, 'close'):
+ app_iter.close()
+ return (data['status'], data['headers'], output.getvalue(),
+ errors.getvalue())
+
+def interactive(*args, **kw):
+ status, headers, content, errors = raw_interactive(*args, **kw)
+ full = StringIO()
+ if errors:
+ full.write('Errors:\n')
+ full.write(errors.strip())
+ full.write('\n----------end errors\n')
+ full.write(status + '\n')
+ for name, value in headers:
+ full.write('%s: %s\n' % (name, value))
+ full.write('\n')
+ full.write(content)
+ return full.getvalue()
+
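+# Example (sketch) of exercising an application with these helpers;
+# demo_app is invented for the illustration:
+#
+#     def demo_app(environ, start_response):
+#         start_response('200 OK', [('content-type', 'text/plain')])
+#         return ['hello']
+#
+#     status, headers, body, errors = raw_interactive(demo_app, '/')
+#     print interactive(demo_app, '/', QUERY_STRING='x=1')
+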
+def construct_url(environ, with_query_string=True):
+ """
+ Reconstructs the URL from the WSGI environment.
+ """
+ url = environ['wsgi.url_scheme']+'://'
+
+ if environ.get('HTTP_HOST'):
+ url += environ['HTTP_HOST'].split(':')[0]
+ else:
+ url += environ['SERVER_NAME']
+
+ if environ['wsgi.url_scheme'] == 'https':
+ if environ['SERVER_PORT'] != '443':
+ url += ':' + environ['SERVER_PORT']
+ else:
+ if environ['SERVER_PORT'] != '80':
+ url += ':' + environ['SERVER_PORT']
+
+ url += environ.get('SCRIPT_NAME','')
+ url += environ.get('PATH_INFO','')
+ if with_query_string:
+ if environ.get('QUERY_STRING'):
+ url += '?' + environ['QUERY_STRING']
+ return url
+
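+# Example (sketch), with a hand-built environ (values invented):
+#
+#     environ = {'wsgi.url_scheme': 'http', 'HTTP_HOST': 'example.com',
+#                'SERVER_NAME': 'example.com', 'SERVER_PORT': '80',
+#                'SCRIPT_NAME': '/app', 'PATH_INFO': '/page',
+#                'QUERY_STRING': 'x=1'}
+#     construct_url(environ)  # 'http://example.com/app/page?x=1'
+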
+def error_body_response(error_code, message):
+ """
+ Returns a standard HTML response page for an HTTP error.
+ """
+ return '''\
+<html>
+ <head>
+ <title>%(error_code)s</title>
+ </head>
+ <body>
+ <h1>%(error_code)s</h1>
+ %(message)s
+ </body>
+</html>''' % {
+ 'error_code': error_code,
+ 'message': message,
+ }
+
+def error_response(environ, error_code, message,
+ debug_message=None):
+ """
+ Returns the status, headers, and body of an error response. Use
+ like::
+
+ status, headers, body = wsgilib.error_response(
+            environ, '301 Moved Permanently',
+            'Moved to <a href="%s">%s</a>' % (url, url))
+ start_response(status, headers)
+ return [body]
+ """
+ if debug_message and environ.get('paste.config', {}).get('debug'):
+ message += '\n\n<!-- %s -->' % debug_message
+ body = error_body_response(error_code, message)
+ headers = [('content-type', 'text/html'),
+ ('content-length', str(len(body)))]
+ return error_code, headers, body
+
+def send_file(filename):
+ """
+ Returns an application that will send the file at the given
+ filename. Adds a mime type based on ``mimetypes.guess_type()``.
+ """
+ # @@: Should test things like last-modified, if-modified-since,
+ # etc.
+
+ def application(environ, start_response):
+        type, encoding = mimetypes.guess_type(filename)
+        if type is None:
+            type = 'application/octet-stream'
+        # @@: I don't know what to do with the encoding.
+ size = os.stat(filename).st_size
+ try:
+ file = open(filename, 'rb')
+ except (IOError, OSError), e:
+ status, headers, body = error_response(
+                environ, '403 Forbidden',
+ 'You are not permitted to view this file (%s)' % e)
+ start_response(status, headers)
+ return [body]
+ start_response('200 OK',
+ [('content-type', type),
+ ('content-length', str(size))])
+ return _FileIter(file)
+
+ return application
+
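+# Example (sketch): serving a single file as a WSGI application; the
+# path is invented for the illustration:
+#
+#     app = send_file('/var/www/static/logo.png')
+#     # app can now be mounted like any other WSGI application
+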
+class _FileIter:
+
+ def __init__(self, file, blocksize=4096):
+ self.file = file
+ self.blocksize = blocksize
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ data = self.file.read(self.blocksize)
+ if not data:
+ raise StopIteration
+ return data
+
+ def close(self):
+ self.file.close()
+
+def has_header(headers, name):
+ """
+ Is header named ``name`` present in headers?
+ """
+ name = name.lower()
+ for header, value in headers:
+ if header.lower() == name:
+ return True
+ return False
+
+def header_value(headers, name, collapse=False):
+ """
+ Returns the header's value, or None if no such header. If a
+ header appears more than once, all the values of the headers
+ are joined with ','
+ """
+    name = name.lower()
+    result = [value for header, value in headers
+              if header.lower() == name]
+ if result:
+ return ','.join(result)
+ else:
+ return None
+
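+# Example (sketch) of the header helpers; the header list is invented:
+#
+#     headers = [('Content-Type', 'text/html'),
+#                ('Set-Cookie', 'a=1'), ('Set-Cookie', 'b=2')]
+#     has_header(headers, 'content-type')  # True
+#     header_value(headers, 'Set-Cookie')  # 'a=1,b=2'
+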
+def path_info_split(path_info):
+ """
+ Splits off the first segment of the path. Returns (first_part,
+ rest_of_path). first_part can be None (if PATH_INFO is empty), ''
+ (if PATH_INFO is '/'), or a name without any /'s. rest_of_path
+ can be '' or a string starting with /.
+ """
+ if not path_info:
+ return None, ''
+ assert path_info.startswith('/'), (
+ "PATH_INFO should start with /: %r" % path_info)
+ path_info = path_info.lstrip('/')
+ if '/' in path_info:
+ first, rest = path_info.split('/', 1)
+ return first, '/' + rest
+ else:
+ return path_info, ''
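+
+# Example (sketch) of the splits this produces:
+#
+#     path_info_split('')            # (None, '')
+#     path_info_split('/')           # ('', '')
+#     path_info_split('/foo')        # ('foo', '')
+#     path_info_split('/foo/bar/x')  # ('foo', '/bar/x')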