summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorroot <devnull@localhost>2006-04-26 10:48:09 +0000
committerroot <devnull@localhost>2006-04-26 10:48:09 +0000
commit8b1e1c104bdff504b3e775b450432e6462b8d09b (patch)
tree0367359f6a18f318741f387d82dc3dcfd8139950
downloadlogilab-common-8b1e1c104bdff504b3e775b450432e6462b8d09b.tar.gz
forget the past.
forget the past.
-rw-r--r--.hgignore4
-rw-r--r--ChangeLog461
-rw-r--r--DEPENDS1
-rw-r--r--MANIFEST.in5
-rw-r--r--README129
-rw-r--r--__init__.py309
-rw-r--r--__pkginfo__.py59
-rw-r--r--announce.txt23
-rw-r--r--astutils.py80
-rw-r--r--bind.py272
-rw-r--r--cache.py97
-rw-r--r--cli.py189
-rw-r--r--compat.py145
-rw-r--r--configuration.py578
-rw-r--r--corbautils.py96
-rw-r--r--daemon.py144
-rw-r--r--db.py530
-rw-r--r--debian/changelog316
-rw-r--r--debian/control29
-rw-r--r--debian/copyright28
-rw-r--r--debian/python-logilab-common.dirs6
-rw-r--r--debian/python-logilab-common.docs1
-rw-r--r--debian/python-logilab-common.postinst24
-rw-r--r--debian/python-logilab-common.preinst20
-rw-r--r--debian/python-logilab-common.prerm14
-rw-r--r--debian/rules69
-rw-r--r--debian/watch3
-rw-r--r--doc/makefile17
-rw-r--r--fileutils.py505
-rw-r--r--html.py47
-rw-r--r--interface.py47
-rw-r--r--logger.py152
-rw-r--r--logservice.py30
-rw-r--r--modutils.py536
-rw-r--r--monclient.py59
-rw-r--r--monserver.py117
-rw-r--r--optik_ext.py276
-rw-r--r--patricia.py187
-rw-r--r--setup.cfg3
-rw-r--r--setup.py179
-rw-r--r--shellutils.py63
-rw-r--r--sqlgen.py242
-rw-r--r--table.py958
-rw-r--r--test/_test_astng.py14
-rw-r--r--test/data/__init__.py1
-rw-r--r--test/data/foo.txt9
-rw-r--r--test/data/module.py88
-rw-r--r--test/data/module2.py77
-rw-r--r--test/data/newlines.txt3
-rw-r--r--test/data/noendingnewline.py38
-rw-r--r--test/data/nonregr.py14
-rw-r--r--test/data/normal_file.txt0
-rw-r--r--test/data/spam.txt9
-rw-r--r--test/data/sub/doc.txt1
-rw-r--r--test/data/sub/momo.py1
-rw-r--r--test/data/write_protected_file.txt0
-rw-r--r--test/foomod.py17
-rw-r--r--test/runtests.py5
-rw-r--r--test/unittest_bind.py65
-rw-r--r--test/unittest_cache.py102
-rw-r--r--test/unittest_compat.py120
-rw-r--r--test/unittest_configuration.py176
-rw-r--r--test/unittest_db.py179
-rwxr-xr-xtest/unittest_fileutils.py147
-rw-r--r--test/unittest_graph.py32
-rw-r--r--test/unittest_logger.py54
-rw-r--r--test/unittest_modutils.py203
-rw-r--r--test/unittest_patricia.py72
-rw-r--r--test/unittest_table.py428
-rw-r--r--test/unittest_testlib.py125
-rw-r--r--test/unittest_textutils.py120
-rw-r--r--test/unittest_tree.py201
-rw-r--r--test/unittest_ureports_html.py50
-rw-r--r--test/unittest_ureports_text.py90
-rw-r--r--test/utils.py70
-rw-r--r--testlib.py608
-rw-r--r--textutils.py321
-rw-r--r--tree.py350
-rw-r--r--twisted_distutils.py209
-rw-r--r--ureports/__init__.py173
-rw-r--r--ureports/docbook_writer.py138
-rw-r--r--ureports/html_writer.py131
-rw-r--r--ureports/nodes.py200
-rw-r--r--ureports/text_writer.py141
-rw-r--r--vcgutils.py212
-rw-r--r--visitor.py108
-rw-r--r--xmlrpcutils.py131
87 files changed, 12283 insertions, 0 deletions
diff --git a/.hgignore b/.hgignore
new file mode 100644
index 0000000..2849616
--- /dev/null
+++ b/.hgignore
@@ -0,0 +1,4 @@
+(^|/)\.svn($|/)
+(^|/)\.hg($|/)
+(^|/)\.hgtags($|/)
+^log$
diff --git a/ChangeLog b/ChangeLog
new file mode 100644
index 0000000..d84eb1b
--- /dev/null
+++ b/ChangeLog
@@ -0,0 +1,461 @@
+ChangeLog for logilab.common
+============================
+
+2006-04-25 -- 0.15.1
+ * db: add missing port handling to get_connection function and
+ dbapimodule.connect methods
+    * testlib: various fixes and minor improvements
+
+2006-03-28 -- 0.15.0
+ * added "cached" decorator and a simple text progression bar into __init__
+ * added a simple text progress bar into __init__
+ * configuration: fixed man page generation when using python 2.4
+    * db: added pysqlite2 support, preconfigured to handle timestamp using
+ mxDatetime and to correctly handle boolean types
+
+
+2006-03-06 -- 0.14.1
+ * backported file support and add LOG_CRIT to builtin in logservice module
+
+2006-02-28 -- 0.14.0
+ * renamed assertXML*Valid to assertXML*WellFormed and deprecated the old name
+ * fixed modutils.load_module_from_*
+
+
+2006-02-03 -- 0.13.1
+ * fix some tests, patch contributed by Marien Zwart
+ * added ability to log into a file with make_logger()
+
+
+2006-01-06 -- 0.13.0
+ * testlib: ability to skip a test
+ * configuration:
+ - cleaner configuration file generation
+ - refactoring so that we can have more control on file
+ configuration loading using read_config_file and load_config_file
+ instead of load_file_configuration
+ * modutils: fix is_relative to return False when from_file is a file
+ located somewhere in sys.path
+    * ureport: new "escaped" attribute on Text nodes, controlling html escaping
+ * compat: make set iterable and support more other set operations...
+ * removed the astng sub-package, since it's now self-distributed as
+ logilab-astng
+
+
+2005-09-06 -- 0.12.0
+ * shellutils: bug fix in mv()
+ * compat:
+ - use set when available
+ - added sorted and reversed
+ * table: new methods and some optimizations
+ * tree: added some deprecation warnings
+
+
+
+2005-07-25 -- 0.11.0
+ * db: refactoring, added sqlite support, new helpers to support DBMS
+ specific features
+
+
+
+2005-07-07 -- 0.10.1
+ * configuration: added basic man page generation feature
+ * ureports: unicode handling, some minor fixes
+ * testlib: enhance MockConnection
+ * python2.2 related fixes in configuration and astng
+
+
+
+2005-05-04 -- 0.10.0
+ * astng: improve unit tests coverage
+
+ * astng.astng: fix Function.format_args, new method
+ Function.default_value, bug fix in Node.resolve
+
+ * astng.builder: handle classmethod and staticmethod as decorator,
+ handle data descriptors when building from living objects
+
+ * ureports:
+
+ - new docbook formatter
+ - handle ReST like urls in the text writer
+ - new build_summary utility function
+
+
+
+2005-04-14 -- 0.9.3
+ * optik_ext: add man page generation based on optik/optparse options
+ definition
+
+ * modutils: new arguments to get_source_file to handle files without
+ extensions
+
+ * astng: fix problem with the manager and python 2.2 (optik related)
+
+
+
+2005-02-16 -- 0.9.2
+ * textutils:
+
+ - added epydoc documentation
+ - new sep argument to the get_csv function
+ - fix pb with normalize_* functions on windows platforms
+
+ * fileutils:
+
+ - added epydoc documentation
+ - fixed bug in get_by_ext (renamed files_by_ext) with the
+ exclude_dirs argument
+
+ * configuration:
+
+ - fixed a bug in configuration file generation on windows platforms
+ - better test coverage
+
+ * fixed testlib.DocTest which wasn't working anymore with recent
+ versions of pyunit
+
+ * added "context_file" argument to file_from_modpath to avoid
+ possible relative import problems
+
+ * astng: use the new context_file argument from Node.resolve()
+
+
+
+2005-02-04 -- 0.9.1
+ * astng:
+
+ - remove buggy print
+ - fixed builder to deal with builtin methods
+ - fixed raw_building.build_function with python 2.4
+
+ * modutils: code cleanup, some reimplementation based on "imp",
+ better handling of windows specific extensions, epydoc documentation
+
+ * fileutils: new exclude_dirs argument to the get_by_ext function
+
+ * testlib: main() support -p option to run test in a profiled mode
+
+ * generated documentation for modutils in the doc/ subdirectory
+
+
+
+2005-01-20 -- 0.9.0
+ * astng:
+
+ - refactoring of some huge methods
+ - fix interface resolving when __implements__ is defined in a parent
+ class in another module
+ - add special code in the builder to fix problem with qt
+ - new source_line method on Node
+ - fix sys.path during parsing to avoid some failure when trying
+ to get imported names by `from module import *`, and use an astng
+ building instead of exec'ing the statement
+ - fix possible AttributeError with Function.type
+ - manager.astng_from_file fallback to astng_from_module if possible
+
+ * textutils: fix bug in normalize_paragraph, unquote handle empty string
+ correctly
+
+ * modutils:
+ - use a cache in has_module to speed up things when heavily used
+ - fix file_from_modpath to handle pyxml and os.path
+
+ * configuration: fix problem with serialization/deserialization of empty
+ string
+
+
+
+2005-01-04 -- 0.8.0
+ * modutils: a lot of fixes/rewrite on various functions to avoid
+ unnecessary imports, sys.path pollution, and other bugs (notably
+      making pylint report wrong module names/paths)
+
+ * astng: new "inspector" module, initially taken from pyreverse code
+ (http://www.logilab.org/projects/pyreverse), miscellaneous bug fixes
+
+ * configuration: new 'usage' parameter on the Configuration
+ initializer
+
+ * logger: unicode support
+
+ * fileutils: get_by_ext also ignore ".svn" directories, not only "CVS"
+
+
+
+2004-11-03 -- 0.7.1
+ * astng:
+
+ - don't raise a syntax error on files missing a trailing \n.
+ - fix utils.is_abstract (was causing an unexpected exception if a
+ string exception was raised).
+ - fix utils.get_implemented.
+ - fix file based manager's cache problem.
+
+ * textutils: fixed normalize_text / normalize_paragraph functions
+
+
+
+2004-10-11 -- 0.7.0
+ * astng: new methods on the manager, returning astng with nodes for
+ packages (ie recursive structure instead of the flat one), with
+ automatic lazy loading + introduction of a dict like interface to
+ manipulate those nodes and Module, Class and Function nodes.
+
+ * logservice: module imported from the ginco project
+
+ * configuration: added new classes Configuration and
+ OptionsManager2Configuration adapter, fix bug in loading options
+ from file
+
+ * optik_ext/configuration: some new option type "multiple_choice"
+
+ * fileutils: new ensure_mode function
+
+ * compat: support for sum and enumerate
+
+
+
+2004-09-23 -- 0.6.0
+ * db: added DBAPIAdapter
+
+    * textutils: fix in pretty_match causing malformed messages in pylint
+ added ansi colorization management
+
+ * modutils: new functions get_module_files, has_module and file_from_modpath
+
+ * astng: some new utility functions taken from pylint, minor changes to the
+ manager API, Node.resolve doesn't support anymore "living" resolution,
+ some new methods on astng nodes
+
+ * compat: new module for a transparent compatibility layer between
+ different python version (actually 2.2 vs 2.3 for now)
+
+
+
+2004-07-08 -- 0.5.2
+ * astng: fix another bug in klassnode.ancestors() method...
+
+ * db: fix mysql access
+
+ * cli: added a space after the prompt
+
+
+
+2004-06-04 -- 0.5.1
+ * astng: fix undefined var bug in klassnode.ancestors() method
+
+ * ureports: fix attributes on title layout
+
+    * packaging: fix the setup.py script to allow bdist_wininst (well, the
+ generated installer has not been tested...) with the necessary
+ logilab/__init__.py file
+
+
+
+2004-05-10 -- 0.5.0
+ * ureports: new Universal Reports sub-package
+
+ * xmlrpcutils: new xmlrpc utilities module
+
+ * astng: resolve(name) now handle (at least try) builtins
+
+ * astng: fixed Class.as_string (empty paren when no base classes)
+
+    * astng.builder: knows a little about method descriptors, Function with
+ unknown arguments have argnames==None.
+
+ * fileutils: new is_binary(filename) function
+
+ * textutils: fixed some Windows bug
+
+    * tree: base node doesn't have the "title" attribute anymore
+
+ * testlib: removed the spawn function (who used that ?!), added MockSMTP,
+ MockConfigParser, MockConnexion and DocTestCase (test class for
+ modules embeding doctest). All mocks objects are very basic and will be
+ enhanced as the need comes.
+
+ * testlib: added a TestCase class with some additional methods then
+ the regular unittest.TestCase class
+
+    * cli: allow specifying a command prefix by a class attribute, more
+ robust, print available commands on help
+
+ * db: new "binary" function to get the binary wrapper for a given driver,
+ and new "system_database" function returning the system database name
+ for different DBMS.
+
+ * configuration: better group control
+
+
+
+2004-02-20 -- 0.4.5
+ * db: it's now possible to fix the modules search order. By default call
+ set_isolation_level if psycopg is used
+
+
+
+2004-02-17 -- 0.4.4
+ * modutils: special case for os.path in get_module_part
+
+ * astng: handle special case where we are on a package node importing a module
+ using the same name as the package, which may end in an infinite loop
+ on relative imports in Node.resolve
+
+ * fileutils: new get_by_ext function
+
+
+
+2004-02-11 -- 0.4.3
+ * astng: refactoring of Class.ancestor_for_* methods (now
+ depends on python 2.2 generators)
+
+ * astng: make it more robust
+
+ * configuration: more explicit exception when a bad option is
+ provided
+
+ * configuration: define a short version of an option using the "short"
+ keyword, taking a single letter as value
+
+ * configuration: new method global_set_option on the manager
+
+ * testlib : allow no "suite" nor "Run" function in test modules
+
+ * shellutils: fix bug in *mv*
+
+
+
+2003-12-23 -- 0.4.2
+ * added Project class and some new methods to the ASTNGManger
+
+ * some new functions in astng.utils
+
+ * fixed bugs in some as_string methods
+
+ * fixed bug in textutils.get_csv
+
+ * fileutils.lines now take a "comments" argument, allowing to ignore
+ comment lines
+
+
+
+2003-11-24 -- 0.4.1
+ * added missing as_string methods on astng nodes
+
+ * bug fixes on Node.resolve
+
+ * minor fixes in textutils and fileutils
+
+ * better test coverage (need more !)
+
+
+
+2003-11-13 -- 0.4.0
+ * new textutils and shellutils modules
+
+ * full astng rewrite, now based on the compiler.ast package from the
+ standard library
+
+    * added next_sibling and previous_sibling methods to Node
+
+ * fix get_cycles
+
+
+
+2003-10-14 -- 0.3.5
+ * fixed null size cache bug
+
+ * added 'sort_by_column*' methods for tables
+
+
+
+2003-10-08 -- 0.3.4
+    * fix bug in astng, occurring with python2.3 and modules including an
+ encoding declaration
+
+    * fix bug in astutils.get_rhs_consumed_names, occurring in lists
+ comprehension
+
+ * remove debug print statement from configuration.py which caused a
+ generation of incorrect configuration files.
+
+
+
+2003-10-01 -- 0.3.3
+ * fix bug in modutils.modpath_from_file
+
+ * new module corbautils
+
+
+
+2003-09-18 -- 0.3.2
+ * fix bug in modutils.load_module_from_parts
+
+ * add missing __future__ imports
+
+
+
+2003-09-18 -- 0.3.1
+ * change implementation of modutils.load_module_from_name (use find_module
+ and load_module instead of __import__)
+
+ * more bug fixes in astng
+
+ * new functions in fileutils (lines, export) and __init__ (Execute)
+
+
+
+2003-09-12 -- 0.3
+ * expect "def suite" or "def Run(runner=None)" on unittest module
+
+ * fixes in modutils
+
+ * major fixes in astng
+
+ * new fileutils and astutils modules
+
+ * enhancement of the configuration module
+
+ * new option type "named" in optik_the ext module
+
+
+
+2003-06-18 -- 0.2.2
+ * astng bug fixes
+
+
+
+2003-06-04 -- 0.2.1
+ * bug fixes
+
+ * fix packaging problem
+
+
+
+2003-06-02 -- 0.2.0
+ * add the interface, modutils, optik_ext and configuration modules
+
+ * add the astng sub-package
+
+ * miscellaneous fixes
+
+
+
+2003-04-17 -- 0.1.2
+ * add the stringio module
+
+ * minor fixes
+
+
+
+2003-02-28 -- 0.1.1
+ * fix bug in tree.py
+
+ * new file distutils_twisted
+
+
+
+2003-02-17 -- 0.1.0
+ * initial revision
diff --git a/DEPENDS b/DEPENDS
new file mode 100644
index 0000000..5fe2924
--- /dev/null
+++ b/DEPENDS
@@ -0,0 +1 @@
+python-xml
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..07c73ce
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,5 @@
+include ChangeLog
+include README
+include DEPENDS
+recursive-include test/data *.py *.txt
+recursive-include doc/html *
diff --git a/README b/README
new file mode 100644
index 0000000..de5b7dc
--- /dev/null
+++ b/README
@@ -0,0 +1,129 @@
+Logilab's common library
+========================
+
+What's this ?
+-------------
+
+This package contains some modules used by different Logilab
+projects.
+
+It is released under the GNU Public License.
+
+There is no documentation available yet but the source code should be
+clean and well documented.
+
+
+Installation
+------------
+
+Extract the tarball, jump into the created directory and run ::
+
+ python setup.py install
+
+For installation options, see ::
+
+ python setup.py install --help
+
+
+Provided modules
+----------------
+
+Here is a brief description of the available modules :
+
+* astutils:
+ Some utilities function to manipulate Python's AST.
+
+* astng :
+ Python Abstract Syntax Tree New Generation. Provides an higher
+ level representation of ast objects.
+
+* bind.py :
+ Provides a way to optimize globals in certain functions by binding
+  their names to values provided in a dictionary.
+
+* cache.py :
+ A cache implementation with a least recently used algorithm.
+
+* cli.py :
+ Command line interface helper classes.
+
+* compat.py:
+ Transparent compatibility layer between different python version
+ (actually 2.2 vs 2.3 for now)
+
+* configuration.py :
+ Two mix-in classes to handle configuration from both command line
+ (using optik) and configuration file.
+
+* corbautils.py:
+  Useful functions for use with the OmniORB CORBA library.
+
+* daemon.py :
+ A daemon mix-in class.
+
+* db.py :
+ A generic method to get a database connection.
+
+* html.py :
+ Return an html formatted traceback from python exception infos.
+
+* fileutils.py :
+ Some file / file path manipulation utilities.
+
+* interface.py
+ Bases class for interfaces.
+
+* logger.py :
+ Define a logger interface and two concrete loggers : one which prints
+ everything on stdout, the other using syslog.
+
+* modutils.py :
+ Module manipulation utilities.
+
+* optik_ext :
+ Add an abstraction level to transparently import optik classes from
+ optparse (python >= 2.3) or the optik package. It also defines two
+ new option types : regexp and csv.
+
+* patricia.py :
+ A Python implementation of PATRICIA trie (Practical Algorithm to
+ Retrieve Information Coded in Alphanumeric).
+
+* shellutils:
+ Some utilities to replace shell scripts with python scripts.
+
+* sqlgen.py :
+ Helper class to generate SQL strings to use with python's DB-API.
+
+* testlib.py :
+ Generic tests execution methods.
+
+* textutils.py:
+ Some text manipulation utilities.
+
+* tree.py :
+  Base class to represent tree structure, and some others to make it
+  work with the visitor implementation (see below).
+
+* ureports:
+ Provides a way to create simple reports using python objects
+ without care of the final formatting. Some formatters text and html
+ are provided.
+
+* vcgutils.py :
+ utilities functions to generate file readable with Georg Sander's vcg
+ (Visualization of Compiler Graphs).
+
+* visitor.py :
+ A generic visitor pattern implementation.
+
+* twisted_distutils.py
+ This module enables the installation of plugins.tml files using standard
+ distutils syntax. Note that you can use this to install files that
+ are not twisted plugins in any package directory of your application.
+
+
+If you have any questions, please mail devel@logilab.fr for support.
+
+Sylvain Thénault
+Apr 15, 2004
diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000..d1917b7
--- /dev/null
+++ b/__init__.py
@@ -0,0 +1,309 @@
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+""" Copyright (c) 2000-2002 LOGILAB S.A. (Paris, FRANCE).
+ http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+Logilab common libraries
+"""
+
+from __future__ import nested_scopes
+
+__revision__ = "$Id: __init__.py,v 1.30 2006-03-24 10:37:48 syt Exp $"
+
+# FIXME: move all those functions in a separated module
+
+def intersection(list1, list2):
+ """return the intersection of list1 and list2"""
+ intersect_dict, result = {}, []
+ for item in list1:
+ intersect_dict[item] = 1
+ for item in list2:
+ if intersect_dict.has_key(item):
+ result.append(item)
+ return result
+
+def difference(list1, list2):
+ """return elements of list1 not in list2"""
+ tmp, result = {}, []
+ for i in list2:
+ tmp[i] = 1
+ for i in list1:
+ if not tmp.has_key(i):
+ result.append(i)
+ return result
+
+def union(list1, list2):
+ """return list1 union list2"""
+ tmp = {}
+ for i in list1:
+ tmp[i] = 1
+ for i in list2:
+ tmp[i] = 1
+ return tmp.keys()
+
+def make_domains(lists):
+ """
+ given a list of lists, return a list of domain for each list to produce all
+ combinaisons of possibles values
+
+ ex: (['a', 'b'], ['c','d', 'e'])
+ -> (['a', 'b', 'a', 'b', 'a', 'b'],
+ ['c', 'c', 'd', 'd', 'e', 'e'])
+ """
+ domains = []
+ for iterable in lists:
+ new_domain = iterable[:]
+ for i in range(len(domains)):
+ domains[i] = domains[i]*len(iterable)
+ if domains:
+ missing = (len(domains[0]) - len(iterable)) / len(iterable)
+ i = 0
+ for j in range(len(iterable)):
+ value = iterable[j]
+ for dummy in range(missing):
+ new_domain.insert(i, value)
+ i += 1
+ i += 1
+ domains.append(new_domain)
+ return domains
+
+
+def flatten(iterable, tr_func=None, results=None):
+ """flatten a list of list with any level
+
+ if tr_func is not None, it should be a one argument function that'll be called
+ on each final element
+ """
+ if results is None:
+ results = []
+ for val in iterable:
+ if type(val) in (type(()), type([])):
+ flatten(val, tr_func, results)
+ elif tr_func is None:
+ results.append(val)
+ else:
+ results.append(tr_func(val))
+ return results
+
+
+def get_cycles(graph_dict, vertices=None):
+ '''given a dictionnary representing an ordered graph (i.e. key are vertices
+ and values is a list of destination vertices representing edges), return a
+ list of detected cycles
+ '''
+ if not graph_dict:
+ return ()
+ result = []
+ if vertices is None:
+ vertices = graph_dict.keys()
+ for vertice in vertices:
+ _get_cycles(graph_dict, vertice, [], result)
+ return result
+
+def _get_cycles(graph_dict, vertice=None, path=None, result=None):
+ """recursive function doing the real work for get_cycles"""
+ if vertice in path:
+ cycle = [vertice]
+ for i in range(len(path)-1, 0, -1):
+ node = path[i]
+ if node == vertice:
+ break
+ cycle.insert(0, node)
+ # make a canonical representation
+ start_from = min(cycle)
+ index = cycle.index(start_from)
+ cycle = cycle[index:] + cycle[0:index]
+ # append it to result if not already in
+ if not cycle in result:
+ result.append(cycle)
+ return
+ path.append(vertice)
+ try:
+ for node in graph_dict[vertice]:
+ _get_cycles(graph_dict, node, path, result)
+ except KeyError:
+ pass
+ path.pop()
+
+
+def cached(callableobj, keyarg=None):
+ """simple decorator to cache result of method call"""
+ #print callableobj, keyarg, callableobj.func_code.co_argcount
+ if callableobj.func_code.co_argcount == 1 or keyarg == 0:
+
+ def cache_wrapper1(self, *args):
+ cache = '_%s_cache_' % callableobj.__name__
+ #print 'cache1?', cache
+ try:
+ return getattr(self, cache)
+ except AttributeError:
+ #print 'miss'
+ value = callableobj(self, *args)
+ setattr(self, cache, value)
+ return value
+ return cache_wrapper1
+
+ elif keyarg:
+
+ def cache_wrapper2(self, *args, **kwargs):
+ cache = '_%s_cache_' % callableobj.__name__
+ key = args[keyarg-1]
+ #print 'cache2?', cache, self, key
+ try:
+ _cache = getattr(self, cache)
+ except AttributeError:
+ #print 'init'
+ _cache = {}
+ setattr(self, cache, _cache)
+ try:
+ return _cache[key]
+ except KeyError:
+ #print 'miss', self, cache, key
+ _cache[key] = callableobj(self, *args, **kwargs)
+ return _cache[key]
+ return cache_wrapper2
+ def cache_wrapper3(self, *args):
+ cache = '_%s_cache_' % callableobj.__name__
+ #print 'cache3?', cache, self, args
+ try:
+ _cache = getattr(self, cache)
+ except AttributeError:
+ #print 'init'
+ _cache = {}
+ setattr(self, cache, _cache)
+ try:
+ return _cache[args]
+ except KeyError:
+ #print 'miss'
+ _cache[args] = callableobj(self, *args)
+ return _cache[args]
+ return cache_wrapper3
+
+import sys
+
+class ProgressBar(object):
+ """a simple text progression bar"""
+
+ def __init__(self, nbops, size=20., stream=sys.stdout):
+ self._dotevery = max(nbops / size, 1)
+ self._fstr = '\r[%-20s]'
+ self._dotcount, self._dots = 1, []
+ self._stream = stream
+
+ def update(self):
+ """update the progression bar"""
+ self._dotcount += 1
+ if self._dotcount >= self._dotevery:
+ self._dotcount = 1
+ self._dots.append('.')
+ self._stream.write(self._fstr % ''.join(self._dots))
+ self._stream.flush()
+
+
+import tempfile
+import os
+import time
+from os.path import exists
+
+class Execute:
+ """This is a deadlock save version of popen2 (no stdin), that returns
+ an object with errorlevel, out and err
+ """
+
+ def __init__(self, command):
+ outfile = tempfile.mktemp()
+ errfile = tempfile.mktemp()
+ self.status = os.system("( %s ) >%s 2>%s" %
+ (command, outfile, errfile)) >> 8
+ self.out = open(outfile,"r").read()
+ self.err = open(errfile,"r").read()
+ os.remove(outfile)
+ os.remove(errfile)
+
+def acquire_lock(lock_file, max_try=10, delay=10):
+ """acquire a lock represented by a file on the file system"""
+ count = 0
+ while max_try <= 0 or count < max_try:
+ if not exists(lock_file):
+ break
+ count += 1
+ time.sleep(delay)
+ else:
+ raise Exception('Unable to acquire %s' % lock_file)
+ stream = open(lock_file, 'w')
+ stream.write(str(os.getpid()))
+ stream.close()
+
+def release_lock(lock_file):
+ """release a lock represented by a file on the file system"""
+ os.remove(lock_file)
+
+
+## Deprecation utilities #########################
+
+from warnings import warn
+
+class deprecated(type):
+ """metaclass to print a warning on instantiation of a deprecated class"""
+
+ def __call__(cls, *args, **kwargs):
+ msg = getattr(cls, "__deprecation_warning__",
+ "%s is deprecated" % cls.__name__)
+ warn(msg, DeprecationWarning, stacklevel=2)
+ return type.__call__(cls, *args, **kwargs)
+
+
+def class_renamed(old_name, new_class, message=None):
+ """automatically creates a class which fires a DeprecationWarning
+ when instantiated.
+
+ >>> Set = class_renamed('Set', set, 'Set is now replaced by set')
+ >>> s = Set()
+ sample.py:57: DeprecationWarning: Set is now replaced by set
+ s = Set()
+ >>>
+ """
+ clsdict = {}
+ if message is not None:
+ clsdict['__deprecation_warning__'] = message
+ try:
+ # new-style class
+ return deprecated(old_name, (new_class,), clsdict)
+ except (NameError, TypeError):
+ # old-style class
+ class DeprecatedClass(new_class):
+ """FIXME: There might be a better way to handle old/new-style class
+ """
+ def __init__(self, *args, **kwargs):
+ warn(message, DeprecationWarning, stacklevel=2)
+ new_class.__init__(self, *args, **kwargs)
+ return DeprecatedClass
+
+
+def deprecated_function(new_func, message=None):
+ """creates a function which fires a DeprecationWarning when used
+
+ For example, if <bar> is deprecated in favour of <foo> :
+ >>> bar = deprecated_function(foo, 'bar is deprecated')
+ >>> bar()
+ sample.py:57: DeprecationWarning: bar is deprecated
+ bar()
+ >>>
+ """
+ if message is None:
+ message = "this function is deprecated, use %s instead" % (
+ new_func.func_name)
+ def deprecated(*args, **kwargs):
+ warn(message, DeprecationWarning, stacklevel=2)
+ return new_func(*args, **kwargs)
+ return deprecated
diff --git a/__pkginfo__.py b/__pkginfo__.py
new file mode 100644
index 0000000..a24ef43
--- /dev/null
+++ b/__pkginfo__.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2003-2006 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""logilab.common packaging information"""
+
+__revision__ = "$Id: __pkginfo__.py,v 1.58 2006-04-25 12:08:52 syt Exp $"
+
+modname = 'common'
+numversion = (0, 15, 1)
+version = '.'.join([str(num) for num in numversion])
+
+license = 'GPL'
+copyright = '''Copyright (c) 2003-2006 LOGILAB S.A. (Paris, FRANCE).
+http://www.logilab.fr/ -- mailto:contact@logilab.fr'''
+
+author = "Logilab"
+author_email = "devel@logilab.fr"
+
+short_desc = "useful miscellaneous modules used by Logilab projects"
+
+long_desc = """logilab-common is a collection of low-level Python packages and \
+modules,
+ designed to ease:
+ * handling command line options and configuration files
+ * writing interactive command line tools
+ * manipulation files and character strings
+ * interfacing to OmniORB
+ * generating of SQL queries
+ * running unit tests
+ * manipulating tree structures
+ * accessing RDBMS (currently postgreSQL and mysql)
+ * generating text and HTML reports
+ * logging"""
+
+
+web = "http://www.logilab.org/projects/%s" % modname
+ftp = "ftp://ftp.logilab.org/pub/%s" % modname
+mailinglist = "mailto://python-projects@lists.logilab.org"
+
+subpackage_of = 'logilab'
+subpackage_master = True
+
+from os.path import join
+include_dirs = [join('test', 'data')]
+pyversions = ['2.2', '2.3', '2.4']
+debian_maintainer = 'Alexandre Fayolle'
+debian_maintainer_email = 'afayolle@debian.org'
diff --git a/announce.txt b/announce.txt
new file mode 100644
index 0000000..27ebadb
--- /dev/null
+++ b/announce.txt
@@ -0,0 +1,23 @@
+What's new ?
+------------
+%CHANGELOG%
+
+
+What is %SOURCEPACKAGE% ?
+------------------------
+%LONG_DESC%
+
+
+Home page
+---------
+%WEB%
+
+Download
+--------
+%FTP%
+
+Mailing list
+------------
+%MAILINGLIST%
+
+%ADDITIONAL_DESCR% \ No newline at end of file
diff --git a/astutils.py b/astutils.py
new file mode 100644
index 0000000..0996f33
--- /dev/null
+++ b/astutils.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2003 Sylvain Thenault (thenault@nerim.net)
+# Copyright (c) 2003 Logilab
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""Some useful functions to manipulate ast tuples
+"""
+
+__author__ = u"Sylvain Thenault"
+__revision__ = "$Id: astutils.py,v 1.11 2003-11-24 13:57:24 syt Exp $"
+
+import symbol
+import token
+from types import TupleType
+
+def debuild(ast_tuple):
+ """
+ reverse ast_tuple to string
+ """
+ if type(ast_tuple[1]) is TupleType:
+ result = ''
+ for child in ast_tuple[1:]:
+ result = '%s%s' % (result, debuild(child))
+ return result
+ else:
+ return ast_tuple[1]
+
+def clean(ast_tuple):
+ """
+ reverse ast tuple to a list of tokens
+ merge sequences (token.NAME, token.DOT, token.NAME)
+ """
+ result = []
+ last = None
+ for couple in _clean(ast_tuple):
+ if couple[0] == token.NAME and last == token.DOT:
+ result[-1][1] += couple[1]
+ elif couple[0] == token.DOT and last == token.NAME:
+ result[-1][1] += couple[1]
+ else:
+ result.append(couple)
+ last = couple[0]
+ return result
+
+def _clean(ast_tuple):
+    """ transform the ast into a list of tokens (i.e. final elements)
+ """
+ if type(ast_tuple[1]) is TupleType:
+ v = []
+ for c in ast_tuple[1:]:
+ v += _clean(c)
+ return v
+ else:
+ return [list(ast_tuple[:2])]
+
+def cvrtr(tuple):
+ """debug method returning an ast string in a readable fashion"""
+ if type(tuple) is TupleType:
+ try:
+ try:
+ txt = 'token.'+token.tok_name[tuple[0]]
+ except:
+ txt = 'symbol.'+symbol.sym_name[tuple[0]]
+ except:
+ txt = 'Unknown token/symbol'
+ return [txt] + map(cvrtr, tuple[1:])
+ else:
+ return tuple
+
+__all__ = ('debuild', 'clean', 'cvrtr')
diff --git a/bind.py b/bind.py
new file mode 100644
index 0000000..9f2b40f
--- /dev/null
+++ b/bind.py
@@ -0,0 +1,272 @@
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+""" Copyright (c) 2002-2003 LOGILAB S.A. (Paris, FRANCE).
+ http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+ This module provides a way to optimize globals in certain functions by binding
+ their names to values provided in a dictionary
+"""
+
+__revision__ = '$Id: bind.py,v 1.8 2005-11-22 13:12:59 syt Exp $'
+
+# TODO: unit tests
+# * this module provide a function bind(func,vars) which replaces every
+# global variable 'm' by the value vars['m'] if such value exists in dict
+
+from dis import HAVE_ARGUMENT
+from new import code as make_code, function as make_function
+import inspect
+
+LOAD_GLOBAL = 116
+LOAD_CONST = 100
+EXTENDED_ARG = 143
+STORE_GLOBAL = 97
+
+def bind_code(co, globals):
+ """
+    Take a code object and a dictionary and return a new code object where
+    the opcodes LOAD_GLOBAL are replaced by LOAD_CONST whenever the global's
+    name appears in the dictionary
+ """
+ consts = list(co.co_consts)
+ assigned = {}
+
+ code = co.co_code
+ new_code = ""
+ n = len(code)
+ i = 0
+ while i < n:
+ c = code[i]
+ op = ord(c)
+ i += 1
+ if op >= HAVE_ARGUMENT:
+ oparg = ord(code[i]) + ord(code[i+1]) * 256
+ i += 2
+ else:
+ oparg = None
+ if op == LOAD_GLOBAL:
+ name = co.co_names[oparg]
+ if globals.has_key(name):
+ k = assigned.get(name, None)
+ if k == None:
+ k = len(consts)
+ assigned[name] = len(consts)
+ consts.append(globals[name])
+ op = LOAD_CONST
+ oparg = k
+ new_code += chr(op)
+ if oparg is not None:
+ new_code += chr(oparg & 255)
+ new_code += chr( (oparg>>8) & 255 )
+
+ return make_code(co.co_argcount,
+ co.co_nlocals,
+ co.co_stacksize,
+ co.co_flags,
+ new_code,
+ tuple(consts),
+ co.co_names,
+ co.co_varnames,
+ co.co_filename,
+ co.co_name,
+ co.co_firstlineno,
+ co.co_lnotab )
+
+
+def bind(f, globals):
+ """Returns a new function whose code object has been
+ bound by bind_code()"""
+ newcode = bind_code(f.func_code, globals)
+ defaults = f.func_defaults or ()
+ return make_function(newcode, f.func_globals, f.func_name, defaults)
+
+if type(__builtins__) == dict:
+ builtins = __builtins__
+else:
+ builtins = __builtins__.__dict__
+
+bind_code_opt = bind(bind_code, builtins )
+bind_code_opt = bind(bind_code_opt, globals() )
+
+
+def optimize_module(m, global_consts):
+ if not inspect.ismodule(m):
+ raise TypeError
+ d = {}
+ for i in global_consts:
+ v = m.__dict__.get(i)
+ d[i] = v
+ builtins = m.__builtins__
+ for name, f in m.__dict__.items():
+ if inspect.isfunction(f):
+ f = bind(f, builtins)
+ if d:
+ f = bind(f, d)
+ m.__dict__[name] = f
+
+
+
+
+def analyze_code(co, globals, consts_dict, consts_list):
+    """Take a code object and a dictionary; record in consts_dict
+    and consts_list the globals whose name appears in the dictionary
+    (for later substitution by LOAD_CONST), and return the list of
+    global names the code assigns to (STORE_GLOBAL)"""
+ modified_globals = []
+ for c in co.co_consts:
+ if c not in consts_list:
+ consts_list.append(c)
+ modified = []
+ code = co.co_code
+ new_code = ""
+ n = len(code)
+ i = 0
+ extended_arg = 0
+ while i < n:
+ c = code[i]
+ op = ord(c)
+ i += 1
+ if op >= HAVE_ARGUMENT:
+ oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
+ extended_arg = 0
+ i += 2
+ else:
+ oparg = None
+ if op == EXTENDED_ARG:
+ extended_arg = oparg*65536L
+
+ if op == LOAD_GLOBAL:
+ name = co.co_names[oparg]
+ if globals.has_key(name):
+ k = consts_dict.get(name, None)
+ if k == None:
+ k = len(consts_list)
+ consts_dict[name] = k
+ consts_list.append(globals[name])
+ if op == STORE_GLOBAL:
+ name = co.co_names[oparg]
+ if globals.has_key(name):
+ modified_globals.append(name)
+ return modified_globals
+
+def rewrite_code(co, consts_dict, consts_tuple):
+    """Take a code object and a dictionary and return a
+    new code object where the opcodes LOAD_GLOBAL are replaced
+    by LOAD_CONST whenever the global's name appears in the
+    dictionary"""
+ code = co.co_code
+ new_code = ""
+ n = len(code)
+ i = 0
+ consts_list = list(consts_tuple)
+ while i < n:
+ c = code[i]
+ op = ord(c)
+ i += 1
+ extended_arg = 0
+ if op >= HAVE_ARGUMENT:
+ oparg = ord(code[i]) + ord(code[i+1])*256+extended_arg
+ extended_arg = 0
+ i += 2
+ else:
+ oparg = None
+ if op == EXTENDED_ARG:
+ extended_arg = oparg*65536L
+ elif op == LOAD_GLOBAL:
+ name = co.co_names[oparg]
+ k = consts_dict.get(name)
+ if k is not None:
+ op = LOAD_CONST
+ oparg = k
+ elif op == LOAD_CONST:
+ val = co.co_consts[oparg]
+ oparg = consts_list.index(val)
+ new_code += chr(op)
+ if oparg is not None:
+ new_code += chr(oparg & 255)
+ new_code += chr( (oparg>>8) & 255 )
+
+ return make_code(co.co_argcount,
+ co.co_nlocals,
+ co.co_stacksize,
+ co.co_flags,
+ new_code,
+ consts_tuple,
+ co.co_names,
+ co.co_varnames,
+ co.co_filename,
+ co.co_name,
+ co.co_firstlineno,
+ co.co_lnotab )
+
+def optimize_module_2(m, globals_consts, bind_builtins=1):
+ if not inspect.ismodule(m):
+ raise TypeError
+ consts_dict = {}
+ consts_list = []
+ if type(globals_consts) == list or type(globals_consts) == tuple:
+ globals = {}
+ for i in globals_consts:
+ v = m.__dict__.get(i)
+ globals[i] = v
+ else:
+ globals = globals_consts
+ if bind_builtins:
+ for builtin_name, builtin_value in m.__builtins__.items():
+ # this way it is possible to redefine a builtin in globals_consts
+ globals.setdefault(builtin_name, builtin_value)
+ functions = {}
+ for name, f in m.__dict__.items():
+ if inspect.isfunction(f):
+ functions[name] = f
+ analyze_code(f.func_code, globals, consts_dict, consts_list)
+ consts_list = tuple(consts_list)
+ for name, f in functions.items():
+ newcode = rewrite_code(f.func_code, consts_dict, consts_list)
+ defaults = f.func_defaults or ()
+ m.__dict__[name] = make_function(newcode, f.func_globals, f.func_name,
+ defaults)
+
+
+def run_bench(n):
+ from time import time
+ t = time()
+ g = globals()
+ for i in range(n):
+ test = bind(bind_code, g)
+ t1 = time()-t
+ bind2 = bind(bind, {'bind_code':bind_code_opt})
+ t = time()
+ for i in range(n):
+ test=bind2(bind_code, g)
+ t2 = time()-t
+ print "1 regular version", t1
+ print "2 optimized version", t2
+ print "ratio (1-2)/1 : %f %%" % (100.*(t1-t2)/t1)
+
+
+def test_pystone():
+ from test import pystone
+ for _ in range(5):
+ pystone.main()
+ optimize_module(pystone, ('TRUE','FALSE','Proc0','Proc1','Proc2','Proc3',
+ 'Proc4','Proc5','Proc6','Proc7','Proc8','Func1',
+ ' Func2','Func3'))
+ optimize_module(pystone, builtins.keys())
+ for _ in range(5):
+ pystone.main()
+
+
+if __name__ == "__main__":
+ run_bench(1000)
diff --git a/cache.py b/cache.py
new file mode 100644
index 0000000..9e3c994
--- /dev/null
+++ b/cache.py
@@ -0,0 +1,97 @@
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+""" Copyright (c) 2002-2003 LOGILAB S.A. (Paris, FRANCE).
+ http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+ Cache module, with a least recently used algorithm for the management of the
+ deletion of entries.
+"""
+
+__revision__ = '$Id: cache.py,v 1.8 2003-10-31 14:18:31 syt Exp $'
+
+
+class Cache:
+    """ a dictionary-like cache
+
+ inv:
+ len(self._usage) <= self.size
+ len(self.data) <= self.size
+ """
+
+ def __init__(self, size=100):
+ self.data = {}
+ self.size = size
+ self._usage = []
+
+ def __repr__(self):
+ return repr(self.data)
+
+ def __len__(self):
+ return len(self.data)
+
+ def _update_usage(self, key):
+ # Special case : cache's size = 0 !
+ if self.size <= 0:
+ return
+
+ if not self._usage:
+ self._usage.append(key)
+
+ if self._usage[-1] != key:
+ try:
+ self._usage.remove(key)
+ except ValueError:
+ # we are inserting a new key
+                # check the size of the dictionary
+ # and remove the oldest item in the cache
+ if self.size and len(self._usage) >= self.size:
+ del self.data[self._usage[0]]
+ del self._usage[0]
+ self._usage.append(key)
+
+
+ def __getitem__(self, key):
+ value = self.data[key]
+ self._update_usage(key)
+ return value
+
+ def __setitem__(self, key, item):
+ # Just make sure that size > 0 before inserting a new item in the cache
+ if self.size > 0:
+ self.data[key] = item
+ self._update_usage(key)
+
+ def __delitem__(self, key):
+ # If size <= 0, then we don't have anything to do
+ # XXX FIXME : Should we let the 'del' raise a KeyError ?
+ if self.size > 0:
+ del self.data[key]
+ self._usage.remove(key)
+
+ def clear(self):
+ self.data.clear()
+ self._usage = []
+
+ def keys(self):
+ return self.data.keys()
+
+ def items(self):
+ return self.data.items()
+
+ def values(self):
+ return self.data.values()
+
+ def has_key(self, key):
+ return self.data.has_key(key)
+
+
diff --git a/cli.py b/cli.py
new file mode 100644
index 0000000..a01de85
--- /dev/null
+++ b/cli.py
@@ -0,0 +1,189 @@
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+""" Copyright (c) 2002-2003 LOGILAB S.A. (Paris, FRANCE).
+ http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+
+ Command line interface helper classes.
+
+ It provides some default commands, a help system, a default readline
+ configuration with completion and persistent history
+"""
+
+__revision__ = "$Id: cli.py,v 1.16 2005-05-10 12:31:04 adim Exp $"
+
+
+import __builtin__
+if not hasattr(__builtin__, '_'):
+ __builtin__._ = str
+
+
+def init_readline(complete_method, histfile=None):
+ """init the readline library if available"""
+ try:
+ import readline
+ readline.parse_and_bind("tab: complete")
+ readline.set_completer(complete_method)
+ string = readline.get_completer_delims().replace(':', '')
+ readline.set_completer_delims(string)
+ if histfile is not None:
+ try:
+ readline.read_history_file(histfile)
+ except IOError:
+ pass
+ import atexit
+ atexit.register(readline.write_history_file, histfile)
+ except:
+ print 'readline si not available :-('
+
+
+class Completer :
+ """readline completer"""
+
+ def __init__(self, commands):
+ self.list = commands
+
+ def complete(self, text, state):
+ """hook called by readline when <tab> is pressed"""
+ n = len(text)
+ matches = []
+ for cmd in self.list :
+ if cmd[:n] == text :
+ matches.append(cmd)
+ try:
+ return matches[state]
+ except IndexError:
+ return None
+
+
+class CLIHelper:
+ """ an abstract command line interface client which recognize commands
+ and provide an help system
+ """
+
+ CMD_MAP = {'help' : _("Others"),
+ 'quit' : _("Others"),
+ }
+ CMD_PREFIX = ''
+
+ def __init__(self, histfile=None) :
+ self._topics = {}
+ self.commands = None
+ self._completer = Completer(self._register_commands())
+ init_readline(self._completer.complete, histfile)
+
+ def run(self):
+ """loop on user input, exit on EOF"""
+ while 1:
+ try:
+ line = raw_input('>>> ')
+ except EOFError:
+ print
+ break
+ s_line = line.strip()
+ if not s_line:
+ continue
+ args = s_line.split()
+ if self.commands.has_key(args[0]):
+ try:
+ cmd = 'do_%s' % self.commands[args[0]]
+ getattr(self, cmd)(*args[1:])
+ except EOFError:
+ break
+ except:
+ import traceback
+ traceback.print_exc()
+ else:
+ try:
+ self.handle_line(s_line)
+ except:
+ import traceback
+ traceback.print_exc()
+
+ def handle_line(self, stripped_line):
+ """method to overload in the concrete class
+
+        should handle lines which are not commands
+ """
+ raise NotImplementedError()
+
+
+ # private methods #########################################################
+
+ def _register_commands(self):
+ """ register available commands method and return the list of
+ commands name
+ """
+ self.commands = {}
+ self._command_help = {}
+ commands = [attr[3:] for attr in dir(self) if attr[:3] == 'do_']
+ for command in commands:
+ topic = self.CMD_MAP[command]
+ help_method = getattr(self, 'help_do_%s' % command)
+ self._topics.setdefault(topic, []).append(help_method)
+ self.commands[self.CMD_PREFIX + command] = command
+ self._command_help[command] = help_method
+ return self.commands.keys()
+
+ def _print_help(self, cmd, syntax, explanation):
+ print _('Command %s') % cmd
+ print _('Syntax: %s') % syntax
+ print '\t', explanation
+ print
+
+
+ # predefined commands #####################################################
+
+ def do_help(self, command=None) :
+ """base input of the help system"""
+ if self._command_help.has_key(command):
+ self._print_help(*self._command_help[command])
+ elif command is None or not self._topics.has_key(command):
+ print _("Use help <topic> or help <command>.")
+ print _("Available topics are:")
+ topics = self._topics.keys()
+ topics.sort()
+ for topic in topics:
+ print '\t', topic
+ print
+ print _("Available commands are:")
+ commands = self.commands.keys()
+ commands.sort()
+ for command in commands:
+ print '\t', command[len(self.CMD_PREFIX):]
+
+ else:
+ print _('Available commands about %s:') % command
+ print
+ for command_help_method in self._topics[command]:
+ try:
+ if callable(command_help_method):
+ self._print_help(*command_help_method())
+ else:
+ self._print_help(*command_help_method)
+ except:
+ import traceback
+ traceback.print_exc()
+ print 'ERROR in help method %s'% (
+ command_help_method.func_name)
+
+ help_do_help = ("help", "help [topic|command]",
+ _("print help message for the given topic/command or \
+available topics when no argument"))
+
+ def do_quit(self):
+ """quit the CLI"""
+ raise EOFError()
+
+ def help_do_quit(self):
+ return ("quit", "quit", _("quit the application"))
diff --git a/compat.py b/compat.py
new file mode 100644
index 0000000..186b6ed
--- /dev/null
+++ b/compat.py
@@ -0,0 +1,145 @@
+# pylint: disable-msg=E0601,W0622,W0611
+#
+# Copyright (c) 2004-2005 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""some wrapper around tools introduced into python 2.3, making them available
+in python 2.2
+"""
+from __future__ import generators
+
+__revision__ = '$Id: compat.py,v 1.13 2006-01-03 15:31:15 syt Exp $'
+
+from logilab.common import class_renamed
+from warnings import warn
+
+try:
+ set = set
+except NameError:
+ try:
+ from sets import Set as set
+ except ImportError:
+ class set:
+ def __init__(self, values=()):
+ self._data = {}
+ warn("This implementation of Set is not complete !",
+ stacklevel=2)
+ for v in values:
+ self._data[v] = 1
+
+ def add(self, value):
+ self._data[value] = 1
+
+ def remove(self, element):
+ del self._data[element]
+
+ def pop(self):
+ return self._data.popitem()[0]
+
+ def __or__(self, other):
+ result = set(self._data.keys())
+ for val in other:
+ result.add(val)
+ return result
+ __add__ = __or__
+
+ def __and__(self, other):
+ result = set()
+ for val in other:
+ if val in self._data:
+ result.add(val)
+ return result
+
+ def __sub__(self, other):
+ result = set(self._data.keys())
+ for val in other:
+ if val in self._data:
+ result.remove(val)
+ return result
+
+ def __cmp__(self, other):
+ keys = self._data.keys()
+ okeys = other._data.keys()
+ keys.sort()
+ okeys.sort()
+ return cmp(keys, okeys)
+
+ def __len__(self):
+ return len(self._data)
+
+ def __repr__(self):
+ elements = self._data.keys()
+ return 'lcc.set(%r)' % (elements)
+ __str__ = __repr__
+
+ def __iter__(self):
+ return iter(self._data)
+
+Set = class_renamed('Set', set, 'logilab.common.compat.Set is deprecated, '
+ 'use logilab.common.compat.set instead')
+
+try:
+ from itertools import izip, chain, imap
+except ImportError:
+ # from itertools documentation ###
+ def izip(*iterables):
+ iterables = map(iter, iterables)
+ while iterables:
+ result = [i.next() for i in iterables]
+ yield tuple(result)
+
+ def chain(*iterables):
+ for it in iterables:
+ for element in it:
+ yield element
+
+ def imap(function, *iterables):
+ iterables = map(iter, iterables)
+ while True:
+ args = [i.next() for i in iterables]
+ if function is None:
+ yield tuple(args)
+ else:
+ yield function(*args)
+try:
+ sum = sum
+ enumerate = enumerate
+except NameError:
+ # define the sum and enumerate functions (builtins introduced in py 2.3)
+ import operator
+ def sum(seq, start=0):
+ """Returns the sum of all elements in the sequence"""
+ return reduce(operator.add, seq, start)
+
+ def enumerate(iterable):
+ """emulates the python2.3 enumerate() function"""
+ i = 0
+ for val in iterable:
+ yield i, val
+ i += 1
+ #return zip(range(len(iterable)), iterable)
+try:
+ sorted = sorted
+ reversed = reversed
+except NameError:
+ def sorted(l):
+ l2 = list(l)
+ l2.sort()
+ return l2
+
+ def reversed(l):
+ l2 = list(l)
+ l2.reverse()
+ return l2
diff --git a/configuration.py b/configuration.py
new file mode 100644
index 0000000..f712c81
--- /dev/null
+++ b/configuration.py
@@ -0,0 +1,578 @@
+# Copyright (c) 2003-2005 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""Some classes used to handle advanced configuration in simple to
+complex applications.
+
+It's able to load the configuration from a file and or command line
+options, to generate a sample configuration file or to display program's
+usage. It basically fills the gap between optik/optparse and ConfigParser,
+with some additional data types (available as standalone optik extension
+in the `optik_ext` module)
+
+
+Quick start: simplest usage
+```````````````````````````````
+
+import sys
+from logilab.common.configuration import Configuration
+
+options = [('dothis', {'type':'yn', 'default': True, 'metavar': '<y or n>'}),
+ ('value', {'type': 'string', 'metavar': '<string>'}),
+ ('multiple', {'type': 'csv', 'default': ('yop',),
+ 'metavar': '<comma separated values>',
+ 'help': 'you can also document the option'}),
+ ('number', {'type': 'int', 'default':2, 'metavar':'<int>'}),
+ ]
+config = Configuration(options=options, name='My config')
+print config['dothis']
+print config['value']
+print config['multiple']
+print config['number']
+
+print config.help()
+
+f = open('myconfig.ini', 'w')
+f.write('''[MY CONFIG]
+number = 3
+dothis = no
+multiple = 1,2,3
+''')
+f.close()
+config.load_file_configuration('myconfig.ini')
+print config['dothis']
+print config['value']
+print config['multiple']
+print config['number']
+
+sys.argv = ['mon prog', '--value', 'bacon', '--multiple', '4,5,6',
+ 'nonoptionargument']
+print config.load_command_line_configuration()
+print config['value']
+
+config.generate_config()
+
+
+:version: $Revision: 1.40 $
+:author: Logilab
+:copyright: 2003-2005 LOGILAB S.A. (Paris, FRANCE)
+:contact: http://www.logilab.fr/ -- mailto:python-projects@logilab.org
+"""
+
+__revision__ = "$Id: configuration.py,v 1.40 2005-11-22 13:13:00 syt Exp $"
+__docformat__ = "restructuredtext en"
+__all__ = ('OptionsManagerMixIn', 'OptionsProviderMixIn',
+ 'ConfigurationMixIn', 'Configuration',
+ 'OptionsManager2ConfigurationAdapter')
+
+
+import sys
+import re
+from os.path import exists
+from copy import copy
+from ConfigParser import ConfigParser, NoOptionError, NoSectionError
+
+from logilab.common.optik_ext import OptionParser, OptionGroup, Values, \
+ OptionValueError, OptionError, check_yn, check_csv, check_file, \
+ check_color, check_named, generate_manpage
+from logilab.common.textutils import normalize_text, unquote
+
+class UnsupportedAction(Exception):
+ """raised by set_option when it doesn't know what to do for an action"""
+
+def choice_validator(opt_dict, name, value):
+ """validate and return a converted value for option of type 'choice'
+ """
+ if not value in opt_dict['choices']:
+ msg = "option %s: invalid value: %r, should be in %s"
+ raise OptionValueError(msg % (name, value, opt_dict['choices']))
+ return value
+
+def multiple_choice_validator(opt_dict, name, value):
+ """validate and return a converted value for option of type 'choice'
+ """
+ choices = opt_dict['choices']
+ values = check_csv(None, name, value)
+ for value in values:
+ if not value in choices:
+ msg = "option %s: invalid value: %r, should be in %s"
+ raise OptionValueError(msg % (name, value, choices))
+ return values
+
+def csv_validator(opt_dict, name, value):
+ """validate and return a converted value for option of type 'csv'
+ """
+ return check_csv(None, name, value)
+
+def yn_validator(opt_dict, name, value):
+ """validate and return a converted value for option of type 'yn'
+ """
+ return check_yn(None, name, value)
+
+def named_validator(opt_dict, name, value):
+ """validate and return a converted value for option of type 'named'
+ """
+ return check_named(None, name, value)
+
+def file_validator(opt_dict, name, value):
+ """validate and return a filepath for option of type 'file'"""
+ return check_file(None, name, value)
+
+def color_validator(opt_dict, name, value):
+ """validate and return a filepath for option of type 'file'"""
+ return check_color(None, name, value)
+
+
+VALIDATORS = {'string' : unquote,
+ 'int' : int,
+ 'float': float,
+ 'file': file_validator,
+ 'font': unquote,
+ 'color': color_validator,
+ 'regexp': re.compile,
+ 'csv': csv_validator,
+ 'yn': yn_validator,
+ 'bool': yn_validator,
+ 'named': named_validator,
+ 'choice': choice_validator,
+ 'multiple_choice': multiple_choice_validator,
+ }
+
+def convert(value, opt_dict, name=''):
+ """return a validated value for an option according to its type
+
+ optional argument name is only used for error message formatting
+ """
+ try:
+ _type = opt_dict['type']
+ except KeyError:
+ # FIXME
+ return value
+ if not VALIDATORS.has_key(_type):
+ raise Exception('Unsupported type "%s"' % _type)
+ try:
+ return VALIDATORS[_type](opt_dict, name, value)
+ except TypeError:
+ try:
+ return VALIDATORS[_type](value)
+ except OptionValueError:
+ raise
+ except:
+ raise OptionValueError('%s value (%r) should be of type %s' %
+ (name, value, _type))
+
+def comment(string):
+ """return string as a comment"""
+ lines = [line.strip() for line in string.splitlines()]
+ return '# ' + '\n# '.join(lines)
+
+## def split_value(value):
+## if len(value) > 79:
+## sp_index = len(value)
+## while
+
+def format_section(stream, section, options, doc=None):
+ """format an options section using the INI format"""
+ if doc:
+ print >> stream, comment(doc)
+ print >> stream, '[%s]' % section.upper()
+ section = {}
+ for opt_name, value, opt_dict in options:
+ if type(value) in (type(()), type([])):
+ value = ','.join(value)
+ elif hasattr(value, 'match'):
+ # compiled regexp
+ value = value.pattern
+ elif opt_dict.get('type') == 'yn':
+ value = value and 'yes' or 'no'
+ elif isinstance(value, (str, unicode)) and value.isspace():
+ value = "'%s'" % value
+ #else:
+ # value = repr(value)
+ help_msg = opt_dict.get('help')
+ if help_msg:
+ print >> stream
+ print >> stream, normalize_text(help_msg, line_len=79, indent='# ')
+ else:
+ print >> stream
+ print >> stream, '%s=%s' % (opt_name, str(value).strip())
+
+
+class OptionsManagerMixIn:
+ """MixIn to handle a configuration from both a configuration file and
+ command line options
+ """
+
+ def __init__(self, usage, config_file=None, version=None, quiet=0):
+ self.config_file = config_file
+ # configuration file parser
+ self._config_parser = ConfigParser()
+ # command line parser
+ self._optik_parser = OptionParser(usage=usage, version=version)
+ # list of registered options providers
+ self.options_providers = []
+        # dictionary associating option name to checker
+ self._all_options = {}
+ self._short_options = {}
+ self._nocallback_options = {}
+ # verbosity
+ self.quiet = quiet
+
+ def register_options_provider(self, provider, own_group=1):
+ """register an options provider"""
+ assert provider.priority <= 0, "provider's priority can't be >= 0"
+ for i in range(len(self.options_providers)):
+ if provider.priority > self.options_providers[i].priority:
+ self.options_providers.insert(i, provider)
+ break
+ else:
+ self.options_providers.append(provider)
+ non_group_spec_options = [option for option in provider.options
+ if not option[1].has_key('group')]
+ groups = getattr(provider, 'option_groups', None)
+ if own_group:
+ self.add_option_group(provider.name.upper(), provider.__doc__,
+ non_group_spec_options, provider)
+ else:
+ for opt_name, opt_dict in non_group_spec_options:
+ args, opt_dict = self.optik_option(provider, opt_name, opt_dict)
+ self._optik_parser.add_option(*args, **opt_dict)
+ self._all_options[opt_name] = provider
+ if groups:
+ for group_name, doc in groups:
+ self.add_option_group(
+ group_name, doc,
+ [option for option in provider.options
+ if option[1].get('group') == group_name],
+ provider)
+
+ def add_option_group(self, group_name, doc, options, provider):
+ """add an option group including the listed options
+ """
+ # add section to the config file
+ self._config_parser.add_section(group_name)
+ # add option group to the command line parser
+ group = OptionGroup(self._optik_parser,
+ title=group_name.capitalize(),
+ description=doc)
+ self._optik_parser.add_option_group(group)
+ # add provider's specific options
+ for opt_name, opt_dict in options:
+ args, opt_dict = self.optik_option(provider, opt_name, opt_dict)
+ group.add_option(*args, **opt_dict)
+ self._all_options[opt_name] = provider
+
+ def optik_option(self, provider, opt_name, opt_dict):
+ """get our personal option definition and return a suitable form for
+ use with optik/optparse
+ """
+ opt_dict = copy(opt_dict)
+ if opt_dict.has_key('action'):
+ self._nocallback_options[provider] = opt_name
+ else:
+ opt_dict['action'] = 'callback'
+ opt_dict['callback'] = self.cb_set_provider_option
+ for specific in ('default', 'group'):
+ if opt_dict.has_key(specific):
+ del opt_dict[specific]
+ args = ['--' + opt_name]
+ if opt_dict.has_key('short'):
+ self._short_options[opt_dict['short']] = opt_name
+ args.append('-' + opt_dict['short'])
+ del opt_dict['short']
+ return args, opt_dict
+
+ def cb_set_provider_option(self, option, opt_name, value, parser):
+ """optik callback for option setting"""
+ # remove --
+ if opt_name.startswith('--'):
+ opt_name = opt_name[2:]
+ else: # short version
+ opt_name = self._short_options[opt_name[1:]]
+ # trick since we can't set action='store_true' on options
+ if value is None:
+ value = 1
+ self.global_set_option(opt_name, value)
+
+ def global_set_option(self, opt_name, value):
+ """set option on the correct option provider"""
+ self._all_options[opt_name].set_option(opt_name, value)
+
    def generate_config(self, stream=None):
        """write a configuration file according to the current configuration
        into the given stream or stdout

        Options are grouped by provider; options carrying a 'group' key
        are written in their own section, the others in a section named
        after the provider.  Options without a 'type' key or whose
        current value is None are skipped.
        """
        stream = stream or sys.stdout
        printed = 0  # whether at least one section has been emitted yet
        for provider in self.options_providers:
            default_options = []
            sections = {}
            for opt_name, opt_dict in provider.options:
                if opt_dict.get('type') is None:
                    # not a value option (e.g. pure callback/help entries)
                    continue
                attr = provider.option_name(opt_name)
                try:
                    value = getattr(provider.config, attr)
                except AttributeError:
                    continue
                if value is None:
                    continue
                if opt_dict.get('group'):
                    sections.setdefault(opt_dict['group'], []).append(
                        (opt_name, value, opt_dict))
                else:
                    default_options.append((opt_name, value, opt_dict))
            if default_options:
                if printed:
                    # blank lines between two consecutive sections
                    print >> stream, '\n'
                format_section(stream, provider.name, default_options,
                               provider.__doc__)
                printed = 1
            for section, options in sections.items():
                if printed:
                    print >> stream, '\n'
                format_section(stream, section, options)
                printed = 1
+
+ def generate_manpage(self, pkginfo, section=1, stream=None):
+ """write a man page for the current configuration into the given
+ stream or stdout
+ """
+ generate_manpage(self._optik_parser, pkginfo,
+ section, stream=stream or sys.stdout)
+
+ # initialization methods ##################################################
+
+ def load_file_configuration(self, config_file=None):
+ """load the configuration from file
+ """
+ self.read_config_file(config_file)
+ self.load_config_file()
+
+ def read_config_file(self, config_file=None):
+ """read the configuration file but do not load it (ie dispatching
+ values to each options provider)
+ """
+ if config_file is None:
+ config_file = self.config_file
+ if config_file and exists(config_file):
+ self._config_parser.read([config_file])
+ elif not self.quiet:
+ msg = 'No config file found, using default configuration'
+ print >> sys.stderr, msg
+ return
+
    def load_config_file(self):
        """dispatch values previously read from a configuration file to each
        options provider
        """
        parser = self._config_parser
        for provider in self.options_providers:
            default_section = provider.name
            for opt_name, opt_dict in provider.options:
                # an option may belong to an explicit group, else to the
                # provider's own section; section names are upper-cased
                # in the configuration file
                section = opt_dict.get('group', default_section)
                section = section.upper()
                try:
                    value = parser.get(section, opt_name)
                    provider.set_option(opt_name, value, opt_dict=opt_dict)
                except (NoSectionError, NoOptionError), ex:
                    # option not set in the file: keep the default value
                    continue
+
+ def load_configuration(self, **kwargs):
+ """override configuration according to given parameters
+ """
+ for opt_name, opt_value in kwargs.items():
+ opt_name = opt_name.replace('_', '-')
+ provider = self._all_options[opt_name]
+ provider.set_option(opt_name, opt_value)
+
+
+ def load_command_line_configuration(self, args=None):
+ """override configuration according to command line parameters
+
+ return additional arguments
+ """
+ if args is None:
+ args = sys.argv[1:]
+ else:
+ args = list(args)
+ (options, args) = self._optik_parser.parse_args(args=args)
+ for provider in self._nocallback_options.keys():
+ config = provider.config
+ for attr in config.__dict__.keys():
+ value = getattr(options, attr, None)
+ if value is None:
+ continue
+ setattr(config, attr, value)
+ return args
+
+
+ # help methods ############################################################
+
+ def add_help_section(self, title, description):
+ """add a dummy option section for help purpose """
+ group = OptionGroup(self._optik_parser,
+ title=title.capitalize(),
+ description=description)
+ self._optik_parser.add_option_group(group)
+
+
+ def help(self):
+ """return the usage string for available options """
+ return self._optik_parser.format_help()
+
+
class OptionsProviderMixIn:
    """Mixin to provide options to an OptionsManager

    Concrete classes should override `name` (the configuration section
    name), `options` (a sequence of (name, definition dict) pairs) and
    optionally `priority`.
    """

    # those attributes should be overriden
    priority = -1
    name = 'default'
    options = ()

    def __init__(self):
        self.config = Values()
        for option in self.options:
            try:
                opt_name, opt_dict = option
            except ValueError:
                raise Exception('Bad option: %r' % option)
            action = opt_dict.get('action')
            # callback actions have no default value to install
            if action != 'callback':
                self.set_option(opt_name, opt_dict.get('default'),
                                action, opt_dict)

    def option_name(self, opt_name, opt_dict=None):
        """return the `self.config` attribute name for `opt_name`:
        the 'dest' key of the option definition when given, else the
        option name with dashes turned into underscores
        """
        if opt_dict is None:
            opt_dict = self.get_option_def(opt_name)
        return opt_dict.get('dest', opt_name.replace('-', '_'))

    def set_option(self, opt_name, value, action=None, opt_dict=None):
        """set the value of an option registered in the options list

        Non-None values are converted according to the option's type.
        The effect on `self.config` depends on the (optik-like) action:
        plain storage, flag/counter initialisation (note: store_true
        initialises to 0, store_false to 1 — these install the *unset*
        default, the parsed command line flips them), or appending to a
        list.  Raise `UnsupportedAction` for unknown actions.
        """
        if opt_dict is None:
            opt_dict = self.get_option_def(opt_name)
        if value is not None:
            value = convert(value, opt_dict, opt_name)
        if action is None:
            action = opt_dict.get('action', 'store')
        if action == 'store':
            setattr(self.config, self.option_name(opt_name), value)
        elif action in ('store_true', 'count'):
            setattr(self.config, self.option_name(opt_name), 0)
        elif action == 'store_false':
            setattr(self.config, self.option_name(opt_name), 1)
        elif action == 'append':
            opt_name = self.option_name(opt_name)
            _list = getattr(self.config, opt_name, None)
            if _list is None:
                # first value(s): install a fresh list (or the given
                # sequence as-is)
                if isinstance(value, (list, tuple)):
                    _list = value
                elif value is not None:
                    _list = []
                    _list.append(value)
                setattr(self.config, opt_name, _list)
            elif isinstance(_list, tuple):
                # tuples are immutable: rebind an extended copy
                setattr(self.config, opt_name, _list + (value,))
            else:
                _list.append(value)
        else:
            raise UnsupportedAction(action)

    def get_option_def(self, opt_name):
        """return the dictionary defining an option given its name

        raise `OptionError` if the option is unknown to this provider
        """
        for opt in self.options:
            if opt[0] == opt_name:
                return opt[1]
        raise OptionError('no such option in section %r' % self.name, opt_name)
+
+
class ConfigurationMixIn(OptionsManagerMixIn, OptionsProviderMixIn):
    """basic mixin for simple configurations which don't need the
    manager / providers model
    """
    def __init__(self, *args, **kwargs):
        if not args:
            # no explicit usage string: default to an empty, quiet setup
            kwargs.setdefault('usage', '')
            kwargs.setdefault('quiet', 1)
        OptionsManagerMixIn.__init__(self, *args, **kwargs)
        OptionsProviderMixIn.__init__(self)
        # the instance is both the manager and its only options provider
        self.register_options_provider(self, own_group=0)
+
+
class Configuration(ConfigurationMixIn):
    """class for simple configurations which don't need the
    manager / providers model and prefer delegation to inheritance

    configuration values are accessible through a dict like interface
    """

    def __init__(self, config_file=None, options=None, name=None,
                 usage=None, doc=None):
        if options is not None:
            self.options = options
        if name is not None:
            self.name = name
        if doc is not None:
            self.__doc__ = doc
        ConfigurationMixIn.__init__(self, config_file=config_file, usage=usage)

    def __getitem__(self, key):
        """return the value of option `key`, raise KeyError if unknown"""
        try:
            return getattr(self.config, self.option_name(key))
        # BUGFIX: option_name() raises OptionError for an unknown option
        # (see get_option_def); it was not caught here, so unknown keys
        # leaked OptionError instead of the expected KeyError (the `get`
        # method below already handled it)
        except (OptionError, OptionValueError, AttributeError):
            raise KeyError(key)

    def __setitem__(self, key, value):
        """set the value of option `key`"""
        self.set_option(self.option_name(key), value)

    def get(self, key, default=None):
        """return the value of option `key`, or `default` when unset
        or unknown
        """
        try:
            return getattr(self.config, self.option_name(key))
        except (OptionError, OptionValueError, AttributeError):
            return default
+
+
class OptionsManager2ConfigurationAdapter:
    """Adapt an option manager to behave like a
    `logilab.common.configuration.Configuration` instance
    """
    def __init__(self, provider):
        self.config = provider

    def __getattr__(self, key):
        # delegate every unknown attribute to the adapted manager
        return getattr(self.config, key)

    def _provider_value(self, key):
        """fetch the current value of `key` from the provider owning it"""
        provider = self.config._all_options[key]
        return getattr(provider.config, provider.option_name(key))

    def __getitem__(self, key):
        """return the value of option `key`, raise KeyError when unset"""
        try:
            return self._provider_value(key)
        except AttributeError:
            raise KeyError(key)

    def __setitem__(self, key, value):
        """set the value of option `key` through the manager"""
        self.config.global_set_option(self.config.option_name(key), value)

    def get(self, key, default=None):
        """return the value of option `key`, or `default` when unset"""
        try:
            return self._provider_value(key)
        except AttributeError:
            return default
+
diff --git a/corbautils.py b/corbautils.py
new file mode 100644
index 0000000..8e9eae5
--- /dev/null
+++ b/corbautils.py
@@ -0,0 +1,96 @@
+"""A set of utility function to ease the use of OmniORBpy."""
+
+__revision__ = '$Id: corbautils.py,v 1.2 2005-11-22 13:13:00 syt Exp $'
+
+from omniORB import CORBA, PortableServer
+import CosNaming
+
+orb = None
+
def get_orb():
    """return a reference to the ORB, initializing it on the first call

    This function is mainly used internally in the module.
    """
    global orb
    if orb is None:
        import sys
        # hand the command line over so ORB specific flags are honoured
        orb = CORBA.ORB_init(sys.argv, CORBA.ORB_ID)
    return orb
+
def get_root_context():
    """return a reference to the NameService's root naming context

    This function is mainly used internally in the module.
    """
    nss = get_orb().resolve_initial_references("NameService")
    root_context = nss._narrow(CosNaming.NamingContext)
    assert root_context is not None, "Failed to narrow root naming context"
    return root_context
+
def register_object_name(object, namepath):
    """
    Registers a object in the NamingService.
    The name path is a list of 2-uples (id,kind) giving the path.

    For instance if the path of an object is [('foo',''),('bar','')],
    it is possible to get a reference to the object using the URL
    'corbaname::hostname#foo/bar'.
    [('logilab','rootmodule'),('chatbot','application'),('chatter','server')]
    is mapped to
    'corbaname::hostname#logilab.rootmodule/chatbot.application/chatter.server'

    The get_object_reference() function can be used to resolve such a URL.
    """
    context = get_root_context()
    # walk down the path, creating intermediate naming contexts as needed
    for id, kind in namepath[:-1]:
        name = [CosNaming.NameComponent(id, kind)]
        try:
            context = context.bind_new_context(name)
        except CosNaming.NamingContext.AlreadyBound, ex:
            # the intermediate context already exists: reuse it
            context = context.resolve(name)._narrow(CosNaming.NamingContext)
            assert context is not None, \
                   'test context exists but is not a NamingContext'

    # bind (or rebind, when already present) the object itself on the
    # last path component
    id, kind = namepath[-1]
    name = [CosNaming.NameComponent(id, kind)]
    try:
        context.bind(name, object._this())
    except CosNaming.NamingContext.AlreadyBound, ex:
        context.rebind(name, object._this())
+
def activate_POA():
    """activate the Portable Object Adapter

    Calling it enables the reception of messages, on both the client
    and the server side.
    """
    poa = get_orb().resolve_initial_references('RootPOA')
    poa._get_the_POAManager().activate()
+
def run_orb():
    """enter the ORB mainloop on the server

    This must not be called on the client side.
    """
    get_orb().run()
+
def get_object_reference(url):
    """resolve a corbaname URL (see register_object_name() for example
    URLs) to an object proxy
    """
    return get_orb().string_to_object(url)
+
def get_object_string(host, namepath):
    """return the corbaname string identifier for `host` and a name
    path as described in register_object_name()
    """
    # each (id, kind) pair becomes "id.kind", pairs are joined by "/"
    components = []
    for path_elt in namepath:
        components.append('.'.join(path_elt))
    return 'corbaname::%s#%s' % (host, '/'.join(components))
diff --git a/daemon.py b/daemon.py
new file mode 100644
index 0000000..9d3d1cb
--- /dev/null
+++ b/daemon.py
@@ -0,0 +1,144 @@
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+""" Copyright (c) 2002-2003 LOGILAB S.A. (Paris, FRANCE).
+ http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+a daemon mix-in class
+"""
+
+__revision__ = '$Id: daemon.py,v 1.10 2005-11-22 13:13:01 syt Exp $'
+
+import os, signal, sys, time
+from logilab.common.logger import make_logger, LOG_ALERT, LOG_NOTICE
+
+class DaemonMixIn:
+ """ mixin to make a daemon from watchers/queriers
+ """
+
+ def __init__(self, configmod) :
+ self.delay = configmod.DELAY
+ self.name = str(self.__class__).split('.')[-1]
+ self._pid_file = os.path.join('/tmp', '%s.pid'%self.name)
+ if os.path.exists(self._pid_file):
+ raise Exception('''Another instance of %s must be running.
+If it i not the case, remove the file %s''' % (self.name, self._pid_file))
+ self._alive = 1
+ self._sleeping = 0
+ treshold = configmod.LOG_TRESHOLD
+ if configmod.NODETACH:
+ configmod.log = make_logger('print', treshold, self.name).log
+ else:
+ configmod.log = make_logger('syslog', treshold, self.name).log
+ self.config = configmod
+
+ def _daemonize(self):
+ if not self.config.NODETACH:
+ # fork so the parent can exist
+ if (os.fork()):
+ return -1
+ # deconnect from tty and create a new session
+ os.setsid()
+ # fork again so the parent, (the session group leader), can exit.
+ # as a non-session group leader, we can never regain a controlling
+ # terminal.
+ if (os.fork()):
+ return -1
+ # move to the root to avoit mount pb
+ os.chdir('/')
+ # set paranoid umask
+ os.umask(077)
+ # write pid in a file
+ f = open(self._pid_file, 'w')
+ f.write(str(os.getpid()))
+ f.close()
+ # close standard descriptors
+ sys.stdin.close()
+ sys.stdout.close()
+ sys.stderr.close()
+ # put signal handler
+ signal.signal(signal.SIGTERM, self.signal_handler)
+ signal.signal(signal.SIGHUP, self.signal_handler)
+
+
+ def run(self):
+ """ optionaly go in daemon mode and
+ do what concrete classe has to do and pauses for delay between runs
+ If self.delay is negative, do a pause before starting
+ """
+ if self._daemonize() == -1:
+ return
+ self.config.log(LOG_NOTICE, '%s instance started' % self.name)
+ if self.delay < 0:
+ self.delay = -self.delay
+ time.sleep(self.delay)
+ while 1:
+ try:
+ self._run()
+ except Exception, e:
+ # display for info, sleep, and hope the problem will be solved
+ # later.
+ self.config.log(LOG_ALERT, 'Internal error: %s'%(e))
+ if not self._alive:
+ break
+ try:
+ self._sleeping = 1
+ time.sleep(self.delay)
+ self._sleeping = 0
+ except SystemExit:
+ break
+ self.config.log(LOG_NOTICE, '%s instance exited'%self.name)
+ # remove pid file
+ os.remove(self._pid_file)
+
+ def signal_handler(self, sig_num, stack_frame):
+ if sig_num == signal.SIGTERM:
+ if self._sleeping:
+ # we are sleeping so we can exit without fear
+ self.config.log(LOG_NOTICE, 'exit on SIGTERM')
+ sys.exit(0)
+ else:
+ self.config.log(LOG_NOTICE, 'exit on SIGTERM (on next turn)')
+ self._alive = 0
+ elif sig_num == signal.SIGHUP:
+ self.config.log(LOG_NOTICE, 'reloading configuration on SIGHUP')
+ reload(self.config)
+
+ def _run(self):
+ """should be overidden in the mixed class"""
+ raise NotImplementedError()
+
+## command line utilities ######################################################
+
# getopt long/short option specifications matching handle_option() below
L_OPTIONS = ["help", "log=", "delay=", 'no-detach']
S_OPTIONS = 'hl:d:n'
+
def print_help(modconfig):
    """print command line usage for a daemon configured by `modconfig`
    (its LOG_TRESHOLD and DELAY are shown as the current defaults)
    """
    print """ --help or -h
  displays this message
  --log <log_level>
    log treshold (7 record everything, 0 record only emergency.)
    Defaults to %s
  --delay <delay>
    the number of seconds between two runs.
    Defaults to %s""" % (modconfig.LOG_TRESHOLD, modconfig.DELAY)
+
def handle_option(modconfig, opt_name, opt_value, help_meth):
    """apply one parsed command line option to the daemon configuration
    module `modconfig`

    `help_meth` is called (and the process exits) for -h/--help.
    """
    if opt_name in ('-h', '--help'):
        help_meth()
        sys.exit(0)
    if opt_name in ('-l', '--log'):
        modconfig.LOG_TRESHOLD = int(opt_value)
    elif opt_name in ('-d', '--delay'):
        modconfig.DELAY = int(opt_value)
    elif opt_name in ('-n', '--no-detach'):
        modconfig.NODETACH = 1
diff --git a/db.py b/db.py
new file mode 100644
index 0000000..6961ddd
--- /dev/null
+++ b/db.py
@@ -0,0 +1,530 @@
+# Copyright (c) 2002-2006 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""Helpers to get a DBAPI2 compliant database connection.
+"""
+
+__revision__ = "$Id: db.py,v 1.35 2006-04-25 12:02:09 syt Exp $"
+
+import sys
+import re
+
+__all__ = ['get_dbapi_compliant_module',
+ 'get_connection', 'set_prefered_driver',
+ 'PyConnection', 'PyCursor',
+ 'UnknownDriver', 'NoAdapterFound',
+ ]
+
class UnknownDriver(Exception):
    """raised when an unknown driver name is asked for"""

class NoAdapterFound(Exception):
    """raised when no adapter to DBAPI was found for a module"""
    def __init__(self, obj, objname=None, protocol='DBAPI'):
        if objname is None:
            objname = obj.__name__
        msg = "Could not adapt %s to protocol %s" % (objname, protocol)
        Exception.__init__(self, msg)
        # keep the context around for callers wanting to recover
        self.adapted_obj = obj
        self.objname = objname
        self._protocol = protocol
+
+
+def _import_driver_module(driver, drivers, imported_elements=None, quiet=True):
+ """Imports the first module found in 'drivers' for 'driver'
+
+ :rtype: tuple
+ :returns: the tuple module_object, module_name where module_object
+ is the dbapi module, and modname the module's name
+ """
+ if not driver in drivers:
+ raise UnknownDriver(driver)
+ imported_elements = imported_elements or []
+ for modname in drivers[driver]:
+ try:
+ if not quiet:
+ print >> sys.stderr, 'Trying %s' % modname
+ module = __import__(modname, globals(), locals(), imported_elements)
+ break
+ except ImportError:
+ if not quiet:
+ print >> sys.stderr, '%s is not available' % modname
+ continue
+ else:
+ raise ImportError('Unable to import a %s module' % driver)
+ if not imported_elements:
+ for part in modname.split('.')[1:]:
+ module = getattr(module, part)
+ return module, modname
+
+
+## Connection and cursor wrappers #############################################
+
class PyConnection:
    """A simple python wrapper around a DBAPI connection, useful for
    profiling since every call goes through a python frame
    """
    def __init__(self, cnx):
        # keep a handle on the wrapped native connection
        self._cnx = cnx

    # XXX: would delegating everything through __getattr__ be enough?
    def cursor(self):
        """return the native cursor, wrapped in a PyCursor"""
        return PyCursor(self._cnx.cursor())

    def commit(self):
        """delegate commit() to the wrapped connection"""
        return self._cnx.commit()

    def rollback(self):
        """delegate rollback() to the wrapped connection"""
        return self._cnx.rollback()

    def close(self):
        """delegate close() to the wrapped connection"""
        return self._cnx.close()

    def __getattr__(self, attrname):
        # anything else is looked up on the native connection
        return getattr(self._cnx, attrname)
+
+
class PyCursor:
    """A simple python wrapper around a DBAPI cursor, useful for
    profiling since every call goes through a python frame
    """
    def __init__(self, cursor):
        self._cursor = cursor

    def close(self):
        """delegate close() to the wrapped cursor"""
        return self._cursor.close()

    def execute(self, *args, **kwargs):
        """delegate execute() to the wrapped cursor"""
        return self._cursor.execute(*args, **kwargs)

    def executemany(self, *args, **kwargs):
        """delegate executemany() to the wrapped cursor"""
        return self._cursor.executemany(*args, **kwargs)

    def fetchone(self, *args, **kwargs):
        """delegate fetchone() to the wrapped cursor"""
        return self._cursor.fetchone(*args, **kwargs)

    def fetchmany(self, *args, **kwargs):
        """delegate fetchmany() to the wrapped cursor"""
        return self._cursor.fetchmany(*args, **kwargs)

    def fetchall(self, *args, **kwargs):
        """delegate fetchall() to the wrapped cursor"""
        return self._cursor.fetchall(*args, **kwargs)

    def __getattr__(self, attrname):
        # anything else is looked up on the native cursor
        return getattr(self._cursor, attrname)
+
+
+## Adapters list ##############################################################
+
class DBAPIAdapter:
    """Base class for all DBAPI adapters"""

    def __init__(self, native_module, pywrap=False):
        """
        :type native_module: module
        :param native_module: the database's driver adapted module
        :param pywrap: when true, wrap connections in PyConnection
        """
        self._native_module = native_module
        self._pywrap = pywrap

    def connect(self, host='', database='', user='', password='', port=''):
        """open a connection through the native module's connect()"""
        cnx = self._native_module.connect(host=host, port=port,
                                          database=database,
                                          user=user, password=password)
        return self._wrap_if_needed(cnx)

    def _wrap_if_needed(self, cnx):
        """return `cnx` wrapped in a PyConnection when self._pywrap is
        set, else `cnx` unchanged
        """
        if not self._pywrap:
            return cnx
        return PyConnection(cnx)

    def __getattr__(self, attrname):
        # expose the native module's constants, exceptions, functions...
        return getattr(self._native_module, attrname)
+
+
+# Postgresql #########################################################
+
class _PgdbAdapter(DBAPIAdapter):
    """PGDB adapter to DBAPI: the pgdb module lacks the NUMBER type
    object (and Binary()), so build NUMBER from its own pgdbType
    """
    def __init__(self, native_module, pywrap=False):
        DBAPIAdapter.__init__(self, native_module, pywrap)
        # gather pgdb's numeric type names into a DBAPI NUMBER object
        self.NUMBER = native_module.pgdbType('int2', 'int4', 'serial',
                                             'int8', 'float4', 'float8',
                                             'numeric', 'bool', 'money')
+
+
class _PsycopgAdapter(DBAPIAdapter):
    """Psycopg adapter to DBAPI: psycopg takes a single connection
    string instead of the usual distinct keyword arguments
    """
    def connect(self, host='', database='', user='', password='', port=''):
        """build a psycopg connection string, then connect"""
        if host:
            cnx_string = 'host=%s dbname=%s user=%s' % (host, database, user)
        else:
            cnx_string = 'dbname=%s user=%s' % (database, user)
        if port:
            cnx_string += ' port=%s' % port
        if password:
            cnx_string = '%s password=%s' % (cnx_string, password)
        cnx = self._native_module.connect(cnx_string)
        # NOTE(review): isolation level 1 — presumably READ COMMITTED in
        # psycopg; confirm against the psycopg documentation
        cnx.set_isolation_level(1)
        return self._wrap_if_needed(cnx)
+
+
+
class _PgsqlAdapter(DBAPIAdapter):
    """Simple pyPgSQL Adapter to DBAPI
    """
    def connect(self, host='', database='', user='', password='', port=''):
        """handle pyPgSQL connection format

        BUGFIX: `port` was used in the body but missing from the
        signature, so every call raised NameError.
        """
        kwargs = {'host' : host, 'port': port, 'database' : database,
                  'user' : user, 'password' : password or None}
        cnx = self._native_module.connect(**kwargs)
        return self._wrap_if_needed(cnx)


    def Binary(self, string):
        """Emulates the Binary (cf. DB-API) function

        BUGFIX: this used to return the `str` type object itself
        instead of the value converted to a string.
        """
        return str(string)

    def __getattr__(self, attrname):
        # __import__('pyPgSQL.PgSQL', ...) imports the toplevel package,
        # so look attributes up on the PgSQL submodule
        return getattr(self._native_module.PgSQL, attrname)
+
+
+# Sqlite #############################################################
+
class _PySqlite2Adapter(DBAPIAdapter):
    """Simple pysqlite2 Adapter to DBAPI
    """
    def __init__(self, native_module, pywrap=False):
        DBAPIAdapter.__init__(self, native_module, pywrap)
        self._init_pysqlite2()
        # no type code in pysqlite2: install dummy markers for the
        # DBAPI type objects
        self.BINARY = 'XXX'
        self.STRING = 'XXX'
        self.DATETIME = 'XXX'
        self.NUMBER = 'XXX'

    def _init_pysqlite2(self):
        """initialize pysqlite2 to use mx.DateTime for date and timestamps"""
        sqlite = self._native_module
        # register the adapters/converters only once per process
        if hasattr(sqlite, '_mx_initialized'):
            return

        from mx.DateTime import DateTimeType, strptime

        def adapt_mxdatetime(mxd):
            # store mx datetimes as 'YYYY-MM-DD HH:MM:SS' text
            return mxd.strftime('%F %H:%M:%S')
        sqlite.register_adapter(DateTimeType, adapt_mxdatetime)

        def convert_mxdate(ustr):
            return strptime(ustr, '%F %H:%M:%S')
        sqlite.register_converter('date', convert_mxdate)

        def convert_mxdatetime(ustr):
            return strptime(ustr, '%F %H:%M:%S')
        sqlite.register_converter('timestamp', convert_mxdatetime)

        def convert_boolean(ustr):
            # booleans are stored as 'False'/'True' text in sqlite
            if ustr.lower() == 'false':
                return False
            return True
        sqlite.register_converter('boolean', convert_boolean)

        # flag the module so another adapter instance won't re-register
        sqlite._mx_initialized = 1


    def connect(self, host='', database='', user='', password='', port=None):
        """handle sqlite connection format: only `database` (the file
        path) is meaningful, the other credentials are ignored
        """
        sqlite = self._native_module

        class PySqlite2Cursor(sqlite.Cursor):
            """cursor adapting usual dict format to pysqlite named format
            in SQL queries
            """
            def execute(self, sql, kwargs=None):
                if kwargs is not None:
                    # rewrite '%(name)s' place holders to ':name'
                    sql = re.sub(r'%\(([^\)]+)\)s', r':\1', sql)
                    self.__class__.__bases__[0].execute(self, sql, kwargs)
                else:
                    self.__class__.__bases__[0].execute(self, sql)

        class PySqlite2CnxWrapper:
            # connection wrapper returning our custom cursor class
            def __init__(self, cnx):
                self._cnx = cnx

            def cursor(self):
                return self._cnx.cursor(PySqlite2Cursor)

            def __getattr__(self, attrname):
                return getattr(self._cnx, attrname)

        # PARSE_DECLTYPES activates the converters registered above
        cnx = sqlite.connect(database, detect_types=sqlite.PARSE_DECLTYPES)
        return self._wrap_if_needed(PySqlite2CnxWrapper(cnx))
+
+
class _SqliteAdapter(DBAPIAdapter):
    """adapter for the old sqlite module"""
    def __init__(self, native_module, pywrap=False):
        DBAPIAdapter.__init__(self, native_module, pywrap)
        # sqlite exposes TIMESTAMP rather than the DBAPI DATETIME
        self.DATETIME = native_module.TIMESTAMP

    def connect(self, host='', database='', user='', password='', port=''):
        """handle sqlite connection format: only the database file path
        is meaningful
        """
        return self._wrap_if_needed(self._native_module.connect(database))
+
+
+# Mysql ##############################################################
+
class _MySqlDBAdapter(DBAPIAdapter):
    """adapter for the MySQLdb module, whose connect() uses slightly
    different keyword argument names
    """
    def connect(self, host='', database='', user='', password='', port='',
                unicode=False):
        """handle mysqldb connection format

        When `unicode` is true, Unicode objects are used for strings in
        result sets and query parameters.
        """
        return self._native_module.connect(host=host, port=port,
                                           db=database, user=user,
                                           passwd=password or None,
                                           use_unicode=unicode)
+
+
+## Helpers for DBMS specific Advanced functionalities #########################
+
+class _GenericAdvFuncHelper:
+ """Generic helper, trying to provide generic way to implement
+ specific functionnalities from others DBMS
+
+ An exception is raised when the functionality is not emulatable
+ """
+
+ def support_users(self):
+ """return True if the DBMS support users (this is usually
+ not true for in memory DBMS)
+ """
+ return True
+
+ def support_groups(self):
+ """return True if the DBMS support groups"""
+ return True
+
+ def system_database(self):
+ """return the system database for the given driver"""
+ raise Exception('not supported by this DBMS')
+
+ def sql_current_date(self):
+ return 'CURRENT_DATE'
+
+ def sql_current_time(self):
+ return 'CURRENT_TIME'
+
+ def sql_current_timestamp(self):
+ return 'CURRENT_TIMESTAMP'
+
+ def sql_create_sequence(self, seq_name):
+ return '''CREATE TABLE %s (last INTEGER);
+INSERT INTO %s VALUES (0);''' % (seq_name, seq_name)
+
+ def sql_drop_sequence(self, seq_name):
+ return 'DROP TABLE %s;' % seq_name
+
+ def sqls_increment_sequence(self, seq_name):
+ return ('UPDATE %s SET last=last+1;' % seq_name,
+ 'SELECT last FROM %s;' % seq_name)
+
+ def increment_sequence(self, cursor, seq_name):
+ for sql in self.sqls_increment_sequence(seq_name):
+ cursor.execute(sql)
+ return cursor.fetchone()[0]
+
+
class _PGAdvFuncHelper(_GenericAdvFuncHelper):
    """Postgres helper, taking advantage of postgres' native SEQUENCE
    support
    """

    def system_database(self):
        """return the system database for the postgres driver"""
        return 'template1'

    def sql_create_sequence(self, seq_name):
        """SQL statement creating a native sequence"""
        return 'CREATE SEQUENCE %s;' % seq_name

    def sql_drop_sequence(self, seq_name):
        """SQL statement dropping a native sequence"""
        return 'DROP SEQUENCE %s;' % seq_name

    def sqls_increment_sequence(self, seq_name):
        """a single statement both increments and returns the value"""
        return ("SELECT nextval('%s');" % seq_name,)
+
+
class _SqliteAdvFuncHelper(_GenericAdvFuncHelper):
    """SQLite helper: no users or groups, and date functions spelled
    the sqlite way
    """

    def support_users(self):
        """sqlite has no notion of users"""
        return False

    def support_groups(self):
        """sqlite has no notion of groups"""
        return False

    def sql_current_date(self):
        """SQL expression returning the current date"""
        return "DATE('now')"

    def sql_current_time(self):
        """SQL expression returning the current time"""
        return "TIME('now')"

    def sql_current_timestamp(self):
        """SQL expression returning the current date and time"""
        return "DATETIME('now')"
+
+
+## Drivers, Adapters and helpers registries ###################################
+
+
# candidate modules for each database engine, in order of preference
PREFERED_DRIVERS = {
    "postgres" : [ 'psycopg', 'pgdb', 'pyPgSQL.PgSQL', ],
    "mysql" : [ 'MySQLdb', ], # 'pyMySQL.MySQL, ],
    "sqlite" : [ 'pysqlite2.dbapi2', 'sqlite', ],
    }

# adapter class for each (engine, driver module) pair
_ADAPTERS = {
    'postgres' : { 'pgdb' : _PgdbAdapter,
                   'psycopg' : _PsycopgAdapter,
                   'pyPgSQL.PgSQL' : _PgsqlAdapter,
                   },
    'mysql' : { 'MySQLdb' : _MySqlDBAdapter, },
    'sqlite' : { 'pysqlite2.dbapi2' : _PySqlite2Adapter,
                 'sqlite' : _SqliteAdapter, },
    }
+
+# _AdapterDirectory could be more generic by adding a 'protocol' parameter
+# This one would become an adapter for 'DBAPI' protocol
+class _AdapterDirectory(dict):
+ """A simple dict that registers all adapters"""
+ def register_adapter(self, adapter, driver, modname):
+ """Registers 'adapter' in directory as adapting 'mod'"""
+ try:
+ driver_dict = self[driver]
+ except KeyError:
+ self[driver] = {}
+
+ # XXX Should we have a list of adapters ?
+ driver_dict[modname] = adapter
+
+ def adapt(self, database, prefered_drivers = None, pywrap = False):
+ """Returns an dbapi-compliant object based for database"""
+ prefered_drivers = prefered_drivers or PREFERED_DRIVERS
+ module, modname = _import_driver_module(database, prefered_drivers)
+ try:
+ return self[database][modname](module, pywrap=pywrap)
+ except KeyError:
+ raise NoAdapterFound(obj=module)
+
+ def get_adapter(self, database, modname):
+ try:
+ return self[database][modname]
+ except KeyError:
+ raise NoAdapterFound(None, modname)
+
# singleton directory built from the static table above; the class is
# deleted so every registration goes through this instance
ADAPTER_DIRECTORY = _AdapterDirectory(_ADAPTERS)
del _AdapterDirectory

# one stateless helper instance per supported engine; the None entry is
# the generic fallback used for unknown drivers
ADV_FUNC_HELPER_DIRECTORY = {'postgres': _PGAdvFuncHelper(),
                             'sqlite': _SqliteAdvFuncHelper(),
                             None: _GenericAdvFuncHelper()}
+
+
+## Main functions #############################################################
+
def set_prefered_driver(database, module, _drivers=PREFERED_DRIVERS):
    """sets the prefered driver module for database

    database is the name of the db engine (postgresql, mysql...),
    module the name of the module providing the connect function and
    _drivers an optional dictionary of drivers.

    :raise UnknownDriver: on unknown database or module name
    """
    try:
        modules = _drivers[database]
    except KeyError:
        raise UnknownDriver('Unknown database %s' % database)
    # move the module to the front of the list so it is tried first
    try:
        modules.remove(module)
    except ValueError:
        raise UnknownDriver('Unknown module %s for %s' % (module, database))
    modules.insert(0, module)
+
def get_adv_func_helper(driver):
    """return an advanced function helper for the given driver, falling
    back to the generic helper for unknown drivers
    """
    directory = ADV_FUNC_HELPER_DIRECTORY
    return directory.get(driver, directory[None])
+
def get_dbapi_compliant_module(driver, prefered_drivers = None, quiet = False,
                               pywrap = False):
    """returns a fully dbapi compliant module"""
    try:
        mod = ADAPTER_DIRECTORY.adapt(driver, prefered_drivers, pywrap = pywrap)
    except NoAdapterFound, err:
        # no adapter registered: fall back on the native module,
        # warning on stderr unless quiet is set
        if not quiet:
            msg = 'No Adapter found for %s, returning native module'
            print >> sys.stderr, msg % err.objname
        mod = err.adapted_obj
    # attach the DBMS specific helper so callers can reach it from the
    # returned module
    mod.adv_func_helper = get_adv_func_helper(driver)
    return mod
+
def get_connection(driver='postgres', host='', database='', user='', port='',
                   password='', quiet=False, drivers=PREFERED_DRIVERS,
                   pywrap=False):
    """return a db connexion according to given arguments

    The host may embed a port using the 'host:port' notation; an
    explicit `port` argument takes precedence.
    """
    module, modname = _import_driver_module(driver, drivers, ['connect'])
    try:
        adapter = ADAPTER_DIRECTORY.get_adapter(driver, modname)
    except NoAdapterFound, err:
        # fall back on the base adapter, warning unless quiet is set
        if not quiet:
            msg = 'No Adapter found for %s, using default one' % err.objname
            print >> sys.stderr, msg
        adapted_module = DBAPIAdapter(module, pywrap)
    else:
        adapted_module = adapter(module, pywrap)
    if not port:
        try:
            # allow the 'host:port' notation
            host, port = host.split(':', 1)
        except ValueError:
            pass
    if port:
        port = int(port)
    return adapted_module.connect(host, database, user, password, port=port)
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..e9011c1
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,316 @@
+logilab-common (0.15.1-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Tue, 25 Apr 2006 14:01:32 +0200
+
+logilab-common (0.15.0-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Tue, 28 Mar 2006 12:34:26 +0200
+
+logilab-common (0.14.1-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Mon, 6 Mar 2006 09:04:34 +0100
+
+logilab-common (0.14.0-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Tue, 28 Feb 2006 17:37:59 +0100
+
+logilab-common (0.13.1-4) unstable; urgency=low
+
+ * Force removal of /usr/lib/python2.?/site-packages/logilab/__init__.py*
+ (closes: #353512)
+ * Add Conflicts with version of packages installing modules in
+ subdirectories of /usr/lib/python2.?/site-packages/logilab/
+
+ -- Alexandre Fayolle <afayolle@debian.org> Mon, 20 Feb 2006 11:13:22 +0100
+
+logilab-common (0.13.1-3) unstable; urgency=low
+
+ * upload new release to Debian
+
+ -- Alexandre Fayolle <afayolle@debian.org> Fri, 10 Feb 2006 16:27:27 +0100
+
+logilab-common (0.13.1-2) unstable; urgency=low
+
+ * fixed logilab/__init__.py handling
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Mon, 6 Feb 2006 16:36:52 +0100
+
+logilab-common (0.13.1-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Adrien Di Mascio <Adrien.DiMascio@logilab.fr> Fri, 3 Feb 2006 12:37:59 +0100
+
+logilab-common (0.13.0-3) unstable; urgency=low
+
+ * restored Conflicts in control...
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Mon, 23 Jan 2006 15:35:14 +0100
+
+logilab-common (0.13.0-2) unstable; urgency=low
+
+ * removed Conflicts from control
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Tue, 10 Jan 2006 17:03:00 +0100
+
+logilab-common (0.13.0-1) unstable; urgency=low
+
+ * new upstream release
+ * reorganization to install into site-python, removing the need for
+ pythonX.X- packages (closes: #351128)
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Fri, 6 Jan 2006 10:50:39 +0100
+
+logilab-common (0.12.0-3) unstable; urgency=low
+
+ * Dropped python2.1 support (closes: #337443)
+ * Added small patch from upstream CVS to allow unicode with mysql databases
+ * Changed standards to 3.6.2
+ * Fixed a lintian warning about a badly formatted line in debian/changelog
+ * updated fsf address in copyright file
+
+ -- Alexandre Fayolle <afayolle@debian.org> Wed, 9 Nov 2005 07:53:32 +0100
+
+logilab-common (0.12.0-2) unstable; urgency=low
+
+ * Remove logilab/__init__.py in prerm scripts (closes: #334087)
+
+ -- Alexandre Fayolle <afayolle@debian.org> Wed, 19 Oct 2005 08:48:59 +0200
+
+logilab-common (0.12.0-1) unstable; urgency=low
+
+ * New upstream release
+
+ -- Alexandre Fayolle <afayolle@debian.org> Tue, 6 Sep 2005 11:31:33 +0200
+
+logilab-common (0.11.0-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Mon, 25 Jul 2005 12:36:01 +0200
+
+logilab-common (0.10.1-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Thu, 7 Jul 2005 13:25:43 +0200
+
+logilab-common (0.10.0-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Wed, 4 May 2005 12:49:25 +0200
+
+logilab-common (0.9.3-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Thu, 14 Apr 2005 11:51:23 +0200
+
+logilab-common (0.9.2-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Wed, 16 Feb 2005 13:36:30 +0100
+
+logilab-common (0.9.1-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Fri, 4 Feb 2005 17:41:06 +0100
+
+logilab-common (0.9.0-1) unstable; urgency=low
+
+ * new upstream release
+ * build package for python 2.4
+ * updated copyright
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Thu, 20 Jan 2005 18:00:45 +0100
+
+logilab-common (0.8.0-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Tue, 4 Jan 2005 17:57:04 +0100
+
+logilab-common (0.7.1-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Alexandre Fayolle <afayolle@debian.org> Tue, 9 Nov 2004 15:36:21 +0100
+
+logilab-common (0.7.0-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Mon, 11 Oct 2004 16:29:25 +0200
+
+logilab-common (0.6.0-1) unstable; urgency=low
+
+ * new upstream release
+ * updated debian/watch
+ * updated maintainer address
+
+ -- Alexandre Fayolle <afayolle@debian.org> Thu, 7 Oct 2004 19:39:46 +0200
+
+logilab-common (0.5.2-1) unstable; urgency=low
+
+ * new upstream release
+ * initial upload to Debian (closes: #258239)
+
+ -- Alexandre Fayolle <alexandre.fayolle@logilab.fr> Thu, 8 Jul 2004 12:55:49 +0200
+
+logilab-common (0.5.1-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Fri, 4 Jun 2004 09:57:57 +0200
+
+logilab-common (0.5.0-2) unstable; urgency=low
+
+ * add conflicts with ginco 1.0
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Thu, 27 May 2004 14:02:04 +0200
+
+logilab-common (0.5.0-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Mon, 10 May 2004 16:33:04 +0200
+
+logilab-common (0.4.5-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Fri, 20 Feb 2004 13:59:13 +0100
+
+logilab-common (0.4.4-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Tue, 17 Feb 2004 12:20:24 +0100
+
+logilab-common (0.4.3-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Wed, 11 Feb 2004 10:42:08 +0100
+
+logilab-common (0.4.2-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Tue, 23 Dec 2003 14:52:48 +0100
+
+logilab-common (0.4.1-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Mon, 24 Nov 2003 15:02:03 +0100
+
+logilab-common (0.4.0-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Thu, 13 Nov 2003 18:05:14 +0100
+
+logilab-common (0.3.5-1) unstable; urgency=low
+
+ * new upstream release
+ * move tests in a separated packages
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Tue, 14 Oct 2003 15:27:06 +0200
+
+logilab-common (0.3.4-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Wed, 8 Oct 2003 15:12:14 +0200
+
+logilab-common (0.3.3-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Wed, 1 Oct 2003 09:56:01 +0200
+
+logilab-common (0.3.2-2) unstable; urgency=low
+
+ * fix postinst script to avoid compilation of all packages installed for a given
+ python version.
+ * clean prerm script
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Mon, 22 Sep 2003 17:27:33 +0200
+
+logilab-common (0.3.2-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Mon, 22 Sep 2003 10:39:57 +0200
+
+logilab-common (0.3.1-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Thu, 18 Sep 2003 10:51:58 +0200
+
+logilab-common (0.3.0-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Fri, 12 Sep 2003 13:55:36 +0200
+
+logilab-common (0.2.3-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Mon, 25 Aug 2003 16:01:07 +0200
+
+logilab-common (0.2.2-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Wed, 11 Jun 2003 14:57:09 +0200
+
+logilab-common (0.2.1-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Wed, 4 Jun 2003 18:03:38 +0200
+
+logilab-common (0.2.0-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Mon, 2 Jun 2003 11:30:56 +0200
+
+logilab-common (0.1.3-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Mon, 19 May 2003 11:04:20 +0200
+
+logilab-common (0.1.2-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Thu, 17 Apr 2003 10:18:08 +0200
+
+logilab-common (0.1.1-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Fri, 28 Feb 2003 13:03:02 +0100
+
+logilab-common (0.1.0-1) unstable; urgency=low
+
+ * Initial Release.
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Mon, 17 Feb 2002 12:03:21 +0200
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..44ef21b
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,29 @@
+Source: logilab-common
+Section: python
+Priority: optional
+Maintainer: Alexandre Fayolle <afayolle@debian.org>
+Build-Depends: debhelper (>= 4.0.0), python-dev
+Standards-Version: 3.6.2
+
+Package: python-logilab-common
+Architecture: all
+Depends: python, python-xml
+Provides: python2.2-logilab-common, python2.3-logilab-common, python2.4-logilab-common
+Conflicts: python2.2-logilab-common, python2.3-logilab-common, python2.4-logilab-common, python2.4-constraint ( << 0.3.0-4), python2.3-constraint ( << 0.3.0-4), python-constraint ( << 0.3.0-4), python-logilab-astng ( << 0.14.0-2), python2.3-logilab-astng ( << 0.14.0-2), python2.4-logilab-astng ( << 0.14.0-2), python2.2-pylint ( << 0.9.0-2), python2.3-pylint ( << 0.9.0-2), python2.4-pylint ( << 0.9.0-2), python-pylint ( << 0.9.0-2),
+Replaces: python2.2-logilab-common, python2.3-logilab-common, python2.4-logilab-common
+Description: useful miscellaneous modules used by Logilab projects
+ logilab-common is a collection of low-level Python packages and modules,
+ designed to ease:
+ * handling command line options and configuration files
+ * writing interactive command line tools
+ * manipulation files and character strings
+ * interfacing to OmniORB
+ * generating of SQL queries
+ * running unit tests
+ * manipulating tree structures
+ * accessing RDBMS (currently postgreSQL and mysql)
+ * generating text and HTML reports
+ * logging
+ .
+ Homepage: http://www.logilab.org/projects/common
+
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000..2da2d36
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,28 @@
+This package was debianized by Alexandre Fayolle <afayolle@debian.org> Sat, 13 Apr 2002 19:05:23 +0200.
+
+It was downloaded from ftp://ftp.logilab.org/pub/common
+
+Upstream Author:
+
+ Logilab <devel@logilab.fr>
+
+Copyright:
+
+Copyright (c) 2003-2006 LOGILAB S.A. (Paris, FRANCE).
+http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; either version 2 of the License, or (at your option) any later
+version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+
+On Debian systems, the complete text of the GNU General Public License
+may be found in '/usr/share/common-licenses/GPL'.
diff --git a/debian/python-logilab-common.dirs b/debian/python-logilab-common.dirs
new file mode 100644
index 0000000..2f461a7
--- /dev/null
+++ b/debian/python-logilab-common.dirs
@@ -0,0 +1,6 @@
+usr/lib/site-python
+usr/lib/site-python/logilab
+usr/lib/site-python/logilab/common
+usr/share/doc/python-logilab-common
+usr/share/doc/python-logilab-common
+usr/share/doc/python-logilab-common/test
diff --git a/debian/python-logilab-common.docs b/debian/python-logilab-common.docs
new file mode 100644
index 0000000..3ff7356
--- /dev/null
+++ b/debian/python-logilab-common.docs
@@ -0,0 +1 @@
+doc/html/*
diff --git a/debian/python-logilab-common.postinst b/debian/python-logilab-common.postinst
new file mode 100644
index 0000000..4269962
--- /dev/null
+++ b/debian/python-logilab-common.postinst
@@ -0,0 +1,24 @@
+#! /bin/sh -e
+#
+
+
+
+# precompile python files
+VERSION=2.3
+PACKAGEDIR=/usr/lib/site-python/logilab/common
+case "$1" in
+ configure|abort-upgrade|abort-remove|abort-deconfigure)
+ python$VERSION -O /usr/lib/python$VERSION/compileall.py -q $PACKAGEDIR
+ python$VERSION /usr/lib/python$VERSION/compileall.py -q $PACKAGEDIR
+ ;;
+
+ *)
+ echo "postinst called with unknown argument \`$1'" >&2
+ exit 1
+ ;;
+esac
+
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/python-logilab-common.preinst b/debian/python-logilab-common.preinst
new file mode 100644
index 0000000..09ab97b
--- /dev/null
+++ b/debian/python-logilab-common.preinst
@@ -0,0 +1,20 @@
+#! /bin/bash
+#
+
+set -e
+
+if [[ "$1" = "upgrade" ]] ;
+then
+ if dpkg --compare-versions "$2" lt 0.13.1-4 ;
+ then
+ rm -f /usr/lib/python2.?/site-packages/logilab/__init__.py* ;
+ for d in /usr/lib/python2.?/site-packages/logilab ;
+ do
+ rmdir $d && true ;
+ done ;
+ fi ;
+fi
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/python-logilab-common.prerm b/debian/python-logilab-common.prerm
new file mode 100644
index 0000000..96079a9
--- /dev/null
+++ b/debian/python-logilab-common.prerm
@@ -0,0 +1,14 @@
+#! /bin/sh -e
+#
+
+# remove .pyc and .pyo files
+dpkg --listfiles python-logilab-common |
+ awk '$0~/\.py$/ {print $0"c\n" $0"o"}' |
+ xargs rm -f >&2
+
+
+
+
+#DEBHELPER#
+
+exit 0
diff --git a/debian/rules b/debian/rules
new file mode 100644
index 0000000..36f8724
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,69 @@
+#!/usr/bin/make -f
+# Sample debian/rules that uses debhelper.
+# GNU copyright 1997 to 1999 by Joey Hess.
+#
+# adapted by Logilab for automatic generation by debianize
+# (part of the devtools project, http://www.logilab.org/projects/devtools)
+#
+# Copyright (c) 2003-2005 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+# This is the debhelper compatability version to use.
+export DH_COMPAT=4
+
+
+
+build: build-stamp
+build-stamp:
+ dh_testdir
+ python setup.py -q build
+ touch build-stamp
+
+clean:
+ dh_testdir
+ dh_testroot
+ rm -f build-stamp configure-stamp
+ rm -rf build
+ find . -name "*.pyc" | xargs rm -f
+ rm -f changelog.gz
+ dh_clean
+
+install: build
+ dh_testdir
+ dh_testroot
+ dh_clean -k
+ dh_installdirs
+ python setup.py -q install_lib --no-compile --install-dir=debian/python-logilab-common/usr/lib/site-python
+ python setup.py -q install_headers --install-dir=debian/python-logilab-common/usr/include/
+ # remove test directory (installed in the doc directory)
+ rm -rf debian/python-logilab-common/usr/lib/site-python/logilab/common/test
+ # install tests
+ (cd test && find . -type f -not \( -path '*/CVS/*' -or -name '*.pyc' \) -exec install -D --mode=644 {} ../debian/python-logilab-common/usr/share/doc/python-logilab-common/test/{} \;)
+
+
+# Build architecture-independent files here.
+binary-indep: build install
+ dh_testdir
+ dh_testroot
+ dh_install -i
+ gzip -9 -c ChangeLog > changelog.gz
+ dh_installchangelogs -i
+ dh_installexamples -i
+ dh_installdocs -i README changelog.gz
+ dh_installman -i
+ dh_link -i
+ dh_compress -i -X.py -X.ini -X.xml -Xtest
+ dh_fixperms -i
+ dh_installdeb -i
+ dh_gencontrol -i
+ dh_md5sums -i
+ dh_builddeb -i
+
+
+
+binary: binary-indep
+.PHONY: build clean binary binary-indep
+
diff --git a/debian/watch b/debian/watch
new file mode 100644
index 0000000..9574cda
--- /dev/null
+++ b/debian/watch
@@ -0,0 +1,3 @@
+version=2
+ftp://ftp.logilab.org/pub/common/common-(.*)\.tar\.gz debian uupdate
+
diff --git a/doc/makefile b/doc/makefile
new file mode 100644
index 0000000..72cd324
--- /dev/null
+++ b/doc/makefile
@@ -0,0 +1,17 @@
+#MKHTML=mkdoc
+#MKHTMLOPTS=--doctype book --param toc.section.depth=2 --target html --stylesheet single-file
+#SRC=.
+
+#TXTFILES:= $(wildcard *.txt)
+#TARGET := $(TXTFILES:.txt=.html)
+
+all: apydoc
+
+#%.html: %.txt
+# ${MKHTML} ${MKHTMLOPTS} $<
+
+apydoc:
+ epydoc --html -n "Logilab's common library" ../modutils.py ../textutils.py ../fileutils.py
+
+clean:
+ rm -f *.html
diff --git a/fileutils.py b/fileutils.py
new file mode 100644
index 0000000..6513c7d
--- /dev/null
+++ b/fileutils.py
@@ -0,0 +1,505 @@
+# Copyright (c) 2003-2005 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""Some file / file path manipulation utilities.
+
+:version: $Revision: 1.28 $
+:author: Logilab
+:copyright: 2003-2005 LOGILAB S.A. (Paris, FRANCE)
+:contact: http://www.logilab.fr/ -- mailto:python-projects@logilab.org
+
+:group path manipulation: first_level_directory, relative_path, is_binary,\
+files_by_ext, include_files_by_ext, exclude_files_by_ext, get_by_ext
+:group file manipulation: norm_read, norm_open, lines, stream_lines,\
+write_open_mode, ensure_fs_mode, export
+:sort: path manipulation, file manipulation
+
+
+
+:type BASE_BLACKLIST: tuple
+:var BASE_BLACKLIST:
+ list files or directories ignored by default by the `export` function
+
+:type IGNORED_EXTENSIONS: tuple
+:var IGNORED_EXTENSIONS:
+ list file extensions ignored by default by the `export` function
+
+"""
+
+from __future__ import nested_scopes
+
+__revision__ = "$Id: fileutils.py,v 1.28 2006-02-07 13:46:01 adim Exp $"
+__docformat__ = "restructuredtext en"
+
+import sys
+import shutil
+import mimetypes
+from os.path import isabs, isdir, split, exists, walk, normpath, join
+from os import sep, mkdir, remove, listdir, stat, chmod
+from stat import ST_MODE, S_IWRITE
+from cStringIO import StringIO
+from warnings import warn
+
+
+def first_level_directory(path):
+ """return the first level directory of a path
+
+ >>> first_level_directory('home/syt/work')
+ 'home'
+ >>> first_level_directory('/home/syt/work')
+ '/'
+ >>> first_level_directory('work')
+ 'work'
+ >>>
+
+ :type path: str
+ :param path: the path for which we want the first level directory
+
+ :rtype: str
+ :return: the first level directory appearing in `path`
+ """
+ head, tail = split(path)
+ while head and tail:
+ head, tail = split(head)
+ if tail:
+ return tail
+ # path was absolute, head is the fs root
+ return head
+
+
+def is_binary(filename):
+ """return true if filename may be a binary file, according to its
+ extension
+
+ :type filename: str
+ :param filename: the name of the file
+
+ :rtype: bool
+ :return:
+ true if the file is a binary file (actually if its mime type
+ doesn't begin with text/)
+ """
+ try:
+ return not mimetypes.guess_type(filename)[0].startswith('text')
+ except AttributeError:
+ return 1
+
+
+def write_open_mode(filename):
+ """return the write mode that should be used to open file
+
+ :type filename: str
+ :param filename: the name of the file
+
+ :rtype: str
+ :return: the mode that should be used to open the file ('w' or 'wb')
+ """
+ if is_binary(filename):
+ return 'wb'
+ return 'w'
+
+# backward compat
+def get_mode(*args, **kwargs):
+ """deprecated, use `write_open_mode` instead"""
+ warn('logilab.common.fileutils.get_mode() is deprecated, use '
+ 'write_open_mode() instead', DeprecationWarning)
+ return write_open_mode(*args, **kwargs)
+
+def ensure_fs_mode(filepath, desired_mode=S_IWRITE):
+ """check that the given file has the given mode(s) set, else try to
+ set it
+
+ :type filepath: str
+ :param filepath: path of the file
+
+ :type desired_mode: int
+ :param desired_mode:
+ ORed flags describing the desired mode. Use constants from the
+ `stat` module for file permission's modes
+ """
+ mode = stat(filepath)[ST_MODE]
+ if not mode & desired_mode:
+ chmod(filepath, mode | desired_mode)
+
+ensure_mode = ensure_fs_mode # backward compat
+
+class ProtectedFile(file):
+ """a special file-object class that automatically
+ does a 'chmod +w' when needed
+
+ XXX: for now, the way it is done allows 'normal file-objects' to be
+ created during the ProtectedFile object lifetime.
+ One way to circumvent this would be to chmod / unchmod on each
+ write operation.
+
+ One other way would be to :
+
+ - catch the IOError in the __init__
+
+ - if IOError, then create a StringIO object
+
+ - each write operation writes in this StringIO object
+
+ - on close()/del(), write/append the StringIO content to the file and
+ do the chmod only once
+ """
+ def __init__(self, filepath, mode):
+ self.original_mode = stat(filepath)[ST_MODE]
+ self.mode_changed = False
+ if mode in ('w', 'a', 'wb', 'ab'):
+ if not self.original_mode & S_IWRITE:
+ chmod(filepath, self.original_mode | S_IWRITE)
+ self.mode_changed = True
+ file.__init__(self, filepath, mode)
+
+ def _restore_mode(self):
+ """restores the original mode if needed"""
+ if self.mode_changed:
+ chmod(self.name, self.original_mode)
+ # Don't re-chmod in case of several restore
+ self.mode_changed = False
+
+ def close(self):
+ """restore mode before closing"""
+ self._restore_mode()
+ file.close(self)
+
+ def __del__(self):
+ if not self.closed:
+ self.close()
+
+
+class UnresolvableError(Exception):
+ """exception raised by relative path when it's unable to compute relative
+ path between two paths
+ """
+
+def relative_path(from_file, to_file):
+ """try to get a relative path from `from_file` to `to_file`
+ (path will be absolute if to_file is an absolute file). This function
+ is useful to create link in `from_file` to `to_file`. This typical use
+ case is used in this function description.
+
+ If both files are relative, they're expected to be relative to the same
+ directory.
+
+ >>> relative_path( from_file='toto/index.html', to_file='index.html')
+ '../index.html'
+ >>> relative_path( from_file='index.html', to_file='toto/index.html')
+ 'toto/index.html'
+ >>> relative_path( from_file='tutu/index.html', to_file='toto/index.html')
+ '../toto/index.html'
+ >>> relative_path( from_file='toto/index.html', to_file='/index.html')
+ '/index.html'
+ >>> relative_path( from_file='/toto/index.html', to_file='/index.html')
+ '../index.html'
+ >>> relative_path( from_file='/toto/index.html', to_file='/toto/summary.html')
+ 'summary.html'
+ >>> relative_path( from_file='index.html', to_file='index.html')
+ ''
+ >>> relative_path( from_file='/index.html', to_file='toto/index.html')
+ Traceback (most recent call last):
+ File "<string>", line 1, in ?
+ File "<stdin>", line 37, in relative_path
+ UnresolvableError
+ >>> relative_path( from_file='/index.html', to_file='/index.html')
+ ''
+ >>>
+
+ :type from_file: str
+ :param from_file: source file (where links will be inserted)
+
+ :type to_file: str
+ :param to_file: target file (on which links point)
+
+ :raise UnresolvableError: if it has been unable to guess a correct path
+
+ :rtype: str
+ :return: the relative path of `to_file` from `from_file`
+ """
+ from_file = normpath(from_file)
+ to_file = normpath(to_file)
+ if from_file == to_file:
+ return ''
+ if isabs(to_file):
+ if not isabs(from_file):
+ return to_file
+ elif isabs(from_file):
+ raise UnresolvableError()
+ from_parts = from_file.split(sep)
+ to_parts = to_file.split(sep)
+ idem = 1
+ result = []
+ while len(from_parts) > 1:
+ dirname = from_parts.pop(0)
+ if idem and len(to_parts) > 1 and dirname == to_parts[0]:
+ to_parts.pop(0)
+ else:
+ idem = 0
+ result.append('..')
+ result += to_parts
+ return sep.join(result)
+
+
+from logilab.common.textutils import _LINE_RGX
+from sys import version_info
+_HAS_UNIV_OPEN = version_info[:2] >= (2, 3)
+del version_info
+
+def norm_read(path):
+ """return the content of the file with normalized line feeds
+
+ :type path: str
+ :param path: path to the file to read
+
+ :rtype: str
+ :return: the content of the file with normalized line feeds
+ """
+ if _HAS_UNIV_OPEN:
+ return open(path, 'U').read()
+ return _LINE_RGX.sub('\n', open(path).read())
+
+def norm_open(path):
+ """return a stream for a file with content with normalized line feeds
+
+ :type path: str
+ :param path: path to the file to open
+
+ :rtype: file or StringIO
+ :return: the opened file with normalized line feeds
+ """
+ if _HAS_UNIV_OPEN:
+ return open(path, 'U')
+ return StringIO(_LINE_RGX.sub('\n', open(path).read()))
+
+
+def lines(path, comments=None):
+ """return a list of non empty lines in the file located at `path`
+
+ :type path: str
+ :param path: path to the file
+
+ :type comments: str or None
+ :param comments:
+ optional string which can be used to comment a line in the file
+ (ie lines starting with this string won't be returned)
+
+ :rtype: list
+ :return:
+ a list of stripped lines in the file, without empty and commented
+ lines
+
+ :warning: at some point this function will probably return an iterator
+ """
+ stream = norm_open(path)
+ result = stream_lines(stream, comments)
+ stream.close()
+ return result
+
+def stream_lines(stream, comments=None):
+ """return a list of non empty lines in the given `stream`
+
+ :type stream: object implementing 'xreadlines' or 'readlines'
+ :param stream: file like object
+
+ :type comments: str or None
+ :param comments:
+ optional string which can be used to comment a line in the file
+ (ie lines starting with this string won't be returned)
+
+ :rtype: list
+ :return:
+ a list of stripped lines in the file, without empty and commented
+ lines
+
+ :warning: at some point this function will probably return an iterator
+ """
+ try:
+ readlines = stream.xreadlines
+ except AttributeError:
+ readlines = stream.readlines
+ result = []
+ for line in readlines():
+ line = line.strip()
+ if line and (comments is None or not line.startswith(comments)):
+ result.append(line)
+ return result
+
+
+
+BASE_BLACKLIST = ('CVS', '.svn', 'debian', 'dist', 'build', '__buildlog')
+IGNORED_EXTENSIONS = ('.pyc', '.pyo', '.elc', '~')
+
+def export(from_dir, to_dir,
+ blacklist=BASE_BLACKLIST,
+ ignore_ext=IGNORED_EXTENSIONS,
+ verbose=0):
+ """make a mirror of `from_dir` in `to_dir`, omitting directories and
+ files listed in the black list or ending with one of the given
+ extensions
+
+ :type from_dir: str
+ :param from_dir: directory to export
+
+ :type to_dir: str
+ :param to_dir: destination directory
+
+ :type blacklist: list or tuple
+ :param blacklist:
+ list of files or directories to ignore, default to the content of
+ `BASE_BLACKLIST`
+
+ :type ignore_ext: list or tuple
+ :param ignore_ext:
+ list of extensions to ignore, default to the content of
+ `IGNORED_EXTENSIONS`
+
+ :type verbose: bool
+ :param verbose:
+ flag indicating whether information about exported files should be
+ printed to stderr, default to False
+ """
+ def make_mirror(_, directory, fnames):
+ """walk handler"""
+ for norecurs in blacklist:
+ try:
+ fnames.remove(norecurs)
+ except ValueError:
+ continue
+ for filename in fnames:
+ # don't include binary files
+ for ext in ignore_ext:
+ if filename.endswith(ext):
+ break
+ else:
+ src = join(directory, filename)
+ dest = to_dir + src[len(from_dir):]
+ if verbose:
+ print >> sys.stderr, src, '->', dest
+ if isdir(src):
+ if not exists(dest):
+ mkdir(dest)
+ else:
+ if exists(dest):
+ remove(dest)
+ shutil.copy2(src, dest)
+ try:
+ mkdir(to_dir)
+ except OSError:
+ pass
+ walk(from_dir, make_mirror, None)
+
+
+def files_by_ext(directory, include_exts=None, exclude_exts=None,
+ exclude_dirs=('CVS', '.svn')):
+ """return a list of files in a directory matching (or not) some
+ extensions: you should either give the `include_exts` argument (and
+ only files ending with one of the listed extensions will be
+ considered) or the `exclude_exts` argument (and only files not
+ ending by one of the listed extensions will be considered).
+ Subdirectories are processed recursively.
+
+ :type directory: str
+ :param directory: directory where files should be searched
+
+ :type include_exts: list or tuple or None
+ :param include_exts: list of file extensions to consider
+
+ :type exclude_exts: list or tuple or None
+ :param exclude_exts: list of file extensions to ignore
+
+ :type exclude_dirs: list or tuple or None
+ :param exclude_dirs: list of directory where we should not recurse
+
+ :rtype: list
+ :return: the list of files matching input criteria
+ """
+ assert not (include_exts and exclude_exts)
+ if directory in exclude_dirs:
+ return []
+ if exclude_exts:
+ return exclude_files_by_ext(directory, exclude_exts, exclude_dirs)
+ return include_files_by_ext(directory, include_exts, exclude_dirs)
+
+# backward compat
+def get_by_ext(*args, **kwargs):
+ """deprecated, use `files_by_ext` instead"""
+ warn('logilab.common.fileutils.get_by_ext() is deprecated, use '
+ 'files_by_ext() instead', DeprecationWarning)
+ return files_by_ext(*args, **kwargs)
+
+
+def include_files_by_ext(directory, include_exts,
+ exclude_dirs=('CVS', '.svn')):
+ """return a list of files in a directory matching some extensions
+
+ :type directory: str
+ :param directory: directory where files should be searched
+
+ :type include_exts: list or tuple or None
+ :param include_exts: list of file extensions to consider
+
+ :type exclude_dirs: list or tuple or None
+ :param exclude_dirs: list of directory where we should not recurse
+
+ :rtype: list
+ :return: the list of files matching input criteria
+ """
+ result = []
+ for fname in listdir(directory):
+ absfile = join(directory, fname)
+ for ext in include_exts:
+ if fname.endswith(ext):
+ result.append(join(directory, fname))
+ break
+ else:
+ if isdir(absfile):
+ if fname in exclude_dirs:
+ continue
+ result += include_files_by_ext(join(directory, fname),
+ include_exts, exclude_dirs)
+ return result
+
+def exclude_files_by_ext(directory, exclude_exts,
+ exclude_dirs=('CVS', '.svn')):
+ """return a list of files in a directory not matching some extensions
+
+ :type directory: str
+ :param directory: directory where files should be searched
+
+ :type exclude_exts: list or tuple or None
+ :param exclude_exts: list of file extensions to ignore
+
+ :type exclude_dirs: list or tuple or None
+ :param exclude_dirs: list of directory where we should not recurse
+
+ :rtype: list
+ :return: the list of files matching input criteria
+ """
+ result = []
+ for fname in listdir(directory):
+ absfile = join(directory, fname)
+ for ext in exclude_exts:
+ if fname.endswith(ext) or fname == 'makefile':
+ break
+ else:
+ if isdir(absfile):
+ if fname in exclude_dirs:
+ continue
+ result += exclude_files_by_ext(absfile, exclude_exts,
+ exclude_dirs)
+ else:
+ result.append(join(directory, fname))
+ return result
diff --git a/html.py b/html.py
new file mode 100644
index 0000000..14bd184
--- /dev/null
+++ b/html.py
@@ -0,0 +1,47 @@
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+""" Copyright (c) 2002-2003 LOGILAB S.A. (Paris, FRANCE).
+ http://www.logilab.fr/ -- mailto:contact@logilab.fr
+"""
+
+__revision__ = "$Id: html.py,v 1.5 2003-09-12 11:54:47 syt Exp $"
+
+# mk html traceback error #####################################################
+def html_traceback(info, exception,
+ title='', encoding='ISO-8859-1', body = '') :
+ """ return an html formatted traceback from python exception infos.
+ """
+ import traceback
+ from xml.sax.saxutils import escape
+ #typ, value, tbck = info
+ stacktb = traceback.extract_tb(info[2]) #tbck)
+ strings = []
+ if body:
+ strings.append('<div class="error_body">')
+ strings.append(body)
+ strings.append('</div>')
+
+ if title:
+ strings.append('<h1 class="error">%s</h1>'% escape(title))
+ strings.append('<p class="error">%s</p>' % escape(str(exception)))
+ strings.append('<div class="error_traceback">')
+ for stackentry in stacktb :
+ strings.append('<b>File</b> <b class="file">%s</b>, <b>line</b> '
+ '<b class="line">%s</b>, <b>function</b> '
+ '<b class="function">%s</b>:<br/>'%(
+ escape(stackentry[0]), stackentry[1], stackentry[2]))
+ if stackentry[3]:
+ string = escape(stackentry[3]).encode(encoding)
+ strings.append('&nbsp;&nbsp;%s<br/>\n' % string)
+ strings.append('</div>')
+ return '\n'.join(strings)
diff --git a/interface.py b/interface.py
new file mode 100644
index 0000000..f4800f0
--- /dev/null
+++ b/interface.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2000-2004 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""
+ bases class for interfaces
+
+ TODO:
+ _ implements a check method which check that an object implements the
+ interface
+ _ Attribute objects
+
+ This module requires at least python 2.2
+"""
+
+__revision__ = "$Id: interface.py,v 1.6 2004-05-10 14:40:50 syt Exp $"
+
+from types import ListType, TupleType
+
class Interface:
    """base class for interfaces: subclass it to define a new interface,
    then test candidate objects with `is_implemented_by`
    """
    def is_implemented_by(cls, instance):
        # delegate to the module level implements() function, using this
        # very interface class as the expected interface
        return implements(instance, cls)
    # pre-decorator classmethod idiom, kept for python 2.2 compatibility
    is_implemented_by = classmethod(is_implemented_by)
+
def implements(instance, interface):
    """return true (1) when `instance` declares, through its
    `__implements__` attribute, that it implements the given interface,
    false (0) otherwise

    `__implements__` may be a single interface or a list/tuple of them
    """
    if not hasattr(instance, "__implements__"):
        return 0
    declared = instance.__implements__
    if interface is declared:
        return 1
    if type(declared) in (ListType, TupleType) and interface in declared:
        return 1
    return 0
+
+
diff --git a/logger.py b/logger.py
new file mode 100644
index 0000000..bf98422
--- /dev/null
+++ b/logger.py
@@ -0,0 +1,152 @@
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+""" Copyright (c) 2002-2003 LOGILAB S.A. (Paris, FRANCE).
+ http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+Define a logger interface and two concrete loggers : one which prints
+everything on stdout, the other using syslog.
+"""
+
+__revision__ = "$Id: logger.py,v 1.18 2006-02-03 14:17:42 adim Exp $"
+
+
+import sys
+import traceback
+import time
+
+
# syslog-like priority levels, from most (0) to least (7) important
LOG_EMERG = 0
LOG_ALERT = 1
LOG_CRIT = 2
LOG_ERR = 3
LOG_WARN = 4
LOG_NOTICE = 5
LOG_INFO = 6
LOG_DEBUG = 7

# human readable name for each priority, indexed by priority value
INDICATORS = ['emergency', 'alert', 'critical', 'error',
              'warning', 'notice', 'info', 'debug']
+
+
def make_logger(method='print', threshold=LOG_DEBUG, sid=None, output=None):
    """return a logger instance suited for the given method

    known methods are 'print', 'eprint', 'syslog' and 'file'; 'file'
    requires `output` to be the path of the log file, 'print' uses
    `output` as destination stream (standard output by default)
    """
    if method == 'print':
        if output is None:
            output = sys.stdout
        return PrintLogger(threshold, output, sid=sid)
    if method == 'eprint':
        return PrintLogger(threshold, sys.stderr, sid=sid)
    if method == 'syslog':
        return SysLogger(threshold, sid)
    if method == 'file':
        if not output:
            raise ValueError('No logfile specified')
        return PrintLogger(threshold, open(output, 'a'), sid=sid)
    raise ValueError('Unknown logger method: %r' % method)
+
+
class AbstractLogger:
    """logger interface

    priorities allow filtering on the importance of events: an event is
    logged only when its priority is lower than or equal to the threshold
    """

    def __init__(self, threshold=LOG_DEBUG, priority_indicator=1):
        # events with a priority value above `threshold` are dropped
        self.threshold = threshold
        # when true, prefix each message with its priority name
        self.priority_indicator = priority_indicator

    def log(self, priority=LOG_DEBUG, message='', substs=None):
        """log a message with priority <priority>

        `substs` are optional substitution values applied to the message
        with the % operator
        """
        if priority > self.threshold:
            return
        if substs is not None:
            message = message % substs
        if self.priority_indicator:
            message = '[%s] %s' % (INDICATORS[priority], message)
        self._writelog(priority, message)

    def _writelog(self, priority, message):
        """actually output the message, override in concrete classes"""
        raise NotImplementedError()

    def log_traceback(self, priority=LOG_ERR, tb_info=None):
        """log traceback information with priority <priority>

        `tb_info` is a (type, value, traceback) tuple as returned by
        sys.exc_info() and is mandatory
        """
        assert tb_info is not None
        e_type, value, tbck = tb_info
        lines = ['Traceback (most recent call last):']
        for entry in traceback.extract_tb(tbck):
            if entry[3]:
                extra = '\n %s' % entry[3]
            else:
                extra = ''
            lines.append('filename="%s" line_number="%s" function_name="%s"%s'
                         % (entry[0], entry[1], entry[2], extra))
        lines.append('%s: %s' % (e_type, value))
        self.log(priority, '\n'.join(lines))
+
+
class PrintLogger(AbstractLogger):
    """logger implementation

    log everything to a file, using the standard output by default
    """

    def __init__(self, threshold, output=sys.stdout, sid=None,
                 encoding='UTF-8'):
        # NOTE(review): the sys.stdout default is bound once at class
        # definition time; a later rebinding of sys.stdout won't be seen
        AbstractLogger.__init__(self, threshold)
        # file-like object where messages are written
        self.output = output
        # optional session identifier included in every line
        self.sid = sid
        # encoding used to serialize unicode messages (python 2 only)
        self.encoding = encoding

    def _writelog(self, priority, message):
        """overriden from AbstractLogger: write the timestamped message to
        the output stream and flush it immediately
        """
        # python 2 specific: encode unicode messages, replacing characters
        # the configured encoding can't represent
        if isinstance(message, unicode):
            message = message.encode(self.encoding, 'replace')
        if self.sid is not None:
            self.output.write('[%s] [%s] %s\n' % (time.asctime(), self.sid,
                                                  message))
        else:
            self.output.write('[%s] %s\n' % (time.asctime(), message))
        self.output.flush()
+
class SysLogger(AbstractLogger):
    """ logger implementation

    log everything to syslog daemon
    use the LOCAL_7 facility
    """

    def __init__(self, threshold, sid=None, encoding='UTF-8'):
        # syslog is imported lazily -- presumably to keep this module
        # importable on platforms without it; TODO confirm
        import syslog
        AbstractLogger.__init__(self, threshold)
        if sid is None:
            sid = 'syslog'
        # encoding used to serialize unicode messages (python 2 only)
        self.encoding = encoding
        # identify this process to syslog, logging the pid as well
        syslog.openlog(sid, syslog.LOG_PID)

    def _writelog(self, priority, message):
        """overriden from AbstractLogger: forward the message to the syslog
        daemon on the LOCAL7 facility
        """
        import syslog
        # python 2 specific: encode unicode messages before handing them
        # to syslog
        if isinstance(message, unicode):
            message = message.encode(self.encoding, 'replace')
        syslog.syslog(priority | syslog.LOG_LOCAL7, message)
+
diff --git a/logservice.py b/logservice.py
new file mode 100644
index 0000000..dd90f7b
--- /dev/null
+++ b/logservice.py
@@ -0,0 +1,30 @@
+"""log utilities
+
+Copyright (c) 2003-2004 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+http://www.logilab.fr/ -- mailto:contact@logilab.fr
+"""
+
+__revision__ = "$Id: logservice.py,v 1.5 2006-03-05 16:13:28 syt Exp $"
+
+from logilab.common.logger import make_logger, LOG_ERR, LOG_WARN, LOG_NOTICE, \
+ LOG_INFO, LOG_CRIT, LOG_DEBUG
+
def init_log(treshold, method='eprint', sid='common-log-service',
             logger=None, output=None):
    """init the logging system by building a logger (unless one is given)
    and installing the log methods and priority constants into builtins

    :param treshold: logging threshold, one of the LOG_* priorities
      (parameter name typo is kept: it is part of the public interface)
    :param method: output method, see `logger.make_logger`
    :param sid: session identifier forwarded to the logger
    :param logger: optional ready-made logger instance, used as is
    :param output: optional output file, method dependent
    """
    if logger is None:
        logger = make_logger(method, treshold, sid, output=output)
    # add log functions and constants to builtins
    # NOTE(review): __builtins__ is a dict in imported modules but the
    # __builtin__ *module* in __main__, where .update would fail -- confirm
    __builtins__.update({'log': logger.log,
                         'log_traceback' : logger.log_traceback,
                         'LOG_CRIT': LOG_CRIT,
                         'LOG_ERR': LOG_ERR,
                         'LOG_WARN': LOG_WARN,
                         'LOG_NOTICE': LOG_NOTICE,
                         'LOG_INFO' : LOG_INFO,
                         'LOG_DEBUG': LOG_DEBUG,
                         })
+
+init_log(LOG_ERR)
+
+
diff --git a/modutils.py b/modutils.py
new file mode 100644
index 0000000..98faf57
--- /dev/null
+++ b/modutils.py
@@ -0,0 +1,536 @@
+# Copyright (c) 2003-2005 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""Python modules manipulation utility functions.
+
+:version: $Revision: 1.53 $
+:author: Logilab
+:copyright: 2003-2005 LOGILAB S.A. (Paris, FRANCE)
+:contact: http://www.logilab.fr/ -- mailto:python-projects@logilab.org
+
+
+
+:type PY_SOURCE_EXTS: tuple(str)
+:var PY_SOURCE_EXTS: list of possible python source file extension
+
+:type STD_LIB_DIR: str
+:var STD_LIB_DIR: directory where standard modules are located
+
+:type BUILTIN_MODULES: dict
+:var BUILTIN_MODULES: dictionary with builtin module names has key
+"""
+
+from __future__ import nested_scopes
+
+__revision__ = "$Id: modutils.py,v 1.53 2006-03-06 08:05:11 syt Exp $"
+__docformat__ = "restructuredtext en"
+
+import sys
+import os
+from os.path import walk, splitext, join, abspath, isdir, dirname, exists
+from imp import find_module, load_module, C_BUILTIN, PY_COMPILED, PKG_DIRECTORY
+
+
# platform dependent source / compiled extensions and standard library
# location
if sys.platform.startswith('win'):
    PY_SOURCE_EXTS = ('py', 'pyw')
    PY_COMPILED_EXTS = ('dll', 'pyd')
    STD_LIB_DIR = join(sys.prefix, 'lib')
else:
    PY_SOURCE_EXTS = ('py',)
    PY_COMPILED_EXTS = ('so',)
    # e.g. /usr/lib/python2.4 (sys.version[:3] is the 'major.minor' prefix)
    STD_LIB_DIR = join(sys.prefix, 'lib', 'python%s' % sys.version[:3])

# builtin module names as keys, for fast membership test
BUILTIN_MODULES = dict(zip(sys.builtin_module_names,
                           [1]*len(sys.builtin_module_names)))
+
+
class NoSourceFile(Exception):
    """exception raised when we are not able to get a python
    source file for a precompiled file (see `get_source_file`)
    """
+
+
def load_module_from_name(dotted_name, path=None, use_sys=1):
    """load a Python module from its name

    :type dotted_name: str
    :param dotted_name: python name of a module or package

    :type path: list or None
    :param path:
      optional list of path where the module or package should be
      searched (use sys.path if nothing or None is given)

    :type use_sys: bool
    :param use_sys:
      boolean indicating whether the sys.modules dictionary should be
      used or not

    :raise ImportError: if the module or package is not found

    :rtype: module
    :return: the loaded module
    """
    # simply delegate to the splitted name variant
    parts = dotted_name.split('.')
    return load_module_from_modpath(parts, path, use_sys)
+
+
def load_module_from_modpath(parts, path=None, use_sys=1):
    """load a python module from its splitted name

    :type parts: list(str) or tuple(str)
    :param parts:
      python name of a module or package splitted on '.'

    :type path: list or None
    :param path:
      optional list of path where the module or package should be
      searched (use sys.path if nothing or None is given)

    :type use_sys: bool
    :param use_sys:
      boolean indicating whether the sys.modules dictionary should be used or not

    :raise ImportError: if the module or package is not found

    :rtype: module
    :return: the loaded module
    """
    modpath = []
    prevmodule = None
    # import one dotted component at a time, restricting each find_module
    # call to the parent package's directory
    for part in parts:
        modpath.append(part)
        curname = ".".join(modpath)
        module = None
        if use_sys:
            # already imported ?
            module = sys.modules.get( curname )
        if module is None:
            mp_file, mp_filename, mp_desc = find_module(part, path)
            module = load_module( curname, mp_file, mp_filename, mp_desc)
        if prevmodule:
            # bind the freshly loaded submodule as an attribute of its parent
            setattr(prevmodule, part, module)
        _file = getattr( module, "__file__", "" )
        if not _file and len(modpath) != len(parts):
            # a module without a file (e.g. a builtin) can't contain the
            # remaining components
            raise ImportError("no module in %s" % ".".join( parts[len(modpath):] ) )
        # next component will be searched in this module's directory
        path = [dirname( _file )]
        prevmodule = module
    return module
+
+load_module_from_parts = load_module_from_modpath # backward compat
+
+
+
def modpath_from_file(filename):
    """given a file path return the corresponding splitted module's name
    (i.e name of a module or package splitted on '.')

    :type filename: str
    :param filename: file's path for which we want the module's name

    :raise ImportError:
      if the corresponding module's name has not been found

    :rtype: list(str)
    :return: the corresponding splitted module's name
    """
    base = splitext(abspath(filename))[0]
    for search_path in sys.path:
        search_path = abspath(search_path)
        if not search_path or not base.startswith(search_path):
            continue
        # a file under site-packages can only be related to a
        # site-packages search path
        if filename.find('site-packages') != -1 and \
               search_path.find('site-packages') == -1:
            continue
        mod_path = [mod for mod in base[len(search_path):].split(os.sep)
                    if mod]
        # every enclosing directory must be an actual package
        subdir = search_path
        is_package_path = True
        for part in mod_path[:-1]:
            subdir = join(subdir, part)
            if not _has_init(subdir):
                is_package_path = False
                break
        if is_package_path:
            return mod_path
    raise ImportError('Unable to find module for %s in %s' % (
        filename, ', \n'.join(sys.path)))
+
+
+
def file_from_modpath(modpath, path=None, context_file=None):
    """given a mod path (ie splited module / package name), return the
    corresponding file, giving priority to source file over precompiled
    file if it exists

    :type modpath: list or tuple
    :param modpath:
      splitted module's name (i.e name of a module or package splitted
      on '.')

    :type path: list or None
    :param path:
      optional list of path where the module or package should be
      searched (use sys.path if nothing or None is given)

    :type context_file: str or None
    :param context_file:
      context file to consider, necessary if the identifier has been
      introduced using a relative import unresolvable in the actual
      context (i.e. modutils)

    :raise ImportError: if there is no such module in the directory

    :rtype: str or None
    :return:
      the path to the module's file or None if it's an integrated
      builtin module such as 'sys'
    """
    if context_file is None:
        context = None
    else:
        context = dirname(context_file)
    if modpath[0] == 'xml':
        # handle _xmlplus, which may override the standard xml package
        try:
            return _file_from_modpath(['_xmlplus'] + modpath[1:], path, context)
        except ImportError:
            return _file_from_modpath(modpath, path, context)
    if modpath == ['os', 'path']:
        # os.path is set dynamically by the os module
        # FIXME: currently ignoring search_path...
        return os.path.__file__
    return _file_from_modpath(modpath, path, context)
+
+
+
def get_module_part(dotted_name, context_file=None):
    """given a dotted name return the module part of the name :

    >>> get_module_part('logilab.common.modutils.get_module_part')
    'logilab.common.modutils'

    :type dotted_name: str
    :param dotted_name: full name of the identifier we are interested in

    :type context_file: str or None
    :param context_file:
      context file to consider, necessary if the identifier has been
      introduced using a relative import unresolvable in the actual
      context (i.e. modutils)

    :raise ImportError: if there is no such module in the directory

    :rtype: str or None
    :return:
      the module part of the name or None if we have not been able at
      all to import the given name

    XXX: deprecated, since it doesn't handle package precedence over module
    (see #10066)
    """
    # os.path trick: it is set dynamically and can't be resolved below
    if dotted_name.startswith('os.path'):
        return 'os.path'
    parts = dotted_name.split('.')
    if context_file is not None and parts[0] in BUILTIN_MODULES:
        # a builtin module can't be a package, so at most one extra
        # attribute level is acceptable
        if len(parts) > 2:
            raise ImportError(dotted_name)
        return parts[0]
    # grow the candidate module path until resolution fails: the last
    # resolvable prefix is the module part
    for index in range(len(parts)):
        try:
            file_from_modpath(parts[:index+1], context_file=context_file)
        except ImportError:
            if index < max(1, len(parts) - 2):
                raise
            return '.'.join(parts[:index])
    return dotted_name
+
+
+
def get_modules(package, src_directory, blacklist=('CVS', '.svn', 'debian')):
    """given a package directory return a list of all available python
    modules in the package and its subpackages

    :type package: str
    :param package: the python name for the package

    :type src_directory: str
    :param src_directory:
      path of the directory corresponding to the package

    :type blacklist: list or tuple
    :param blacklist:
      optional list of files or directory to ignore, default to 'CVS',
      '.svn' and 'debian'

    :rtype: list
    :return:
      the list of all available python modules in the package and its
      subpackages
    """
    def func(modules, directory, fnames):
        """walk handler"""
        # remove files/directories in the black list: mutating `fnames` in
        # place tells os.path.walk not to visit them
        for norecurs in blacklist:
            try:
                fnames.remove(norecurs)
            except ValueError:
                continue
        # check for __init__.py: a directory without it is not a package,
        # empty `fnames` in place so walk doesn't recurse below it
        if not '__init__.py' in fnames:
            while fnames:
                fnames.pop()
        elif directory != src_directory:
            # record the sub-package itself; the stripped prefix leaves a
            # leading os.sep, turned into the leading '.' separator
            #src = join(directory, file)
            dir_package = directory[len(src_directory):].replace(os.sep, '.')
            modules.append(package + dir_package)
        for filename in fnames:
            src = join(directory, filename)
            if isdir(src):
                continue
            if _is_python_file(filename) and filename != '__init__.py':
                # NOTE(review): [:-3] assumes a 3-char '.py' suffix; .pyw or
                # .so files accepted by _is_python_file would be mis-cut
                module = package + src[len(src_directory):-3]
                modules.append(module.replace(os.sep, '.'))
    modules = []
    walk(src_directory, func, modules)
    return modules
+
+
+
def get_module_files(src_directory, blacklist=('CVS', '.svn', 'debian')):
    """given a package directory return a list of all available python
    module's files in the package and its subpackages

    :type src_directory: str
    :param src_directory:
      path of the directory corresponding to the package

    :type blacklist: list(str) or tuple(str)
    :param blacklist:
      optional list of files or directory to ignore, default to 'CVS',
      '.svn' and 'debian'

    :rtype: list
    :return:
      the list of all available python module's files in the package and
      its subpackages
    """
    def func(files, directory, fnames):
        """walk handler"""
        # remove files/directories in the black list: mutating `fnames` in
        # place tells os.path.walk not to visit them
        for norecurs in blacklist:
            try:
                fnames.remove(norecurs)
            except ValueError:
                continue
        # check for __init__.py: a directory without it is not a package,
        # empty `fnames` in place so walk doesn't recurse below it
        if not '__init__.py' in fnames:
            while fnames:
                fnames.pop()
        for filename in fnames:
            src = join(directory, filename)
            if isdir(src):
                continue
            if _is_python_file(filename):
                files.append(src)
    files = []
    walk(src_directory, func, files)
    return files
+
+
def get_source_file(filename, include_no_ext=False):
    """given a python module's file name return the matching source file
    name (the filename will be returned identically if it's a already an
    absolute path to a python source file...)

    :type filename: str
    :param filename: python module's file name

    :raise NoSourceFile: if no source file exists on the file system

    :rtype: str
    :return: the absolute path of the source file if it exists
    """
    base, orig_ext = splitext(abspath(filename))
    for source_ext in PY_SOURCE_EXTS:
        candidate = '%s.%s' % (base, source_ext)
        if exists(candidate):
            return candidate
    # optionally accept an existing extension-less file (e.g. a script)
    if include_no_ext and not orig_ext and exists(base):
        return base
    raise NoSourceFile(filename)
+
+
+
def is_python_source(filename):
    """return True when the file name carries a python source extension

    rtype: bool
    """
    extension = splitext(filename)[1]
    # drop the leading dot before comparing with PY_SOURCE_EXTS entries
    return extension[1:] in PY_SOURCE_EXTS
+
+
+
def is_standard_module(modname, std_path=(STD_LIB_DIR,)):
    """try to guess if a module is a standard python module (by default,
    see `std_path` parameter's description)

    :type modname: str
    :param modname: name of the module we are interested in

    :type std_path: list(str) or tuple(str)
    :param std_path: list of path considered has standard

    :rtype: bool
    :return:
      true if the module:
      - is located on the path listed in one of the directory in `std_path`
      - is a built-in module
    """
    modpath = modname.split('.')
    modname = modpath[0]
    try:
        filename = file_from_modpath(modpath)
    except ImportError:
        # import failed, i'm probably not so wrong by supposing it's
        # not standard...
        return 0
    # modules which are not living in a file are considered standard
    # (sys and __builtin__ for instance)
    if filename is None:
        return 1
    filename = abspath(filename)
    for path in std_path:
        path = abspath(path)
        if filename.startswith(path):
            pfx_len = len(path)
            # skip the path separator and look for 'site-packages':
            # modules installed there are third party, not standard
            # NOTE(review): assumes `path` has no trailing separator and
            # that exactly one separator follows it -- confirm
            if filename[pfx_len+1:pfx_len+14] != 'site-packages':
                return 1
            return 0
    return False
+
+
+
def is_relative(modname, from_file):
    """return true if the given module name is relative to the given
    file name

    :type modname: str
    :param modname: name of the module we are interested in

    :type from_file: str
    :param from_file:
      path of the module from which modname has been imported

    :rtype: bool
    :return:
      true if the module has been imported relativly to `from_file`
    """
    directory = from_file
    if not isdir(directory):
        directory = dirname(directory)
    if directory in sys.path:
        # the directory is absolutely importable, not a relative import
        return False
    try:
        find_module(modname.split('.')[0], [directory])
    except ImportError:
        return False
    return True
+
+
+# internal only functions #####################################################
+
def _file_from_modpath(modpath, path=None, context=None):
    """given a mod path (ie splited module / package name), return the
    corresponding file

    this function is used internally, see `file_from_modpath`'s
    documentation for more information
    """
    assert len(modpath) > 0
    if context is None:
        mtype, mp_filename = _module_file(modpath, path)
    else:
        # first try to resolve relatively to the context directory, then
        # fall back to the regular search path
        try:
            mtype, mp_filename = _module_file(modpath, [context])
        except ImportError:
            mtype, mp_filename = _module_file(modpath, path)
    if mtype == PY_COMPILED:
        # prefer the source file over the precompiled one when available
        try:
            return get_source_file(mp_filename)
        except NoSourceFile:
            return mp_filename
    if mtype == C_BUILTIN:
        # integrated builtin module: it has no file
        return None
    if mtype == PKG_DIRECTORY:
        # for a package, return its __init__ file
        mp_filename = _has_init(mp_filename)
    return mp_filename
+
def _module_file(modpath, path=None):
    """get a module type / file path

    :type modpath: list or tuple
    :param modpath:
      splitted module's name (i.e name of a module or package splitted
      on '.')

    :type path: list or None
    :param path:
      optional list of path where the module or package should be
      searched (use sys.path if nothing or None is given)

    :rtype: tuple(int, str)
    :return: the module type flag and the file path for a module
    """
    # iterate instead of pop(0)-ing: the previous implementation consumed
    # the caller's `modpath` list in place, so a failed lookup left it
    # truncated and broke _file_from_modpath's fallback retry on the
    # regular search path
    mtype = mp_filename = None
    for index, part in enumerate(modpath):
        _, mp_filename, mp_desc = find_module(part, path)
        mtype = mp_desc[2]
        if index + 1 < len(modpath):
            if mtype != PKG_DIRECTORY:
                raise ImportError('No module %r'
                                  % '.'.join(modpath[index + 1:]))
            # search the next component inside this package's directory
            path = [mp_filename]
    return mtype, mp_filename
+
+def _is_python_file(filename):
+ """return true if the given filename should be considered as a python file
+
+ .pyc and .pyo are ignored
+ """
+ for ext in ('.py', '.so', '.pyd', '.pyw'):
+ if filename.endswith(ext):
+ return True
+ return False
+
+
+def _has_init(directory):
+ """if the given directory has a valid __init__ file, return its path,
+ else return None
+ """
+ mod_or_pack = join(directory, '__init__')
+ for ext in ('.py', '.pyw', '.pyc', '.pyo'):
+ if exists(mod_or_pack + ext):
+ return mod_or_pack + ext
+ return None
diff --git a/monclient.py b/monclient.py
new file mode 100644
index 0000000..243f06b
--- /dev/null
+++ b/monclient.py
@@ -0,0 +1,59 @@
+"""Simple interpreter client for monserver
+provides a simple readline interface.
+"""
+from socket import socket, SOCK_STREAM, AF_INET
+from select import select
+import sys
+import readline
+import threading
+
class SocketPrinter(threading.Thread):
    """A thread that reads from a socket and output
    to stdout as data are received"""

    def __init__(self, sock):
        threading.Thread.__init__(self)
        self.socket = sock
        # polled by run(): set to True to ask the thread to terminate
        self.stop = False

    def run(self):
        """prints socket input indefinitely"""
        fd = self.socket.fileno()
        self.socket.setblocking(0)
        while not self.stop:
            # wait at most 2 seconds so the stop flag is checked regularly
            ready, _, _ = select([fd], [], [], 2)
            if fd not in ready:
                continue
            data = self.socket.recv(100)
            if data:
                sys.stdout.write(data)
                sys.stdout.flush()
+
+
+
def client( host, port ):
    """simple client that just sends input to the server"""
    sock = socket( AF_INET, SOCK_STREAM )
    sock.connect( (host, port) )
    # background thread echoing everything received from the server
    sp_thread = SocketPrinter(sock)
    sp_thread.start()
    # forward standard input to the server, one line at a time
    while 1:
        try:
            line = raw_input() + "\n"
            sock.send( line )
        except EOFError:
            # end of input (^D): leave the loop and shut the thread down
            print "Bye"
            break
        except:
            # make sure the printer thread terminates before propagating
            # the error
            sp_thread.stop = True
            sp_thread.join()
            raise
    sp_thread.stop = True
    sp_thread.join()
+
+
if __name__ == "__main__":
    # usage: monclient.py <host> <port>
    server_host = sys.argv[1]
    server_port = int(sys.argv[2])
    client(server_host, server_port)
+
+
+
diff --git a/monserver.py b/monserver.py
new file mode 100644
index 0000000..bdf38d3
--- /dev/null
+++ b/monserver.py
@@ -0,0 +1,117 @@
+# -*- coding: iso-8859-1 -*-
+"""This module implements a TCP server in a separate thread that
+allows *one* client to connect and provides a command line interpreter
+allowing the remote client to explore the process on the fly
+"""
+
+__revision__ = '$Id: monserver.py,v 1.2 2005-11-22 13:13:02 syt Exp $'
+
+import threading
+import SocketServer
+import traceback
+import code
+import sys
+import time
+
+
+# NOTES: ce module étant utilisé pour l'introspection, il peut
+# être utile de fournir dans les locales de l'interpreteur des
+# objets déjà initialisés (par exemple le module __main__ ou
+# bien __main__.*) ou encore des objets servant à l'introspection
+# comme on en trouve dans pymonitor (qui prend la liste des objets
+# maintenus par le garbage collector) ou a des statistiques
+# pour faire des opérations du style:
+# inspector.count_types( MyClass )
+# inspector.list_types( MyClass ) etc...
+
class MonitorInterpreter(code.InteractiveConsole):
    """Subclasses InteractiveConsole so that all inputs
    and outputs are done through a socket"""

    def __init__(self, rfile, wfile ):
        code.InteractiveConsole.__init__(self)
        self.wfile = wfile
        self.rfile = rfile
        # redirect all outputs of the interpreted code to the remote client
        sys.stdout = self.wfile
        sys.stderr = self.wfile

    def write(self, data):
        """replace stderr output by writing to wfile"""
        self.wfile.write( data )
        self.wfile.flush()

    def raw_input( self, prompt = None ):
        """Provides reading lines through the network"""
        if prompt is not None:
            self.wfile.write(prompt)
            self.wfile.flush()
        line = self.rfile.readline()
        # strip the trailing end-of-line marker, CRLF or bare LF
        if line.endswith("\r\n"):
            return line[:-2]
        if line.endswith("\n"):
            return line[:-1]
        return line
+
+
class MonitorRequestHandler(SocketServer.BaseRequestHandler):
    """Request handler for remote interpreter"""

    def __init__(self, request, clientaddress, server ):
        # environment for the remote interpreter session
        self.locals = {}
        self.globals = globals().copy()
        # buffered file interfaces over the client socket
        self.wfile = request.makefile("w")
        self.rfile = request.makefile("r")
        # beware: BaseRequestHandler.__init__ itself calls handle()
        SocketServer.BaseRequestHandler.__init__(self, request, clientaddress,
                                                 server )

    def handle(self):
        """handle on request, through MonitorInterpreter"""
        # MonitorInterpreter rebinds sys.stdout/sys.stderr to the client
        # socket, so keep the current streams to report our own errors
        # NOTE(review): they are only restored on unexpected errors, not on
        # a normal exit nor on KeyboardInterrupt -- confirm intent
        saved_stdout = sys.stdout
        saved_stderr = sys.stderr
        interpreter = MonitorInterpreter(self.rfile, self.wfile)
        try:
            interpreter.interact()
        except KeyboardInterrupt:
            # ask the server loop to terminate
            self.server.exit = True
        except:
            sys.stdout = saved_stdout
            sys.stderr = saved_stderr
            traceback.print_exc()
        print "Monitor handler exited"
+
class Monitor(threading.Thread):
    """Monitor server. monothreaded we only
    allow one client at a time"""

    def __init__(self, host, port):
        threading.Thread.__init__(self)
        self.host = host
        self.port = port
        # polled between two requests: set to True to shut the server down
        self.exit = False

    def run(self):
        """run the server loop, serving one request at a time until the
        exit flag is set"""
        address = (self.host, self.port)
        server = SocketServer.TCPServer( address, MonitorRequestHandler )
        while not self.exit:
            server.handle_request()
+
+
+
def demo_forever():
    """sample demo server that outputs
    numbers on screen"""
    cnt = 1
    # loop forever, only meant to be stopped from the keyboard
    while 1:
        print cnt
        time.sleep(2)
        cnt += 1
+
if __name__ == "__main__":
    # usage: monserver.py <port> -- serve the monitor on all interfaces
    # while running the demo loop in the main thread
    listen_port = int(sys.argv[1])
    mon = Monitor( "", listen_port )
    mon.start()
    try:
        demo_forever()
    except Exception:
        traceback.print_exc()
    # make sure the monitor thread terminates with the process
    mon.exit = True
    mon.join()
diff --git a/optik_ext.py b/optik_ext.py
new file mode 100644
index 0000000..aa51a7d
--- /dev/null
+++ b/optik_ext.py
@@ -0,0 +1,276 @@
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+""" Copyright (c) 2003-2006 LOGILAB S.A. (Paris, FRANCE).
+ http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+add an abstraction level to transparently import optik classes from optparse
+(python >= 2.3) or the optik package.
+It also defines three new types for optik/optparse command line parser :
+
+ * regexp
+ argument of this type will be converted using re.compile
+ * csv
+ argument of this type will be converted using split(',')
+ * yn
+ argument of this type will be true if 'y' or 'yes', false if 'n' or 'no'
+ * named
+ argument of this type are in the form <NAME>=<VALUE> or <NAME>:<VALUE>
+
+"""
+
+__revision__ = '$Id: optik_ext.py,v 1.16 2006-03-28 10:34:54 syt Exp $'
+
+try:
+ # python >= 2.3
+ from optparse import OptionParser as BaseParser, Option as BaseOption, \
+ OptionGroup, OptionValueError, OptionError, Values, HelpFormatter
+except Exception, ex:
+ # python < 2.3
+ from optik import OptionParser as BaseParser, Option as BaseOption, \
+ OptionGroup, OptionValueError, OptionError, Values, HelpFormatter
+
+import re
+import sys
+import time
+from copy import copy
+from os.path import exists
+
+from logilab.common.textutils import get_csv
+
def check_regexp(option, opt, value):
    """check a regexp value by trying to compile it
    return the compiled regexp

    :param option: the Option instance being processed (unused)
    :param opt: the option string, used in the error message
    :param value: an already compiled pattern (returned as is) or a
      pattern string
    :raise OptionValueError: if the pattern string doesn't compile
    """
    if hasattr(value, 'pattern'):
        # already a compiled regexp object
        return value
    try:
        return re.compile(value)
    except (re.error, ValueError):
        # re.compile raises re.error on a bad pattern, not ValueError
        # as the previous version assumed
        raise OptionValueError(
            "option %s: invalid regexp value: %r" % (opt, value))
+
def check_csv(option, opt, value):
    """check a csv value by trying to split it
    return the list of separated values

    lists and tuples are returned unchanged
    :raise OptionValueError: if the value can't be split
    """
    if isinstance(value, (list, tuple)):
        return value
    try:
        return get_csv(value)
    except ValueError:
        # message fixed: the previous version wrongly said "regexp"
        raise OptionValueError(
            "option %s: invalid csv value: %r" % (opt, value))
+
def check_yn(option, opt, value):
    """Validate a yes/no option value.

    Integers are simply coerced through bool(); the strings 'y'/'yes'
    map to True and 'n'/'no' to False.

    :raise OptionValueError: for any other value
    """
    if isinstance(value, int):
        return bool(value)
    for answer, result in (('y', True), ('yes', True),
                           ('n', False), ('no', False)):
        if value == answer:
            return result
    msg = "option %s: invalid yn value %r, should be in (y, yes, n, no)"
    raise OptionValueError(msg % (opt, value))
+
def check_named(option, opt, value):
    """Validate a named value and return it as a (name, value) pair.

    An even-length list or tuple is returned unchanged; a string is cut
    on its first '=' (tried first) or ':' separator.

    :raise OptionValueError: when no separator is found
    """
    if isinstance(value, (list, tuple)) and len(value) % 2 == 0:
        return value
    for separator in ('=', ':'):
        if separator in value:
            return value.split(separator, 1)
    msg = ("option %s: invalid named value %r, should be "
           "<NAME>=<VALUE> or <NAME>:<VALUE>")
    raise OptionValueError(msg % (opt, value))
+
def check_file(option, opt, value):
    """Validate that *value* names an existing path and return it.

    :raise OptionValueError: when the path does not exist
    """
    if not exists(value):
        raise OptionValueError(
            "option %s: file %r does not exist" % (opt, value))
    return value
+
def check_color(option, opt, value):
    """check a color value and return it

    accepts either a color label made of letters, digits and spaces
    (trusted as-is, not validated against a known color list) or a
    '#rrggbb' hexadecimal form

    :raise OptionValueError: for any other value
    """
    # Case (1) : color label, we trust the end-user
    if re.match('[a-z0-9 ]+$', value, re.I):
        return value
    # Case (2) : only accepts hexadecimal forms
    if re.match('#[a-f0-9]{6}', value, re.I):
        return value
    # Else : not a color label neither a valid hexadecimal form => error
    # (message fixed: the broken line continuation embedded stray
    # indentation spaces in the string, and 'predefinied' was a typo)
    msg = ("option %s: invalid color : %r, should be either hexadecimal "
           "value or predefined color")
    raise OptionValueError(msg % (opt, value))
+
+import types
+
class Option(BaseOption):
    """override optik.Option to add some new option types (see the
    module docstring): regexp, csv, yn, named, multiple_choice, file
    and color.

    NOTE(review): "font" is declared in TYPES but has no entry in
    TYPE_CHECKER, so a "font" option's value is passed through
    unchecked -- confirm this is intended.
    """
    TYPES = BaseOption.TYPES + ("regexp", "csv", 'yn', 'named',
                                "multiple_choice", "file", "font", "color")
    # start from optik's checkers and register ours on top
    TYPE_CHECKER = copy(BaseOption.TYPE_CHECKER)
    TYPE_CHECKER["regexp"] = check_regexp
    TYPE_CHECKER["csv"] = check_csv
    TYPE_CHECKER["yn"] = check_yn
    TYPE_CHECKER["named"] = check_named
    TYPE_CHECKER["multiple_choice"] = check_csv
    TYPE_CHECKER["file"] = check_file
    TYPE_CHECKER["color"] = check_color

    def _check_choice(self):
        """FIXME: need to override this due to optik misdesign"""
        # same checks as optik's _check_choice, but also accepting the
        # multiple_choice type
        if self.type in ("choice", "multiple_choice"):
            if self.choices is None:
                raise OptionError(
                    "must supply a list of choices for type 'choice'", self)
            elif type(self.choices) not in (types.TupleType, types.ListType):
                raise OptionError(
                    "choices must be a list of strings ('%s' supplied)"
                    % str(type(self.choices)).split("'")[1], self)
        elif self.choices is not None:
            raise OptionError(
                "must not supply choices for type %r" % self.type, self)
    # patch the base class' check sequence (run by Option.__init__):
    # index 2 holds optik's original _check_choice
    BaseOption.CHECK_METHODS[2] = _check_choice
+
+
class OptionParser(BaseParser):
    """override optik.OptionParser to use our Option class by default

    a caller-supplied option_class is now honoured instead of being
    silently replaced by Option (previous behavior)
    """
    def __init__(self, option_class=Option, *args, **kwargs):
        # forward the actual parameter, not the hard-coded default
        BaseParser.__init__(self, option_class=option_class, *args, **kwargs)
+
class ManHelpFormatter(HelpFormatter):
    """Format help using man pages ROFF format"""

    def __init__ (self,
                  indent_increment=0,
                  max_help_position=24,
                  width=79,
                  short_first=0):
        HelpFormatter.__init__ (
            self, indent_increment, max_help_position, width, short_first)

    def format_heading(self, heading):
        """a heading becomes a man .SH section title"""
        return '.SH %s\n' % heading.upper()

    def format_description(self, description):
        # description text is emitted verbatim
        return description

    def format_option(self, option):
        """format one option as an indented (.IP) paragraph"""
        try:
            optstring = option.option_strings
        except AttributeError:
            # newer optparse versions don't precompute option_strings
            optstring = self.format_option_strings(option)
        if option.help:
            # collapse the help text on a single line
            help = ' '.join([l.strip() for l in option.help.splitlines()])
        else:
            help = ''
        return '''.IP "%s"
%s
''' % (optstring, help)

    def format_head(self, optparser, pkginfo, section=1):
        """format the page head: title line, NAME, SYNOPSIS and
        DESCRIPTION sections
        """
        try:
            pgm = optparser._get_prog_name()
        except AttributeError:
            # py >= 2.4.X (dunno which X exactly, at least 2)
            pgm = optparser.get_prog_name()
        short_desc = self.format_short_description(pgm, pkginfo.short_desc)
        long_desc = self.format_long_description(pgm, pkginfo.long_desc)
        return '%s\n%s\n%s\n%s' % (self.format_title(pgm, section), short_desc,
                                   self.format_synopsis(pgm), long_desc)

    def format_title(self, pgm, section):
        """.TH title line, dated with the current local y-m-d"""
        date = '-'.join([str(num) for num in time.localtime()[:3]])
        return '.TH %s %s "%s" %s' % (pgm, section, date, pgm)

    def format_short_description(self, pgm, short_desc):
        """NAME section: program name and one-line description"""
        return '''.SH NAME
.B %s
\- %s
''' % (pgm, short_desc.strip())

    def format_synopsis(self, pgm):
        """generic SYNOPSIS section: pgm [ OPTIONS ] [ <arguments> ]"""
        return '''.SH SYNOPSIS
.B %s
[
.I OPTIONS
] [
.I <arguments>
]
''' % pgm

    def format_long_description(self, pgm, long_desc):
        """DESCRIPTION section built from the package's long
        description; a leading program name is stripped since .B
        re-emits it
        """
        long_desc = '\n'.join([line.lstrip()
                               for line in long_desc.splitlines()])
        # lone dots separate paragraphs in the package description
        long_desc = long_desc.replace('\n.\n', '\n\n')
        if long_desc.lower().startswith(pgm):
            long_desc = long_desc[len(pgm):]
        return '''.SH DESCRIPTION
.B %s
%s
''' % (pgm, long_desc.strip())

    def format_tail(self, pkginfo):
        """page tail: SEE ALSO, COPYRIGHT (with GPL boilerplate), BUGS
        and AUTHOR sections filled from the package metadata
        """
        return '''.SH SEE ALSO
/usr/share/doc/pythonX.Y-%s/

.SH COPYRIGHT
%s

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA.
.SH BUGS
Please report bugs on the project\'s mailing list:
%s

.SH AUTHOR
%s <%s>
''' % (getattr(pkginfo, 'debian_name', pkginfo.modname), pkginfo.copyright,
       pkginfo.mailinglist, pkginfo.author, pkginfo.author_email)
+
+
def generate_manpage(optparser, pkginfo, section=1, stream=sys.stdout):
    """generate a man page from an optik parser

    :param optparser: the parser whose options are documented
    :param pkginfo: module-like object carrying the package metadata
      (modname, copyright, mailinglist, author, author_email...)
    :param section: man section number (default 1, user commands)
    :param stream: output file object
    """
    formatter = ManHelpFormatter()
    print >> stream, formatter.format_head(optparser, pkginfo, section)
    print >> stream, optparser.format_option_help(formatter)
    print >> stream, formatter.format_tail(pkginfo)
+
+
+__all__ = ('OptionParser', 'Option', 'OptionGroup', 'OptionValueError',
+ 'Values')
diff --git a/patricia.py b/patricia.py
new file mode 100644
index 0000000..0b10e8b
--- /dev/null
+++ b/patricia.py
@@ -0,0 +1,187 @@
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+""" Copyright (c) 2002-2003 LOGILAB S.A. (Paris, FRANCE).
+ http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+
+a Python implementation of PATRICIA trie
+
+PATRICIA - Practical Algorithm to Retrieve Information Coded in Alphanumeric
+ D.R.Morrison (1968).
+See http://www.csse.monash.edu.au/~lloyd/tildeAlgDS/Tree/PATRICIA.html if you
+want to know what's a PATRICIA tree...
+
+TODO: _ advanced search
+ _ profile code
+ _ use mxTextTools ?
+"""
+
+__revision__ = "$Id: patricia.py,v 1.5 2003-10-31 14:18:32 syt Exp $"
+
def prefix(prfx, string):
    """return the index of the first character of *string* which differs
    from *prfx* (i.e. the length of their common prefix)
    """
    index = 0
    limit = min(len(prfx), len(string))
    while index < limit and prfx[index] == string[index]:
        index += 1
    return index
+
def split(index, string):
    """cut *string* around position *index*, returning a 3-uple:
    (text before index, character at index, text after index)
    """
    before = string[:index]
    pivot = string[index]
    after = string[index+1:]
    return before, pivot, after
+
+
class PatriciaNode:
    """a PATRICIA trie node

    value : the (compressed) string fragment held by this node
    edges : mapping of single discriminating characters to child nodes
    datas : data items attached to the word ending here (a non-empty
            list marks this node as a word terminator)
    """

    def __init__(self, value='', leaf=0, data=None):
        self.value = value
        self.edges = {}
        if leaf:
            self.datas = [data]
        else:
            self.datas = []

    def insert(self, string, data):
        """ insert the string in the trie and associate data to it
        if the string exists is the trie, data is added to the existing datas

        returns the (possibly new) root node of this subtree, so callers
        must rebind their reference to the returned value
        """
        # are we arrived ?
        if self.value == string:
            self.datas.append(data)
        # not yet !
        else:
            # check we don't break compression (value don't match)
            ind = prefix(self.value, string)
            if ind < len(self.value):
                # split this node: a new parent holds the common prefix
                pfx, e, self.value = split(ind, self.value)
                if ind < len(string):
                    # the new string branches off below the new parent
                    n = PatriciaNode(pfx)
                    n.edges[string[ind]] = PatriciaNode(string[ind+1:], 1, data)
                else:
                    # the new string IS the common prefix: parent is a leaf
                    n = PatriciaNode(pfx, 1, data)
                n.edges[e] = self
                return n
            # our value is a strict prefix of string: recurse on the edge
            # selected by the first extra character
            n_pfx, n_e, n_sfx = split(len(self.value), string)
            if self.edges.has_key(n_e):
                self.edges[n_e] = self.edges[n_e].insert(n_sfx, data)
            else:
                self.edges[n_e] = PatriciaNode(n_sfx, 1, data)
        return self

    def remove(self, string):
        """ return datas associated with string and remove string from the trie
        raise KeyError if the key isn't found
        FIXME: we should change the trie structure
        (the node is only emptied, never pruned from the tree)
        """
        if string == self.value and self.datas:
            datas = self.datas
            self.datas = []
            return datas
        else:
            # recurse below: cut off our own value and follow the edge
            pfx, e, sfx = split(len(self.value), string)
            if self.value == pfx:
                return self.edges[e].remove(sfx)
            raise KeyError(string)

    def lookup(self, string):
        """ return datas associated with string
        raise KeyError if the key isn't found
        """
        if string == self.value:
            # an empty datas list means string is only a prefix here
            if self.datas:
                return self.datas
            raise KeyError(string)
        else: # len(self.value) < len(string):
            pfx, e, sfx = split(len(self.value), string)
            if self.value == pfx:
                return self.edges[e].lookup(sfx)
            raise KeyError(string)

    def pfx_search(self, pfx, depth=-1):
        """ return all string with prefix pfx

        depth < 0 means unlimited; otherwise only words whose remaining
        length fits in depth are reported
        """
        sfxs = []
        if pfx and self.value[:len(pfx)] != pfx:
            # pfx extends beyond our value: descend the matching edge
            pfx, e, sfx = split(len(self.value), pfx)
            if self.value == pfx and self.edges.has_key(e):
                sfxs = ['%s%s%s' % (self.value, e, sfx)
                        for sfx in self.edges[e].pfx_search(sfx, depth)]
        else:
            # every word below this node matches: collect children...
            if depth != 0:
                for e, child in self.edges.items():
                    search = child.pfx_search('', depth-1-len(self.value))
                    sfxs += ['%s%s%s' % (self.value, e, sfx)
                             for sfx in search]
            # ...and this node's own word if any
            if (depth < 0 or len(self.value) <= depth):
                if self.datas:
                    sfxs.append(self.value)
        return sfxs

    def __str__(self, indent=''):
        # recursive pretty-print, two extra spaces per level
        node_str = ''.join([' %s%s:\n%s' % (indent, key,
                                            a.__str__('  %s' % indent))
                            for key, a in self.edges.items()])
        return '%s%s, %s\n%s' % (indent, self.value, self.datas, node_str)

    def __repr__(self):
        return '<PatriciaNode id=%s value=%s childs=%s datas=%s>' % (
            id(self), self.value, self.edges.keys(), self.datas)
+
+
class PatriciaTrie:
    """wrapper class for a patricia tree: delegates to the root node
    (PatriciaNode) while keeping a count of inserted words
    """

    def __init__(self):
        self._trie = None
        self.words = 0

    def insert(self, string, data=None):
        """insert a string into the tree"""
        self.words += 1
        if self._trie is not None:
            # insert() may return a new root: rebind
            self._trie = self._trie.insert(string, data)
        else:
            self._trie = PatriciaNode(string, 1, data)

    def remove(self, string):
        """remove a string from the tree, returning its datas"""
        if self._trie is None:
            raise KeyError(string)
        return self._trie.remove(string)

    def lookup(self, string):
        """look for a string into the tree, returning its datas"""
        if self._trie is None:
            raise KeyError(string)
        return self._trie.lookup(string)

    def pfx_search(self, string, depth=-1):
        """search all words begining by <string>"""
        if self._trie is None:
            raise KeyError(string)
        return self._trie.pfx_search(string, depth)

    def __str__(self):
        return str(self._trie)

    def __repr__(self):
        return '<PatriciaTrie id=%s words=%s>' % (id(self), self.words)
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..386b687
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,3 @@
+[bdist_rpm]
+packager = Sylvain Thénault <sylvain.thenault@logilab.fr>
+provides = logilab.common
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..83363ae
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python
+# pylint: disable-msg=W0404,W0622,W0704,W0613,W0152
+# Copyright (c) 2003 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+""" Generic Setup script, takes package info from __pkginfo__.py file """
+
+from __future__ import nested_scopes
+
+__revision__ = '$Id: setup.py,v 1.24 2006-04-10 15:12:53 syt Exp $'
+
+import os
+import sys
+import shutil
+from distutils.core import setup
+from distutils import command
+from distutils.command import install_lib
+from os.path import isdir, exists, join, walk
+
+# import required features
+from __pkginfo__ import modname, version, license, short_desc, long_desc, \
+ web, author, author_email
+# import optional features
+try:
+ from __pkginfo__ import distname
+except ImportError:
+ distname = modname
+try:
+ from __pkginfo__ import scripts
+except ImportError:
+ scripts = []
+try:
+ from __pkginfo__ import data_files
+except ImportError:
+ data_files = None
+try:
+ from __pkginfo__ import subpackage_of
+except ImportError:
+ subpackage_of = None
+try:
+ from __pkginfo__ import include_dirs
+except ImportError:
+ include_dirs = []
+try:
+ from __pkginfo__ import ext_modules
+except ImportError:
+ ext_modules = None
+
+BASE_BLACKLIST = ('CVS', 'debian', 'dist', 'build', '__buildlog')
+IGNORED_EXTENSIONS = ('.pyc', '.pyo', '.elc')
+
+
def ensure_scripts(linux_scripts):
    """
    Creates the proper script names required for each platform
    (taken from 4Suite): windows builds get a '.bat' suffix appended to
    every script name, other platforms keep them unchanged
    """
    from distutils import util
    if util.get_platform()[:3] != 'win':
        return linux_scripts
    return [script + '.bat' for script in linux_scripts]
+
+
def get_packages(directory, prefix):
    """return recursively the list of python subpackages found under
    *directory*; each name is dotted-prefixed with *prefix* when it is
    not empty.  Directories named 'test'/'tests' count as packages even
    without an __init__.py file.
    """
    result = []
    for entry in os.listdir(directory):
        fullpath = join(directory, entry)
        if not isdir(fullpath):
            continue
        if not (exists(join(fullpath, '__init__.py'))
                or entry in ('test', 'tests')):
            continue
        if prefix:
            subpackage = '%s.%s' % (prefix, entry)
        else:
            subpackage = entry
        result.append(subpackage)
        result += get_packages(fullpath, subpackage)
    return result
+
def export(from_dir, to_dir,
           blacklist=BASE_BLACKLIST,
           ignore_ext=IGNORED_EXTENSIONS):
    """make a mirror of from_dir in to_dir, omitting directories and
    files listed in the black list or carrying one of the ignored
    extensions; each copied file is logged on stderr
    """
    def make_mirror(arg, directory, fnames):
        """walk handler"""
        # pruning fnames in place stops os.path.walk from recursing
        # into blacklisted directories
        for norecurs in blacklist:
            try:
                fnames.remove(norecurs)
            except ValueError:
                pass
        for filename in fnames:
            # don't include binary files
            if filename[-4:] in ignore_ext:
                continue
            # nor editor backup files
            if filename[-1] == '~':
                continue
            src = '%s/%s' % (directory, filename)
            dest = to_dir + src[len(from_dir):]
            print >> sys.stderr, src, '->', dest
            if os.path.isdir(src):
                if not exists(dest):
                    os.mkdir(dest)
            else:
                # overwrite any stale copy (copy2 keeps mode and mtime)
                if exists(dest):
                    os.remove(dest)
                shutil.copy2(src, dest)
    try:
        os.mkdir(to_dir)
    except OSError, ex:
        # file exists ?
        import errno
        if ex.errno != errno.EEXIST:
            raise
    walk(from_dir, make_mirror, None)
+
+
+EMPTY_FILE = '"""generated file, don\'t modify or your data will be lost"""\n'
+
class BuildScripts(command.install_lib.install_lib):
    """extended install_lib command: ensures the 'logilab' namespace
    package has an __init__.py and installs the extra data directories
    listed in __pkginfo__.include_dirs

    NOTE(review): despite its name this class customizes library
    installation, not script building -- confirm the cmdclass mapping
    below ('install_lib') is the intended hook
    """

    def run(self):
        command.install_lib.install_lib.run(self)
        # create logilab/__init__.py if needed so the namespace package
        # is importable after installation
        product_init = join(self.install_dir, 'logilab', '__init__.py')
        if not exists(product_init):
            self.announce('creating logilab/__init__.py')
            stream = open(product_init, 'w')
            stream.write(EMPTY_FILE)
            stream.close()
        # manually install included directories if any
        if include_dirs:
            base = join('logilab', modname)
            for directory in include_dirs:
                dest = join(self.install_dir, base, directory)
                export(directory, dest)
+
def install(**kwargs):
    """setup entry point: call distutils' setup with the metadata
    imported at module level from __pkginfo__

    extra keyword arguments are passed through to setup()
    """
    if subpackage_of:
        # install below a namespace package (e.g. logilab.common): the
        # current directory maps to the dotted package name
        package = subpackage_of + '.' + modname
        kwargs['package_dir'] = {package : '.'}
        packages = [package] + get_packages(os.getcwd(), package)
    else:
        kwargs['package_dir'] = {modname : '.'}
        packages = [modname] + get_packages(os.getcwd(), modname)
    kwargs['packages'] = packages
    return setup(name = distname,
                 version = version,
                 license =license,
                 description = short_desc,
                 long_description = long_desc,
                 author = author,
                 author_email = author_email,
                 url = web,
                 scripts = ensure_scripts(scripts),
                 data_files=data_files,
                 ext_modules=ext_modules,
                 cmdclass={'install_lib': BuildScripts},
                 **kwargs
                 )
+
+if __name__ == '__main__' :
+ install()
diff --git a/shellutils.py b/shellutils.py
new file mode 100644
index 0000000..0fff117
--- /dev/null
+++ b/shellutils.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2003-2005 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""
+Some shell utilities, usefull to write some python scripts instead of shell
+scripts
+"""
+
+__revision__ = '$Id: shellutils.py,v 1.6 2005-10-21 13:23:26 syt Exp $'
+
+import os
+import glob
+import shutil
+from os.path import exists, isdir, basename, join
+
def mv(source, destination, _action=os.rename):
    """a shell like mv, supporting wildcards

    source may be a glob pattern; when it matches several files,
    destination must be an existing directory.  _action lets cp() reuse
    this machinery with a copy instead of a rename.

    :raise OSError: when nothing matches source or the action fails
    """
    sources = glob.glob(source)
    if len(sources) > 1:
        assert isdir(destination)
        for filename in sources:
            _action(filename, join(destination, basename(filename)))
    else:
        try:
            source = sources[0]
        except IndexError:
            # source still holds the original pattern here
            raise OSError('No file matching %s' % source)
        # moving a single file into a directory keeps its base name
        if isdir(destination) and exists(destination):
            destination = join(destination, basename(source))
        try:
            _action(source, destination)
        except OSError, ex:
            raise OSError('Unable to move %r to %r (%s)' % (
                source, destination, ex))
+
def rm(*files):
    """a shell like rm, supporting wildcards: every argument is glob
    expanded and each match is removed, directories recursively
    """
    for pattern in files:
        for path in glob.glob(pattern):
            if isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)
+
def cp(source, destination):
    """a shell like cp, supporting wildcards

    implemented on top of mv() with a copying action, so the same
    wildcard and destination-directory handling applies
    """
    mv(source, destination, _action=shutil.copy)
+
diff --git a/sqlgen.py b/sqlgen.py
new file mode 100644
index 0000000..88e5e65
--- /dev/null
+++ b/sqlgen.py
@@ -0,0 +1,242 @@
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+""" Copyright (c) 2002-2003 LOGILAB S.A. (Paris, FRANCE).
+ http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+
+Help to generate SQL string usable by the Python DB-API
+"""
+
+__revision__ = "$Id: sqlgen.py,v 1.11 2005-11-22 13:13:02 syt Exp $"
+
+
+# SQLGenerator ################################################################
+
class SQLGenerator :
    """
    Helper class generating SQL strings for use with python's DB-API,
    values being left as %(name)s named placeholders
    """

    def where(self, keys, addon=None) :
        """build a WHERE clause restriction from *keys*, optionally
        preceded by the raw *addon* restriction

        >>> s = SQLGenerator()
        >>> s.where(['nom'])
        'nom = %(nom)s'
        >>> s.where(['nom','prenom'])
        'nom = %(nom)s AND prenom = %(prenom)s'
        >>> s.where(['nom','prenom'], 'x.id = y.id')
        'x.id = y.id AND nom = %(nom)s AND prenom = %(prenom)s'
        """
        criteria = []
        if addon:
            criteria.append(addon)
        for key in keys:
            criteria.append("%s = %%(%s)s" % (key, key))
        return " AND ".join(criteria)

    def set(self, keys) :
        """build an UPDATE SET clause from *keys*

        >>> s = SQLGenerator()
        >>> s.set(['nom'])
        'nom = %(nom)s'
        >>> s.set(['nom','prenom'])
        'nom = %(nom)s, prenom = %(prenom)s'
        """
        assignments = ["%s = %%(%s)s" % (key, key) for key in keys]
        return ", ".join(assignments)

    def insert(self, table, params) :
        """build an INSERT statement for *table*

        params : dictionnary that will be used as in cursor.execute(sql,params)

        >>> s = SQLGenerator()
        >>> s.insert('test',{'nom':'dupont'})
        'INSERT INTO test ( nom ) VALUES ( %(nom)s )'
        """
        columns = ', '.join(params.keys())
        placeholders = ', '.join(["%%(%s)s" % key for key in params])
        return 'INSERT INTO %s ( %s ) VALUES ( %s )' % (table, columns,
                                                        placeholders)

    def select(self, table, params) :
        """build a SELECT statement on *table* restricted by *params*

        >>> s = SQLGenerator()
        >>> s.select('test',{})
        'SELECT * FROM test'
        >>> s.select('test',{'nom':'dupont'})
        'SELECT * FROM test WHERE nom = %(nom)s'
        """
        sql = 'SELECT * FROM %s' % table
        restriction = self.where(params.keys())
        if restriction :
            return '%s WHERE %s' % (sql, restriction)
        return sql

    def adv_select(self, model, tables, params, joins=None) :
        """build a SELECT with explicit columns, aliased tables and
        optional join restrictions

        model : list of columns to select
        tables : list of (table, alias) 2-uples
        params : dictionnary that will be used as in cursor.execute(sql, params)
        joins : optional list (or string) of extra where restrictions,
                usually used to perform joins

        >>> s = SQLGenerator()
        >>> s.adv_select(['column'],[('test', 't')], {})
        'SELECT column FROM test AS t'
        >>> s.adv_select(['column'],[('test', 't')], {'nom':'dupont'})
        'SELECT column FROM test AS t WHERE nom = %(nom)s'
        """
        aliased = ', '.join(["%s AS %s" % (table, alias)
                             for table, alias in tables])
        sql = 'SELECT %s FROM %s' % (', '.join(model), aliased)
        if joins and type(joins) != type(''):
            # a sequence of restrictions: merge them into one string
            joins = ' AND '.join(joins)
        restriction = self.where(params.keys(), joins)
        if restriction :
            return '%s WHERE %s' % (sql, restriction)
        return sql

    def delete(self, table, params) :
        """build a DELETE statement on *table*

        >>> s = SQLGenerator()
        >>> s.delete('test',{'nom':'dupont'})
        'DELETE FROM test WHERE nom = %(nom)s'
        """
        restriction = self.where(params.keys())
        return 'DELETE FROM %s WHERE %s' % (table, restriction)

    def update(self, table, params, unique) :
        """build an UPDATE statement on *table*, *unique* listing the
        key columns used in the WHERE clause (and excluded from SET)

        >>> s = SQLGenerator()
        >>> s.update('test',{'id':'001','nom':'dupont'},['id'])
        'UPDATE test SET nom = %(nom)s WHERE id = %(id)s'
        """
        restriction = self.where(unique)
        assignments = self.set([key for key in params if key not in unique])
        return 'UPDATE %s SET %s WHERE %s' % (table, assignments, restriction)
+
class BaseTable:
    """
    Another helper class to ease SQL table manipulation: an instance
    carries one attribute per table field plus select / update / delete
    helpers driven by the primary key.
    """
    # table_name = "default"
    # supported types are s/i/d
    # table_fields = ( ('first_field','s'), )
    # primary_key = 'first_field'

    def __init__(self, table_name, table_fields, primary_key=None):
        """
        table_name : name of the sql table
        table_fields : sequence of (field name, type code) 2-uples,
                       type code being 's', 'i' or 'd' (see sql_repr)
        primary_key : key field name, defaulting to the first field

        :raise ValueError: if a field name clashes with an existing
          attribute or method
        """
        if primary_key is None:
            self._primary_key = table_fields[0][0]
        else:
            self._primary_key = primary_key

        self._table_fields = table_fields
        self._table_name = table_name
        info = {
            'key' : self._primary_key,
            'table' : self._table_name,
            'columns' : ",".join( [ f for f, t in self._table_fields ] ),
            'values' : ",".join( [sql_repr(t, "%%(%s)s" % f)
                                  for f, t in self._table_fields] ),
            'updates' : ",".join( ["%s=%s" % (f, sql_repr(t, "%%(%s)s" % f))
                                   for f, t in self._table_fields] ),
            }
        # fixed statements: an INSERT takes no WHERE clause (the
        # previous version wrongly appended one)...
        self._insert_stmt = ("INSERT into %(table)s (%(columns)s) "
                             "VALUES (%(values)s)") % info
        # ...and an UPDATE is 'SET col=val, ...' without parenthesis nor
        # VALUES keyword; its WHERE placeholder must be the key *field*
        # name since update() interpolates with as_dict(), which has no
        # 'key' entry
        self._update_stmt = ("UPDATE %(table)s SET %(updates)s "
                             "WHERE %(key)s=%%(%(key)s)s") % info
        self._select_stmt = ("SELECT %(columns)s FROM %(table)s "
                             "WHERE %(key)s=%%(key)s") % info
        self._delete_stmt = ("DELETE FROM %(table)s "
                             "WHERE %(key)s=%%(key)s") % info

        # expose one attribute per field, initialized to None
        for k, t in table_fields:
            if hasattr(self, k):
                raise ValueError("Cannot use %s as a table field" % k)
            setattr(self, k, None)


    def as_dict(self):
        """return the current field values as a dictionary"""
        d = {}
        for k, t in self._table_fields:
            d[k] = getattr(self, k)
        return d

    def select(self, cursor):
        """fetch the row matching this instance's primary key value and
        update the instance's attributes from it

        :raise ValueError: if the query doesn't return exactly one row
        """
        # NOTE(review): the key value is interpolated unquoted, so this
        # only works reliably for numeric keys -- confirm
        d = { 'key' : getattr(self, self._primary_key) }
        cursor.execute(self._select_stmt % d)
        rows = cursor.fetchall()
        if len(rows) != 1:
            msg = "Select: ambiguous query returned %d rows"
            raise ValueError(msg % len(rows))
        for (f, t), v in zip(self._table_fields, rows[0]):
            setattr(self, f, v)

    def update(self, cursor):
        """write the current field values back to the database"""
        d = self.as_dict()
        cursor.execute(self._update_stmt % d)

    def delete(self, cursor):
        """delete the row matching this instance's primary key value

        (the previous version built the parameter dict but never
        executed the statement)
        """
        d = { 'key' : getattr(self, self._primary_key) }
        cursor.execute(self._delete_stmt % d)
+
+# Helper functions #############################################################
+
def name_fields(cursor, records):
    """
    Take a cursor and a list of records fetched with that cursor, then
    return a list of dictionnaries (one per record) mapping column names
    to the record's values.

    cursor : cursor used to execute the query
    records : list returned by fetch*()
    """
    named_records = []
    for record in records:
        as_dict = {}
        # column names come from the cursor's description metadata
        for index, cell in enumerate(record):
            as_dict[cursor.description[index][0]] = cell
        named_records.append(as_dict)
    return named_records
+
def sql_repr(type, val):
    """return the SQL literal form of *val* for a field of type code
    *type*: 's' (string) values get single-quoted, any other type code
    is returned unchanged

    NOTE(review): quotes inside the value are not escaped -- confirm
    callers only feed trusted values
    """
    if type != 's':
        return val
    return "'%s'" % (val,)
+
+
if __name__ == "__main__":
    # run the doctests embedded in this module's docstrings
    import doctest
    from logilab.common import sqlgen
    print doctest.testmod(sqlgen)
diff --git a/table.py b/table.py
new file mode 100644
index 0000000..5d9b5b2
--- /dev/null
+++ b/table.py
@@ -0,0 +1,958 @@
+"""Table management module
+"""
+
+__revision__ = '$Id: table.py,v 1.18 2006-04-09 22:30:53 nico Exp $'
+
+from warnings import warn
+
+from logilab.common.compat import enumerate, sum, set
+
+class Table(object):
+    """Table defines a data table with column and row names.
+    inv:
+        len(self.data) <= len(self.row_names)
+        forall(self.data, lambda x: len(x) <= len(self.col_names))
+    """
+
+    def __init__(self, default_value=0, col_names=None, row_names=None):
+        # default_value fills cells created implicitly when rows/columns
+        # are added
+        self.col_names = []
+        self.row_names = []
+        self.data = []
+        self.default_value = default_value
+        if col_names:
+            self.create_columns(col_names)
+        if row_names:
+            self.create_rows(row_names)
+
+    def _next_row_name(self):
+        # generated names are 1-based: 'row1', 'row2', ...
+        return 'row%s' % (len(self.row_names)+1)
+
+    def __iter__(self):
+        # iterating over the table yields its rows (lists of cells)
+        return iter(self.data)
+
+    def __eq__(self, other):
+        # tables compare equal on their data only; row/col names are ignored
+        if other is None:
+            return False
+        else:
+            return list(self) == list(other)
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __len__(self):
+        # the table's length is its number of rows
+        return len(self.row_names)
+
+    ## Rows / Columns creation #################################################
+    def create_rows(self, row_names):
+        """Appends row_names to the list of existing rows
+        """
+        self.row_names.extend(row_names)
+        for row_name in row_names:
+            # each new row is filled with the default value
+            self.data.append([self.default_value]*len(self.col_names))
+
+    def create_columns(self, col_names):
+        """Appends col_names to the list of existing columns
+        """
+        for col_name in col_names:
+            self.create_column(col_name)
+
+    def create_row(self, row_name=None):
+        """Creates a rowname to the row_names list
+        """
+        # generate a default name ('rowN') when none is given
+        row_name = row_name or self._next_row_name()
+        self.row_names.append(row_name)
+        self.data.append([self.default_value]*len(self.col_names))
+
+
+    def create_column(self, col_name):
+        """Creates a colname to the col_names list
+        """
+        self.col_names.append(col_name)
+        # existing rows are extended with the default value
+        for row in self.data:
+            row.append(self.default_value)
+
+    ## Sort by column ##########################################################
+    def sort_by_column_id(self, col_id, method = 'asc'):
+        """Sorts the table (in-place) according to data stored in col_id
+        """
+        try:
+            col_index = self.col_names.index(col_id)
+            self.sort_by_column_index(col_index, method)
+        except ValueError:
+            # list.index raises ValueError; expose it as KeyError on the id
+            raise KeyError("Col (%s) not found in table" % (col_id))
+
+
+    def sort_by_column_index(self, col_index, method = 'asc'):
+        """Sorts the table 'in-place' according to data stored in col_index
+
+        method should be in ('asc', 'desc')
+        """
+        # decorate-sort-undecorate; ties on the key value fall back to
+        # comparing the rows themselves, then the row names
+        sort_list = [(row[col_index], row, row_name)
+                     for row, row_name in zip(self.data, self.row_names)]
+        # Sorting sort_list will sort according to col_index
+        sort_list.sort()
+        # If we want reverse sort, then reverse list
+        if method.lower() == 'desc':
+            sort_list.reverse()
+
+        # Rebuild data / row names
+        self.data = []
+        self.row_names = []
+        for val, row, row_name in sort_list:
+            self.data.append(row)
+            self.row_names.append(row_name)
+
+    def groupby(self, colname, *others):
+        """builds indexes of data
+        :returns: nested dictionaries pointing to actual rows
+        """
+        groups = {}
+        colnames = (colname,) + others
+        col_indexes = [self.col_names.index(col_id) for col_id in colnames]
+        for row in self.data:
+            ptr = groups
+            # walk/build one nesting level per grouping column; the leaf
+            # level maps the last column's value to a sub-Table
+            for col_index in col_indexes[:-1]:
+                ptr = ptr.setdefault(row[col_index], {})
+            ptr = ptr.setdefault(row[col_indexes[-1]],
+                                 Table(default_value=self.default_value,
+                                       col_names=self.col_names))
+            ptr.append_row(tuple(row))
+        return groups
+
+    def select(self, colname, value):
+        """Returns the sub-Table of rows whose colname cell equals value.
+        NOTE(review): returns an empty *list*, not an empty Table, when no
+        row matches.
+        """
+        grouped = self.groupby(colname)
+        try:
+            return grouped[value]
+        except KeyError:
+            return []
+
+    def remove(self, colname, value):
+        """Removes (in-place) every row whose colname cell equals value"""
+        col_index = self.col_names.index(colname)
+        # iterate over a copy since self.data is mutated inside the loop
+        for row in self.data[:]:
+            if row[col_index] == value:
+                self.data.remove(row)
+
+
+    ## The 'setter' part #######################################################
+    def set_cell(self, row_index, col_index, data):
+        """sets value of cell 'row_index', 'col_index' to data
+        """
+        self.data[row_index][col_index] = data
+
+
+    def set_cell_by_ids(self, row_id, col_id, data):
+        """sets value of cell mapped by row_id and col_id to data
+        Raises a KeyError if row_id or col_id are not found in the table
+        """
+        try:
+            row_index = self.row_names.index(row_id)
+        except ValueError:
+            raise KeyError("Row (%s) not found in table" % (row_id))
+        else:
+            try:
+                col_index = self.col_names.index(col_id)
+                self.data[row_index][col_index] = data
+            except ValueError:
+                raise KeyError("Column (%s) not found in table" % (col_id))
+
+
+    def set_row(self, row_index, row_data):
+        """sets the 'row_index' row
+        pre:
+            type(row_data) == types.ListType
+            len(row_data) == len(self.col_names)
+        """
+        self.data[row_index] = row_data
+
+
+    def set_row_by_id(self, row_id, row_data):
+        """sets the 'row_id' row
+        pre:
+            type(row_data) == types.ListType
+            len(row_data) == len(self.col_names)
+        Raises a KeyError if row_id is not found
+        """
+        try:
+            row_index = self.row_names.index(row_id)
+            self.set_row(row_index, row_data)
+        except ValueError:
+            raise KeyError('Row (%s) not found in table' % (row_id))
+
+
+    def append_row(self, row_data, row_name=None):
+        """Appends a row to the table
+        pre:
+            type(row_data) == types.ListType
+            len(row_data) == len(self.col_names)
+        """
+        # generate a default name ('rowN') when none is given
+        row_name = row_name or self._next_row_name()
+        self.row_names.append(row_name)
+        self.data.append(row_data)
+
+
+    def insert_row(self, index, row_data, row_name=None):
+        """Appends row_data before 'index' in the table. To make 'insert'
+        behave like 'list.insert', inserting in an out of range index will
+        insert row_data to the end of the list
+        pre:
+            type(row_data) == types.ListType
+            len(row_data) == len(self.col_names)
+        """
+        row_name = row_name or self._next_row_name()
+        self.row_names.insert(index, row_name)
+        self.data.insert(index, row_data)
+
+
+    def delete_row(self, index):
+        """Deletes the 'index' row in the table, and returns it.
+        Raises an IndexError if index is out of range
+        """
+        self.row_names.pop(index)
+        return self.data.pop(index)
+
+
+    def delete_row_by_id(self, row_id):
+        """Deletes the 'row_id' row in the table.
+        Raises a KeyError if row_id was not found.
+        """
+        try:
+            row_index = self.row_names.index(row_id)
+            self.delete_row(row_index)
+        except ValueError:
+            raise KeyError('Row (%s) not found in table' % (row_id))
+
+
+    def set_column(self, col_index, col_data):
+        """sets the 'col_index' column
+        pre:
+            type(col_data) == types.ListType
+            len(col_data) == len(self.row_names)
+        """
+
+        for row_index, cell_data in enumerate(col_data):
+            self.data[row_index][col_index] = cell_data
+
+
+    def set_column_by_id(self, col_id, col_data):
+        """sets the 'col_id' column
+        pre:
+            type(col_data) == types.ListType
+            len(col_data) == len(self.row_names)
+        Raises a KeyError if col_id is not found
+        """
+        try:
+            col_index = self.col_names.index(col_id)
+            self.set_column(col_index, col_data)
+        except ValueError:
+            raise KeyError('Column (%s) not found in table' % (col_id))
+
+
+    def append_column(self, col_data, col_name):
+        """Appends a 'col_name' column holding col_data
+        pre:
+            type(col_data) == types.ListType
+            len(col_data) == len(self.row_names)
+        """
+        self.col_names.append(col_name)
+        for row_index, cell_data in enumerate(col_data):
+            self.data[row_index].append(cell_data)
+
+
+    def insert_column(self, index, col_data, col_name):
+        """Appends col_data before 'index' in the table. To make 'insert'
+        behave like 'list.insert', inserting in an out of range index will
+        insert col_data to the end of the list
+        pre:
+            type(col_data) == types.ListType
+            len(col_data) == len(self.row_names)
+        """
+        self.col_names.insert(index, col_name)
+        for row_index, cell_data in enumerate(col_data):
+            self.data[row_index].insert(index, cell_data)
+
+
+    def delete_column(self, index):
+        """Deletes the 'index' column in the table, and returns it.
+        Raises an IndexError if index is out of range
+        """
+        self.col_names.pop(index)
+        return [row.pop(index) for row in self.data]
+
+
+    def delete_column_by_id(self, col_id):
+        """Deletes the 'col_id' col in the table.
+        Raises a KeyError if col_id was not found.
+        """
+        try:
+            col_index = self.col_names.index(col_id)
+            self.delete_column(col_index)
+        except ValueError:
+            raise KeyError('Column (%s) not found in table' % (col_id))
+
+
+    ## The 'getter' part #######################################################
+
+    def get_shape(self):
+        """Returns a tuple which represents the table's shape:
+        (number of rows, number of columns)
+        """
+        return len(self.row_names), len(self.col_names)
+    shape = property(get_shape)
+
+    def __getitem__(self, indices):
+        """provided for convenience: supports table[i], table[i,j],
+        table['row_id'], table['row_id','col_id'] and mixed int/str forms.
+        Returns a sub-Table, a list of cells, or a single cell depending
+        on whether each index selects one or several rows/columns.
+        NOTE(review): an actual slice object falls into the
+        'take everything' branch -- explicit slices are silently ignored.
+        """
+        rows, multirows = None, False
+        cols, multicols = None, False
+        if isinstance(indices, tuple):
+            rows = indices[0]
+            if len(indices) > 1:
+                cols = indices[1]
+        else:
+            rows = indices
+        # define row slice
+        if isinstance(rows,str):
+            try:
+                rows = self.row_names.index(rows)
+            except ValueError:
+                raise KeyError("Row (%s) not found in table" % (rows))
+        if isinstance(rows,int):
+            rows = slice(rows,rows+1)
+            multirows = False
+        else:
+            rows = slice(None)
+            multirows = True
+        # define col slice
+        if isinstance(cols,str):
+            try:
+                cols = self.col_names.index(cols)
+            except ValueError:
+                raise KeyError("Column (%s) not found in table" % (cols))
+        if isinstance(cols,int):
+            cols = slice(cols,cols+1)
+            multicols = False
+        else:
+            cols = slice(None)
+            multicols = True
+        # get sub-table
+        tab = Table()
+        tab.default_value = self.default_value
+        tab.create_rows(self.row_names[rows])
+        tab.create_columns(self.col_names[cols])
+        for idx,row in enumerate(self.data[rows]):
+            tab.set_row(idx, row[cols])
+        # unwrap the sub-table when a single row and/or column was asked for
+        if multirows :
+            if multicols:
+                return tab
+            else:
+                return [item[0] for item in tab.data]
+        else:
+            if multicols:
+                return tab.data[0]
+            else:
+                return tab.data[0][0]
+
+    def get_dimensions(self):
+        """Returns a tuple which represents the table's shape
+        """
+        warn('table.get_dimensions() is deprecated, use table.shape instead',
+             DeprecationWarning)
+        return self.shape
+
+    def get_element(self, row_index, col_index):
+        """Returns the element at [row_index][col_index]
+        """
+        warn('Table.get_element() is deprecated, use Table.get_cell instead',
+             DeprecationWarning)
+        return self.data[row_index][col_index]
+
+    def get_cell(self, row_index, col_index):
+        """Returns the element at [row_index][col_index] (deprecated)"""
+        warn('table.get_cell(i,j) is deprecated, use table[i,j] instead',
+             DeprecationWarning)
+        return self.data[row_index][col_index]
+
+    def get_cell_by_ids(self, row_id, col_id):
+        """Returns the element at [row_id][col_id]
+        """
+        warn('table.get_cell_by_ids(i,j) is deprecated, use table[i,j] instead',
+             DeprecationWarning)
+        try:
+            row_index = self.row_names.index(row_id)
+        except ValueError:
+            raise KeyError("Row (%s) not found in table" % (row_id))
+        else:
+            try:
+                col_index = self.col_names.index(col_id)
+            except ValueError:
+                raise KeyError("Column (%s) not found in table" % (col_id))
+        return self.data[row_index][col_index]
+
+    def get_row(self, row_index):
+        """Returns the 'row_index' row
+        """
+        warn('table.get_row(i) is deprecated, use table[i] instead',
+             DeprecationWarning)
+        return self.data[row_index]
+
+    def get_row_by_id(self, row_id):
+        """Returns the 'row_id' row
+        """
+        warn('table.get_row_by_id(i) is deprecated, use table[i] instead',
+             DeprecationWarning)
+        try:
+            row_index = self.row_names.index(row_id)
+        except ValueError:
+            raise KeyError("Row (%s) not found in table" % (row_id))
+        return self.get_row(row_index)
+
+    def get_column(self, col_index, distinct=False):
+        """Returns the 'col_index' col
+        """
+        warn('table.get_column(i) is deprecated, use table[:,i] instead',
+             DeprecationWarning)
+        col = [row[col_index] for row in self.data]
+        if distinct:
+            # distinct values, in arbitrary (set) order
+            return set(col)
+        else:
+            return col
+
+    def get_column_by_id(self, col_id, distinct=False):
+        """Returns the 'col_id' col
+        """
+        warn('table.get_column_by_id(i) is deprecated, use table[:,i] instead',
+             DeprecationWarning)
+        try:
+            col_index = self.col_names.index(col_id)
+        except ValueError:
+            raise KeyError("Column (%s) not found in table" % (col_id))
+        return self.get_column(col_index, distinct)
+
+
+    def get_rows(self):
+        """Returns all the rows in the table
+        """
+        warn('table.get_rows() is deprecated, just iterate over table instead',
+             DeprecationWarning)
+        return self.data
+
+
+    def get_columns(self):
+        """Returns all the columns in the table
+        """
+        # NOTE: delegates to the deprecated get_column(), so this emits a
+        # DeprecationWarning per column
+        return [self.get_column(index) for index in range(len(self.col_names))]
+
+
+    def apply_stylesheet(self, stylesheet):
+        """Applies the stylesheet to this table
+        """
+        # SECURITY: instructions are compiled code objects evaluated with
+        # access to self -- only apply trusted stylesheets
+        for instruction in stylesheet.instructions:
+            eval(instruction)
+
+
+    def transpose(self):
+        """Keeps the self object intact, and returns the transposed (rotated)
+        table.
+        """
+        transposed = Table()
+        transposed.create_rows(self.col_names)
+        transposed.create_columns(self.row_names)
+        # each original column becomes a row of the transposed table
+        for col_index, column in enumerate(self.get_columns()):
+            transposed.set_row(col_index, column)
+        return transposed
+
+
+    def pprint(self):
+        """returns a string representing the table in a pretty
+        printed 'text' format.
+        """
+        # The maximum row name (to know the start_index of the first col)
+        max_row_name = 0
+        for row_name in self.row_names:
+            if len(row_name) > max_row_name:
+                max_row_name = len(row_name)
+        col_start = max_row_name + 5
+
+        lines = []
+        # Build the 'first' line <=> the col_names one
+        # The first cell <=> an empty one
+        col_names_line = [' '*col_start]
+        for col_name in self.col_names:
+            # NOTE(review): assumes names are unicode strings -- encode()
+            # would fail on non-latin-1 characters; confirm with callers
+            col_names_line.append(col_name.encode('iso-8859-1') + ' '*5)
+        lines.append('|' + '|'.join(col_names_line) + '|')
+        max_line_length = len(lines[0])
+
+        # Build the table
+        for row_index, row in enumerate(self.data):
+            line = []
+            # First, build the row_name's cell
+            row_name = self.row_names[row_index].encode('iso-8859-1')
+            line.append(row_name + ' '*(col_start-len(row_name)))
+
+            # Then, build all the table's cell for this line.
+            for col_index, cell in enumerate(row):
+                col_name_length = len(self.col_names[col_index]) + 5
+                data = str(cell)
+                line.append(data + ' '*(col_name_length - len(data)))
+            lines.append('|' + '|'.join(line) + '|')
+            if len(lines[-1]) > max_line_length:
+                max_line_length = len(lines[-1])
+
+        # Wrap the table with '-' to make a frame
+        lines.insert(0, '-'*max_line_length)
+        lines.append('-'*max_line_length)
+        return '\n'.join(lines)
+
+
+    def __repr__(self):
+        # delegate to the data's own repr (row/col names are not shown)
+        return repr(self.data)
+
+    def as_text(self):
+        """Returns the table data as tab-separated lines (no names)"""
+        data = []
+        # We must convert cells into strings before joining them
+        for row in self.data:
+            data.append([str(cell) for cell in row])
+        lines = ['\t'.join(row) for row in data]
+        return '\n'.join(lines)
+
+
+
+class TableStyle:
+    """Defines a table's style: per-column size, alignment and unit,
+    keyed on column name.
+    """
+
+    def __init__(self, table):
+
+        self._table = table
+        self.size = dict([(col_name,'1*') for col_name in table.col_names])
+        # __row_column__ is a special key to define the first column which
+        # actually has no name (<=> left most column <=> row names column)
+        self.size['__row_column__'] = '1*'
+        self.alignment = dict([(col_name,'right')
+                               for col_name in table.col_names])
+        self.alignment['__row_column__'] = 'right'
+
+        # We shouldn't have to create an entry for
+        # the 1st col (the row_column one)
+        self.units = dict([(col_name,'') for col_name in table.col_names])
+        self.units['__row_column__'] = ''
+
+    # XXX FIXME : params order should be reversed for all set() methods
+    def set_size(self, value, col_id):
+        """sets the size of the specified col_id to value
+        """
+        self.size[col_id] = value
+
+    def set_size_by_index(self, value, col_index):
+        """Allows to set the size according to the column index rather than
+        using the column's id.
+        BE CAREFUL : the '0' column is the '__row_column__' one !
+        """
+        if col_index == 0:
+            col_id = '__row_column__'
+        else:
+            # table columns are shifted by one because of '__row_column__'
+            col_id = self._table.col_names[col_index-1]
+
+        self.size[col_id] = value
+
+
+    def set_alignment(self, value, col_id):
+        """sets the alignment of the specified col_id to value
+        """
+        self.alignment[col_id] = value
+
+
+    def set_alignment_by_index(self, value, col_index):
+        """Allows to set the alignment according to the column index rather than
+        using the column's id.
+        BE CAREFUL : the '0' column is the '__row_column__' one !
+        """
+        if col_index == 0:
+            col_id = '__row_column__'
+        else:
+            col_id = self._table.col_names[col_index-1]
+
+        self.alignment[col_id] = value
+
+
+    def set_unit(self, value, col_id):
+        """sets the unit of the specified col_id to value
+        """
+        self.units[col_id] = value
+
+
+    def set_unit_by_index(self, value, col_index):
+        """Allows to set the unit according to the column index rather than
+        using the column's id.
+        BE CAREFUL : the '0' column is the '__row_column__' one !
+        (Note that in the 'unit' case, you shouldn't have to set a unit
+        for the 1st column (the __row__column__ one))
+        """
+        if col_index == 0:
+            col_id = '__row_column__'
+        else:
+            col_id = self._table.col_names[col_index-1]
+
+        self.units[col_id] = value
+
+
+    def get_size(self, col_id):
+        """Returns the size of the specified col_id
+        """
+        return self.size[col_id]
+
+
+    def get_size_by_index(self, col_index):
+        """Allows to get the size according to the column index rather than
+        using the column's id.
+        BE CAREFUL : the '0' column is the '__row_column__' one !
+        """
+        if col_index == 0:
+            col_id = '__row_column__'
+        else:
+            # table columns are shifted by one because of '__row_column__'
+            col_id = self._table.col_names[col_index-1]
+
+        return self.size[col_id]
+
+
+    def get_alignment(self, col_id):
+        """Returns the alignment of the specified col_id
+        """
+        return self.alignment[col_id]
+
+
+    def get_alignment_by_index(self, col_index):
+        """Allows to get the alignment according to the column index rather than
+        using the column's id.
+        BE CAREFUL : the '0' column is the '__row_column__' one !
+        """
+        if col_index == 0:
+            col_id = '__row_column__'
+        else:
+            col_id = self._table.col_names[col_index-1]
+
+        return self.alignment[col_id]
+
+
+    def get_unit(self, col_id):
+        """Returns the unit of the specified col_id
+        """
+        return self.units[col_id]
+
+
+    def get_unit_by_index(self, col_index):
+        """Allows to get the unit according to the column index rather than
+        using the column's id.
+        BE CAREFUL : the '0' column is the '__row_column__' one !
+        """
+        if col_index == 0:
+            col_id = '__row_column__'
+        else:
+            col_id = self._table.col_names[col_index-1]
+
+        return self.units[col_id]
+
+
+import re
+# matches 'row_col' cell references inside stylesheet rule strings
+CELL_PROG = re.compile("([0-9]+)_([0-9]+)")
+
+class TableStyleSheet:
+    """A simple Table stylesheet
+    Rules are expressions where cells are defined by the row_index
+    and col_index separated by an underscore ('_').
+    For example, suppose you want to say that the (2,5) cell must be
+    the sum of its two preceding cells in the row, you would create
+    the following rule :
+        2_5 = 2_3 + 2_4
+    You can also use all the math.* operations you want. For example:
+        2_5 = sqrt(2_3**2 + 2_4**2)
+    """
+
+    def __init__(self, rules = None):
+        rules = rules or []
+        self.rules = []
+        self.instructions = []
+        for rule in rules:
+            self.add_rule(rule)
+
+
+    def add_rule(self, rule):
+        """Adds a rule to the stylesheet rules
+        """
+        try:
+            source_code = ['from math import *']
+            source_code.append(CELL_PROG.sub(r'self.data[\1][\2]', rule))
+            # SECURITY: the compiled rule is later evaluated by
+            # Table.apply_stylesheet -- never add untrusted rule strings
+            self.instructions.append(compile('\n'.join(source_code),
+                                    'table.py', 'exec'))
+            self.rules.append(rule)
+        except SyntaxError:
+            print "Bad Stylesheet Rule : %s [skipped]"%rule
+
+
+    def add_rowsum_rule(self, dest_cell, row_index, start_col, end_col):
+        """Creates and adds a rule to sum over the row at row_index from
+        start_col to end_col.
+        dest_cell is a tuple of two elements (x,y) of the destination cell
+        No check is done for indexes ranges.
+        pre:
+            start_col >= 0
+            end_col > start_col
+        """
+        cell_list = ['%d_%d'%(row_index, index) for index in range(start_col,
+                                                              end_col + 1)]
+        rule = '%d_%d=' % dest_cell + '+'.join(cell_list)
+        self.add_rule(rule)
+
+
+    def add_rowavg_rule(self, dest_cell, row_index, start_col, end_col):
+        """Creates and adds a rule to make the row average (from start_col
+        to end_col)
+        dest_cell is a tuple of two elements (x,y) of the destination cell
+        No check is done for indexes ranges.
+        pre:
+            start_col >= 0
+            end_col > start_col
+        """
+        cell_list = ['%d_%d'%(row_index, index) for index in range(start_col,
+                                                              end_col + 1)]
+        num = (end_col - start_col + 1)
+        # divide by a float so the average isn't truncated to an integer
+        rule = '%d_%d=' % dest_cell + '('+'+'.join(cell_list)+')/%f'%num
+        self.add_rule(rule)
+
+
+    def add_colsum_rule(self, dest_cell, col_index, start_row, end_row):
+        """Creates and adds a rule to sum over the col at col_index from
+        start_row to end_row.
+        dest_cell is a tuple of two elements (x,y) of the destination cell
+        No check is done for indexes ranges.
+        pre:
+            start_row >= 0
+            end_row > start_row
+        """
+        cell_list = ['%d_%d'%(index, col_index) for index in range(start_row,
+                                                              end_row + 1)]
+        rule = '%d_%d=' % dest_cell + '+'.join(cell_list)
+        self.add_rule(rule)
+
+
+    def add_colavg_rule(self, dest_cell, col_index, start_row, end_row):
+        """Creates and adds a rule to make the col average (from start_row
+        to end_row)
+        dest_cell is a tuple of two elements (x,y) of the destination cell
+        No check is done for indexes ranges.
+        pre:
+            start_row >= 0
+            end_row > start_row
+        """
+        cell_list = ['%d_%d'%(index, col_index) for index in range(start_row,
+                                                              end_row + 1)]
+        num = (end_row - start_row + 1)
+        rule = '%d_%d=' % dest_cell + '('+'+'.join(cell_list)+')/%f'%num
+        self.add_rule(rule)
+
+
+
+class TableCellRenderer:
+    """Defines a simple text renderer
+    """
+
+    def __init__(self, **properties):
+        """keywords should be properties with an associated boolean as value.
+        For example :
+            renderer = TableCellRenderer(units = True, alignment = False)
+        An unspecified property will have a 'False' value by default.
+        Possible properties are :
+            alignment, unit
+        """
+        self.properties = properties
+
+
+    def render_cell(self, cell_coord, table, table_style):
+        """Renders the cell at 'cell_coord' in the table, using table_style
+        """
+        row_index, col_index = cell_coord
+        cell_value = table.data[row_index][col_index]
+        # col_index+1 because style index 0 is the '__row_column__' column
+        final_content = self._make_cell_content(cell_value,
+                                                table_style, col_index +1)
+        return self._render_cell_content(final_content,
+                                         table_style, col_index + 1)
+
+
+    def render_row_cell(self, row_name, table, table_style):
+        """Renders the cell for 'row_id' row
+        """
+        cell_value = row_name.encode('iso-8859-1')
+        return self._render_cell_content(cell_value, table_style, 0)
+
+
+    def render_col_cell(self, col_name, table, table_style):
+        """Renders the header cell for the 'col_name' column
+        """
+        cell_value = col_name.encode('iso-8859-1')
+        col_index = table.col_names.index(col_name)
+        return self._render_cell_content(cell_value, table_style, col_index +1)
+
+
+
+    def _render_cell_content(self, content, table_style, col_index):
+        """Makes the appropriate rendering for this cell content.
+        Rendering properties will be searched using the
+        *table_style.get_xxx_by_index(col_index)' methods
+
+        **This method should be overridden in the derived renderer classes.**
+        """
+        return content
+
+
+    def _make_cell_content(self, cell_content, table_style, col_index):
+        """Makes the cell content (adds decoration data, like units for
+        example)
+        """
+        final_content = cell_content
+        # the 'skip_zero' property gives a replacement string for zero cells
+        if 'skip_zero' in self.properties:
+            replacement_char = self.properties['skip_zero']
+        else:
+            replacement_char = 0
+        if replacement_char and final_content == 0:
+            return replacement_char
+
+        try:
+            units_on = self.properties['units']
+            if units_on:
+                final_content = self._add_unit(
+                    cell_content, table_style, col_index)
+        except KeyError:
+            # 'units' property not set: leave the content as is
+            pass
+
+        return final_content
+
+
+    def _add_unit(self, cell_content, table_style, col_index):
+        """Adds unit to the cell_content if needed
+        """
+        unit = table_style.get_unit_by_index(col_index)
+        return str(cell_content) + " " + unit
+
+
+
+class DocbookRenderer(TableCellRenderer):
+    """Defines how to render a cell for a docbook table
+    """
+
+    def define_col_header(self, col_index, table_style):
+        """Computes the colspec element according to the style
+        """
+        size = table_style.get_size_by_index(col_index)
+        return '<colspec colname="c%d" colwidth="%s"/>\n' % \
+               (col_index, size)
+
+
+ def _render_cell_content(self, cell_content, table_style, col_index):
+ """Makes the appropriate rendering for this cell content.
+ Rendering properties will be searched using the
+ *table_style.get_xxx_by_index(col_index)' methods.
+ """
+ try:
+ align_on = self.properties['alignment']
+ alignment = table_style.get_alignment_by_index(col_index)
+ if align_on:
+ return "<entry align='%s'>%s</entry>\n" % \
+ (alignment, cell_content)
+ except KeyError:
+ # KeyError <=> Default alignment
+ return "<entry>%s</entry>\n" % cell_content
+
+
+class TableWriter:
+    """A class to write tables
+    """
+
+    def __init__(self, stream, table, style, **properties):
+        self._stream = stream
+        # fall back to a default style when none is given
+        self.style = style or TableStyle(table)
+        self._table = table
+        self.properties = properties
+        # must be supplied via set_renderer() before writing
+        self.renderer = None
+
+
+    def set_style(self, style):
+        """sets the table's associated style
+        """
+        self.style = style
+
+
+    def set_renderer(self, renderer):
+        """sets the way to render cell
+        """
+        self.renderer = renderer
+
+
+    def update_properties(self, **properties):
+        """Updates writer's properties (for cell rendering)
+        """
+        self.properties.update(properties)
+
+
+    def write_table(self, title = ""):
+        """Writes the table
+        """
+        raise NotImplementedError("write_table must be implemented !")
+
+
+
+class DocbookTableWriter(TableWriter):
+    """Defines an implementation of TableWriter to write a table in Docbook
+    """
+
+    def _write_headers(self):
+        """Writes col headers
+        """
+        # Define col_headers (colspec elements); the +1 accounts for the
+        # leading '__row_column__' (row names) column
+        for col_index in range(len(self._table.col_names)+1):
+            self._stream.write(self.renderer.define_col_header(col_index,
+                                                               self.style))
+
+        self._stream.write("<thead>\n<row>\n")
+        # XXX FIXME : write an empty entry <=> the first (__row_column) column
+        self._stream.write('<entry></entry>\n')
+        for col_name in self._table.col_names:
+            self._stream.write(self.renderer.render_col_cell(
+                col_name, self._table,
+                self.style))
+
+        self._stream.write("</row>\n</thead>\n")
+
+
+    def _write_body(self):
+        """Writes the table body
+        """
+        self._stream.write('<tbody>\n')
+
+        for row_index, row in enumerate(self._table.data):
+            self._stream.write('<row>\n')
+            row_name = self._table.row_names[row_index]
+            # Write the first entry (row_name)
+            self._stream.write(self.renderer.render_row_cell(row_name,
+                                                             self._table,
+                                                             self.style))
+
+            for col_index, cell in enumerate(row):
+                self._stream.write(self.renderer.render_cell(
+                    (row_index, col_index),
+                    self._table, self.style))
+
+            self._stream.write('</row>\n')
+
+        self._stream.write('</tbody>\n')
+
+
+ def write_table(self, title = ""):
+ """Writes the table
+ """
+ self._stream.write('<table>\n<title>%s></title>\n'%(title))
+ self._stream.write(
+ '<tgroup cols="%d" align="left" colsep="1" rowsep="1">\n'%
+ (len(self._table.col_names)+1))
+ self._write_headers()
+ self._write_body()
+
+ self._stream.write('</tgroup>\n</table>\n')
+
+
diff --git a/test/_test_astng.py b/test/_test_astng.py
new file mode 100644
index 0000000..c526ef1
--- /dev/null
+++ b/test/_test_astng.py
@@ -0,0 +1,14 @@
+"""run all astng related tests"""
+
+__revision__ = '$Id: _test_astng.py,v 1.1 2005-04-25 14:47:04 syt Exp $'
+
+import unittest
+
+# star-import the unittest_astng* modules so unittest.main() discovers
+# every TestCase they define
+from unittest_astng import *
+from unittest_astng_builder import *
+from unittest_astng_utils import *
+from unittest_astng_manager import *
+from unittest_astng_inspector import *
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/data/__init__.py b/test/data/__init__.py
new file mode 100644
index 0000000..1d5cf02
--- /dev/null
+++ b/test/data/__init__.py
@@ -0,0 +1 @@
+__revision__="$Id: __init__.py,v 1.1 2003-10-20 06:39:32 syt Exp $"
diff --git a/test/data/foo.txt b/test/data/foo.txt
new file mode 100644
index 0000000..a08c29e
--- /dev/null
+++ b/test/data/foo.txt
@@ -0,0 +1,9 @@
+a
+b
+c
+d
+e
+f
+g
+h
+
diff --git a/test/data/module.py b/test/data/module.py
new file mode 100644
index 0000000..8cadbca
--- /dev/null
+++ b/test/data/module.py
@@ -0,0 +1,88 @@
+# -*- coding: Latin-1 -*-
+"""test module for astng
+"""
+
+__revision__ = '$Id: module.py,v 1.9 2003-11-24 13:40:26 syt Exp $'
+
+from logilab.common import modutils, Execute as spawn
+from logilab.common.astutils import *
+import os.path
+
+MY_DICT = {}
+
+
+def global_access(key, val):
+ """function test"""
+ local = 1
+ MY_DICT[key] = val
+ for i in val:
+ if i:
+ del MY_DICT[i]
+ continue
+ else:
+ break
+ else:
+ print '!!!'
+
+class YO:
+ """hehe"""
+ a=1
+ def __init__(self):
+ try:
+ self.yo = 1
+ except ValueError, ex:
+ pass
+ except (NameError, TypeError):
+ raise XXXError()
+ except:
+ raise
+
+#print '*****>',YO.__dict__
+class YOUPI(YO):
+ class_attr = None
+
+ def __init__(self):
+ self.member = None
+
+ def method(self):
+ """method test"""
+ global MY_DICT
+ try:
+ MY_DICT = {}
+ local = None
+ autre = [a for a, b in MY_DICT if b]
+ if b in autre:
+ print 'yo',
+ elif a in autre:
+ print 'hehe'
+ global_access(local, val=autre)
+ finally:
+ return local
+
+ def static_method():
+ """static method test"""
+ assert MY_DICT, '???'
+ static_method = staticmethod(static_method)
+
+ def class_method(cls):
+ """class method test"""
+ exec a in b
+ class_method = classmethod(class_method)
+
+
+def nested_args(a, (b, c, d)):
+ """nested arguments test"""
+ print a, b, c, d
+ while 1:
+ if a:
+ break
+ a += +1
+ else:
+ b += -2
+ if c:
+ d = a and b or c
+ else:
+ c = a and b or d
+ map(lambda x, y: (y, x), a)
+
+redirect = nested_args
diff --git a/test/data/module2.py b/test/data/module2.py
new file mode 100644
index 0000000..3961865
--- /dev/null
+++ b/test/data/module2.py
@@ -0,0 +1,77 @@
+from data.module import YO, YOUPI
+import data
+
+class Specialization(YOUPI, YO): pass
+
+class Metaclass(type): pass
+
+class Interface: pass
+
+class MyIFace(Interface): pass
+
+class AnotherIFace(Interface): pass
+
+class MyException(Exception): pass
+class MyError(MyException): pass
+
+class AbstractClass(object):
+
+ def to_override(self, whatever):
+ raise NotImplementedError()
+
+ def return_something(self, param):
+ if param:
+ return 'toto'
+ return
+
+class Concrete0:
+ __implements__ = MyIFace
+class Concrete1:
+ __implements__ = MyIFace, AnotherIFace
+class Concrete2:
+ __implements__ = (MyIFace,
+ AnotherIFace)
+class Concrete23(Concrete1): pass
+
+del YO.member
+
+del YO
+[SYN1, SYN2] = Concrete0, Concrete1
+assert `1`
+b = 1 | 2 & 3 ^ 8
+exec 'c = 3'
+exec 'c = 3' in {}, {}
+
+def raise_string(a=2, *args, **kwargs):
+ raise 'pas glop'
+ raise Exception, 'yo'
+ yield 'coucou'
+
+a = b + 2
+c = b * 2
+c = b / 2
+c = b // 2
+c = b - 2
+c = b % 2
+c = b ** 2
+c = b << 2
+c = b >> 2
+c = ~b
+
+c = not b
+
+d = [c]
+e = d[:]
+e = d[a:b:c]
+
+raise_string(*args, **kwargs)
+
+print >> stream, 'bonjour'
+print >> stream, 'salut',
+
+
+def make_class(any, base=data.module.YO, *args, **kwargs):
+ """check base is correctly resolved to Concrete0"""
+ class Aaaa(base):
+ """dynamic class"""
+ return Aaaa
diff --git a/test/data/newlines.txt b/test/data/newlines.txt
new file mode 100644
index 0000000..e1f25c0
--- /dev/null
+++ b/test/data/newlines.txt
@@ -0,0 +1,3 @@
+# mixed new lines
+1
+2 3
diff --git a/test/data/noendingnewline.py b/test/data/noendingnewline.py
new file mode 100644
index 0000000..353ded4
--- /dev/null
+++ b/test/data/noendingnewline.py
@@ -0,0 +1,38 @@
+
+
+import unittest
+
+
+class TestCase(unittest.TestCase):
+
+ def setUp(self):
+ unittest.TestCase.setUp(self)
+
+
+ def tearDown(self):
+ unittest.TestCase.tearDown(self)
+
+ def testIt(self):
+ self.a = 10
+ self.xxx()
+
+
+ def xxx(self):
+ if False:
+ pass
+ print 'a'
+
+ if False:
+ pass
+ pass
+
+ if False:
+ pass
+ print 'rara'
+
+
+if __name__ == '__main__':
+ print 'test2'
+ unittest.main()
+
+ \ No newline at end of file
diff --git a/test/data/nonregr.py b/test/data/nonregr.py
new file mode 100644
index 0000000..24ecc7c
--- /dev/null
+++ b/test/data/nonregr.py
@@ -0,0 +1,14 @@
+try:
+ enumerate = enumerate
+except NameError:
+
+ def enumerate(iterable):
+ """emulates the python2.3 enumerate() function"""
+ i = 0
+ for val in iterable:
+ yield i, val
+ i += 1
+
+def toto(value):
+ for k, v in value:
+ print v.get('yo')
diff --git a/test/data/normal_file.txt b/test/data/normal_file.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/data/normal_file.txt
diff --git a/test/data/spam.txt b/test/data/spam.txt
new file mode 100644
index 0000000..068911b
--- /dev/null
+++ b/test/data/spam.txt
@@ -0,0 +1,9 @@
+a
+b
+c
+h
+e
+f
+g
+h
+
diff --git a/test/data/sub/doc.txt b/test/data/sub/doc.txt
new file mode 100644
index 0000000..c60eb16
--- /dev/null
+++ b/test/data/sub/doc.txt
@@ -0,0 +1 @@
+héhéhé
diff --git a/test/data/sub/momo.py b/test/data/sub/momo.py
new file mode 100644
index 0000000..e2600f5
--- /dev/null
+++ b/test/data/sub/momo.py
@@ -0,0 +1 @@
+print 'yo'
diff --git a/test/data/write_protected_file.txt b/test/data/write_protected_file.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/data/write_protected_file.txt
diff --git a/test/foomod.py b/test/foomod.py
new file mode 100644
index 0000000..05337e5
--- /dev/null
+++ b/test/foomod.py
@@ -0,0 +1,17 @@
+"""fake module for logilb.common.bind's unit tests"""
+
+__revision__ = '$Id: foomod.py,v 1.1 2005-02-15 17:06:08 adim Exp $'
+
+VAR1 = 'var1'
+VAR2 = 'var2'
+
+def f1():
+ return 'a'
+
+def f2():
+ global VAR1
+ VAR1 = 'a'
+
+def f3():
+ global VAR1
+ VAR1 = 'b'
diff --git a/test/runtests.py b/test/runtests.py
new file mode 100644
index 0000000..67d6636
--- /dev/null
+++ b/test/runtests.py
@@ -0,0 +1,5 @@
+from logilab.common.testlib import main
+
+if __name__ == '__main__':
+ import sys, os
+ main(os.path.dirname(sys.argv[0]) or '.')
diff --git a/test/unittest_bind.py b/test/unittest_bind.py
new file mode 100644
index 0000000..d404dc3
--- /dev/null
+++ b/test/unittest_bind.py
@@ -0,0 +1,65 @@
+"""unit tests for logilab.common.bind module"""
+
+__revision__ = '$Id: unittest_bind.py,v 1.2 2006-01-03 15:31:16 syt Exp $'
+
+import unittest
+
+
+from logilab.common.compat import set as Set
+from logilab.common import bind
+
+HELLO = 'Hello'
+def f():
+ return HELLO
+
+def modify_hello():
+ global HELLO
+ HELLO = 'hacked !'
+
+import foomod
+
+class BindTC(unittest.TestCase):
+ """Test suite for bind module"""
+
+ def test_simple_bind(self):
+ """tests a simple global variable becomes a local one"""
+ self.assertEquals(f(), HELLO)
+ d = {'HELLO' : HELLO}
+ new_f = bind.bind(f, d)
+ self.assertEquals(new_f(), f())
+ f_consts = f.func_code.co_consts
+ newf_consts = new_f.func_code.co_consts
+ self.assertEquals(f_consts, (None,))
+ self.assert_(newf_consts, (None, HELLO))
+
+ def test_optimize_on_a_func(self):
+ """make sure optimize only workds for modules"""
+ self.assertRaises(TypeError, bind.optimize_module, f, ('c1', 'c2'))
+ self.assertRaises(TypeError, bind.optimize_module_2, f, ('c1', 'c2'))
+ self.assertRaises(TypeError, bind.optimize_module, [], ('c1', 'c2'))
+ self.assertRaises(TypeError, bind.optimize_module_2, [], ('c1', 'c2'))
+
+ def test_analyze_code(self):
+ """tests bind.analyze_code()"""
+ consts_dict, consts_list = {}, []
+ globs = {'HELLO' : "some global value"}
+ modified = bind.analyze_code(modify_hello.func_code, globs,
+ consts_dict, consts_list)
+ self.assertEquals(consts_list, [None, 'hacked !'])
+ self.assertEquals(modified, ['HELLO'])
+
+ def test_optimize_module2(self):
+ """test optimize_module_2()"""
+ f1_consts = Set(foomod.f1.func_code.co_consts)
+ f2_consts = Set(foomod.f2.func_code.co_consts)
+ f3_consts = Set(foomod.f3.func_code.co_consts)
+ bind.optimize_module_2(foomod, ['f1', 'f2', 'f3'])
+ newf1_consts = Set(foomod.f1.func_code.co_consts)
+ newf2_consts = Set(foomod.f2.func_code.co_consts)
+ newf3_consts = Set(foomod.f3.func_code.co_consts)
+ self.assert_(newf1_consts == newf2_consts == newf3_consts)
+ self.assertEquals(newf1_consts, f1_consts | f2_consts | f3_consts)
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/test/unittest_cache.py b/test/unittest_cache.py
new file mode 100644
index 0000000..140a1c1
--- /dev/null
+++ b/test/unittest_cache.py
@@ -0,0 +1,102 @@
+# unit tests for the cache module
+
+from logilab.common.cache import Cache
+import sys
+import unittest
+
+class CacheTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.cache = Cache(5)
+
+ def test_setitem1(self):
+ """Checks that the setitem method works"""
+ self.cache[1] = 'foo'
+ self.assert_(self.cache.data[1] == 'foo',"1 : 'foo' is not in cache.data")
+ self.assert_(len(self.cache._usage) == 1, "lenght of usage list is not 1")
+ self.assert_(self.cache._usage[-1] == 1, '1 is not the most recently used key')
+ self.assert_(self.cache._usage.sort() == self.cache.data.keys().sort(), "usage list and data keys are different")
+
+ def test_setitem2(self):
+ """Checks that the setitem method works for multiple items"""
+ self.cache[1] = 'foo'
+ self.cache[2] = 'bar'
+ self.assert_(self.cache.data[2] == 'bar',"2 : 'bar' is not in cache.data")
+ self.assert_(len(self.cache._usage) == 2, "lenght of usage list is not 2")
+ self.assert_(self.cache._usage[-1] == 2, '1 is not the most recently used key')
+ self.assert_(self.cache._usage.sort() == self.cache.data.keys().sort(), "usage list and data keys are different")
+
+ def test_setitem3(self):
+ """Checks that the setitem method works when replacing an element in the cache"""
+ self.cache[1] = 'foo'
+ self.cache[1] = 'bar'
+ self.assert_(self.cache.data[1] == 'bar',"1 : 'bar' is not in cache.data")
+ self.assert_(len(self.cache._usage) == 1, "lenght of usage list is not 1")
+ self.assert_(self.cache._usage[-1] == 1, '1 is not the most recently used key')
+ self.assert_(self.cache._usage.sort() == self.cache.data.keys().sort(), "usage list and data keys are different")
+
+ def test_recycling1(self):
+ """Checks the removal of old elements"""
+ self.cache[1] = 'foo'
+ self.cache[2] = 'bar'
+ self.cache[3] = 'baz'
+ self.cache[4] = 'foz'
+ self.cache[5] = 'fuz'
+ self.cache[6] = 'spam'
+ self.assert_(not self.cache.data.has_key(1), 'key 1 has not been suppressed from the cache dictionnary')
+ self.assert_(1 not in self.cache._usage, 'key 1 has not been suppressed from the cache LRU list')
+ self.assert_(len(self.cache._usage) == 5, "lenght of usage list is not 5")
+ self.assert_(self.cache._usage[-1] == 6, '6 is not the most recently used key')
+ self.assert_(self.cache._usage.sort() == self.cache.data.keys().sort(), "usage list and data keys are different")
+
+ def test_recycling2(self):
+ """Checks that accessed elements get in the front of the list"""
+ self.cache[1] = 'foo'
+ self.cache[2] = 'bar'
+ self.cache[3] = 'baz'
+ self.cache[4] = 'foz'
+ a = self.cache[1]
+ self.assert_(a == 'foo')
+ self.assert_(self.cache._usage[-1] == 1, '1 is not the most recently used key')
+ self.assert_(self.cache._usage.sort() == self.cache.data.keys().sort(), "usage list and data keys are different")
+
+ def test_delitem(self):
+ """Checks that elements are removed from both element dict and element
+ list.
+ """
+ self.cache['foo'] = 'bar'
+ del self.cache['foo']
+ self.assert_('foo' not in self.cache.data.keys(),"Element 'foo' was not removed cache dictionnary")
+ self.assert_('foo' not in self.cache._usage,"Element 'foo' was not removed usage list")
+ self.assert_(self.cache._usage.sort() == self.cache.data.keys().sort(), "usage list and data keys are different")
+
+
+ def test_nullsize(self):
+ """Checks that a 'NULL' size cache doesn't store anything
+ """
+ null_cache = Cache(0)
+ null_cache['foo'] = 'bar'
+ self.assert_(null_cache.size == 0, 'Cache size should be O, not %d' % \
+ null_cache.size)
+ self.assert_(len(null_cache) == 0, 'Cache should be empty !')
+ # Assert null_cache['foo'] raises a KeyError
+ self.assertRaises(KeyError, null_cache.__getitem__, 'foo')
+ # Deleting element should not raise error
+ del null_cache['foo']
+
+
+def suite():
+ loader = unittest.TestLoader()
+ testsuite = loader.loadTestsFromModule(sys.modules[__name__])
+ return testsuite
+
+
+def Run():
+ testsuite = suite()
+ runner = unittest.TextTestRunner()
+ return runner.run(testsuite)
+
+if __name__ == "__main__":
+ Run()
+
+
diff --git a/test/unittest_compat.py b/test/unittest_compat.py
new file mode 100644
index 0000000..3f18da1
--- /dev/null
+++ b/test/unittest_compat.py
@@ -0,0 +1,120 @@
+"""provides unit tests for compat module"""
+
+__revision__ = '$Id: unittest_compat.py,v 1.3 2005-08-08 10:44:10 adim Exp $'
+
+import unittest
+import sys
+import types
+import __builtin__
+import pprint
+
+class CompatTCMixIn:
+ MODNAMES = {}
+ BUILTINS = []
+
+ def setUp(self):
+ self.builtins_backup = {}
+ self.modules_backup = {}
+ self.remove_builtins()
+ self.remove_modules()
+
+ def tearDown(self):
+ for modname in self.MODNAMES:
+ del sys.modules[modname]
+ for funcname, func in self.builtins_backup.items():
+ setattr(__builtin__, funcname, func)
+ for modname, mod in self.modules_backup.items():
+ sys.modules[modname] = mod
+ try:
+ del sys.modules['logilab.common.compat']
+ except KeyError:
+ pass
+
+ def remove_builtins(self):
+ for builtin in self.BUILTINS:
+ func = getattr(__builtin__, builtin, None)
+ if func is not None:
+ self.builtins_backup[builtin] = func
+ delattr(__builtin__, builtin)
+
+ def remove_modules(self):
+ for modname in self.MODNAMES:
+ if modname in sys.modules:
+ self.modules_backup[modname] = sys.modules[modname]
+ sys.modules[modname] = types.ModuleType('faked%s' % modname)
+
+ def test_removed_builtins(self):
+ """tests that builtins are actually uncallable"""
+ for builtin in self.BUILTINS:
+ self.assertRaises(NameError, eval, builtin)
+
+ def test_removed_modules(self):
+ """tests that builtins are actually emtpy"""
+ for modname, funcnames in self.MODNAMES.items():
+ import_stmt = 'from %s import %s' % (modname, ', '.join(funcnames))
+ # FIXME: use __import__ instead
+ code = compile(import_stmt, 'foo.py', 'exec')
+ self.assertRaises(ImportError, eval, code)
+
+
+class Py23CompatTC(CompatTCMixIn, unittest.TestCase):
+ BUILTINS = ('enumerate', 'sum')
+ MODNAMES = {
+ 'sets' : ('Set',),
+ 'itertools' : ('izip', 'chain'),
+ }
+
+ def test_sum(self):
+ from logilab.common.compat import sum
+ self.assertEquals(sum(range(5)), 10)
+ self.assertRaises(TypeError, sum, 'abc')
+
+ def test_enumerate(self):
+ from logilab.common.compat import enumerate
+ self.assertEquals(list(enumerate([])), [])
+ self.assertEquals(list(enumerate('abc')),
+ [(0, 'a'), (1, 'b'), (2, 'c')])
+
+ def test_basic_set(self):
+ from logilab.common.compat import set
+ s = set('abc')
+ self.assertEquals(len(s), 3)
+ s.remove('a')
+ self.assertEquals(len(s), 2)
+ s.add('a')
+ self.assertEquals(len(s), 3)
+ s.add('a')
+ self.assertEquals(len(s), 3)
+ self.assertRaises(KeyError, s.remove, 'd')
+
+
+class Py24CompatTC(CompatTCMixIn, unittest.TestCase):
+ BUILTINS = ('reversed', 'sorted', 'set',)
+
+ def test_sorted(self):
+ from logilab.common.compat import sorted
+ l = [3, 1, 2, 5, 4]
+ s = sorted(l)
+ self.assertEquals(s, [1, 2, 3, 4, 5])
+ self.assertEquals(l, [3, 1, 2, 5, 4])
+
+
+ def test_reversed(self):
+ from logilab.common.compat import reversed
+ l = range(5)
+ r = reversed(l)
+ self.assertEquals(r, [4, 3, 2, 1, 0])
+ self.assertEquals(l, range(5))
+
+ def test_set(self):
+ from logilab.common.compat import set
+ s1 = set(range(5))
+ s2 = set(range(2, 6))
+ self.assertEquals(len(s1), 5)
+ self.assertEquals(s1 & s2, set([2, 3, 4]))
+ self.assertEquals(s1 | s2, set(range(6)))
+
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/test/unittest_configuration.py b/test/unittest_configuration.py
new file mode 100644
index 0000000..ac727f5
--- /dev/null
+++ b/test/unittest_configuration.py
@@ -0,0 +1,176 @@
+import unittest
+from logilab.common import testlib
+
+import tempfile
+import os
+from cStringIO import StringIO
+
+from logilab.common.configuration import Configuration, OptionValueError
+
+options = [('dothis', {'type':'yn', 'default': True, 'metavar': '<y or n>'}),
+ ('value', {'type': 'string', 'metavar': '<string>', 'short': 'v'}),
+ ('multiple', {'type': 'csv', 'default': ('yop',),
+ 'metavar': '<comma separated values>',
+ 'help': 'you can also document the option'}),
+ ('number', {'type': 'int', 'default':2, 'metavar':'<int>'}),
+ ('choice', {'type': 'choice', 'default':'yo', 'choices': ('yo', 'ye'),
+ 'metavar':'<yo|ye>'}),
+ ('multiple-choice', {'type': 'multiple_choice', 'default':('yo', 'ye'),
+ 'choices': ('yo', 'ye', 'yu', 'yi', 'ya'),
+ 'metavar':'<yo|ye>'}),
+ ]
+
+class ConfigurationTC(testlib.TestCase):
+
+ def setUp(self):
+ self.cfg = Configuration(name='test', options=options, usage='Just do it ! (tm)')
+
+ def test_default(self):
+ cfg = self.cfg
+ self.assertEquals(cfg['dothis'], True)
+ self.assertEquals(cfg['value'], None)
+ self.assertEquals(cfg['multiple'], ('yop',))
+ self.assertEquals(cfg['number'], 2)
+ self.assertEquals(cfg['choice'], 'yo')
+ self.assertEquals(cfg['multiple-choice'], ('yo', 'ye'))
+
+ def test_base(self):
+ cfg = self.cfg
+ cfg.set_option('number', '0')
+ self.assertEquals(cfg['number'], 0)
+ self.assertRaises(OptionValueError, cfg.set_option, 'number', 'youpi')
+ self.assertRaises(OptionValueError, cfg.set_option, 'choice', 'youpi')
+ self.assertRaises(OptionValueError, cfg.set_option, 'multiple-choice', ('yo', 'y', 'ya'))
+ cfg.set_option('multiple-choice', 'yo, ya')
+ self.assertEquals(cfg['multiple-choice'], ['yo', 'ya'])
+ self.assertEquals(cfg.get('multiple-choice'), ['yo', 'ya'])
+ self.assertEquals(cfg.get('whatever'), None)
+
+ def test_load_command_line_configuration(self):
+ cfg = self.cfg
+ args = cfg.load_command_line_configuration(['--choice', 'ye', '--number', '4',
+ '--multiple=1,2,3', '--dothis=n',
+ 'other', 'arguments'])
+ self.assertEquals(args, ['other', 'arguments'])
+ self.assertEquals(cfg['dothis'], False)
+ self.assertEquals(cfg['multiple'], ['1', '2', '3'])
+ self.assertEquals(cfg['number'], 4)
+ self.assertEquals(cfg['choice'], 'ye')
+ self.assertEquals(cfg['value'], None)
+ args = cfg.load_command_line_configuration(['-v', 'duh'])
+ self.assertEquals(args, [])
+ self.assertEquals(cfg['value'], 'duh')
+ self.assertEquals(cfg['dothis'], False)
+ self.assertEquals(cfg['multiple'], ['1', '2', '3'])
+ self.assertEquals(cfg['number'], 4)
+ self.assertEquals(cfg['choice'], 'ye')
+
+ def test_load_configuration(self):
+ cfg = self.cfg
+ args = cfg.load_configuration(choice='ye', number='4',
+ multiple='1,2,3', dothis='n',
+ multiple_choice=('yo', 'ya'))
+ self.assertEquals(cfg['dothis'], False)
+ self.assertEquals(cfg['multiple'], ['1', '2', '3'])
+ self.assertEquals(cfg['number'], 4)
+ self.assertEquals(cfg['choice'], 'ye')
+ self.assertEquals(cfg['value'], None)
+ self.assertEquals(cfg['multiple-choice'], ('yo', 'ya'))
+
+ def test_generate_config(self):
+ stream = StringIO()
+ self.cfg.generate_config(stream)
+ self.assertLinesEquals(stream.getvalue().strip(), """# class for simple configurations which don't need the
+# manager / providers model and prefer delegation to inheritance
+#
+# configuration values are accessible through a dict like interface
+#
+[TEST]
+
+dothis=yes
+
+# you can also document the option
+multiple=yop
+
+number=2
+
+choice=yo
+
+multiple-choice=yo,ye
+""")
+
+ def test_generate_config_with_space_string(self):
+ self.cfg['value'] = ' '
+ stream = StringIO()
+ self.cfg.generate_config(stream)
+ self.assertLinesEquals(stream.getvalue().strip(), """# class for simple configurations which don't need the
+# manager / providers model and prefer delegation to inheritance
+#
+# configuration values are accessible through a dict like interface
+#
+[TEST]
+
+dothis=yes
+
+value=' '
+
+# you can also document the option
+multiple=yop
+
+number=2
+
+choice=yo
+
+multiple-choice=yo,ye
+""")
+
+
+ def test_loopback(self):
+ cfg = self.cfg
+ f = tempfile.mktemp()
+ stream = open(f, 'w')
+ try:
+ cfg.generate_config(stream)
+ stream.close()
+ new_cfg = Configuration(name='testloop', options=options)
+ new_cfg.load_file_configuration(f)
+ self.assertEquals(cfg['dothis'], new_cfg['dothis'])
+ self.assertEquals(cfg['multiple'], new_cfg['multiple'])
+ self.assertEquals(cfg['number'], new_cfg['number'])
+ self.assertEquals(cfg['choice'], new_cfg['choice'])
+ self.assertEquals(cfg['value'], new_cfg['value'])
+ self.assertEquals(cfg['multiple-choice'], new_cfg['multiple-choice'])
+ finally:
+ os.remove(f)
+
+ def test_help(self):
+ self.cfg.add_help_section('bonus', 'a nice additional help')
+ help = self.cfg.help().strip()
+ # at least in python 2.4.2 the output is:
+ # ' -v <string>, --value=<string>'
+ # it is not unlikely some optik/optparse versions do print -v<string>
+ # so accept both
+ help = help.replace(' -v <string>, ', ' -v<string>, ')
+ self.assertLinesEquals(help, """usage: Just do it ! (tm)
+
+options:
+ -h, --help show this help message and exit
+ --dothis=<y or n>
+ -v<string>, --value=<string>
+ --multiple=<comma separated values>
+ you can also document the option
+ --number=<int>
+ --choice=<yo|ye>
+ --multiple-choice=<yo|ye>
+
+ Bonus:
+ a nice additional help
+""".strip())
+
+
+ def test_manpage(self):
+ from logilab.common import __pkginfo__
+ self.cfg.generate_manpage(__pkginfo__, stream=StringIO())
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unittest_db.py b/test/unittest_db.py
new file mode 100644
index 0000000..e7417dd
--- /dev/null
+++ b/test/unittest_db.py
@@ -0,0 +1,179 @@
+"""
+unit tests for module logilab.common.db
+
+"""
+__revision__ = "$Id: unittest_db.py,v 1.13 2006-03-13 12:42:56 syt Exp $"
+
+from logilab.common.testlib import TestCase, unittest_main
+from logilab.common.db import *
+from logilab.common.db import PREFERED_DRIVERS, _GenericAdvFuncHelper, _PGAdvFuncHelper
+
+
+class PreferedDriverTC(TestCase):
+ def setUp(self):
+ self.drivers = {"pg":[('foo', None), ('bar', None)]}
+ self.drivers = {'pg' : ["foo", "bar"]}
+
+ def testNormal(self):
+ set_prefered_driver('pg','bar', self.drivers)
+ self.assertEquals('bar', self.drivers['pg'][0])
+
+ def testFailuresDb(self):
+ try:
+ set_prefered_driver('oracle','bar', self.drivers)
+ self.fail()
+ except UnknownDriver, exc:
+ self.assertEquals(exc.args[0], 'Unknown database oracle')
+
+ def testFailuresDriver(self):
+ try:
+ set_prefered_driver('pg','baz', self.drivers)
+ self.fail()
+ except UnknownDriver, exc:
+ self.assertEquals(exc.args[0], 'Unknown module baz for pg')
+
+ def testGlobalVar(self):
+ old_drivers = PREFERED_DRIVERS['postgres'][:]
+ expected = old_drivers[:]
+ expected.insert(0, expected.pop(1))
+ set_prefered_driver('postgres','pgdb')
+ self.assertEquals(PREFERED_DRIVERS['postgres'], expected)
+ set_prefered_driver('postgres','psycopg')
+ self.assertEquals(PREFERED_DRIVERS['postgres'], old_drivers)
+
+
+class getCnxTC(TestCase):
+ def setUp(self):
+ self.host = 'crater.logilab.fr'
+ self.db = 'gincotest2'
+ self.user = 'adim'
+ self.passwd = 'adim'
+
+ def testPsyco(self):
+ set_prefered_driver('postgres', 'psycopg')
+ try:
+ cnx = get_connection('postgres',
+ self.host, self.db, self.user, self.passwd,
+ quiet=1)
+ except ImportError:
+ self.skip('python-psycopg is not installed')
+
+ def testPgdb(self):
+ set_prefered_driver('postgres', 'pgdb')
+ try:
+ cnx = get_connection('postgres',
+ self.host, self.db, self.user, self.passwd,
+ quiet=1)
+ except ImportError:
+ self.skip('python-pgsql is not installed')
+
+ def testPgsql(self):
+ set_prefered_driver('postgres', 'pyPgSQL.PgSQL')
+ try:
+ cnx = get_connection('postgres',
+ self.host, self.db, self.user, self.passwd,
+ quiet=1)
+ except ImportError:
+ self.skip('python-pygresql is not installed')
+
+ def testMysql(self):
+ set_prefered_driver('mysql', 'MySQLdb')
+ try:
+ cnx = get_connection('mysql',
+ self.host, self.db, self.user, self.passwd,
+ quiet=1)
+ except ImportError:
+ self.skip('python-mysqldb is not installed')
+ except Exception, ex:
+ # no mysql running ?
+ import MySQLdb
+ if not (isinstance(ex, MySQLdb.OperationalError) and ex.args[0] == 2003):
+ raise
+
+ def test_connection_wrap(self):
+ """Tests the connection wrapping"""
+ try:
+ cnx = get_connection('postgres',
+ self.host, self.db, self.user, self.passwd,
+ quiet=1)
+ except ImportError:
+ self.skip('postgresql dbapi module not installed')
+ self.failIf(isinstance(cnx, PyConnection),
+ 'cnx should *not* be a PyConnection instance')
+ cnx = get_connection('postgres',
+ self.host, self.db, self.user, self.passwd,
+ quiet=1, pywrap = True)
+ self.failUnless(isinstance(cnx, PyConnection),
+ 'cnx should be a PyConnection instance')
+
+
+ def test_cursor_wrap(self):
+ """Tests cursor wrapping"""
+ try:
+ cnx = get_connection('postgres',
+ self.host, self.db, self.user, self.passwd,
+ quiet=1, pywrap = True)
+ except ImportError:
+ self.skip('postgresql dbapi module not installed')
+ cursor = cnx.cursor()
+ self.failUnless(isinstance(cursor, PyCursor),
+ 'cnx should be a PyCursor instance')
+
+
+class DBAPIAdaptersTC(TestCase):
+ """Tests DbApi adapters management"""
+
+ def setUp(self):
+ """Memorize original PREFERED_DRIVERS"""
+ self.old_drivers = PREFERED_DRIVERS['postgres'][:]
+ self.host = 'crater.logilab.fr'
+ self.db = 'gincotest2'
+ self.user = 'adim'
+ self.passwd = 'adim'
+
+ def tearDown(self):
+ """Reset PREFERED_DRIVERS as it was"""
+ PREFERED_DRIVERS['postgres'] = self.old_drivers
+
+ def test_raise(self):
+ self.assertRaises(UnknownDriver, get_dbapi_compliant_module, 'pougloup')
+
+ def test_pgdb_types(self):
+ """Tests that NUMBER really wraps all number types"""
+ PREFERED_DRIVERS['postgres'] = ['pgdb']
+ #set_prefered_driver('postgres', 'pgdb')
+ try:
+ module = get_dbapi_compliant_module('postgres')
+ except ImportError:
+ self.skip('postgresql pgdb module not installed')
+ number_types = ('int2', 'int4', 'serial',
+ 'int8', 'float4', 'float8',
+ 'numeric', 'bool', 'money')
+ for num_type in number_types:
+ self.assertEquals(num_type, module.NUMBER)
+ self.assertNotEquals('char', module.NUMBER)
+
+ def test_pypgsql_getattr(self):
+ """Tests the getattr() delegation for pyPgSQL"""
+ set_prefered_driver('postgres', 'pyPgSQL.PgSQL')
+ try:
+ module = get_dbapi_compliant_module('postgres')
+ except ImportError:
+ self.skip('postgresql dbapi module not installed')
+ try:
+ binary = module.BINARY
+ except AttributeError, err:
+ self.fail(str(err))
+
+ def test_adv_func_helper(self):
+ try:
+ module = get_dbapi_compliant_module('postgres')
+ except ImportError:
+ self.skip('postgresql dbapi module not installed')
+ self.failUnless(isinstance(module.adv_func_helper, _PGAdvFuncHelper))
+ module = get_dbapi_compliant_module('sqlite')
+ self.failUnless(isinstance(module.adv_func_helper, _GenericAdvFuncHelper))
+
+
+if __name__ == '__main__':
+ unittest_main()
diff --git a/test/unittest_fileutils.py b/test/unittest_fileutils.py
new file mode 100755
index 0000000..f3ef664
--- /dev/null
+++ b/test/unittest_fileutils.py
@@ -0,0 +1,147 @@
+"""unit tests for logilab.common.fileutils
+
+Some file / path manipulation utilities
+"""
+__revision__ = "$Id: unittest_fileutils.py,v 1.22 2006-01-03 15:31:16 syt Exp $"
+
+import unittest
+import sys, os, tempfile, shutil
+from os.path import join
+
+from logilab.common.testlib import TestCase
+
+from logilab.common.fileutils import *
+
+
+#import data
+DATA_DIR = 'data' #data.__path__[0]
+NEWLINES_TXT = join(DATA_DIR,'newlines.txt')
+
+class FirstleveldirectoryTC(TestCase):
+
+ def test_known_values_first_level_directory(self):
+ """return the first level directory of a path"""
+ self.assertEqual(first_level_directory('truc/bidule/chouette'), 'truc', None)
+ self.assertEqual(first_level_directory('/truc/bidule/chouette'), '/', None)
+
+class IsBinaryTC(TestCase):
+ def test(self):
+ self.assertEqual(is_binary('toto.txt'), 0)
+ #self.assertEqual(is_binary('toto.xml'), 0)
+ self.assertEqual(is_binary('toto.bin'), 1)
+ self.assertEqual(is_binary('toto.sxi'), 1)
+ self.assertEqual(is_binary('toto.whatever'), 1)
+
+class GetModeTC(TestCase):
+ def test(self):
+ self.assertEqual(write_open_mode('toto.txt'), 'w')
+ #self.assertEqual(write_open_mode('toto.xml'), 'w')
+ self.assertEqual(write_open_mode('toto.bin'), 'wb')
+ self.assertEqual(write_open_mode('toto.sxi'), 'wb')
+
+class NormReadTC(TestCase):
+ def test_known_values_norm_read(self):
+ data = norm_read(NEWLINES_TXT)
+ self.assertEqual(data.strip(), '\n'.join(['# mixed new lines', '1', '2', '3']))
+
+
+class LinesTC(TestCase):
+ def test_known_values_lines(self):
+ self.assertEqual(lines(NEWLINES_TXT),
+ ['# mixed new lines', '1', '2', '3'])
+
+ def test_known_values_lines_comment(self):
+ self.assertEqual(lines(NEWLINES_TXT, comments='#'),
+ ['1', '2', '3'])
+
+class GetByExtTC(TestCase):
+ def test_include(self):
+ files = files_by_ext(DATA_DIR, include_exts=('.py',))
+ self.assertSetEqual(files,
+ [join('data', f) for f in ['__init__.py', 'module.py',
+ 'module2.py', 'noendingnewline.py',
+ 'nonregr.py', join('sub', 'momo.py')]])
+ files = files_by_ext(DATA_DIR, include_exts=('.py',), exclude_dirs=('sub', 'CVS'))
+ self.assertSetEqual(files,
+ [join('data', f) for f in ['__init__.py', 'module.py',
+ 'module2.py', 'noendingnewline.py',
+ 'nonregr.py']])
+
+ def test_exclude(self):
+ files = files_by_ext(DATA_DIR, exclude_exts=('.py', '.pyc'))
+ self.assertSetEqual(files,
+ [join('data', f) for f in ['foo.txt',
+ 'newlines.txt',
+ 'normal_file.txt',
+ 'spam.txt',
+ join('sub', 'doc.txt'),
+ 'write_protected_file.txt',
+ ]])
+
+ def test_exclude_base_dir(self):
+ self.assertEquals(files_by_ext(DATA_DIR, include_exts=('.py',), exclude_dirs=(DATA_DIR,)),
+ [])
+
+class ExportTC(TestCase):
+ def setUp(self):
+ self.tempdir = tempfile.mktemp()
+ os.mkdir(self.tempdir)
+
+ def test(self):
+ export('data', self.tempdir, verbose=0)
+ self.assert_(exists(join(self.tempdir, '__init__.py')))
+ self.assert_(exists(join(self.tempdir, 'sub')))
+ self.assert_(not exists(join(self.tempdir, '__init__.pyc')))
+ self.assert_(not exists(join(self.tempdir, 'CVS')))
+
+ def tearDown(self):
+ shutil.rmtree(self.tempdir)
+
+class ProtectedFileTC(TestCase):
+ def setUp(self):
+ self.rpath = 'data/write_protected_file.txt'
+ self.rwpath = 'data/normal_file.txt'
+ # Make sure rpath is not writable !
+ os.chmod(self.rpath, 33060)
+ # Make sure rwpath is writable !
+ os.chmod(self.rwpath, 33188)
+
+ def test_mode_change(self):
+ """tests that mode is changed when needed"""
+ # test on non-writable file
+ self.assert_(not os.access(self.rpath, os.W_OK))
+ wp_file = ProtectedFile(self.rpath, 'w')
+ self.assert_(os.access(self.rpath, os.W_OK))
+ # test on writable-file
+ self.assert_(os.access(self.rwpath, os.W_OK))
+ wp_file = ProtectedFile(self.rwpath, 'w')
+ self.assert_(os.access(self.rwpath, os.W_OK))
+
+ def test_restore_on_close(self):
+ """tests original mode is restored on close"""
+ # test on non-writable file
+ self.assert_(not os.access(self.rpath, os.W_OK))
+ ProtectedFile(self.rpath, 'w').close()
+ self.assert_(not os.access(self.rpath, os.W_OK))
+ # test on writable-file
+ self.assert_(os.access(self.rwpath, os.W_OK))
+ ProtectedFile(self.rwpath, 'w').close()
+ self.assert_(os.access(self.rwpath, os.W_OK))
+
+ def test_mode_change_on_append(self):
+ """tests that mode is changed when file is opened in 'a' mode"""
+ self.assert_(not os.access(self.rpath, os.W_OK))
+ wp_file = ProtectedFile(self.rpath, 'a')
+ self.assert_(os.access(self.rpath, os.W_OK))
+ wp_file.close()
+ self.assert_(not os.access(self.rpath, os.W_OK))
+
+
+from logilab.common.testlib import DocTest
+class ModuleDocTest(DocTest):
+ """relative_path embed tests in docstring"""
+ from logilab.common import fileutils as module
+del DocTest # necessary if we don't want it to be executed (we don't...)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unittest_graph.py b/test/unittest_graph.py
new file mode 100644
index 0000000..a68eece
--- /dev/null
+++ b/test/unittest_graph.py
@@ -0,0 +1,32 @@
+# unit tests for the graph utilities (get_cycles)
+
+from logilab.common import get_cycles
+import sys
+import unittest
+
+class getCycleTestCase(unittest.TestCase):
+
+ def test_known0(self):
+ self.assertEqual(get_cycles({1:[2], 2:[3], 3:[1]}), [[1, 2, 3]])
+
+ def test_known1(self):
+ self.assertEqual(get_cycles({1:[2], 2:[3], 3:[1, 4], 4:[3]}), [[1, 2, 3], [3, 4]])
+
+ def test_known2(self):
+ self.assertEqual(get_cycles({1:[2], 2:[3], 3:[0], 0:[]}), [])
+
+def suite():
+ loader = unittest.TestLoader()
+ testsuite = loader.loadTestsFromModule(sys.modules[__name__])
+ return testsuite
+
+
+def Run():
+ testsuite = suite()
+ runner = unittest.TextTestRunner()
+ return runner.run(testsuite)
+
+if __name__ == "__main__":
+ Run()
+
+
diff --git a/test/unittest_logger.py b/test/unittest_logger.py
new file mode 100644
index 0000000..dc289db
--- /dev/null
+++ b/test/unittest_logger.py
@@ -0,0 +1,54 @@
+"""unittests for logilab.common.logger"""
+
+import unittest
+from tempfile import mktemp
+import os
+
+import sys
+from cStringIO import StringIO
+
+from logilab.common.logger import *
+
+
+def get_logged_messages(output):
+ """strip timestamps and extract effective logged text
+ (log lines look like: [timestamp] message)
+ """
+ return [line.split(']')[-1].strip() for line in output.splitlines()]
+
+
+class LoggerTC(unittest.TestCase):
+
+ def test_defaultlogging(self):
+ # redirect stdout so that we can test
+ stdout_backup = sys.stdout
+ sys.stdout = StringIO()
+ # make default logger
+ logger = make_logger()
+ logger.log(message='hello')
+ logger.log(message='world')
+ output = sys.stdout.getvalue()
+ msg = get_logged_messages(output)
+ # restore stdout
+ sys.stdout = stdout_backup
+ self.assertEquals(msg, ['hello', 'world'])
+
+ def test_filelogging(self):
+ filename = mktemp(dir='/tmp')
+ # make file logger
+ logger = make_logger(method='file', output=filename)
+ logger.log(message='hello')
+ logger.log(message='world')
+ # make sure everything gets flushed (testing purpose)
+ logger.output.flush()
+ output = open(filename).read() #os.read(descr, 300)
+    # close everything correctly
+ #os.close(descr)
+ logger.output.close()
+ # remove file
+ os.remove(filename)
+ self.assertEquals(get_logged_messages(output), ['hello', 'world'])
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/test/unittest_modutils.py b/test/unittest_modutils.py
new file mode 100644
index 0000000..cab5455
--- /dev/null
+++ b/test/unittest_modutils.py
@@ -0,0 +1,203 @@
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""
+unit tests for module modutils (module manipulation utilities)
+"""
+
+__revision__ = "$Id: unittest_modutils.py,v 1.26 2006-02-16 10:58:40 ludal Exp $"
+
+import sys
+try:
+ __file__
+except NameError:
+ __file__ = sys.argv[0]
+
+from logilab.common.testlib import TestCase, unittest_main
+from logilab.common import modutils
+
+from os import path
+from logilab import common
+from logilab.common import tree
+
+class load_module_from_name_tc(TestCase):
+ """ load a python module from its name """
+
+ def test_knownValues_load_module_from_name_1(self):
+ self.assertEqual(modutils.load_module_from_name('sys'), sys)
+
+ def test_knownValues_load_module_from_name_2(self):
+ self.assertEqual(modutils.load_module_from_name('os.path'), path)
+
+ def test_raise_load_module_from_name_1(self):
+ self.assertRaises(ImportError,
+ modutils.load_module_from_name, 'os.path', use_sys=0)
+
+
+class get_module_part_tc(TestCase):
+ """given a dotted name return the module part of the name"""
+
+ def test_knownValues_get_module_part_1(self):
+ self.assertEqual(modutils.get_module_part('logilab.common.modutils'),
+ 'logilab.common.modutils')
+
+ def test_knownValues_get_module_part_2(self):
+ self.assertEqual(modutils.get_module_part('logilab.common.modutils.get_module_part'),
+ 'logilab.common.modutils')
+
+ def test_knownValues_get_module_part_3(self):
+ self.assertEqual(modutils.get_module_part('db.get_connexion', modutils.__file__),
+ 'db')
+
+ def test_knownValues_get_compiled_module_part(self):
+ self.assertEqual(modutils.get_module_part('math.log10'), 'math')
+ self.assertEqual(modutils.get_module_part('math.log10', __file__), 'math')
+
+ def test_knownValues_get_builtin_module_part(self):
+ self.assertEqual(modutils.get_module_part('sys.path'), 'sys')
+ self.assertEqual(modutils.get_module_part('sys.path', '__file__'), 'sys')
+
+
+class modpath_from_file_tc(TestCase):
+ """ given an absolute file path return the python module's path as a list """
+
+ def test_knownValues_modpath_from_file_1(self):
+ self.assertEqual(modutils.modpath_from_file(modutils.__file__),
+ ['logilab', 'common', 'modutils'])
+
+ def test_raise_modpath_from_file_Exception(self):
+ self.assertRaises(Exception, modutils.modpath_from_file, '/turlututu')
+
+
+class file_from_modpath_tc(TestCase):
+ """given a mod path (i.e. split module / package name), return the
+ corresponding file, giving priority to source file over precompiled file
+ if it exists"""
+
+ def test_knownValues_file_from_modpath_1(self):
+ self.assertEqual(modutils.file_from_modpath(['logilab', 'common', 'modutils']),
+ modutils.__file__.replace('.pyc', '.py'))
+
+ def test_knownValues_file_from_modpath_2(self):
+ from os import path
+ self.assertEqual(modutils.file_from_modpath(['os', 'path']).replace('.pyc', '.py'),
+ path.__file__.replace('.pyc', '.py'))
+
+ def test_knownValues_file_from_modpath_3(self):
+ try:
+ # don't fail if pyxml isn't installed
+ from xml.dom import ext
+ except ImportError:
+ pass
+ else:
+ self.assertEqual(modutils.file_from_modpath(['xml', 'dom', 'ext']).replace('.pyc', '.py'),
+ ext.__file__.replace('.pyc', '.py'))
+
+ def test_knownValues_file_from_modpath_4(self):
+ self.assertEqual(modutils.file_from_modpath(['sys']),
+ None)
+
+ def test_raise_file_from_modpath_Exception(self):
+ self.assertRaises(ImportError, modutils.file_from_modpath, ['turlututu'])
+
+class get_source_file_tc(TestCase):
+
+ def test(self):
+ from os import path
+ self.assertEqual(modutils.get_source_file(path.__file__),
+ path.__file__.replace('.pyc', '.py'))
+
+ def test_raise(self):
+ self.assertRaises(modutils.NoSourceFile, modutils.get_source_file,'whatever')
+
+class is_standard_module_tc(TestCase):
+ """
+ return true if the module may be considered as a module from the standard
+ library
+ """
+
+ def test_knownValues_is_standard_module_0(self):
+ self.assertEqual(modutils.is_standard_module('__builtin__'), True)
+
+ def test_knownValues_is_standard_module_1(self):
+ self.assertEqual(modutils.is_standard_module('sys'), True)
+
+ def test_knownValues_is_standard_module_2(self):
+ self.assertEqual(modutils.is_standard_module('logilab'), False)
+
+ def test_knownValues_is_standard_module_3(self):
+ self.assertEqual(modutils.is_standard_module('unknown'), False)
+
+ def test_knownValues_is_standard_module_4(self):
+ self.assertEqual(modutils.is_standard_module('StringIO'), True)
+
+ def test_knownValues_is_standard_module_5(self):
+ self.assertEqual(modutils.is_standard_module('data.module', ('data',)), True)
+ self.assertEqual(modutils.is_standard_module('data.module', (path.abspath('data'),)), True)
+
+
+class is_relative_tc(TestCase):
+
+ def test_knownValues_is_relative_1(self):
+ self.assertEqual(modutils.is_relative('modutils', common.__path__[0]), True)
+
+ def test_knownValues_is_relative_2(self):
+ self.assertEqual(modutils.is_relative('modutils', tree.__file__), True)
+
+ def test_knownValues_is_relative_3(self):
+ self.assertEqual(modutils.is_relative('logilab.common.modutils',
+ common.__path__[0]), False)
+
+class get_modules_tc(TestCase):
+
+ def test_knownValues_get_modules_1(self): # XXXFIXME: TOWRITE
+ """given a directory return a list of all available python modules, even
+ in subdirectories
+ """
+ import data
+ modules = modutils.get_modules('data', data.__path__[0])
+ modules.sort()
+ self.assertEqual(modules,
+ ['data.module', 'data.module2', 'data.noendingnewline',
+ 'data.nonregr'])
+
+
+class get_modules_files_tc(TestCase):
+
+ def test_knownValues_get_module_files_1(self): # XXXFIXME: TOWRITE
+ """given a directory return a list of all available python module's files, even
+ in subdirectories
+ """
+ import data
+ modules = modutils.get_module_files('data', data.__path__[0])
+ modules.sort()
+ self.assertEqual(modules,
+ [path.join('data', x) for x in ['__init__.py', 'module.py', 'module2.py', 'noendingnewline.py', 'nonregr.py']])
+
+ def test_load_module_set_attribute(self):
+ import logilab.common
+ import logilab
+ del logilab.common
+ del sys.modules['logilab.common']
+ m = modutils.load_module_from_modpath(['logilab','common'])
+ self.assert_( hasattr(logilab,'common') )
+ self.assert_( m is logilab.common )
+
+from logilab.common.testlib import DocTest
+class ModuleDocTest(DocTest):
+ """test doc test in this module"""
+ from logilab.common import modutils as module
+del DocTest # necessary if we don't want it to be executed (we don't...)
+
+
+if __name__ == '__main__':
+ unittest_main()
diff --git a/test/unittest_patricia.py b/test/unittest_patricia.py
new file mode 100644
index 0000000..00f28c5
--- /dev/null
+++ b/test/unittest_patricia.py
@@ -0,0 +1,72 @@
+"""
+unit tests for module logilab.common.patricia
+"""
+
+__revision__ = "$Id: unittest_patricia.py,v 1.3 2003-09-05 10:22:35 syt Exp $"
+
+import unittest
+import sys
+from logilab.common.patricia import *
+
+
+class PatriciaTrieClassTest(unittest.TestCase):
+
+ def test_knownValues(self):
+ """
+ remove a child node
+ """
+ p = PatriciaTrie()
+ i = 0
+ words_list = ['maitre', 'maman', 'mange', 'manger', 'mangouste',
+ 'manigance', 'manitou']
+ words_list.sort()
+ #
+ for i in range(len(words_list)):
+ p.insert(words_list[i], i)
+ for i in range(len(words_list)):
+ assert p.lookup(words_list[i]) == [i]
+ try:
+ p.lookup('not in list')
+ raise AssertionError()
+ except KeyError:
+ pass
+ #
+ l = p.pfx_search('')
+ l.sort()
+ assert l == words_list
+ l = p.pfx_search('ma')
+ l.sort()
+ assert l == words_list
+ l = p.pfx_search('mai')
+ assert l == ['maitre']
+ l = p.pfx_search('not in list')
+ assert l == []
+ l = p.pfx_search('man', 2)
+ assert l == ['mange']
+ l = p.pfx_search('man', 1)
+ assert l == []
+ p.remove('maitre')
+ try:
+ p.lookup('maitre')
+ raise AssertionError()
+ except KeyError:
+ pass
+ #print p
+
+
+def suite():
+ """return the unittest suite"""
+ loader = unittest.TestLoader()
+ testsuite = loader.loadTestsFromModule(sys.modules[__name__])
+ return testsuite
+
+
+def Run(runner=None):
+ """run tests"""
+ testsuite = suite()
+ if runner is None:
+ runner = unittest.TextTestRunner()
+ return runner.run(testsuite)
+
+if __name__ == '__main__':
+ Run()
diff --git a/test/unittest_table.py b/test/unittest_table.py
new file mode 100644
index 0000000..5fc4be3
--- /dev/null
+++ b/test/unittest_table.py
@@ -0,0 +1,428 @@
+"""
+Unittests for table management
+"""
+
+__revision__ = '$Id: unittest_table.py,v 1.13 2006-04-09 22:30:53 nico Exp $'
+
+import unittest
+import sys
+from logilab.common.table import Table, TableStyleSheet, DocbookTableWriter, \
+ DocbookRenderer, TableStyle, TableWriter, TableCellRenderer
+from logilab.common.compat import set
+from cStringIO import StringIO
+
+class TableTC(unittest.TestCase):
+ """Table TestCase class"""
+
+ def setUp(self):
+ """Creates a default table"""
+ self.table = Table()
+ self.table.create_rows(['row1', 'row2', 'row3'])
+ self.table.create_columns(['col1', 'col2'])
+
+ def test_valeur_scalaire(self):
+ tab = Table()
+ tab.create_columns(['col1'])
+ tab.append_row([1])
+ self.assertEquals(tab, [[1]])
+ tab.append_row([2])
+ self.assertEquals(tab[0,0], 1)
+ self.assertEquals(tab[1,0], 2)
+
+ def test_valeur_ligne(self):
+ tab = Table()
+ tab.create_columns(['col1','col2'])
+ tab.append_row([1,2])
+ self.assertEquals(tab, [[1,2]])
+
+ def test_valeur_colonne(self):
+ tab = Table()
+ tab.create_columns(['col1'])
+ tab.append_row([1])
+ tab.append_row([2])
+ self.assertEquals(tab, [[1],[2]])
+ self.assertEquals(tab[:,0], [1,2])
+
+ def test_indexation(self):
+ """we should be able to use [] to access rows"""
+ self.assert_(self.table[0] == self.table.data[0])
+ self.assert_(self.table[1] == self.table.data[1])
+
+ def test_iterable(self):
+ """test iter(table)"""
+ it = iter(self.table)
+ self.assert_(it.next() == self.table.data[0])
+ self.assert_(it.next() == self.table.data[1])
+
+ def test_get_rows(self):
+ """tests Table.get_rows()"""
+ self.assertEquals(self.table, [[0, 0], [0, 0], [0, 0]])
+ self.assertEquals(self.table[:], [[0, 0], [0, 0], [0, 0]])
+ self.table.insert_column(1, range(3), 'supp')
+ self.assertEquals(self.table, [[0, 0, 0], [0, 1, 0], [0, 2, 0]])
+ self.assertEquals(self.table[:], [[0, 0, 0], [0, 1, 0], [0, 2, 0]])
+
+ def test_get_cells(self):
+ self.table.insert_column(1, range(3), 'supp')
+ self.assertEquals(self.table[0,1], 0)
+ self.assertEquals(self.table[1,1], 1)
+ self.assertEquals(self.table[2,1], 2)
+ self.assertEquals(self.table['row1', 'supp'], 0)
+ self.assertEquals(self.table['row2', 'supp'], 1)
+ self.assertEquals(self.table['row3', 'supp'], 2)
+ self.assertRaises(KeyError, self.table.__getitem__, ('row1', 'foo'))
+ self.assertRaises(KeyError, self.table.__getitem__, ('foo', 'bar'))
+
+ def test_shape(self):
+ """tests table shape"""
+ self.assertEquals(self.table.shape, (3, 2))
+ self.table.insert_column(1, range(3), 'supp')
+ self.assertEquals(self.table.shape, (3, 3))
+
+ def test_set_column(self):
+ """Tests that table.set_column() works fine.
+ """
+ self.table.set_column(0, range(3))
+ self.assertEquals(self.table[0,0], 0)
+ self.assertEquals(self.table[1,0], 1)
+ self.assertEquals(self.table[2,0], 2)
+
+ def test_set_column_by_id(self):
+ """Tests that table.set_column_by_id() works fine.
+ """
+ self.table.set_column_by_id('col1', range(3))
+ self.assertEquals(self.table[0,0], 0)
+ self.assertEquals(self.table[1,0], 1)
+ self.assertEquals(self.table[2,0], 2)
+ self.assertRaises(KeyError, self.table.set_column_by_id, 'col123', range(3))
+
+ def test_cells_ids(self):
+ """tests that we can access cells by giving row/col ids"""
+ self.assertRaises(KeyError, self.table.set_cell_by_ids, 'row12', 'col1', 12)
+ self.assertRaises(KeyError, self.table.set_cell_by_ids, 'row1', 'col12', 12)
+ self.assertEquals(self.table[0,0], 0)
+ self.table.set_cell_by_ids('row1', 'col1', 'DATA')
+ self.assertEquals(self.table[0,0], 'DATA')
+ self.assertRaises(KeyError, self.table.set_row_by_id, 'row12', [])
+ self.table.set_row_by_id('row1', ['1.0', '1.1'])
+ self.assertEquals(self.table[0,0], '1.0')
+
+ def test_insert_row(self):
+ """tests a row insertion"""
+ tmp_data = ['tmp1', 'tmp2']
+ self.table.insert_row(1, tmp_data, 'tmprow')
+ self.assertEquals(self.table[1], tmp_data)
+ self.assertEquals(self.table['tmprow'], tmp_data)
+ self.table.delete_row_by_id('tmprow')
+ self.assertRaises(KeyError, self.table.delete_row_by_id, 'tmprow')
+ self.assertEquals(self.table[1], [0, 0])
+ self.assertRaises(KeyError, self.table.__getitem__, 'tmprow')
+
+ def test_get_column(self):
+ """Tests that table.get_column() works fine.
+ """
+ self.table.set_cell(0, 1, 12)
+ self.table.set_cell(2, 1, 13)
+ self.assertEquals(self.table[:,1], [12,0,13])
+ self.assertEquals(self.table[:,'col2'], [12,0,13])
+
+ def test_get_columns(self):
+ """Tests if table.get_columns() works fine.
+ """
+ self.table.set_cell(0, 1, 12)
+ self.table.set_cell(2, 1, 13)
+ self.assertEquals(self.table.get_columns(), [[0,0,0], [12,0,13]])
+
+ def test_insert_column(self):
+ """Tests that table.insert_column() works fine.
+ """
+ self.table.insert_column(1, range(3), "inserted_column")
+ self.assertEquals(self.table[:,1], [0,1,2])
+ self.assertEquals(self.table.col_names,
+ ['col1', 'inserted_column', 'col2'])
+
+ def test_delete_column(self):
+ """Tests that table.delete_column() works fine.
+ """
+ self.table.delete_column(1)
+ self.assertEquals(self.table.col_names, ['col1'])
+ self.assertEquals(self.table[:,0], [0,0,0])
+ self.assertRaises(KeyError, self.table.delete_column_by_id, 'col2')
+ self.table.delete_column_by_id('col1')
+ self.assertEquals(self.table.col_names, [])
+
+ def test_transpose(self):
+ """Tests that table.transpose() works fine.
+ """
+ self.table.append_column(range(5,8), 'col3')
+ ttable = self.table.transpose()
+ self.assertEquals(ttable.row_names, ['col1', 'col2', 'col3'])
+ self.assertEquals(ttable.col_names, ['row1', 'row2', 'row3'])
+ self.assertEquals(ttable.data, [[0,0,0], [0,0,0], [5,6,7]])
+
+ def test_sort_table(self):
+ """Tests the table sort by column
+ """
+ self.table.set_column(0, [3, 1, 2])
+ self.table.set_column(1, [1, 2, 3])
+ self.table.sort_by_column_index(0)
+ self.assertEquals(self.table.row_names, ['row2', 'row3', 'row1'])
+ self.assertEquals(self.table.data, [[1, 2], [2, 3], [3, 1]])
+ self.table.sort_by_column_index(1, 'desc')
+ self.assertEquals(self.table.row_names, ['row3', 'row2', 'row1'])
+ self.assertEquals(self.table.data, [[2, 3], [1, 2], [3, 1]])
+
+ def test_sort_by_id(self):
+ """tests sort_by_column_id()"""
+ self.table.set_column_by_id('col1', [3, 1, 2])
+ self.table.set_column_by_id('col2', [1, 2, 3])
+ self.table.sort_by_column_id('col1')
+ self.assertRaises(KeyError, self.table.sort_by_column_id, 'col123')
+ self.assertEquals(self.table.row_names, ['row2', 'row3', 'row1'])
+ self.assertEquals(self.table.data, [[1, 2], [2, 3], [3, 1]])
+ self.table.sort_by_column_id('col2', 'desc')
+ self.assertEquals(self.table.row_names, ['row3', 'row2', 'row1'])
+ self.assertEquals(self.table.data, [[2, 3], [1, 2], [3, 1]])
+
+ def test_pprint(self):
+ """only tests pprint doesn't raise an exception"""
+ self.table.pprint()
+ str(self.table)
+
+
+class GroupByTC(unittest.TestCase):
+ """specific test suite for groupby()"""
+ def setUp(self):
+ t = Table()
+ t.create_columns(['date', 'res', 'task', 'usage'])
+ t.append_row(['date1', 'ing1', 'task1', 0.3])
+ t.append_row(['date1', 'ing2', 'task2', 0.3])
+ t.append_row(['date2', 'ing3', 'task3', 0.3])
+ t.append_row(['date3', 'ing4', 'task2', 0.3])
+ t.append_row(['date1', 'ing1', 'task3', 0.3])
+ t.append_row(['date3', 'ing1', 'task3', 0.3])
+ self.table = t
+
+ def test_single_groupby(self):
+ """tests groupby() on a single column"""
+ grouped = self.table.groupby('date')
+ self.assertEquals(len(grouped), 3)
+ self.assertEquals(len(grouped['date1']), 3)
+ self.assertEquals(len(grouped['date2']), 1)
+ self.assertEquals(len(grouped['date3']), 2)
+ self.assertEquals(grouped['date1'], [
+ ('date1', 'ing1', 'task1', 0.3),
+ ('date1', 'ing2', 'task2', 0.3),
+ ('date1', 'ing1', 'task3', 0.3),
+ ])
+ self.assertEquals(grouped['date2'], [('date2', 'ing3', 'task3', 0.3)])
+ self.assertEquals(grouped['date3'], [
+ ('date3', 'ing4', 'task2', 0.3),
+ ('date3', 'ing1', 'task3', 0.3),
+ ])
+
+ def test_multiple_groupby(self):
+ """tests groupby() on several columns"""
+ grouped = self.table.groupby('date', 'task')
+ self.assertEquals(len(grouped), 3)
+ self.assertEquals(len(grouped['date1']), 3)
+ self.assertEquals(len(grouped['date2']), 1)
+ self.assertEquals(len(grouped['date3']), 2)
+ self.assertEquals(grouped['date1']['task1'], [('date1', 'ing1', 'task1', 0.3)])
+ self.assertEquals(grouped['date2']['task3'], [('date2', 'ing3', 'task3', 0.3)])
+ self.assertEquals(grouped['date3']['task2'], [('date3', 'ing4', 'task2', 0.3)])
+ date3 = grouped['date3']
+ self.assertRaises(KeyError, date3.__getitem__, 'task1')
+
+
+ def test_select(self):
+ """tests Table.select() method"""
+ rows = self.table.select('date', 'date1')
+ self.assertEquals(rows, [
+ ('date1', 'ing1', 'task1', 0.3),
+ ('date1', 'ing2', 'task2', 0.3),
+ ('date1', 'ing1', 'task3', 0.3),
+ ])
+
+class TableStyleSheetTC(unittest.TestCase):
+ """The Stylesheet test case
+ """
+ def setUp(self):
+ """Builds a simple table to test the stylesheet
+ """
+ self.table = Table()
+ self.table.create_row('row1')
+ self.table.create_columns(['a','b','c'])
+ self.stylesheet = TableStyleSheet()
+ # We don't want anything to be printed
+ self.stdout_backup = sys.stdout
+ sys.stdout = StringIO()
+
+ def tearDown(self):
+ sys.stdout = self.stdout_backup
+
+ def test_add_rule(self):
+ """Tests that the regex pattern works as expected.
+ """
+ rule = '0_2 = sqrt(0_0**2 + 0_1**2)'
+ self.stylesheet.add_rule(rule)
+ self.table.set_row(0, [3,4,0])
+ self.table.apply_stylesheet(self.stylesheet)
+ self.assertEquals(self.table[0], [3,4,5])
+ self.assertEquals(len(self.stylesheet.rules), 1)
+ self.stylesheet.add_rule('some bad rule with bad syntax')
+ self.assertEquals(len(self.stylesheet.rules), 1, "Ill-formed rule mustn't be added")
+ self.assertEquals(len(self.stylesheet.instructions), 1, "Ill-formed rule mustn't be added")
+
+ def test_stylesheet_init(self):
+ """tests Stylesheet.__init__"""
+ rule = '0_2 = 1'
+ sheet = TableStyleSheet([rule, 'bad rule'])
+ self.assertEquals(len(sheet.rules), 1, "Ill-formed rule mustn't be added")
+ self.assertEquals(len(sheet.instructions), 1, "Ill-formed rule mustn't be added")
+
+ def test_rowavg_rule(self):
+ """Tests that add_rowavg_rule works as expected
+ """
+ self.table.set_row(0, [10,20,0])
+ self.stylesheet.add_rowavg_rule((0,2), 0, 0, 1)
+ self.table.apply_stylesheet(self.stylesheet)
+ val = self.table[0,2]
+ self.assert_(int(val) == 15)
+
+
+ def test_rowsum_rule(self):
+ """Tests that add_rowsum_rule works as expected
+ """
+ self.table.set_row(0, [10,20,0])
+ self.stylesheet.add_rowsum_rule((0,2), 0, 0, 1)
+ self.table.apply_stylesheet(self.stylesheet)
+ val = self.table[0,2]
+ self.assert_(val == 30)
+
+
+ def test_colavg_rule(self):
+ """Tests that add_colavg_rule works as expected
+ """
+ self.table.set_row(0, [10,20,0])
+ self.table.append_row([12,8,3], 'row2')
+ self.table.create_row('row3')
+ self.stylesheet.add_colavg_rule((2,0), 0, 0, 1)
+ self.table.apply_stylesheet(self.stylesheet)
+ val = self.table[2,0]
+ self.assert_(int(val) == 11)
+
+
+ def test_colsum_rule(self):
+ """Tests that add_colsum_rule works as expected
+ """
+ self.table.set_row(0, [10,20,0])
+ self.table.append_row([12,8,3], 'row2')
+ self.table.create_row('row3')
+ self.stylesheet.add_colsum_rule((2,0), 0, 0, 1)
+ self.table.apply_stylesheet(self.stylesheet)
+ val = self.table[2,0]
+ self.assert_(val == 22)
+
+
+
+class TableStyleTC(unittest.TestCase):
+ """Test suite for TableSuite"""
+ def setUp(self):
+ self.table = Table()
+ self.table.create_rows(['row1', 'row2', 'row3'])
+ self.table.create_columns(['col1', 'col2'])
+ self.style = TableStyle(self.table)
+ self._tested_attrs = (('size', '1*'),
+ ('alignment', 'right'),
+ ('unit', ''))
+
+ def test_getset(self):
+ """tests style's get and set methods"""
+ for attrname, default_value in self._tested_attrs:
+ getter = getattr(self.style, 'get_%s' % attrname)
+ setter = getattr(self.style, 'set_%s' % attrname)
+ self.assertRaises(KeyError, getter, 'badcol')
+ self.assertEquals(getter('col1'), default_value)
+ setter('FOO', 'col1')
+ self.assertEquals(getter('col1'), 'FOO')
+
+ def test_getset_index(self):
+ """tests style's get and set by index methods"""
+ for attrname, default_value in self._tested_attrs:
+ getter = getattr(self.style, 'get_%s' % attrname)
+ setter = getattr(self.style, 'set_%s' % attrname)
+ igetter = getattr(self.style, 'get_%s_by_index' % attrname)
+ isetter = getattr(self.style, 'set_%s_by_index' % attrname)
+ self.assertEquals(getter('__row_column__'), default_value)
+ isetter('FOO', 0)
+ self.assertEquals(getter('__row_column__'), 'FOO')
+ self.assertEquals(igetter(0), 'FOO')
+ self.assertEquals(getter('col1'), default_value)
+ isetter('FOO', 1)
+ self.assertEquals(getter('col1'), 'FOO')
+ self.assertEquals(igetter(1), 'FOO')
+
+
+class RendererTC(unittest.TestCase):
+ """Test suite for DocbookRenderer"""
+ def setUp(self):
+ self.renderer = DocbookRenderer(alignment = True)
+ self.table = Table()
+ self.table.create_rows(['row1', 'row2', 'row3'])
+ self.table.create_columns(['col1', 'col2'])
+ self.style = TableStyle(self.table)
+ self.base_renderer = TableCellRenderer()
+
+ def test_cell_content(self):
+ """test how alignment is rendered"""
+ entry_xml = self.renderer._render_cell_content('data', self.style, 1)
+ self.assertEquals(entry_xml, "<entry align='right'>data</entry>\n")
+ self.style.set_alignment_by_index('left', 1)
+ entry_xml = self.renderer._render_cell_content('data', self.style, 1)
+ self.assertEquals(entry_xml, "<entry align='left'>data</entry>\n")
+
+ def test_default_content_rendering(self):
+ """tests that default rendering just prints the cell's content"""
+ rendered_cell = self.base_renderer._render_cell_content('data', self.style, 1)
+ self.assertEquals(rendered_cell, "data")
+
+ def test_replacement_char(self):
+ """tests that 0 is replaced when asked for"""
+ cell_content = self.base_renderer._make_cell_content(0, self.style, 1)
+ self.assertEquals(cell_content, 0)
+ self.base_renderer.properties['skip_zero'] = '---'
+ cell_content = self.base_renderer._make_cell_content(0, self.style, 1)
+ self.assertEquals(cell_content, '---')
+
+ def test_unit(self):
+ """tests if units are added"""
+ self.base_renderer.properties['units'] = True
+ self.style.set_unit_by_index('EUR', 1)
+ cell_content = self.base_renderer._make_cell_content(12, self.style, 1)
+ self.assertEquals(cell_content, '12 EUR')
+
+
+from logilab.common import testlib
+class DocbookTableWriterTC(testlib.TestCase):
+ """TestCase for table's writer"""
+ def setUp(self):
+ self.stream = StringIO()
+ self.table = Table()
+ self.table.create_rows(['row1', 'row2', 'row3'])
+ self.table.create_columns(['col1', 'col2'])
+ self.writer = DocbookTableWriter(self.stream, self.table, None)
+ self.writer.set_renderer(DocbookRenderer())
+
+ def test_write_table(self):
+ """make sure write_table() doesn't raise any exception"""
+ self.writer.write_table()
+
+ def test_abstract_writer(self):
+ """tests that Abstract Writers can't be used !"""
+ writer = TableWriter(self.stream, self.table, None)
+ self.assertRaises(NotImplementedError, writer.write_table)
+
+
+if __name__ == '__main__':
+ testlib.unittest_main()
diff --git a/test/unittest_testlib.py b/test/unittest_testlib.py
new file mode 100644
index 0000000..09c50e2
--- /dev/null
+++ b/test/unittest_testlib.py
@@ -0,0 +1,125 @@
+"""unittest module for logilab.common.testlib"""
+
+__revision__ = '$Id: unittest_testlib.py,v 1.5 2006-02-09 22:37:46 nico Exp $'
+
+import unittest
+from os.path import join, dirname
+try:
+ __file__
+except NameError:
+ import sys
+ __file__ = sys.argv[0]
+
+from logilab.common import testlib
+
+class MockTestCase(testlib.TestCase):
+ def __init__(self):
+ # Do not call unittest.TestCase's __init__
+ pass
+
+ def fail(self, msg):
+ raise AssertionError(msg)
+
+class TestlibTC(testlib.TestCase):
+
+ def setUp(self):
+ self.tc = MockTestCase()
+
+ def test_dict_equals(self):
+ """tests TestCase.assertDictEquals"""
+ d1 = {'a' : 1, 'b' : 2}
+ d2 = {'a' : 1, 'b' : 3}
+ d3 = dict(d1)
+ self.assertRaises(AssertionError, self.tc.assertDictEquals, d1, d2)
+ self.tc.assertDictEquals(d1, d3)
+ self.tc.assertDictEquals(d3, d1)
+ self.tc.assertDictEquals(d1, d1)
+
+ def test_list_equals(self):
+ """tests TestCase.assertListEquals"""
+ l1 = range(10)
+ l2 = range(5)
+ l3 = range(10)
+ self.assertRaises(AssertionError, self.tc.assertListEquals, l1, l2)
+ self.tc.assertListEquals(l1, l1)
+ self.tc.assertListEquals(l1, l3)
+ self.tc.assertListEquals(l3, l1)
+
+ def test_lines_equals(self):
+ """tests assertLineEquals"""
+ t1 = """some
+ text
+"""
+ t2 = """some
+
+ text"""
+ t3 = """some
+ text"""
+ self.assertRaises(AssertionError, self.tc.assertLinesEquals, t1, t2)
+ self.tc.assertLinesEquals(t1, t3)
+ self.tc.assertLinesEquals(t3, t1)
+ self.tc.assertLinesEquals(t1, t1)
+
+ def test_xml_valid(self):
+ """tests xml is valid"""
+ valid = """<root>
+ <hello />
+ <world>Logilab</world>
+ </root>"""
+ invalid = """<root><h2> </root>"""
+ self.tc.assertXMLStringWellFormed(valid)
+ self.assertRaises(AssertionError, self.tc.assertXMLStringWellFormed, invalid)
+ invalid = """<root><h2 </h2> </root>"""
+ self.assertRaises(AssertionError, self.tc.assertXMLStringWellFormed, invalid)
+
+
+ def test_set_equality_for_lists(self):
+ l1 = [0, 1, 2]
+ l2 = [1, 2, 3]
+ self.assertRaises(AssertionError, self.tc.assertSetEqual, l1, l2)
+ self.tc.assertSetEqual(l1, l1)
+ self.tc.assertSetEqual([], [])
+ l1 = [0, 1, 1]
+ l2 = [0, 1]
+ self.assertRaises(AssertionError, self.tc.assertSetEqual, l1, l2)
+ self.tc.assertSetEqual(l1, l1)
+
+
+ def test_set_equality_for_dicts(self):
+ d1 = {'a' : 1, 'b' : 2}
+ d2 = {'a' : 1}
+ self.assertRaises(AssertionError, self.tc.assertSetEqual, d1, d2)
+ self.tc.assertSetEqual(d1, d1)
+ self.tc.assertSetEqual({}, {})
+
+ def test_set_equality_for_iterables(self):
+ self.assertRaises(AssertionError, self.tc.assertSetEqual, xrange(5), xrange(6))
+ self.tc.assertSetEqual(xrange(5), range(5))
+ self.tc.assertSetEqual([], ())
+
+ def test_file_equality(self):
+ foo = join(dirname(__file__), 'data', 'foo.txt')
+ spam = join(dirname(__file__), 'data', 'spam.txt')
+ self.assertRaises(AssertionError, self.tc.assertFileEqual, foo, spam)
+ self.tc.assertFileEqual(foo, foo)
+
+ def test_stream_equality(self):
+ foo = join(dirname(__file__), 'data', 'foo.txt')
+ spam = join(dirname(__file__), 'data', 'spam.txt')
+ stream1 = file(foo)
+ self.tc.assertStreamEqual(stream1, stream1)
+ stream1 = file(foo)
+ stream2 = file(spam)
+ self.assertRaises(AssertionError, self.tc.assertStreamEqual, stream1, stream2)
+
+ def test_text_equality(self):
+ foo = join(dirname(__file__), 'data', 'foo.txt')
+ spam = join(dirname(__file__), 'data', 'spam.txt')
+ text1 = file(foo).read()
+ self.tc.assertTextEqual(text1, text1)
+ text2 = file(spam).read()
+ self.assertRaises(AssertionError, self.tc.assertTextEqual, text1, text2)
+
+if __name__ == '__main__':
+ testlib.unittest_main()
+
diff --git a/test/unittest_textutils.py b/test/unittest_textutils.py
new file mode 100644
index 0000000..3e6ce61
--- /dev/null
+++ b/test/unittest_textutils.py
@@ -0,0 +1,120 @@
+"""
+unit tests for module textutils
+skeleton generated by /home/syt/cvs_work/logilab/pyreverse/py2tests.py on Sep 08 at 09:1:31
+
+
+Some text manipulation utilities
+
+"""
+__revision__ = "$Id: unittest_textutils.py,v 1.8 2005-02-07 18:01:05 syt Exp $"
+
+import unittest
+import sys
+import re
+from os.path import join
+from os import getcwd, linesep
+
+from logilab.common.textutils import *
+
+
+if linesep != '\n':
+ import re
+ LINE_RGX = re.compile(linesep)
+ def ulines(string):
+ return LINE_RGX.sub('\n', string)
+else:
+ def ulines(string):
+ return string
+
+class NormalizeTextTC(unittest.TestCase):
+
+ def test_known_values(self):
+ self.assertEquals(ulines(normalize_text('''some really malformated
+ text.
+With some times some veeeeeeeeeeeeeeerrrrryyyyyyyyyyyyyyyyyyy loooooooooooooooooooooong linnnnnnnnnnnes
+
+and empty lines!
+ ''')),
+ '''some really malformated text. With some times some
+veeeeeeeeeeeeeeerrrrryyyyyyyyyyyyyyyyyyy loooooooooooooooooooooong
+linnnnnnnnnnnes
+
+and empty lines!''')
+
+class NormalizeParagraphTC(unittest.TestCase):
+
+ def test_known_values(self):
+ self.assertEquals(ulines(normalize_text("""This package contains test files shared by the logilab-common package. It isn't
+necessary to install this package unless you want to execute or look at
+the tests.""", indent=' ', line_len=70)),
+ """\
+ This package contains test files shared by the logilab-common
+ package. It isn't necessary to install this package unless you want
+ to execute or look at the tests.""")
+
+
+class GetCsvTC(unittest.TestCase):
+
+ def test_known(self):
+ self.assertEquals(get_csv('a, b,c '), ['a', 'b', 'c'])
+
+
+RGX = re.compile('abcd')
+class PrettyMatchTC(unittest.TestCase):
+
+ def test_known(self):
+ string = 'hiuherabcdef'
+ self.assertEquals(ulines(pretty_match(RGX.search(string), string)),
+ 'hiuherabcdef\n ^^^^')
+ def test_known_values_1(self):
+ rgx = re.compile('(to*)')
+ string = 'toto'
+ match = rgx.search(string)
+ self.assertEquals(ulines(pretty_match(match, string)), '''toto
+^^''')
+
+ def test_known_values_2(self):
+ rgx = re.compile('(to*)')
+ string = ''' ... ... to to
+ ... ... '''
+ match = rgx.search(string)
+ self.assertEquals(ulines(pretty_match(match, string)), ''' ... ... to to
+ ^^
+ ... ...''')
+
+
+
+
+class SearchAllTC(unittest.TestCase):
+
+ def test_known(self):
+ string = 'hiuherabcdefabcd'
+ self.assertEquals(len(searchall(RGX, string)), 2)
+
+
+class UnquoteTC(unittest.TestCase):
+ def test(self):
+ self.assertEquals(unquote('"toto"'), 'toto')
+ self.assertEquals(unquote("'l'inenarrable toto'"), "l'inenarrable toto")
+ self.assertEquals(unquote("no quote"), "no quote")
+
+
+class ColorizeAnsiTC(unittest.TestCase):
+ def test_known(self):
+ self.assertEquals(colorize_ansi('hello', 'blue', 'strike'), '\x1b[9;34mhello\x1b[0m')
+ self.assertEquals(colorize_ansi('hello', style='strike, inverse'), '\x1b[9;7mhello\x1b[0m')
+ self.assertEquals(colorize_ansi('hello', None, None), 'hello')
+ self.assertEquals(colorize_ansi('hello', '', ''), 'hello')
+ def test_raise(self):
+ self.assertRaises(KeyError, colorize_ansi, 'hello', 'bleu', None)
+ self.assertRaises(KeyError, colorize_ansi, 'hello', None, 'italique')
+
+
+from logilab.common.testlib import DocTest
+class ModuleDocTest(DocTest):
+ """test doc test in this module"""
+ from logilab.common import textutils as module
+del DocTest # necessary if we don't want it to be executed (we don't...)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unittest_tree.py b/test/unittest_tree.py
new file mode 100644
index 0000000..2c9eb39
--- /dev/null
+++ b/test/unittest_tree.py
@@ -0,0 +1,201 @@
+"""
+unit tests for module logilab.common.tree
+skeleton generated by /home/syt/bin/py2tests on Jan 20 at 10:43:25
+"""
+__revision__ = "$Id: unittest_tree.py,v 1.9 2005-09-07 23:44:02 nico Exp $"
+
+import unittest
+from logilab.common.tree import *
+
+tree = ('root', (
+ ('child_1_1', (
+ ('child_2_1', ()), ('child_2_2', (
+ ('child_3_1', ()),)))),
+ ('child_1_2', (('child_2_3', ()),))))
+
+def make_tree(tuple):
+ n = Node(tuple[0])
+ for child in tuple[1]:
+ n.append(make_tree(child))
+ return n
+
+class Node_ClassTest(unittest.TestCase):
+    """ a basic tree node, characterised by an id"""
+ def setUp(self):
+ """ called before each test from this class """
+ self.o = make_tree(tree)
+ def test_known_values_remove(self):
+ """
+ remove a child node
+ """
+ self.o.remove(self.o.get_node_by_id('child_1_1'))
+ self.assertRaises(NodeNotFound, self.o.get_node_by_id, 'child_1_1')
+
+ def test_known_values_replace(self):
+ """
+ replace a child node with another
+ """
+ self.o.replace(self.o.get_node_by_id('child_1_1'), Node('hoho'))
+ self.assertRaises(NodeNotFound, self.o.get_node_by_id, 'child_1_1')
+ self.assertEqual(self.o.get_node_by_id('hoho'), self.o.children[0])
+
+ def test_known_values_get_sibling(self):
+ """
+ return the sibling node that has given id
+ """
+ self.assertEqual(self.o.children[0].get_sibling('child_1_2'), self.o.children[1], None)
+
+ def test_raise_get_sibling_NodeNotFound(self):
+ self.assertRaises(NodeNotFound, self.o.children[0].get_sibling, 'houhou')
+
+ def test_known_values_get_node_by_id(self):
+ """
+ return node in whole hierarchy that has given id
+ """
+ self.assertEqual(self.o.get_node_by_id('child_1_1'), self.o.children[0])
+
+ def test_raise_get_node_by_id_NodeNotFound(self):
+ self.assertRaises(NodeNotFound, self.o.get_node_by_id, 'houhou')
+
+ def test_known_values_get_child_by_id(self):
+ """
+ return child of given id
+ """
+ self.assertEqual(self.o.get_child_by_id('child_2_1', recurse=1), self.o.children[0].children[0])
+
+ def test_raise_get_child_by_id_NodeNotFound(self):
+ self.assertRaises(NodeNotFound, self.o.get_child_by_id, nid='child_2_1')
+ self.assertRaises(NodeNotFound, self.o.get_child_by_id, 'houhou')
+
+ def test_known_values_get_child_by_path(self):
+ """
+ return child of given path (path is a list of ids)
+ """
+ self.assertEqual(self.o.get_child_by_path(['root', 'child_1_1', 'child_2_1']), self.o.children[0].children[0])
+
+ def test_raise_get_child_by_path_NodeNotFound(self):
+ self.assertRaises(NodeNotFound, self.o.get_child_by_path, ['child_1_1', 'child_2_11'])
+
+ def test_known_values_depth(self):
+ """
+ return depth of this node in the tree
+ """
+ self.assertEqual(self.o.depth(), 0)
+ self.assertEqual(self.o.get_child_by_id('child_2_1',True).depth(), 2)
+
+ def test_known_values_root(self):
+ """
+ return the root node of the tree
+ """
+ self.assertEqual(self.o.get_child_by_id('child_2_1', True).root(), self.o)
+
+ def test_known_values_leaves(self):
+ """
+        return a list with all the leaf nodes descendant from this node
+ """
+ self.assertEqual(self.o.leaves(), [self.o.get_child_by_id('child_2_1',True),
+ self.o.get_child_by_id('child_3_1',True),
+ self.o.get_child_by_id('child_2_3',True)])
+
+ def test_known_values_lineage(self):
+ c31 = self.o.get_child_by_id('child_3_1',True)
+ self.assertEqual(c31.lineage(), [self.o.get_child_by_id('child_3_1',True),
+ self.o.get_child_by_id('child_2_2',True),
+ self.o.get_child_by_id('child_1_1',True),
+ self.o])
+
+
+class post_order_list_FunctionTest(unittest.TestCase):
+ """"""
+ def setUp(self):
+ """ called before each test from this class """
+ self.o = make_tree(tree)
+
+ def test_known_values_post_order_list(self):
+ """
+ create a list with tree nodes for which the <filter> function returned true
+        in a post order fashion
+ """
+ L = ['child_2_1', 'child_3_1', 'child_2_2', 'child_1_1', 'child_2_3', 'child_1_2', 'root']
+ l = [n.id for n in post_order_list(self.o)]
+ self.assertEqual(l, L, l)
+
+ def test_known_values_post_order_list2(self):
+ """
+ create a list with tree nodes for which the <filter> function returned true
+        in a post order fashion
+ """
+ def filter(node):
+ if node.id == 'child_2_2':
+ return 0
+ return 1
+ L = ['child_2_1', 'child_1_1', 'child_2_3', 'child_1_2', 'root']
+ l = [n.id for n in post_order_list(self.o, filter)]
+ self.assertEqual(l, L, l)
+
+
+class PostfixedDepthFirstIterator_ClassTest(unittest.TestCase):
+ """"""
+ def setUp(self):
+ """ called before each test from this class """
+ self.o = make_tree(tree)
+
+ def test_known_values_next(self):
+ L = ['child_2_1', 'child_3_1', 'child_2_2', 'child_1_1', 'child_2_3', 'child_1_2', 'root']
+ iter = PostfixedDepthFirstIterator(self.o)
+ o = iter.next()
+ i = 0
+ while o:
+ self.assertEqual(o.id, L[i])
+ o = iter.next()
+ i += 1
+
+
+class pre_order_list_FunctionTest(unittest.TestCase):
+ """"""
+ def setUp(self):
+ """ called before each test from this class """
+ self.o = make_tree(tree)
+
+ def test_known_values_pre_order_list(self):
+ """
+ create a list with tree nodes for which the <filter> function returned true
+ in a pre order fashion
+ """
+ L = ['root', 'child_1_1', 'child_2_1', 'child_2_2', 'child_3_1', 'child_1_2', 'child_2_3']
+ l = [n.id for n in pre_order_list(self.o)]
+ self.assertEqual(l, L, l)
+
+ def test_known_values_pre_order_list2(self):
+ """
+ create a list with tree nodes for which the <filter> function returned true
+ in a pre order fashion
+ """
+ def filter(node):
+ if node.id == 'child_2_2':
+ return 0
+ return 1
+ L = ['root', 'child_1_1', 'child_2_1', 'child_1_2', 'child_2_3']
+ l = [n.id for n in pre_order_list(self.o, filter)]
+ self.assertEqual(l, L, l)
+
+
+class PrefixedDepthFirstIterator_ClassTest(unittest.TestCase):
+ """"""
+ def setUp(self):
+ """ called before each test from this class """
+ self.o = make_tree(tree)
+
+ def test_known_values_next(self):
+ L = ['root', 'child_1_1', 'child_2_1', 'child_2_2', 'child_3_1', 'child_1_2', 'child_2_3']
+ iter = PrefixedDepthFirstIterator(self.o)
+ o = iter.next()
+ i = 0
+ while o:
+ self.assertEqual(o.id, L[i])
+ o = iter.next()
+ i += 1
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unittest_ureports_html.py b/test/unittest_ureports_html.py
new file mode 100644
index 0000000..024750b
--- /dev/null
+++ b/test/unittest_ureports_html.py
@@ -0,0 +1,50 @@
+'''unit tests for ureports.html_writer
+'''
+
+__revision__ = "$Id: unittest_ureports_html.py,v 1.3 2005-05-27 12:27:08 syt Exp $"
+
+import unittest
+
+from utils import WriterTC
+from logilab.common.testlib import TestCase
+
+from logilab.common.ureports.html_writer import *
+
+class HTMLWriterTC(TestCase, WriterTC):
+
+ def setUp(self):
+ self.writer = HTMLWriter(1)
+
+ # Section tests ###########################################################
+ section_base = '''<div>
+<h1>Section title</h1>
+<p>Section\'s description.
+Blabla bla</p></div>
+'''
+ section_nested = '''<div>\n<h1>Section title</h1>\n<p>Section\'s description.\nBlabla bla</p><div>\n<h2>Subsection</h2>\n<p>Sub section description</p></div>\n</div>\n'''
+
+ # List tests ##############################################################
+ list_base = '''<ul>\n<li>item1</li>\n<li>item2</li>\n<li>item3</li>\n<li>item4</li>\n</ul>\n'''
+
+ nested_list = '''<ul>
+<li><p>blabla<ul>
+<li>1</li>
+<li>2</li>
+<li>3</li>
+</ul>
+</p></li>
+<li>an other point</li>
+</ul>
+'''
+
+ # Table tests #############################################################
+ table_base = '''<table>\n<tr class="odd">\n<td>head1</td>\n<td>head2</td>\n</tr>\n<tr class="even">\n<td>cell1</td>\n<td>cell2</td>\n</tr>\n</table>\n'''
+ field_table = '''<table class="field" id="mytable">\n<tr class="odd">\n<td>f1</td>\n<td>v1</td>\n</tr>\n<tr class="even">\n<td>f22</td>\n<td>v22</td>\n</tr>\n<tr class="odd">\n<td>f333</td>\n<td>v333</td>\n</tr>\n</table>\n'''
+ advanced_table = '''<table class="whatever" id="mytable">\n<tr class="header">\n<th>field</th>\n<th>value</th>\n</tr>\n<tr class="even">\n<td>f1</td>\n<td>v1</td>\n</tr>\n<tr class="odd">\n<td>f22</td>\n<td>v22</td>\n</tr>\n<tr class="even">\n<td>f333</td>\n<td>v333</td>\n</tr>\n<tr class="odd">\n<td> <a href="http://www.perdu.com">toi perdu ?</a></td>\n<td>&nbsp;</td>\n</tr>\n</table>\n'''
+
+
+ # VerbatimText tests ######################################################
+ verbatim_base = '''<pre>blablabla</pre>'''
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unittest_ureports_text.py b/test/unittest_ureports_text.py
new file mode 100644
index 0000000..7d2ceb1
--- /dev/null
+++ b/test/unittest_ureports_text.py
@@ -0,0 +1,90 @@
+'''unit tests for ureports.text_writer
+'''
+
+__revision__ = "$Id: unittest_ureports_text.py,v 1.4 2005-05-27 12:27:08 syt Exp $"
+
+import unittest
+from utils import WriterTC
+from logilab.common.testlib import TestCase
+
+from logilab.common.ureports.text_writer import TextWriter
+
+class TextWriterTC(TestCase, WriterTC):
+ def setUp(self):
+ self.writer = TextWriter()
+
+ # Section tests ###########################################################
+ section_base = '''
+Section title
+=============
+Section\'s description.
+Blabla bla
+
+'''
+ section_nested = '''
+Section title
+=============
+Section\'s description.
+Blabla bla
+
+Subsection
+----------
+Sub section description
+
+
+'''
+
+ # List tests ##############################################################
+ list_base = '''
+* item1
+* item2
+* item3
+* item4'''
+
+ nested_list = '''
+* blabla
+ - 1
+ - 2
+ - 3
+
+* an other point'''
+
+ # Table tests #############################################################
+ table_base = '''
++------+------+
+|head1 |head2 |
++------+------+
+|cell1 |cell2 |
++------+------+
+
+'''
+ field_table = '''
+f1 : v1
+f22 : v22
+f333: v333
+'''
+ advanced_table = '''
++---------------+------+
+|field |value |
++===============+======+
+|f1 |v1 |
++---------------+------+
+|f22 |v22 |
++---------------+------+
+|f333 |v333 |
++---------------+------+
+|`toi perdu ?`_ | |
++---------------+------+
+
+'''
+
+
+ # VerbatimText tests ######################################################
+ verbatim_base = '''::
+
+ blablabla
+
+'''
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/utils.py b/test/utils.py
new file mode 100644
index 0000000..b654b2f
--- /dev/null
+++ b/test/utils.py
@@ -0,0 +1,70 @@
+'''unit tests utilities for ureports
+'''
+
+__revision__ = "$Id: utils.py,v 1.3 2005-05-27 12:27:08 syt Exp $"
+
+from cStringIO import StringIO
+from logilab.common.ureports.nodes import *
+
+class WriterTC:
+ def _test_output(self, test_id, layout, msg=None):
+ buffer = StringIO()
+ self.writer.format(layout, buffer)
+ got = buffer.getvalue()
+ expected = getattr(self, test_id)
+ try:
+ self.assertLinesEquals(got, expected)
+ except:
+ print '**** got for %s' % test_id
+ print got
+ print '**** while expected'
+ print expected
+ print '****'
+ raise
+
+ def test_section(self):
+ layout = Section('Section title',
+ 'Section\'s description.\nBlabla bla')
+ self._test_output('section_base', layout)
+ layout.append(Section('Subsection', 'Sub section description'))
+ self._test_output('section_nested', layout)
+
+ def test_verbatim(self):
+ layout = VerbatimText('blablabla')
+ self._test_output('verbatim_base', layout)
+
+
+ def test_list(self):
+ layout = List(children=('item1', 'item2', 'item3', 'item4'))
+ self._test_output('list_base', layout)
+
+ def test_nested_list(self):
+ layout = List(children=(Paragraph(("blabla", List(children=('1', "2", "3")))),
+ "an other point"))
+ self._test_output('nested_list', layout)
+
+
+ def test_table(self):
+ layout = Table(cols=2, children=('head1', 'head2', 'cell1', 'cell2'))
+ self._test_output('table_base', layout)
+
+ def test_field_table(self):
+ table = Table(cols=2, klass='field', id='mytable')
+ for field, value in (('f1', 'v1'), ('f22', 'v22'), ('f333', 'v333')):
+ table.append(Text(field))
+ table.append(Text(value))
+ self._test_output('field_table', table)
+
+ def test_advanced_table(self):
+ table = Table(cols=2, klass='whatever', id='mytable', rheaders=1)
+ for field, value in (('field', 'value') ,('f1', 'v1'), ('f22', 'v22'), ('f333', 'v333')):
+ table.append(Text(field))
+ table.append(Text(value))
+ table.append(Link('http://www.perdu.com', 'toi perdu ?'))
+ table.append(Text(''))
+ self._test_output('advanced_table', table)
+
+
+## def test_image(self):
+## layout = Verbatim('blablabla')
+## self._test_output('verbatim_base', layout)
diff --git a/testlib.py b/testlib.py
new file mode 100644
index 0000000..dec6bd8
--- /dev/null
+++ b/testlib.py
@@ -0,0 +1,608 @@
+
+# modified copy of some functions from test/regrtest.py from PyXml
+
+""" Copyright (c) 2003-2005 LOGILAB S.A. (Paris, FRANCE).
+http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+Run tests.
+
+This will find all modules whose names match a given prefix in the test
+directory, and run them. Various command line options provide
+additional facilities.
+
+Command line options:
+
+-v: verbose -- run tests in verbose mode with output to stdout
+-q: quiet -- don't print anything except if a test fails
+-t: testdir -- directory where the tests will be found
+-x: exclude -- add a test to exclude
+-p: profile -- profiled execution
+
+If no non-option arguments are present, prefixes used are 'test',
+'regrtest', 'smoketest' and 'unittest'.
+
+"""
+from __future__ import nested_scopes
+
+__revision__ = "$Id: testlib.py,v 1.47 2006-04-19 10:26:15 adim Exp $"
+
+import sys
+import os
+import getopt
+import traceback
+import unittest
+import difflib
+from warnings import warn
+
+try:
+ from test import test_support
+except ImportError:
+ # not always available
+ class TestSupport:
+ def unload(self, test):
+ pass
+ test_support = TestSupport()
+
+from logilab.common import class_renamed, deprecated_function
+from logilab.common.compat import set, enumerate
+from logilab.common.modutils import load_module_from_name
+
+__all__ = ['main', 'unittest_main', 'find_tests', 'run_test', 'spawn']
+
+DEFAULT_PREFIXES = ('test', 'regrtest', 'smoketest', 'unittest',
+ 'func', 'validation')
+
+def main(testdir=os.getcwd()):
+ """Execute a test suite.
+
+ This also parses command-line options and modifies its behaviour
+ accordingly.
+
+ tests -- a list of strings containing test names (optional)
+ testdir -- the directory in which to look for tests (optional)
+
+ Users other than the Python test suite will certainly want to
+ specify testdir; if it's omitted, the directory containing the
+ Python test suite is searched for.
+
+ If the tests argument is omitted, the tests listed on the
+ command-line will be used. If that's empty, too, then all *.py
+ files beginning with test_ will be used.
+
+ """
+
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], 'vqx:t:p')
+ except getopt.error, msg:
+ print msg
+ print __doc__
+ return 2
+ verbose = 0
+ quiet = 0
+ profile = 0
+ exclude = []
+ for o, a in opts:
+ if o == '-v':
+ verbose = verbose+1
+ elif o == '-q':
+ quiet = 1;
+ verbose = 0
+ elif o == '-x':
+ exclude.append(a)
+ elif o == '-t':
+ testdir = a
+ elif o == '-p':
+ profile = 1
+ elif o == '-h':
+ print __doc__
+ sys.exit(0)
+
+ for i in range(len(args)):
+ # Strip trailing ".py" from arguments
+ if args[i][-3:] == '.py':
+ args[i] = args[i][:-3]
+ if exclude:
+ for i in range(len(exclude)):
+ # Strip trailing ".py" from arguments
+ if exclude[i][-3:] == '.py':
+ exclude[i] = exclude[i][:-3]
+ tests = find_tests(testdir, args or DEFAULT_PREFIXES, excludes=exclude)
+ sys.path.insert(0, testdir)
+ # Tell tests to be moderately quiet
+ test_support.verbose = verbose
+ if profile:
+ print >> sys.stderr, '** profiled run'
+ from hotshot import Profile
+ prof = Profile('stones.prof')
+ good, bad, skipped, all_result = prof.runcall(run_tests, tests, quiet,
+ verbose)
+ prof.close()
+ else:
+ good, bad, skipped, all_result = run_tests(tests, quiet, verbose)
+ if not quiet:
+ print '*'*80
+ if all_result:
+ print 'Ran %s test cases' % all_result.testsRun,
+ if all_result.errors:
+ print ', %s errors' % len(all_result.errors),
+ if all_result.failures:
+ print ', %s failed' % len(all_result.failures),
+ if all_result.skipped:
+ print ', %s skipped' % len(all_result.skipped),
+ print
+ if good:
+ if not bad and not skipped and len(good) > 1:
+ print "All",
+ print _count(len(good), "test"), "OK."
+ if bad:
+ print _count(len(bad), "test"), "failed:",
+ print ', '.join(bad)
+ if skipped:
+ print _count(len(skipped), "test"), "skipped:",
+ print ', '.join(['%s (%s)' % (test, msg) for test, msg in skipped])
+ if profile:
+ from hotshot import stats
+ stats = stats.load('stones.prof')
+ stats.sort_stats('time', 'calls')
+ stats.print_stats(30)
+ sys.exit(len(bad) + len(skipped))
+
+def run_tests(tests, quiet, verbose, runner=None):
+ """ execute a list of tests
+    return a 4-tuple with:
+    _ the list of passed tests
+    _ the list of failed tests
+    _ the list of skipped tests, and the aggregated test result
+ """
+ good = []
+ bad = []
+ skipped = []
+ all_result = None
+ for test in tests:
+ if not quiet:
+ print
+ print '-'*80
+ print "Executing", test
+ result = run_test(test, verbose, runner)
+ if type(result) is type(''):
+            # an unexpected error occurred
+ skipped.append( (test, result))
+ else:
+ if all_result is None:
+ all_result = result
+ else:
+ all_result.testsRun += result.testsRun
+ all_result.failures += result.failures
+ all_result.errors += result.errors
+ all_result.skipped += result.skipped
+ if result.errors or result.failures:
+ bad.append(test)
+ if verbose:
+ print "test", test, \
+ "failed -- %s errors, %s failures" % (
+ len(result.errors), len(result.failures))
+ else:
+ good.append(test)
+
+ return good, bad, skipped, all_result
+
+def find_tests(testdir,
+ prefixes=DEFAULT_PREFIXES, suffix=".py",
+ excludes=(),
+ remove_suffix=1):
+ """
+ Return a list of all applicable test modules.
+ """
+ tests = []
+ for name in os.listdir(testdir):
+ if not suffix or name[-len(suffix):] == suffix:
+ for prefix in prefixes:
+ if name[:len(prefix)] == prefix:
+ if remove_suffix:
+ name = name[:-len(suffix)]
+ if name not in excludes:
+ tests.append(name)
+ tests.sort()
+ return tests
+
+
+def run_test(test, verbose, runner=None):
+ """
+ Run a single test.
+
+ test -- the name of the test
+ verbose -- if true, print more messages
+ """
+ test_support.unload(test)
+ try:
+ m = load_module_from_name(test, path=sys.path)
+# m = __import__(test, globals(), locals(), sys.path)
+ try:
+ suite = m.suite
+ if hasattr(suite, 'func_code'):
+ suite = suite()
+ except AttributeError:
+ loader = unittest.TestLoader()
+ suite = loader.loadTestsFromModule(m)
+ if runner is None:
+ runner = SkipAwareTextTestRunner()
+ return runner.run(suite)
+ except KeyboardInterrupt, v:
+ raise KeyboardInterrupt, v, sys.exc_info()[2]
+ except:
+ type, value = sys.exc_info()[:2]
+ msg = "test %s crashed -- %s : %s" % (test, type, value)
+ if verbose:
+ traceback.print_exc()
+ return msg
+
+def _count(n, word):
+ """format word according to n"""
+ if n == 1:
+ return "%d %s" % (n, word)
+ else:
+ return "%d %ss" % (n, word)
+
+
+## PostMortem Debug facilities #####
+from pdb import Pdb
+class Debugger(Pdb):
+ def __init__(self, tcbk):
+ Pdb.__init__(self)
+ self.reset()
+ while tcbk.tb_next is not None:
+ tcbk = tcbk.tb_next
+ self._tcbk = tcbk
+
+ def start(self):
+ self.interaction(self._tcbk.tb_frame, self._tcbk)
+
+def start_interactive_mode(debuggers, descrs):
+ """starts an interactive shell so that the user can inspect errors
+ """
+ while True:
+ print "Choose a test to debug:"
+ print "\n".join(['\t%s : %s' % (i, descr) for i, descr in enumerate(descrs)])
+ print "Type 'exit' (or ^D) to quit"
+ print
+ try:
+ todebug = raw_input('Enter a test name: ')
+ if todebug.strip().lower() == 'exit':
+ print
+ break
+ else:
+ try:
+ testindex = int(todebug)
+ debugger = debuggers[testindex]
+ except (ValueError, IndexError):
+ print "ERROR: invalid test number %r" % (todebug,)
+ else:
+ debugger.start()
+ except (EOFError, KeyboardInterrupt):
+ print
+ break
+
+
+# test utils ##################################################################
+from cStringIO import StringIO
+
+class SkipAwareTestResult(unittest._TextTestResult):
+
+ def __init__(self, stream, descriptions, verbosity):
+ unittest._TextTestResult.__init__(self, stream, descriptions, verbosity)
+ self.skipped = []
+ self.debuggers = []
+ self.descrs = []
+
+ def _create_pdb(self, test_descr):
+ self.debuggers.append(Debugger(sys.exc_info()[2]))
+ self.descrs.append(test_descr)
+
+ def addError(self, test, err):
+ exc_type, exc, tcbk = err
+ # hack to avoid overriding the whole __call__ machinery in TestCase
+ if exc_type == TestSkipped:
+ self.addSkipped(test, exc)
+ else:
+ unittest._TextTestResult.addError(self, test, err)
+ self._create_pdb(self.getDescription(test))
+
+ def addFailure(self, test, err):
+ unittest._TextTestResult.addFailure(self, test, err)
+ self._create_pdb(self.getDescription(test))
+
+ def addSkipped(self, test, reason):
+ self.skipped.append((test, reason))
+ if self.showAll:
+ self.stream.writeln("SKIPPED")
+ elif self.dots:
+ self.stream.write('S')
+
+ def printErrors(self):
+ unittest._TextTestResult.printErrors(self)
+ self.printSkippedList()
+
+ def printSkippedList(self):
+ for test, err in self.skipped:
+ self.stream.writeln(self.separator1)
+ self.stream.writeln("%s: %s" % ('SKIPPED', self.getDescription(test)))
+ self.stream.writeln("\t%s" % err)
+
+
+class SkipAwareTextTestRunner(unittest.TextTestRunner):
+
+ def _makeResult(self):
+ return SkipAwareTestResult(self.stream, self.descriptions, self.verbosity)
+
+
+class SkipAwareTestProgram(unittest.TestProgram):
+ # XXX: don't try to stay close to unittest.py, use optparse
+ USAGE = """\
+Usage: %(progName)s [options] [test] [...]
+
+Options:
+ -h, --help Show this message
+ -v, --verbose Verbose output
+ -i, --pdb Enable test failure inspection
+ -q, --quiet Minimal output
+
+Examples:
+ %(progName)s - run default set of tests
+ %(progName)s MyTestSuite - run suite 'MyTestSuite'
+ %(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
+ %(progName)s MyTestCase - run all 'test*' test methods
+ in MyTestCase
+"""
+ def parseArgs(self, argv):
+ self.pdbmode = False
+ import getopt
+ try:
+ options, args = getopt.getopt(argv[1:], 'hHviq',
+ ['help','verbose','quiet', 'pdb'])
+ for opt, value in options:
+ if opt in ('-h','-H','--help'):
+ self.usageExit()
+ if opt in ('-i', '--pdb'):
+ self.pdbmode = True
+ if opt in ('-q','--quiet'):
+ self.verbosity = 0
+ if opt in ('-v','--verbose'):
+ self.verbosity = 2
+ if len(args) == 0 and self.defaultTest is None:
+ self.test = self.testLoader.loadTestsFromModule(self.module)
+ return
+ if len(args) > 0:
+ self.testNames = args
+ else:
+ self.testNames = (self.defaultTest,)
+ self.createTests()
+ except getopt.error, msg:
+ self.usageExit(msg)
+
+
+ def runTests(self):
+ self.testRunner = SkipAwareTextTestRunner(verbosity=self.verbosity)
+ result = self.testRunner.run(self.test)
+ if os.environ.get('PYDEBUG'):
+ warn("PYDEBUG usage is deprecated, use -i / --pdb instead", DeprecationWarning)
+ self.pdbmode = True
+ if result.debuggers and self.pdbmode:
+ start_interactive_mode(result.debuggers, result.descrs)
+ sys.exit(not result.wasSuccessful())
+
+def unittest_main():
+    """use this function if you want to have the same functionality as unittest.main"""
+ SkipAwareTestProgram()
+
+class TestSkipped(Exception):
+ """raised when a test is skipped"""
+
+class TestCase(unittest.TestCase):
+ """unittest.TestCase with some additional methods"""
+
+
+ def defaultTestResult(self):
+ return SkipAwareTestResult()
+
+ def skip(self, msg=None):
+ msg = msg or 'test was skipped'
+ # warn(msg, stacklevel=2)
+ raise TestSkipped(msg)
+ skipped_test = deprecated_function(skip)
+
+ def assertDictEquals(self, d1, d2):
+ """compares two dicts
+
+ If the two dict differ, the first difference is shown in the error
+ message
+ """
+ d1 = d1.copy()
+ for key, value in d2.items():
+ try:
+ if d1[key] != value:
+ self.fail('%r != %r for key %r' % (d1[key], value, key))
+ del d1[key]
+ except KeyError:
+ self.fail('missing %r key' % key)
+ if d1:
+ self.fail('d2 is lacking %r' % d1)
+ assertDictEqual = assertDictEquals
+
+ def assertSetEquals(self, got, expected):
+ """compares two iterables and shows difference between both"""
+ got, expected = list(got), list(expected)
+ self.assertEquals(len(got), len(expected))
+ got, expected = set(got), set(expected)
+ if got != expected:
+ missing = expected - got
+ unexpected = got - expected
+ self.fail('\tunexepected: %s\n\tmissing: %s' % (unexpected,
+ missing))
+ assertSetEqual = assertSetEquals
+
+ def assertListEquals(self, l1, l2):
+ """compares two lists
+
+ If the two list differ, the first difference is shown in the error
+ message
+ """
+ l1 = l1[:]
+ for i, value in enumerate(l2):
+ try:
+ if l1[0] != value:
+ self.fail('%r != %r for index %d' % (l1[0], value, i))
+ del l1[0]
+ except IndexError:
+ msg = 'l1 has only %d elements, not %s (at least %r missing)'
+ self.fail(msg % (i, len(l2), value))
+ if l1:
+ self.fail('l2 is lacking %r' % l1)
+ assertListEqual = assertListEquals
+
+ def assertLinesEquals(self, l1, l2):
+ """assert list of lines are equal"""
+ self.assertListEquals(l1.splitlines(), l2.splitlines())
+ assertLineEqual = assertLinesEquals
+
+ def assertXMLWellFormed(self, stream):
+ """asserts the XML stream is well-formed (no DTD conformance check)"""
+ from xml.sax import make_parser, SAXParseException
+ parser = make_parser()
+ try:
+ parser.parse(stream)
+ except SAXParseException:
+ self.fail('XML stream not well formed')
+ assertXMLValid = deprecated_function(assertXMLWellFormed,
+ 'assertXMLValid renamed to more precise assertXMLWellFormed')
+
+ def assertXMLStringWellFormed(self, xml_string):
+ """asserts the XML string is well-formed (no DTD conformance check)"""
+ stream = StringIO(xml_string)
+ self.assertXMLWellFormed(stream)
+
+ assertXMLStringValid = deprecated_function(
+ assertXMLStringWellFormed, 'assertXMLStringValid renamed to more precise assertXMLStringWellFormed')
+
+
+ def _difftext(self, lines1, lines2, junk=None):
+ junk = junk or (' ', '\t')
+ # result is a generator
+ result = difflib.ndiff(lines1, lines2, charjunk=lambda x: x in junk)
+ read = []
+ for line in result:
+ read.append(line)
+ # lines that don't start with a ' ' are diff ones
+ if not line.startswith(' '):
+ self.fail(''.join(read + list(result)))
+
+ def assertTextEquals(self, text1, text2, junk=None):
+ """compare two multiline strings (using difflib and splitlines())"""
+ self._difftext(text1.splitlines(True), text2.splitlines(True), junk)
+ assertTextEqual = assertTextEquals
+
+ def assertStreamEqual(self, stream1, stream2, junk=None):
+ """compare two streams (using difflib and readlines())"""
+        # if stream1 is stream2, readlines() on stream1 will also read lines
+ # in stream2, so they'll appear different, although they're not
+ if stream1 is stream2:
+ return
+ # make sure we compare from the beginning of the stream
+ stream1.seek(0)
+ stream2.seek(0)
+        # compare
+ self._difftext(stream1.readlines(), stream2.readlines(), junk)
+
+ def assertFileEqual(self, fname1, fname2, junk=(' ', '\t')):
+ """compares two files using difflib"""
+ self.assertStreamEqual(file(fname1), file(fname2), junk)
+
+
+import doctest
+
+class SkippedSuite(unittest.TestSuite):
+ def test(self):
+ """just there to trigger test execution"""
+ print 'goiooo'
+ self.skipped_test('doctest module has no DocTestSuite class')
+
+class DocTest(TestCase):
+ """trigger module doctest
+ I don't know how to make unittest.main consider the DocTestSuite instance
+ without this hack
+ """
+ def __call__(self, result=None):
+ try:
+ suite = doctest.DocTestSuite(self.module)
+ except AttributeError:
+ suite = SkippedSuite()
+ return suite.run(result)
+ run = __call__
+
+ def test(self):
+ """just there to trigger test execution"""
+
+MAILBOX = None
+
+class MockSMTP:
+ """fake smtplib.SMTP"""
+
+ def __init__(self, host, port):
+ self.host = host
+ self.port = port
+ global MAILBOX
+ self.reveived = MAILBOX = []
+
+ def set_debuglevel(self, debuglevel):
+ """ignore debug level"""
+
+ def sendmail(self, fromaddr, toaddres, body):
+ """push sent mail in the mailbox"""
+ self.reveived.append((fromaddr, toaddres, body))
+
+ def quit(self):
+ """ignore quit"""
+
+
+class MockConfigParser:
+ """fake ConfigParser.ConfigParser"""
+
+ def __init__(self, options):
+ self.options = options
+
+ def get(self, section, option):
+ """return option in section"""
+ return self.options[section][option]
+
+ def has_option(self, section, option):
+ """ask if option exists in section"""
+ try:
+ return self.get(section, option) or 1
+ except KeyError:
+ return 0
+
+
+class MockConnection:
+    """fake DB-API 2.0 connection AND cursor (i.e. cursor() returns self)"""
+
+ def __init__(self, results):
+ self.received = []
+ self.states = []
+ self.results = results
+
+ def cursor(self):
+ return self
+ def execute(self, query, args=None):
+ self.received.append( (query, args) )
+ def fetchone(self):
+ return self.results[0]
+ def fetchall(self):
+ return self.results
+ def commit(self):
+ self.states.append( ('commit', len(self.received)) )
+ def rollback(self):
+ self.states.append( ('rollback', len(self.received)) )
+ def close(self):
+ pass
+
+MockConnexion = class_renamed('MockConnexion', MockConnection)
+
diff --git a/textutils.py b/textutils.py
new file mode 100644
index 0000000..7767c11
--- /dev/null
+++ b/textutils.py
@@ -0,0 +1,321 @@
+# Copyright (c) 2003-2005 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""Some text manipulation utility functions.
+
+:version: $Revision: 1.25 $
+:author: Logilab
+:copyright: 2003-2005 LOGILAB S.A. (Paris, FRANCE)
+:contact: http://www.logilab.fr/ -- mailto:python-projects@logilab.org
+
+:group text formatting: normalize_text, normalize_paragraph, pretty_match,\
+unquote, colorize_ansi
+:group text manipulation: searchall, get_csv
+:sort: text formatting, text manipulation
+
+
+
+:type ANSI_STYLES: dict(str)
+:var ANSI_STYLES: dictionary mapping style identifier to ANSI terminal code
+
+:type ANSI_COLORS: dict(str)
+:var ANSI_COLORS: dictionary mapping color identifier to ANSI terminal code
+
+:type ANSI_PREFIX: str
+:var ANSI_PREFIX:
+  ANSI terminal code notifying the start of an ANSI escape sequence
+
+:type ANSI_END: str
+:var ANSI_END:
+  ANSI terminal code notifying the end of an ANSI escape sequence
+
+:type ANSI_RESET: str
+:var ANSI_RESET:
+  ANSI terminal code resetting format defined by a previous ANSI escape sequence
+"""
+
+__revision__ = "$Id: textutils.py,v 1.25 2005-09-06 08:51:01 alf Exp $"
+__docformat__ = "restructuredtext en"
+
+import re
+from os import linesep
+from warnings import warn
+
+
def searchall(rgx, data):
    """apply a regexp using "search" until no more match is found

    This function is deprecated, use re.finditer() instead.
    """
    warn('logilab.common.textutils.searchall() is deprecated, use '
         're.finditer() instead', DeprecationWarning)
    matches = []
    pos = 0
    while True:
        # resume searching right after the previous match
        found = rgx.search(data, pos)
        if found is None:
            break
        matches.append(found)
        pos = found.end()
    return matches
+
+
def unquote(string):
    """remove optional quotes (simple or double) from the string

    :type string: str or unicode
    :param string: an optionally quoted string

    :rtype: str or unicode
    :return: the unquoted string (or the input string if it wasn't quoted)
    """
    if not string:
        return string
    if string[0] in '"\'':
        string = string[1:]
    # guard against the string having become empty: the input may be a
    # single quote character (e.g. '"'), in which case the original code
    # raised IndexError on string[-1]
    if string and string[-1] in '"\'':
        string = string[:-1]
    return string
+
+
# two consecutive line jumps, whatever the end-of-line convention
_BLANKLINES_RGX = re.compile(r'\r?\n\r?\n')
# any run of whitespace (spaces, tabs, line jumps)
_NORM_SPACES_RGX = re.compile(r'\s+')

def normalize_text(text, line_len=80, indent=''):
    """normalize a text to display it with a maximum line size and
    optionally arbitrary indentation. Line jumps are normalized but blank
    lines are kept. The indentation string may be used to insert a
    comment (#) or a quoting (>) mark for instance.

    :type text: str or unicode
    :param text: the input text to normalize

    :type line_len: int
    :param line_len: expected maximum line's length, default to 80

    :type indent: str or unicode
    :param indent: optional string to use as indentation

    :rtype: str or unicode
    :return:
        the input text normalized to fit on lines with a maximized size
        inferior to `line_len`, and optionally prefixed by an
        indentation string
    """
    result = []
    # normalize each blank-line-separated paragraph on its own, so that
    # blank lines are preserved in the output
    for paragraph in _BLANKLINES_RGX.split(text):
        result.append(normalize_paragraph(paragraph, line_len, indent))
    return ('%s%s' % (linesep, linesep)).join(result)

def normalize_paragraph(text, line_len=80, indent=''):
    """normalize a text to display it with a maximum line size and
    optionally arbitrary indentation. Line jumps are normalized. The
    indentation string may be used to insert a comment mark for
    instance.

    :type text: str or unicode
    :param text: the input text to normalize

    :type line_len: int
    :param line_len: expected maximum line's length, default to 80

    :type indent: str or unicode
    :param indent: optional string to use as indentation

    :rtype: str or unicode
    :return:
        the input text normalized to fit on lines with a maximized size
        inferior to `line_len`, and optionally prefixed by an
        indentation string
    """
    # collapse every whitespace run (including line jumps) to one space
    text = _NORM_SPACES_RGX.sub(' ', text)
    lines = []
    while text:
        text = text.strip()
        # candidate break position (line_len unless the text is shorter)
        pos = min(len(indent) + len(text), line_len)
        if pos == line_len and len(text) > line_len:
            # line too long: search backward for a space to break on
            pos = pos - len(indent)
            while pos > 0 and text[pos] != ' ':
                pos -= 1
            if pos == 0:
                # no space before the limit: the first word is unbreakable,
                # search forward for the next space instead
                pos = min(len(indent) + len(text), line_len)
                pos = pos - len(indent)
                # bug fix: stop at the end of the text -- the original scan
                # raised IndexError when no space followed the long word
                while pos < len(text) and text[pos] != ' ':
                    pos += 1
        lines.append(indent + text[:pos])
        text = text[pos+1:]
    return linesep.join(lines)
+
+
def get_csv(string, sep=','):
    """split a csv formatted line into a list of stripped, non empty words

    >>> get_csv('a, b, c , 4')
    ['a', 'b', 'c', '4']
    >>> get_csv('a')
    ['a']
    >>>

    :type string: str or unicode
    :param string: a csv line

    :type sep: str or unicode
    :param sep: field separator, default to the comma (',')

    :rtype: list(str or unicode)
    :return: the list of fields, whitespace stripped, empty fields dropped
    """
    words = []
    for word in string.split(sep):
        word = word.strip()
        if word:
            words.append(word)
    return words
+
+
# any end-of-line convention: CRLF, bare CR run(s), or LF
_LINE_RGX = re.compile('\r\n|\r+|\n')

def pretty_match(match, string, underline_char='^'):
    """return a string with the match location underlined:

    >>> import re
    >>> print pretty_match(re.search('mange', 'il mange du bacon'), 'il mange du bacon')
    il mange du bacon
       ^^^^^
    >>>

    :type match: _sre.SRE_match
    :param match: object returned by re.match, re.search or re.finditer

    :type string: str or unicode
    :param string:
        the string on which the regular expression has been applied to
        obtain the `match` object

    :type underline_char: str or unicode
    :param underline_char:
        character to use to underline the matched section, default to the
        carret '^'

    :rtype: str or unicode
    :return:
        the original string with an inserted line to underline the match
        location
    """
    start, end = match.start(), match.end()
    # harmonize end-of-line conventions to the platform's linesep
    string = _LINE_RGX.sub(linesep, string)
    # locate the beginning of the line holding the match start
    line_start = string.rfind(linesep, 0, start)
    if line_start == -1:
        line_start = 0
        output = []
    else:
        output = [string[:line_start]]
        line_start += len(linesep)
    # pad up to the match column, then underline the matched span
    underline = ' ' * (start - line_start) + underline_char * (end - start)
    line_end = string.find(linesep, end)
    if line_end == -1:
        # match is on the last line: everything after line_start stays
        output.append(string[line_start:])
        output.append(underline)
    else:
        trailing = string[line_end + len(linesep):]
        output.append(string[line_start:line_end])
        output.append(underline)
        output.append(trailing)
    return linesep.join(output).rstrip()
+
+
+# Ansi colorization ###########################################################
+
+ANSI_PREFIX = '\033['
+ANSI_END = 'm'
+ANSI_RESET = '\033[0m'
+ANSI_STYLES = {
+ 'reset' : "0",
+ 'bold' : "1",
+ 'italic' : "3",
+ 'underline' : "4",
+ 'blink' : "5",
+ 'inverse' : "7",
+ 'strike' : "9",
+}
+ANSI_COLORS = {
+ 'reset' : "0",
+ 'black' : "30",
+ 'red' : "31",
+ 'green' : "32",
+ 'yellow' : "33",
+ 'blue' : "34",
+ 'magenta' : "35",
+ 'cyan' : "36",
+ 'white' : "37",
+}
+
+
+def _get_ansi_code(color=None, style=None):
+ """return ansi escape code corresponding to color and style
+
+ :type color: str or None
+ :param color:
+ the color identifier (see `ANSI_COLORS` for available values)
+
+ :type style: str or None
+ :param style:
+ style string (see `ANSI_COLORS` for available values). To get
+ several style effects at the same time, use a coma as separator.
+
+ :raise KeyError: if an unexistant color or style identifier is given
+
+ :rtype: str
+ :return: the built escape code
+ """
+ ansi_code = []
+ if style:
+ style_attrs = get_csv(style)
+ for effect in style_attrs:
+ ansi_code.append(ANSI_STYLES[effect])
+ if color:
+ ansi_code.append(ANSI_COLORS[color])
+ if ansi_code:
+ return ANSI_PREFIX + ';'.join(ansi_code) + ANSI_END
+ return ''
+
def colorize_ansi(msg, color=None, style=None):
    """colorize message by wrapping it with ansi escape codes

    :type msg: str or unicode
    :param msg: the message string to colorize

    :type color: str or None
    :param color:
        the color identifier (see `ANSI_COLORS` for available values)

    :type style: str or None
    :param style:
        style string (see `ANSI_STYLES` for available values). To get
        several style effects at the same time, use a coma as separator.

    :raise KeyError: if an unexistant color or style identifier is given

    :rtype: str or unicode
    :return: the ansi escaped string
    """
    if color is None and style is None:
        # nothing requested: hand the message back untouched
        return msg
    escape_code = _get_ansi_code(color, style)
    if not escape_code:
        # no actual code was built: leave the message unwrapped
        return msg
    return '%s%s%s' % (escape_code, msg, ANSI_RESET)
+
diff --git a/tree.py b/tree.py
new file mode 100644
index 0000000..00be29a
--- /dev/null
+++ b/tree.py
@@ -0,0 +1,350 @@
+# Copyright (c) 2000-2003 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""
+ base class to represent tree structure
+"""
+
+__revision__ = "$Id: tree.py,v 1.24 2006-02-24 10:40:21 nico Exp $"
+
+import sys
+from warnings import warn
+
+from logilab.common import deprecated_function
+from logilab.common.visitor import VisitedMixIn, FilteredIterator, no_filter
+
+## Exceptions #################################################################
+
class NodeNotFound(Exception):
    """raised when a node has not been found"""

# error message templates, filled with the looked-up node id or path
EX_SIBLING_NOT_FOUND = "No such sibling as '%s'"
EX_CHILD_NOT_FOUND = "No such child as '%s'"
EX_NODE_NOT_FOUND = "No such node as '%s'"
+
+
+# Base node ###################################################################
+
class Node :
    """a basic tree node, characterised by an id

    navigation is done through the `parent` backreference and the
    `children` list
    """

    def __init__(self, nid=None) :
        # node identifier (may be left to None)
        self.id = nid
        # navigation
        self.parent = None
        self.children = []

    def __str__(self, indent=0):
        # recursive, indented dump of the subtree rooted at this node
        s = ['%s%s %s' % (' '*indent, self.__class__.__name__, self.id)]
        indent += 2
        for child in self.children:
            try:
                s.append(child.__str__(indent))
            except TypeError:
                # child's __str__ does not accept the extra indent argument
                s.append(child.__str__())
        return '\n'.join(s)


    def is_leaf(self):
        """return true if this node has no children"""
        return not self.children

    def append(self, child):
        """add a node to children"""
        self.children.append(child)
        child.parent = self

    def remove(self, child):
        """remove a child node"""
        self.children.remove(child)
        child.parent = None

    def insert(self, index, child):
        """insert a child node"""
        self.children.insert(index, child)
        child.parent = self

    def replace(self, old_child, new_child):
        """replace a child node with another"""
        i = self.children.index(old_child)
        self.children.pop(i)
        self.children.insert(i, new_child)
        new_child.parent = self

    def get_sibling(self, nid):
        """return the sibling node that has given id

        :raise NodeNotFound: if no sibling has the given id
        """
        try:
            return self.parent.get_child_by_id(nid)
        except NodeNotFound :
            raise NodeNotFound(EX_SIBLING_NOT_FOUND % nid)

    def next_sibling(self):
        """
        return the next sibling for this node if any
        """
        parent = self.parent
        if parent is None:
            # root node has no sibling
            return None
        index = parent.children.index(self)
        try:
            return parent.children[index+1]
        except IndexError:
            # self is the last child
            return None

    def previous_sibling(self):
        """
        return the previous sibling for this node if any
        """
        parent = self.parent
        if parent is None:
            # root node has no sibling
            return None
        index = parent.children.index(self)
        if index > 0:
            return parent.children[index-1]
        return None

    def get_node_by_id(self, nid):
        """
        return node in whole hierarchy that has given id

        :raise NodeNotFound: if no node in the tree has the given id
        """
        root = self.root()
        try:
            return root.get_child_by_id(nid, 1)
        except NodeNotFound :
            raise NodeNotFound(EX_NODE_NOT_FOUND % nid)

    def get_child_by_id(self, nid, recurse=None):
        """
        return child of given id

        when `recurse` is true the whole subtree is searched (self
        included), otherwise only direct children are considered

        :raise NodeNotFound: if no matching node is found
        """
        if self.id == nid:
            return self
        for c in self.children :
            if recurse:
                try:
                    return c.get_child_by_id(nid, 1)
                except NodeNotFound :
                    continue
            if c.id == nid :
                return c
        raise NodeNotFound(EX_CHILD_NOT_FOUND % nid)

    def get_child_by_path(self, path):
        """
        return child of given path (path is a list of ids)

        the first element of the path must be this node's own id

        :raise NodeNotFound: if the path does not match the tree
        """
        if len(path) > 0 and path[0] == self.id:
            if len(path) == 1 :
                return self
            else :
                for c in self.children :
                    try:
                        return c.get_child_by_path(path[1:])
                    except NodeNotFound :
                        pass
        raise NodeNotFound(EX_CHILD_NOT_FOUND % path)

    def depth(self):
        """
        return depth of this node in the tree (0 for the root)
        """
        if self.parent is not None:
            return 1 + self.parent.depth()
        else :
            return 0

    def root(self):
        """
        return the root node of the tree
        """
        if self.parent is not None:
            return self.parent.root()
        return self

    def leaves(self):
        """
        return a list with all the leaves nodes descendant from this node
        """
        leaves = []
        if self.children:
            for child in self.children:
                leaves += child.leaves()
            return leaves
        else:
            # a leaf is its own single leaf
            return [self]

    leafs = deprecated_function(leaves) # backward compatibility

    def flatten(self, _list=None):
        """
        return a list with all the nodes descendant from this node

        `_list` is an internal accumulator used by the recursion
        """
        if _list is None:
            _list = []
        _list.append(self)
        for c in self.children:
            c.flatten(_list)
        return _list

    def lineage(self):
        """
        return list of parents up to root node (self included, closest
        parent first)
        """
        lst = [self]
        if self.parent is not None:
            lst.extend(self.parent.lineage())
        return lst
+
class VNode(Node, VisitedMixIn):
    """a visitable node: a tree node usable with the visitor helpers
    from logilab.common.visitor
    """
    pass
+
+
class BinaryNode(VNode):
    """a binary node (i.e. exactly two children, or none at all)
    """
    def __init__(self, lhs=None, rhs=None) :
        VNode.__init__(self)
        if lhs is not None or rhs is not None:
            # either both sides are given or neither is
            assert lhs and rhs
            self.append(lhs)
            self.append(rhs)

    def remove(self, child):
        """remove the child and replace this node with the other child
        """
        self.children.remove(child)
        # splice the remaining child into our place in the parent
        self.parent.replace(self, self.children[0])

    def get_parts(self):
        """
        return the left hand side and the right hand side of this node
        """
        return self.children[0], self.children[1]
+
+
+
if sys.version_info[0:2] >= (2, 2):
    list_class = list
else:
    # builtin list could not be subclassed before Python 2.2
    from UserList import UserList
    list_class = UserList

class ListNode(VNode, list_class):
    """Used to manipulate Nodes as Lists

    the node and its children container are the same object
    """
    def __init__(self):
        list_class.__init__(self)
        VNode.__init__(self)
        # the node IS its own children list
        self.children = self

    def __str__(self, indent=0):
        return '%s%s %s' % (indent*' ', self.__class__.__name__,
                            ', '.join([str(v) for v in self]))

    def append(self, child):
        """add a node at the end of the children list"""
        list_class.append(self, child)
        child.parent = self

    def insert(self, index, child):
        """insert a child node at the given index"""
        list_class.insert(self, index, child)
        child.parent = self

    def remove(self, child):
        """remove a child node"""
        list_class.remove(self, child)
        child.parent = None

    def pop(self, index):
        """remove and return the child node at the given index

        bug fix: the original implementation dropped the popped node
        instead of returning it, breaking the list contract
        """
        child = list_class.pop(self, index)
        child.parent = None
        return child
+
+
+# construct list from tree ####################################################
+
def post_order_list(node, filter_func=no_filter):
    """
    create a list with tree nodes for which the <filter> function returned true
    in a post order fashion
    """
    # iterative traversal: `stack` holds (node, child_index) of ancestors,
    # `poped` flags that we just came back up from a finished subtree
    l, stack = [], []
    poped, index = 0, 0
    while node:
        if filter_func(node):
            if node.children and not poped:
                # first visit of an inner node: descend into first child
                stack.append((node, index))
                index = 0
                node = node.children[0]
            else:
                # leaf, or inner node whose subtree is done: emit it
                l.append(node)
                index += 1
                try:
                    # move on to the next sibling
                    node = stack[-1][0].children[index]
                except IndexError:
                    node = None
        else:
            # filtered out: prune this whole subtree
            node = None
        poped = 0
        if node is None and stack:
            # siblings exhausted: come back up to the parent
            node, index = stack.pop()
            poped = 1
    return l
+
def pre_order_list(node, filter_func=no_filter):
    """
    create a list with tree nodes for which the <filter> function returned true
    in a pre order fashion
    """
    # iterative traversal: `stack` holds (node, child_index) of ancestors,
    # `poped` flags that we just came back up from a finished subtree
    l, stack = [], []
    poped, index = 0, 0
    while node:
        if filter_func(node):
            if not poped:
                # first time this node is reached: emit it (pre-order)
                l.append(node)
            if node.children and not poped:
                # descend into the first child
                stack.append((node, index))
                index = 0
                node = node.children[0]
            else:
                # move on to the next sibling
                index += 1
                try:
                    node = stack[-1][0].children[index]
                except IndexError:
                    node = None
        else:
            # filtered out: prune this whole subtree
            node = None
        poped = 0
        if node is None and len(stack) > 1:
            # siblings exhausted: come back up (the root entry may stay on
            # the stack since it has already been emitted)
            node, index = stack.pop()
            poped = 1
    return l
+
class PostfixedDepthFirstIterator(FilteredIterator):
    """a postfixed depth first iterator, designed to be used with visitors
    """
    def __init__(self, node, filter_func=None):
        # traversal order is provided by post_order_list
        FilteredIterator.__init__(self, node, post_order_list, filter_func)
+
class PrefixedDepthFirstIterator(FilteredIterator):
    """a prefixed depth first iterator, designed to be used with visitors
    """
    def __init__(self, node, filter_func=None):
        # traversal order is provided by pre_order_list
        FilteredIterator.__init__(self, node, pre_order_list, filter_func)
+
diff --git a/twisted_distutils.py b/twisted_distutils.py
new file mode 100644
index 0000000..67dbd6b
--- /dev/null
+++ b/twisted_distutils.py
@@ -0,0 +1,209 @@
+""" Copyright (c) 2002-2003 LOGILAB S.A. (Paris, FRANCE).
+http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+Distutils extensions for twisted framework.
+
+This module enables the installation of plugins.tml files using standard
+distutils syntax. It adds the following commands to the standard
+setup.py commands:
+* build_twisted_plugins: build (i.e. copy) plugins
+* install_twisted_plugins: install plugins
+
+Additionally, the following commands have been modified to deal with
+plugins files:
+ * sdist
+ * build
+ * install
+
+To use these extensions, you should import the setup function from this
+module, and use it normally. To list the plugins.tml files, use the
+twisted_plugins keyword argument to the setup function:
+
+from twisted_distutils import setup # you can also import Extension if needed
+
+if __name__ == '__main__':
+ setup(name='my_twisted_app',
+ version='1.0',
+ author='me',
+ packages=['my_package'],
+ twisted_plugins = ['my_package/plugins.tml'])
+
+Note that you can use this to install files that are not twisted plugins in any
+package directory of your application.
+"""
+#
+# (c) 2002 Alexandre Fayolle <alexandre.fayolle@free.fr>
+# This module is heavily based on code copied from the python distutils
+# framework, especially distutils.command.build_script,
+# distutils.command.install_script. Many thanks to the authors of these
+# modules.
+# This module is provided as is, I'm not responsible if anything bad
+# happens to you or your python library while using this module. You may
+# freely copy it, distribute it and use it in your library or to distribute
+# your applications. I'd appreciate if you could drop me an email if you plan
+# to do so <wink>.
+#
+# Happy twisting!
+#
+__revision__ = "$Id: twisted_distutils.py,v 1.4 2003-09-12 11:54:48 syt Exp $"
+
+from distutils.core import Distribution, Command
+from distutils.command.install import install
+from distutils.command.build import build
+from distutils.command.sdist import sdist
+from distutils.dep_util import newer
+from distutils.util import convert_path
+import os
+
class twisted_sdist(sdist):
    """sdist command variant including the declared twisted plugin files
    in the source distribution
    """
    def add_defaults(self):
        # standard default files first, then the plugins.tml files
        sdist.add_defaults(self)
        if self.distribution.has_twisted_plugins():
            plugins = self.get_finalized_command('build_twisted_plugins')
            self.filelist.extend(plugins.get_source_files())
+
class twisted_install(install):
    """install command variant knowing about twisted plugin files"""
    def initialize_options (self):
        install.initialize_options(self)
        self.twisted_plugins = None

    def has_twisted_plugins(self):
        # predicate deciding whether the plugin sub-command must run
        return self.distribution.has_twisted_plugins()

    # standard install sub-commands, plus the plugin one when relevant
    sub_commands = []
    sub_commands.extend(install.sub_commands)
    sub_commands.append(('install_twisted_plugins', has_twisted_plugins))
+
+
class twisted_build(build):
    """build command variant knowing about twisted plugin files"""
    def initialize_options (self):
        build.initialize_options(self)
        self.twisted_plugins = None

    def has_twisted_plugins(self):
        # predicate deciding whether the plugin sub-command must run
        return self.distribution.has_twisted_plugins()

    # standard build sub-commands, plus the plugin one when relevant
    sub_commands = []
    sub_commands.extend(build.sub_commands)
    sub_commands.append(('build_twisted_plugins', has_twisted_plugins))
+
class build_twisted_plugins (Command):
    """distutils command copying the declared plugins.tml files into the
    build directory
    """

    description = "\"build\" twisted plugins (copy)"

    user_options = [
        ('build-dir=', 'd', "directory to \"build\" (copy) to"),
        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
        ]

    boolean_options = ['force']


    def initialize_options (self):
        self.build_dir = None
        self.twisted_plugins = None
        self.force = None
        self.outfiles = None

    def get_source_files(self):
        """return the list of plugin files (used by the sdist command)"""
        return self.twisted_plugins

    def finalize_options (self):
        # inherit build_dir/force from the main build command when unset
        self.set_undefined_options('build',
                                   ('build_lib', 'build_dir'),
                                   ('force', 'force'))
        self.twisted_plugins = self.distribution.twisted_plugins


    def run (self):
        if not self.twisted_plugins:
            return
        self.copy_twisted_plugins()


    def copy_twisted_plugins (self):
        """Copy each plugin listed in 'self.twisted_plugins'.
        """
        self.mkpath(self.build_dir)
        for plugin in self.twisted_plugins:
            plugin = convert_path(plugin)
            outfile = os.path.join(self.build_dir, plugin)
            if not self.force and not newer(plugin, outfile):
                self.announce("not copying %s (up-to-date)" % plugin)
                continue

            # Always open the file, but ignore failures in dry-run mode --
            # that way, we'll get accurate feedback if we can read the
            # plugin.
            try:
                f = open(plugin, "r")
            except IOError:
                if not self.dry_run:
                    raise
            else:
                f.close()
            self.copy_file(plugin, outfile)
+
+
class install_twisted_plugins(Command):
    """distutils command installing plugin files from the build directory
    into the installation directory
    """

    description = "install twisted plugins"

    user_options = [
        ('install-dir=', 'd', "directory to install scripts to"),
        ('build-dir=','b', "build directory (where to install from)"),
        ('force', 'f', "force installation (overwrite existing files)"),
        ('skip-build', None, "skip the build steps"),
        ]

    boolean_options = ['force', 'skip-build']


    def initialize_options (self):
        self.install_dir = None
        self.force = 0
        self.build_dir = None
        self.skip_build = None

    def finalize_options (self):
        # inherit unset options from the build and install commands
        self.set_undefined_options('build', ('build_lib', 'build_dir'))
        self.set_undefined_options('install',
                                   ('install_lib', 'install_dir'),
                                   ('force', 'force'),
                                   ('skip_build', 'skip_build'),
                                   )

    def run (self):
        # make sure plugins are built before copying them over
        if not self.skip_build:
            self.run_command('build_twisted_plugins')
        self.outfiles = self.copy_tree(self.build_dir, self.install_dir)

    def get_inputs (self):
        """files this command would install (for record bookkeeping)"""
        return self.distribution.twisted_plugins or []

    def get_outputs(self):
        """files actually installed by run()"""
        return self.outfiles or []
+
+
+
class TwistedDistribution(Distribution):
    """Distribution subclass accepting the `twisted_plugins` setup keyword
    and wiring in the plugin-aware commands
    """
    def __init__(self,attrs=None):
        # attribute must exist before Distribution.__init__ processes attrs
        self.twisted_plugins = None
        Distribution.__init__(self, attrs)
        self.cmdclass = {'install':twisted_install,
                         'install_twisted_plugins':install_twisted_plugins,
                         'build':twisted_build,
                         'build_twisted_plugins':build_twisted_plugins,
                         'sdist':twisted_sdist,
                         }

    def has_twisted_plugins(self):
        """return true if some plugin file has been declared"""
        return self.twisted_plugins and len(self.twisted_plugins) > 0
+
+
def setup(**attrs):
    """drop-in replacement for distutils.core.setup supporting the
    `twisted_plugins` keyword argument
    """
    from distutils import core
    # force our Distribution subclass so the extra keyword is understood
    attrs['distclass'] = TwistedDistribution
    core.setup(**attrs)
diff --git a/ureports/__init__.py b/ureports/__init__.py
new file mode 100644
index 0000000..b4c6c60
--- /dev/null
+++ b/ureports/__init__.py
@@ -0,0 +1,173 @@
+# Copyright (c) 2004-2005 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+""" Universal report objects and some formatting drivers
+
+a way to create simple reports using python objects, primarily designed to be
+formatted as text and html
+"""
+
+from __future__ import generators
+
+__revision__ = "$Id: __init__.py,v 1.8 2005-07-02 13:22:30 syt Exp $"
+
+import sys
+from os import linesep
+from cStringIO import StringIO
+from StringIO import StringIO as UStringIO
+
+
def get_nodes(node, klass):
    """return an iterator on all children node of the given klass"""
    # iterative depth-first pre-order walk of the subtree below `node`
    # (FIXME: recursion controled by an option)
    pending = list(node.children)
    pending.reverse()
    while pending:
        current = pending.pop()
        if isinstance(current, klass):
            yield current
        grandchildren = list(current.children)
        grandchildren.reverse()
        pending.extend(grandchildren)
+
def layout_title(layout):
    """try to return the layout's title as string, return None if not found
    """
    for child in layout.children:
        if not isinstance(child, Title):
            continue
        # join the data of every Text node found under the title
        words = [node.data for node in get_nodes(child, Text)]
        return ' '.join(words)
+
def build_summary(layout, level=1):
    """make a summary for the report, including X level"""
    assert level > 0
    level -= 1
    summary = List(klass='summary')
    for child in layout.children:
        if not isinstance(child, Section):
            continue
        label = layout_title(child)
        if not (label or child.id):
            # nothing to link to: skip this section
            continue
        if not child.id:
            child.id = label.replace(' ', '-')
        entry = Link('#'+child.id, label=label or child.id)
        # FIXME: wrapping the sub-summary in a Paragraph produces not very
        # compliant docbook (useless <para><para>); appending the link and
        # the sub-summary separately would fix docbook but then produces
        # a bug in html display...
        subsections = [n for n in child.children if isinstance(n, Section)]
        if level and subsections:
            entry = Paragraph([entry, build_summary(child, level)])
        summary.append(entry)
    return summary
+
+
class BaseWriter(object):
    """base class for ureport writers"""

    def format(self, layout, stream=None, encoding=None):
        """format and write the given layout into the stream object

        unicode policy: unicode strings may be found in the layout;
        try to call stream.write with it, but give it back encoded using
        the given encoding if it fails
        """
        if stream is None:
            stream = sys.stdout
        if not encoding:
            encoding = getattr(stream, 'encoding', 'UTF-8')
        # getattr may have returned None (stream with encoding set to None)
        self.encoding = encoding or 'UTF-8'
        # stack of (write, writeln) pairs pushed by compute_content
        self.__compute_funcs = []
        self.out = stream
        self.begin_format(layout)
        # visitor pattern: the layout dispatches back to our visit_* methods
        layout.accept(self)
        self.end_format(layout)

    def format_children(self, layout):
        """recurse on the layout children and call their accept method
        (see the Visitor pattern)
        """
        for child in getattr(layout, 'children', ()):
            child.accept(self)

    def writeln(self, string=''):
        """write a line in the output buffer"""
        self.write(string + linesep)

    def write(self, string):
        """write a string in the output buffer"""
        try:
            self.out.write(string)
        except UnicodeEncodeError:
            # the stream refused the unicode string: retry encoded
            self.out.write(string.encode(self.encoding))

    def begin_format(self, layout):
        """begin to format a layout"""
        # section nesting level, maintained by visit_section implementations
        self.section = 0

    def end_format(self, layout):
        """finished to format a layout"""

    def get_table_content(self, table):
        """trick to get table content without actually writing it

        return an aligned list of lists containing table cells values as string
        """
        result = [[]]
        # cols counts the cells still expected on the current row
        cols = table.cols
        for cell in self.compute_content(table):
            if cols == 0:
                # current row is full: start a new one
                result.append([])
                cols = table.cols
            cols -= 1
            result[-1].append(cell)
        # fill missing cells
        while len(result[-1]) < cols:
            result[-1].append('')
        return result

    def compute_content(self, layout):
        """trick to compute the formatting of children layout before actually
        writing it

        return an iterator on strings (one for each child element)
        """
        # use cells !
        # temporarily shadow self.write/self.writeln with closures writing
        # into a per-child buffer; the previous pair is restored at the end
        def write(data):
            try:
                stream.write(data)
            except UnicodeEncodeError:
                stream.write(data.encode(self.encoding))
        def writeln(data=''):
            try:
                stream.write(data+linesep)
            except UnicodeEncodeError:
                stream.write(data.encode(self.encoding)+linesep)
        self.write = write
        self.writeln = writeln
        self.__compute_funcs.append((write, writeln))
        for child in layout.children:
            # `stream` is rebound before each accept(): the closures above
            # always target the current child's buffer
            stream = UStringIO()
            child.accept(self)
            yield stream.getvalue()
        self.__compute_funcs.pop()
        try:
            # restore the redirection installed by an enclosing call
            self.write, self.writeln = self.__compute_funcs[-1]
        except IndexError:
            # outermost call: fall back to the class-level methods
            del self.write
            del self.writeln
+
+
+from logilab.common.ureports.nodes import *
+from logilab.common.ureports.text_writer import TextWriter
+from logilab.common.ureports.html_writer import HTMLWriter
diff --git a/ureports/docbook_writer.py b/ureports/docbook_writer.py
new file mode 100644
index 0000000..5ce5760
--- /dev/null
+++ b/ureports/docbook_writer.py
@@ -0,0 +1,138 @@
+# Copyright (c) 2002-2004 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""HTML formatting drivers for ureports
+"""
+
+__revision__ = "$Id: docbook_writer.py,v 1.4 2005-05-20 16:42:23 emb Exp $"
+
+from logilab.common.ureports import HTMLWriter
+
class DocbookWriter(HTMLWriter):
    """format layouts as DocBook XML (<book>/<chapter>/<section>...)"""

    def begin_format(self, layout):
        """begin to format a layout"""
        # NOTE(review): super(HTMLWriter, self) deliberately *skips*
        # HTMLWriter.begin_format (which would emit <html><body>) and calls
        # the base writer's version instead -- confirm this is intentional
        super(HTMLWriter, self).begin_format(layout)
        if self.snipet is None:
            self.writeln('<?xml version="1.0" encoding="ISO-8859-1"?>')
            self.writeln("""
<book xmlns:xi='http://www.w3.org/2001/XInclude'
      lang='fr'>
""")

    def end_format(self, layout):
        """finished to format a layout"""
        if self.snipet is None:
            self.writeln('</book>')

    def visit_section(self, layout):
        """display a section (using <chapter> (level 0) or <section>)"""
        if self.section == 0:
            tag = "chapter"
        else:
            tag = "section"
        self.section += 1
        self.writeln(self._indent('<%s%s>' % (tag, self.handle_attrs(layout))))
        self.format_children(layout)
        self.writeln(self._indent('</%s>' % tag))
        self.section -= 1

    def visit_title(self, layout):
        """display a title using <title>"""
        self.write(self._indent(' <title%s>' % self.handle_attrs(layout)))
        self.format_children(layout)
        self.writeln('</title>')

    def visit_table(self, layout):
        """display a table as docbook (<table>/<tgroup>/<row>/<entry>)"""
        self.writeln(self._indent(' <table%s><title>%s</title>' \
            % (self.handle_attrs(layout), layout.title)))
        self.writeln(self._indent(' <tgroup cols="%s">' % layout.cols))
        for i in range(layout.cols):
            self.writeln(self._indent(' <colspec colname="c%s" colwidth="1*"/>' % i))

        table_content = self.get_table_content(layout)
        # write headers
        if layout.cheaders:
            self.writeln(self._indent(' <thead>'))
            self._write_row(table_content[0])
            self.writeln(self._indent(' </thead>'))
            table_content = table_content[1:]
        elif layout.rcheaders:
            self.writeln(self._indent(' <thead>'))
            self._write_row(table_content[-1])
            self.writeln(self._indent(' </thead>'))
            table_content = table_content[:-1]
        # write body
        self.writeln(self._indent(' <tbody>'))
        for i in range(len(table_content)):
            row = table_content[i]
            self.writeln(self._indent(' <row>'))
            for j in range(len(row)):
                # empty cells are rendered as a non-breaking space
                cell = row[j] or '&#160;'
                self.writeln(self._indent(' <entry>%s</entry>' % cell))
            self.writeln(self._indent(' </row>'))
        self.writeln(self._indent(' </tbody>'))
        self.writeln(self._indent(' </tgroup>'))
        self.writeln(self._indent(' </table>'))

    def _write_row(self, row):
        """write content of row (using <row> <entry>)"""
        # NOTE(review): unlike visit_table, the entries here are not passed
        # through self._indent -- confirm whether this asymmetry is wanted
        self.writeln(' <row>')
        for j in range(len(row)):
            cell = row[j] or '&#160;'
            self.writeln(' <entry>%s</entry>' % cell)
        self.writeln(self._indent(' </row>'))

    def visit_list(self, layout):
        """display a list (using <itemizedlist>)"""
        self.writeln(self._indent(' <itemizedlist%s>' % self.handle_attrs(layout)))
        for row in list(self.compute_content(layout)):
            self.writeln(' <listitem><para>%s</para></listitem>' % row)
        self.writeln(self._indent(' </itemizedlist>'))

    def visit_paragraph(self, layout):
        """display a paragraph (using <para>)"""
        self.write(self._indent(' <para>'))
        self.format_children(layout)
        self.writeln('</para>')

    def visit_span(self, layout):
        """display a span (using <literal>)"""
        #TODO: translate in docbook
        self.write('<literal %s>' % self.handle_attrs(layout))
        self.format_children(layout)
        self.write('</literal>')

    def visit_link(self, layout):
        """display links (using <ulink>)"""
        self.write('<ulink url="%s"%s>%s</ulink>' % (layout.url,
                                                     self.handle_attrs(layout),
                                                     layout.label))

    def visit_verbatimtext(self, layout):
        """display verbatim text (using <programlisting>)"""
        self.writeln(self._indent(' <programlisting>'))
        # escape XML markup characters in the raw data
        self.write(layout.data.replace('&', '&amp;').replace('<', '&lt;'))
        self.writeln(self._indent(' </programlisting>'))

    def visit_text(self, layout):
        """add some text"""
        self.write(layout.data.replace('&', '&amp;').replace('<', '&lt;'))

    def _indent(self, string):
        """correctly indent string according to section"""
        return ' ' * 2*(self.section) + string
diff --git a/ureports/html_writer.py b/ureports/html_writer.py
new file mode 100644
index 0000000..33506d0
--- /dev/null
+++ b/ureports/html_writer.py
@@ -0,0 +1,131 @@
+# Copyright (c) 2004-2005 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""HTML formatting drivers for ureports
+"""
+
+__revision__ = "$Id: html_writer.py,v 1.10 2006-03-08 09:47:29 katia Exp $"
+
+from cgi import escape
+
+from logilab.common.ureports import BaseWriter
+
+
class HTMLWriter(BaseWriter):
    """format layouts as HTML"""

    def __init__(self, snipet=None):
        # bug fix: the base initializer was called with a spurious extra
        # argument (super(...).__init__(self)); call it without arguments
        super(HTMLWriter, self).__init__()
        # when `snipet` (sic -- the misspelled name is part of the public
        # interface) is None, a complete <html><body> document is emitted,
        # otherwise only the layout's own markup
        self.snipet = snipet

    def handle_attrs(self, layout):
        """get an attribute string from layout member attributes"""
        attrs = ''
        klass = getattr(layout, 'klass', None)
        if klass:
            attrs += ' class="%s"' % klass
        nid = getattr(layout, 'id', None)
        if nid:
            attrs += ' id="%s"' % nid
        return attrs

    def begin_format(self, layout):
        """begin to format a layout"""
        super(HTMLWriter, self).begin_format(layout)
        if self.snipet is None:
            self.writeln('<html>')
            self.writeln('<body>')

    def end_format(self, layout):
        """finished to format a layout"""
        if self.snipet is None:
            self.writeln('</body>')
            self.writeln('</html>')

    def visit_section(self, layout):
        """display a section as html, using div + h[section level]"""
        self.section += 1
        self.writeln('<div%s>' % self.handle_attrs(layout))
        self.format_children(layout)
        self.writeln('</div>')
        self.section -= 1

    def visit_title(self, layout):
        """display a title using <hX>"""
        self.write('<h%s%s>' % (self.section, self.handle_attrs(layout)))
        self.format_children(layout)
        self.writeln('</h%s>' % self.section)

    def visit_table(self, layout):
        """display a table as html"""
        self.writeln('<table%s>' % self.handle_attrs(layout))
        table_content = self.get_table_content(layout)
        for i in range(len(table_content)):
            row = table_content[i]
            if i == 0 and layout.rheaders:
                self.writeln('<tr class="header">')
            elif i+1 == len(table_content) and layout.rrheaders:
                self.writeln('<tr class="header">')
            else:
                self.writeln('<tr class="%s">' % (i%2 and 'even' or 'odd'))
            for j in range(len(row)):
                # empty cells are rendered as a non-breaking space
                cell = row[j] or '&nbsp;'
                # emit <th> when the cell belongs to a header row/column
                if (layout.rheaders and i == 0) or \
                   (layout.cheaders and j == 0) or \
                   (layout.rrheaders and i+1 == len(table_content)) or \
                   (layout.rcheaders and j+1 == len(row)):
                    self.writeln('<th>%s</th>' % cell)
                else:
                    self.writeln('<td>%s</td>' % cell)
            self.writeln('</tr>')
        self.writeln('</table>')

    def visit_list(self, layout):
        """display a list as html"""
        self.writeln('<ul%s>' % self.handle_attrs(layout))
        for row in list(self.compute_content(layout)):
            self.writeln('<li>%s</li>' % row)
        self.writeln('</ul>')

    def visit_paragraph(self, layout):
        """display a paragraph (using <p>)"""
        self.write('<p>')
        self.format_children(layout)
        self.write('</p>')

    def visit_span(self, layout):
        """display a span (using <span>)"""
        self.write('<span%s>' % self.handle_attrs(layout))
        self.format_children(layout)
        self.write('</span>')

    def visit_link(self, layout):
        """display links (using <a>)"""
        self.write(' <a href="%s"%s>%s</a>' % (layout.url,
                                               self.handle_attrs(layout),
                                               layout.label))

    def visit_verbatimtext(self, layout):
        """display verbatim text (using <pre>)"""
        self.write('<pre>')
        self.write(layout.data.replace('&', '&amp;').replace('<', '&lt;'))
        self.write('</pre>')

    def visit_text(self, layout):
        """add some text"""
        data = layout.data
        if layout.escaped:
            data = data.replace('&', '&amp;').replace('<', '&lt;')
        self.write(data)
diff --git a/ureports/nodes.py b/ureports/nodes.py
new file mode 100644
index 0000000..d0829ae
--- /dev/null
+++ b/ureports/nodes.py
@@ -0,0 +1,200 @@
+# Copyright (c) 2004-2005 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""Universal reports objects
+
+A Universal report is a tree of layout and content objects
+"""
+
+__revision__ = "$Id: nodes.py,v 1.11 2006-03-08 09:47:38 katia Exp $"
+
+from logilab.common.tree import VNode
+
class BaseComponent(VNode):
    """base report component

    attributes
    * id : the component's optional id
    * klass : the component's optional klass
    """
    def __init__(self, id=None, klass=None):
        # `id` is handled by the VNode base class
        VNode.__init__(self, id)
        # named "klass" to avoid shadowing the `class` keyword
        self.klass = klass
+
class BaseLayout(BaseComponent):
    """base container node

    attributes
    * BaseComponent attributes
    * children : components in this table (i.e. the table's cells)
    """
    def __init__(self, children=(), **kwargs):
        super(BaseLayout, self).__init__(**kwargs)
        # wrap raw strings into Text nodes, append components as they are
        for child in children:
            if not isinstance(child, BaseComponent):
                self.add_text(child)
            else:
                self.append(child)

    def append(self, child):
        """overriden to detect problems easily"""
        # guard against cycles: a node must never become its own ancestor
        assert child not in self.parents()
        VNode.append(self, child)

    def parents(self):
        """return the ancestor nodes"""
        assert self.parent is not self
        if self.parent is None:
            return []
        ancestors = [self.parent]
        ancestors.extend(self.parent.parents())
        return ancestors

    def add_text(self, text):
        """shortcut to add text data"""
        self.children.append(Text(text))
+
+
+# non container nodes #########################################################
+
class Text(BaseComponent):
    """a text portion

    attributes :
    * BaseComponent attributes
    * data : the text value as an encoded or unicode string
    """
    def __init__(self, data, escaped=True, **kwargs):
        super(Text, self).__init__(**kwargs)
        assert isinstance(data, (str, unicode)), data.__class__
        self.data = data
        # `escaped` tells writers whether markup characters in `data`
        # still need escaping
        self.escaped = escaped
+
class VerbatimText(Text):
    """a verbatim text, display the raw data

    attributes :
    * BaseComponent attributes
    * data : the text value as an encoded or unicode string
    """
    # behaves exactly like Text; writers dispatch on the node's class name
    # to render it without reformatting
+
class Link(BaseComponent):
    """a labelled link

    attributes :
    * BaseComponent attributes
    * url : the link's target (REQUIRED)
    * label : the link's label as a string (use the url by default)
    """
    def __init__(self, url, label=None, **kwargs):
        super(Link, self).__init__(**kwargs)
        assert url
        self.url = url
        # fall back on the url itself when no label is given
        if label:
            self.label = label
        else:
            self.label = url
+
+
class Image(BaseComponent):
    """an embedded or a single image

    attributes :
    * BaseComponent attributes
    * filename : the image's filename (REQUIRED)
    * stream : the stream object containing the image data (REQUIRED)
    * title : the image's optional title
    """
    def __init__(self, filename, stream, title=None, **kwargs):
        # bug fix: the original called super(Link, self).__init__, which
        # raises TypeError since an Image instance is not a Link instance
        super(Image, self).__init__(**kwargs)
        assert filename
        assert stream
        self.filename = filename
        self.stream = stream
        self.title = title
+
+
+# container nodes #############################################################
+
class Section(BaseLayout):
    """a section

    attributes :
    * BaseLayout attributes

    a title may also be given to the constructor, it'll be added
    as a first element
    a description may also be given to the constructor, it'll be added
    as a first paragraph
    """
    def __init__(self, title=None, description=None, **kwargs):
        super(Section, self).__init__(**kwargs)
        # insert the description first, then the title, both at index 0,
        # so the final order is: title, description paragraph, children
        if description:
            self.insert(0, Paragraph([Text(description)]))
        if title:
            self.insert(0, Title(children=(title,)))
+
class Title(BaseLayout):
    """a title

    attributes :
    * BaseLayout attributes

    A title must not contain a section nor a paragraph!
    """
+
class Span(BaseLayout):
    """a span (in-line container)

    attributes :
    * BaseLayout attributes

    A span should only contain Text and Link nodes (in-line elements)
    """
+
class Paragraph(BaseLayout):
    """a simple text paragraph

    attributes :
    * BaseLayout attributes

    A paragraph must not contain a section !
    """
+
class Table(BaseLayout):
    """some tabular data

    attributes :
    * BaseLayout attributes
    * cols : the number of columns of the table (REQUIRED)
    * rheaders : the first row's elements are table's header
    * cheaders : the first col's elements are table's header
    * rrheaders : the last row's elements are table's header
    * rcheaders : the last col's elements are table's header
    * title : the table's optional title
    """
    def __init__(self, cols, title=None,
                 rheaders=0, cheaders=0, rrheaders=0, rcheaders=0,
                 **kwargs):
        super(Table, self).__init__(**kwargs)
        assert isinstance(cols, int)
        self.cols = cols
        self.title = title
        self.rheaders = rheaders
        self.cheaders = cheaders
        self.rrheaders = rrheaders
        self.rcheaders = rcheaders
+
class List(BaseLayout):
    """some list data

    attributes :
    * BaseLayout attributes
    """
diff --git a/ureports/text_writer.py b/ureports/text_writer.py
new file mode 100644
index 0000000..f0a9617
--- /dev/null
+++ b/ureports/text_writer.py
@@ -0,0 +1,141 @@
+# Copyright (c) 2004-2005 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""Text formatting drivers for ureports"""
+
+__revision__ = "$Id: text_writer.py,v 1.9 2005-11-22 13:13:13 syt Exp $"
+
+from os import linesep
+
+from logilab.common.ureports import BaseWriter
+
+TITLE_UNDERLINES = ['', '=', '-', '`', '.', '~', '^']
+BULLETS = ['*', '-']
+
class TextWriter(BaseWriter):
    """format layouts as text
    (ReStructured inspiration but not totally handled yet)
    """
    def begin_format(self, layout):
        super(TextWriter, self).begin_format(layout)
        # nested list depth, used to pick the bullet character
        self.list_level = 0
        # (label, url) pairs collected by visit_link, flushed at the end of
        # the enclosing section as reST link definitions
        self.pending_urls = []

    def visit_section(self, layout):
        """display a section as text
        """
        self.section += 1
        self.writeln()
        self.format_children(layout)
        if self.pending_urls:
            # flush `.. _label: url` definitions gathered while visiting
            # the section's links
            self.writeln()
            for label, url in self.pending_urls:
                self.writeln('.. _`%s`: %s' % (label, url))
            self.pending_urls = []
        self.section -= 1
        self.writeln()

    def visit_title(self, layout):
        """display a title, underlined according to the section depth"""
        title = ''.join(list(self.compute_content(layout)))
        self.writeln(title)
        try:
            self.writeln(TITLE_UNDERLINES[self.section] * len(title))
        except IndexError:
            # deeper than the deepest known underline style (python 2
            # print statement)
            print "FIXME TITLE TOO DEEP. TURNING TITLE INTO TEXT"

    def visit_paragraph(self, layout):
        """enter a paragraph"""
        self.format_children(layout)
        self.writeln()

    def visit_span(self, layout):
        """enter a span"""
        self.format_children(layout)

    def visit_table(self, layout):
        """display a table as text"""
        table_content = self.get_table_content(layout)
        # get columns width
        cols_width = [0]*len(table_content[0])
        for row in table_content:
            for index in range(len(row)):
                col = row[index]
                cols_width[index] = max(cols_width[index], len(col))
        # 'field' tables get a "name: value" rendering, others an ascii grid
        if layout.klass == 'field':
            self.field_table(layout, table_content, cols_width)
        else:
            self.default_table(layout, table_content, cols_width)
        self.writeln()

    def default_table(self, layout, table_content, cols_width):
        """format a table"""
        cols_width = [size+1 for size in cols_width]
        # build one '%-<width>s' format string per column
        format_strings = ' '.join(['%%-%ss'] * len(cols_width))
        format_strings = format_strings % tuple(cols_width)
        format_strings = format_strings.split(' ')
        table_linesep = '\n+' + '+'.join(['-'*w for w in cols_width]) + '+\n'
        headsep = '\n+' + '+'.join(['='*w for w in cols_width]) + '+\n'
        # FIXME: layout.cheaders
        self.write(table_linesep)
        for i in range(len(table_content)):
            self.write('|')
            line = table_content[i]
            for j in range(len(line)):
                self.write(format_strings[j] % line[j])
                self.write('|')
            if i == 0 and layout.rheaders:
                self.write(headsep)
            else:
                self.write(table_linesep)

    def field_table(self, layout, table_content, cols_width):
        """special case for field table"""
        assert layout.cols == 2
        format_string = '%s%%-%ss: %%s' % (linesep, cols_width[0])
        for field, value in table_content:
            self.write(format_string % (field, value))


    def visit_list(self, layout):
        """display a list layout as text"""
        # alternate bullet characters according to the nesting level
        bullet = BULLETS[self.list_level % len(BULLETS)]
        indent = ' ' * self.list_level
        self.list_level += 1
        for child in layout.children:
            self.write('%s%s%s ' % (linesep, indent, bullet))
            child.accept(self)
        self.list_level -= 1

    def visit_link(self, layout):
        """add a hyperlink"""
        if layout.label != layout.url:
            # reST style: reference here, definition flushed by the
            # enclosing section
            self.write('`%s`_' % layout.label)
            self.pending_urls.append( (layout.label, layout.url) )
        else:
            self.write(layout.url)

    def visit_verbatimtext(self, layout):
        """display a verbatim layout as text (so difficult ;)
        """
        self.writeln('::\n')
        for line in layout.data.splitlines():
            self.writeln(' ' + line)
        self.writeln()

    def visit_text(self, layout):
        """add some text"""
        self.write(layout.data)
+
+
diff --git a/vcgutils.py b/vcgutils.py
new file mode 100644
index 0000000..f6875f4
--- /dev/null
+++ b/vcgutils.py
@@ -0,0 +1,212 @@
+# Copyright (c) 2000-2002 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""
+utilities functions to generate file readable with Georg Sander's vcg
+(Visualization of Compiler Graphs).
+
+You can download vcg at http://rw4.cs.uni-sb.de/~sander/html/gshome.html
+Note that vcg exists as a debian package.
+
+See the vcg documentation for an explanation of the different values that
+may be used for the function parameters
+"""
+
+__revision__ = "$Id: vcgutils.py,v 1.6 2003-12-10 08:15:09 syt Exp $"
+
+import string
+
# allowed values for the enumerated vcg attributes, keyed by value family
ATTRS_VAL = {
    'algos': ('dfs', 'tree', 'minbackward',
              'left_to_right', 'right_to_left',
              'top_to_bottom', 'bottom_to_top',
              'maxdepth', 'maxdepthslow', 'mindepth', 'mindepthslow',
              'mindegree', 'minindegree', 'minoutdegree',
              'maxdegree', 'maxindegree', 'maxoutdegree'),
    'booleans': ('yes', 'no'),
    'colors': ('black', 'white', 'blue', 'red', 'green', 'yellow',
               'magenta', 'lightgrey',
               'cyan', 'darkgrey', 'darkblue', 'darkred', 'darkgreen',
               'darkyellow', 'darkmagenta', 'darkcyan', 'gold',
               'lightblue', 'lightred', 'lightgreen', 'lightyellow',
               'lightmagenta', 'lightcyan', 'lilac', 'turquoise',
               'aquamarine', 'khaki', 'purple', 'yellowgreen', 'pink',
               'orange', 'orchid'),
    'shapes': ('box', 'ellipse', 'rhomb', 'triangle'),
    'textmodes': ('center', 'left_justify', 'right_justify'),
    'arrowstyles': ('solid', 'line', 'none'),
    'linestyles': ('continuous', 'dashed', 'dotted', 'invisible'),
    }

# meaning of possible values:
#  0 -> string
#  1 -> int
#  list -> value in list
GRAPH_ATTRS = {
    'title': 0,
    'label': 0,
    'color': ATTRS_VAL['colors'],
    'textcolor': ATTRS_VAL['colors'],
    'bordercolor': ATTRS_VAL['colors'],
    'width': 1,
    'height': 1,
    'borderwidth': 1,
    'textmode': ATTRS_VAL['textmodes'],
    'shape': ATTRS_VAL['shapes'],
    'shrink': 1,
    'stretch': 1,
    'orientation': ATTRS_VAL['algos'],
    'vertical_order': 1,
    'horizontal_order': 1,
    'xspace': 1,
    'yspace': 1,
    'layoutalgorithm': ATTRS_VAL['algos'],
    'late_edge_labels': ATTRS_VAL['booleans'],
    'display_edge_labels': ATTRS_VAL['booleans'],
    'dirty_edge_labels': ATTRS_VAL['booleans'],
    'finetuning': ATTRS_VAL['booleans'],
    'manhattan_edges': ATTRS_VAL['booleans'],
    'smanhattan_edges': ATTRS_VAL['booleans'],
    'port_sharing': ATTRS_VAL['booleans'],
    'edges': ATTRS_VAL['booleans'],
    'nodes': ATTRS_VAL['booleans'],
    'splines': ATTRS_VAL['booleans'],
    }
NODE_ATTRS = {
    'title': 0,
    'label': 0,
    'color': ATTRS_VAL['colors'],
    'textcolor': ATTRS_VAL['colors'],
    'bordercolor': ATTRS_VAL['colors'],
    'width': 1,
    'height': 1,
    'borderwidth': 1,
    'textmode': ATTRS_VAL['textmodes'],
    'shape': ATTRS_VAL['shapes'],
    'shrink': 1,
    'stretch': 1,
    'vertical_order': 1,
    'horizontal_order': 1,
    }
EDGE_ATTRS = {
    'sourcename': 0,
    'targetname': 0,
    'label': 0,
    'linestyle': ATTRS_VAL['linestyles'],
    # NOTE(review): 'class' is declared as int (1) while 'thickness' is a
    # string (0) -- confirm against the vcg attribute reference
    'class': 1,
    'thickness': 0,
    'color': ATTRS_VAL['colors'],
    'textcolor': ATTRS_VAL['colors'],
    'arrowcolor': ATTRS_VAL['colors'],
    'backarrowcolor': ATTRS_VAL['colors'],
    'arrowsize': 1,
    'backarrowsize': 1,
    'arrowstyle': ATTRS_VAL['arrowstyles'],
    'backarrowstyle': ATTRS_VAL['arrowstyles'],
    'textmode': ATTRS_VAL['textmodes'],
    'priority': 1,
    'anchor': 1,
    'horizontal_order': 1,
    }
+
+
+# Misc utilities ###############################################################
+
def latin_to_vcg(st):
    """convert latin characters using vcg escape sequence

    every character with a code >= 192 (the latin-1 accented range) is
    replaced by vcg's \\fi<code> escape
    """
    # bug fix: dropped the useless bare `except: pass` -- ord() on a
    # single character cannot raise, so it only hid real errors.
    # iterating runs over the *original* string object, so rebinding `st`
    # inside the loop is safe
    for char in st:
        if char not in string.ascii_letters and ord(char) >= 192:
            st = st.replace(char, r'\fi%d' % ord(char))
    return st
+
+
class VCGPrinter:
    """a vcg graph writer
    """

    def __init__(self, output_stream):
        # `output_stream` only needs a write() method
        self._stream = output_stream
        self._indent = ''

    def open_graph(self, **args):
        """open a vcg graph
        """
        self._stream.write('%sgraph:{\n'%self._indent)
        self._inc_indent()
        self._write_attributes(GRAPH_ATTRS, **args)

    def close_graph(self):
        """close a vcg graph
        """
        self._dec_indent()
        self._stream.write('%s}\n'%self._indent)


    def node(self, title, **args):
        """draw a node
        """
        self._stream.write('%snode: {title:"%s"' % (self._indent, title))
        self._write_attributes(NODE_ATTRS, **args)
        self._stream.write('}\n')


    def edge(self, from_node, to_node, edge_type='', **args):
        """draw an edge from a node to another.
        """
        self._stream.write(
            '%s%sedge: {sourcename:"%s" targetname:"%s"' % (
            self._indent, edge_type, from_node, to_node))
        self._write_attributes(EDGE_ATTRS, **args)
        self._stream.write('}\n')


    # private ##################################################################

    def _write_attributes(self, attributes_dict, **args):
        """write graph, node or edge attributes

        `attributes_dict` maps an attribute name to 0 (string value,
        written quoted), 1 (int value) or a tuple of allowed values
        """
        for key, value in args.items():
            try:
                _type = attributes_dict[key]
            except KeyError:
                raise Exception('''no such attribute %s
possible attributes are %s''' % (key, attributes_dict.keys()))

            if not _type:
                self._stream.write('%s%s:"%s"\n' % (self._indent, key, value))
            elif _type == 1:
                self._stream.write('%s%s:%s\n' % (self._indent, key,
                                                  int(value)))
            elif value in _type:
                self._stream.write('%s%s:%s\n' % (self._indent, key, value))
            else:
                raise Exception('''value %s isn\'t correct for attribute %s
correct values are %s''' % (value, key, _type))

    def _inc_indent(self):
        """increment indentation
        """
        # two spaces per level (matched by _dec_indent's [:-2])
        self._indent = '  %s' % self._indent

    def _dec_indent(self):
        """decrement indentation
        """
        self._indent = self._indent[:-2]
diff --git a/visitor.py b/visitor.py
new file mode 100644
index 0000000..2fa42eb
--- /dev/null
+++ b/visitor.py
@@ -0,0 +1,108 @@
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+""" Copyright (c) 2002-2003 LOGILAB S.A. (Paris, FRANCE).
+ http://www.logilab.fr/ -- mailto:contact@logilab.fr
+
+a generic visitor abstract implementation
+"""
+
+__revision__ = "$Id: visitor.py,v 1.14 2006-02-18 03:17:16 ludal Exp $"
+
def no_filter(_):
    """default filter function, accepting every node"""
    return 1
+
+
+# Iterators ###################################################################
class FilteredIterator:
    """iterate over the nodes computed by `list_func` from a root node,
    keeping only those accepted by `filter_func` (all of them by default)
    """

    def __init__(self, node, list_func, filter_func=None):
        self._next = [(node, 0)]
        if filter_func is None:
            filter_func = no_filter
        # the whole (already filtered) visit list is computed eagerly
        self._list = list_func(node, filter_func)

    def next(self):
        """return the next node, or None when the iteration is exhausted"""
        # bug fix: narrowed the bare `except:`, which also swallowed
        # unrelated errors -- only an empty list ends the iteration
        try:
            return self._list.pop(0)
        except IndexError:
            return None
+
+
+# Base Visitor ################################################################
class Visitor:
    """generic visitor: walks the nodes produced by `iterator_class` and
    calls accept() on each of them
    """

    def __init__(self, iterator_class, filter_func=None):
        self._iter_class = iterator_class
        self.filter = filter_func

    def visit(self, node, *args, **kargs):
        """
        launch the visit on a given node

        call 'open_visit' before the begining of the visit, with extra args
        given
        when all nodes have been visited, call the 'close_visit' method
        """
        self.open_visit(node, *args, **kargs)
        return self.close_visit(self._visit(node))

    def _visit(self, node):
        # iterate with the python2-style .next() protocol; the walk stops
        # at the first false value, so a falsy node also ends it early --
        # NOTE(review): `result` is unbound if the very first node is
        # falsy; confirm nodes are always truthy
        iterator = self._get_iterator(node)
        n = iterator.next()
        while n:
            result = n.accept(self)
            n = iterator.next()
        return result

    def _get_iterator(self, node):
        return self._iter_class(node, self.filter)

    def open_visit(self, *args, **kargs):
        """
        method called at the beginning of the visit
        """
        pass

    def close_visit(self, result):
        """
        method called at the end of the visit
        """
        return result
+
+
+
+# standard visited mixin ######################################################
class VisitedMixIn(object):
    """
    Visited interface allow node visitors to use the node
    """
    def get_visit_name(self):
        """
        return the visit name for the mixed class. When calling 'accept', the
        method <'visit_' + name returned by this method> will be called on the
        visitor
        """
        # bug fix: narrowed the bare `except:` -- only a missing TYPE
        # attribute should trigger the class-name fallback
        try:
            return self.TYPE.replace('-', '_')
        except AttributeError:
            return self.__class__.__name__.lower()

    def accept(self, visitor, *args, **kwargs):
        """call the visitor's visit_<name> method with this node"""
        func = getattr(visitor, 'visit_%s' % self.get_visit_name())
        return func(self, *args, **kwargs)

    def leave(self, visitor, *args, **kwargs):
        """call the visitor's leave_<name> method with this node"""
        func = getattr(visitor, 'leave_%s' % self.get_visit_name())
        return func(self, *args, **kwargs)
+
+
diff --git a/xmlrpcutils.py b/xmlrpcutils.py
new file mode 100644
index 0000000..147d36d
--- /dev/null
+++ b/xmlrpcutils.py
@@ -0,0 +1,131 @@
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+"""XML-RPC utilities
+
+ Copyright (c) 2003-2004 LOGILAB S.A. (Paris, FRANCE).
+ http://www.logilab.fr/ -- mailto:contact@logilab.fr
+"""
+
+__revision__ = "$Id: xmlrpcutils.py,v 1.3 2005-11-22 13:13:03 syt Exp $"
+
+import xmlrpclib
+from base64 import encodestring
+#from cStringIO import StringIO
+
+ProtocolError = xmlrpclib.ProtocolError
+
+## class BasicAuthTransport(xmlrpclib.Transport):
+## def __init__(self, username=None, password=None):
+## self.username = username
+## self.password = password
+## self.verbose = None
+## self.has_ssl = httplib.__dict__.has_key("HTTPConnection")
+
+## def request(self, host, handler, request_body, verbose=None):
+## # issue XML-RPC request
+## if self.has_ssl:
+## if host.startswith("https:"): h = httplib.HTTPSConnection(host)
+## else: h = httplib.HTTPConnection(host)
+## else: h = httplib.HTTP(host)
+
+## h.putrequest("POST", handler)
+
+## # required by HTTP/1.1
+## if not self.has_ssl: # HTTPConnection already does 1.1
+## h.putheader("Host", host)
+## h.putheader("Connection", "close")
+
+## if request_body: h.send(request_body)
+## if self.has_ssl:
+## response = h.getresponse()
+## if response.status != 200:
+## raise xmlrpclib.ProtocolError(host + handler,
+## response.status,
+## response.reason,
+## response.msg)
+## file = response.fp
+## else:
+## errcode, errmsg, headers = h.getreply()
+## if errcode != 200:
+## raise xmlrpclib.ProtocolError(host + handler, errcode,
+## errmsg, headers)
+
+## file = h.getfile()
+
+## return self.parse_response(file)
+
+
+
class AuthMixin:
    """basic http authentication mixin for xmlrpc transports

    Must be mixed with a transport class providing make_connection,
    parse_response and user_agent (e.g. xmlrpclib.Transport).
    """

    def __init__(self, username, password, encoding):
        self.verbose = 0
        self.username = username
        self.password = password
        self.encoding = encoding

    def request(self, host, handler, request_body, verbose=0):
        """issue XML-RPC request"""
        # honour the verbose flag as xmlrpclib.Transport.request does,
        # instead of silently dropping the argument
        self.verbose = verbose
        h = self.make_connection(host)
        h.putrequest("POST", handler)
        # required by XML-RPC
        h.putheader("User-Agent", self.user_agent)
        h.putheader("Content-Type", "text/xml")
        h.putheader("Content-Length", str(len(request_body)))
        h.putheader("Host", host)
        h.putheader("Connection", "close")
        # basic auth: base64-encoded "user:password"; encodestring inserts
        # newlines every 76 chars which must be stripped to keep the header
        # on a single line
        if self.username is not None and self.password is not None:
            h.putheader("AUTHORIZATION", "Basic %s" % encodestring(
                "%s:%s" % (self.username, self.password)).replace("\012", ""))
        h.endheaders()
        # send body
        if request_body:
            h.send(request_body)
        # get and check reply
        errcode, errmsg, headers = h.getreply()
        if errcode != 200:
            raise ProtocolError(host + handler, errcode, errmsg, headers)
        file = h.getfile()
        # NOTE: a previous revision worked around an xmlrpclib encoding bug
        # here by rewriting the xml declaration; see the VCS history
        return self.parse_response(file)
+
class BasicAuthTransport(AuthMixin, xmlrpclib.Transport):
    """basic http authentication transport"""
    # composition class: AuthMixin supplies __init__ and the authenticated
    # request(); the names it relies on (make_connection, parse_response,
    # user_agent) are presumably provided by xmlrpclib.Transport -- confirm
    # against the xmlrpclib version in use
+
class BasicAuthSafeTransport(AuthMixin, xmlrpclib.SafeTransport):
    """basic https authentication transport"""
    # same composition as BasicAuthTransport but over xmlrpclib.SafeTransport,
    # which presumably opens SSL connections -- confirm against the xmlrpclib
    # version in use
+
+
def connect(url, user=None, passwd=None, encoding='ISO-8859-1'):
    """return an xml rpc server on <url>, using user / password if specified

    :param url: location of the XML-RPC server; an https:// url selects the
      SSL-capable transport
    :param user: optional login for basic http authentication
    :param passwd: optional password; must be given together with `user`
    :param encoding: encoding used for the xml-rpc messages
    :raise AssertionError: if only one of user / passwd is provided
    """
    if user or passwd:
        # the original check `assert user and passwd is not None` parsed as
        # `user and (passwd is not None)` because of operator precedence;
        # spell out the "both must be supplied" intent explicitly
        assert user is not None and passwd is not None, \
               'both user and passwd must be provided'
        if url.startswith('https://'):
            transport = BasicAuthSafeTransport(user, passwd, encoding)
        else:
            transport = BasicAuthTransport(user, passwd, encoding)
    else:
        # no credentials: let ServerProxy pick its default transport
        transport = None
    server = xmlrpclib.ServerProxy(url, transport, encoding=encoding)
    return server