summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSam Thursfield <sam.thursfield@codethink.co.uk>2014-12-18 10:43:37 +0000
committerSam Thursfield <sam.thursfield@codethink.co.uk>2014-12-18 10:43:37 +0000
commit764531201f99bf1d9c6dd451a212b741bfb6715e (patch)
treeb9a839ff8cb8000792382805d9027e4891835763
parenteb1a6a511c85163fe3e7ede56a348206075d9af9 (diff)
parent65278fdf1ec80a784f9ada8d390ad063a459c97a (diff)
downloadimport-764531201f99bf1d9c6dd451a212b741bfb6715e.tar.gz
Merge branch 'baserock/richardipsum/python_v3'
There is work still to be done on this importer, but it is usable for some Python projects and may as well be merged to 'master' now.
-rw-r--r--README.python59
-rw-r--r--TODO.python92
-rw-r--r--baserockimport/app.py23
-rw-r--r--baserockimport/exts/importer_python_common.py87
-rwxr-xr-xbaserockimport/exts/python.find_deps352
-rwxr-xr-xbaserockimport/exts/python.to_chunk33
-rwxr-xr-xbaserockimport/exts/python.to_lorry219
-rwxr-xr-xbaserockimport/exts/python_find_deps_tests.py362
-rwxr-xr-xbaserockimport/exts/python_lorry_tests.py72
-rw-r--r--baserockimport/mainloop.py18
10 files changed, 1309 insertions, 8 deletions
diff --git a/README.python b/README.python
new file mode 100644
index 0000000..a22f517
--- /dev/null
+++ b/README.python
@@ -0,0 +1,59 @@
+README
+------
+
+Most (nearly all) python packages use setuptools, for detailed information on
+setuptools see the setuptools docs[1]. If you're not familiar with setuptools
+you should read the docs[1][2] before continuing.
+
+Please note that this tool expects any python packages to be on pypi, you
+cannot currently import packages from other places.
+
+This import tool uses a combination of pypi metadata,
+pip and setuptools commands to extract dependency information
+to create a set of definitions usable with Baserock. This is not a stable
+process and will not work smoothly in many cases: because setup.py
+is just an ordinary Python script it's possible for a setup.py to do things that
+break the import tool's ability to extract dependencies; for example, some packages
+bypass parts of setuptools and subclass parts of distutils's core instead.
+Another problem with importing python packages is that packages are uploaded
+to pypi as tarballs rather than as repositories and as a result the import tool
+generates a lot of tarball lorries, which is the least desirable kind of lorry
+to use with Baserock. To avoid this the import tool looks through parts of the
+package metadata for links to real repos, this detection is currently extremely
+basic and will hopefully be improved in future to allow the tool to reduce the
+number of tarball lorries it generates. Some python packages
+only declare their dependency information in a human readable form within a
+README, this tool cannot do anything to extract dependency
+information that is not encoded in a machine readable fashion. At the time of
+writing numpy is an example of such a package: running the import tool on numpy
+will yield a stratum that contains numpy and none of its dependencies.
+
+Python packages may require other packages to be present for
+build/installation to proceed, in setuptools these are called setup requirements.
+Setup requirements naturally translate to Baserock build dependencies, in
+practice most python packages don't have any setup requirements, so the lists
+of build-depends for each chunk will generally be empty lists.
+
+Many python packages require additional (in addition to a python interpreter)
+packages to be present at runtime, in setuptools parlance these are install
+requirements. The import tool uses pip to recursively extract runtime
+dependency information for a given package, each dependency is added to the
+same stratum as the package we're trying to import. All packages implicitly
+depend on a python interpreter, the import tool encodes this by making all
+strata build depend on core, which at the time of writing contains cpython.
+
+Traps
+-----
+
+* Because pip executes setup.py commands to determine dependencies
+and some packages' setup.py files invoke compilers, the import tool may end up
+running compilers.
+
+* pip puts errors on stdout, some import tool errors may be vague: if it's
+not clear what's going on you can check the log, if you're using
+--log-level=debug then the import tool will log the output of all the commands
+it executes to obtain dependency information.
+
+[1]: https://pythonhosted.org/setuptools/
+[2]: https://pythonhosted.org/an_example_pypi_project/setuptools.html
+
diff --git a/TODO.python b/TODO.python
new file mode 100644
index 0000000..16b7889
--- /dev/null
+++ b/TODO.python
@@ -0,0 +1,92 @@
+TODOs
+-----
+
+* if homepage_url (in the pypi metadata for a given package) is an html page,
+scrape the page for repo links, this should reduce the number of tarball
+imports the tool does.
+
+* scheme x.y e.g. pip.find_deps should avoid using a '.' makes it more
+difficult to import extensions as modules, consider the case where we want
+to import pip.find_deps for use in a test suite.
+
+* prefix cmd to logs, so when we run pip, prefix log msg with 'pip',
+same for egg_info etc
+
+* abstract popen/log,
+there is a pattern of calling Popen with stderr=STDOUT and reading
+from p.stdout till EOF, then waiting for the subprocess to terminate.
+Since this is used in 3 places, it should be factored out really.
+
+* error messages for constraints is currently a parsed form of the version
+number e.g. ('==', ('00000000', '00000000', '00000011', '*final'))
+this will be confusing, we should emit nice version numbers.
+
+* Can we avoid the compilation that happens during import of some packages,
+i.e. nixtla
+
+* add a test runner
+
+* Importing python packages that use pbr fails, see
+https://bitbucket.org/pypa/setuptools/issue/73/typeerror-dist-must-be-a-distribution#comment-7267980
+The most sensible option would seem to be to make use of the sane environment
+that pbr provides: just read the dependency information from the text files
+that pbr projects provide, see, http://docs.openstack.org/developer/pbr/
+
+Results from running the import tool on various python packages follow:
+
+* Imports tested so far (stratum is generated)
+ * SUCCEEDS
+ * nixtla: fine but requires compilation
+ * ryser
+ * Twisted
+ * Django
+ * textdata
+ * whale-agent
+ * virtualenv
+ * lxml
+ * nose
+ * six
+ * simplejson
+ * pika
+ * MarkupSafe
+ * zc.buildout
+ * Paste
+ * pycrypto
+ * Jinja2
+ * Flask
+ * bcdoc
+ * pymongo
+
+ * FAILS
+ * python-keystoneclient
+ * All openstack stuff requires pbr, pbr does not play nicely with
        current setuptools see: [Issue 73](https://bitbucket.org/pypa/setuptools/issue/73/typeerror-dist-must-be-a-distribution#comment-7267980)
+ we can either fix setuptools/pbr or make use of the sane environment
+ pbr provides.
+ * persistent-pineapple
      * Git repo[1] has a different layout to the tarball[2] downloadable from pypi,
+ git repo's layout isn't 'installable' by pip, so dependencies can
+ not be determined.
+ [1]: https://github.com/JasonAUnrein/Persistent-Pineapple
+ [2]: https://pypi.python.org/packages/source/p/persistent_pineapple/persistent_pineapple-1.0.0.tar.gz
+ * ftw.blog
+ * cannot satisfy dependencies
+ * boto
+ * cannot satisfy dependencies
+ * jmespath
+ * cannot satisfy dependencies
+ * rejester
+ * its setup.py subclasses distutils.core
+ * requests
+ * cannot satisfy dependencies
+ * MySQL-python
+ * egg_info blows up,
+ * python setup.py install doesn't even work
+ * maybe the user's expected to do some manual stuff first, who knows
+ * rejester (its setup.py subclasses distutils.core)
+ * redis-jobs (succeeded at first, no longer exists on pypi)
+ * coverage (stratum couldn't be generated because some tags are missing)
+
+* Imports completely tested, built, deployed and executed successfully:
+
+ * Flask
diff --git a/baserockimport/app.py b/baserockimport/app.py
index 6f4d7c3..bb46c67 100644
--- a/baserockimport/app.py
+++ b/baserockimport/app.py
@@ -82,6 +82,8 @@ class BaserockImportApplication(cliapp.Application):
arg_synopsis='REPO PROJECT_NAME SOFTWARE_NAME')
self.add_subcommand('rubygems', self.import_rubygems,
arg_synopsis='GEM_NAME [GEM_VERSION]')
+ self.add_subcommand('python', self.import_python,
+ arg_synopsis='PACKAGE_NAME [VERSION]')
self.stdout_has_colours = self._stream_has_colours(sys.stdout)
@@ -180,6 +182,23 @@ class BaserockImportApplication(cliapp.Application):
loop = baserockimport.mainloop.ImportLoop(
app=self,
- goal_kind='rubygems', goal_name=goal_name, goal_version=goal_version)
- loop.enable_importer('rubygems')
+ goal_kind='rubygems', goal_name=args[0], goal_version='master')
+ loop.enable_importer('rubygems', strata=['strata/ruby.morph'])
+ loop.run()
+
    def import_python(self, args):
        '''Import one or more python packages.

        args is [PACKAGE_NAME] or [PACKAGE_NAME, VERSION].  Raises
        cliapp.AppException if the argument count is wrong.
        '''
        if len(args) < 1 or len(args) > 2:
            raise cliapp.AppException(
                'Please pass the name of the python package on the commandline.')

        package_name = args[0]

        # NOTE(review): 'master' appears to act as a "no specific version"
        # sentinel for the python.* extensions — confirm downstream handling.
        package_version = args[1] if len(args) == 2 else 'master'

        loop = baserockimport.mainloop.ImportLoop(app=self,
                                                  goal_kind='python',
                                                  goal_name=package_name,
                                                  goal_version=package_version)
        # Every python stratum build-depends on core, which (per
        # README.python) contains cpython.
        loop.enable_importer('python', strata=['strata/core.morph'])
        loop.run()
diff --git a/baserockimport/exts/importer_python_common.py b/baserockimport/exts/importer_python_common.py
new file mode 100644
index 0000000..18f0847
--- /dev/null
+++ b/baserockimport/exts/importer_python_common.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from __future__ import print_function
+
+import sys
+import logging
+
+from importer_base import ImportExtension
+
+PYPI_URL = 'http://pypi.python.org/pypi'
+
def warn(*args, **kwargs):
    '''Print a warning to stderr, prefixed with the program's name.'''
    prefix = '%s:' % sys.argv[0]
    print(prefix, *args, file=sys.stderr, **kwargs)
+
def error(*args, **kwargs):
    '''Report a fatal error to stderr and terminate with exit status 1.'''
    warn(*args, **kwargs)
    raise SystemExit(1)
+
def specs_satisfied(version, specs):
    '''Return True if `version` meets every constraint in `specs`.

    `specs` is an iterable of (op, spec_version) pairs, e.g.
    [('>=', v1), ('<', v2)].  An unknown op is fatal: user input is
    validated upstream by requirements-parser, so an unrecognised op can
    only mean pip has grown a new operator.
    '''
    opmap = {'==': lambda x, y: x == y, '!=': lambda x, y: x != y,
             '<=': lambda x, y: x <= y, '>=': lambda x, y: x >= y,
             '<': lambda x, y: x < y, '>': lambda x, y: x > y}

    def check(op, spec_version):
        if op not in opmap:
            error("Invalid op in spec: %s" % op)
        return opmap[op](version, spec_version)

    # Generator (not a list) so evaluation stops at the first
    # unsatisfied constraint.
    return all(check(op, sv) for (op, sv) in specs)
+
def name_or_closest(client, package_name):
    '''Packages on pypi are case insensitive,
    this function returns the package_name it was given if the package
    is found to match exactly, otherwise it returns a version of the name
    that case-insensitively matches the input package_name.

    If no case insensitive match can be found then we return None'''

    if len(client.package_releases(package_name)) > 0:
        logging.debug('Found package %s' % package_name)
        return package_name

    logging.debug("Couldn't find exact match for %s,"
                  "searching for a similar match" % package_name)

    candidates = client.search({'name': package_name})

    logging.debug("Got the following similarly named packages '%s': %s"
                  % (package_name, str([(c['name'], c['version'])
                                        for c in candidates])))

    logging.debug('Filtering for exact case-insensitive matches')

    wanted = package_name.lower()
    matches = [c for c in candidates if c['name'].lower() == wanted]

    logging.debug('Filtered results: %s' % matches)

    return matches[0]['name'] if matches else None
+
# We subclass the ImportExtension to setup the logger,
# so that we can send logs to the import tool's log
class PythonExtension(ImportExtension):
    '''Shared entry point for the python.* import extension scripts.

    ImportExtension (importer_base) parses the standard extension command
    line and configures logging before calling process_args(); this
    subclass then hands control back to the executing script's main().
    '''

    def __init__(self):
        super(PythonExtension, self).__init__()

    def process_args(self, _):
        # Each python.* script defines main() at module level and ends
        # with PythonExtension().run(); re-importing __main__ lets us
        # call back into whichever script is actually executing.
        import __main__
        __main__.main()
diff --git a/baserockimport/exts/python.find_deps b/baserockimport/exts/python.find_deps
new file mode 100755
index 0000000..cca0947
--- /dev/null
+++ b/baserockimport/exts/python.find_deps
@@ -0,0 +1,352 @@
+#!/usr/bin/env python
+#
+# Find the build and runtime dependencies for a given Python package
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+# TODO: there is a pattern of calling Popen with stderr=STDOUT and reading
+# from p.stdout till EOF, then waiting for the subprocess to terminate.
+# Since this is used in 3 places, it should be factored out really.
+
+from __future__ import print_function
+
+import sys
+import subprocess
+import os
+import json
+import tempfile
+import logging
+import select
+import signal
+
+import pkg_resources
+import xmlrpclib
+
+from importer_python_common import *
+
class ConflictError(Exception):
    '''Raised when two version specs for one project cannot both hold.'''

    def __init__(self, name, spec_x, spec_y):
        message = '%s: %s conflicts with %s' % (name, spec_x, spec_y)
        super(ConflictError, self).__init__(message)

        self.name = name
        self.specs = [spec_x, spec_y]
+
class UnmatchedError(Exception):
    '''Raised when a pair of comparison ops has no conflict-check rule.'''
    pass
+
def eq_check(x, y):
    '''Return True if spec `x` conflicts with spec `y`; x must be '=='.

    Each spec is an (op, version) pair.  Rewritten without Python 2
    tuple-parameter unpacking (removed by PEP 3113) so it also runs on
    Python 3; callers pass the same (op, version) tuples as before.
    '''
    xop, xval = x
    yop, yval = y

    assert xop == '=='  # Assumption, '==' spec is x

    # Map an operator pair to a predicate on (xval, yval) that returns
    # True if the two specs are in conflict.
    comp = {('==', '=='): lambda x, y: x != y,  # conflict if x != y
            ('==', '!='): lambda x, y: x == y,  # conflict if x == y
            ('==', '<'):  lambda x, y: x >= y,  # conflict if x >= y
            ('==', '>'):  lambda x, y: x <= y,  # conflict if x <= y
            ('==', '<='): lambda x, y: x > y,   # conflict if x > y
            ('==', '>='): lambda x, y: x < y,   # conflict if x < y
            }

    return comp[(xop, yop)](xval, yval)
+
def lt_check(x, y):
    '''Return True if spec `x` conflicts with spec `y`; x must be '<'.

    Specs are (op, version) pairs.  Rewritten without Python 2
    tuple-parameter unpacking (PEP 3113); call interface unchanged.
    '''
    xop, xval = x
    yop, yval = y

    assert xop == '<'  # Assumption, '<' spec is x

    # Map an operator pair to a predicate on (xval, yval) that returns
    # True if the two specs are in conflict.
    comp = {('<', '<'):  lambda x, y: False,   # < x < y cannot conflict
            ('<', '>'):  lambda x, y: x <= y,  # conflict if x <= y
            ('<', '<='): lambda x, y: False,   # < x <= y cannot conflict
            ('<', '>='): lambda x, y: x <= y,  # conflict if x <= y
            }

    return comp[(xop, yop)](xval, yval)
+
def gt_check(x, y):
    '''Return True if spec `x` conflicts with spec `y`; x must be '>'.

    Specs are (op, version) pairs.  Rewritten without Python 2
    tuple-parameter unpacking (PEP 3113); call interface unchanged.
    '''
    xop, xval = x
    yop, yval = y

    assert xop == '>'  # Assumption, '>' spec is x

    # Map an operator pair to a predicate on (xval, yval) that returns
    # True if the two specs are in conflict.
    comp = {('>', '>'):  lambda x, y: False,   # > x > y cannot conflict
            ('>', '<='): lambda x, y: x >= y,  # conflict if x >= y
            ('>', '>='): lambda x, y: False,   # > x >= y cannot conflict
            }

    return comp[(xop, yop)](xval, yval)
+
def lte_check(x, y):
    '''Return True if spec `x` conflicts with spec `y`; x must be '<='.

    Specs are (op, version) pairs.  Rewritten without Python 2
    tuple-parameter unpacking (PEP 3113); call interface unchanged.
    '''
    xop, xval = x
    yop, yval = y

    assert xop == '<='  # Assumption, '<=' spec is x

    # Map an operator pair to a predicate on (xval, yval) that returns
    # True if the two specs are in conflict.
    comp = {('<=', '<='): lambda x, y: False,  # <= x <= y cannot conflict
            ('<=', '>='): lambda x, y: x < y,  # conflict if x < y
            }

    return comp[(xop, yop)](xval, yval)
+
def gte_check(x, y):
    '''Return True if spec `x` conflicts with spec `y`; x must be '>='.

    Two '>=' specs can never conflict, so this always returns False for
    the one supported pair.  Rewritten without Python 2 tuple-parameter
    unpacking (PEP 3113); call interface unchanged.
    '''
    xop, xval = x
    yop, yval = y

    assert xop == '>='  # Assumption, '>=' spec is x

    # Map an operator pair to a predicate on (xval, yval) that returns
    # True if the two specs are in conflict.
    comp = {('>=', '>='): lambda x, y: False}  # >= x >= y cannot conflict

    return comp[(xop, yop)](xval, yval)
+
def reverse_if(c, t1, t2):
    '''Return (t2, t1) if `c` is true, else (t1, t2).

    Fix: the original returned a *list* in the reversed branch and a
    *tuple* otherwise.  Callers unpack the result with `*`, so both
    worked, but the return type is now a tuple in both branches.
    '''
    return (t2, t1) if c else (t1, t2)
+
def conflict(x, y):
    '''Return True if specs `x` and `y` are mutually unsatisfiable.

    x and y are (op, version) pairs.  Each *_check helper assumes a
    specific op on its first argument, so reverse_if() swaps the pair
    where needed.  Fixes: (1) Python 2 tuple parameters removed
    (PEP 3113); (2) the UnmatchedError message was built with
    `'... (%s, %s)' % x, y`, which passed `y` as a second exception
    argument instead of interpolating it — now `% (x, y)`.
    '''
    xop = x[0]
    yop = y[0]
    ops = (xop, yop)

    if '==' in ops: return eq_check(*reverse_if(yop == '==', x, y))
    elif '!=' in ops: return False  # != can only conflict with ==
    elif '<' in ops: return lt_check(*reverse_if(yop == '<', x, y))
    elif '>' in ops: return gt_check(*reverse_if(yop == '>', x, y))
    elif '<=' in ops: return lte_check(*reverse_if(yop == '<=', x, y))

    # not reversing here, >= x >= y should be the only combination possible
    # here, if it's not then something is wrong.
    elif '>=' in ops: return gte_check(x, y)

    else: raise UnmatchedError('Got unmatched case (%s, %s)' % (x, y))
+
def conflict_with_set(spec, specset):
    '''Return the first member of `specset` in conflict with `spec`,
    or None if there is no conflict.'''
    return next((s for s in specset if conflict(spec, s)), None)
+
def resolve_specs(requirements):
    '''Collect version specs from `requirements`, grouped by project.

    `requirements` is an iterable of pkg_resources Requirement objects
    (possibly a generator — hence the list() below, so it can be both
    logged and iterated).  Returns a dict mapping project name to a set
    of (op, parsed_version) tuples.

    Raises ConflictError as soon as a new spec contradicts one already
    collected for the same project.
    '''
    requirements = list(requirements)

    logging.debug('Resolving specs from the following requirements: %s'
                  % requirements)
    specsets = {}

    for r in requirements:
        if r.project_name not in specsets:
            specsets[r.project_name] = set()

        specset = specsets[r.project_name]

        for (op, version) in r.specs:
            # Parse the version string so later comparisons are
            # semantic rather than lexicographic.
            spec = (op, pkg_resources.parse_version(version))

            c = conflict_with_set(spec, specset)
            if not c:
                specset.add(spec)
            else:
                raise ConflictError(r.project_name, c, spec)

    return specsets
+
def resolve_versions(specsets):
    '''Find, for each project in `specsets`, the pypi release versions
    satisfying all of that project's specs.

    `specsets` maps project name to a set of (op, parsed_version) pairs,
    as produced by resolve_specs().  Returns a dict mapping the canonical
    (pypi-cased) project name to a non-empty list of candidate versions.
    Exits via error() if a project is unknown or unsatisfiable.

    Fixes: the xmlrpc client was re-created inside the loop (one per
    project); `== None` replaced with `is None`; `iteritems()` replaced
    with `items()`, which behaves identically here and is portable.
    '''
    logging.debug('Resolving versions')
    versions = {}

    # One client serves all queries; no need to reconnect per project.
    client = xmlrpclib.ServerProxy(PYPI_URL)

    for (proj_name, specset) in specsets.items():
        # Bit of a hack to deal with pypi case insensitivity
        new_proj_name = name_or_closest(client, proj_name)
        if new_proj_name is None:
            error("Couldn't find any project with name '%s'" % proj_name)

        logging.debug("Treating %s as %s" % (proj_name, new_proj_name))
        proj_name = new_proj_name

        releases = client.package_releases(proj_name)

        logging.debug('Found %d releases of %s: %s'
                      % (len(releases), proj_name, releases))

        candidates = [v for v in releases
                      if specs_satisfied(pkg_resources.parse_version(v),
                                         specset)]

        if len(candidates) == 0:
            error("Couldn't find any version of %s to satisfy: %s"
                  % (proj_name, specset))

        logging.debug('Found %d releases of %s that satisfy constraints: %s' %
                      (len(candidates), proj_name, candidates))

        assert proj_name not in versions
        versions[proj_name] = candidates

    return versions
+
def find_build_deps(source, name, version=None):
    '''Return the build-time (setup_requires) dependencies of a package.

    `source` is a directory containing the unpacked source tree, `name`
    the pypi project name.  Returns a dict mapping dependency name to a
    single chosen version string; empty when there are none or they
    could not be determined.
    '''
    logging.debug('Finding build dependencies for %s%s at %s'
                  % (name, ' %s' % version if version else '', source))

    # This amounts to running python setup.py egg_info and checking
    # the resulting egg_info dir for a file called setup_requires.txt

    logging.debug('Running egg_info command')

    p = subprocess.Popen(['python', 'setup.py', 'egg_info'], cwd=source,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    # Mirror the child's merged stdout/stderr into our debug log,
    # line by line, until EOF.
    while True:
        line = p.stdout.readline()
        if line == '':
            break

        logging.debug(line.rstrip('\n'))

    p.wait()  # even with eof, wait for termination

    if p.returncode != 0:
        # Something went wrong, but in most cases we can probably still
        # successfully import without knowing the setup_requires list
        # because many python packages have an empty setup_requires list.
        logging.warning("Couldn't obtain build dependencies for %s:"
                        " egg_info command failed"
                        " (%s may be using distutils rather than setuptools)"
                        % (name, name))

    # NOTE(review): assumes egg_info writes '<name>.egg-info'; setuptools
    # normalises some names (e.g. '-' to '_') — TODO confirm.
    egg_dir = '%s.egg-info' % name
    build_deps_file = os.path.join(source, egg_dir, 'setup_requires.txt')

    build_deps = {}

    # Check whether there's a setup_requires.txt
    if not os.path.isfile(build_deps_file):
        build_deps = {}
    else:
        with open(build_deps_file) as f:
            specsets = resolve_specs(pkg_resources.parse_requirements(f))
            logging.debug("Resolved specs for %s: %s" % (name, specsets))

            versions = resolve_versions(specsets)
            logging.debug('Resolved versions: %s' % versions)

            # Since any of the candidates in versions should satisfy
            # all specs, we just pick the first version we see
            build_deps = {name: vs[0] for (name, vs) in versions.iteritems()}

    return build_deps
+
def find_runtime_deps(source, name, version=None, use_requirements_file=False):
    '''Return the runtime (install_requires) dependencies of a package.

    Returns a dict mapping dependency name to a chosen version string.
    If setup.py declares no install requirements but the source has a
    requirements.txt, recurses once with use_requirements_file=True.
    Exits via error() if pip fails.

    NOTE(review): '--list-dependencies' is an option of the project's
    patched pip, not stock pip — this requires that patched pip to be
    on PATH.
    '''
    logging.debug('Finding runtime dependencies for %s%s at %s'
                  % (name, ' %s' % version if version else '', source))

    # Run our patched pip to get a list of installed deps
    # Run pip install . --list-dependencies=instdeps.txt with cwd=source

    # Some temporary file needed for storing the requirements
    tmpfd, tmppath = tempfile.mkstemp()
    logging.debug('Writing install requirements to: %s', tmppath)

    args = ['pip', 'install', '.', '--list-dependencies=%s' % tmppath]
    if use_requirements_file:
        args.insert(args.index('.') + 1, '-r')
        args.insert(args.index('.') + 2, 'requirements.txt')

    logging.debug('Running pip, args: %s' % args)

    p = subprocess.Popen(args, cwd=source, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)

    # Mirror pip's merged output into our debug log until EOF.
    while True:
        line = p.stdout.readline()
        if line == '':
            break

        logging.debug(line.rstrip('\n'))

    p.wait()  # even with eof, wait for termination

    logging.debug('pip exited with code: %d' % p.returncode)

    if p.returncode != 0:
        error('failed to get runtime dependencies for %s %s at %s'
              % (name, version, source))

    # fdopen takes ownership of tmpfd and closes it with the file.
    with os.fdopen(tmpfd) as tmpfile:
        ss = resolve_specs(pkg_resources.parse_requirements(tmpfile))
        logging.debug("Resolved specs for %s: %s" % (name, ss))

        logging.debug("Removing root package from specs")
        # filter out "root" package
        specsets = {k: v for (k, v) in ss.iteritems() if k != name}

        versions = resolve_versions(specsets)
        logging.debug('Resolved versions: %s' % versions)

        # Since any of the candidates in versions should satisfy
        # all specs, we just pick the first version we see
        runtime_deps = {name: vs[0] for (name, vs) in versions.iteritems()}

    # NOTE(review): tmppath is only removed on the success path; error()
    # above exits without cleanup — acceptable for a one-shot tool.
    os.remove(tmppath)

    if (len(runtime_deps) == 0 and not use_requirements_file
            and os.path.isfile(os.path.join(source, 'requirements.txt'))):
        logging.debug('No install requirements specified in setup.py,'
                      ' using requirements file')
        return find_runtime_deps(source, name, version,
                                 use_requirements_file=True)

    return runtime_deps
+
def main():
    '''Print the build and runtime dependencies of a package as JSON.

    usage: PACKAGE_SOURCE_DIR NAME [VERSION]
    '''
    argc = len(sys.argv)
    if argc not in (3, 4):
        print('usage: %s PACKAGE_SOURCE_DIR NAME [VERSION]' % sys.argv[0])
        sys.exit(1)

    logging.debug('%s: sys.argv[1:]: %s' % (sys.argv[0], sys.argv[1:]))
    source, name = sys.argv[1], sys.argv[2]
    version = sys.argv[3] if argc == 4 else None

    # Normalise the name against pypi's case-insensitive index first.
    client = xmlrpclib.ServerProxy(PYPI_URL)
    canonical_name = name_or_closest(client, name)

    if canonical_name is None:
        error("Couldn't find any project with name '%s'" % name)

    logging.debug('Treating %s as %s' % (name, canonical_name))
    name = canonical_name

    deps = {
        'build-dependencies': find_build_deps(source, name, version),
        'runtime-dependencies': find_runtime_deps(source, name, version),
    }

    print(json.dumps({'python': deps}))
+
# Script entry point: PythonExtension.run() (importer_python_common) sets
# up logging for the import tool, then calls back into this module's
# main() via process_args().
if __name__ == '__main__':
    PythonExtension().run()
diff --git a/baserockimport/exts/python.to_chunk b/baserockimport/exts/python.to_chunk
new file mode 100755
index 0000000..74befeb
--- /dev/null
+++ b/baserockimport/exts/python.to_chunk
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+# We can get rid of all of this once we modify the import tool
+
+from __future__ import print_function
+
+import sys
+
# Emit a generic chunk morphology for a python package: every pypi
# package builds and installs the same way, via setup.py.

MORPH_TEMPLATE = '''name: %s
kind: chunk
build-commands:
- python setup.py build
install-commands:
- python setup.py install --prefix=/usr --root "$DESTDIR"'''

if not (3 <= len(sys.argv) <= 4):
    usage = 'usage: %s package_source_dir chunk_name [version]' % sys.argv[0]
    print(usage, file=sys.stderr)
    sys.exit(1)

chunk_name = sys.argv[2]
print(MORPH_TEMPLATE % chunk_name)
diff --git a/baserockimport/exts/python.to_lorry b/baserockimport/exts/python.to_lorry
new file mode 100755
index 0000000..b7341ca
--- /dev/null
+++ b/baserockimport/exts/python.to_lorry
@@ -0,0 +1,219 @@
+#!/usr/bin/env python
+#
+# Create a Baserock .lorry file for a given Python package
+#
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from __future__ import print_function
+
+import subprocess
+import requests
+import json
+import sys
+import shutil
+import tempfile
+import xmlrpclib
+import logging
+import select
+
+import pkg_resources
+
+from importer_python_common import *
+
def fetch_package_metadata(package_name):
    '''Fetch and return pypi's JSON metadata for `package_name`.

    Exits via error() if the request fails or returns a non-200 status.
    '''
    url = '%s/%s/json' % (PYPI_URL, package_name)
    try:
        response = requests.get(url)
        # raise exception if status code is not 200 OK
        response.raise_for_status()
    except Exception as e:
        error("Couldn't fetch package metadata:", e)

    return response.json()
+
def find_repo_type(url):
    '''Probe `url` with each known VCS tool and return the repo type.

    Returns 'git', 'hg', 'svn' or 'bzr', or None when `url` is not
    fetchable or no tool recognises it.  This is expensive: it attempts
    a real clone/checkout with each tool in turn.

    Fixes: a missing VCS binary used to raise OSError and abort detection
    entirely (now that tool is skipped), and the scratch directory leaked
    if anything raised mid-probe (now removed in a finally).
    '''

    # Don't bother with detection if we can't get a 200 OK
    logging.debug("Getting '%s' ..." % url)

    status_code = requests.get(url).status_code
    if status_code != 200:
        logging.debug('Got %d status code from %s, aborting repo detection'
                      % (status_code, url))
        return None

    logging.debug('200 OK for %s' % url)
    logging.debug('Finding repo type for %s' % url)

    vcss = [('git', 'clone'), ('hg', 'clone'),
            ('svn', 'checkout'), ('bzr', 'branch')]

    for (vcs, vcs_command) in vcss:
        logging.debug('Trying %s %s' % (vcs, vcs_command))
        tempdir = tempfile.mkdtemp()

        try:
            try:
                p = subprocess.Popen([vcs, vcs_command, url],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT,
                                     stdin=subprocess.PIPE,
                                     cwd=tempdir)
            except OSError as e:
                # Tool not installed; try the remaining VCSs instead of
                # crashing the whole import.
                logging.debug('Could not run %s: %s' % (vcs, e))
                continue

            # We close stdin on parent side to prevent the child from
            # blocking if it reads on stdin
            p.stdin.close()

            while True:
                line = p.stdout.readline()
                if line == '':
                    break

                logging.debug(line.rstrip('\n'))

            p.wait()  # even with eof on both streams, we still wait
        finally:
            shutil.rmtree(tempdir)

        if p.returncode == 0:
            logging.debug('%s is a %s repo' % (url, vcs))
            return vcs

    logging.debug("%s doesn't seem to be a repo" % url)

    return None
+
def get_compression(url):
    '''Return the compression type implied by `url`'s file extension,
    or None if the extension is not a recognised tarball suffix.'''
    extension_map = {
        'tar.gz': 'gzip', 'tgz': 'gzip', 'tar.Z': 'gzip',
        'tar.bz2': 'bzip2', 'tbz2': 'bzip2',
        'tar.lzma': 'lzma', 'tar.xz': 'lzma', 'tlz': 'lzma', 'txz': 'lzma',
    }

    components = url.split('.')
    # Try the last component ('tgz'), then the last two ('tar.gz').
    for n in (1, 2):
        suffix = '.'.join(components[-n:])
        if suffix in extension_map:
            return extension_map[suffix]

    return None
+
# Assumption: url passed to this function must have a 'standard' tar extension
def make_tarball_lorry(package_name, url):
    '''Return a .lorry (JSON) string describing a tarball import.'''
    # TODO: this prefix probably shouldn't be hardcoded here either
    lorry_name = 'python-packages/%s-tarball' % package_name.lower()

    lorry_body = {'type': 'tarball', 'url': url}
    compression = get_compression(url)
    if compression is not None:
        lorry_body['compression'] = compression

    return json.dumps({lorry_name: lorry_body}, indent=4, sort_keys=True)
+
def filter_urls(urls):
    '''Return only the release url dicts that look like tarballs.

    `urls` is a list of dicts as returned by pypi's release_urls; each
    has a 'url' key.  A url qualifies when its last one or two
    dot-separated components form a known tar extension.

    Fix: the original compared `url.split('.')[-1:]` — a *list* —
    against the list of extension strings, so single-component
    extensions (a plain '.tar') could never match.  Also returns a list
    explicitly instead of relying on Python 2's `filter`.
    '''
    allowed_extensions = ['tar.gz', 'tgz', 'tar.Z', 'tar.bz2', 'tbz2',
                          'tar.lzma', 'tar.xz', 'tlz', 'txz', 'tar']

    def allowed_extension(url):
        components = url['url'].split('.')
        return ('.'.join(components[-2:]) in allowed_extensions
                or components[-1] in allowed_extensions)

    return [u for u in urls if allowed_extension(u)]
+
def get_releases(client, requirement):
    '''Return the release versions pypi knows for `requirement`'s project.

    Exits via error() if the query fails.
    '''
    project = requirement.project_name
    try:
        releases = client.package_releases(project)
    except Exception as e:
        error("Couldn't fetch release data:", e)

    return releases
+
def generate_tarball_lorry(client, requirement):
    '''Build a tarball .lorry string for the best matching release.

    Picks the first release version satisfying `requirement`'s specs,
    then the first tarball-looking download url for that version.
    Exits via error() when nothing suitable exists.
    '''
    releases = get_releases(client, requirement)

    if len(releases) == 0:
        error("Couldn't find any releases for package %s"
              % requirement.project_name)

    # NOTE(review): versions are compared here as raw strings
    # (requirement.specs holds unparsed version strings), unlike
    # python.find_deps which compares parse_version() results — confirm
    # this is intended.
    releases = [v for v in releases if specs_satisfied(v, requirement.specs)]

    if len(releases) == 0:
        error("Couldn't find any releases of %s"
              " that satisfy version constraints: %s"
              % (requirement.project_name, requirement.specs))

    # Presumably pypi lists releases newest-first — TODO confirm.
    release_version = releases[0]

    logging.debug('Fetching urls for package %s with version %s'
                  % (requirement.project_name, release_version))

    try:
        # Get a list of dicts, the dicts contain the urls.
        urls = client.release_urls(requirement.project_name, release_version)
    except Exception as e:
        error("Couldn't fetch release urls:", e)

    tarball_urls = filter_urls(urls)

    if len(tarball_urls) > 0:
        urls = tarball_urls
    elif len(urls) > 0:
        warn("None of these urls look like tarballs:")
        for url in urls:
            warn("\t%s" % url['url'])
        error("Cannot proceed")
    else:
        error("Couldn't find any download urls for package %s"
              % requirement.project_name)

    url = urls[0]['url']

    return make_tarball_lorry(requirement.project_name, url)
+
def str_repo_lorry(package_name, repo_type, url):
    '''Return a .lorry (JSON) string for a real VCS repository.'''
    # TODO: this prefix probably shouldn't be hardcoded here
    lorry_name = 'python-packages/%s' % package_name.lower()
    lorry_body = {'type': repo_type, 'url': url}

    return json.dumps({lorry_name: lorry_body}, indent=4, sort_keys=True)
+
def main():
    '''Print a .lorry for the python requirement given as argv[1].

    Prefers a real VCS repo (detected from the package's homepage url)
    over a pypi tarball, since tarball lorries are the least desirable
    kind (see README.python).
    '''
    if len(sys.argv) != 2:
        # TODO explain the format of python requirements
        # warn the user that they probably want to quote their arg
        # > < will be interpreted as redirection by the shell
        print('usage: %s requirement' % sys.argv[0], file=sys.stderr)
        sys.exit(1)

    client = xmlrpclib.ServerProxy(PYPI_URL)

    # Take the first requirement parsed from argv[1].  NOTE: .next() is
    # the Python 2 generator protocol; Python 3 would need next(...).
    req = pkg_resources.parse_requirements(sys.argv[1]).next()

    # Normalise against pypi's case-insensitive package index.
    new_proj_name = name_or_closest(client, req.project_name)

    if new_proj_name == None:
        error("Couldn't find any project with name '%s'" % req.project_name)

    logging.debug('Treating %s as %s' % (req.project_name, new_proj_name))
    req.project_name = new_proj_name

    metadata = fetch_package_metadata(req.project_name)
    info = metadata['info']

    # Only attempt (slow) repo detection when the metadata has a homepage.
    repo_type = (find_repo_type(info['home_page'])
                 if 'home_page' in info else None)

    print(str_repo_lorry(req.project_name, repo_type, info['home_page'])
          if repo_type else generate_tarball_lorry(client, req))
+
# Script entry point: PythonExtension.run() (importer_python_common) sets
# up logging for the import tool, then calls back into this module's
# main() via process_args().
if __name__ == '__main__':
    PythonExtension().run()
diff --git a/baserockimport/exts/python_find_deps_tests.py b/baserockimport/exts/python_find_deps_tests.py
new file mode 100755
index 0000000..f7fc2dd
--- /dev/null
+++ b/baserockimport/exts/python_find_deps_tests.py
@@ -0,0 +1,362 @@
+#!/usr/bin/env python
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import unittest
+import random
+
+import imp
+python_find_deps = imp.load_source('python_find_deps', 'python.find_deps')
+
+from pkg_resources import parse_requirements, parse_version
+
+def reverse(xs):
+    """Return a reversed copy of the sequence *xs*; *xs* itself is untouched."""
+    return xs[::-1]
+
+class ConflictDetectionTests(unittest.TestCase):
+    """Exercise python_find_deps.resolve_specs' version-conflict detection.
+
+    Each test resolves a pair of version specs for one package and expects
+    either a ConflictError naming the clashing (operator, version) pairs,
+    or a successfully merged spec set.  Every case is also run with the
+    requirement order reversed, since detection must be order-independent.
+    """
+
+    def setUp(self):
+        # NOTE(review): self.test_requirements is not referenced by any of
+        # the tests below -- possibly leftover scaffolding; confirm.
+        reqs = ['a == 0.1', 'a == 0.2']
+        self.test_requirements = parse_requirements(reqs)
+
+    def run_conflict_test(self, requirements, expected_conflicts):
+        # Resolve `requirements` and require a ConflictError whose .specs
+        # equal the expected (op, version) list for every package name.
+        names = set([r.project_name for r in requirements])
+
+        with self.assertRaises(python_find_deps.ConflictError) as cm:
+            python_find_deps.resolve_specs(requirements)
+
+        for name in names:
+            # Expected versions are plain strings; normalise them with
+            # parse_version to match what the exception carries.
+            _exps = [(op, parse_version(v)) for (op, v)
+                     in expected_conflicts[name]]
+
+            self.assertEqual(cm.exception.specs, _exps)
+
+    def run_conflict_test_reversed(self, requirements, expected_conflicts):
+        # The exception reports specs in encounter order, so flipping the
+        # requirements means the expected conflict lists flip too.
+        # First reverse conflicts to get them in the right order
+        reversed_expected_conflicts = {k: reverse(v) for (k, v)
+                                       in expected_conflicts.iteritems()}
+
+        self.run_conflict_test(reverse(requirements),
+                               reversed_expected_conflicts)
+
+    def run_no_conflict_test(self, requirements, expected_specs):
+        # Resolution must succeed; compare the merged spec set per package
+        # against `expected_specs` (versions normalised as above).
+        print python_find_deps.resolve_specs(requirements)
+
+        names = set([r.project_name for r in requirements])
+
+        for name in names:
+            _exps = set([(op, parse_version(v)) for (op, v)
+                         in expected_specs[name]])
+
+            _specs = python_find_deps.resolve_specs(requirements)[name]
+
+            self.assertEqual(_specs, _exps)
+
+    def test_eqs_eqs(self):
+        requirements = list(parse_requirements(['a == 0.1', 'a == 0.2']))
+        expected_conflicts = {'a': [('==', '0.1'), ('==', '0.2')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+    def test_eqs_nt_eq(self):
+        # == x conflicts with != x
+        requirements = list(parse_requirements(['a == 0.1', 'a != 0.1']))
+        expected_conflicts = {'a': [('==', '0.1'), ('!=', '0.1')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+    def test_eqs_lt(self):
+        # == x conflicts with < y if x >= y
+        requirements = list(parse_requirements(['a == 0.2', 'a < 0.1']))
+
+        expected_conflicts = {'a': [('==', '0.2'), ('<', '0.1')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+        requirements = list(parse_requirements(['a == 0.1', 'a < 0.1']))
+
+        expected_conflicts = {'a': [('==', '0.1'), ('<', '0.1')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+    def test_eqs_gt(self):
+        # == x conflicts with > y if x <= y
+        requirements = list(parse_requirements(['a == 0.1', 'a > 0.1']))
+
+        expected_conflicts = {'a': [('==', '0.1'), ('>', '0.1')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+        requirements = list(parse_requirements(['a == 0.1', 'a > 0.2']))
+
+        expected_conflicts = {'a': [('==', '0.1'), ('>', '0.2')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+    def test_eqs_lte(self):
+        # == x conflicts with <= y if x > y
+        requirements = list(parse_requirements(['a == 0.2', 'a <= 0.1']))
+
+        expected_conflicts = {'a': [('==', '0.2'), ('<=', '0.1')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+        requirements = list(parse_requirements(['a == 0.1', 'a <= 0.1'])) # no conflict
+        expected_specs = {'a': set([('==', '0.1'), ('<=', '0.1')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+    def test_eq_gte(self):
+        # == x conflicts with >= y if x < y
+        requirements = list(parse_requirements(['a == 0.1', 'a >= 0.2']))
+
+        expected_conflicts = {'a': [('==', '0.1'), ('>=', '0.2')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+        requirements = list(parse_requirements(['a == 0.1', 'a >= 0.1']))
+        expected_specs = {'a': set([('==', '0.1'), ('>=', '0.1')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+    def test_lt_lt(self):
+        # < x < y never conflicts
+        requirements = list(parse_requirements(['a < 0.1', 'a < 0.1']))
+        expected_specs = {'a': set([('<', '0.1')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+        requirements = list(parse_requirements(['a < 0.1', 'a < 0.2']))
+        expected_specs = {'a': set([('<', '0.1'), ('<', '0.2')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+    def test_lt_gt(self):
+        # < x conflicts with > y if x <= y
+        requirements = list(parse_requirements(['a < 0.1', 'a > 0.1']))
+
+        expected_conflicts = {'a': [('<', '0.1'), ('>', '0.1')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+        requirements = list(parse_requirements(['a < 0.1', 'a > 0.2']))
+
+        expected_conflicts = {'a': [('<', '0.1'), ('>', '0.2')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+    def test_lt_lte(self):
+        # < x <= y never conflicts
+        requirements = list(parse_requirements(['a < 0.1', 'a <= 0.1']))
+        expected_specs = {'a': set([('<', '0.1'), ('<=', '0.1')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+        requirements = list(parse_requirements(['a < 0.1', 'a <= 0.2']))
+        expected_specs = {'a': set([('<', '0.1'), ('<=', '0.2')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+    def test_lt_gte(self):
+        # < x conflicts with >= y if x <= y
+        requirements = list(parse_requirements(['a < 0.1', 'a >= 0.1']))
+
+        expected_conflicts = {'a': [('<', '0.1'), ('>=', '0.1')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+        requirements = list(parse_requirements(['a < 0.1', 'a >= 0.2']))
+
+        expected_conflicts = {'a': [('<', '0.1'), ('>=', '0.2')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+    def test_gt_gt(self):
+        # > x > y never conflicts
+        requirements = list(parse_requirements(['a > 0.1', 'a > 0.1']))
+        expected_specs = {'a': set([('>', '0.1')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+        requirements = list(parse_requirements(['a > 0.1', 'a > 0.2']))
+        expected_specs = {'a': set([('>', '0.1'), ('>', '0.2')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+    def test_gt_lte(self):
+        # > x conflicts with <= y if x >= y
+        requirements = list(parse_requirements(['a > 0.1', 'a <= 0.1']))
+
+        expected_conflicts = {'a': [('>', '0.1'), ('<=', '0.1')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+        requirements = list(parse_requirements(['a > 0.2', 'a <= 0.1']))
+
+        expected_conflicts = {'a': [('>', '0.2'), ('<=', '0.1')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+    def test_gt_gte(self):
+        # > x >= y never conflicts
+        requirements = list(parse_requirements(['a > 0.1', 'a >= 0.1']))
+        expected_specs = {'a': set([('>', '0.1'), ('>=', '0.1')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+        requirements = list(parse_requirements(['a > 0.1', 'a >= 0.2']))
+        expected_specs = {'a': set([('>', '0.1'), ('>=', '0.2')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+    def test_lte_lte(self):
+        # <= x <= y never conflicts
+        requirements = list(parse_requirements(['a <= 0.1', 'a <= 0.1']))
+        expected_specs = {'a': set([('<=', '0.1')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+        requirements = list(parse_requirements(['a <= 0.1', 'a <= 0.2']))
+        expected_specs = {'a': set([('<=', '0.1'), ('<=', '0.2')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+    def test_lte_gte(self):
+        # <= x conflicts with >= y if x < y
+        # note that if x == y, then the two specs don't add any constraint
+        requirements = list(parse_requirements(['a <= 0.1', 'a >= 0.1']))
+
+        expected_specs= {'a': set([('<=', '0.1'), ('>=', '0.1')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+        requirements = list(parse_requirements(['a <= 0.1', 'a >= 0.2']))
+
+        expected_conflicts = {'a': [('<=', '0.1'), ('>=', '0.2')]}
+
+        self.run_conflict_test(requirements, expected_conflicts)
+        self.run_conflict_test_reversed(requirements, expected_conflicts)
+
+    def test_gte_gte(self):
+        # >= x >= y never conflicts
+        requirements = list(parse_requirements(['a >= 0.1', 'a >= 0.1']))
+        expected_specs = {'a': set([('>=', '0.1')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+        requirements = list(parse_requirements(['a >= 0.1', 'a >= 0.2']))
+        expected_specs = {'a': set([('>=', '0.1'), ('>=', '0.2')])}
+
+        self.run_no_conflict_test(requirements, expected_specs)
+        self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+    def test_ne(self):
+        # != can only conflict with == (which is tested above)
+        for s in ['<', '>', '<=', '>=']:
+            requirements = list(parse_requirements(['a != 0.1', 'a %s 0.1' % s]))
+            expected_specs = {'a': set([('!=', '0.1'), ('%s' % s, '0.1')])}
+
+            self.run_no_conflict_test(requirements, expected_specs)
+            self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+            requirements = list(parse_requirements(['a != 0.1', 'a %s 0.2' % s]))
+            expected_specs = {'a': set([('!=', '0.1'), ('%s' % s, '0.2')])}
+
+            self.run_no_conflict_test(requirements, expected_specs)
+            self.run_no_conflict_test(reverse(requirements), expected_specs)
+
+    def test_unmatched(self):
+        # Run all permutations, fail if we get an UnmatchedException
+        # or something else we weren't expecting
+        comparitors = ['==', '!=', '<', '>', '<=', '>=']
+        vs = [('0.1', '0.1'), ('0.1', '0.2'),
+              ('%s' % random.randint(0, 100), '%s' % random.randint(0, 100))]
+
+        for (vx, vy) in vs:
+            for cmpx in comparitors:
+                for cmpy in comparitors:
+                    requirements = parse_requirements(['a %s %s' % (cmpx, vx),
+                                                       'a %s %s' % (cmpy, vy)])
+                    try:
+                        python_find_deps.resolve_specs(requirements)
+                    except python_find_deps.ConflictError:
+                        pass
+                    except python_find_deps.UnmatchedException as e:
+                        self.fail('Got UnmatchedException: %s' % e)
+                    except Exception as e:
+                        self.fail('Got some other unexpected Exception: %s' % e)
+
+    def test_cause_unmatched(self):
+        requirements_specs = list(parse_requirements(['a == 0.1', 'a == 0.1']))
+
+        # replace our parsed specs with invalid specs
+        # specifically, specs with invalid operators
+        #
+        # note, one spec won't do, we're validating the specs logically
+        # not syntactically; we assume the specs themselves have been parsed
+        # by pkg_resources which will do the validation for us.
+        #
+        # so we need two specs to force a check for a conflict,
+        # an UnmatchedError should occur if neither of the specs
+        # contain an operator recognised by the conflict detector
+        # e.g. '===', which is undefined in a spec
+        requirements_specs[0].specs = [('===', '0.1')]
+        requirements_specs[1].specs = [('===', '0.1')]
+
+        # NOTE(review): this expects UnmatchedError while test_unmatched
+        # catches UnmatchedException -- confirm both names exist in
+        # python.find_deps or whether one is a typo.
+        with self.assertRaises(python_find_deps.UnmatchedError):
+            specs = python_find_deps.resolve_specs(requirements_specs)
+
+    def test_distinct_requirements_no_conflict(self):
+        requirements = list(parse_requirements(['a == 0.1', 'b == 0.1']))
+
+        specs = python_find_deps.resolve_specs(requirements)
+
+        expected_specs = {'a': set([('==', parse_version('0.1'))]),
+                          'b': set([('==', parse_version('0.1'))])}
+
+        self.assertEqual(specs, expected_specs)
+
+
+# Run the conflict-detection suite directly; verbosity=2 names each test.
+if __name__ == '__main__':
+    suite = unittest.TestLoader().loadTestsFromTestCase(ConflictDetectionTests)
+    unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/baserockimport/exts/python_lorry_tests.py b/baserockimport/exts/python_lorry_tests.py
new file mode 100755
index 0000000..12ef564
--- /dev/null
+++ b/baserockimport/exts/python_lorry_tests.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# Copyright (C) 2014 Codethink Limited
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import imp
+python_lorry = imp.load_source('python_lorry', 'python.to_lorry')
+
+import json
+
+import unittest
+
+class Tests(unittest.TestCase):
+    """Unit tests for python.to_lorry's make_tarball_lorry."""
+
+    def test_make_tarball_lorry(self):
+        # Every recognised tarball extension must yield a lorry whose
+        # 'compression' field names the matching codec; a plain '.tar'
+        # must yield no 'compression' field at all.
+        gzip, bzip, lzma = 'gzip', 'bzip2', 'lzma'
+
+        valid_extensions = {'tar.gz': gzip, 'tgz': gzip, 'tar.Z': gzip,
+                            'tar.bz2': bzip, 'tbz2': bzip,
+                            'tar.lzma': lzma, 'tar.xz': lzma,
+                            'tlz': lzma, 'txz': lzma}
+
+        def make_url(extension):
+            # Fabricate a download url ending in the given extension.
+            return 'http://foobar/baz.%s' % extension
+
+        def get_tarball_lorry_url(name, lorry_json):
+            # Lorries are keyed 'python-packages/<name>-tarball'.
+            return json.loads(lorry_json)['python-packages/'
+                                          + name + '-tarball']['url']
+
+        def get_tarball_lorry_compression(name, lorry_json):
+            return json.loads(lorry_json)['python-packages/'
+                                          + name + '-tarball']['compression']
+
+        fake_package_name = 'name'
+        urls = [(make_url(ext), ext) for ext in valid_extensions]
+
+        for (url, ext) in urls:
+            lorry_json = python_lorry.make_tarball_lorry('name', url)
+            print lorry_json
+
+            tarball_url = get_tarball_lorry_url(fake_package_name, lorry_json)
+            print 'Tarball url: %s' % tarball_url
+
+            self.assertEqual(tarball_url, url)
+
+            tarball_compression = get_tarball_lorry_compression(
+                fake_package_name, lorry_json)
+
+            print 'Tarball compression: %s' % tarball_compression
+            self.assertEqual(tarball_compression, valid_extensions[ext])
+
+        # Uncompressed tarball: url is kept, no 'compression' key emitted.
+        url = 'http://foobar/baz.tar'
+        lorry_json = python_lorry.make_tarball_lorry('name', url)
+        self.assertEqual(get_tarball_lorry_url(fake_package_name,
+                                               lorry_json), url)
+        self.assertTrue('compression' not in lorry_json)
+
+
+# Run the lorry tests directly; verbosity=2 names each test.
+if __name__ == '__main__':
+    suite = unittest.TestLoader().loadTestsFromTestCase(Tests)
+    unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/baserockimport/mainloop.py b/baserockimport/mainloop.py
index b400695..057ab98 100644
--- a/baserockimport/mainloop.py
+++ b/baserockimport/mainloop.py
@@ -58,6 +58,7 @@ def run_extension(filename, args):
output.append(line)
def report_extension_stderr(line):
+ logging.debug('Received "%s" on stderr' % line)
errors.append(line)
def report_extension_logger(line):
@@ -115,7 +116,7 @@ class ImportLoop(object):
self.importers = {}
- def enable_importer(self, kind, extra_args=[]):
+ def enable_importer(self, kind, extra_args=[], **kwargs):
'''Enable an importer extension in this ImportLoop instance.
At least one importer extension must be enabled for the loop to do
@@ -129,7 +130,8 @@ class ImportLoop(object):
'''
assert kind not in self.importers
self.importers[kind] = {
- 'extra_args': extra_args
+ 'extra_args': extra_args,
+ 'kwargs': kwargs
}
def run(self):
@@ -589,18 +591,22 @@ class ImportLoop(object):
'ref': m.ref,
'unpetrify-ref': m.named_ref,
'morph': m.filename,
- 'build-depends': build_depends,
+ 'build-depends': build_depends
}
chunk_entries.append(entry)
+ kwargs = self.importers[kind]['kwargs']
+
+ stratum_build_depends = (
+ [{'morph': stratum} for stratum in kwargs['strata']]
+ if 'strata' in kwargs else [])
+
stratum_name = goal_name
stratum = {
'name': stratum_name,
'kind': 'stratum',
'description': 'Autogenerated by Baserock import tool',
- 'build-depends': [
- {'morph': 'strata/ruby.morph'}
- ],
+ 'build-depends': stratum_build_depends,
'chunks': chunk_entries,
}