diff options
-rw-r--r--  README               | 16 +
-rwxr-xr-x  lorry                |  5 +-
-rwxr-xr-x  lorry.gzip-importer  | 61 +
3 files changed, 80 insertions(+), 2 deletions(-)
@@ -217,6 +217,22 @@ will be tagged as 'docbook-xml-4.5')
     }
 
+### Gzip
+
+Lorry can import a gzip file fetched from a URL. The file will be extracted in place
+and committed directly to master after removing the .gz extension from the filename.
+
+The new commit will be tagged with the basename of the imported gzip file (e.g. bkai00mp.ttf.gz
+will be tagged as 'bkai00mp.ttf')
+
+    {
+        "ttf-bkai00mp": {
+            "type": "gzip",
+            "url": "ftp://ftp.gnu.org/non-gnu/chinese-fonts-truetype/bkai00mp.ttf.gz"
+        }
+    }
+
+
 Tips
 ----
 
 
@@ -192,7 +192,8 @@ class Lorry(cliapp.Application):
             'hg': self.gitify_hg,
             'svn': self.gitify_svn,
             'tarball': functools.partial(self.gitify_archive, 'tar'),
-            'zip': functools.partial(self.gitify_archive, 'zip')
+            'zip': functools.partial(self.gitify_archive, 'zip'),
+            'gzip': functools.partial(self.gitify_archive, 'gzip')
         }
         vcstype = spec['type']
         if vcstype not in table:
@@ -500,7 +501,7 @@ class Lorry(cliapp.Application):
                         cwd=gitdir)
 
     def gitify_archive(self, archive_type, project_name, dirname, gitdir, spec):
-        assert archive_type in ['zip', 'tar']
+        assert archive_type in ['zip', 'gzip', 'tar']
         url = spec['url']
         url_path = urllib2.urlparse.urlparse(url)[2]
 
diff --git a/lorry.gzip-importer b/lorry.gzip-importer
new file mode 100755
index 0000000..c1c7215
--- /dev/null
+++ b/lorry.gzip-importer
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+# gzip archive frontend for git-fast-import
+
+from os import popen, path
+from os.path import splitext
+from os.path import getmtime
+from sys import argv, exit
+from gzip import GzipFile
+import struct
+
+if len(argv) < 2:
+    print 'usage:', argv[0], '<gzipfile>...'
+    exit(1)
+
+branch_name = 'master'
+branch_ref = 'refs/heads/%s' % branch_name
+committer_name = 'Lorry Gzip Importer'
+committer_email = 'lorry-gzip-importer@lorry'
+
+fast_import = popen('git fast-import --quiet', 'w')
+def printlines(lines):
+    for line in lines:
+        fast_import.write(line + "\n")
+
+# The uncompressed size (ISIZE) is stored little-endian in the last 4 bytes
+def uncompressedsize(filename):
+    with open(filename, 'rb') as f:
+        f.seek(-4, 2)
+        return struct.unpack('<I', f.read(4))[0]
+
+for zipfile in argv[1:]:
+
+    # Gzip does have an encoded mtime, however Python's GzipFile
+    # just ignores it, so we just yank the mtime of the gzip file itself.
+    mtime = getmtime(zipfile)
+    file_size = uncompressedsize(zipfile)
+    gz = GzipFile(zipfile, 'rb')
+
+    printlines(('blob', 'mark :1', 'data ' + str(file_size)))
+    fast_import.write(gz.read() + "\n")
+
+    committer = committer_name + ' <' + committer_email + '> %d +0000' % \
+        mtime
+
+    zipfile_basename = path.basename(zipfile)
+    printlines(('commit ' + branch_ref, 'committer ' + committer,
+        'data <<EOM', 'Imported from ' + zipfile_basename + '.', 'EOM',
+        '', 'deleteall'))
+
+    last_dot = zipfile_basename[:-1].rfind('.')
+    unzipped_file = zipfile_basename[:last_dot]
+    fast_import.write('M 100644 :1 ' + unzipped_file + '\n')
+
+    zipname, _ = splitext(zipfile_basename)
+    printlines(('', 'tag ' + zipname,
+        'from ' + branch_ref, 'tagger ' + committer,
+        'data <<EOM', 'Package ' + zipfile, 'EOM', ''))
+
+if fast_import.close():
+    exit(1)