author      Kenneth Reitz <me@kennethreitz.com>    2010-09-14 00:07:19 -0400
committer   Kenneth Reitz <me@kennethreitz.com>    2010-09-14 00:07:19 -0400
commit      f913853cae2b019974d54a2a1f89a3a283e1d2e7 (patch)
tree        25b28d2d9518976bec1ef21496e9e5962b5f9bf3
parent      ea1de420a3edcc616e7ee2536f0838e611b734e4 (diff)
parent      d0c8df95a3f7234414fe6b7df72cae52b55f6af8 (diff)
download    tablib-0.6.3.tar.gz
Merge branch 'release/0.6.3' (tag: v0.6.3)
-rw-r--r--    README.rst           14
-rw-r--r--    setup.py              2
-rw-r--r--    tablib/core.py       72
-rw-r--r--    tablib/helpers.py     2
-rw-r--r--    test_tablib.py       63
5 files changed, 124 insertions, 29 deletions
diff --git a/README.rst b/README.rst
index 86da367..32d477f 100644
--- a/README.rst
+++ b/README.rst
@@ -31,11 +31,11 @@ Usage
Populate fresh data files: ::
- headers = ('first_name', 'last_name', 'gpa')
+ headers = ('first_name', 'last_name')
data = [
- ('John', 'Adams', 90),
- ('George', 'Washington', 67)
+ ('John', 'Adams'),
+ ('George', 'Washington')
]
data = tablib.Dataset(*data, headers=headers)
@@ -43,7 +43,11 @@ Populate fresh data files: ::
Intelligently add new rows: ::
- >>> data.append(('Henry', 'Ford', 83))
+ >>> data.append(('Henry', 'Ford'))
+
+Intelligently add new columns: ::
+
+ >>> data.append(col=('age', 90, 67, 83))
Slice rows: ::
@@ -122,7 +126,7 @@ Or, if you absolutely must: ::
Contribute
----------
-If you'd like to contribute, simply fork `the repository`_, commit your changes, and send a pull request. Make sure you add yourself to AUTHORS_.
+If you'd like to contribute, simply fork `the repository`_, commit your changes to the **develop** branch (or branch off of it), and send a pull request. Make sure you add yourself to AUTHORS_.
Roadmap
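
Taken together, the README changes above document the new column-append API alongside the existing row append. A minimal end-to-end sketch of that usage, assuming the 0.6.3 Dataset API exactly as shown in this diff (Python 2 era): ::

    import tablib

    headers = ('first_name', 'last_name')
    rows = [
        ('John', 'Adams'),
        ('George', 'Washington'),
    ]
    data = tablib.Dataset(*rows, headers=headers)

    # Row append, unchanged from earlier releases.
    data.append(('Henry', 'Ford'))

    # New in 0.6.3: column append. With headers set, the first element of
    # the column becomes the new header and the rest fill the existing rows.
    data.append(col=('age', 90, 67, 83))
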
diff --git a/setup.py b/setup.py
index 5670078..02c84d3 100644
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@ if sys.argv[-1] == "publish":
setup(
name='tablib',
- version='0.6.2',
+ version='0.6.3',
description='Format agnostic tabular data library (XLS, JSON, YAML, CSV)',
long_description=open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read(),
diff --git a/tablib/core.py b/tablib/core.py
index ac61315..e4ceee1 100644
--- a/tablib/core.py
+++ b/tablib/core.py
@@ -21,8 +21,8 @@ from helpers import *
# __all__ = ['Dataset', 'DataBook']
__name__ = 'tablib'
-__version__ = '0.6.2'
-__build__ = 0x000602
+__version__ = '0.6.3'
+__build__ = 0x000603
__author__ = 'Kenneth Reitz'
__license__ = 'MIT'
__copyright__ = 'Copyright 2010 Kenneth Reitz'
@@ -80,10 +80,15 @@ class Dataset(object):
return '<dataset object>'
- def _validate(self, row=None, safety=False):
+ def _validate(self, row=None, col=None, safety=False):
"""Assures size of every row in dataset is of proper proportions."""
if row:
is_valid = (len(row) == self.width) if self.width else True
+ elif col:
+ if self.headers:
+ is_valid = (len(col) - 1) == self.height
+ else:
+ is_valid = (len(col) == self.height) if self.height else True
else:
is_valid = all((len(x)== self.width for x in self._data))
@@ -130,17 +135,27 @@ class Dataset(object):
"""Headers property."""
return self.__headers
+
@headers.setter
def headers(self, collection):
"""Validating headers setter."""
self._validate(collection)
- self.__headers = collection
-
+ if collection:
+ try:
+ self.__headers = list(collection)
+ except TypeError, why:
+ raise TypeError
+ else:
+ self.__headers = None
+
+
+
@property
def dict(self):
"""Returns python dict of Dataset."""
return self._package()
+
@property
def json(self):
"""Returns JSON representation of Dataset."""
@@ -180,16 +195,36 @@ class Dataset(object):
return stream.getvalue()
- def append(self, row):
+ def append(self, row=None, col=None):
"""Adds a row to the end of Dataset"""
- self._validate(row)
- self._data.append(tuple(row))
+ if row:
+ self._validate(row)
+ self._data.append(tuple(row))
+ elif col:
+ self._validate(col=col)
+
+ if self.headers:
+ # pop the first item off, add to headers
+ self.headers.append(col[0])
+ col = col[1:]
+
+ if self.height and self.width:
+
+ for i, row in enumerate(self._data):
+ _row = list(row)
+ _row.append(col[i])
+ self._data[i] = tuple(_row)
+ else:
+ self._data = [tuple([row]) for row in col]
- def index(self, i, row):
+ def insert(self, i, row=None, col=None):
"""Inserts a row at given position in Dataset"""
- self._validate(row)
- self._data.insert(i, tuple(row))
+ if row:
+ self._validate(row)
+ self._data.insert(i, tuple(row))
+ elif col:
+ pass
class DataBook(object):
@@ -200,12 +235,14 @@ class DataBook(object):
def __init__(self, sets=[]):
self._datasets = sets
+
def __repr__(self):
try:
return '<%s databook>' % (self.title.lower())
except AttributeError:
return '<databook object>'
+
def add_sheet(self, dataset):
"""Add given dataset ."""
if type(dataset) is Dataset:
@@ -213,6 +250,7 @@ class DataBook(object):
else:
raise InvalidDatasetType
+
def _package(self):
collector = []
for dset in self._datasets:
@@ -222,6 +260,7 @@ class DataBook(object):
))
return collector
+
@property
def size(self):
"""The number of the Datasets within DataBook."""
@@ -235,8 +274,8 @@ class DataBook(object):
stream = cStringIO.StringIO()
wb = xlwt.Workbook()
- for dset in self._datasets:
- ws = wb.add_sheet(dset.title if dset.title else 'Tabbed Dataset %s' % (int(random.random() * 100000000)))
+ for i, dset in enumerate(self._datasets):
+ ws = wb.add_sheet(dset.title if dset.title else 'Sheet%s' % (i))
#for row in self._package(dicts=False):
for i, row in enumerate(dset._package(dicts=False)):
@@ -246,24 +285,29 @@ class DataBook(object):
wb.save(stream)
return stream.getvalue()
+
@property
def json(self):
"""Returns JSON representation of Databook."""
return json.dumps(self._package())
+
@property
def yaml(self):
"""Returns YAML representation of Databook."""
return yaml.dump(self._package())
-
+
+
class InvalidDatasetType(Exception):
"Only Datasets can be added to a DataBook"
+
class InvalidDimensions(Exception):
"Invalid size"
+
class UnsupportedFormat(NotImplementedError):
"Format is not supported"
diff --git a/tablib/helpers.py b/tablib/helpers.py
index a12c4dd..b64d4b6 100644
--- a/tablib/helpers.py
+++ b/tablib/helpers.py
@@ -10,7 +10,7 @@ class Struct(object):
self.__dict__.update(entries)
def __getitem__(self, key):
- return getattr(self, key)
+ return getattr(self, key, None)
def piped():
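
The helpers change replaces a bare getattr with one that supplies a default, so looking up a missing key on a Struct now returns None instead of raising AttributeError. A tiny sketch (assuming Struct takes keyword entries, as its __init__ suggests): ::

    from tablib.helpers import Struct

    s = Struct(title='People')
    s['title']    # 'People'
    s['missing']  # None after this change; previously raised AttributeError
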
diff --git a/test_tablib.py b/test_tablib.py
index cea6d24..93ff252 100644
--- a/test_tablib.py
+++ b/test_tablib.py
@@ -7,15 +7,14 @@ import tablib
class TablibTestCase(unittest.TestCase):
def setUp(self):
- pass
+ global data
+ data = tablib.Dataset()
def tearDown(self):
pass
+
def test_empty_append(self):
-
- data = tablib.Dataset()
-
new_row = (1,2,3)
data.append(new_row)
@@ -24,17 +23,65 @@ class TablibTestCase(unittest.TestCase):
def test_empty_append_with_headers(self):
- data = tablib.Dataset()
-
data.headers = ['first', 'second']
new_row = (1,2,3,4)
self.assertRaises(tablib.InvalidDimensions, data.append, new_row)
+
+
+ def test_add_column(self):
+ # No Headers
+
+ data.append(['kenneth'])
+ data.append(['bessie'])
+
+ new_col = ['reitz', 'monke']
+
+ data.append(col=new_col)
+
+ self.assertEquals(data[0], ('kenneth', 'reitz'))
+ self.assertEquals(data.width, 2)
+
+ # With Headers
+ data.headers = ('fname', 'lname')
+ new_col = ['age', 21, 22]
+ data.append(col=new_col)
+
+ self.assertEquals(data[new_col[0]], new_col[1:])
+
+
- # def test_adding_header with (self):
-
+ def test_add_column_no_data_no_headers(self):
+
+ # no headers
+
+ new_col = ('reitz', 'monke')
+
+ data.append(col=new_col)
+
+ self.assertEquals(data[0], tuple([new_col[0]]))
+ self.assertEquals(data.width, 1)
+ self.assertEquals(data.height, len(new_col))
+ def test_add_column_no_data_with_headers(self):
+ # no headers
+
+ data.headers = ('first', 'last')
+
+ new_col = ('age',)
+ data.append(col=new_col)
+
+ self.assertEquals(len(data.headers), 3)
+ self.assertEquals(data.width, 3)
+
+ new_col = ('foo', 'bar')
+
+ self.assertRaises(tablib.InvalidDimensions, data.append, col=new_col)
+
+ def tuple_check(self):
+ data.append(col=(1,2,3))
+
if __name__ == '__main__':
unittest.main()
\ No newline at end of file
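
The updated tests share one module-level Dataset (the global data rebuilt in setUp), so every test starts from a fresh instance. The suite runs directly via python test_tablib.py; a sketch of driving it programmatically with the standard unittest API: ::

    import unittest
    from test_tablib import TablibTestCase

    suite = unittest.TestLoader().loadTestsFromTestCase(TablibTestCase)
    unittest.TextTestRunner(verbosity=2).run(suite)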