Diffstat (limited to 'tablib/formats')
-rw-r--r--  tablib/formats/_csv.py   |  40
-rw-r--r--  tablib/formats/_json.py  |  50
-rw-r--r--  tablib/formats/_tsv.py   |  46
-rw-r--r--  tablib/formats/_xls.py   |  98
-rw-r--r--  tablib/formats/_yaml.py  |  52
5 files changed, 140 insertions(+), 146 deletions(-)
diff --git a/tablib/formats/_csv.py b/tablib/formats/_csv.py
index 2391417..2c74a1c 100644
--- a/tablib/formats/_csv.py
+++ b/tablib/formats/_csv.py
@@ -16,34 +16,34 @@ extentions = ('csv',)
def export_set(dataset):
- """Returns CSV representation of Dataset."""
- stream = cStringIO.StringIO()
- _csv = csv.writer(stream)
+ """Returns CSV representation of Dataset."""
+ stream = cStringIO.StringIO()
+ _csv = csv.writer(stream)
- for row in dataset._package(dicts=False):
- _csv.writerow(row)
+ for row in dataset._package(dicts=False):
+ _csv.writerow(row)
- return stream.getvalue()
+ return stream.getvalue()
def import_set(dset, in_stream, headers=True):
- """Returns dataset from CSV stream."""
+ """Returns dataset from CSV stream."""
- dset.wipe()
+ dset.wipe()
- rows = csv.reader(in_stream.split())
- for i, row in enumerate(rows):
+ rows = csv.reader(in_stream.split())
+ for i, row in enumerate(rows):
- if (i == 0) and (headers):
- dset.headers = row
- else:
- dset.append(row)
+ if (i == 0) and (headers):
+ dset.headers = row
+ else:
+ dset.append(row)
def detect(stream):
- """Returns True if given stream is valid CSV."""
- try:
- rows = dialect = csv.Sniffer().sniff(stream)
- return True
- except csv.Error:
- return False
\ No newline at end of file
+ """Returns True if given stream is valid CSV."""
+ try:
+ rows = dialect = csv.Sniffer().sniff(stream)
+ return True
+ except csv.Error:
+ return False
\ No newline at end of file
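
The _csv.py hunks only reindent the code: export_set still writes each packaged row through csv.writer into a cStringIO buffer, and import_set splits the incoming text and hands it to csv.reader, treating the first row as headers. A minimal round trip through these module-level helpers might look like the sketch below (Python 2, as in the code above; the sample values are invented):

    import tablib
    from tablib.formats import _csv

    data = tablib.Dataset()
    data.headers = ['first', 'last']
    data.append(('John', 'Adams'))

    text = _csv.export_set(data)     # roughly "first,last\r\nJohn,Adams\r\n"

    clone = tablib.Dataset()
    _csv.import_set(clone, text)     # row 0 becomes the headers
    assert clone.headers == ['first', 'last']
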
diff --git a/tablib/formats/_json.py b/tablib/formats/_json.py
index 7f31ee5..262c627 100644
--- a/tablib/formats/_json.py
+++ b/tablib/formats/_json.py
@@ -3,15 +3,9 @@
""" Tablib - JSON Support
"""
-try:
- import json # load system JSON (Python >= 2.6)
-except ImportError:
- try:
- import simplejson as json
- except ImportError:
- import tablib.packages.simplejson as json # use the vendorized copy
-
import tablib.core
+from tablib.packages import anyjson
+
title = 'json'
@@ -19,37 +13,37 @@ extentions = ('json', 'jsn')
def export_set(dataset):
- """Returns JSON representation of Dataset."""
- return json.dumps(dataset.dict)
+ """Returns JSON representation of Dataset."""
+ return anyjson.serialize(dataset.dict)
def export_book(databook):
- """Returns JSON representation of Databook."""
- return json.dumps(databook._package())
+ """Returns JSON representation of Databook."""
+ return anyjson.serialize(databook._package())
def import_set(dset, in_stream):
- """Returns dataset from JSON stream."""
+ """Returns dataset from JSON stream."""
- dset.wipe()
- dset.dict = json.loads(in_stream)
+ dset.wipe()
+ dset.dict = anyjson.deserialize(in_stream)
def import_book(dbook, in_stream):
- """Returns databook from JSON stream."""
+ """Returns databook from JSON stream."""
- dbook.wipe()
- for sheet in json.loads(in_stream):
- data = tablib.core.Dataset()
- data.title = sheet['title']
- data.dict = sheet['data']
- dbook.add_sheet(data)
+ dbook.wipe()
+ for sheet in anyjson.deserialize(in_stream):
+ data = tablib.core.Dataset()
+ data.title = sheet['title']
+ data.dict = sheet['data']
+ dbook.add_sheet(data)
def detect(stream):
- """Returns True if given stream is valid JSON."""
- try:
- json.loads(stream)
- return True
- except ValueError:
- return False
+ """Returns True if given stream is valid JSON."""
+ try:
+ anyjson.deserialize(stream)
+ return True
+ except ValueError:
+ return False
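
_json.py carries the only functional change in the commit: the nested json / simplejson / vendored-simplejson import chain is dropped in favour of a vendored anyjson module exposing serialize and deserialize. A shim with that interface might look roughly like this (an illustration of the idea, not the actual tablib.packages.anyjson source):

    try:
        import json                   # stdlib json (Python >= 2.6)
    except ImportError:
        import simplejson as json     # otherwise fall back to simplejson

    def serialize(obj):
        """Dump a Python object to a JSON string."""
        return json.dumps(obj)

    def deserialize(text):
        """Parse a JSON string into Python objects."""
        return json.loads(text)
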
diff --git a/tablib/formats/_tsv.py b/tablib/formats/_tsv.py
index 808f202..76a5f07 100644
--- a/tablib/formats/_tsv.py
+++ b/tablib/formats/_tsv.py
@@ -16,36 +16,36 @@ extentions = ('tsv',)
def export_set(dataset):
- """Returns a TSV representation of Dataset."""
- stream = cStringIO.StringIO()
- _tsv = csv.writer(stream, delimiter='\t')
+ """Returns a TSV representation of Dataset."""
+ stream = cStringIO.StringIO()
+ _tsv = csv.writer(stream, delimiter='\t')
- for row in dataset._package(dicts=False):
- _tsv.writerow(row)
+ for row in dataset._package(dicts=False):
+ _tsv.writerow(row)
- return stream.getvalue()
+ return stream.getvalue()
def import_set(dset, in_stream, headers=True):
- """Returns dataset from TSV stream."""
- dset.wipe()
+ """Returns dataset from TSV stream."""
+ dset.wipe()
- rows = csv.reader(in_stream.split('\r\n'), delimiter='\t')
- for i, row in enumerate(rows):
- # Skip empty rows
- if not row:
- continue
+ rows = csv.reader(in_stream.split('\r\n'), delimiter='\t')
+ for i, row in enumerate(rows):
+ # Skip empty rows
+ if not row:
+ continue
- if (i == 0) and (headers):
- dset.headers = row
- else:
- dset.append(row)
+ if (i == 0) and (headers):
+ dset.headers = row
+ else:
+ dset.append(row)
def detect(stream):
- """Returns True if given stream is valid TSV."""
- try:
- rows = dialect = csv.Sniffer().sniff(stream, delimiters='\t')
- return True
- except csv.Error:
- return False
+ """Returns True if given stream is valid TSV."""
+ try:
+ rows = dialect = csv.Sniffer().sniff(stream, delimiters='\t')
+ return True
+ except csv.Error:
+ return False
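
_tsv.py mirrors the CSV module with a tab delimiter; detect() asks csv.Sniffer to sniff the stream while restricting the candidate delimiters to '\t', and maps a csv.Error to False. For example (sample strings invented, expected results approximate):

    from tablib.formats import _tsv

    _tsv.detect('id\tname\n1\talice\n2\tbob\n')   # should be True
    _tsv.detect('no tabs in this text at all')    # should be False: Sniffer raises csv.Error
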
diff --git a/tablib/formats/_xls.py b/tablib/formats/_xls.py
index 08bc0f6..717a6d5 100644
--- a/tablib/formats/_xls.py
+++ b/tablib/formats/_xls.py
@@ -6,9 +6,9 @@
import cStringIO
try:
- import xlwt
+ import xlwt
except ImportError:
- import tablib.packages.xlwt as xlwt
+ import tablib.packages.xlwt as xlwt
title = 'xls'
@@ -20,66 +20,66 @@ bold = xlwt.easyxf("font: bold on")
def export_set(dataset):
- """Returns XLS representation of Dataset."""
+ """Returns XLS representation of Dataset."""
- wb = xlwt.Workbook(encoding='utf8')
- ws = wb.add_sheet(dataset.title if dataset.title else 'Tabbed Dataset')
+ wb = xlwt.Workbook(encoding='utf8')
+ ws = wb.add_sheet(dataset.title if dataset.title else 'Tabbed Dataset')
- dset_sheet(dataset, ws)
+ dset_sheet(dataset, ws)
- stream = cStringIO.StringIO()
- wb.save(stream)
- return stream.getvalue()
+ stream = cStringIO.StringIO()
+ wb.save(stream)
+ return stream.getvalue()
def export_book(databook):
- """Returns XLS representation of DataBook."""
+ """Returns XLS representation of DataBook."""
- wb = xlwt.Workbook(encoding='utf8')
+ wb = xlwt.Workbook(encoding='utf8')
- for i, dset in enumerate(databook._datasets):
- ws = wb.add_sheet(dset.title if dset.title else 'Sheet%s' % (i))
+ for i, dset in enumerate(databook._datasets):
+ ws = wb.add_sheet(dset.title if dset.title else 'Sheet%s' % (i))
- dset_sheet(dset, ws)
+ dset_sheet(dset, ws)
- stream = cStringIO.StringIO()
- wb.save(stream)
- return stream.getvalue()
+ stream = cStringIO.StringIO()
+ wb.save(stream)
+ return stream.getvalue()
def dset_sheet(dataset, ws):
- """Completes given worksheet from given Dataset."""
- _package = dataset._package(dicts=False)
-
- for i, sep in enumerate(dataset._separators):
- _offset = i
- _package.insert((sep[0] + _offset), (sep[1],))
-
- for i, row in enumerate(_package):
- for j, col in enumerate(row):
-
- # bold headers
- if (i == 0) and dataset.headers:
- ws.write(i, j, col, bold)
-
- # frozen header row
- ws.panes_frozen = True
- ws.horz_split_pos = 1
-
-
- # bold separators
- elif len(row) < dataset.width:
- ws.write(i, j, col, bold)
-
- # wrap the rest
- else:
- try:
- if '\n' in col:
- ws.write(i, j, col, wrap)
- else:
- ws.write(i, j, col)
- except TypeError:
- ws.write(i, j, col)
+ """Completes given worksheet from given Dataset."""
+ _package = dataset._package(dicts=False)
+
+ for i, sep in enumerate(dataset._separators):
+ _offset = i
+ _package.insert((sep[0] + _offset), (sep[1],))
+
+ for i, row in enumerate(_package):
+ for j, col in enumerate(row):
+
+ # bold headers
+ if (i == 0) and dataset.headers:
+ ws.write(i, j, col, bold)
+
+ # frozen header row
+ ws.panes_frozen = True
+ ws.horz_split_pos = 1
+
+
+ # bold separators
+ elif len(row) < dataset.width:
+ ws.write(i, j, col, bold)
+
+ # wrap the rest
+ else:
+ try:
+ if '\n' in col:
+ ws.write(i, j, col, wrap)
+ else:
+ ws.write(i, j, col)
+ except TypeError:
+ ws.write(i, j, col)
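
dset_sheet() in _xls.py weaves the dataset's separators into the flattened rows before writing cells, bumping each insertion index by the number of separators already inserted, and styles header and separator rows bold. The offset arithmetic, shown on plain lists (values invented):

    rows = [['first', 'last'], ['John', 'Adams'], ['Henry', 'Ford']]
    separators = [(1, 'Presidents'), (2, 'Industrialists')]

    for offset, (position, label) in enumerate(separators):
        rows.insert(position + offset, (label,))

    # rows is now:
    # [['first', 'last'], ('Presidents',),
    #  ['John', 'Adams'], ('Industrialists',), ['Henry', 'Ford']]
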
diff --git a/tablib/formats/_yaml.py b/tablib/formats/_yaml.py
index 59d49a0..3f2f8b7 100644
--- a/tablib/formats/_yaml.py
+++ b/tablib/formats/_yaml.py
@@ -4,9 +4,9 @@
"""
try:
- import yaml
+ import yaml
except ImportError:
- import tablib.packages.yaml as yaml
+ import tablib.packages.yaml as yaml
import tablib
@@ -18,40 +18,40 @@ extentions = ('yaml', 'yml')
def export_set(dataset):
- """Returns YAML representation of Dataset."""
- return yaml.dump(dataset.dict)
+ """Returns YAML representation of Dataset."""
+ return yaml.dump(dataset.dict)
def export_book(databook):
- """Returns YAML representation of Databook."""
- return yaml.dump(databook._package())
+ """Returns YAML representation of Databook."""
+ return yaml.dump(databook._package())
def import_set(dset, in_stream):
- """Returns dataset from YAML stream."""
+ """Returns dataset from YAML stream."""
- dset.wipe()
- dset.dict = yaml.load(in_stream)
+ dset.wipe()
+ dset.dict = yaml.load(in_stream)
def import_book(dbook, in_stream):
- """Returns databook from YAML stream."""
+ """Returns databook from YAML stream."""
- dbook.wipe()
+ dbook.wipe()
- for sheet in yaml.load(in_stream):
- data = tablib.core.Dataset()
- data.title = sheet['title']
- data.dict = sheet['data']
- dbook.add_sheet(data)
-
+ for sheet in yaml.load(in_stream):
+ data = tablib.core.Dataset()
+ data.title = sheet['title']
+ data.dict = sheet['data']
+ dbook.add_sheet(data)
+
def detect(stream):
- """Returns True if given stream is valid YAML."""
- try:
- _yaml = yaml.load(stream)
- if isinstance(_yaml, (list, tuple, dict)):
- return True
- else:
- return False
- except yaml.parser.ParserError:
- return False
\ No newline at end of file
+ """Returns True if given stream is valid YAML."""
+ try:
+ _yaml = yaml.load(stream)
+ if isinstance(_yaml, (list, tuple, dict)):
+ return True
+ else:
+ return False
+ except yaml.parser.ParserError:
+ return False
\ No newline at end of file
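
The _yaml.py helpers parallel the JSON ones, dumping and loading dataset.dict with PyYAML (or the vendored copy). A round trip might look like this sketch (Python 2; sample data invented):

    import tablib
    from tablib.formats import _yaml

    data = tablib.Dataset()
    data.headers = ['first', 'last']
    data.append(('John', 'Adams'))

    text = _yaml.export_set(data)    # e.g. "- {first: John, last: Adams}\n"

    clone = tablib.Dataset()
    _yaml.import_set(clone, text)
    # clone.dict should round-trip back to [{'first': 'John', 'last': 'Adams'}]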