-rw-r--r--   .hgignore                  |  1
-rw-r--r--   CHANGES                    |  1
-rw-r--r--   Makefile                   |  3
-rw-r--r--   sqlparse/engine/filter.py  |  1
-rw-r--r--   sqlparse/formatter.py      | 38
-rw-r--r--   sqlparse/sql.py            | 26
-rw-r--r--   tests/test_grouping.py     | 12
-rw-r--r--   tests/test_tokenize.py     | 46
8 files changed, 77 insertions, 51 deletions
diff --git a/.hgignore b/.hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -2,6 +2,7 @@ syntax: glob
 docs/build
 dist
 MANIFEST
+.coverage
 extras/appengine/django
 extras/appengine/django.zip
 extras/appengine/release
diff --git a/CHANGES b/CHANGES
--- a/CHANGES
+++ b/CHANGES
@@ -6,6 +6,7 @@ In Development
  * Improved parsing of identifier lists (issue2).
  * Recursive recognition of AS (issue4) and CASE.
  * Improved support for UPDATE statements.
+ * Code cleanup and better test coverage.
 
 Release 0.1.0
 
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -12,6 +12,9 @@ help:
 test:
 	$(PYTHON) tests/run_tests.py
 
+coverage:
+	nosetests --with-coverage --cover-inclusive --cover-package=sqlparse
+
 clean:
 	$(PYTHON) setup.py clean
 	find . -name '*.pyc' -delete
diff --git a/sqlparse/engine/filter.py b/sqlparse/engine/filter.py
index 146690c..08ff21d 100644
--- a/sqlparse/engine/filter.py
+++ b/sqlparse/engine/filter.py
@@ -17,6 +17,7 @@ class TokenFilter(object):
 class StatementFilter(TokenFilter):
 
     def __init__(self):
+        TokenFilter.__init__(self)
         self._in_declare = False
         self._in_dbldollar = False
         self._is_create = False
diff --git a/sqlparse/formatter.py b/sqlparse/formatter.py
index 27a1bd9..9c6f76b 100644
--- a/sqlparse/formatter.py
+++ b/sqlparse/formatter.py
@@ -120,41 +120,3 @@ def build_filter_stack(stack, options):
     return stack
 
 
-def format(statement, **options):
-    import filters
-    lexer = Lexer()
-#    lexer.add_filter('whitespace')
-    lexer.add_filter(filters.GroupFilter())
-    if options.get('reindent', False):
-        lexer.add_filter(filters.StripWhitespaceFilter())
-        lexer.add_filter(filters.IndentFilter(
-            n_indents=options.get('n_indents', 2)))
-    if options.get('ltrim', False):
-        lexer.add_filter(filters.LTrimFilter())
-    keyword_case = options.get('keyword_case', None)
-    if keyword_case is not None:
-        assert keyword_case in ('lower', 'upper', 'capitalize')
-        lexer.add_filter(filters.KeywordCaseFilter(case=keyword_case))
-    identifier_case = options.get('identifier_case', None)
-    if identifier_case is not None:
-        assert identifier_case in ('lower', 'upper', 'capitalize')
-        lexer.add_filter(filters.IdentifierCaseFilter(case=identifier_case))
-    if options.get('strip_comments', False):
-        lexer.add_filter(filters.StripCommentsFilter())
-    right_margin = options.get('right_margin', None)
-    if right_margin is not None:
-        right_margin = int(right_margin)
-        assert right_margin > 0
-        lexer.add_filter(filters.RightMarginFilter(margin=right_margin))
-    lexer.add_filter(filters.UngroupFilter())
-    if options.get('output_format', None):
-        ofrmt = options['output_format']
-        assert ofrmt in ('sql', 'python', 'php')
-        if ofrmt == 'python':
-            lexer.add_filter(filters.OutputPythonFilter())
-        elif ofrmt == 'php':
-            lexer.add_filter(filters.OutputPHPFilter())
-    tokens = []
-    for ttype, value in lexer.get_tokens(unicode(statement)):
-        tokens.append((ttype, value))
-    return statement.__class__(tokens)
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 2ab05b9..5abaad0 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -118,19 +118,19 @@ class TokenList(Token):
     def _get_repr_name(self):
         return self.__class__.__name__
 
-    def _pprint_tree(self, max_depth=None, depth=0):
-        """Pretty-print the object tree."""
-        indent = ' '*(depth*2)
-        for token in self.tokens:
-            if token.is_group():
-                pre = ' | '
-            else:
-                pre = ' | '
-            print '%s%s%s \'%s\'' % (indent, pre, token._get_repr_name(),
-                                     token._get_repr_value())
-            if (token.is_group() and max_depth is not None
-                and depth < max_depth):
-                token._pprint_tree(max_depth, depth+1)
+    ## def _pprint_tree(self, max_depth=None, depth=0):
+    ##     """Pretty-print the object tree."""
+    ##     indent = ' '*(depth*2)
+    ##     for token in self.tokens:
+    ##         if token.is_group():
+    ##             pre = ' | '
+    ##         else:
+    ##             pre = ' | '
+    ##         print '%s%s%s \'%s\'' % (indent, pre, token._get_repr_name(),
+    ##                                  token._get_repr_value())
+    ##         if (token.is_group() and max_depth is not None
+    ##             and depth < max_depth):
+    ##             token._pprint_tree(max_depth, depth+1)
 
     def flatten(self):
         """Generator yielding ungrouped tokens.
diff --git a/tests/test_grouping.py b/tests/test_grouping.py
index 6ec6616..0e6c19e 100644
--- a/tests/test_grouping.py
+++ b/tests/test_grouping.py
@@ -123,3 +123,15 @@ class TestGrouping(TestCaseBase):
         p = sqlparse.parse(s)[0]
         self.ndiffAssertEqual(s, p.to_unicode())
         self.assertEqual(p.tokens[4].get_alias(), 'view')
+
+
+class TestStatement(TestCaseBase):
+
+    def test_get_type(self):
+        f = lambda sql: sqlparse.parse(sql)[0]
+        self.assertEqual(f('select * from foo').get_type(), 'SELECT')
+        self.assertEqual(f('update foo').get_type(), 'UPDATE')
+        self.assertEqual(f(' update foo').get_type(), 'UPDATE')
+        self.assertEqual(f('\nupdate foo').get_type(), 'UPDATE')
+        self.assertEqual(f('foo').get_type(), 'UNKNOWN')
+
diff --git a/tests/test_tokenize.py b/tests/test_tokenize.py
index 127b4db..e4ef6c3 100644
--- a/tests/test_tokenize.py
+++ b/tests/test_tokenize.py
@@ -3,7 +3,9 @@
 import unittest
 import types
 
+import sqlparse
 from sqlparse import lexer
+from sqlparse import sql
 from sqlparse.tokens import *
 
 
@@ -38,3 +40,47 @@ class TestTokenize(unittest.TestCase):
         sql = 'foo\r\nbar\n'
         tokens = lexer.tokenize(sql)
         self.assertEqual(''.join(str(x[1]) for x in tokens), sql)
+
+
+class TestToken(unittest.TestCase):
+
+    def test_str(self):
+        token = sql.Token(None, 'FoO')
+        self.assertEqual(str(token), 'FoO')
+
+    def test_repr(self):
+        token = sql.Token(Keyword, 'foo')
+        tst = "<Keyword 'foo' at 0x"
+        self.assertEqual(repr(token)[:len(tst)], tst)
+        token = sql.Token(Keyword, '1234567890')
+        tst = "<Keyword '123456...' at 0x"
+        self.assertEqual(repr(token)[:len(tst)], tst)
+
+    def test_flatten(self):
+        token = sql.Token(Keyword, 'foo')
+        gen = token.flatten()
+        self.assertEqual(type(gen), types.GeneratorType)
+        lgen = list(gen)
+        self.assertEqual(lgen, [token])
+
+
+class TestTokenList(unittest.TestCase):
+
+    def test_token_first(self):
+        p = sqlparse.parse(' select foo')[0]
+        first = p.token_first()
+        self.assertEqual(first.value, 'select')
+        self.assertEqual(p.token_first(ignore_whitespace=False).value, ' ')
+        self.assertEqual(sql.TokenList([]).token_first(), None)
+
+    def test_token_matching(self):
+        t1 = sql.Token(Keyword, 'foo')
+        t2 = sql.Token(Punctuation, ',')
+        x = sql.TokenList([t1, t2])
+        self.assertEqual(x.token_matching(0, [lambda t: t.ttype is Keyword]),
+                         t1)
+        self.assertEqual(x.token_matching(0,
+                                          [lambda t: t.ttype is Punctuation]),
+                         t2)
+        self.assertEqual(x.token_matching(1, [lambda t: t.ttype is Keyword]),
+                         None)
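
The new TestStatement case doubles as documentation for Statement.get_type(). A minimal usage sketch of the behaviour those asserts describe, assuming sqlparse 0.1.x on Python 2 (the print-statement era this codebase targets):

    import sqlparse

    # get_type() reports the statement's leading keyword, skipping any
    # leading whitespace, and falls back to 'UNKNOWN' for anything else.
    print sqlparse.parse('select * from foo')[0].get_type()  # SELECT
    print sqlparse.parse('\nupdate foo')[0].get_type()       # UPDATE
    print sqlparse.parse('foo')[0].get_type()                # UNKNOWN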

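Likewise, the TestTokenList additions pin down the two lookup helpers on TokenList. A short sketch of how they are used, with signatures taken from the tests above rather than from separate API docs (again assuming sqlparse 0.1.x on Python 2):

    import sqlparse
    from sqlparse import sql
    from sqlparse.tokens import Keyword, Punctuation

    # token_first() returns the first token, skipping whitespace unless
    # ignore_whitespace=False is passed; an empty TokenList yields None.
    p = sqlparse.parse(' select foo')[0]
    print p.token_first().value                         # 'select'
    print p.token_first(ignore_whitespace=False).value  # ' '

    # token_matching(idx, funcs) scans from index idx and returns the
    # first token for which one of the callables is true, else None.
    tl = sql.TokenList([sql.Token(Keyword, 'foo'),
                        sql.Token(Punctuation, ',')])
    print tl.token_matching(0, [lambda t: t.ttype is Keyword]).value  # foo
    print tl.token_matching(1, [lambda t: t.ttype is Keyword])        # None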