| field     | value                                                     | date                      |
|-----------|-----------------------------------------------------------|---------------------------|
| author    | Vik <vmuriart@gmail.com>                                  | 2016-06-16 02:33:28 -0700 |
| committer | GitHub <noreply@github.com>                               | 2016-06-16 02:33:28 -0700 |
| commit    | 92b5f2bb88ed1c1080ecf7eb7449f5c642ae196a (patch)          |                           |
| tree      | 30b53c5970fc01fab5a14c9a0298f4e8d4eba077 /tests           |                           |
| parent    | 451d6d5d380cb4246c47e374aa9c4034fc7f9805 (diff)           |                           |
| parent    | 9fcf1f2cda629cdf11a8a4ac596fb7cae0e89de9 (diff)           |                           |
| download  | sqlparse-92b5f2bb88ed1c1080ecf7eb7449f5c642ae196a.tar.gz  |                           |
Merge pull request #260 from vmuriart/long_live_indexes
Long live indexes - Improve performance
Diffstat (limited to 'tests')
| -rw-r--r-- | tests/test_grouping.py    |  6 |
| -rw-r--r-- | tests/test_regressions.py | 10 |
| -rw-r--r-- | tests/test_tokenize.py    | 18 |
3 files changed, 13 insertions, 21 deletions
diff --git a/tests/test_grouping.py b/tests/test_grouping.py
index 147162f..272d266 100644
--- a/tests/test_grouping.py
+++ b/tests/test_grouping.py
@@ -128,11 +128,11 @@ class TestGrouping(TestCaseBase):
         p = sqlparse.parse("select * from ("
                            "select a, b + c as d from table) sub")[0]
         subquery = p.tokens[-1].tokens[0]
-        iden_list = subquery.token_next_by(i=sql.IdentifierList)
+        idx, iden_list = subquery.token_next_by(i=sql.IdentifierList)
         self.assert_(iden_list is not None)
         # all the identifiers should be within the IdentifierList
-        self.assert_(subquery.token_next_by(i=sql.Identifier,
-                                            idx=iden_list) is None)
+        _, ilist = subquery.token_next_by(i=sql.Identifier, idx=idx)
+        self.assert_(ilist is None)
 
     def test_identifier_list_case(self):
         p = sqlparse.parse('a, case when 1 then 2 else 3 end as b, c')[0]
diff --git a/tests/test_regressions.py b/tests/test_regressions.py
index 616c321..b55939a 100644
--- a/tests/test_regressions.py
+++ b/tests/test_regressions.py
@@ -2,6 +2,7 @@
 
 import sys
 
+import pytest  # noqa
 from tests.utils import TestCaseBase, load_file
 
 import sqlparse
@@ -48,7 +49,7 @@ class RegressionTests(TestCaseBase):
         self.assert_(p.tokens[0].ttype is T.Comment.Single)
 
     def test_issue34(self):
-        t = sqlparse.parse("create")[0].token_next()
+        t = sqlparse.parse("create")[0].token_first()
         self.assertEqual(t.match(T.Keyword.DDL, "create"), True)
         self.assertEqual(t.match(T.Keyword.DDL, "CREATE"), True)
 
@@ -311,10 +312,3 @@ def test_issue207_runaway_format():
                       " 2 as two,",
                       " 3",
                       " from dual) t0"])
-
-
-def test_case_within_parenthesis():
-    # see issue #164
-    s = '(case when 1=1 then 2 else 5 end)'
-    p = sqlparse.parse(s)[0]
-    assert isinstance(p[0][1], sql.Case)
diff --git a/tests/test_tokenize.py b/tests/test_tokenize.py
index 7200682..61eaa3e 100644
--- a/tests/test_tokenize.py
+++ b/tests/test_tokenize.py
@@ -104,23 +104,21 @@ class TestTokenList(unittest.TestCase):
 
     def test_token_first(self):
         p = sqlparse.parse(' select foo')[0]
-        first = p.token_next()
+        first = p.token_first()
         self.assertEqual(first.value, 'select')
-        self.assertEqual(p.token_next(skip_ws=False).value, ' ')
-        self.assertEqual(sql.TokenList([]).token_next(), None)
+        self.assertEqual(p.token_first(skip_ws=False).value, ' ')
+        self.assertEqual(sql.TokenList([]).token_first(), None)
 
     def test_token_matching(self):
         t1 = sql.Token(T.Keyword, 'foo')
         t2 = sql.Token(T.Punctuation, ',')
         x = sql.TokenList([t1, t2])
-        self.assertEqual(x.token_matching(0, [lambda t: t.ttype is T.Keyword]),
-                         t1)
         self.assertEqual(x.token_matching(
-            0,
-            [lambda t: t.ttype is T.Punctuation]),
-            t2)
-        self.assertEqual(x.token_matching(1, [lambda t: t.ttype is T.Keyword]),
-                         None)
+            [lambda t: t.ttype is T.Keyword], 0), t1)
+        self.assertEqual(x.token_matching(
+            [lambda t: t.ttype is T.Punctuation], 0), t2)
+        self.assertEqual(x.token_matching(
+            [lambda t: t.ttype is T.Keyword], 1), None)
 
 
 class TestStream(unittest.TestCase):
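The three test updates above all track the same API shift that the "Long live indexes" title refers to: `token_next_by` now returns an `(index, token)` pair instead of a bare token, `token_matching` takes its list of predicates first and the start index second, and the first-token lookup is spelled `token_first` rather than `token_next`. A minimal sketch of the new pattern, assuming sqlparse as of this commit (the query string is illustrative):

```python
import sqlparse
from sqlparse import sql
from sqlparse import tokens as T

p = sqlparse.parse('select a, b + c as d from table_x')[0]

# token_next_by returns (index, token); the integer index lets a
# follow-up search resume from that position instead of re-scanning
# the token list for the token object itself.
idx, ident_list = p.token_next_by(i=sql.IdentifierList)
print(idx, ident_list)

# token_first returns the first token, skipping whitespace by default.
print(p.token_first())  # the DML keyword 'select'

# token_matching: predicate list first, start index second.
print(p.token_matching([lambda t: t.ttype is T.Keyword.DML], 0))
```

Feeding the returned integer back in, as the grouping test does with `token_next_by(i=sql.Identifier, idx=idx)`, replaces the old object-based `idx=iden_list` lookup and avoids a linear search for the token's position, which is where the performance improvement in the PR title comes from.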
