diff options
Diffstat (limited to 'sqlparse/engine')
-rw-r--r--  sqlparse/engine/__init__.py | 20
-rw-r--r--  sqlparse/engine/filter.py   |  2
-rw-r--r--  sqlparse/engine/grouping.py |  9
3 files changed, 9 insertions, 22 deletions
diff --git a/sqlparse/engine/__init__.py b/sqlparse/engine/__init__.py
index 4d7fe88..c6b8b0c 100644
--- a/sqlparse/engine/__init__.py
+++ b/sqlparse/engine/__init__.py
@@ -9,9 +9,6 @@ from sqlparse import lexer
 from sqlparse.engine import grouping
 from sqlparse.engine.filter import StatementFilter

-# XXX remove this when cleanup is complete
-Filter = object
-

 class FilterStack(object):
@@ -22,20 +19,9 @@ class FilterStack(object):
         self.split_statements = False
         self._grouping = False

-    def _flatten(self, stream):
-        for token in stream:
-            if token.is_group():
-                for t in self._flatten(token.tokens):
-                    yield t
-            else:
-                yield token
-
     def enable_grouping(self):
         self._grouping = True

-    def full_analyze(self):
-        self.enable_grouping()
-
     def run(self, sql, encoding=None):
         stream = lexer.tokenize(sql, encoding)
         # Process token stream
@@ -43,8 +29,8 @@ class FilterStack(object):
         for filter_ in self.preprocess:
             stream = filter_.process(self, stream)

-        if self.stmtprocess or self.postprocess or self.split_statements \
-        or self._grouping:
+        if (self.stmtprocess or self.postprocess or
+                self.split_statements or self._grouping):
             splitter = StatementFilter()
             stream = splitter.process(self, stream)
@@ -71,7 +57,7 @@ class FilterStack(object):

         def _run2(stream):
             for stmt in stream:
-                stmt.tokens = list(self._flatten(stmt.tokens))
+                stmt.tokens = list(stmt.flatten())
                 for filter_ in self.postprocess:
                     stmt = filter_.process(self, stmt)
                 yield stmt
diff --git a/sqlparse/engine/filter.py b/sqlparse/engine/filter.py
index 360ff9b..adf48ad 100644
--- a/sqlparse/engine/filter.py
+++ b/sqlparse/engine/filter.py
@@ -4,7 +4,7 @@ from sqlparse.sql import Statement, Token
 from sqlparse import tokens as T


-class StatementFilter:
+class StatementFilter(object):
     "Filter that split stream at individual statements"

     def __init__(self):
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index f7953e7..7a93b5e 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -27,15 +27,16 @@ def _group_left_right(tlist, m, cls,
     if valid_left(left) and valid_right(right):
         if semicolon:
+            # only overwrite if a semicolon present.
             sright = tlist.token_next_by(m=M_SEMICOLON, idx=right)
-            right = sright or right  # only overwrite if a semicolon present.
+            right = sright or right
         tokens = tlist.tokens_between(left, right)
         token = tlist.group_tokens(cls, tokens, extend=True)
         token = tlist.token_next_by(m=m, idx=token)


 def _group_matching(tlist, cls):
-    """Groups Tokens that have beginning and end. ie. parenthesis, brackets.."""
+    """Groups Tokens that have beginning and end."""
     idx = 1 if imt(tlist, i=cls) else 0
     token = tlist.token_next_by(m=cls.M_OPEN, idx=idx)
@@ -223,9 +224,9 @@ def group_functions(tlist):
     has_create = False
     has_table = False
     for tmp_token in tlist.tokens:
-        if tmp_token.value == u'CREATE':
+        if tmp_token.value == 'CREATE':
             has_create = True
-        if tmp_token.value == u'TABLE':
+        if tmp_token.value == 'TABLE':
             has_table = True
         if has_create and has_table:
             return
