summaryrefslogtreecommitdiff
path: root/sqlparse/engine/__init__.py
diff options
context:
space:
mode:
authorAndi Albrecht <albrecht.andi@gmail.com>2016-06-03 19:07:47 +0200
committerAndi Albrecht <albrecht.andi@gmail.com>2016-06-03 19:07:47 +0200
commit8d78d7337724a929aff5abc2e4657713a02df7cc (patch)
tree9dd97e37e57964120187acb7229af9001bdbdc69 /sqlparse/engine/__init__.py
parenta767c88b008d407d91b9118d124e2a9b579a7f12 (diff)
parent81e408a4f2c8281e0da7c93ed75da90df1a518e2 (diff)
downloadsqlparse-8d78d7337724a929aff5abc2e4657713a02df7cc.tar.gz
Merge pull request #244 from vmuriart/refactor
Change old-style class to new-style & remove unused function
Diffstat (limited to 'sqlparse/engine/__init__.py')
-rw-r--r--sqlparse/engine/__init__.py20
1 file changed, 3 insertions, 17 deletions
diff --git a/sqlparse/engine/__init__.py b/sqlparse/engine/__init__.py
index 4d7fe88..c6b8b0c 100644
--- a/sqlparse/engine/__init__.py
+++ b/sqlparse/engine/__init__.py
@@ -9,9 +9,6 @@ from sqlparse import lexer
from sqlparse.engine import grouping
from sqlparse.engine.filter import StatementFilter
-# XXX remove this when cleanup is complete
-Filter = object
-
class FilterStack(object):
@@ -22,20 +19,9 @@ class FilterStack(object):
self.split_statements = False
self._grouping = False
- def _flatten(self, stream):
- for token in stream:
- if token.is_group():
- for t in self._flatten(token.tokens):
- yield t
- else:
- yield token
-
def enable_grouping(self):
self._grouping = True
- def full_analyze(self):
- self.enable_grouping()
-
def run(self, sql, encoding=None):
stream = lexer.tokenize(sql, encoding)
# Process token stream
@@ -43,8 +29,8 @@ class FilterStack(object):
for filter_ in self.preprocess:
stream = filter_.process(self, stream)
- if self.stmtprocess or self.postprocess or self.split_statements \
- or self._grouping:
+ if (self.stmtprocess or self.postprocess or
+ self.split_statements or self._grouping):
splitter = StatementFilter()
stream = splitter.process(self, stream)
@@ -71,7 +57,7 @@ class FilterStack(object):
def _run2(stream):
for stmt in stream:
- stmt.tokens = list(self._flatten(stmt.tokens))
+ stmt.tokens = list(stmt.flatten())
for filter_ in self.postprocess:
stmt = filter_.process(self, stmt)
yield stmt