path: root/sqlparse/sql.py
author      quest <quest@wonky.windwards.net>    2012-04-22 01:41:22 +0200
committer   quest <quest@wonky.windwards.net>    2012-04-22 01:41:22 +0200
commit      a16c08703c8eb213a8b570bb16636fbe7a2b4a28 (patch)
tree        5d13bc4428bf678c75e0cbbdf1e35ec5655788ee /sqlparse/sql.py
parent      1f8dfd8723dd7aa9610fd9249775dc3b403d7e77 (diff)
download    sqlparse-a16c08703c8eb213a8b570bb16636fbe7a2b4a28.tar.gz
various optimizations in sql.py
Diffstat (limited to 'sqlparse/sql.py')
-rw-r--r--   sqlparse/sql.py   27
1 files changed, 19 insertions, 8 deletions
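The gist of the patch: Token now precomputes two attributes at construction time, `normalized` (the upper-cased value for keyword tokens) and `is_keyword`, so that `match()` and `Statement.get_type()` compare against a cached string instead of calling `value.upper()` and building throwaway sets on every lookup, and `token_next_match()` walks the token list by index instead of slicing it. Below is a minimal, self-contained sketch of the caching idea; `CachedToken` and its `matches()` method are simplified stand-ins for `sqlparse.sql.Token.match()`, and the `T.Keyword` ttype test from the diff is reduced to a plain constructor flag.

# Simplified sketch of the caching pattern this commit introduces; not the
# real sqlparse.sql.Token (keyword detection is a constructor flag here
# rather than the T.Keyword ttype hierarchy used by sqlparse).
class CachedToken(object):
    __slots__ = ('value', 'normalized', 'is_keyword')

    def __init__(self, value, is_keyword=False):
        self.value = value
        self.is_keyword = is_keyword
        # Computed once at construction instead of on every matches() call.
        self.normalized = value.upper() if is_keyword else value

    def matches(self, values):
        # Fast path for a single string: no temporary set is built.
        if isinstance(values, str):
            if self.is_keyword:
                return values.upper() == self.normalized
            return values == self.value
        # Keyword comparison against the cached upper-cased value.
        if self.is_keyword:
            return any(v.upper() == self.normalized for v in values)
        return self.value in values

tok = CachedToken('select', is_keyword=True)
assert tok.matches('SELECT')
assert tok.matches(['INSERT', 'SELECT'])
assert not tok.matches('UPDATE')
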
diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 9c7aeee..31fa34d 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -15,11 +15,13 @@ class Token(object):
     the type of the token.
     """
-    __slots__ = ('value', 'ttype', 'parent')
+    __slots__ = ('value', 'ttype', 'parent', 'normalized', 'is_keyword')
     def __init__(self, ttype, value):
         self.value = value
+        self.normalized = value.upper() if ttype in T.Keyword else value
         self.ttype = ttype
+        self.is_keyword = ttype in T.Keyword
         self.parent = None
     def __str__(self):
@@ -71,9 +73,9 @@ class Token(object):
         type_matched = self.ttype is ttype
         if not type_matched or values is None:
             return type_matched
-        if isinstance(values, basestring):
-            values = set([values])
         if regex:
+            if isinstance(values, basestring):
+                values = set([values])
             if self.ttype is T.Keyword:
                 values = set([re.compile(v, re.IGNORECASE) for v in values])
             else:
@@ -83,10 +85,18 @@ class Token(object):
                     return True
             return False
         else:
-            if self.ttype in T.Keyword:
-                values = set([v.upper() for v in values])
-                return self.value.upper() in values
+            if isinstance(values, basestring):
+                if self.is_keyword:
+                    return values.upper() == self.normalized
+                else:
+                    return values == self.value
+            if self.is_keyword:
+                for v in values:
+                    if v.upper() == self.normalized:
+                        return True
+                return False
             else:
+                print len(values)
                 return self.value in values
     def is_group(self):
@@ -227,7 +237,8 @@ class TokenList(Token):
         if not isinstance(idx, int):
             idx = self.token_index(idx)
-        for token in self.tokens[idx:]:
+        for n in xrange(idx, len(self.tokens)):
+            token = self.tokens[n]
             if token.match(ttype, value, regex):
                 return token
@@ -395,7 +406,7 @@ class Statement(TokenList):
             return 'UNKNOWN'
         elif first_token.ttype in (T.Keyword.DML, T.Keyword.DDL):
-            return first_token.value.upper()
+            return first_token.normalized
         return 'UNKNOWN'
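The `token_next_match()` hunk is a smaller, independent win: `self.tokens[idx:]` copies the tail of the token list on every call, while iterating by index (`xrange` in the Python 2 code above, `range` in Python 3) walks the existing list without allocating anything. A before/after sketch, with a hypothetical `find_first()` pair standing in for `TokenList.token_next_match()` and a plain predicate in place of `token.match(ttype, value, regex)`:

def find_first_sliced(tokens, idx, predicate):
    # Before the patch: tokens[idx:] allocates a new list holding the tail.
    for token in tokens[idx:]:
        if predicate(token):
            return token
    return None

def find_first_indexed(tokens, idx, predicate):
    # After the patch: walk by index over the original list, no per-call copy.
    for n in range(idx, len(tokens)):
        token = tokens[n]
        if predicate(token):
            return token
    return None

One added line in the `match()` hunk, `print len(values)`, sits in the non-keyword, non-regex branch and looks like a leftover debug statement rather than part of the optimization.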