summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJesús Leganés Combarro "Piranna" <piranna@gmail.com>2012-06-01 23:09:33 +0200
committerJesús Leganés Combarro "Piranna" <piranna@gmail.com>2012-06-01 23:09:33 +0200
commit5a3b9b44c1c4782eacf882c541be59db3bbec020 (patch)
tree01954dbfcb8f3f97d570b91d7caaefba4c4d7d47
parent2e84066066059891270ea27e3397ed3eb39d419f (diff)
parent7768a1a1bd67578ef2a591ac779782098716825d (diff)
downloadsqlparse-5a3b9b44c1c4782eacf882c541be59db3bbec020.tar.gz
Merge branch 'identifierlist' into issue_50
Conflicts: sqlparse/filters.py
-rw-r--r--sqlparse/engine/grouping.py74
-rw-r--r--sqlparse/filters.py38
2 files changed, 82 insertions, 30 deletions
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py
index 1487c24..4120fc3 100644
--- a/sqlparse/engine/grouping.py
+++ b/sqlparse/engine/grouping.py
@@ -185,9 +185,11 @@ def group_identifier(tlist):
def group_identifier_list(tlist):
- [group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
- if not isinstance(sgroup, sql.IdentifierList)]
- idx = 0
+ # First group the `tlist` sublists
+ for sgroup in tlist.get_sublists():
+ if not isinstance(sgroup, sql.IdentifierList):
+ group_identifier_list(sgroup)
+
# Allowed list items
fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function,
sql.Case)),
@@ -202,36 +204,62 @@ def group_identifier_list(tlist):
lambda t: isinstance(t, sql.Comparison),
lambda t: isinstance(t, sql.Comment),
]
- tcomma = tlist.token_next_match(idx, T.Punctuation, ',')
+
start = None
- while tcomma is not None:
+
+ tcomma = tlist.token_next_match(0, T.Punctuation, ',')
+ while tcomma:
before = tlist.token_prev(tcomma)
after = tlist.token_next(tcomma)
- # Check if the tokens around tcomma belong to a list
+
+ # Check if the tokens around tcomma belong to an identifier list
bpassed = apassed = False
for func in fend1_funcs:
- if before is not None and func(before):
+ if before and func(before):
bpassed = True
- if after is not None and func(after):
+ if after and func(after):
apassed = True
- if not bpassed or not apassed:
- # Something's wrong here, skip ahead to next ","
+
+ # Both tokens around tcomma belong to a list
+ if bpassed and apassed:
+ # Set the start of the identifier list if not defined before
+ if start == None:
+ start = before
+
+            # Check whether the next token is another comma
+ next_ = tlist.token_next(after)
+ if next_:
+ if next_.match(T.Punctuation, ','):
+ tcomma = next_
+ continue
+
+ elif(next_.ttype == T.Keyword
+ and next_.value.upper() not in ('FROM', 'WHERE', 'GROUP')):
+ tcomma = next_
+ continue
+
+ # Reached the end of the list
+ # Create and group the identifiers list
+ tokens = tlist.tokens_between(start, after)
+ group = tlist.group_tokens(sql.IdentifierList, tokens)
+
+ # Skip ahead to next ","
start = None
- tcomma = tlist.token_next_match(tlist.token_index(tcomma) + 1,
+ tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
T.Punctuation, ',')
+
+        # At least one of the tokens around tcomma doesn't belong to an
+        # identifier list. Something's wrong here, skip ahead to next ","
else:
- if start is None:
- start = before
- next_ = tlist.token_next(after)
- if next_ is None or not next_.match(T.Punctuation, ','):
- # Reached the end of the list
- tokens = tlist.tokens_between(start, after)
- group = tlist.group_tokens(sql.IdentifierList, tokens)
- start = None
- tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
- T.Punctuation, ',')
- else:
- tcomma = next_
+ start = None
+ tcomma = tlist.token_next_match(tlist.token_index(tcomma) + 1,
+ T.Punctuation, ',')
+
+ # There's an open identifier list
+ if start:
+ # Create and group the identifiers list
+ tokens = tlist.tokens_between(start, after)
+ group = tlist.group_tokens(sql.IdentifierList, tokens)
def group_parenthesis(tlist):
diff --git a/sqlparse/filters.py b/sqlparse/filters.py
index 1ba97f5..3ee2607 100644
--- a/sqlparse/filters.py
+++ b/sqlparse/filters.py
@@ -419,21 +419,34 @@ class ReindentFilter:
def _process_identifierlist(self, tlist):
"""
Process an identifier list
+
+    If there is more than one identifier, put each on its own line
"""
- # If there are more than an identifier, put each on a line
+ # Get identifiers from the tlist
identifiers = list(tlist.get_identifiers())
+
+        # Split the identifier list if we have more than one identifier
+        # and it's not from a function
if len(identifiers) > 1 and not tlist.within(sql.Function):
- # Get offset size to increase
+ # Get first token
first = list(identifiers[0].flatten())[0]
+
+            # Increase offset by the size of the first token
num_offset = self._get_offset(first) - len(first.value)
# Increase offset and insert new lines
self.offset += num_offset
+
+ # Insert a new line between the tokens
for token in identifiers[1:]:
tlist.insert_before(token, self.nl())
+
+            # Insert another new line after comment tokens
for token in tlist.tokens:
if isinstance(token, sql.Comment):
tlist.insert_after(token, self.nl())
+
+            # Decrease offset by the size of the first token
self.offset -= num_offset
# Process the identifier list as usual
@@ -507,7 +520,7 @@ class ReindentFilter:
# If we are processing a statement, check if we should add a new line
if isinstance(stmt, sql.Statement):
- if self._last_stmt != None:
+ if self._last_stmt:
if unicode(self._last_stmt).endswith('\n'):
nl = '\n'
else:
@@ -582,17 +595,21 @@ class ColumnsSelect:
mode = 1
# We have detected a SELECT statement
- elif mode == 1:
- if value == 'FROM':
+ elif mode in (1, 3):
+ if value in ('FROM', 'WHERE', 'GROUP'):
if oldValue:
yield oldValue
+ oldValue = ""
- mode = 3 # Columns have been checked
+ break # Columns have been checked
elif value == 'AS':
oldValue = ""
mode = 2
+ elif token_type in Whitespace:
+ mode = 3
+
elif (token_type == Punctuation
and value == ',' and not parenthesis):
if oldValue:
@@ -605,7 +622,11 @@ class ColumnsSelect:
elif value == ')':
parenthesis -= 1
- oldValue += value
+ if mode == 3:
+ oldValue = value
+ mode = 1
+ else:
+ oldValue += value
# We are processing an AS keyword
elif mode == 2:
@@ -614,6 +635,9 @@ class ColumnsSelect:
yield value
mode = 1
+ if oldValue:
+ yield oldValue
+
# ---------------------------
# postprocess