diff options
-rw-r--r-- | sqlparse/engine/grouping.py | 26 | ||||
-rw-r--r-- | sqlparse/filters.py | 23 |
2 files changed, 37 insertions, 12 deletions
diff --git a/sqlparse/engine/grouping.py b/sqlparse/engine/grouping.py index 046ec9b..66ae807 100644 --- a/sqlparse/engine/grouping.py +++ b/sqlparse/engine/grouping.py @@ -209,9 +209,11 @@ def group_identifier_list(tlist): """ Create and group the identifiers list """ + print "group_identifierlist", start, after tokens = tlist.tokens_between(start, after) return tlist.group_tokens(sql.IdentifierList, tokens) + # Search for the first identifier list start = None tcomma = tlist.token_next_match(0, T.Punctuation, ',') @@ -233,23 +235,25 @@ def group_identifier_list(tlist): if start == None: start = before - # Look if the next token is another comma - next_ = tlist.token_next(after) - if next_: - if next_.match(T.Punctuation, ','): - tcomma = next_ - continue + def continue_next(): + # Check the next token + next_ = tlist.token_next(after) + while next_: + # Next token is another comma or an identifier list keyword + if next_.match(T.Punctuation, ','): + return next_ - elif(next_.ttype == T.Keyword - and next_.value.upper() not in ('FROM', 'WHERE', 'GROUP')): - tcomma = next_ - continue + next_ = tlist.token_next(next_) + + tcomma = continue_next() + if tcomma: + continue # Reached the end of the list # Create and group the identifiers list tcomma = group_identifierlist(start, after) - # Skip ahead to next "," + # Skip ahead to next identifier list start = None tcomma = tlist.token_next_match(tlist.token_index(tcomma) + 1, T.Punctuation, ',') diff --git a/sqlparse/filters.py b/sqlparse/filters.py index 271bb75..bcfb776 100644 --- a/sqlparse/filters.py +++ b/sqlparse/filters.py @@ -438,16 +438,37 @@ class ReindentFilter: # Increase offset and insert new lines self.offset += num_offset + offset = 0 # Insert a new line between the tokens + ignore = False for token in identifiers[1:]: - tlist.insert_before(token, self.nl()) + if not ignore: + tlist.insert_before(token, self.nl()) + ignore = token.ttype + + # Check identifiers offset + if token.ttype: + l = 
len(token.value) + if offset < l: + offset = l # Insert another new line after comment tokens for token in tlist.tokens: if isinstance(token, sql.Comment): tlist.insert_after(token, self.nl()) + # Update identifiers offset + if offset: + offset += 1 + + ignore = False + for token in identifiers: + if not ignore and not token.ttype: + tlist.insert_before(token, sql.Token(T.Whitespace, + " " * offset)) + ignore = token.ttype + # Decrease offset the size of the first token self.offset -= num_offset |