From 896774cb5298924abbcea81b9b90f1c7c10b3d6a Mon Sep 17 00:00:00 2001
From: Sjoerd Job Postmus
Date: Thu, 2 Jun 2016 07:38:27 +0200
Subject: Special-case group_tokens(..., tokens_between())

When the tokens are guaranteed to form a contiguous range, a lot of
expensive calls to `Token.tokens.remove(...)` can be avoided.
---
 sqlparse/sql.py | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

(limited to 'sqlparse/sql.py')

diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 9afdac3..81cd8e9 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -329,6 +329,29 @@ class TokenList(Token):
         end_idx = include_end + self.token_index(end)
         return self.tokens[start_idx:end_idx]
 
+    def group_tokens_between(self, grp_cls, start, end, include_end=True, extend=False):
+        """Replace tokens by an instance of *grp_cls*."""
+        start_idx = self.token_index(start)
+        end_idx = self.token_index(end) + include_end
+        tokens = self.tokens[start_idx:end_idx]
+
+        if extend and isinstance(start, grp_cls):
+            subtokens = self.tokens[start_idx+1:end_idx]
+
+            grp = start
+            grp.tokens.extend(subtokens)
+            del self.tokens[start_idx+1:end_idx]
+            grp.value = start.__str__()
+        else:
+            subtokens = self.tokens[start_idx:end_idx]
+            grp = grp_cls(tokens)
+            self.tokens[start_idx:end_idx] = [grp]
+            grp.parent = self
+
+        for token in subtokens:
+            token.parent = grp
+
+        return grp
     def group_tokens(self, grp_cls, tokens, ignore_ws=False, extend=False):
         """Replace tokens by an instance of *grp_cls*."""
         if ignore_ws:
--
cgit v1.2.1


From d4cc0644c8348da5e49c58df5e26a3e969045249 Mon Sep 17 00:00:00 2001
From: Sjoerd Job Postmus
Date: Thu, 2 Jun 2016 08:30:27 +0200
Subject: Replace _group_matching with an inward-out grouping algorithm

Previously, all the matching between open and close tokens was done up
front: first finding the matching closing token, then grouping the
tokens in between, and finally recursing over the newly created list.

Instead, it is more efficient to look back for the most recent
open-token on finding a closing-token, group these two together, and
then continue on.

squashed: Handle token indices in group_tokens_between and find_matching.
---
 sqlparse/sql.py | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

(limited to 'sqlparse/sql.py')

diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 81cd8e9..dfe0430 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -331,9 +331,14 @@ class TokenList(Token):
 
     def group_tokens_between(self, grp_cls, start, end, include_end=True, extend=False):
         """Replace tokens by an instance of *grp_cls*."""
-        start_idx = self.token_index(start)
-        end_idx = self.token_index(end) + include_end
-        tokens = self.tokens[start_idx:end_idx]
+        if isinstance(start, int):
+            start_idx = start
+            start = self.tokens[start_idx]
+        else:
+            start_idx = self.token_index(start)
+
+        end_idx = self.token_index(end) if not isinstance(end, int) else end
+        end_idx += include_end
 
         if extend and isinstance(start, grp_cls):
             subtokens = self.tokens[start_idx+1:end_idx]
@@ -344,7 +349,7 @@ class TokenList(Token):
             grp.value = start.__str__()
         else:
             subtokens = self.tokens[start_idx:end_idx]
-            grp = grp_cls(tokens)
+            grp = grp_cls(subtokens)
             self.tokens[start_idx:end_idx] = [grp]
             grp.parent = self
 
--
cgit v1.2.1
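The grouping pass that drives this inward-out algorithm lives in
sqlparse/engine/grouping.py, so it does not appear in this view limited
to 'sqlparse/sql.py'. As a rough sketch of the idea described above
(not the actual sqlparse implementation; the matcher callables and
names are illustrative), the pass keeps a stack of open-token indices
and groups as soon as a closing token is seen:

    def group_matching_sketch(tlist, grp_cls, is_open, is_close):
        # Stack of indices of open tokens not yet matched by a close.
        opens = []
        idx = 0
        while idx < len(tlist.tokens):
            token = tlist.tokens[idx]
            if is_open(token):
                opens.append(idx)
            elif is_close(token) and opens:
                open_idx = opens.pop()
                # Collapse open..close (inclusive) into one group token.
                # Everything between them is already grouped, because
                # inner pairs were closed -- and grouped -- earlier.
                tlist.group_tokens_between(grp_cls, open_idx, idx)
                idx = open_idx  # resume just past the new group
            idx += 1

This relies on group_tokens_between accepting integer indices, which is
exactly what the squashed part of the commit above adds.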
From 67dc823e1174eee9ea2159674c8eb016b2f95b54 Mon Sep 17 00:00:00 2001
From: Sjoerd Job Postmus
Date: Thu, 2 Jun 2016 10:08:00 +0200
Subject: Use specialized token_idx_next_by in group_aliased

The method group_aliased was making a lot of calls to token_index.
By specializing token_next_by into token_idx_next_by, those calls to
token_index become superfluous.

Also use token_idx_next_by in group_identifier_list; its number of
calls is now reduced by more than half.
---
 sqlparse/sql.py | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

(limited to 'sqlparse/sql.py')

diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index dfe0430..928b784 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -225,6 +225,22 @@ class TokenList(Token):
     def _groupable_tokens(self):
         return self.tokens
 
+    def _token_idx_matching(self, funcs, start=0, end=None, reverse=False):
+        """next token that matches functions"""
+        if start is None:
+            return None
+
+        if not isinstance(funcs, (list, tuple)):
+            funcs = (funcs,)
+
+        iterable = enumerate(self.tokens[start:end], start=start)
+
+        for idx, token in iterable:
+            for func in funcs:
+                if func(token):
+                    return idx, token
+        return None, None
+
     def _token_matching(self, funcs, start=0, end=None, reverse=False):
         """next token that match functions"""
         if start is None:
@@ -259,6 +275,10 @@ class TokenList(Token):
                                 (ignore_comments and imt(tk, i=Comment)))
         return self._token_matching(funcs)
 
+    def token_idx_next_by(self, i=None, m=None, t=None, idx=0, end=None):
+        funcs = lambda tk: imt(tk, i, m, t)
+        return self._token_idx_matching(funcs, idx, end)
+
     def token_next_by(self, i=None, m=None, t=None, idx=0, end=None):
         funcs = lambda tk: imt(tk, i, m, t)
         return self._token_matching(funcs, idx, end)
--
cgit v1.2.1


From 8f7968ed5c649e5227e605ee272f59dd5ca75adb Mon Sep 17 00:00:00 2001
From: Sjoerd Job Postmus
Date: Thu, 2 Jun 2016 10:28:54 +0200
Subject: Index-based token_idx_prev

Prevent some more calls to token_index in group_identifier_list.
They are now all gone.
---
 sqlparse/sql.py | 28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)

(limited to 'sqlparse/sql.py')

diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 928b784..9782c33 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -233,12 +233,18 @@ class TokenList(Token):
         if not isinstance(funcs, (list, tuple)):
             funcs = (funcs,)
 
-        iterable = enumerate(self.tokens[start:end], start=start)
-
-        for idx, token in iterable:
-            for func in funcs:
-                if func(token):
-                    return idx, token
+        if reverse:
+            assert end is None
+            for idx in range(start - 2, -1, -1):
+                token = self.tokens[idx]
+                for func in funcs:
+                    if func(token):
+                        return idx, token
+        else:
+            for idx, token in enumerate(self.tokens[start:end], start=start):
+                for func in funcs:
+                    if func(token):
+                        return idx, token
         return None, None
 
     def _token_matching(self, funcs, start=0, end=None, reverse=False):
@@ -312,6 +318,16 @@ class TokenList(Token):
     def token_matching(self, idx, funcs):
         return self._token_matching(funcs, idx)
 
+    def token_idx_prev(self, idx, skip_ws=True):
+        """Returns the previous token relative to *idx*.
+
+        If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
+        ``None`` is returned if there's no previous token.
+        """
+        idx += 1  # a lot of calling code currently pre-compensates for this
+        funcs = lambda tk: not (tk.is_whitespace() and skip_ws)
+        return self._token_idx_matching(funcs, idx, reverse=True)
+
     def token_prev(self, idx, skip_ws=True):
         """Returns the previous token relative to *idx*.
 
--
cgit v1.2.1
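group_identifier_list and group_aliased themselves live in
sqlparse/engine/grouping.py, so the call-site changes are not visible
in this view. Schematically, the win is that the index now travels
with the token, so advancing no longer needs the linear scan done by
token_index. A sketch of the two calling patterns, with `process` as a
hypothetical stand-in for the real per-token work and
sqlparse.sql.Identifier as the match class:

    # Before: token-based traversal; token_index() rescans
    # tlist.tokens on every step, making the whole walk quadratic.
    token = tlist.token_next_by(i=Identifier)
    while token is not None:
        process(token)                   # hypothetical per-token work
        idx = tlist.token_index(token)   # O(n) linear scan each step
        token = tlist.token_next_by(i=Identifier, idx=idx + 1)

    # After: (idx, token) pairs; advancing is O(1) bookkeeping.
    tidx, token = tlist.token_idx_next_by(i=Identifier)
    while token is not None:
        process(token)
        tidx, token = tlist.token_idx_next_by(i=Identifier, idx=tidx + 1)

Since _token_idx_matching scans from *start* inclusive, passing
idx=tidx + 1 resumes strictly after the previous hit.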
From 89d4f68ba5bbe78a9dd89257cbe4a9f3cfa76433 Mon Sep 17 00:00:00 2001
From: Sjoerd Job Postmus
Date: Thu, 2 Jun 2016 11:58:19 +0200
Subject: Use a specialized token_idx_next

Prevent calling token_index.
---
 sqlparse/sql.py | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

(limited to 'sqlparse/sql.py')

diff --git a/sqlparse/sql.py b/sqlparse/sql.py
index 9782c33..f3ef642 100644
--- a/sqlparse/sql.py
+++ b/sqlparse/sql.py
@@ -350,6 +350,26 @@ class TokenList(Token):
         funcs = lambda tk: not (tk.is_whitespace() and skip_ws)
         return self._token_matching(funcs, idx)
 
+    def token_idx_next(self, idx, skip_ws=True):
+        """Returns the next token relative to *idx*.
+
+        If *skip_ws* is ``True`` (the default) whitespace tokens are ignored.
+        ``None`` is returned if there's no next token.
+        """
+        if isinstance(idx, int):
+            idx += 1  # a lot of calling code currently pre-compensates for this
+        try:
+            if not skip_ws:
+                return idx, self.tokens[idx]
+            else:
+                while True:
+                    token = self.tokens[idx]
+                    if not token.is_whitespace():
+                        return idx, token
+                    idx += 1
+        except IndexError:
+            return None, None
+
     def token_index(self, token, start=0):
         """Return list index of token."""
         start = self.token_index(start) if not isinstance(start, int) else start
--
cgit v1.2.1
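The `idx += 1` pre-compensation in token_idx_prev and token_idx_next is
the subtle part of this series: both methods expect the index of the
token you are standing on, not the index where scanning should start.
A small usage sketch against this revision of the code (assuming the
parsed statement starts with a non-whitespace `select` keyword at
index 0, followed by whitespace at index 1):

    import sqlparse

    tlist = sqlparse.parse('select foo from bar')[0]

    # token_idx_next(i) yields the first non-whitespace token *after*
    # index i, because the method advances the index before scanning.
    tidx, token = tlist.token_idx_next(-1)     # (0, the 'select' token)
    nidx, ntoken = tlist.token_idx_next(tidx)  # skips whitespace at 1

    # token_idx_prev(i) mirrors this: it scans backwards from i - 1
    # (the internal idx += 1 feeds range(start - 2, -1, -1) above).
    pidx, ptoken = tlist.token_idx_prev(nidx)
    assert (pidx, ptoken) == (tidx, token)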