From 6513f3537d80f483ec9c7e36d52e87f107a57f23 Mon Sep 17 00:00:00 2001
From: David Beazley
Date: Wed, 29 May 2013 16:08:57 -0500
Subject: Minor code cleanups

---
 ply/yacc.py | 68 ++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 34 insertions(+), 34 deletions(-)

diff --git a/ply/yacc.py b/ply/yacc.py
index 7cd0b07..49d83d7 100644
--- a/ply/yacc.py
+++ b/ply/yacc.py
@@ -1424,7 +1424,7 @@ class Grammar(object):
     def set_precedence(self,term,assoc,level):
         assert self.Productions == [None],"Must call set_precedence() before add_production()"
         if term in self.Precedence:
-            raise GrammarError("Precedence already specified for terminal '%s'" % term)
+            raise GrammarError("Precedence already specified for terminal %r" % term)
         if assoc not in ['left','right','nonassoc']:
             raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
         self.Precedence[term] = (assoc,level)
@@ -1449,11 +1449,11 @@ class Grammar(object):
     def add_production(self,prodname,syms,func=None,file='',line=0):
 
         if prodname in self.Terminals:
-            raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
+            raise GrammarError("%s:%d: Illegal rule name %r. Already defined as a token" % (file,line,prodname))
         if prodname == 'error':
-            raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
+            raise GrammarError("%s:%d: Illegal rule name %r. error is a reserved word" % (file,line,prodname))
         if not _is_identifier.match(prodname):
-            raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))
+            raise GrammarError("%s:%d: Illegal rule name %r" % (file,line,prodname))
 
         # Look for literal tokens
         for n,s in enumerate(syms):
@@ -1461,7 +1461,7 @@ class Grammar(object):
             try:
                 c = eval(s)
                 if (len(c) > 1):
-                    raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
+                    raise GrammarError("%s:%d: Literal token %s in rule %r may only be a single character" % (file,line,s, prodname))
                 if not c in self.Terminals:
                     self.Terminals[c] = []
                 syms[n] = c
@@ -1469,7 +1469,7 @@ class Grammar(object):
             except SyntaxError:
                 pass
             if not _is_identifier.match(s) and s != '%prec':
-                raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))
+                raise GrammarError("%s:%d: Illegal name %r in rule %r" % (file,line,s, prodname))
 
         # Determine the precedence level
         if '%prec' in syms:
@@ -1478,9 +1478,9 @@ class Grammar(object):
             if syms[-2] != '%prec':
                 raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
             precname = syms[-1]
-            prodprec = self.Precedence.get(precname,None)
+            prodprec = self.Precedence.get(precname)
             if not prodprec:
-                raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
+                raise GrammarError("%s:%d: Nothing known about the precedence of %r" % (file,line,precname))
             else:
                 self.UsedPrecedence[precname] = 1
             del syms[-2:]     # Drop %prec from the rule
@@ -2060,13 +2060,13 @@ class LRGeneratedTable(LRTable):
 
     def lr0_goto(self,I,x):
         # First we look for a previously cached entry
-        g = self.lr_goto_cache.get((id(I),x),None)
+        g = self.lr_goto_cache.get((id(I),x))
         if g: return g
 
         # Now we generate the goto set in a way that guarantees uniqueness
         # of the result
 
-        s = self.lr_goto_cache.get(x,None)
+        s = self.lr_goto_cache.get(x)
         if not s:
             s = { }
             self.lr_goto_cache[x] = s
@@ -2075,13 +2075,13 @@ class LRGeneratedTable(LRTable):
         for p in I:
             n = p.lr_next
             if n and n.lr_before == x:
-                s1 = s.get(id(n),None)
+                s1 = s.get(id(n))
                 if not s1:
                     s1 = { }
                     s[id(n)] = s1
                 gs.append(n)
                 s = s1
-        g = s.get('$end',None)
+        g = s.get('$end')
         if not g:
             if gs:
                 g = self.lr0_closure(gs)
@@ -2466,7 +2466,7 @@ class LRGeneratedTable(LRTable):
                                 laheads = self.grammar.Follow[p.name]
                             for a in laheads:
                                 actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
-                                r = st_action.get(a,None)
+                                r = st_action.get(a)
                                 if r is not None:
                                     # Whoa. Have a shift/reduce or reduce/reduce conflict
                                     if r > 0:
@@ -2520,7 +2520,7 @@ class LRGeneratedTable(LRTable):
                             if j >= 0:
                                 # We are in a shift state
                                 actlist.append((a,p,"shift and go to state %d" % j))
-                                r = st_action.get(a,None)
+                                r = st_action.get(a)
                                 if r is not None:
                                     # Whoa have a shift/reduce or shift/shift conflict
                                     if r > 0:
@@ -2711,7 +2711,7 @@ del _lr_goto_items
 
         except IOError:
             e = sys.exc_info()[1]
-            sys.stderr.write("Unable to create '%s'\n" % filename)
+            sys.stderr.write("Unable to create %r\n" % filename)
             sys.stderr.write(str(e)+"\n")
             return
 
@@ -2807,7 +2807,7 @@ def parse_grammar(doc,file,line):
         except SyntaxError:
             raise
         except Exception:
-            raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,dline,ps.strip()))
+            raise SyntaxError("%s:%d: Syntax error in rule %r" % (file,dline,ps.strip()))
 
     return grammar
 
@@ -2941,7 +2941,7 @@ class ParserReflect(object):
 
     # Get the tokens map
    def get_tokens(self):
-        tokens = self.pdict.get("tokens",None)
+        tokens = self.pdict.get("tokens")
         if not tokens:
             self.log.error("No token list is defined")
             self.error = 1
@@ -2970,12 +2970,12 @@ class ParserReflect(object):
         terminals = {}
         for n in self.tokens:
             if n in terminals:
-                self.log.warning("Token '%s' multiply defined", n)
+                self.log.warning("Token %r multiply defined", n)
             terminals[n] = 1
 
     # Get the precedence map (if any)
     def get_precedence(self):
-        self.prec = self.pdict.get("precedence",None)
+        self.prec = self.pdict.get("precedence")
 
     # Validate and parse the precedence map
     def validate_precedence(self):
@@ -3012,7 +3012,7 @@ class ParserReflect(object):
     def get_pfunctions(self):
         p_functions = []
         for name, item in self.pdict.items():
-            if name[:2] != 'p_': continue
+            if not name.startswith('p_'): continue
             if name == 'p_error': continue
             if isinstance(item,(types.FunctionType,types.MethodType)):
                 line = func_code(item).co_firstlineno
@@ -3041,13 +3041,13 @@ class ParserReflect(object):
             else:
                 reqargs = 1
             if func_code(func).co_argcount > reqargs:
-                self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
+                self.log.error("%s:%d: Rule %r has too many arguments",file,line,func.__name__)
                 self.error = 1
             elif func_code(func).co_argcount < reqargs:
-                self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
+                self.log.error("%s:%d: Rule %r requires an argument",file,line,func.__name__)
                 self.error = 1
             elif not func.__doc__:
-                self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
+                self.log.warning("%s:%d: No documentation string specified in function %r (ignored)",file,line,func.__name__)
             else:
                 try:
                     parsed_g = parse_grammar(doc,file,line)
@@ -3066,16 +3066,16 @@ class ParserReflect(object):
         # or functions that look like they might be grammar rules.
 
         for n,v in self.pdict.items():
-            if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
-            if n[0:2] == 't_': continue
-            if n[0:2] == 'p_' and n != 'p_error':
-                self.log.warning("'%s' not defined as a function", n)
+            if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)): continue
+            if n.startswith('t_'): continue
+            if n.startswith('p_') and n != 'p_error':
+                self.log.warning("%r not defined as a function", n)
             if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
                 (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
                 try:
                     doc = v.__doc__.split(" ")
                     if doc[1] == ':':
-                        self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
+                        self.log.warning("%s:%d: Possible grammar rule %r defined without p_ prefix",
                                          func_code(v).co_filename, func_code(v).co_firstlineno,n)
                 except Exception:
                     pass
@@ -3197,7 +3197,7 @@ def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, star
     # Verify the grammar structure
     undefined_symbols = grammar.undefined_symbols()
     for sym, prod in undefined_symbols:
-        errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
+        errorlog.error("%s:%d: Symbol %r used, but not defined as a token or a rule",prod.file,prod.line,sym)
         errors = 1
 
     unused_terminals = grammar.unused_terminals()
@@ -3206,7 +3206,7 @@ def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, star
         debuglog.info("Unused terminals:")
         debuglog.info("")
         for term in unused_terminals:
-            errorlog.warning("Token '%s' defined, but not used", term)
+            errorlog.warning("Token %r defined, but not used", term)
             debuglog.info("    %s", term)
 
     # Print out all productions to the debug log
@@ -3220,7 +3220,7 @@ def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, star
     # Find unused non-terminals
     unused_rules = grammar.unused_rules()
     for prod in unused_rules:
-        errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)
+        errorlog.warning("%s:%d: Rule %r defined, but not used", prod.file, prod.line, prod.name)
 
     if len(unused_terminals) == 1:
         errorlog.warning("There is 1 unused token")
@@ -3253,16 +3253,16 @@ def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, star
     if check_recursion:
         unreachable = grammar.find_unreachable()
         for u in unreachable:
-            errorlog.warning("Symbol '%s' is unreachable",u)
+            errorlog.warning("Symbol %r is unreachable",u)
 
     infinite = grammar.infinite_cycles()
     for inf in infinite:
-        errorlog.error("Infinite recursion detected for symbol '%s'", inf)
+        errorlog.error("Infinite recursion detected for symbol %r", inf)
         errors = 1
 
     unused_prec = grammar.unused_precedence()
     for term, assoc in unused_prec:
-        errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
+        errorlog.error("Precedence rule %r defined for unknown symbol %r", assoc, term)
         errors = 1
 
     if errors:
-- 
cgit v1.2.1
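
The cleanups in this patch reduce to three recurring Python idioms: message formats switch from manually quoted '%s' to %r (which formats with repr() and adds the quotes itself), dict.get(key, None) drops the redundant default (get() already returns None when the key is missing), and slice comparisons such as name[:2] == 'p_' become name.startswith('p_'). A minimal sketch of the three idioms follows; the values used (TERM, name) are made-up examples, not taken from ply.

    # Illustration only; TERM and name are hypothetical values.
    TERM = "NUMBER"
    # %r calls repr(), so strings come out quoted and escaped automatically.
    print("Precedence already specified for terminal %r" % TERM)   # ... terminal 'NUMBER'

    # dict.get() defaults to None, so the explicit second argument is redundant.
    precedence = {}
    assert precedence.get(TERM) is precedence.get(TERM, None) is None

    # str.startswith() states the prefix test directly instead of via slicing.
    name = "p_expression"
    assert name.startswith('p_') == (name[:2] == 'p_')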