author     ptmcg <ptmcg@9bf210a0-9d2d-494c-87cf-cfb32e7dff7b>  2012-11-23 08:54:10 +0000
committer  ptmcg <ptmcg@9bf210a0-9d2d-494c-87cf-cfb32e7dff7b>  2012-11-23 08:54:10 +0000
commit     551a3a8c1f617f2bb98345af5b606e7c1b84fee8 (patch)
tree       734ae210c20a98f01fe029f7e6eeb7a93b4617fb
parent     4606aecb99379afb1e406e8ac60c97e86a514319 (diff)
download   pyparsing-551a3a8c1f617f2bb98345af5b606e7c1b84fee8.tar.gz
Clean up examples to be Python 3 compatible
git-svn-id: svn://svn.code.sf.net/p/pyparsing/code/trunk@246 9bf210a0-9d2d-494c-87cf-cfb32e7dff7b
-rw-r--r--  src/examples/0README.html | 22
-rw-r--r--  src/examples/LAparser.py | 54
-rw-r--r--  src/examples/SimpleCalc.py | 34
-rw-r--r--  src/examples/TAP.py | 12
-rw-r--r--  src/examples/adventureEngine.py | 72
-rw-r--r--  src/examples/antlr_grammar.py | 436
-rw-r--r--  src/examples/apicheck.py | 12
-rw-r--r--  src/examples/btpyparse.py | 2
-rw-r--r--  src/examples/builtin_parse_action_demo.py | 4
-rw-r--r--  src/examples/cLibHeader.py | 4
-rw-r--r--  src/examples/chemicalFormulas.py | 14
-rw-r--r--  src/examples/commasep.py | 6
-rw-r--r--  src/examples/configParse.py | 28
-rw-r--r--  src/examples/cpp_enum_parser.py | 2
-rw-r--r--  src/examples/deltaTime.py | 10
-rw-r--r--  src/examples/dfmparse.py | 10
-rw-r--r--  src/examples/dhcpd_leases_parser.py | 6
-rw-r--r--  src/examples/dictExample.py | 8
-rw-r--r--  src/examples/dictExample2.py | 30
-rw-r--r--  src/examples/ebnftest.py | 14
-rw-r--r--  src/examples/eval_arith.py | 14
-rw-r--r--  src/examples/excelExpr.py | 6
-rw-r--r--  src/examples/fourFn.py | 110
-rw-r--r--  src/examples/fourFn.pyc | bin 5355 -> 0 bytes
-rw-r--r--  src/examples/gen_ctypes.py | 34
-rw-r--r--  src/examples/getNTPservers.py | 6
-rw-r--r--  src/examples/getNTPserversNew.py | 6
-rw-r--r--  src/examples/greeting.py | 2
-rw-r--r--  src/examples/greetingInGreek.py | 6
-rw-r--r--  src/examples/greetingInKorean.py | 4
-rw-r--r--  src/examples/groupUsingListAllMatches.py | 2
-rw-r--r--  src/examples/holaMundo.py | 8
-rw-r--r--  src/examples/htmlStripper.py | 6
-rw-r--r--  src/examples/httpServerLogParser.py | 6
-rw-r--r--  src/examples/idlParse.py | 14
-rw-r--r--  src/examples/indentedGrammarExample.py | 2
-rw-r--r--  src/examples/invRegex.py | 18
-rw-r--r--  src/examples/jsonParser.py | 8
-rw-r--r--  src/examples/linenoExample.py | 12
-rw-r--r--  src/examples/list1.py | 6
-rw-r--r--  src/examples/listAllMatches.py | 46
-rw-r--r--  src/examples/lucene_grammar.py | 16
-rw-r--r--  src/examples/macroExpander.py | 6
-rw-r--r--  src/examples/makeHTMLTagExample.py | 6
-rw-r--r--  src/examples/matchPreviousDemo.py | 2
-rw-r--r--  src/examples/mozillaCalendarParser.py | 5
-rw-r--r--  src/examples/nested.py | 4
-rw-r--r--  src/examples/oc.py | 2
-rw-r--r--  src/examples/parseListString.py | 8
-rw-r--r--  src/examples/parsePythonValue.py | 10
-rw-r--r--  src/examples/parseResultsSumExample.py | 6
-rw-r--r--  src/examples/partial_gene_match.py | 24
-rw-r--r--  src/examples/pgn.py | 10
-rw-r--r--  src/examples/pgn.pyc | bin 2803 -> 0 bytes
-rw-r--r--  src/examples/pymicko.py | 88
-rw-r--r--  src/examples/pythonGrammarParser.py | 12
-rw-r--r--  src/examples/readJson.py | 10
-rw-r--r--  src/examples/removeLineBreaks.py | 2
-rw-r--r--  src/examples/romanNumerals.py | 8
-rw-r--r--  src/examples/scanExamples.py | 28
-rw-r--r--  src/examples/scanYahoo.py | 6
-rw-r--r--  src/examples/searchParserAppDemo.py | 6
-rw-r--r--  src/examples/searchparser.py | 24
-rw-r--r--  src/examples/select_parser.py | 270
-rw-r--r--  src/examples/sexpParser.py | 12
-rw-r--r--  src/examples/simpleArith.py | 6
-rw-r--r--  src/examples/simpleBool.py | 22
-rw-r--r--  src/examples/simpleSQL.py | 120
-rw-r--r--  src/examples/simpleWiki.py | 6
-rw-r--r--  src/examples/sparser.py | 26
-rw-r--r--  src/examples/sql2dot.py | 6
-rw-r--r--  src/examples/stackish.py | 6
-rw-r--r--  src/examples/stateMachine2.py | 259
-rw-r--r--  src/examples/tagCapture.py | 4
-rw-r--r--  src/examples/urlExtractor.py | 6
-rw-r--r--  src/examples/urlExtractorNew.py | 6
-rw-r--r--  src/examples/verilogParse.py | 46
-rw-r--r--  src/examples/withAttribute.py | 2
-rw-r--r--  src/examples/wordsToNum.py | 41
79 files changed, 1251 insertions, 966 deletions
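
The changes below are almost entirely mechanical Python 2 -> 3 rewrites. As a reference, here is a minimal sketch of the Python 3 idioms these diffs converge on (not taken from any single example; the variable names are illustrative):

import sys

# print statement -> print() function; `print >>sys.stderr, msg` -> print(msg, file=sys.stderr)
print("Parse Failure", file=sys.stderr)

# `except SomeError, err:` -> `except SomeError as err:`
try:
    int("forty-two")
except ValueError as err:
    print(err)

# raw_input() -> input(), basestring -> str, gen.next() -> next(gen);
# dict.keys()/items() and range() no longer return lists in Python 3,
# so wrap them in list() wherever a real list is needed
values = {"a": 1, "b": 2}
keys = list(values.keys())
numbers = list(range(1, 4))
is_text = isinstance("abc", str)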
diff --git a/src/examples/0README.html b/src/examples/0README.html
index b8b654f..303d44d 100644
--- a/src/examples/0README.html
+++ b/src/examples/0README.html
@@ -281,6 +281,28 @@ Pyparsing example parsing ANTLR .a files and generating a working pyparsing pars
</li>
<p>
+<li><a href="shapes.py">shapes.py</a><br>
+<b>New in version 1.5.7</b><br>
+Parse actions example parsing a simple shape definition syntax, and returning the matched tokens as
+domain objects instead of just strings.
+</li>
+<p>
+
+<li><a href="datetimeParseActions.py">datetimeParseActions.py</a><br>
+<b>New in version 1.5.7</b><br>
+Parse actions example showing a parse action returning a datetime object instead of
+string tokens, and doing validation of the tokens, raising a ParseException if the
+given YYYY/MM/DD string does not represent a valid date.
+</li>
+<p>
+
+<li><a href="position.py">position.py</a><br>
+<b>New in version 1.5.7</b><br>
+Demonstration of a couple of different ways to capture the location a particular
+expression was found within the overall input string.
+</li>
+<p>
+
</ul>
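
The datetimeParseActions.py entry above describes a parse action that returns a datetime object and rejects invalid dates. A minimal, hypothetical sketch of that idea (this is not the example's actual code):

from datetime import datetime
from pyparsing import Word, nums, ParseException

# YYYY/MM/DD, converted to a datetime by a parse action
date_expr = Word(nums, exact=4) + "/" + Word(nums, exact=2) + "/" + Word(nums, exact=2)

def convert_to_datetime(s, loc, tokens):
    try:
        # tokens look like ['2012', '/', '11', '/', '23']
        return datetime(int(tokens[0]), int(tokens[2]), int(tokens[4]))
    except ValueError as ve:
        # turn an impossible date (e.g. 2012/02/31) into a parse failure
        raise ParseException(s, loc, "invalid date: " + str(ve))

date_expr.setParseAction(convert_to_datetime)

print(date_expr.parseString("2012/11/23"))   # [datetime.datetime(2012, 11, 23, 0, 0)]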
diff --git a/src/examples/LAparser.py b/src/examples/LAparser.py
index 6e3ddbd..ec75d6c 100644
--- a/src/examples/LAparser.py
+++ b/src/examples/LAparser.py
@@ -74,7 +74,7 @@ targetvar = None # Holds variable name to left of '=' sign in LA equation.
def _pushFirst( str, loc, toks ):
- if debug_flag: print "pushing ", toks[0], "str is ", str
+ if debug_flag: print("pushing ", toks[0], "str is ", str)
exprStack.append( toks[0] )
def _assignVar( str, loc, toks ):
@@ -235,7 +235,7 @@ def _evaluateStack( s ):
op2 = _evaluateStack( s )
op1 = _evaluateStack( s )
result = opn[op]( op1, op2 )
- if debug_flag: print result
+ if debug_flag: print(result)
return result
else:
return op
@@ -260,43 +260,43 @@ def parse(input_string):
# try parsing the input string
try:
L=equation.parseString( input_string )
- except ParseException,err:
- print >>sys.stderr, 'Parse Failure'
- print >>sys.stderr, err.line
- print >>sys.stderr, " "*(err.column-1) + "^"
- print >>sys.stderr, err
+ except ParseException as err:
+ print('Parse Failure', file=sys.stderr)
+ print(err.line, file=sys.stderr)
+ print(" "*(err.column-1) + "^", file=sys.stderr)
+ print(err, file=sys.stderr)
raise
# show result of parsing the input string
if debug_flag:
- print input_string, "->", L
- print "exprStack=", exprStack
+ print(input_string, "->", L)
+ print("exprStack=", exprStack)
# Evaluate the stack of parsed operands, emitting C code.
try:
result=_evaluateStack(exprStack)
except TypeError:
- print >>sys.stderr,"Unsupported operation on right side of '%s'.\nCheck for missing or incorrect tags on non-scalar operands."%input_string
+ print("Unsupported operation on right side of '%s'.\nCheck for missing or incorrect tags on non-scalar operands."%input_string, file=sys.stderr)
raise
except UnaryUnsupportedError:
- print >>sys.stderr,"Unary negation is not supported for vectors and matrices: '%s'"%input_string
+ print("Unary negation is not supported for vectors and matrices: '%s'"%input_string, file=sys.stderr)
raise
# Create final assignment and print it.
- if debug_flag: print "var=",targetvar
+ if debug_flag: print("var=",targetvar)
if targetvar != None:
try:
result = _assignfunc(targetvar,result)
except TypeError:
- print >>sys.stderr,"Left side tag does not match right side of '%s'"%input_string
+ print("Left side tag does not match right side of '%s'"%input_string, file=sys.stderr)
raise
except UnaryUnsupportedError:
- print >>sys.stderr,"Unary negation is not supported for vectors and matrices: '%s'"%input_string
+ print("Unary negation is not supported for vectors and matrices: '%s'"%input_string, file=sys.stderr)
raise
return result
else:
- print >>sys.stderr, "Empty left side in '%s'"%input_string
+ print("Empty left side in '%s'"%input_string, file=sys.stderr)
raise TypeError
##-----------------------------------------------------------------------------------
@@ -331,7 +331,7 @@ def test():
an AssertError if the output is not what is expected. Prints the
input, expected output, and actual output for all tests.
"""
- print "Testing LAParser"
+ print("Testing LAParser")
testcases = [
("Scalar addition","a = b+c","a=(b+c)"),
("Vector addition","V3_a = V3_b + V3_c","vCopy(a,vAdd(b,c))"),
@@ -362,18 +362,18 @@ def test():
for t in testcases:
name,input,expected = t
- print name
- print " %s input"%input
- print " %s expected"%expected
+ print(name)
+ print(" %s input"%input)
+ print(" %s expected"%expected)
result = parse(input)
- print " %s received"%result
- print ""
+ print(" %s received"%result)
+ print("")
assert expected == result
##TODO: Write testcases with invalid expressions and test that the expected
## exceptions are raised.
- print "Tests completed!"
+ print("Tests completed!")
##----------------------------------------------------------------------------
## The following is executed only when this module is executed as
## command line script. It runs a small test suite (see above)
@@ -394,8 +394,8 @@ if __name__ == '__main__':
Type 'debug on' to print parsing details as each string is processed.
Type 'debug off' to stop printing parsing details
"""
- print interactiveusage
- input_string = raw_input("> ")
+ print(interactiveusage)
+ input_string = input("> ")
while input_string != 'quit':
if input_string == "debug on":
@@ -404,14 +404,14 @@ if __name__ == '__main__':
debug_flag = False
else:
try:
- print parse(input_string)
+ print(parse(input_string))
except:
pass
# obtain new input string
- input_string = raw_input("> ")
+ input_string = input("> ")
# if user types 'quit' then say goodbye
- print "Good bye!"
+ print("Good bye!")
diff --git a/src/examples/SimpleCalc.py b/src/examples/SimpleCalc.py
index 577c8e3..46a5dff 100644
--- a/src/examples/SimpleCalc.py
+++ b/src/examples/SimpleCalc.py
@@ -22,7 +22,7 @@
#
#
-from __future__ import division
+
# Uncomment the line below for readline support on interactive terminal
# import readline
@@ -66,13 +66,13 @@ if __name__ == '__main__':
input_string=''
# Display instructions on how to quit the program
- print "Type in the string to be parsed or 'quit' to exit the program"
- input_string = raw_input("> ")
+ print("Type in the string to be parsed or 'quit' to exit the program")
+ input_string = input("> ")
while input_string != 'quit':
if input_string.lower() == 'debug':
debug_flag=True
- input_string = raw_input("> ")
+ input_string = input("> ")
continue
# Reset to an empty exprStack
@@ -82,37 +82,37 @@ if __name__ == '__main__':
# try parsing the input string
try:
L=pattern.parseString( input_string, parseAll=True )
- except ParseException,err:
+ except ParseException as err:
L=['Parse Failure',input_string]
# show result of parsing the input string
- if debug_flag: print input_string, "->", L
+ if debug_flag: print(input_string, "->", L)
if len(L)==0 or L[0] != 'Parse Failure':
- if debug_flag: print "exprStack=", exprStack
+ if debug_flag: print("exprStack=", exprStack)
# calculate result , store a copy in ans , display the result to user
try:
result=evaluateStack(exprStack)
- except Exception,e:
- print str(e)
+ except Exception as e:
+ print(str(e))
else:
variables['ans']=result
- print result
+ print(result)
# Assign result to a variable if required
if L.varname:
variables[L.varname] = result
- if debug_flag: print "variables=",variables
+ if debug_flag: print("variables=",variables)
else:
- print 'Parse Failure'
- print err.line
- print " "*(err.column-1) + "^"
- print err
+ print('Parse Failure')
+ print(err.line)
+ print(" "*(err.column-1) + "^")
+ print(err)
# obtain new input string
- input_string = raw_input("> ")
+ input_string = input("> ")
# if user type 'quit' then say goodbye
- print "Good bye!"
+ print("Good bye!")
diff --git a/src/examples/TAP.py b/src/examples/TAP.py
index dd551ab..139e47c 100644
--- a/src/examples/TAP.py
+++ b/src/examples/TAP.py
@@ -86,9 +86,9 @@ class TAPSummary(object):
self.bonusTests = []
self.bail = False
if results.plan:
- expected = range(1, int(results.plan.ubound)+1)
+ expected = list(range(1, int(results.plan.ubound)+1))
else:
- expected = range(1,len(results.tests)+1)
+ expected = list(range(1,len(results.tests)+1))
for i,res in enumerate(results.tests):
# test for bail out
@@ -104,7 +104,7 @@ class TAPSummary(object):
testnum = i+1
if res.testNumber != "":
if testnum != int(res.testNumber):
- print "ERROR! test %(testNumber)s out of sequence" % res
+ print("ERROR! test %(testNumber)s out of sequence" % res)
testnum = int(res.testNumber)
res["testNumber"] = testnum
@@ -211,7 +211,7 @@ if __name__ == "__main__":
"""
for test in (test1,test2,test3,test4,test5,test6):
- print test
+ print(test)
tapResult = tapOutputParser.parseString(test)[0]
- print tapResult.summary(showAll=True)
- print
+ print(tapResult.summary(showAll=True))
+ print()
diff --git a/src/examples/adventureEngine.py b/src/examples/adventureEngine.py
index 5817105..be09770 100644
--- a/src/examples/adventureEngine.py
+++ b/src/examples/adventureEngine.py
@@ -59,16 +59,16 @@ class Room(object):
self.inv.remove(it)
def describe(self):
- print self.desc
+ print(self.desc)
visibleItems = [ it for it in self.inv if it.isVisible ]
if random.random() > 0.5:
if len(visibleItems) > 1:
is_form = "are"
else:
is_form = "is"
- print "There %s %s here." % (is_form, enumerateItems(visibleItems))
+ print("There %s %s here." % (is_form, enumerateItems(visibleItems)))
else:
- print "You see %s." % (enumerateItems(visibleItems))
+ print("You see %s." % (enumerateItems(visibleItems)))
class Exit(Room):
@@ -100,7 +100,7 @@ class Item(object):
def breakItem(self):
if not self.isBroken:
- print "<Crash!>"
+ print("<Crash!>")
self.desc = "broken " + self.desc
self.isBroken = True
@@ -157,7 +157,7 @@ class Command(object):
pass
def __call__(self, player ):
- print self.verbProg.capitalize()+"..."
+ print(self.verbProg.capitalize()+"...")
self._doCommand(player)
@@ -184,7 +184,7 @@ class MoveCommand(Command):
if nextRoom:
player.moveTo( nextRoom )
else:
- print "Can't go that way."
+ print("Can't go that way.")
class TakeCommand(Command):
@@ -204,9 +204,9 @@ class TakeCommand(Command):
rm.removeItem(subj)
player.take(subj)
else:
- print subj.cantTakeMessage
+ print(subj.cantTakeMessage)
else:
- print "There is no %s here." % subj
+ print("There is no %s here." % subj)
class DropCommand(Command):
@@ -225,7 +225,7 @@ class DropCommand(Command):
rm.addItem(subj)
player.drop(subj)
else:
- print "You don't have %s." % (aOrAn(subj))
+ print("You don't have %s." % (aOrAn(subj)))
class InventoryCommand(Command):
def __init__(self, quals):
@@ -236,7 +236,7 @@ class InventoryCommand(Command):
return "INVENTORY or INV or I - lists what items you have"
def _doCommand(self, player):
- print "You have %s." % enumerateItems( player.inv )
+ print("You have %s." % enumerateItems( player.inv ))
class LookCommand(Command):
def __init__(self, quals):
@@ -272,7 +272,7 @@ class DoorsCommand(Command):
#~ print doorNames
reply += enumerateDoors( doorNames )
reply += "."
- print reply
+ print(reply)
class UseCommand(Command):
def __init__(self, quals):
@@ -294,9 +294,9 @@ class UseCommand(Command):
if self.subject.isUsable( player, self.target ):
self.subject.useItem( player, self.target )
else:
- print "You can't use that here."
+ print("You can't use that here.")
else:
- print "There is no %s here to use." % self.subject
+ print("There is no %s here to use." % self.subject)
class OpenCommand(Command):
def __init__(self, quals):
@@ -315,11 +315,11 @@ class OpenCommand(Command):
if not self.subject.isOpened:
self.subject.openItem( player )
else:
- print "It's already open."
+ print("It's already open.")
else:
- print "You can't open that."
+ print("You can't open that.")
else:
- print "There is no %s here to open." % self.subject
+ print("There is no %s here to open." % self.subject)
class CloseCommand(Command):
def __init__(self, quals):
@@ -338,11 +338,11 @@ class CloseCommand(Command):
if self.subject.isOpened:
self.subject.closeItem( player )
else:
- print "You can't close that, it's not open."
+ print("You can't close that, it's not open.")
else:
- print "You can't close that."
+ print("You can't close that.")
else:
- print "There is no %s here to close." % self.subject
+ print("There is no %s here to close." % self.subject)
class QuitCommand(Command):
def __init__(self, quals):
@@ -353,7 +353,7 @@ class QuitCommand(Command):
return "QUIT or Q - ends the game"
def _doCommand(self, player):
- print "Ok...."
+ print("Ok....")
player.gameOver = True
class HelpCommand(Command):
@@ -365,7 +365,7 @@ class HelpCommand(Command):
return "HELP or H or ? - displays this help message"
def _doCommand(self, player):
- print "Enter any of the following commands (not case sensitive):"
+ print("Enter any of the following commands (not case sensitive):")
for cmd in [
InventoryCommand,
DropCommand,
@@ -379,8 +379,8 @@ class HelpCommand(Command):
QuitCommand,
HelpCommand,
]:
- print " - %s" % cmd.helpDescription()
- print
+ print(" - %s" % cmd.helpDescription())
+ print()
class AppParseException(ParseException):
pass
@@ -460,14 +460,14 @@ class Parser(object):
try:
ret = self.bnf.parseString(cmdstr)
return ret
- except AppParseException, pe:
- print pe.msg
- except ParseException, pe:
- print random.choice([ "Sorry, I don't understand that.",
+ except AppParseException as pe:
+ print(pe.msg)
+ except ParseException as pe:
+ print(random.choice([ "Sorry, I don't understand that.",
"Huh?",
"Excuse me?",
"???",
- "What?" ] )
+ "What?" ] ))
class Player(object):
def __init__(self, name):
@@ -481,13 +481,13 @@ class Player(object):
if self.gameOver:
if rm.desc:
rm.describe()
- print "Game over!"
+ print("Game over!")
else:
rm.describe()
def take(self,it):
if it.isDeadly:
- print "Aaaagh!...., the %s killed me!" % it
+ print("Aaaagh!...., the %s killed me!" % it)
self.gameOver = True
else:
self.inv.append(it)
@@ -553,7 +553,7 @@ def createRooms( rm ):
# put items in rooms
def putItemInRoom(i,r):
- if isinstance(r,basestring):
+ if isinstance(r,str):
r = rooms[r]
r.addItem( Item.items[i] )
@@ -562,14 +562,14 @@ def playGame(p,startRoom):
parser = Parser()
p.moveTo( startRoom )
while not p.gameOver:
- cmdstr = raw_input(">> ")
+ cmdstr = input(">> ")
cmd = parser.parseCmd(cmdstr)
if cmd is not None:
cmd.command( p )
- print
- print "You ended the game with:"
+ print()
+ print("You ended the game with:")
for i in p.inv:
- print " -", aOrAn(i)
+ print(" -", aOrAn(i))
#====================
@@ -617,7 +617,7 @@ Item.items["shovel"].useAction = useShovel
Item.items["telescope"].isTakeable = False
def useTelescope(p,subj,target):
- print "You don't see anything."
+ print("You don't see anything.")
Item.items["telescope"].useAction = useTelescope
OpenableItem("treasure chest", Item.items["gold bar"])
diff --git a/src/examples/antlr_grammar.py b/src/examples/antlr_grammar.py
index b355ab5..adf877e 100644
--- a/src/examples/antlr_grammar.py
+++ b/src/examples/antlr_grammar.py
@@ -1,218 +1,218 @@
-'''
-antlr_grammar.py
-
-Created on 4 sept. 2010
-
-@author: luca
-
-(Minor updates by Paul McGuire, June, 2012)
-'''
-from pyparsing import Word, ZeroOrMore, printables, Suppress, OneOrMore, Group, \
- LineEnd, Optional, White, originalTextFor, hexnums, nums, Combine, Literal, Keyword, \
- cStyleComment, Regex, Forward, MatchFirst, And, srange, oneOf, alphas, alphanums, \
- delimitedList
-
-# http://www.antlr.org/grammar/ANTLR/ANTLRv3.g
-
-# Tokens
-EOL = Suppress(LineEnd()) # $
-singleTextString = originalTextFor(ZeroOrMore(~EOL + (White(" \t") | Word(printables)))).leaveWhitespace()
-XDIGIT = hexnums
-INT = Word(nums)
-ESC = Literal('\\') + (oneOf(list(r'nrtbf\">'+"'")) | ('u' + Word(hexnums, exact=4)) | Word(printables, exact=1))
-LITERAL_CHAR = ESC | ~(Literal("'") | Literal('\\')) + Word(printables, exact=1)
-CHAR_LITERAL = Suppress("'") + LITERAL_CHAR + Suppress("'")
-STRING_LITERAL = Suppress("'") + Combine(OneOrMore(LITERAL_CHAR)) + Suppress("'")
-DOUBLE_QUOTE_STRING_LITERAL = '"' + ZeroOrMore(LITERAL_CHAR) + '"'
-DOUBLE_ANGLE_STRING_LITERAL = '<<' + ZeroOrMore(Word(printables, exact=1)) + '>>'
-TOKEN_REF = Word(alphas.upper(), alphanums+'_')
-RULE_REF = Word(alphas.lower(), alphanums+'_')
-ACTION_ESC = (Suppress("\\") + Suppress("'")) | Suppress('\\"') | Suppress('\\') + (~(Literal("'") | Literal('"')) + Word(printables, exact=1))
-ACTION_CHAR_LITERAL = Suppress("'") + (ACTION_ESC | ~(Literal('\\') | Literal("'")) + Word(printables, exact=1)) + Suppress("'")
-ACTION_STRING_LITERAL = Suppress('"') + ZeroOrMore(ACTION_ESC | ~(Literal('\\') | Literal('"')) + Word(printables, exact=1)) + Suppress('"')
-SRC = Suppress('src') + ACTION_STRING_LITERAL("file") + INT("line")
-id = TOKEN_REF | RULE_REF
-SL_COMMENT = Suppress('//') + Suppress('$ANTLR') + SRC | ZeroOrMore(~EOL + Word(printables)) + EOL
-ML_COMMENT = cStyleComment
-WS = OneOrMore(Suppress(' ') | Suppress('\t') | (Optional(Suppress('\r')) + Literal('\n')))
-WS_LOOP = ZeroOrMore(SL_COMMENT | ML_COMMENT)
-NESTED_ARG_ACTION = Forward()
-NESTED_ARG_ACTION << Suppress('[') + ZeroOrMore(NESTED_ARG_ACTION | ACTION_STRING_LITERAL | ACTION_CHAR_LITERAL) + Suppress(']')
-ARG_ACTION = NESTED_ARG_ACTION
-NESTED_ACTION = Forward()
-NESTED_ACTION << Suppress('{') + ZeroOrMore(NESTED_ACTION | SL_COMMENT | ML_COMMENT | ACTION_STRING_LITERAL | ACTION_CHAR_LITERAL) + Suppress('}')
-ACTION = NESTED_ACTION + Optional('?')
-SCOPE = Suppress('scope')
-OPTIONS = Suppress('options') + Suppress('{') # + WS_LOOP + Suppress('{')
-TOKENS = Suppress('tokens') + Suppress('{') # + WS_LOOP + Suppress('{')
-FRAGMENT = 'fragment';
-TREE_BEGIN = Suppress('^(')
-ROOT = Suppress('^')
-BANG = Suppress('!')
-RANGE = Suppress('..')
-REWRITE = Suppress('->')
-
-# General Parser Definitions
-
-# Grammar heading
-optionValue = id | STRING_LITERAL | CHAR_LITERAL | INT | Literal('*').setName("s")
-
-option = Group(id("id") + Suppress('=') + optionValue("value"))("option")
-optionsSpec = OPTIONS + Group(OneOrMore(option + Suppress(';')))("options") + Suppress('}')
-tokenSpec = Group(TOKEN_REF("token_ref") + (Suppress('=') + (STRING_LITERAL | CHAR_LITERAL)("lit")))("token") + Suppress(';')
-tokensSpec = TOKENS + Group(OneOrMore(tokenSpec))("tokens") + Suppress('}')
-attrScope = Suppress('scope') + id + ACTION
-grammarType = Keyword('lexer') + Keyword('parser') + Keyword('tree')
-actionScopeName = id | Keyword('lexer')("l") | Keyword('parser')("p")
-action = Suppress('@') + Optional(actionScopeName + Suppress('::')) + id + ACTION
-
-grammarHeading = Optional(ML_COMMENT("ML_COMMENT")) + Optional(grammarType) + Suppress('grammar') + id("grammarName") + Suppress(';') + Optional(optionsSpec) + Optional(tokensSpec) + ZeroOrMore(attrScope) + ZeroOrMore(action)
-
-modifier = Keyword('protected') | Keyword('public') | Keyword('private') | Keyword('fragment')
-ruleAction = Suppress('@') + id + ACTION
-throwsSpec = Suppress('throws') + delimitedList(id)
-ruleScopeSpec = (Suppress('scope') + ACTION) | (Suppress('scope') + delimitedList(id) + Suppress(';')) | (Suppress('scope') + ACTION + Suppress('scope') + delimitedList(id) + Suppress(';'))
-unary_op = oneOf("^ !")
-notTerminal = CHAR_LITERAL | TOKEN_REF | STRING_LITERAL
-terminal = (CHAR_LITERAL | TOKEN_REF + Optional(ARG_ACTION) | STRING_LITERAL | '.') + Optional(unary_op)
-block = Forward()
-notSet = Suppress('~') + (notTerminal | block)
-rangeNotPython = CHAR_LITERAL("c1") + RANGE + CHAR_LITERAL("c2")
-atom = Group(rangeNotPython + Optional(unary_op)("op")) | terminal | (notSet + Optional(unary_op)("op")) | (RULE_REF + Optional(ARG_ACTION("arg")) + Optional(unary_op)("op"))
-element = Forward()
-treeSpec = Suppress('^(') + element*(2,) + Suppress(')')
-ebnfSuffix = oneOf("? * +")
-ebnf = block + Optional(ebnfSuffix("op") | '=>')
-elementNoOptionSpec = (id("result_name") + oneOf('= +=')("labelOp") + atom("atom") + Optional(ebnfSuffix)) | (id("result_name") + oneOf('= +=')("labelOp") + block + Optional(ebnfSuffix)) | atom("atom") + Optional(ebnfSuffix) | ebnf | ACTION | (treeSpec + Optional(ebnfSuffix)) # | SEMPRED ( '=>' -> GATED_SEMPRED | -> SEMPRED )
-element << Group(elementNoOptionSpec)("element")
-alternative = Group(Group(OneOrMore(element))("elements")) # Do not ask me why group is needed twice... seems like the xml that you see is not always the real structure?
-rewrite = Optional(Literal('TODO REWRITE RULES TODO'))
-block << Suppress('(') + Optional(Optional(optionsSpec("opts")) + Suppress(':')) + Group(alternative('a1') + rewrite + Group(ZeroOrMore(Suppress('|') + alternative('a2') + rewrite))("alternatives"))("block") + Suppress(')')
-altList = alternative('a1') + rewrite + Group(ZeroOrMore(Suppress('|') + alternative('a2') + rewrite))("alternatives")
-exceptionHandler = Suppress('catch') + ARG_ACTION + ACTION
-finallyClause = Suppress('finally') + ACTION
-exceptionGroup = (OneOrMore(exceptionHandler) + Optional(finallyClause)) | finallyClause
-
-ruleHeading = Optional(ML_COMMENT)("ruleComment") + Optional(modifier)("modifier") + id("ruleName") + Optional("!") + Optional(ARG_ACTION("arg")) + Optional(Suppress('returns') + ARG_ACTION("rt")) + Optional(throwsSpec) + Optional(optionsSpec) + Optional(ruleScopeSpec) + ZeroOrMore(ruleAction)
-rule = Group(ruleHeading + Suppress(':') + altList + Suppress(';') + Optional(exceptionGroup))("rule")
-
-grammarDef = grammarHeading + Group(OneOrMore(rule))("rules")
-
-def grammar():
- return grammarDef
-
-def __antlrAlternativesConverter(pyparsingRules, antlrBlock):
- rule = None
- if hasattr(antlrBlock, 'alternatives') and antlrBlock.alternatives != '' and len(antlrBlock.alternatives) > 0:
- alternatives = []
- alternatives.append(__antlrAlternativeConverter(pyparsingRules, antlrBlock.a1))
- for alternative in antlrBlock.alternatives:
- alternatives.append(__antlrAlternativeConverter(pyparsingRules, alternative))
- rule = MatchFirst(alternatives)("anonymous_or")
- elif hasattr(antlrBlock, 'a1') and antlrBlock.a1 != '':
- rule = __antlrAlternativeConverter(pyparsingRules, antlrBlock.a1)
- else:
- raise Exception('Not yet implemented')
- assert rule != None
- return rule
-
-def __antlrAlternativeConverter(pyparsingRules, antlrAlternative):
- elementList = []
- for element in antlrAlternative.elements:
- rule = None
- if hasattr(element.atom, 'c1') and element.atom.c1 != '':
- regex = r'['+str(element.atom.c1[0])+'-'+str(element.atom.c2[0]+']')
- rule = Regex(regex)("anonymous_regex")
- elif hasattr(element, 'block') and element.block != '':
- rule = __antlrAlternativesConverter(pyparsingRules, element.block)
- else:
- ruleRef = element.atom
- assert ruleRef in pyparsingRules
- rule = pyparsingRules[element.atom](element.atom)
- if hasattr(element, 'op') and element.op != '':
- if element.op == '+':
- rule = Group(OneOrMore(rule))("anonymous_one_or_more")
- elif element.op == '*':
- rule = Group(ZeroOrMore(rule))("anonymous_zero_or_more")
- elif element.op == '?':
- rule = Optional(rule)
- else:
- raise Exception('rule operator not yet implemented : ' + element.op)
- rule = rule
- elementList.append(rule)
- if len(elementList) > 1:
- rule = Group(And(elementList))("anonymous_and")
- else:
- rule = elementList[0]
- assert rule != None
- return rule
-
-def __antlrRuleConverter(pyparsingRules, antlrRule):
- rule = None
- rule = __antlrAlternativesConverter(pyparsingRules, antlrRule)
- assert rule != None
- rule(antlrRule.ruleName)
- return rule
-
-def antlrConverter(antlrGrammarTree):
- pyparsingRules = {}
- antlrTokens = {}
- for antlrToken in antlrGrammarTree.tokens:
- antlrTokens[antlrToken.token_ref] = antlrToken.lit
- for antlrTokenName, antlrToken in antlrTokens.items():
- pyparsingRules[antlrTokenName] = Literal(antlrToken)
- antlrRules = {}
- for antlrRule in antlrGrammarTree.rules:
- antlrRules[antlrRule.ruleName] = antlrRule
- pyparsingRules[antlrRule.ruleName] = Forward() # antlr is a top down grammar
- for antlrRuleName, antlrRule in antlrRules.items():
- pyparsingRule = __antlrRuleConverter(pyparsingRules, antlrRule)
- assert pyparsingRule != None
- pyparsingRules[antlrRuleName] << pyparsingRule
- return pyparsingRules
-
-if __name__ == "__main__":
-
- text = """grammar SimpleCalc;
-
-options {
- language = Python;
-}
-
-tokens {
- PLUS = '+' ;
- MINUS = '-' ;
- MULT = '*' ;
- DIV = '/' ;
-}
-
-/*------------------------------------------------------------------
- * PARSER RULES
- *------------------------------------------------------------------*/
-
-expr : term ( ( PLUS | MINUS ) term )* ;
-
-term : factor ( ( MULT | DIV ) factor )* ;
-
-factor : NUMBER ;
-
-
-/*------------------------------------------------------------------
- * LEXER RULES
- *------------------------------------------------------------------*/
-
-NUMBER : (DIGIT)+ ;
-
-/* WHITESPACE : ( '\t' | ' ' | '\r' | '\n'| '\u000C' )+ { $channel = HIDDEN; } ; */
-
-fragment DIGIT : '0'..'9' ;
-
-"""
-
- grammar().validate()
- antlrGrammarTree = grammar().parseString(text)
- print antlrGrammarTree.asXML("antlrGrammarTree")
- pyparsingRules = antlrConverter(antlrGrammarTree)
- pyparsingRule = pyparsingRules["expr"]
- pyparsingTree = pyparsingRule.parseString("2 - 5 * 42 + 7 / 25")
- print pyparsingTree.asXML("pyparsingTree")
+'''
+antlr_grammar.py
+
+Created on 4 sept. 2010
+
+@author: luca
+
+(Minor updates by Paul McGuire, June, 2012)
+'''
+from pyparsing import Word, ZeroOrMore, printables, Suppress, OneOrMore, Group, \
+ LineEnd, Optional, White, originalTextFor, hexnums, nums, Combine, Literal, Keyword, \
+ cStyleComment, Regex, Forward, MatchFirst, And, srange, oneOf, alphas, alphanums, \
+ delimitedList
+
+# http://www.antlr.org/grammar/ANTLR/ANTLRv3.g
+
+# Tokens
+EOL = Suppress(LineEnd()) # $
+singleTextString = originalTextFor(ZeroOrMore(~EOL + (White(" \t") | Word(printables)))).leaveWhitespace()
+XDIGIT = hexnums
+INT = Word(nums)
+ESC = Literal('\\') + (oneOf(list(r'nrtbf\">'+"'")) | ('u' + Word(hexnums, exact=4)) | Word(printables, exact=1))
+LITERAL_CHAR = ESC | ~(Literal("'") | Literal('\\')) + Word(printables, exact=1)
+CHAR_LITERAL = Suppress("'") + LITERAL_CHAR + Suppress("'")
+STRING_LITERAL = Suppress("'") + Combine(OneOrMore(LITERAL_CHAR)) + Suppress("'")
+DOUBLE_QUOTE_STRING_LITERAL = '"' + ZeroOrMore(LITERAL_CHAR) + '"'
+DOUBLE_ANGLE_STRING_LITERAL = '<<' + ZeroOrMore(Word(printables, exact=1)) + '>>'
+TOKEN_REF = Word(alphas.upper(), alphanums+'_')
+RULE_REF = Word(alphas.lower(), alphanums+'_')
+ACTION_ESC = (Suppress("\\") + Suppress("'")) | Suppress('\\"') | Suppress('\\') + (~(Literal("'") | Literal('"')) + Word(printables, exact=1))
+ACTION_CHAR_LITERAL = Suppress("'") + (ACTION_ESC | ~(Literal('\\') | Literal("'")) + Word(printables, exact=1)) + Suppress("'")
+ACTION_STRING_LITERAL = Suppress('"') + ZeroOrMore(ACTION_ESC | ~(Literal('\\') | Literal('"')) + Word(printables, exact=1)) + Suppress('"')
+SRC = Suppress('src') + ACTION_STRING_LITERAL("file") + INT("line")
+id = TOKEN_REF | RULE_REF
+SL_COMMENT = Suppress('//') + Suppress('$ANTLR') + SRC | ZeroOrMore(~EOL + Word(printables)) + EOL
+ML_COMMENT = cStyleComment
+WS = OneOrMore(Suppress(' ') | Suppress('\t') | (Optional(Suppress('\r')) + Literal('\n')))
+WS_LOOP = ZeroOrMore(SL_COMMENT | ML_COMMENT)
+NESTED_ARG_ACTION = Forward()
+NESTED_ARG_ACTION << Suppress('[') + ZeroOrMore(NESTED_ARG_ACTION | ACTION_STRING_LITERAL | ACTION_CHAR_LITERAL) + Suppress(']')
+ARG_ACTION = NESTED_ARG_ACTION
+NESTED_ACTION = Forward()
+NESTED_ACTION << Suppress('{') + ZeroOrMore(NESTED_ACTION | SL_COMMENT | ML_COMMENT | ACTION_STRING_LITERAL | ACTION_CHAR_LITERAL) + Suppress('}')
+ACTION = NESTED_ACTION + Optional('?')
+SCOPE = Suppress('scope')
+OPTIONS = Suppress('options') + Suppress('{') # + WS_LOOP + Suppress('{')
+TOKENS = Suppress('tokens') + Suppress('{') # + WS_LOOP + Suppress('{')
+FRAGMENT = 'fragment';
+TREE_BEGIN = Suppress('^(')
+ROOT = Suppress('^')
+BANG = Suppress('!')
+RANGE = Suppress('..')
+REWRITE = Suppress('->')
+
+# General Parser Definitions
+
+# Grammar heading
+optionValue = id | STRING_LITERAL | CHAR_LITERAL | INT | Literal('*').setName("s")
+
+option = Group(id("id") + Suppress('=') + optionValue("value"))("option")
+optionsSpec = OPTIONS + Group(OneOrMore(option + Suppress(';')))("options") + Suppress('}')
+tokenSpec = Group(TOKEN_REF("token_ref") + (Suppress('=') + (STRING_LITERAL | CHAR_LITERAL)("lit")))("token") + Suppress(';')
+tokensSpec = TOKENS + Group(OneOrMore(tokenSpec))("tokens") + Suppress('}')
+attrScope = Suppress('scope') + id + ACTION
+grammarType = Keyword('lexer') + Keyword('parser') + Keyword('tree')
+actionScopeName = id | Keyword('lexer')("l") | Keyword('parser')("p")
+action = Suppress('@') + Optional(actionScopeName + Suppress('::')) + id + ACTION
+
+grammarHeading = Optional(ML_COMMENT("ML_COMMENT")) + Optional(grammarType) + Suppress('grammar') + id("grammarName") + Suppress(';') + Optional(optionsSpec) + Optional(tokensSpec) + ZeroOrMore(attrScope) + ZeroOrMore(action)
+
+modifier = Keyword('protected') | Keyword('public') | Keyword('private') | Keyword('fragment')
+ruleAction = Suppress('@') + id + ACTION
+throwsSpec = Suppress('throws') + delimitedList(id)
+ruleScopeSpec = (Suppress('scope') + ACTION) | (Suppress('scope') + delimitedList(id) + Suppress(';')) | (Suppress('scope') + ACTION + Suppress('scope') + delimitedList(id) + Suppress(';'))
+unary_op = oneOf("^ !")
+notTerminal = CHAR_LITERAL | TOKEN_REF | STRING_LITERAL
+terminal = (CHAR_LITERAL | TOKEN_REF + Optional(ARG_ACTION) | STRING_LITERAL | '.') + Optional(unary_op)
+block = Forward()
+notSet = Suppress('~') + (notTerminal | block)
+rangeNotPython = CHAR_LITERAL("c1") + RANGE + CHAR_LITERAL("c2")
+atom = Group(rangeNotPython + Optional(unary_op)("op")) | terminal | (notSet + Optional(unary_op)("op")) | (RULE_REF + Optional(ARG_ACTION("arg")) + Optional(unary_op)("op"))
+element = Forward()
+treeSpec = Suppress('^(') + element*(2,) + Suppress(')')
+ebnfSuffix = oneOf("? * +")
+ebnf = block + Optional(ebnfSuffix("op") | '=>')
+elementNoOptionSpec = (id("result_name") + oneOf('= +=')("labelOp") + atom("atom") + Optional(ebnfSuffix)) | (id("result_name") + oneOf('= +=')("labelOp") + block + Optional(ebnfSuffix)) | atom("atom") + Optional(ebnfSuffix) | ebnf | ACTION | (treeSpec + Optional(ebnfSuffix)) # | SEMPRED ( '=>' -> GATED_SEMPRED | -> SEMPRED )
+element << Group(elementNoOptionSpec)("element")
+alternative = Group(Group(OneOrMore(element))("elements")) # Do not ask me why group is needed twice... seems like the xml that you see is not always the real structure?
+rewrite = Optional(Literal('TODO REWRITE RULES TODO'))
+block << Suppress('(') + Optional(Optional(optionsSpec("opts")) + Suppress(':')) + Group(alternative('a1') + rewrite + Group(ZeroOrMore(Suppress('|') + alternative('a2') + rewrite))("alternatives"))("block") + Suppress(')')
+altList = alternative('a1') + rewrite + Group(ZeroOrMore(Suppress('|') + alternative('a2') + rewrite))("alternatives")
+exceptionHandler = Suppress('catch') + ARG_ACTION + ACTION
+finallyClause = Suppress('finally') + ACTION
+exceptionGroup = (OneOrMore(exceptionHandler) + Optional(finallyClause)) | finallyClause
+
+ruleHeading = Optional(ML_COMMENT)("ruleComment") + Optional(modifier)("modifier") + id("ruleName") + Optional("!") + Optional(ARG_ACTION("arg")) + Optional(Suppress('returns') + ARG_ACTION("rt")) + Optional(throwsSpec) + Optional(optionsSpec) + Optional(ruleScopeSpec) + ZeroOrMore(ruleAction)
+rule = Group(ruleHeading + Suppress(':') + altList + Suppress(';') + Optional(exceptionGroup))("rule")
+
+grammarDef = grammarHeading + Group(OneOrMore(rule))("rules")
+
+def grammar():
+ return grammarDef
+
+def __antlrAlternativesConverter(pyparsingRules, antlrBlock):
+ rule = None
+ if hasattr(antlrBlock, 'alternatives') and antlrBlock.alternatives != '' and len(antlrBlock.alternatives) > 0:
+ alternatives = []
+ alternatives.append(__antlrAlternativeConverter(pyparsingRules, antlrBlock.a1))
+ for alternative in antlrBlock.alternatives:
+ alternatives.append(__antlrAlternativeConverter(pyparsingRules, alternative))
+ rule = MatchFirst(alternatives)("anonymous_or")
+ elif hasattr(antlrBlock, 'a1') and antlrBlock.a1 != '':
+ rule = __antlrAlternativeConverter(pyparsingRules, antlrBlock.a1)
+ else:
+ raise Exception('Not yet implemented')
+ assert rule != None
+ return rule
+
+def __antlrAlternativeConverter(pyparsingRules, antlrAlternative):
+ elementList = []
+ for element in antlrAlternative.elements:
+ rule = None
+ if hasattr(element.atom, 'c1') and element.atom.c1 != '':
+ regex = r'['+str(element.atom.c1[0])+'-'+str(element.atom.c2[0]+']')
+ rule = Regex(regex)("anonymous_regex")
+ elif hasattr(element, 'block') and element.block != '':
+ rule = __antlrAlternativesConverter(pyparsingRules, element.block)
+ else:
+ ruleRef = element.atom
+ assert ruleRef in pyparsingRules
+ rule = pyparsingRules[element.atom](element.atom)
+ if hasattr(element, 'op') and element.op != '':
+ if element.op == '+':
+ rule = Group(OneOrMore(rule))("anonymous_one_or_more")
+ elif element.op == '*':
+ rule = Group(ZeroOrMore(rule))("anonymous_zero_or_more")
+ elif element.op == '?':
+ rule = Optional(rule)
+ else:
+ raise Exception('rule operator not yet implemented : ' + element.op)
+ rule = rule
+ elementList.append(rule)
+ if len(elementList) > 1:
+ rule = Group(And(elementList))("anonymous_and")
+ else:
+ rule = elementList[0]
+ assert rule != None
+ return rule
+
+def __antlrRuleConverter(pyparsingRules, antlrRule):
+ rule = None
+ rule = __antlrAlternativesConverter(pyparsingRules, antlrRule)
+ assert rule != None
+ rule(antlrRule.ruleName)
+ return rule
+
+def antlrConverter(antlrGrammarTree):
+ pyparsingRules = {}
+ antlrTokens = {}
+ for antlrToken in antlrGrammarTree.tokens:
+ antlrTokens[antlrToken.token_ref] = antlrToken.lit
+ for antlrTokenName, antlrToken in list(antlrTokens.items()):
+ pyparsingRules[antlrTokenName] = Literal(antlrToken)
+ antlrRules = {}
+ for antlrRule in antlrGrammarTree.rules:
+ antlrRules[antlrRule.ruleName] = antlrRule
+ pyparsingRules[antlrRule.ruleName] = Forward() # antlr is a top down grammar
+ for antlrRuleName, antlrRule in list(antlrRules.items()):
+ pyparsingRule = __antlrRuleConverter(pyparsingRules, antlrRule)
+ assert pyparsingRule != None
+ pyparsingRules[antlrRuleName] << pyparsingRule
+ return pyparsingRules
+
+if __name__ == "__main__":
+
+ text = """grammar SimpleCalc;
+
+options {
+ language = Python;
+}
+
+tokens {
+ PLUS = '+' ;
+ MINUS = '-' ;
+ MULT = '*' ;
+ DIV = '/' ;
+}
+
+/*------------------------------------------------------------------
+ * PARSER RULES
+ *------------------------------------------------------------------*/
+
+expr : term ( ( PLUS | MINUS ) term )* ;
+
+term : factor ( ( MULT | DIV ) factor )* ;
+
+factor : NUMBER ;
+
+
+/*------------------------------------------------------------------
+ * LEXER RULES
+ *------------------------------------------------------------------*/
+
+NUMBER : (DIGIT)+ ;
+
+/* WHITESPACE : ( '\t' | ' ' | '\r' | '\n'| '\u000C' )+ { $channel = HIDDEN; } ; */
+
+fragment DIGIT : '0'..'9' ;
+
+"""
+
+ grammar().validate()
+ antlrGrammarTree = grammar().parseString(text)
+ print(antlrGrammarTree.asXML("antlrGrammarTree"))
+ pyparsingRules = antlrConverter(antlrGrammarTree)
+ pyparsingRule = pyparsingRules["expr"]
+ pyparsingTree = pyparsingRule.parseString("2 - 5 * 42 + 7 / 25")
+ print(pyparsingTree.asXML("pyparsingTree"))
diff --git a/src/examples/apicheck.py b/src/examples/apicheck.py
index b5b9358..4315ac9 100644
--- a/src/examples/apicheck.py
+++ b/src/examples/apicheck.py
@@ -42,14 +42,14 @@ test = """[ procname1 $par1 $par2 ]
api_scanner = apiRef.scanString(test)
while 1:
try:
- t,s,e = api_scanner.next()
- print "found %s on line %d" % (t.procname, lineno(s,test))
- except ParseSyntaxException, pe:
- print "invalid arg count on line", pe.lineno
- print pe.lineno,':',pe.line
+ t,s,e = next(api_scanner)
+ print("found %s on line %d" % (t.procname, lineno(s,test)))
+ except ParseSyntaxException as pe:
+ print("invalid arg count on line", pe.lineno)
+ print(pe.lineno,':',pe.line)
# reset api scanner to start after this exception location
test = "\n"*(pe.lineno-1)+test[pe.loc+1:]
api_scanner = apiRef.scanString(test)
except StopIteration:
break
-    
\ No newline at end of file
+
diff --git a/src/examples/btpyparse.py b/src/examples/btpyparse.py
index 9700ff2..f3c11ae 100644
--- a/src/examples/btpyparse.py
+++ b/src/examples/btpyparse.py
@@ -125,4 +125,4 @@ Some introductory text
number = {2}
}
"""
- print '\n\n'.join(defn.dump() for defn in parse_str(txt))
+ print('\n\n'.join(defn.dump() for defn in parse_str(txt)))
diff --git a/src/examples/builtin_parse_action_demo.py b/src/examples/builtin_parse_action_demo.py
index ef42640..3ec6af8 100644
--- a/src/examples/builtin_parse_action_demo.py
+++ b/src/examples/builtin_parse_action_demo.py
@@ -16,7 +16,7 @@ nums = OneOrMore(integer)
test = "2 54 34 2 211 66 43 2 0"
-print test
+print(test)
# try each of these builtins as parse actions
for fn in (sum, max, min, len, sorted, reversed, list, tuple, set, any, all):
@@ -26,4 +26,4 @@ for fn in (sum, max, min, len, sorted, reversed, list, tuple, set, any, all):
fn = lambda x : list(reversed(x))
# show how each builtin works as a free-standing parse action
- print fn_name, nums.setParseAction(fn).parseString(test)
+ print(fn_name, nums.setParseAction(fn).parseString(test))
diff --git a/src/examples/cLibHeader.py b/src/examples/cLibHeader.py
index 115aa08..bb98521 100644
--- a/src/examples/cLibHeader.py
+++ b/src/examples/cLibHeader.py
@@ -20,6 +20,6 @@ arglist = delimitedList(Group(vartype("type") + ident("name")))
functionCall = Keyword("int") + ident("name") + "(" + arglist("args") + ")" + ";"
for fn,s,e in functionCall.scanString(testdata):
- print fn.name
+ print(fn.name)
for a in fn.args:
- print " - %(name)s (%(type)s)" % a
+ print(" - %(name)s (%(type)s)" % a)
diff --git a/src/examples/chemicalFormulas.py b/src/examples/chemicalFormulas.py
index 63eff13..e7d7757 100644
--- a/src/examples/chemicalFormulas.py
+++ b/src/examples/chemicalFormulas.py
@@ -16,14 +16,14 @@ atomicWeight = {
def test( bnf, strg, fn=None ):
try:
- print strg,"->", bnf.parseString( strg ),
- except ParseException, pe:
- print pe
+ print(strg,"->", bnf.parseString( strg ), end=' ')
+ except ParseException as pe:
+ print(pe)
else:
if fn != None:
- print fn( bnf.parseString( strg ) )
+ print(fn( bnf.parseString( strg ) ))
else:
- print
+ print()
digits = "0123456789"
@@ -41,7 +41,7 @@ fn = lambda elemList : sum( [ atomicWeight[elem]*int(qty) for elem,qty in elemLi
test( formula, "H2O", fn )
test( formula, "C6H5OH", fn )
test( formula, "NaCl", fn )
-print
+print()
# Version 2 - access parsed items by field name
elementRef = Group( element("symbol") + Optional( Word( digits ), default="1" )("qty") )
@@ -51,7 +51,7 @@ fn = lambda elemList : sum( [ atomicWeight[elem.symbol]*int(elem.qty) for elem i
test( formula, "H2O", fn )
test( formula, "C6H5OH", fn )
test( formula, "NaCl", fn )
-print
+print()
# Version 3 - convert integers during parsing process
integer = Word( digits ).setParseAction(lambda t:int(t[0]))
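
chemicalFormulas.py (and several later examples) relied on Python 2's trailing-comma print to stay on the same line; the Python 3 replacement is the end=' ' keyword seen above. A small illustration:

# Python 2: print value,        (trailing comma suppresses the newline)
# Python 3: print(value, end=' ')
for value in ("H2O", "C6H5OH", "NaCl"):
    print(value, end=' ')   # keep printing on the same line
print()                     # then emit the newline explicitly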
diff --git a/src/examples/commasep.py b/src/examples/commasep.py
index d753eca..7696871 100644
--- a/src/examples/commasep.py
+++ b/src/examples/commasep.py
@@ -18,6 +18,6 @@ testData = [
]
for line in testData:
- print commaSeparatedList.parseString(line)
- print line.split(",")
- print
+ print(commaSeparatedList.parseString(line))
+ print(line.split(","))
+ print()
diff --git a/src/examples/configParse.py b/src/examples/configParse.py
index a01bef8..edea293 100644
--- a/src/examples/configParse.py
+++ b/src/examples/configParse.py
@@ -8,7 +8,7 @@
from pyparsing import \
Literal, Word, ZeroOrMore, Group, Dict, Optional, \
- printables, ParseException, restOfLine
+ printables, ParseException, restOfLine, empty
import pprint
@@ -30,7 +30,11 @@ def inifile_BNF():
nonequals = "".join( [ c for c in printables if c != "=" ] ) + " \t"
sectionDef = lbrack + Word( nonrbrack ) + rbrack
- keyDef = ~lbrack + Word( nonequals ) + equals + restOfLine
+ keyDef = ~lbrack + Word( nonequals ) + equals + empty + restOfLine
+ # strip any leading or trailing blanks from key
+ def stripKey(tokens):
+ tokens[0] = tokens[0].strip()
+ keyDef.setParseAction(stripKey)
# using Dict will allow retrieval of named data fields as attributes of the parsed results
inibnf = Dict( ZeroOrMore( Group( sectionDef + Dict( ZeroOrMore( Group( keyDef ) ) ) ) ) )
@@ -43,26 +47,26 @@ def inifile_BNF():
pp = pprint.PrettyPrinter(2)
def test( strng ):
- print strng
+ print(strng)
try:
- iniFile = file(strng)
+ iniFile = open(strng)
iniData = "".join( iniFile.readlines() )
bnf = inifile_BNF()
tokens = bnf.parseString( iniData )
pp.pprint( tokens.asList() )
- except ParseException, err:
- print err.line
- print " "*(err.column-1) + "^"
- print err
+ except ParseException as err:
+ print(err.line)
+ print(" "*(err.column-1) + "^")
+ print(err)
iniFile.close()
- print
+ print()
return tokens
ini = test("setup.ini")
-print "ini['Startup']['modemid'] =", ini['Startup']['modemid']
-print "ini.Startup =", ini.Startup
-print "ini.Startup.modemid =", ini.Startup.modemid
+print("ini['Startup']['modemid'] =", ini['Startup']['modemid'])
+print("ini.Startup =", ini.Startup)
+print("ini.Startup.modemid =", ini.Startup.modemid)
diff --git a/src/examples/cpp_enum_parser.py b/src/examples/cpp_enum_parser.py
index 70d60c0..cbd0932 100644
--- a/src/examples/cpp_enum_parser.py
+++ b/src/examples/cpp_enum_parser.py
@@ -48,5 +48,5 @@ for item,start,stop in enum.scanString(sample):
for entry in item.names:
if entry.value != '':
id = int(entry.value)
- print '%s_%s = %d' % (item.enum.upper(),entry.name.upper(),id)
+ print('%s_%s = %d' % (item.enum.upper(),entry.name.upper(),id))
id += 1
diff --git a/src/examples/deltaTime.py b/src/examples/deltaTime.py
index 8bfcf64..02539e6 100644
--- a/src/examples/deltaTime.py
+++ b/src/examples/deltaTime.py
@@ -59,7 +59,7 @@ def convertToAbsTime(toks):
else:
day = datetime(now.year, now.month, now.day)
if "timeOfDay" in toks:
- if isinstance(toks.timeOfDay,basestring):
+ if isinstance(toks.timeOfDay,str):
timeOfDay = {
"now" : timedelta(0, (now.hour*60+now.minute)*60+now.second, now.microsecond),
"noon" : timedelta(0,0,0,0,0,12),
@@ -196,10 +196,10 @@ noon last Sunday
next Sunday at 2pm""".splitlines()
for t in tests:
- print t, "(relative to %s)" % datetime.now()
+ print(t, "(relative to %s)" % datetime.now())
res = nlTimeExpression.parseString(t)
if "calculatedTime" in res:
- print res.calculatedTime
+ print(res.calculatedTime)
else:
- print "???"
- print
+ print("???")
+ print()
diff --git a/src/examples/dfmparse.py b/src/examples/dfmparse.py
index 1523497..96afbad 100644
--- a/src/examples/dfmparse.py
+++ b/src/examples/dfmparse.py
@@ -48,7 +48,7 @@ object_name = identifier
object_type = identifier
# Integer and floating point values are converted to Python longs and floats, respectively.
-int_value = Combine(Optional("-") + Word(nums)).setParseAction(lambda s,l,t: [ long(t[0]) ] )
+int_value = Combine(Optional("-") + Word(nums)).setParseAction(lambda s,l,t: [ int(t[0]) ] )
float_value = Combine(Optional("-") + Optional(Word(nums)) + "." + Word(nums)).setParseAction(lambda s,l,t: [ float(t[0]) ] )
number_value = float_value | int_value
@@ -121,7 +121,7 @@ nested_object << Group(object_definition)
#################
def printer(s, loc, tok):
- print tok,
+ print(tok, end=' ')
return tok
def get_filename_list(tf):
@@ -162,12 +162,12 @@ def main(testfiles=None, action=printer):
failures.append(f)
if failures:
- print '\nfailed while processing %s' % ', '.join(failures)
- print '\nsucceeded on %d of %d files' %(success, len(testfiles))
+ print('\nfailed while processing %s' % ', '.join(failures))
+ print('\nsucceeded on %d of %d files' %(success, len(testfiles)))
if len(retval) == 1 and len(testfiles) == 1:
# if only one file is parsed, return the parseResults directly
- return retval[retval.keys()[0]]
+ return retval[list(retval.keys())[0]]
# else, return a dictionary of parseResults
return retval
diff --git a/src/examples/dhcpd_leases_parser.py b/src/examples/dhcpd_leases_parser.py
index 6a5a87a..145e6ea 100644
--- a/src/examples/dhcpd_leases_parser.py
+++ b/src/examples/dhcpd_leases_parser.py
@@ -82,6 +82,6 @@ leaseDef = "lease" + ipAddress("ipaddress") + LBRACE + \
Dict(ZeroOrMore(Group(leaseStatement))) + RBRACE
for lease in leaseDef.searchString(sample):
- print lease.dump()
- print lease.ipaddress,'->',lease.hardware.mac
- print
+ print(lease.dump())
+ print(lease.ipaddress,'->',lease.hardware.mac)
+ print()
diff --git a/src/examples/dictExample.py b/src/examples/dictExample.py
index 7fbadf1..5085aed 100644
--- a/src/examples/dictExample.py
+++ b/src/examples/dictExample.py
@@ -34,8 +34,8 @@ datatable = heading + Dict( ZeroOrMore(rowData) ) + trailing
# now parse data and print results
data = datatable.parseString(testData)
-print data
+print(data)
pprint.pprint(data.asList())
-print "data keys=", data.keys()
-print "data['min']=", data['min']
-print "data.max", data.max
+print("data keys=", list(data.keys()))
+print("data['min']=", data['min'])
+print("data.max", data.max)
diff --git a/src/examples/dictExample2.py b/src/examples/dictExample2.py
index 9e990ad..ccb7d3c 100644
--- a/src/examples/dictExample2.py
+++ b/src/examples/dictExample2.py
@@ -36,25 +36,25 @@ datatable = heading + Dict( ZeroOrMore(rowData) ) + trailing
# now parse data and print results
data = datatable.parseString(testData)
-print data
-print data.asXML("DATA")
+print(data)
+print(data.asXML("DATA"))
pprint.pprint(data.asList())
-print "data keys=", data.keys()
-print "data['min']=", data['min']
-print "sum(data['min']) =", sum(data['min'])
-print "data.max =", data.max
-print "sum(data.max) =", sum(data.max)
+print("data keys=", list(data.keys()))
+print("data['min']=", data['min'])
+print("sum(data['min']) =", sum(data['min']))
+print("data.max =", data.max)
+print("sum(data.max) =", sum(data.max))
# now print transpose of data table, using column labels read from table header and
# values from data lists
-print
-print " " * 5,
+print()
+print(" " * 5, end=' ')
for i in range(1,len(data)):
- print "|%5s" % data[i][0],
-print
-print ("-" * 6) + ("+------" * (len(data)-1))
+ print("|%5s" % data[i][0], end=' ')
+print()
+print(("-" * 6) + ("+------" * (len(data)-1)))
for i in range(len(data.columns)):
- print "%5s" % data.columns[i],
+ print("%5s" % data.columns[i], end=' ')
for j in range(len(data) - 1):
- print '|%5s' % data[j + 1][i + 1],
- print
+ print('|%5s' % data[j + 1][i + 1], end=' ')
+ print()
diff --git a/src/examples/ebnftest.py b/src/examples/ebnftest.py
index f0ce654..32c7fed 100644
--- a/src/examples/ebnftest.py
+++ b/src/examples/ebnftest.py
@@ -1,7 +1,7 @@
-print 'Importing pyparsing...'
+print('Importing pyparsing...')
from pyparsing import *
-print 'Constructing EBNF parser with pyparsing...'
+print('Constructing EBNF parser with pyparsing...')
import ebnf
import sets
@@ -34,7 +34,7 @@ table['terminal_string'] = sglQuotedString
table['meta_identifier'] = Word(alphas+"_", alphas+"_"+nums)
table['integer'] = Word(nums)
-print 'Parsing EBNF grammar with EBNF parser...'
+print('Parsing EBNF grammar with EBNF parser...')
parsers = ebnf.parse(grammar, table)
ebnf_parser = parsers['syntax']
@@ -44,7 +44,7 @@ def tallyCommentChars(s,l,t):
global commentcharcount,commentlocs
# only count this comment if we haven't seen it before
if l not in commentlocs:
- charCount = ( len(t[0]) - len(filter(str.isspace, t[0])) )
+ charCount = ( len(t[0]) - len(list(filter(str.isspace, t[0]))) )
commentcharcount += charCount
commentlocs.add(l)
return l,t
@@ -53,14 +53,14 @@ def tallyCommentChars(s,l,t):
ebnf.ebnfComment.setParseAction( tallyCommentChars )
ebnf_parser.ignore( ebnf.ebnfComment )
-print 'Parsing EBNF grammar with generated EBNF parser...\n'
+print('Parsing EBNF grammar with generated EBNF parser...\n')
parsed_chars = ebnf_parser.parseString(grammar)
parsed_char_len = len(parsed_chars)
-print "],\n".join(str( parsed_chars.asList() ).split("],"))
+print("],\n".join(str( parsed_chars.asList() ).split("],")))
#~ grammar_length = len(grammar) - len(filter(str.isspace, grammar))-commentcharcount
#~ assert parsed_char_len == grammar_length
-print 'Ok!'
+print('Ok!')
diff --git a/src/examples/eval_arith.py b/src/examples/eval_arith.py
index 4d7683c..85566c7 100644
--- a/src/examples/eval_arith.py
+++ b/src/examples/eval_arith.py
@@ -35,7 +35,7 @@ def operatorOperands(tokenlist):
it = iter(tokenlist)
while 1:
try:
- yield (it.next(), it.next())
+ yield (next(it), next(it))
except StopIteration:
break
@@ -210,18 +210,18 @@ def main():
for test,expected in tests:
ret = comp_expr.parseString(test)[0]
parsedvalue = ret.eval()
- print test, expected, parsedvalue,
+ print(test, expected, parsedvalue, end=' ')
if parsedvalue != expected:
- print "<<< FAIL"
+ print("<<< FAIL")
failed += 1
else:
- print
+ print()
- print
+ print()
if failed:
- print failed, "tests FAILED"
+ print(failed, "tests FAILED")
else:
- print "all tests PASSED"
+ print("all tests PASSED")
if __name__=='__main__':
main()
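
The operatorOperands() generator converted above (like apicheck.py's scanner loop earlier) shows the iterator-protocol change: Python 2's it.next() becomes the builtin next(it). A self-contained copy of the pattern, renamed pairs() for illustration:

def pairs(tokenlist):
    it = iter(tokenlist)
    while True:
        try:
            # consume two items per step: (operator, operand)
            yield (next(it), next(it))
        except StopIteration:
            break

print(list(pairs(["+", 1, "-", 2])))   # [('+', 1), ('-', 2)]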
diff --git a/src/examples/excelExpr.py b/src/examples/excelExpr.py
index c759345..0d0c06a 100644
--- a/src/examples/excelExpr.py
+++ b/src/examples/excelExpr.py
@@ -65,8 +65,8 @@ test3 = "=if(Sum(A1:A25)>42,Min(B1:B25), " \
test3a = "=sum(a1:a25,10,min(b1,c2,d3))"
import pprint
-tests = [locals()[t] for t in locals().keys() if t.startswith("test")]
+tests = [locals()[t] for t in list(locals().keys()) if t.startswith("test")]
for test in tests:
- print test
+ print(test)
pprint.pprint( (EQ + expr).parseString(test,parseAll=True).asList() )
- print
+ print()
diff --git a/src/examples/fourFn.py b/src/examples/fourFn.py
index e49227c..fedd2aa 100644
--- a/src/examples/fourFn.py
+++ b/src/examples/fourFn.py
@@ -108,7 +108,7 @@ def evaluateStack( s ):
return float( op )
if __name__ == "__main__":
-
+
def test( s, expVal ):
global exprStack
exprStack = []
@@ -116,74 +116,74 @@ if __name__ == "__main__":
results = BNF().parseString( s, parseAll=True )
val = evaluateStack( exprStack[:] )
except ParseException as e:
- print s, "failed parse:", str(pe)
+ print(s, "failed parse:", str(pe))
except Exception as e:
- print s, "failed eval:", str(e)
+ print(s, "failed eval:", str(e))
else:
if val == expVal:
- print s, "=", val, results, "=>", exprStack
+ print(s, "=", val, results, "=>", exprStack)
else:
- print s+"!!!", val, "!=", expVal, results, "=>", exprStack
+ print(s+"!!!", val, "!=", expVal, results, "=>", exprStack)
- test( "9", 9 )
+ test( "9", 9 )
test( "-9", -9 )
- test( "--9", 9 )
- test( "-E", -math.e )
- test( "9 + 3 + 6", 9 + 3 + 6 )
- test( "9 + 3 / 11", 9 + 3.0 / 11 )
- test( "(9 + 3)", (9 + 3) )
- test( "(9+3) / 11", (9+3.0) / 11 )
- test( "9 - 12 - 6", 9 - 12 - 6 )
- test( "9 - (12 - 6)", 9 - (12 - 6) )
- test( "2*3.14159", 2*3.14159 )
- test( "3.1415926535*3.1415926535 / 10", 3.1415926535*3.1415926535 / 10 )
- test( "PI * PI / 10", math.pi * math.pi / 10 )
- test( "PI*PI/10", math.pi*math.pi/10 )
- test( "PI^2", math.pi**2 )
- test( "round(PI^2)", round(math.pi**2) )
- test( "6.02E23 * 8.048", 6.02E23 * 8.048 )
- test( "e / 3", math.e / 3 )
- test( "sin(PI/2)", math.sin(math.pi/2) )
- test( "trunc(E)", int(math.e) )
- test( "trunc(-E)", int(-math.e) )
- test( "round(E)", round(math.e) )
- test( "round(-E)", round(-math.e) )
- test( "E^PI", math.e**math.pi )
- test( "2^3^2", 2**3**2 )
+ test( "--9", 9 )
+ test( "-E", -math.e )
+ test( "9 + 3 + 6", 9 + 3 + 6 )
+ test( "9 + 3 / 11", 9 + 3.0 / 11 )
+ test( "(9 + 3)", (9 + 3) )
+ test( "(9+3) / 11", (9+3.0) / 11 )
+ test( "9 - 12 - 6", 9 - 12 - 6 )
+ test( "9 - (12 - 6)", 9 - (12 - 6) )
+ test( "2*3.14159", 2*3.14159 )
+ test( "3.1415926535*3.1415926535 / 10", 3.1415926535*3.1415926535 / 10 )
+ test( "PI * PI / 10", math.pi * math.pi / 10 )
+ test( "PI*PI/10", math.pi*math.pi/10 )
+ test( "PI^2", math.pi**2 )
+ test( "round(PI^2)", round(math.pi**2) )
+ test( "6.02E23 * 8.048", 6.02E23 * 8.048 )
+ test( "e / 3", math.e / 3 )
+ test( "sin(PI/2)", math.sin(math.pi/2) )
+ test( "trunc(E)", int(math.e) )
+ test( "trunc(-E)", int(-math.e) )
+ test( "round(E)", round(math.e) )
+ test( "round(-E)", round(-math.e) )
+ test( "E^PI", math.e**math.pi )
+ test( "2^3^2", 2**3**2 )
test( "2^3+2", 2**3+2 )
test( "2^3+5", 2**3+5 )
- test( "2^9", 2**9 )
- test( "sgn(-2)", -1 )
- test( "sgn(0)", 0 )
+ test( "2^9", 2**9 )
+ test( "sgn(-2)", -1 )
+ test( "sgn(0)", 0 )
test( "foo(0.1)", 1 )
test( "sgn(0.1)", 1 )
"""
Test output:
->pythonw -u fourFn.py
-9 = 9.0 ['9'] => ['9']
-9 + 3 + 6 = 18.0 ['9', '+', '3', '+', '6'] => ['9', '3', '+', '6', '+']
-9 + 3 / 11 = 9.27272727273 ['9', '+', '3', '/', '11'] => ['9', '3', '11', '/', '+']
-(9 + 3) = 12.0 [] => ['9', '3', '+']
-(9+3) / 11 = 1.09090909091 ['/', '11'] => ['9', '3', '+', '11', '/']
-9 - 12 - 6 = -9.0 ['9', '-', '12', '-', '6'] => ['9', '12', '-', '6', '-']
-9 - (12 - 6) = 3.0 ['9', '-'] => ['9', '12', '6', '-', '-']
-2*3.14159 = 6.28318 ['2', '*', '3.14159'] => ['2', '3.14159', '*']
-3.1415926535*3.1415926535 / 10 = 0.986960440053 ['3.1415926535', '*', '3.1415926535', '/', '10'] => ['3.1415926535', '3.1415926535', '*', '10', '/']
-PI * PI / 10 = 0.986960440109 ['PI', '*', 'PI', '/', '10'] => ['PI', 'PI', '*', '10', '/']
-PI*PI/10 = 0.986960440109 ['PI', '*', 'PI', '/', '10'] => ['PI', 'PI', '*', '10', '/']
-PI^2 = 9.86960440109 ['PI', '^', '2'] => ['PI', '2', '^']
-6.02E23 * 8.048 = 4.844896e+024 ['6.02E23', '*', '8.048'] => ['6.02E23', '8.048', '*']
-e / 3 = 0.90609394282 ['E', '/', '3'] => ['E', '3', '/']
-sin(PI/2) = 1.0 ['sin', 'PI', '/', '2'] => ['PI', '2', '/', 'sin']
-trunc(E) = 2 ['trunc', 'E'] => ['E', 'trunc']
-E^PI = 23.1406926328 ['E', '^', 'PI'] => ['E', 'PI', '^']
-2^3^2 = 512.0 ['2', '^', '3', '^', '2'] => ['2', '3', '2', '^', '^']
-2^3+2 = 10.0 ['2', '^', '3', '+', '2'] => ['2', '3', '^', '2', '+']
-2^9 = 512.0 ['2', '^', '9'] => ['2', '9', '^']
-sgn(-2) = -1 ['sgn', '-2'] => ['-2', 'sgn']
-sgn(0) = 0 ['sgn', '0'] => ['0', 'sgn']
+>pythonw -u fourFn.py
+9 = 9.0 ['9'] => ['9']
+9 + 3 + 6 = 18.0 ['9', '+', '3', '+', '6'] => ['9', '3', '+', '6', '+']
+9 + 3 / 11 = 9.27272727273 ['9', '+', '3', '/', '11'] => ['9', '3', '11', '/', '+']
+(9 + 3) = 12.0 [] => ['9', '3', '+']
+(9+3) / 11 = 1.09090909091 ['/', '11'] => ['9', '3', '+', '11', '/']
+9 - 12 - 6 = -9.0 ['9', '-', '12', '-', '6'] => ['9', '12', '-', '6', '-']
+9 - (12 - 6) = 3.0 ['9', '-'] => ['9', '12', '6', '-', '-']
+2*3.14159 = 6.28318 ['2', '*', '3.14159'] => ['2', '3.14159', '*']
+3.1415926535*3.1415926535 / 10 = 0.986960440053 ['3.1415926535', '*', '3.1415926535', '/', '10'] => ['3.1415926535', '3.1415926535', '*', '10', '/']
+PI * PI / 10 = 0.986960440109 ['PI', '*', 'PI', '/', '10'] => ['PI', 'PI', '*', '10', '/']
+PI*PI/10 = 0.986960440109 ['PI', '*', 'PI', '/', '10'] => ['PI', 'PI', '*', '10', '/']
+PI^2 = 9.86960440109 ['PI', '^', '2'] => ['PI', '2', '^']
+6.02E23 * 8.048 = 4.844896e+024 ['6.02E23', '*', '8.048'] => ['6.02E23', '8.048', '*']
+e / 3 = 0.90609394282 ['E', '/', '3'] => ['E', '3', '/']
+sin(PI/2) = 1.0 ['sin', 'PI', '/', '2'] => ['PI', '2', '/', 'sin']
+trunc(E) = 2 ['trunc', 'E'] => ['E', 'trunc']
+E^PI = 23.1406926328 ['E', '^', 'PI'] => ['E', 'PI', '^']
+2^3^2 = 512.0 ['2', '^', '3', '^', '2'] => ['2', '3', '2', '^', '^']
+2^3+2 = 10.0 ['2', '^', '3', '+', '2'] => ['2', '3', '^', '2', '+']
+2^9 = 512.0 ['2', '^', '9'] => ['2', '9', '^']
+sgn(-2) = -1 ['sgn', '-2'] => ['-2', 'sgn']
+sgn(0) = 0 ['sgn', '0'] => ['0', 'sgn']
sgn(0.1) = 1 ['sgn', '0.1'] => ['0.1', 'sgn']
>Exit code: 0
"""
diff --git a/src/examples/fourFn.pyc b/src/examples/fourFn.pyc
deleted file mode 100644
index c20e9e0..0000000
--- a/src/examples/fourFn.pyc
+++ /dev/null
Binary files differ
diff --git a/src/examples/gen_ctypes.py b/src/examples/gen_ctypes.py
index 0fb62e4..2c909e4 100644
--- a/src/examples/gen_ctypes.py
+++ b/src/examples/gen_ctypes.py
@@ -132,36 +132,36 @@ for en_,_,_ in enum_def.scanString(c_header):
for ev in en_.evalues:
enum_constants.append( (ev.name, ev.value) )
-print "from ctypes import *"
-print "%s = CDLL('%s.dll')" % (module, module)
-print
-print "# user defined types"
+print("from ctypes import *")
+print("%s = CDLL('%s.dll')" % (module, module))
+print()
+print("# user defined types")
for tdname,tdtyp in typedefs:
- print "%s = %s" % (tdname, typemap[tdtyp])
+ print("%s = %s" % (tdname, typemap[tdtyp]))
for fntd in fn_typedefs:
- print "%s = CFUNCTYPE(%s)" % (fntd.fn_name,
- ',\n '.join(typeAsCtypes(a.argtype) for a in fntd.fn_args))
+ print("%s = CFUNCTYPE(%s)" % (fntd.fn_name,
+ ',\n '.join(typeAsCtypes(a.argtype) for a in fntd.fn_args)))
for udtype in user_defined_types:
- print "class %s(Structure): pass" % typemap[udtype]
+ print("class %s(Structure): pass" % typemap[udtype])
-print
-print "# constant definitions"
+print()
+print("# constant definitions")
for en,ev in enum_constants:
- print "%s = %s" % (en,ev)
+ print("%s = %s" % (en,ev))
-print
-print "# functions"
+print()
+print("# functions")
for fn in functions:
prefix = "%s.%s" % (module, fn.fn_name)
- print "%s.restype = %s" % (prefix, typeAsCtypes(fn.fn_type))
+ print("%s.restype = %s" % (prefix, typeAsCtypes(fn.fn_type)))
if fn.varargs:
- print "# warning - %s takes variable argument list" % prefix
+ print("# warning - %s takes variable argument list" % prefix)
del fn.fn_args[-1]
if fn.fn_args.asList() != [['void']]:
- print "%s.argtypes = (%s,)" % (prefix, ','.join(typeAsCtypes(a.argtype) for a in fn.fn_args))
+ print("%s.argtypes = (%s,)" % (prefix, ','.join(typeAsCtypes(a.argtype) for a in fn.fn_args)))
else:
- print "%s.argtypes = ()" % (prefix)
+ print("%s.argtypes = ()" % (prefix))
diff --git a/src/examples/getNTPservers.py b/src/examples/getNTPservers.py
index 586bc5f..bbf1d60 100644
--- a/src/examples/getNTPservers.py
+++ b/src/examples/getNTPservers.py
@@ -6,7 +6,7 @@
# Copyright 2004, by Paul McGuire
#
from pyparsing import Word, Combine, Suppress, CharsNotIn, nums
-import urllib
+import urllib.request, urllib.parse, urllib.error
integer = Word(nums)
ipAddress = Combine( integer + "." + integer + "." + integer + "." + integer )
@@ -17,13 +17,13 @@ timeServerPattern = tdStart + ipAddress.setResultsName("ipAddr") + tdEnd + \
# get list of time servers
nistTimeServerURL = "http://www.boulder.nist.gov/timefreq/service/time-servers.html"
-serverListPage = urllib.urlopen( nistTimeServerURL )
+serverListPage = urllib.request.urlopen( nistTimeServerURL )
serverListHTML = serverListPage.read()
serverListPage.close()
addrs = {}
for srvr,startloc,endloc in timeServerPattern.scanString( serverListHTML ):
- print srvr.ipAddr, "-", srvr.loc
+ print(srvr.ipAddr, "-", srvr.loc)
addrs[srvr.ipAddr] = srvr.loc
# or do this:
#~ addr,loc = srvr
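
Note: the urllib edits in this and the following examples track the Python 3 module relayout, in which urllib.urlopen() moved to urllib.request.urlopen(). A minimal sketch of the pattern, using a placeholder URL:

    import urllib.request

    # Python 2: page = urllib.urlopen(url)
    page = urllib.request.urlopen("http://example.com/")   # placeholder URL
    html = page.read()    # returns bytes under Python 3, not str
    page.close()
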
diff --git a/src/examples/getNTPserversNew.py b/src/examples/getNTPserversNew.py
index 47d5439..65b8310 100644
--- a/src/examples/getNTPserversNew.py
+++ b/src/examples/getNTPserversNew.py
@@ -8,7 +8,7 @@
#
from pyparsing import (Word, Combine, Suppress, SkipTo, nums, makeHTMLTags,
delimitedList, alphas, alphanums)
-import urllib
+import urllib.request, urllib.parse, urllib.error
integer = Word(nums)
ipAddress = Combine( integer + "." + integer + "." + integer + "." + integer )
@@ -20,11 +20,11 @@ timeServerPattern = (tdStart + hostname("hostname") + tdEnd +
# get list of time servers
nistTimeServerURL = "http://tf.nist.gov/tf-cgi/servers.cgi#"
-serverListPage = urllib.urlopen( nistTimeServerURL )
+serverListPage = urllib.request.urlopen( nistTimeServerURL )
serverListHTML = serverListPage.read()
serverListPage.close()
addrs = {}
for srvr,startloc,endloc in timeServerPattern.scanString( serverListHTML ):
- print "%s (%s) - %s" % (srvr.ipAddr, srvr.hostname.strip(), srvr.loc.strip())
+ print("%s (%s) - %s" % (srvr.ipAddr, srvr.hostname.strip(), srvr.loc.strip()))
addrs[srvr.ipAddr] = srvr.loc
diff --git a/src/examples/greeting.py b/src/examples/greeting.py
index 46108b8..2e6b241 100644
--- a/src/examples/greeting.py
+++ b/src/examples/greeting.py
@@ -14,4 +14,4 @@ greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
# parse input string
-print hello, "->", greet.parseString( hello )
+print(hello, "->", greet.parseString( hello ))
diff --git a/src/examples/greetingInGreek.py b/src/examples/greetingInGreek.py
index 7869b68..07a8f0a 100644
--- a/src/examples/greetingInGreek.py
+++ b/src/examples/greetingInGreek.py
@@ -7,12 +7,12 @@
from pyparsing import Word
# define grammar
-alphas = u''.join(unichr(x) for x in xrange(0x386, 0x3ce))
-greet = Word(alphas) + u',' + Word(alphas) + u'!'
+alphas = ''.join(chr(x) for x in range(0x386, 0x3ce))
+greet = Word(alphas) + ',' + Word(alphas) + '!'
# input string
hello = "ΚαλημέÏα, κόσμε!".decode('utf-8')
# parse input string
-print greet.parseString( hello )
+print(greet.parseString( hello ))
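
Note: the greetingInGreek edit bundles three related renames: text literals are Unicode by default in Python 3 (so the u prefix is dropped), unichr() becomes chr(), and xrange() becomes range(). A minimal sketch reusing the code point range from the example:

    # Python 2: alphas = u''.join(unichr(x) for x in xrange(0x386, 0x3ce))
    alphas = ''.join(chr(x) for x in range(0x386, 0x3ce))   # Greek letters
    print(len(alphas))
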
diff --git a/src/examples/greetingInKorean.py b/src/examples/greetingInKorean.py
index 800a7fc..d0ea0e5 100644
--- a/src/examples/greetingInKorean.py
+++ b/src/examples/greetingInKorean.py
@@ -13,8 +13,8 @@ koreanWord = Word(koreanChars,min=2)
greet = koreanWord + "," + koreanWord + "!"
# input string
-hello = u'\uc548\ub155, \uc5ec\ub7ec\ubd84!' #"Hello, World!" in Korean
+hello = '\uc548\ub155, \uc5ec\ub7ec\ubd84!' #"Hello, World!" in Korean
# parse input string
-print greet.parseString( hello )
+print(greet.parseString( hello ))
diff --git a/src/examples/groupUsingListAllMatches.py b/src/examples/groupUsingListAllMatches.py
index ca9e5ab..d5037b8 100644
--- a/src/examples/groupUsingListAllMatches.py
+++ b/src/examples/groupUsingListAllMatches.py
@@ -13,4 +13,4 @@ cExpr = Word("C", nums)
grammar = ZeroOrMore(aExpr("A*") | bExpr("B*") | cExpr("C*"))
results = grammar.parseString("A1 B1 A2 C1 B2 A3")
-print results.dump()
+print(results.dump())
diff --git a/src/examples/holaMundo.py b/src/examples/holaMundo.py
index 0e10069..357e3c8 100644
--- a/src/examples/holaMundo.py
+++ b/src/examples/holaMundo.py
@@ -1,3 +1,5 @@
+# -*- coding: UTF-8 -*-
+
# escrito por Marco Alfonso, 2004 Noviembre
# importamos el modulo
@@ -14,7 +16,7 @@ tokens = saludo.parseString("Hola, Mundo !")
# el metodo parseString, nos devuelve una lista con los tokens
# encontrados, en caso de no haber errores...
for i in range(len(tokens)):
- print "Token %d -> %s" % (i,tokens[i])
+ print ("Token %d -> %s" % (i,tokens[i]))
#imprimimos cada uno de los tokens Y listooo!!, he aquí la salida
# Token 0—> Hola Token 1—> , Token 2—> Mundo Token 3—> !
@@ -23,6 +25,6 @@ for i in range(len(tokens)):
numimag = Word(nums) + 'i'
numreal = Word(nums)
numcomplex = numreal + '+' + numimag
-print numcomplex.parseString("3+5i")
-
+print (numcomplex.parseString("3+5i"))
+
# Excelente!!, bueno, los dejo, me voy a seguir tirando código…
diff --git a/src/examples/htmlStripper.py b/src/examples/htmlStripper.py
index 502acc5..0b0f459 100644
--- a/src/examples/htmlStripper.py
+++ b/src/examples/htmlStripper.py
@@ -7,7 +7,7 @@
# Copyright (c) 2006, Paul McGuire
#
from pyparsing import *
-import urllib
+import urllib.request, urllib.parse, urllib.error
removeText = replaceWith("")
scriptOpen,scriptClose = makeHTMLTags("script")
@@ -23,7 +23,7 @@ commonHTMLEntity.setParseAction(replaceHTMLEntity)
# get some HTML
targetURL = "http://wiki.python.org/moin/PythonDecoratorLibrary"
-targetPage = urllib.urlopen( targetURL )
+targetPage = urllib.request.urlopen( targetURL )
targetHTML = targetPage.read()
targetPage.close()
@@ -36,4 +36,4 @@ repeatedNewlines = LineEnd() + OneOrMore(LineEnd())
repeatedNewlines.setParseAction(replaceWith("\n\n"))
secondPass = repeatedNewlines.transformString(firstPass)
-print secondPass \ No newline at end of file
+print(secondPass) \ No newline at end of file
diff --git a/src/examples/httpServerLogParser.py b/src/examples/httpServerLogParser.py
index 9f5584d..1a808ae 100644
--- a/src/examples/httpServerLogParser.py
+++ b/src/examples/httpServerLogParser.py
@@ -63,9 +63,9 @@ testdata = """
for line in testdata.split("\n"):
if not line: continue
fields = getLogLineBNF().parseString(line)
- print fields.dump()
+ print(fields.dump())
#~ print repr(fields)
#~ for k in fields.keys():
#~ print "fields." + k + " =", fields[k]
- print fields.asXML("LOG")
- print
+ print(fields.asXML("LOG"))
+ print()
diff --git a/src/examples/idlParse.py b/src/examples/idlParse.py
index c1d5e4f..419e56f 100644
--- a/src/examples/idlParse.py
+++ b/src/examples/idlParse.py
@@ -119,20 +119,20 @@ def CORBA_IDL_BNF():
testnum = 1
def test( strng ):
global testnum
- print strng
+ print(strng)
try:
bnf = CORBA_IDL_BNF()
tokens = bnf.parseString( strng )
- print "tokens = "
+ print("tokens = ")
pprint.pprint( tokens.asList() )
imgname = "idlParse%02d.bmp" % testnum
testnum += 1
#~ tree2image.str2image( str(tokens.asList()), imgname )
- except ParseException, err:
- print err.line
- print " "*(err.column-1) + "^"
- print err
- print
+ except ParseException as err:
+ print(err.line)
+ print(" "*(err.column-1) + "^")
+ print(err)
+ print()
if __name__ == "__main__":
test(
diff --git a/src/examples/indentedGrammarExample.py b/src/examples/indentedGrammarExample.py
index e7b8cb7..f96524a 100644
--- a/src/examples/indentedGrammarExample.py
+++ b/src/examples/indentedGrammarExample.py
@@ -71,7 +71,7 @@ rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << ( funcDef | assignment | identifier )
-print data
+print(data)
parseTree = suite.parseString(data)
import pprint
diff --git a/src/examples/invRegex.py b/src/examples/invRegex.py
index 6493974..965fe8f 100644
--- a/src/examples/invRegex.py
+++ b/src/examples/invRegex.py
@@ -234,17 +234,17 @@ def main():
for t in tests:
t = t.strip()
if not t: continue
- print '-'*50
- print t
+ print('-'*50)
+ print(t)
try:
- print count(invert(t))
+ print(count(invert(t)))
for s in invert(t):
- print s
- except ParseFatalException,pfe:
- print pfe.msg
- print
+ print(s)
+ except ParseFatalException as pfe:
+ print(pfe.msg)
+ print()
continue
- print
+ print()
if __name__ == "__main__":
- main() \ No newline at end of file
+ main()
diff --git a/src/examples/jsonParser.py b/src/examples/jsonParser.py
index e1d71ac..5149b22 100644
--- a/src/examples/jsonParser.py
+++ b/src/examples/jsonParser.py
@@ -58,7 +58,7 @@ def convertNumbers(s,l,toks):
n = toks[0]
try:
return int(n)
- except ValueError, ve:
+ except ValueError as ve:
return float(n)
jsonNumber.setParseAction( convertNumbers )
@@ -97,10 +97,10 @@ if __name__ == "__main__":
import pprint
results = jsonObject.parseString(testdata)
pprint.pprint( results.asList() )
- print
+ print()
def testPrint(x):
- print type(x),repr(x)
- print results.glossary.GlossDiv.GlossList.keys()
+ print(type(x),repr(x))
+ print(list(results.glossary.GlossDiv.GlossList.keys()))
testPrint( results.glossary.title )
testPrint( results.glossary.GlossDiv.GlossList.ID )
testPrint( results.glossary.GlossDiv.GlossList.FalseValue )
diff --git a/src/examples/linenoExample.py b/src/examples/linenoExample.py
index 751822c..1186f48 100644
--- a/src/examples/linenoExample.py
+++ b/src/examples/linenoExample.py
@@ -18,11 +18,11 @@ of their country."""
def reportLongWords(st,locn,toks):
word = toks[0]
if len(word) > 3:
- print "Found '%s' on line %d at column %d" % (word, lineno(locn,st), col(locn,st))
- print "The full line of text was:"
- print "'%s'" % line(locn,st)
- print (" "*col(locn,st))+" ^"
- print
+ print("Found '%s' on line %d at column %d" % (word, lineno(locn,st), col(locn,st)))
+ print("The full line of text was:")
+ print("'%s'" % line(locn,st))
+ print((" "*col(locn,st))+" ^")
+ print()
wd = Word(alphas).setParseAction( reportLongWords )
OneOrMore(wd).parseString(data)
@@ -46,4 +46,4 @@ def createTokenObject(st,locn,toks):
wd = Word(alphas).setParseAction( createTokenObject )
for tokenObj in OneOrMore(wd).parseString(data):
- print tokenObj
+ print(tokenObj)
diff --git a/src/examples/list1.py b/src/examples/list1.py
index db3f34f..e410070 100644
--- a/src/examples/list1.py
+++ b/src/examples/list1.py
@@ -13,7 +13,7 @@ listStr = lbrack + delimitedList(listItem) + rbrack
test = "['a', 100, 3.14]"
-print listStr.parseString(test)
+print(listStr.parseString(test))
# second pass, cleanup and add converters
@@ -30,7 +30,7 @@ listStr = lbrack + delimitedList(listItem) + rbrack
test = "['a', 100, 3.14]"
-print listStr.parseString(test)
+print(listStr.parseString(test))
# third pass, add nested list support
cvtInt = lambda s,l,toks: int(toks[0])
@@ -47,4 +47,4 @@ listItem = real | integer | quotedString.setParseAction(removeQuotes) | Group(li
listStr << lbrack + delimitedList(listItem) + rbrack
test = "['a', 100, 3.14, [ +2.718, 'xyzzy', -1.414] ]"
-print listStr.parseString(test) \ No newline at end of file
+print(listStr.parseString(test)) \ No newline at end of file
diff --git a/src/examples/listAllMatches.py b/src/examples/listAllMatches.py
index fa28c8b..43f998b 100644
--- a/src/examples/listAllMatches.py
+++ b/src/examples/listAllMatches.py
@@ -10,27 +10,27 @@ from pyparsing import oneOf, OneOrMore, printables, StringEnd
test = "The quick brown fox named 'Aloysius' lives at 123 Main Street (and jumps over lazy dogs in his spare time)."
nonAlphas = [ c for c in printables if not c.isalpha() ]
-print "Extract vowels, consonants, and special characters from this test string:"
-print "'" + test + "'"
-print
+print("Extract vowels, consonants, and special characters from this test string:")
+print("'" + test + "'")
+print()
-print "Define grammar using normal results names"
-print "(only last matching symbol is saved)"
+print("Define grammar using normal results names")
+print("(only last matching symbol is saved)")
vowels = oneOf(list("aeiouy"), caseless=True).setResultsName("vowels")
cons = oneOf(list("bcdfghjklmnpqrstvwxz"), caseless=True).setResultsName("cons")
other = oneOf(list(nonAlphas)).setResultsName("others")
letters = OneOrMore(cons | vowels | other) + StringEnd()
results = letters.parseString(test)
-print results
-print results.vowels
-print results.cons
-print results.others
-print
+print(results)
+print(results.vowels)
+print(results.cons)
+print(results.others)
+print()
-print "Define grammar using results names, with listAllMatches=True"
-print "(all matching symbols are saved)"
+print("Define grammar using results names, with listAllMatches=True")
+print("(all matching symbols are saved)")
vowels = oneOf(list("aeiouy"), caseless=True).setResultsName("vowels",listAllMatches=True)
cons = oneOf(list("bcdfghjklmnpqrstvwxz"), caseless=True).setResultsName("cons",listAllMatches=True)
other = oneOf(list(nonAlphas)).setResultsName("others",listAllMatches=True)
@@ -38,15 +38,15 @@ other = oneOf(list(nonAlphas)).setResultsName("others",listAllMatches=True)
letters = OneOrMore(cons | vowels | other) + StringEnd()
results = letters.parseString(test)
-print results
-print sorted(list(set(results)))
-print
-print results.vowels
-print sorted(list(set(results.vowels)))
-print
-print results.cons
-print sorted(list(set(results.cons)))
-print
-print results.others
-print sorted(list(set(results.others)))
+print(results)
+print(sorted(list(set(results))))
+print()
+print(results.vowels)
+print(sorted(list(set(results.vowels))))
+print()
+print(results.cons)
+print(sorted(list(set(results.cons))))
+print()
+print(results.others)
+print(sorted(list(set(results.others))))
diff --git a/src/examples/lucene_grammar.py b/src/examples/lucene_grammar.py
index 27c7aeb..c098cce 100644
--- a/src/examples/lucene_grammar.py
+++ b/src/examples/lucene_grammar.py
@@ -316,15 +316,15 @@ failtests = r"""
""".splitlines()
allpass = True
-for t in filter(None,map(str.strip,tests)):
- print t
+for t in [_f for _f in map(str.strip,tests) if _f]:
+ print(t)
try:
#~ expression.parseString(t,parseAll=True)
- print expression.parseString(t,parseAll=True)
- except ParseException, pe:
- print t
- print pe
+ print(expression.parseString(t,parseAll=True))
+ except ParseException as pe:
+ print(t)
+ print(pe)
allpass = False
- print
+ print()
-print ("OK", "FAIL")[not allpass]
+print(("OK", "FAIL")[not allpass])
diff --git a/src/examples/macroExpander.py b/src/examples/macroExpander.py
index 2bd599f..89562a4 100644
--- a/src/examples/macroExpander.py
+++ b/src/examples/macroExpander.py
@@ -29,7 +29,7 @@ macros = {}
def processMacroDefn(s,l,t):
macroVal = macroExpander.transformString(t.value)
macros[t.macro] = macroVal
- macroExpr << MatchFirst( map(Keyword,macros.keys()) )
+ macroExpr << MatchFirst( list(map(Keyword,list(macros.keys()))) )
return "#def " + t.macro + " " + macroVal
# parse action to replace macro references with their respective definition
@@ -55,5 +55,5 @@ testString = """
typedef char[ALEN] Acharbuf;
"""
-print macroExpander.transformString(testString)
-print macros
+print(macroExpander.transformString(testString))
+print(macros)
diff --git a/src/examples/makeHTMLTagExample.py b/src/examples/makeHTMLTagExample.py
index e3baf40..3b771c7 100644
--- a/src/examples/makeHTMLTagExample.py
+++ b/src/examples/makeHTMLTagExample.py
@@ -1,9 +1,9 @@
-import urllib
+import urllib.request, urllib.parse, urllib.error
from pyparsing import makeHTMLTags, SkipTo
# read HTML from a web page
-serverListPage = urllib.urlopen( "http://www.yahoo.com" )
+serverListPage = urllib.request.urlopen( "http://www.yahoo.com" )
htmlText = serverListPage.read()
serverListPage.close()
@@ -18,4 +18,4 @@ anchor = anchorStart + SkipTo(anchorEnd)("body") + anchorEnd
# (note the href attribute of the opening A tag is available
# as an attribute in the returned parse results)
for tokens,start,end in anchor.scanString(htmlText):
- print tokens.body,'->',tokens.href
+ print(tokens.body,'->',tokens.href)
diff --git a/src/examples/matchPreviousDemo.py b/src/examples/matchPreviousDemo.py
index 91d7eb1..f0812e9 100644
--- a/src/examples/matchPreviousDemo.py
+++ b/src/examples/matchPreviousDemo.py
@@ -30,4 +30,4 @@ classDefn = classHead + classBody + classEnd
# classDefn = classHead + classBody - classEnd
for tokens in classDefn.searchString(src):
- print tokens.classname \ No newline at end of file
+ print(tokens.classname) \ No newline at end of file
diff --git a/src/examples/mozillaCalendarParser.py b/src/examples/mozillaCalendarParser.py
index 15929a6..2805c95 100644
--- a/src/examples/mozillaCalendarParser.py
+++ b/src/examples/mozillaCalendarParser.py
@@ -1,4 +1,3 @@
-# -*- coding: UTF-8 -*-
from pyparsing import Optional, oneOf, Dict, Literal, Word, printables, Group, OneOrMore, ZeroOrMore
"""
@@ -17,7 +16,7 @@ License: Free for any use
BEGIN = Literal("BEGIN:").suppress()
END = Literal("END:").suppress()
-str = printables + "äöåÖÄÅ"
+str = printables + "\xe4\xf6\xe5\xd6\xc4\xc5"
valstr = str + " "
EQ = Literal("=").suppress()
@@ -71,7 +70,7 @@ calendars = OneOrMore(calendar)
def gotEvent(s,loc,toks):
for event in toks:
- print event['summary'], "from", event["begin"], "to", event["end"]
+ print (event['summary'], "from", event["begin"], "to", event["end"])
event.setParseAction(gotEvent)
diff --git a/src/examples/nested.py b/src/examples/nested.py
index bd529f5..24cf2f4 100644
--- a/src/examples/nested.py
+++ b/src/examples/nested.py
@@ -22,9 +22,9 @@ data = """
# use {}'s for nested lists
nestedItems = nestedExpr("{", "}")
-print( (nestedItems+stringEnd).parseString(data).asList() )
+print(( (nestedItems+stringEnd).parseString(data).asList() ))
# use default delimiters of ()'s
mathExpr = nestedExpr()
-print( mathExpr.parseString( "((( ax + by)*C) *(Z | (E^F) & D))") )
+print(( mathExpr.parseString( "((( ax + by)*C) *(Z | (E^F) & D))") ))
diff --git a/src/examples/oc.py b/src/examples/oc.py
index 0cd6990..5a1ff8e 100644
--- a/src/examples/oc.py
+++ b/src/examples/oc.py
@@ -192,4 +192,4 @@ main()
ast = program.parseString(test,parseAll=True)
import pprint
-pprint.pprint(ast.asList()) \ No newline at end of file
+pprint.pprint(ast.asList())
diff --git a/src/examples/parseListString.py b/src/examples/parseListString.py
index eb784c7..a6f07a1 100644
--- a/src/examples/parseListString.py
+++ b/src/examples/parseListString.py
@@ -18,7 +18,7 @@ listStr = lbrack + delimitedList(listItem) + rbrack
test = "['a', 100, 3.14]"
-print listStr.parseString(test)
+print(listStr.parseString(test))
# second pass, cleanup and add converters
@@ -35,7 +35,7 @@ listStr = lbrack + delimitedList(listItem) + rbrack
test = "['a', 100, 3.14]"
-print listStr.parseString(test)
+print(listStr.parseString(test))
# third pass, add nested list support, and tuples, too!
cvtInt = lambda s,l,toks: int(toks[0])
@@ -54,7 +54,7 @@ tupleStr.setParseAction( lambda t:tuple(t.asList()) )
listStr << lbrack + delimitedList(listItem) + Optional(Suppress(",")) + rbrack
test = "['a', 100, ('A', [101,102]), 3.14, [ +2.718, 'xyzzy', -1.414] ]"
-print listStr.parseString(test)
+print(listStr.parseString(test))
# fourth pass, just parsing tuples of numbers
#~ from pyparsing import *
@@ -97,4 +97,4 @@ listStr << lbrack + delimitedList(listItem) + Optional(Suppress(",")) + rbrack
dictStr << rbrace + delimitedList( Group( listItem + colon + listItem ) ) + rbrace
test = "['a', 100, ('A', [101,102]), 3.14, [ +2.718, 'xyzzy', -1.414] ]"
test = '[{0: [2], 1: []}, {0: [], 1: [], 2: []}, {0: [1, 2]}]'
-print listStr.parseString(test)
+print(listStr.parseString(test))
diff --git a/src/examples/parsePythonValue.py b/src/examples/parsePythonValue.py
index d9671fa..bc17aed 100644
--- a/src/examples/parsePythonValue.py
+++ b/src/examples/parsePythonValue.py
@@ -56,12 +56,12 @@ tests = """['a', 100, ('A', [101,102]), 3.14, [ +2.718, 'xyzzy', -1.414] ]
'a quoted string'""".split("\n")
for test in tests:
- print "Test:", test.strip()
+ print("Test:", test.strip())
result = listItem.parseString(test)[0]
- print "Result:", result
+ print("Result:", result)
try:
for dd in result:
- if isinstance(dd,dict): print dd.items()
- except TypeError,te:
+ if isinstance(dd,dict): print(list(dd.items()))
+ except TypeError as te:
pass
- print
+ print()
diff --git a/src/examples/parseResultsSumExample.py b/src/examples/parseResultsSumExample.py
index 6da5e84..1fb694a 100644
--- a/src/examples/parseResultsSumExample.py
+++ b/src/examples/parseResultsSumExample.py
@@ -18,9 +18,9 @@ person_data = dob_ref | id_ref | info_ref
for test in (samplestr1,samplestr2,samplestr3,samplestr4,):
person = sum(person_data.searchString(test))
- print person.id
- print person.dump()
- print
+ print(person.id)
+ print(person.dump())
+ print()
\ No newline at end of file
diff --git a/src/examples/partial_gene_match.py b/src/examples/partial_gene_match.py
index 57723c1..8bf5f7c 100644
--- a/src/examples/partial_gene_match.py
+++ b/src/examples/partial_gene_match.py
@@ -5,10 +5,10 @@
# mismatches
from pyparsing import *
-import urllib
+import urllib.request, urllib.parse, urllib.error
# read in a bunch of genomic data
-datafile = urllib.urlopen("http://toxodb.org/common/downloads/release-6.0/Tgondii/TgondiiApicoplastORFsNAs_ToxoDB-6.0.fasta")
+datafile = urllib.request.urlopen("http://toxodb.org/common/downloads/release-6.0/Tgondii/TgondiiApicoplastORFsNAs_ToxoDB-6.0.fasta")
fastasrc = datafile.read()
datafile.close()
@@ -72,17 +72,17 @@ class CloseMatch(Token):
# using the genedata extracted above, look for close matches of a gene sequence
searchseq = CloseMatch("TTAAATCTAGAAGAT", 3)
for g in genedata:
- print "%s (%d)" % (g.id, g.genelen)
- print "-"*24
+ print("%s (%d)" % (g.id, g.genelen))
+ print("-"*24)
for t,startLoc,endLoc in searchseq.scanString(g.gene, overlap=True):
matched, mismatches = t[0]
- print "MATCH:", searchseq.sequence
- print "FOUND:", matched
+ print("MATCH:", searchseq.sequence)
+ print("FOUND:", matched)
if mismatches:
- print " ", ''.join(' ' if i not in mismatches else '*'
- for i,c in enumerate(searchseq.sequence))
+ print(" ", ''.join(' ' if i not in mismatches else '*'
+ for i,c in enumerate(searchseq.sequence)))
else:
- print "<exact match>"
- print "at location", startLoc
- print
- print \ No newline at end of file
+ print("<exact match>")
+ print("at location", startLoc)
+ print()
+ print() \ No newline at end of file
diff --git a/src/examples/pgn.py b/src/examples/pgn.py
index 8f6574d..c645f73 100644
--- a/src/examples/pgn.py
+++ b/src/examples/pgn.py
@@ -61,10 +61,10 @@ pgnGrammar = Suppress(ZeroOrMore(tag)) + ZeroOrMore(move) + Optional(Suppress(g
def parsePGN( pgn, bnf=pgnGrammar, fn=None ):
try:
return bnf.parseString( pgn )
- except ParseException, err:
- print err.line
- print " "*(err.column-1) + "^"
- print err
+ except ParseException as err:
+ print(err.line)
+ print(" "*(err.column-1) + "^")
+ print(err)
if __name__ == "__main__":
# input string
@@ -91,4 +91,4 @@ Bxe5 Rxe5 21. Rg5 Rxe1# {Black wins} 0-1
"""
# parse input string
tokens = parsePGN(pgn, pgnGrammar)
- print "tokens = ", tokens
+ print("tokens = ", tokens)
diff --git a/src/examples/pgn.pyc b/src/examples/pgn.pyc
deleted file mode 100644
index ee2d88f..0000000
--- a/src/examples/pgn.pyc
+++ /dev/null
Binary files differ
diff --git a/src/examples/pymicko.py b/src/examples/pymicko.py
index 1ae4efe..b136689 100644
--- a/src/examples/pymicko.py
+++ b/src/examples/pymicko.py
@@ -361,8 +361,8 @@ class SymbolTable(object):
attr_name = "Attribute"
attr_len = max(max(len(i.attribute_str()) for i in self.table),len(attr_name))
#print table header
- print "{0:3s} | {1:^{2}s} | {3:^{4}s} | {5:^{6}s} | {7:^{8}} | {9:s}".format(" No", sym_name, sym_len, kind_name, kind_len, type_name, type_len, attr_name, attr_len, "Parameters")
- print "-----------------------------" + "-" * (sym_len + kind_len + type_len + attr_len)
+ print("{0:3s} | {1:^{2}s} | {3:^{4}s} | {5:^{6}s} | {7:^{8}} | {9:s}".format(" No", sym_name, sym_len, kind_name, kind_len, type_name, type_len, attr_name, attr_len, "Parameters"))
+ print("-----------------------------" + "-" * (sym_len + kind_len + type_len + attr_len))
#print symbol table
for i,sym in enumerate(self.table):
parameters = ""
@@ -371,7 +371,7 @@ class SymbolTable(object):
parameters = "{0}".format(SharedData.TYPES[p])
else:
parameters += ", {0}".format(SharedData.TYPES[p])
- print "{0:3d} | {1:^{2}s} | {3:^{4}s} | {5:^{6}s} | {7:^{8}} | ({9})".format(i, sym.name, sym_len, SharedData.KINDS[sym.kind], kind_len, SharedData.TYPES[sym.type], type_len, sym.attribute_str(), attr_len, parameters)
+ print("{0:3d} | {1:^{2}s} | {3:^{4}s} | {5:^{6}s} | {7:^{8}} | ({9})".format(i, sym.name, sym_len, SharedData.KINDS[sym.kind], kind_len, SharedData.TYPES[sym.type], type_len, sym.attribute_str(), attr_len, parameters))
def insert_symbol(self, sname, skind, stype):
"""Inserts new symbol at the end of the symbol table.
@@ -392,7 +392,7 @@ class SymbolTable(object):
self.error()
self.table_len = len(self.table)
- def lookup_symbol(self, sname, skind=SharedData.KINDS.keys(), stype=SharedData.TYPES.keys()):
+ def lookup_symbol(self, sname, skind=list(SharedData.KINDS.keys()), stype=list(SharedData.TYPES.keys())):
"""Searches for symbol, from the end to the begining.
Returns symbol index or None
sname - symbol name
@@ -548,7 +548,7 @@ class CodeGenerator(object):
#suffix for label definition
self.definition = ":"
#list of free working registers
- self.free_registers = range(SharedData.FUNCTION_REGISTER, -1, -1)
+ self.free_registers = list(range(SharedData.FUNCTION_REGISTER, -1, -1))
#list of used working registers
self.used_registers = []
#list of used registers needed when function call is inside of a function call
@@ -923,7 +923,7 @@ class MicroC(object):
msg += ": %s" % message
if print_location and (exshared.location != None):
msg += "\n%s" % wtext
- print msg
+ print(msg)
def data_begin_action(self):
@@ -938,7 +938,7 @@ class MicroC(object):
"""Code executed after recognising a global variable"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "GLOBAL_VAR:",var
+ print("GLOBAL_VAR:",var)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.insert_global_var(var.name, var.type)
@@ -949,7 +949,7 @@ class MicroC(object):
"""Code executed after recognising a local variable"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "LOCAL_VAR:",var, var.name, var.type
+ print("LOCAL_VAR:",var, var.name, var.type)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.insert_local_var(var.name, var.type, self.shared.function_vars)
@@ -960,7 +960,7 @@ class MicroC(object):
"""Code executed after recognising a parameter"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "PARAM:",par
+ print("PARAM:",par)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.insert_parameter(par.name, par.type)
@@ -971,7 +971,7 @@ class MicroC(object):
"""Code executed after recognising a constant"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "CONST:",const
+ print("CONST:",const)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
return self.symtab.insert_constant(const[0], const[1])
@@ -980,7 +980,7 @@ class MicroC(object):
"""Code executed after recognising a function definition (type and function name)"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "FUN_BEGIN:",fun
+ print("FUN_BEGIN:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
self.shared.function_index = self.symtab.insert_function(fun.name, fun.type)
@@ -993,7 +993,7 @@ class MicroC(object):
"""Code executed after recognising the beginning of function's body"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "FUN_BODY:",fun
+ print("FUN_BODY:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
self.codegen.function_body()
@@ -1001,7 +1001,7 @@ class MicroC(object):
def function_end_action(self, text, loc, fun):
"""Code executed at the end of function definition"""
if DEBUG > 0:
- print "FUN_END:",fun
+ print("FUN_END:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#set function's attribute to number of function parameters
@@ -1014,7 +1014,7 @@ class MicroC(object):
"""Code executed after recognising a return statement"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "RETURN:",ret
+ print("RETURN:",ret)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
if not self.symtab.same_types(self.shared.function_index, ret.exp[0]):
@@ -1031,7 +1031,7 @@ class MicroC(object):
"""Code executed after recognising an identificator in expression"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "EXP_VAR:",var
+ print("EXP_VAR:",var)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
var_index = self.symtab.lookup_symbol(var.name, [SharedData.KINDS.GLOBAL_VAR, SharedData.KINDS.PARAMETER, SharedData.KINDS.LOCAL_VAR])
@@ -1043,7 +1043,7 @@ class MicroC(object):
"""Code executed after recognising an assignment statement"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "ASSIGN:",assign
+ print("ASSIGN:",assign)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
var_index = self.symtab.lookup_symbol(assign.var, [SharedData.KINDS.GLOBAL_VAR, SharedData.KINDS.PARAMETER, SharedData.KINDS.LOCAL_VAR])
@@ -1057,7 +1057,7 @@ class MicroC(object):
"""Code executed after recognising a mulexp expression (something *|/ something)"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "MUL_EXP:",mul
+ print("MUL_EXP:",mul)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#iterate through all multiplications/divisions
@@ -1074,7 +1074,7 @@ class MicroC(object):
"""Code executed after recognising a numexp expression (something +|- something)"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "NUM_EXP:",num
+ print("NUM_EXP:",num)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#iterate through all additions/substractions
@@ -1091,7 +1091,7 @@ class MicroC(object):
"""Code executed after recognising a function call (type and function name)"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "FUN_PREP:",fun
+ print("FUN_PREP:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.lookup_symbol(fun.name, SharedData.KINDS.FUNCTION)
@@ -1108,7 +1108,7 @@ class MicroC(object):
"""Code executed after recognising each of function's arguments"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "ARGUMENT:",arg.exp
+ print("ARGUMENT:",arg.exp)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
arg_ordinal = len(self.function_arguments)
@@ -1121,7 +1121,7 @@ class MicroC(object):
"""Code executed after recognising the whole function call"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "FUN_CALL:",fun
+ print("FUN_CALL:",fun)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#check number of arguments
@@ -1143,7 +1143,7 @@ class MicroC(object):
def relexp_action(self, text, loc, arg):
"""Code executed after recognising a relexp expression (something relop something)"""
if DEBUG > 0:
- print "REL_EXP:",arg
+ print("REL_EXP:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
exshared.setpos(loc, text)
@@ -1158,7 +1158,7 @@ class MicroC(object):
"""Code executed after recognising a andexp expression (something and something)"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "AND+EXP:",arg
+ print("AND+EXP:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
label = self.codegen.label("false{0}".format(self.false_label_number), True, False)
@@ -1170,7 +1170,7 @@ class MicroC(object):
"""Code executed after recognising logexp expression (something or something)"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "LOG_EXP:",arg
+ print("LOG_EXP:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
label = self.codegen.label("true{0}".format(self.label_number), True, False)
@@ -1182,7 +1182,7 @@ class MicroC(object):
"""Code executed after recognising an if statement (if keyword)"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "IF_BEGIN:",arg
+ print("IF_BEGIN:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
self.false_label_number += 1
@@ -1193,7 +1193,7 @@ class MicroC(object):
"""Code executed after recognising if statement's body"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "IF_BODY:",arg
+ print("IF_BODY:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#generate conditional jump (based on last compare)
@@ -1209,7 +1209,7 @@ class MicroC(object):
"""Code executed after recognising if statement's else body"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "IF_ELSE:",arg
+ print("IF_ELSE:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#jump to exit after all statements for true condition are executed
@@ -1224,7 +1224,7 @@ class MicroC(object):
"""Code executed after recognising a whole if statement"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "IF_END:",arg
+ print("IF_END:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
self.codegen.newline_label("exit{0}".format(self.label_stack.pop()), True, True)
@@ -1233,7 +1233,7 @@ class MicroC(object):
"""Code executed after recognising a while statement (while keyword)"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "WHILE_BEGIN:",arg
+ print("WHILE_BEGIN:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
self.false_label_number += 1
@@ -1244,7 +1244,7 @@ class MicroC(object):
"""Code executed after recognising while statement's body"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "WHILE_BODY:",arg
+ print("WHILE_BODY:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#generate conditional jump (based on last compare)
@@ -1259,7 +1259,7 @@ class MicroC(object):
"""Code executed after recognising a whole while statement"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "WHILE_END:",arg
+ print("WHILE_END:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#jump to condition checking after while statement body
@@ -1274,7 +1274,7 @@ class MicroC(object):
"""Checks if there is a 'main' function and the type of 'main' function"""
exshared.setpos(loc, text)
if DEBUG > 0:
- print "PROGRAM_END:",arg
+ print("PROGRAM_END:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.lookup_symbol("main",SharedData.KINDS.FUNCTION)
@@ -1287,22 +1287,22 @@ class MicroC(object):
"""Parse string (helper function)"""
try:
return self.rProgram.ignore(cStyleComment).parseString(text, parseAll=True)
- except SemanticException, err:
- print err
+ except SemanticException as err:
+ print(err)
exit(3)
- except ParseException, err:
- print err
+ except ParseException as err:
+ print(err)
exit(3)
def parse_file(self,filename):
"""Parse file (helper function)"""
try:
return self.rProgram.ignore(cStyleComment).parseFile(filename, parseAll=True)
- except SemanticException, err:
- print err
+ except SemanticException as err:
+ print(err)
exit(3)
- except ParseException, err:
- print err
+ except ParseException as err:
+ print(err)
exit(3)
##########################################################################################
@@ -1323,12 +1323,12 @@ if 0:
usage = """Usage: {0} [input_file [output_file]]
If output file is omitted, output.asm is used
If input file is omitted, stdin is used""".format(argv[0])
- print usage
+ print(usage)
exit(1)
try:
parse = stdin if input_file == stdin else open(input_file,'r')
except Exception:
- print "Input file '%s' open error" % input_file
+ print("Input file '%s' open error" % input_file)
exit(2)
mc.parse_file(parse)
#if you want to see the final symbol table, uncomment next line
@@ -1338,7 +1338,7 @@ if 0:
out.write(mc.codegen.code)
out.close
except Exception:
- print "Output file '%s' open error" % output_file
+ print("Output file '%s' open error" % output_file)
exit(2)
##########################################################################################
@@ -1384,4 +1384,4 @@ if __name__ == "__main__":
mc = MicroC()
mc.parse_text(test_program_example)
- print mc.codegen.code \ No newline at end of file
+ print(mc.codegen.code) \ No newline at end of file
diff --git a/src/examples/pythonGrammarParser.py b/src/examples/pythonGrammarParser.py
index eb9c560..f0631b8 100644
--- a/src/examples/pythonGrammarParser.py
+++ b/src/examples/pythonGrammarParser.py
@@ -138,7 +138,7 @@ class SemanticGroup(object):
def __str__(self):
return "%s(%s)" % (self.label,
- " ".join([isinstance(c,basestring) and c or str(c) for c in self.contents]) )
+ " ".join([isinstance(c,str) and c or str(c) for c in self.contents]) )
class OrList(SemanticGroup):
label = "OR"
@@ -158,7 +158,7 @@ class Atom(SemanticGroup):
self.rep = contents[1]
else:
self.rep = ""
- if isinstance(contents,basestring):
+ if isinstance(contents,str):
self.contents = contents
else:
self.contents = contents[0]
@@ -212,9 +212,9 @@ assert len(bnfDefs) == expected, \
# list out defns in order they were parsed (to verify accuracy of parsing)
for k,v in bnfDefs:
- print k,"=",v
-print
+ print(k,"=",v)
+print()
# list out parsed grammar defns (demonstrates dictionary access to parsed tokens)
-for k in bnfDefs.keys():
- print k,"=",bnfDefs[k]
+for k in list(bnfDefs.keys()):
+ print(k,"=",bnfDefs[k])
diff --git a/src/examples/readJson.py b/src/examples/readJson.py
index 8ec2f99..deca53b 100644
--- a/src/examples/readJson.py
+++ b/src/examples/readJson.py
@@ -1908,10 +1908,10 @@ data = jsonObject.parseString(s)
#~ pprint( data[0].asList() )
#~ print
#~ print data.dump()
-print data.phedex.call_time
-print data.phedex.instance
-print data.phedex.request_call
-print len(data.phedex.request)
+print(data.phedex.call_time)
+print(data.phedex.instance)
+print(data.phedex.request_call)
+print(len(data.phedex.request))
for req in data.phedex.request[:10]:
#~ print req.dump()
- print "-", req.id, req.last_update
+ print("-", req.id, req.last_update)
diff --git a/src/examples/removeLineBreaks.py b/src/examples/removeLineBreaks.py
index 232034f..ba4b498 100644
--- a/src/examples/removeLineBreaks.py
+++ b/src/examples/removeLineBreaks.py
@@ -38,7 +38,7 @@ test = """
the aid of their
country.
"""
-print para.transformString(test)
+print(para.transformString(test))
# process an entire file
z = para.transformString(file("Successful Methods of Public Speaking.txt").read())
diff --git a/src/examples/romanNumerals.py b/src/examples/romanNumerals.py
index 1561ac8..03605be 100644
--- a/src/examples/romanNumerals.py
+++ b/src/examples/romanNumerals.py
@@ -56,13 +56,13 @@ tests = " ".join([makeRomanNumeral(i) for i in range(1,5000+1)])
expected = 1
for t,s,e in romanNumeral.scanString(tests):
if t[0] != expected:
- print "==>",
- print t,tests[s:e]
+ print("==>", end=' ')
+ print(t,tests[s:e])
expected += 1
-print
+print()
def test(rn):
- print rn,romanNumeral.parseString(rn)
+ print(rn,romanNumeral.parseString(rn))
test("XVI")
test("XXXIX")
test("XIV")
diff --git a/src/examples/scanExamples.py b/src/examples/scanExamples.py
index 995ace9..24ae0e7 100644
--- a/src/examples/scanExamples.py
+++ b/src/examples/scanExamples.py
@@ -20,34 +20,34 @@ CORBA::initORB("xyzzy", USERNAME, PASSWORD );
"""
#################
-print "Example of an extractor"
-print "----------------------"
+print("Example of an extractor")
+print("----------------------")
# simple grammar to match #define's
ident = Word(alphas, alphanums+"_")
macroDef = Literal("#define") + ident.setResultsName("name") + "=" + restOfLine.setResultsName("value")
for t,s,e in macroDef.scanString( testData ):
- print t.name,":", t.value
+ print(t.name,":", t.value)
# or a quick way to make a dictionary of the names and values
# (return only key and value tokens, and construct dict from key-value pairs)
# - empty ahead of restOfLine advances past leading whitespace, does implicit lstrip during parsing
macroDef = Suppress("#define") + ident + Suppress("=") + empty + restOfLine
macros = dict(list(macroDef.searchString(testData)))
-print "macros =", macros
-print
+print("macros =", macros)
+print()
#################
-print "Examples of a transformer"
-print "----------------------"
+print("Examples of a transformer")
+print("----------------------")
# convert C++ namespaces to mangled C-compatible names
scopedIdent = ident + OneOrMore( Literal("::").suppress() + ident )
scopedIdent.setParseAction(lambda t: "_".join(t))
-print "(replace namespace-scoped names with C-compatible names)"
-print scopedIdent.transformString( testData )
+print("(replace namespace-scoped names with C-compatible names)")
+print(scopedIdent.transformString( testData ))
# or a crude pre-processor (use parse actions to replace matching text)
@@ -57,14 +57,14 @@ def substituteMacro(s,l,t):
ident.setParseAction( substituteMacro )
ident.ignore(macroDef)
-print "(simulate #define pre-processor)"
-print ident.transformString( testData )
+print("(simulate #define pre-processor)")
+print(ident.transformString( testData ))
#################
-print "Example of a stripper"
-print "----------------------"
+print("Example of a stripper")
+print("----------------------")
from pyparsing import dblQuotedString, LineStart
@@ -72,4 +72,4 @@ from pyparsing import dblQuotedString, LineStart
stringMacroDef = Literal("#define") + ident + "=" + dblQuotedString + LineStart()
stringMacroDef.setParseAction( replaceWith("") )
-print stringMacroDef.transformString( testData )
+print(stringMacroDef.transformString( testData ))
diff --git a/src/examples/scanYahoo.py b/src/examples/scanYahoo.py
index 0596fad..825c169 100644
--- a/src/examples/scanYahoo.py
+++ b/src/examples/scanYahoo.py
@@ -1,7 +1,7 @@
from pyparsing import makeHTMLTags,SkipTo,htmlComment
-import urllib
+import urllib.request, urllib.parse, urllib.error
-serverListPage = urllib.urlopen( "http://www.yahoo.com" )
+serverListPage = urllib.request.urlopen( "http://www.yahoo.com" )
htmlText = serverListPage.read()
serverListPage.close()
@@ -11,4 +11,4 @@ link = aStart + SkipTo(aEnd).setResultsName("link") + aEnd
link.ignore(htmlComment)
for toks,start,end in link.scanString(htmlText):
- print toks.link, "->", toks.startA.href \ No newline at end of file
+ print(toks.link, "->", toks.startA.href) \ No newline at end of file
diff --git a/src/examples/searchParserAppDemo.py b/src/examples/searchParserAppDemo.py
index b6ea064..57749f4 100644
--- a/src/examples/searchParserAppDemo.py
+++ b/src/examples/searchParserAppDemo.py
@@ -29,6 +29,6 @@ tests = """\
prune and grape""".splitlines()
for t in tests:
- print t.strip()
- print parser.Parse(t)
- print \ No newline at end of file
+ print(t.strip())
+ print(parser.Parse(t))
+ print() \ No newline at end of file
diff --git a/src/examples/searchparser.py b/src/examples/searchparser.py
index f9ee428..86e30ca 100644
--- a/src/examples/searchparser.py
+++ b/src/examples/searchparser.py
@@ -247,14 +247,14 @@ class ParserTest(SearchQueryParser):
}
def GetWord(self, word):
- if (self.index.has_key(word)):
+ if (word in self.index):
return self.index[word]
else:
return Set()
def GetWordWildcard(self, word):
result = Set()
- for item in self.index.keys():
+ for item in list(self.index.keys()):
if word == item[0:len(word)]:
result = result.union(self.index[item])
return result
@@ -267,27 +267,27 @@ class ParserTest(SearchQueryParser):
return result
def GetNot(self, not_set):
- all = Set(self.docs.keys())
+ all = Set(list(self.docs.keys()))
return all.difference(not_set)
def Test(self):
all_ok = True
- for item in self.tests.keys():
- print item
+ for item in list(self.tests.keys()):
+ print(item)
r = self.Parse(item)
e = self.tests[item]
- print 'Result: %s' % r
- print 'Expect: %s' % e
+ print('Result: %s' % r)
+ print('Expect: %s' % e)
if e == r:
- print 'Test OK'
+ print('Test OK')
else:
all_ok = False
- print '>>>>>>>>>>>>>>>>>>>>>>Test ERROR<<<<<<<<<<<<<<<<<<<<<'
- print ''
+ print('>>>>>>>>>>>>>>>>>>>>>>Test ERROR<<<<<<<<<<<<<<<<<<<<<')
+ print('')
return all_ok
if __name__=='__main__':
if ParserTest().Test():
- print 'All tests OK'
+ print('All tests OK')
else:
- print 'One or more tests FAILED'
+ print('One or more tests FAILED')
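
Note: the searchparser edit replaces dict.has_key(), removed in Python 3, with the in operator, which also works in Python 2. A minimal sketch with a throwaway index:

    index = {"word": {1, 2}}          # throwaway example data
    # Python 2 only: if index.has_key("word"):
    if "word" in index:               # portable membership test
        print(index["word"])
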
diff --git a/src/examples/select_parser.py b/src/examples/select_parser.py
index 45c7d01..7326cc9 100644
--- a/src/examples/select_parser.py
+++ b/src/examples/select_parser.py
@@ -1,135 +1,135 @@
-# select_parser.py
-# Copyright 2010, Paul McGuire
-#
-# a simple SELECT statement parser, taken from SQLite's SELECT statement
-# definition at http://www.sqlite.org/lang_select.html
-#
-from pyparsing import *
-ParserElement.enablePackrat()
-
-LPAR,RPAR,COMMA = map(Suppress,"(),")
-select_stmt = Forward().setName("select statement")
-
-# keywords
-(UNION, ALL, AND, INTERSECT, EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER,
- CROSS, LEFT, OUTER, JOIN, AS, INDEXED, NOT, SELECT, DISTINCT, FROM, WHERE, GROUP, BY,
- HAVING, ORDER, BY, LIMIT, OFFSET, OR) = map(CaselessKeyword, """UNION, ALL, AND, INTERSECT,
- EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER, CROSS, LEFT, OUTER, JOIN, AS, INDEXED, NOT, SELECT,
- DISTINCT, FROM, WHERE, GROUP, BY, HAVING, ORDER, BY, LIMIT, OFFSET, OR""".replace(",","").split())
-(CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE, END, CASE, WHEN, THEN, EXISTS,
- COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE, CURRENT_TIME, CURRENT_DATE,
- CURRENT_TIMESTAMP) = map(CaselessKeyword, """CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE,
- END, CASE, WHEN, THEN, EXISTS, COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE,
- CURRENT_TIME, CURRENT_DATE, CURRENT_TIMESTAMP""".replace(",","").split())
-keyword = MatchFirst((UNION, ALL, INTERSECT, EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER,
- CROSS, LEFT, OUTER, JOIN, AS, INDEXED, NOT, SELECT, DISTINCT, FROM, WHERE, GROUP, BY,
- HAVING, ORDER, BY, LIMIT, OFFSET, CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE, END, CASE, WHEN, THEN, EXISTS,
- COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE, CURRENT_TIME, CURRENT_DATE,
- CURRENT_TIMESTAMP))
-
-identifier = ~keyword + Word(alphas, alphanums+"_")
-collation_name = identifier.copy()
-column_name = identifier.copy()
-column_alias = identifier.copy()
-table_name = identifier.copy()
-table_alias = identifier.copy()
-index_name = identifier.copy()
-function_name = identifier.copy()
-parameter_name = identifier.copy()
-database_name = identifier.copy()
-
-# expression
-expr = Forward().setName("expression")
-
-integer = Regex(r"[+-]?\d+")
-numeric_literal = Regex(r"\d+(\.\d*)?([eE][+-]?\d+)?")
-string_literal = QuotedString("'")
-blob_literal = Regex(r"[xX]'[0-9A-Fa-f]+'")
-literal_value = ( numeric_literal | string_literal | blob_literal |
- NULL | CURRENT_TIME | CURRENT_DATE | CURRENT_TIMESTAMP )
-bind_parameter = (
- Word("?",nums) |
- Combine(oneOf(": @ $") + parameter_name)
- )
-type_name = oneOf("TEXT REAL INTEGER BLOB NULL")
-
-expr_term = (
- CAST + LPAR + expr + AS + type_name + RPAR |
- EXISTS + LPAR + select_stmt + RPAR |
- function_name.setName("function_name") + LPAR + Optional(delimitedList(expr)) + RPAR |
- literal_value |
- bind_parameter |
- Combine(identifier+('.'+identifier)*(0,2)).setName("ident")
- )
-
-UNARY,BINARY,TERNARY=1,2,3
-expr << operatorPrecedence(expr_term,
- [
- (oneOf('- + ~') | NOT, UNARY, opAssoc.RIGHT),
- (ISNULL | NOTNULL | NOT + NULL, UNARY, opAssoc.LEFT),
- ('||', BINARY, opAssoc.LEFT),
- (oneOf('* / %'), BINARY, opAssoc.LEFT),
- (oneOf('+ -'), BINARY, opAssoc.LEFT),
- (oneOf('<< >> & |'), BINARY, opAssoc.LEFT),
- (oneOf('< <= > >='), BINARY, opAssoc.LEFT),
- (oneOf('= == != <>') | IS | IN | LIKE | GLOB | MATCH | REGEXP, BINARY, opAssoc.LEFT),
- ('||', BINARY, opAssoc.LEFT),
- ((BETWEEN,AND), TERNARY, opAssoc.LEFT),
- (IN + LPAR + Group(select_stmt | delimitedList(expr)) + RPAR, UNARY, opAssoc.LEFT),
- (AND, BINARY, opAssoc.LEFT),
- (OR, BINARY, opAssoc.LEFT),
- ])
-
-compound_operator = (UNION + Optional(ALL) | INTERSECT | EXCEPT)
-
-ordering_term = Group(expr('order_key') + Optional(COLLATE + collation_name('collate')) + Optional(ASC | DESC)('direction'))
-
-join_constraint = Group(Optional(ON + expr | USING + LPAR + Group(delimitedList(column_name)) + RPAR))
-
-join_op = COMMA | Group(Optional(NATURAL) + Optional(INNER | CROSS | LEFT + OUTER | LEFT | OUTER) + JOIN)
-
-join_source = Forward()
-single_source = ( (Group(database_name("database") + "." + table_name("table")) | table_name("table")) +
- Optional(Optional(AS) + table_alias("table_alias")) +
- Optional(INDEXED + BY + index_name("name") | NOT + INDEXED)("index") |
- (LPAR + select_stmt + RPAR + Optional(Optional(AS) + table_alias)) |
- (LPAR + join_source + RPAR) )
-
-join_source << (Group(single_source + OneOrMore(join_op + single_source + join_constraint)) |
- single_source)
-
-result_column = "*" | table_name + "." + "*" | Group(expr + Optional(Optional(AS) + column_alias))
-select_core = (SELECT + Optional(DISTINCT | ALL) + Group(delimitedList(result_column))("columns") +
- Optional(FROM + join_source("from")) +
- Optional(WHERE + expr("where_expr")) +
- Optional(GROUP + BY + Group(delimitedList(ordering_term)("group_by_terms")) +
- Optional(HAVING + expr("having_expr"))))
-
-select_stmt << (select_core + ZeroOrMore(compound_operator + select_core) +
- Optional(ORDER + BY + Group(delimitedList(ordering_term))("order_by_terms")) +
- Optional(LIMIT + (Group(expr + OFFSET + expr) | Group(expr + COMMA + expr) | expr)("limit")))
-
-tests = """\
- select * from xyzzy where z > 100
- select * from xyzzy where z > 100 order by zz
- select * from xyzzy
- select z.* from xyzzy""".splitlines()
-tests = """\
- select a, b from test_table where 1=1 and b='yes'
- select a, b from test_table where 1=1 and b in (select bb from foo)
- select z.a, b from test_table where 1=1 and b in (select bb from foo)
- select z.a, b from test_table where 1=1 and b in (select bb from foo) order by b,c desc,d
- select z.a, b from test_table left join test2_table where 1=1 and b in (select bb from foo)
- select a, db.table.b as BBB from db.table where 1=1 and BBB='yes'
- select a, db.table.b as BBB from test_table,db.table where 1=1 and BBB='yes'
- select a, db.table.b as BBB from test_table,db.table where 1=1 and BBB='yes' limit 50
- """.splitlines()
-for t in tests:
- t = t.strip()
- if not t: continue
- print t
- try:
- print select_stmt.parseString(t).dump()
- except ParseException, pe:
- print pe.msg
- print
+# select_parser.py
+# Copyright 2010, Paul McGuire
+#
+# a simple SELECT statement parser, taken from SQLite's SELECT statement
+# definition at http://www.sqlite.org/lang_select.html
+#
+from pyparsing import *
+ParserElement.enablePackrat()
+
+LPAR,RPAR,COMMA = map(Suppress,"(),")
+select_stmt = Forward().setName("select statement")
+
+# keywords
+(UNION, ALL, AND, INTERSECT, EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER,
+ CROSS, LEFT, OUTER, JOIN, AS, INDEXED, NOT, SELECT, DISTINCT, FROM, WHERE, GROUP, BY,
+ HAVING, ORDER, BY, LIMIT, OFFSET, OR) = map(CaselessKeyword, """UNION, ALL, AND, INTERSECT,
+ EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER, CROSS, LEFT, OUTER, JOIN, AS, INDEXED, NOT, SELECT,
+ DISTINCT, FROM, WHERE, GROUP, BY, HAVING, ORDER, BY, LIMIT, OFFSET, OR""".replace(",","").split())
+(CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE, END, CASE, WHEN, THEN, EXISTS,
+ COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE, CURRENT_TIME, CURRENT_DATE,
+ CURRENT_TIMESTAMP) = map(CaselessKeyword, """CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE,
+ END, CASE, WHEN, THEN, EXISTS, COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE,
+ CURRENT_TIME, CURRENT_DATE, CURRENT_TIMESTAMP""".replace(",","").split())
+keyword = MatchFirst((UNION, ALL, INTERSECT, EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER,
+ CROSS, LEFT, OUTER, JOIN, AS, INDEXED, NOT, SELECT, DISTINCT, FROM, WHERE, GROUP, BY,
+ HAVING, ORDER, BY, LIMIT, OFFSET, CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE, END, CASE, WHEN, THEN, EXISTS,
+ COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE, CURRENT_TIME, CURRENT_DATE,
+ CURRENT_TIMESTAMP))
+
+identifier = ~keyword + Word(alphas, alphanums+"_")
+collation_name = identifier.copy()
+column_name = identifier.copy()
+column_alias = identifier.copy()
+table_name = identifier.copy()
+table_alias = identifier.copy()
+index_name = identifier.copy()
+function_name = identifier.copy()
+parameter_name = identifier.copy()
+database_name = identifier.copy()
+
+# expression
+expr = Forward().setName("expression")
+
+integer = Regex(r"[+-]?\d+")
+numeric_literal = Regex(r"\d+(\.\d*)?([eE][+-]?\d+)?")
+string_literal = QuotedString("'")
+blob_literal = Regex(r"[xX]'[0-9A-Fa-f]+'")
+literal_value = ( numeric_literal | string_literal | blob_literal |
+ NULL | CURRENT_TIME | CURRENT_DATE | CURRENT_TIMESTAMP )
+bind_parameter = (
+ Word("?",nums) |
+ Combine(oneOf(": @ $") + parameter_name)
+ )
+type_name = oneOf("TEXT REAL INTEGER BLOB NULL")
+
+expr_term = (
+ CAST + LPAR + expr + AS + type_name + RPAR |
+ EXISTS + LPAR + select_stmt + RPAR |
+ function_name.setName("function_name") + LPAR + Optional(delimitedList(expr)) + RPAR |
+ literal_value |
+ bind_parameter |
+ Combine(identifier+('.'+identifier)*(0,2)).setName("ident")
+ )
+
+UNARY,BINARY,TERNARY=1,2,3
+expr << operatorPrecedence(expr_term,
+ [
+ (oneOf('- + ~') | NOT, UNARY, opAssoc.RIGHT),
+ (ISNULL | NOTNULL | NOT + NULL, UNARY, opAssoc.LEFT),
+ ('||', BINARY, opAssoc.LEFT),
+ (oneOf('* / %'), BINARY, opAssoc.LEFT),
+ (oneOf('+ -'), BINARY, opAssoc.LEFT),
+ (oneOf('<< >> & |'), BINARY, opAssoc.LEFT),
+ (oneOf('< <= > >='), BINARY, opAssoc.LEFT),
+ (oneOf('= == != <>') | IS | IN | LIKE | GLOB | MATCH | REGEXP, BINARY, opAssoc.LEFT),
+ ('||', BINARY, opAssoc.LEFT),
+ ((BETWEEN,AND), TERNARY, opAssoc.LEFT),
+ (IN + LPAR + Group(select_stmt | delimitedList(expr)) + RPAR, UNARY, opAssoc.LEFT),
+ (AND, BINARY, opAssoc.LEFT),
+ (OR, BINARY, opAssoc.LEFT),
+ ])
+
+compound_operator = (UNION + Optional(ALL) | INTERSECT | EXCEPT)
+
+ordering_term = Group(expr('order_key') + Optional(COLLATE + collation_name('collate')) + Optional(ASC | DESC)('direction'))
+
+join_constraint = Group(Optional(ON + expr | USING + LPAR + Group(delimitedList(column_name)) + RPAR))
+
+join_op = COMMA | Group(Optional(NATURAL) + Optional(INNER | CROSS | LEFT + OUTER | LEFT | OUTER) + JOIN)
+
+join_source = Forward()
+single_source = ( (Group(database_name("database") + "." + table_name("table")) | table_name("table")) +
+ Optional(Optional(AS) + table_alias("table_alias")) +
+ Optional(INDEXED + BY + index_name("name") | NOT + INDEXED)("index") |
+ (LPAR + select_stmt + RPAR + Optional(Optional(AS) + table_alias)) |
+ (LPAR + join_source + RPAR) )
+
+join_source << (Group(single_source + OneOrMore(join_op + single_source + join_constraint)) |
+ single_source)
+
+result_column = "*" | table_name + "." + "*" | Group(expr + Optional(Optional(AS) + column_alias))
+select_core = (SELECT + Optional(DISTINCT | ALL) + Group(delimitedList(result_column))("columns") +
+ Optional(FROM + join_source("from")) +
+ Optional(WHERE + expr("where_expr")) +
+ Optional(GROUP + BY + Group(delimitedList(ordering_term)("group_by_terms")) +
+ Optional(HAVING + expr("having_expr"))))
+
+select_stmt << (select_core + ZeroOrMore(compound_operator + select_core) +
+ Optional(ORDER + BY + Group(delimitedList(ordering_term))("order_by_terms")) +
+ Optional(LIMIT + (Group(expr + OFFSET + expr) | Group(expr + COMMA + expr) | expr)("limit")))
+
+tests = """\
+ select * from xyzzy where z > 100
+ select * from xyzzy where z > 100 order by zz
+ select * from xyzzy
+ select z.* from xyzzy""".splitlines()
+tests = """\
+ select a, b from test_table where 1=1 and b='yes'
+ select a, b from test_table where 1=1 and b in (select bb from foo)
+ select z.a, b from test_table where 1=1 and b in (select bb from foo)
+ select z.a, b from test_table where 1=1 and b in (select bb from foo) order by b,c desc,d
+ select z.a, b from test_table left join test2_table where 1=1 and b in (select bb from foo)
+ select a, db.table.b as BBB from db.table where 1=1 and BBB='yes'
+ select a, db.table.b as BBB from test_table,db.table where 1=1 and BBB='yes'
+ select a, db.table.b as BBB from test_table,db.table where 1=1 and BBB='yes' limit 50
+ """.splitlines()
+for t in tests:
+ t = t.strip()
+ if not t: continue
+ print(t)
+ try:
+ print(select_stmt.parseString(t).dump())
+ except ParseException as pe:
+ print(pe.msg)
+ print()
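The select_parser.py driver above now uses Python 3 print() calls and the `except ParseException as pe` form. A minimal sketch of that same parse-and-dump loop, shown with a tiny stand-in grammar (not the SELECT grammar itself) and assuming only that pyparsing is importable:

    # Stand-in grammar purely for illustration; parseString()/dump() and
    # ParseException.msg are the same calls the example's loop relies on.
    from pyparsing import Word, nums, ParseException

    integer = Word(nums)("value")

    for text in ["42", "not a number"]:
        print(text)
        try:
            print(integer.parseString(text).dump())
        except ParseException as pe:   # Python 3 spelling of "except ParseException, pe"
            print(pe.msg)
        print()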
diff --git a/src/examples/sexpParser.py b/src/examples/sexpParser.py
index c67b6f8..963d153 100644
--- a/src/examples/sexpParser.py
+++ b/src/examples/sexpParser.py
@@ -156,12 +156,12 @@ t = None
alltests = [ locals()[t] for t in sorted(locals()) if t.startswith("test") ]
for t in alltests:
- print '-'*50
- print t
+ print('-'*50)
+ print(t)
try:
sexpr = sexp.parseString(t, parseAll=True)
pprint.pprint(sexpr.asList())
- except ParseFatalException, pfe:
- print "Error:", pfe.msg
- print pfe.markInputline('^')
- print
+ except ParseFatalException as pfe:
+ print("Error:", pfe.msg)
+ print(pfe.markInputline('^'))
+ print()
diff --git a/src/examples/simpleArith.py b/src/examples/simpleArith.py
index d7fc6a9..09c32d3 100644
--- a/src/examples/simpleArith.py
+++ b/src/examples/simpleArith.py
@@ -61,7 +61,7 @@ test = ["9 + 2 + 3",
"M*(X + B)",
"1+2*-3^4*5+-+-6",]
for t in test:
- print t
- print expr.parseString(t)
- print
+ print(t)
+ print(expr.parseString(t))
+ print()
diff --git a/src/examples/simpleBool.py b/src/examples/simpleBool.py
index cea08cc..0688787 100644
--- a/src/examples/simpleBool.py
+++ b/src/examples/simpleBool.py
@@ -23,9 +23,9 @@ class BoolOperand(object):
class BoolAnd(BoolOperand):
reprsymbol = '&'
- def __nonzero__(self):
+ def __bool__(self):
for a in self.args:
- if isinstance(a,basestring):
+ if isinstance(a,str):
v = eval(a)
else:
v = bool(a)
@@ -35,9 +35,9 @@ class BoolAnd(BoolOperand):
class BoolOr(BoolOperand):
reprsymbol = '|'
- def __nonzero__(self):
+ def __bool__(self):
for a in self.args:
- if isinstance(a,basestring):
+ if isinstance(a,str):
v = eval(a)
else:
v = bool(a)
@@ -50,8 +50,8 @@ class BoolNot(BoolOperand):
self.arg = t[0][1]
def __str__(self):
return "~" + str(self.arg)
- def __nonzero__(self):
- if isinstance(self.arg,basestring):
+ def __bool__(self):
+ if isinstance(self.arg,str):
v = eval(self.arg)
else:
v = bool(self.arg)
@@ -78,11 +78,11 @@ test = ["p and not q",
p = True
q = False
r = True
-print "p =", p
-print "q =", q
-print "r =", r
-print
+print("p =", p)
+print("q =", q)
+print("r =", r)
+print()
for t in test:
res = boolExpr.parseString(t)[0]
- print t,'\n', res, '=', bool(res),'\n'
+ print(t,'\n', res, '=', bool(res),'\n')
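The simpleBool.py changes rename `__nonzero__` to `__bool__` and swap `basestring` for `str`, because Python 3 consults `__bool__` for truth testing and no longer has `basestring`. A minimal sketch of the same idea, using only the standard library; the class here is made up for illustration:

    class Flag:
        """Truthiness driven by __bool__, the Python 3 replacement for __nonzero__."""
        def __init__(self, value):
            self.value = value
        def __bool__(self):
            if isinstance(self.value, str):      # str, since basestring is gone
                return self.value.lower() == "true"
            return bool(self.value)

    print(bool(Flag("True")), bool(Flag(0)))     # -> True False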
diff --git a/src/examples/simpleSQL.py b/src/examples/simpleSQL.py
index fc9e408..62f9928 100644
--- a/src/examples/simpleSQL.py
+++ b/src/examples/simpleSQL.py
@@ -10,17 +10,17 @@ from pyparsing import Literal, CaselessLiteral, Word, Upcase, delimitedList, Opt
ZeroOrMore, restOfLine, Keyword
def test( str ):
- print str,"->"
+ print(str,"->")
try:
tokens = simpleSQL.parseString( str )
- print "tokens = ", tokens
- print "tokens.columns =", tokens.columns
- print "tokens.tables =", tokens.tables
- print "tokens.where =", tokens.where
- except ParseException, err:
- print " "*err.loc + "^\n" + err.msg
- print err
- print
+ print("tokens = ", tokens)
+ print("tokens.columns =", tokens.columns)
+ print("tokens.tables =", tokens.tables)
+ print("tokens.where =", tokens.where)
+ except ParseException as err:
+ print(" "*err.loc + "^\n" + err.msg)
+ print(err)
+ print()
# define SQL tokens
@@ -87,56 +87,56 @@ test( "Select A,b from table1,table2 where table1.id eq table2.id -- test out co
"""
Test output:
->pythonw -u simpleSQL.py
-SELECT * from XYZZY, ABC ->
-tokens = ['select', '*', 'from', ['XYZZY', 'ABC']]
-tokens.columns = *
-tokens.tables = ['XYZZY', 'ABC']
-
-select * from SYS.XYZZY ->
-tokens = ['select', '*', 'from', ['SYS.XYZZY']]
-tokens.columns = *
-tokens.tables = ['SYS.XYZZY']
-
-Select A from Sys.dual ->
-tokens = ['select', ['A'], 'from', ['SYS.DUAL']]
-tokens.columns = ['A']
-tokens.tables = ['SYS.DUAL']
-
-Select A,B,C from Sys.dual ->
-tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL']]
-tokens.columns = ['A', 'B', 'C']
-tokens.tables = ['SYS.DUAL']
-
-Select A, B, C from Sys.dual ->
-tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL']]
-tokens.columns = ['A', 'B', 'C']
-tokens.tables = ['SYS.DUAL']
-
-Select A, B, C from Sys.dual, Table2 ->
-tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL', 'TABLE2']]
-tokens.columns = ['A', 'B', 'C']
-tokens.tables = ['SYS.DUAL', 'TABLE2']
-
-Xelect A, B, C from Sys.dual ->
-^
-Expected 'select'
-Expected 'select' (0), (1,1)
-
-Select A, B, C frox Sys.dual ->
- ^
-Expected 'from'
-Expected 'from' (15), (1,16)
-
-Select ->
- ^
-Expected '*'
-Expected '*' (6), (1,7)
-
-Select &&& frox Sys.dual ->
- ^
-Expected '*'
-Expected '*' (7), (1,8)
-
+>pythonw -u simpleSQL.py
+SELECT * from XYZZY, ABC ->
+tokens = ['select', '*', 'from', ['XYZZY', 'ABC']]
+tokens.columns = *
+tokens.tables = ['XYZZY', 'ABC']
+
+select * from SYS.XYZZY ->
+tokens = ['select', '*', 'from', ['SYS.XYZZY']]
+tokens.columns = *
+tokens.tables = ['SYS.XYZZY']
+
+Select A from Sys.dual ->
+tokens = ['select', ['A'], 'from', ['SYS.DUAL']]
+tokens.columns = ['A']
+tokens.tables = ['SYS.DUAL']
+
+Select A,B,C from Sys.dual ->
+tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL']]
+tokens.columns = ['A', 'B', 'C']
+tokens.tables = ['SYS.DUAL']
+
+Select A, B, C from Sys.dual ->
+tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL']]
+tokens.columns = ['A', 'B', 'C']
+tokens.tables = ['SYS.DUAL']
+
+Select A, B, C from Sys.dual, Table2 ->
+tokens = ['select', ['A', 'B', 'C'], 'from', ['SYS.DUAL', 'TABLE2']]
+tokens.columns = ['A', 'B', 'C']
+tokens.tables = ['SYS.DUAL', 'TABLE2']
+
+Xelect A, B, C from Sys.dual ->
+^
+Expected 'select'
+Expected 'select' (0), (1,1)
+
+Select A, B, C frox Sys.dual ->
+ ^
+Expected 'from'
+Expected 'from' (15), (1,16)
+
+Select ->
+ ^
+Expected '*'
+Expected '*' (6), (1,7)
+
+Select &&& frox Sys.dual ->
+ ^
+Expected '*'
+Expected '*' (7), (1,8)
+
>Exit code: 0
""" \ No newline at end of file
diff --git a/src/examples/simpleWiki.py b/src/examples/simpleWiki.py
index fb50a0f..7a2a0ce 100644
--- a/src/examples/simpleWiki.py
+++ b/src/examples/simpleWiki.py
@@ -27,6 +27,6 @@ urlRef = QuotedString("{{",endQuoteChar="}}").setParseAction(convertToHTML_A)
wikiMarkup = urlRef | boldItalicized | bolded | italicized
-print wikiInput
-print
-print wikiMarkup.transformString(wikiInput)
+print(wikiInput)
+print()
+print(wikiMarkup.transformString(wikiInput))
diff --git a/src/examples/sparser.py b/src/examples/sparser.py
index 7217a11..7c416da 100644
--- a/src/examples/sparser.py
+++ b/src/examples/sparser.py
@@ -85,11 +85,11 @@ def debug(ftn, txt):
def fatal(ftn, txt):
"""If can't continue."""
msg = "%s.%s:FATAL:%s\n" % (modname, ftn, txt)
- raise SystemExit, msg
+ raise SystemExit(msg)
def usage():
"""Prints the docstring."""
- print __doc__
+ print(__doc__)
@@ -140,18 +140,18 @@ class ParseFileLineByLine:
definition file is available __init__ will then create some pyparsing
helper variables. """
if mode not in ['r', 'w', 'a']:
- raise IOError, (0, 'Illegal mode: ' + repr(mode))
+ raise IOError(0, 'Illegal mode: ' + repr(mode))
if string.find(filename, ':/') > 1: # URL
if mode == 'w':
- raise IOError, "can't write to a URL"
- import urllib
- self.file = urllib.urlopen(filename)
+ raise IOError("can't write to a URL")
+ import urllib.request, urllib.parse, urllib.error
+ self.file = urllib.request.urlopen(filename)
else:
filename = os.path.expanduser(filename)
if mode == 'r' or mode == 'a':
if not os.path.exists(filename):
- raise IOError, (2, 'No such file or directory: ' + filename)
+ raise IOError(2, 'No such file or directory: ' + filename)
filen, file_extension = os.path.splitext(filename)
command_dict = {
('.Z', 'r'):
@@ -174,8 +174,8 @@ class ParseFileLineByLine:
"raise IOError, (0, 'Can\'t append to .bz2 files')",
}
- exec command_dict.get((file_extension, mode),
- 'self.file = open(filename, mode)')
+ exec(command_dict.get((file_extension, mode),
+ 'self.file = open(filename, mode)'))
self.grammar = None
@@ -259,7 +259,7 @@ class ParseFileLineByLine:
# Now that 'integer', 'real', and 'qString' have been assigned I can
# execute the definition file.
- execfile(self.parsedef)
+ exec(compile(open(self.parsedef).read(), self.parsedef, 'exec'))
# Build the grammar, combination of the 'integer', 'real, 'qString',
# and '*_junk' variables assigned above in the order specified in the
@@ -335,7 +335,7 @@ def main(pargs):
input_file = sys.argv[1]
fp = ParseFileLineByLine(input_file)
for i in fp:
- print i
+ print(i)
#-------------------------
@@ -345,11 +345,11 @@ if __name__ == '__main__':
['help', 'version', 'debug', 'bb='])
for opt in opts:
if opt[0] == '-h' or opt[0] == '--help':
- print modname+": version="+__version__
+ print(modname+": version="+__version__)
usage()
sys.exit(0)
elif opt[0] == '-v' or opt[0] == '--version':
- print modname+": version="+__version__
+ print(modname+": version="+__version__)
sys.exit(0)
elif opt[0] == '-d' or opt[0] == '--debug':
debug_p = 1
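The sparser.py hunks replace three Python 2 statement forms with their call equivalents: `raise IOError, msg` becomes `raise IOError(msg)`, `urllib.urlopen` becomes `urllib.request.urlopen`, and `execfile(path)` becomes `exec(compile(open(path).read(), path, 'exec'))`. A hedged sketch of the execfile replacement, written against a temporary file (the file name and its contents are made up for illustration):

    import os
    import tempfile

    # fake "definition file" contents, standing in for sparser's .def files
    defs_text = "integer_junk = None\nreal_junk = None\n"
    with tempfile.NamedTemporaryFile("w", suffix=".def", delete=False) as f:
        f.write(defs_text)
        path = f.name

    namespace = {}
    # Python 3 spelling of Python 2's execfile(path) into a namespace
    exec(compile(open(path).read(), path, "exec"), namespace)
    print(sorted(k for k in namespace if not k.startswith("__")))
    os.remove(path)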
diff --git a/src/examples/sql2dot.py b/src/examples/sql2dot.py
index 7fab65f..1156207 100644
--- a/src/examples/sql2dot.py
+++ b/src/examples/sql2dot.py
@@ -89,8 +89,8 @@ comment_def.setParseAction( replaceWith("") )
statement_def = comment_def | create_table_def | add_fkey_def | other_statement_def
defs = OneOrMore(statement_def)
-print """digraph g { graph [ rankdir = "LR" ]; """
+print("""digraph g { graph [ rankdir = "LR" ]; """)
for i in defs.parseString(sampleSQL):
if i!="":
- print i
-print "}" \ No newline at end of file
+ print(i)
+print("}") \ No newline at end of file
diff --git a/src/examples/stackish.py b/src/examples/stackish.py
index 1c4d84c..f80b4d6 100644
--- a/src/examples/stackish.py
+++ b/src/examples/stackish.py
@@ -76,6 +76,6 @@ tests = """\
for test in tests:
if test:
- print test
- print item.parseString(test).dump()
-        print
\ No newline at end of file
+ print(test)
+ print(item.parseString(test).dump())
+ print()
diff --git a/src/examples/stateMachine2.py b/src/examples/stateMachine2.py
index 286266b..eb6633d 100644
--- a/src/examples/stateMachine2.py
+++ b/src/examples/stateMachine2.py
@@ -1 +1,258 @@
-# stateMachine.py # # module to define .pystate import handler # #import imputil import sys import os import types import urlparse DEBUG = False from pyparsing import Word, Group, ZeroOrMore, alphas, \ alphanums, ParserElement, ParseException, ParseSyntaxException, \ Empty, LineEnd, OneOrMore, col, Keyword, pythonStyleComment, \ StringEnd, traceParseAction ident = Word(alphas+"_", alphanums+"_$") pythonKeywords = """and as assert break class continue def del elif else except exec finally for from global if import in is lambda None not or pass print raise return try while with yield True False""" pythonKeywords = set(pythonKeywords.split()) def no_keywords_allowed(s,l,t): wd = t[0] if wd in pythonKeywords: errmsg = "cannot not use keyword '%s' " \ "as an identifier" % wd raise ParseException(s,l,errmsg) ident.setParseAction(no_keywords_allowed) stateTransition = ident("fromState") + "->" + ident("toState") stateMachine = Keyword("statemachine") + \ ident("name") + ":" + \ OneOrMore(Group(stateTransition))("transitions") namedStateTransition = (ident("fromState") + \ "-(" + ident("transition") + ")->" + \ ident("toState")) namedStateMachine = Keyword("statemachine") + \ ident("name") + ":" + \ OneOrMore(Group(namedStateTransition))("transitions") def expand_state_definition(source, loc, tokens): indent = " " * (col(loc,source)-1) statedef = [] # build list of states states = set() fromTo = {} for tn in tokens.transitions: states.add(tn.fromState) states.add(tn.toState) fromTo[tn.fromState] = tn.toState # define base class for state classes baseStateClass = tokens.name + "State" statedef.extend([ "class %s(object):" % baseStateClass, " def __str__(self):", " return self.__class__.__name__", " def next_state(self):", " return self._next_state_class()" ]) # define all state classes statedef.extend( "class %s(%s): pass" % (s,baseStateClass) for s in states ) statedef.extend( "%s._next_state_class = %s" % (s,fromTo[s]) for s in states if s in fromTo ) return indent + ("\n"+indent).join(statedef)+"\n" stateMachine.setParseAction(expand_state_definition) def expand_named_state_definition(source,loc,tokens): indent = " " * (col(loc,source)-1) statedef = [] # build list of states and transitions states = set() transitions = set() baseStateClass = tokens.name + "State" fromTo = {} for tn in tokens.transitions: states.add(tn.fromState) states.add(tn.toState) transitions.add(tn.transition) if tn.fromState in fromTo: fromTo[tn.fromState][tn.transition] = tn.toState else: fromTo[tn.fromState] = {tn.transition:tn.toState} # add entries for terminal states for s in states: if s not in fromTo: fromTo[s] = {} # define state transition class statedef.extend([ "class %sTransition:" % baseStateClass, " def __str__(self):", " return self.transitionName", ]) statedef.extend( "%s = %sTransition()" % (tn,baseStateClass) for tn in transitions) statedef.extend("%s.transitionName = '%s'" % (tn,tn) for tn in transitions) # define base class for state classes excmsg = "'" + tokens.name + \ '.%s does not support transition "%s"' \ "'% (self, tn)" statedef.extend([ "class %s(object):" % baseStateClass, " def __str__(self):", " return self.__class__.__name__", " def next_state(self,tn):", " try:", " return self.tnmap[tn]()", " except KeyError:", " raise Exception(%s)" % excmsg, " def __getattr__(self,name):", " raise Exception(%s)" % excmsg, ]) # define all state classes for s in states: statedef.append("class %s(%s): pass" % (s,baseStateClass)) # define state transition maps and transition methods for s in states: trns = 
fromTo[s].items() statedef.append("%s.tnmap = {%s}" % (s, ",".join("%s:%s" % tn for tn in trns)) ) statedef.extend([ "%s.%s = staticmethod(lambda : %s())" % (s,tn_,to_) for tn_,to_ in trns ]) return indent + ("\n"+indent).join(statedef) + "\n" namedStateMachine.setParseAction( expand_named_state_definition) #====================================================================== # NEW STUFF - Matt Anderson, 2009-11-26 #====================================================================== class SuffixImporter(object): """An importer designed using the mechanism defined in :pep:`302`. I read the PEP, and also used Doug Hellmann's PyMOTW article `Modules and Imports`_, as a pattern. .. _`Modules and Imports`: http://www.doughellmann.com/PyMOTW/sys/imports.html Define a subclass that specifies a :attr:`suffix` attribute, and implements a :meth:`process_filedata` method. Then call the classmethod :meth:`register` on your class to actually install it in the appropriate places in :mod:`sys`. """ scheme = 'suffix' suffix = None path_entry = None @classmethod def trigger_url(cls): if cls.suffix is None: raise ValueError('%s.suffix is not set' % cls.__name__) return 'suffix:%s' % cls.suffix @classmethod def register(cls): sys.path_hooks.append(cls) sys.path.append(cls.trigger_url()) def __init__(self, path_entry): pr = urlparse.urlparse(str(path_entry)) if pr.scheme != self.scheme or pr.path != self.suffix: raise ImportError() self.path_entry = path_entry self._found = {} def checkpath_iter(self, fullname): for dirpath in sys.path: # if the value in sys.path_importer_cache is None, then this # path *should* be imported by the builtin mechanism, and the # entry is thus a path to a directory on the filesystem; # if it's not None, then some other importer is in charge, and # it probably isn't even a filesystem path if sys.path_importer_cache.get(dirpath,False) is None: checkpath = os.path.join( dirpath,'%s.%s' % (fullname,self.suffix)) yield checkpath def find_module(self, fullname, path=None): for checkpath in self.checkpath_iter(fullname): if os.path.isfile(checkpath): self._found[fullname] = checkpath return self return None def load_module(self, fullname): assert fullname in self._found if fullname in sys.modules: module = sys.modules[fullname] else: sys.modules[fullname] = module = types.ModuleType(fullname) data = None f = open(self._found[fullname]) try: data = f.read() finally: f.close() module.__dict__.clear() module.__file__ = self._found[fullname] module.__name__ = fullname module.__loader__ = self self.process_filedata(module, data) return module def process_filedata(self, module, data): pass class PystateImporter(SuffixImporter): suffix = 'pystate' def process_filedata(self, module, data): # MATT-NOTE: re-worked :func:`get_state_machine` # convert any statemachine expressions stateMachineExpr = (stateMachine | namedStateMachine).ignore( pythonStyleComment) generated_code = stateMachineExpr.transformString(data) if DEBUG: print generated_code # compile code object from generated code # (strip trailing spaces and tabs, compile doesn't like # dangling whitespace) COMPILE_MODE = 'exec' codeobj = compile(generated_code.rstrip(" \t"), module.__file__, COMPILE_MODE) exec codeobj in module.__dict__ PystateImporter.register() \ No newline at end of file
+# stateMachine.py
+#
+# module to define .pystate import handler
+#
+#import imputil
+import sys
+import os
+import types
+import urllib.parse
+
+DEBUG = False
+
+from pyparsing import Word, Group, ZeroOrMore, alphas, \
+ alphanums, ParserElement, ParseException, ParseSyntaxException, \
+ Empty, LineEnd, OneOrMore, col, Keyword, pythonStyleComment, \
+ StringEnd, traceParseAction
+
+
+ident = Word(alphas+"_", alphanums+"_$")
+
+pythonKeywords = """and as assert break class continue def
+ del elif else except exec finally for from global if import
+ in is lambda None not or pass print raise return try while with
+ yield True False"""
+pythonKeywords = set(pythonKeywords.split())
+def no_keywords_allowed(s,l,t):
+ wd = t[0]
+ if wd in pythonKeywords:
+ errmsg = "cannot not use keyword '%s' " \
+ "as an identifier" % wd
+ raise ParseException(s,l,errmsg)
+ident.setParseAction(no_keywords_allowed)
+
+stateTransition = ident("fromState") + "->" + ident("toState")
+stateMachine = Keyword("statemachine") + \
+ ident("name") + ":" + \
+ OneOrMore(Group(stateTransition))("transitions")
+
+namedStateTransition = (ident("fromState") + \
+ "-(" + ident("transition") + ")->" + \
+ ident("toState"))
+namedStateMachine = Keyword("statemachine") + \
+ ident("name") + ":" + \
+ OneOrMore(Group(namedStateTransition))("transitions")
+
+def expand_state_definition(source, loc, tokens):
+ indent = " " * (col(loc,source)-1)
+ statedef = []
+
+ # build list of states
+ states = set()
+ fromTo = {}
+ for tn in tokens.transitions:
+ states.add(tn.fromState)
+ states.add(tn.toState)
+ fromTo[tn.fromState] = tn.toState
+
+ # define base class for state classes
+ baseStateClass = tokens.name + "State"
+ statedef.extend([
+ "class %s(object):" % baseStateClass,
+ " def __str__(self):",
+ " return self.__class__.__name__",
+ " def next_state(self):",
+ " return self._next_state_class()" ])
+
+ # define all state classes
+ statedef.extend(
+ "class %s(%s): pass" % (s,baseStateClass)
+ for s in states )
+ statedef.extend(
+ "%s._next_state_class = %s" % (s,fromTo[s])
+ for s in states if s in fromTo )
+
+ return indent + ("\n"+indent).join(statedef)+"\n"
+
+stateMachine.setParseAction(expand_state_definition)
+
+def expand_named_state_definition(source,loc,tokens):
+ indent = " " * (col(loc,source)-1)
+ statedef = []
+ # build list of states and transitions
+ states = set()
+ transitions = set()
+
+ baseStateClass = tokens.name + "State"
+
+ fromTo = {}
+ for tn in tokens.transitions:
+ states.add(tn.fromState)
+ states.add(tn.toState)
+ transitions.add(tn.transition)
+ if tn.fromState in fromTo:
+ fromTo[tn.fromState][tn.transition] = tn.toState
+ else:
+ fromTo[tn.fromState] = {tn.transition:tn.toState}
+
+ # add entries for terminal states
+ for s in states:
+ if s not in fromTo:
+ fromTo[s] = {}
+
+ # define state transition class
+ statedef.extend([
+ "class %sTransition:" % baseStateClass,
+ " def __str__(self):",
+ " return self.transitionName",
+ ])
+ statedef.extend(
+ "%s = %sTransition()" % (tn,baseStateClass)
+ for tn in transitions)
+ statedef.extend("%s.transitionName = '%s'" % (tn,tn)
+ for tn in transitions)
+
+ # define base class for state classes
+ excmsg = "'" + tokens.name + \
+ '.%s does not support transition "%s"' \
+ "'% (self, tn)"
+ statedef.extend([
+ "class %s(object):" % baseStateClass,
+ " def __str__(self):",
+ " return self.__class__.__name__",
+ " def next_state(self,tn):",
+ " try:",
+ " return self.tnmap[tn]()",
+ " except KeyError:",
+ " raise Exception(%s)" % excmsg,
+ " def __getattr__(self,name):",
+ " raise Exception(%s)" % excmsg,
+ ])
+
+ # define all state classes
+ for s in states:
+ statedef.append("class %s(%s): pass" %
+ (s,baseStateClass))
+
+ # define state transition maps and transition methods
+ for s in states:
+ trns = list(fromTo[s].items())
+ statedef.append("%s.tnmap = {%s}" %
+ (s, ",".join("%s:%s" % tn for tn in trns)) )
+ statedef.extend([
+ "%s.%s = staticmethod(lambda : %s())" %
+ (s,tn_,to_)
+ for tn_,to_ in trns
+ ])
+
+ return indent + ("\n"+indent).join(statedef) + "\n"
+
+namedStateMachine.setParseAction(
+ expand_named_state_definition)
+
+#======================================================================
+# NEW STUFF - Matt Anderson, 2009-11-26
+#======================================================================
+class SuffixImporter(object):
+
+ """An importer designed using the mechanism defined in :pep:`302`. I read
+ the PEP, and also used Doug Hellmann's PyMOTW article `Modules and
+ Imports`_, as a pattern.
+
+ .. _`Modules and Imports`: http://www.doughellmann.com/PyMOTW/sys/imports.html
+
+ Define a subclass that specifies a :attr:`suffix` attribute, and
+ implements a :meth:`process_filedata` method. Then call the classmethod
+ :meth:`register` on your class to actually install it in the appropriate
+ places in :mod:`sys`. """
+
+ scheme = 'suffix'
+ suffix = None
+ path_entry = None
+
+ @classmethod
+ def trigger_url(cls):
+ if cls.suffix is None:
+ raise ValueError('%s.suffix is not set' % cls.__name__)
+ return 'suffix:%s' % cls.suffix
+
+ @classmethod
+ def register(cls):
+ sys.path_hooks.append(cls)
+ sys.path.append(cls.trigger_url())
+
+ def __init__(self, path_entry):
+ pr = urllib.parse.urlparse(str(path_entry))
+ if pr.scheme != self.scheme or pr.path != self.suffix:
+ raise ImportError()
+ self.path_entry = path_entry
+ self._found = {}
+
+ def checkpath_iter(self, fullname):
+ for dirpath in sys.path:
+ # if the value in sys.path_importer_cache is None, then this
+ # path *should* be imported by the builtin mechanism, and the
+ # entry is thus a path to a directory on the filesystem;
+ # if it's not None, then some other importer is in charge, and
+ # it probably isn't even a filesystem path
+ if sys.path_importer_cache.get(dirpath,False) is None:
+ checkpath = os.path.join(
+ dirpath,'%s.%s' % (fullname,self.suffix))
+ yield checkpath
+
+ def find_module(self, fullname, path=None):
+ for checkpath in self.checkpath_iter(fullname):
+ if os.path.isfile(checkpath):
+ self._found[fullname] = checkpath
+ return self
+ return None
+
+ def load_module(self, fullname):
+ assert fullname in self._found
+ if fullname in sys.modules:
+ module = sys.modules[fullname]
+ else:
+ sys.modules[fullname] = module = types.ModuleType(fullname)
+ data = None
+ f = open(self._found[fullname])
+ try:
+ data = f.read()
+ finally:
+ f.close()
+
+ module.__dict__.clear()
+ module.__file__ = self._found[fullname]
+ module.__name__ = fullname
+ module.__loader__ = self
+ self.process_filedata(module, data)
+ return module
+
+ def process_filedata(self, module, data):
+ pass
+
+class PystateImporter(SuffixImporter):
+ suffix = 'pystate'
+
+ def process_filedata(self, module, data):
+ # MATT-NOTE: re-worked :func:`get_state_machine`
+
+ # convert any statemachine expressions
+ stateMachineExpr = (stateMachine |
+ namedStateMachine).ignore(
+ pythonStyleComment)
+ generated_code = stateMachineExpr.transformString(data)
+
+ if DEBUG: print(generated_code)
+
+ # compile code object from generated code
+ # (strip trailing spaces and tabs, compile doesn't like
+ # dangling whitespace)
+ COMPILE_MODE = 'exec'
+
+ codeobj = compile(generated_code.rstrip(" \t"),
+ module.__file__,
+ COMPILE_MODE)
+
+ exec(codeobj, module.__dict__)
+
+PystateImporter.register()
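The rebuilt stateMachine2.py swaps Python 2's `urlparse` module for `urllib.parse` and the `exec code in namespace` statement for the `exec(code, namespace)` function. A small sketch of both, assuming only the standard library; the trigger string mirrors what SuffixImporter's trigger_url() would produce for the pystate suffix:

    import urllib.parse

    trigger = "suffix:pystate"
    pr = urllib.parse.urlparse(trigger)
    print(pr.scheme, pr.path)          # -> suffix pystate

    # exec is now a function taking an explicit namespace dict
    ns = {}
    exec(compile("answer = 1 + 1", "<generated>", "exec"), ns)
    print(ns["answer"])                # -> 2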
diff --git a/src/examples/tagCapture.py b/src/examples/tagCapture.py
index c9077ff..e07d518 100644
--- a/src/examples/tagCapture.py
+++ b/src/examples/tagCapture.py
@@ -28,8 +28,8 @@ def matchingCloseTag(other):
for m in originalTextFor(anyOpenTag + SkipTo(matchingCloseTag(anyOpenTag),
include=True,
failOn=anyOpenTag) ).searchString(src):
- print m.dump()
+ print(m.dump())
for m in originalTextFor(anyOpenTag + SkipTo(matchingCloseTag(anyOpenTag),
include=True) ).searchString(src):
- print m.dump()
+ print(m.dump())
diff --git a/src/examples/urlExtractor.py b/src/examples/urlExtractor.py
index 2e5f7be..7c90bd7 100644
--- a/src/examples/urlExtractor.py
+++ b/src/examples/urlExtractor.py
@@ -2,7 +2,7 @@
# Copyright 2004, Paul McGuire
from pyparsing import Literal,Suppress,CharsNotIn,CaselessLiteral,\
Word,dblQuotedString,alphanums,SkipTo
-import urllib
+import urllib.request, urllib.parse, urllib.error
import pprint
# Define the pyparsing grammar for a URL, that is:
@@ -18,7 +18,7 @@ linkCloseTag = Literal("<") + "/" + CaselessLiteral("a") + ">"
link = linkOpenTag + SkipTo(linkCloseTag) + linkCloseTag.suppress()
# Go get some HTML with some links in it.
-serverListPage = urllib.urlopen( "http://www.yahoo.com" )
+serverListPage = urllib.request.urlopen( "http://www.yahoo.com" )
htmlText = serverListPage.read()
serverListPage.close()
@@ -26,7 +26,7 @@ serverListPage.close()
# match yields the tokens and start and end locations (for this application, we are
# not interested in the start and end values).
for toks,strt,end in link.scanString(htmlText):
- print toks.asList()
+ print(toks.asList())
# Rerun scanString, but this time create a dict of text:URL key-value pairs.
# Need to reverse the tokens returned by link, using a parse action.
diff --git a/src/examples/urlExtractorNew.py b/src/examples/urlExtractorNew.py
index 276e112..0569b6c 100644
--- a/src/examples/urlExtractorNew.py
+++ b/src/examples/urlExtractorNew.py
@@ -2,7 +2,7 @@
# Copyright 2004, Paul McGuire
from pyparsing import Literal,Suppress,CharsNotIn,CaselessLiteral,\
Word,dblQuotedString,alphanums,SkipTo,makeHTMLTags
-import urllib
+import urllib.request, urllib.parse, urllib.error
import pprint
# Define the pyparsing grammar for a URL, that is:
@@ -15,7 +15,7 @@ linkOpenTag,linkCloseTag = makeHTMLTags("a")
link = linkOpenTag + SkipTo(linkCloseTag).setResultsName("body") + linkCloseTag.suppress()
# Go get some HTML with some links in it.
-serverListPage = urllib.urlopen( "http://www.google.com" )
+serverListPage = urllib.request.urlopen( "http://www.google.com" )
htmlText = serverListPage.read()
serverListPage.close()
@@ -23,7 +23,7 @@ serverListPage.close()
# match yields the tokens and start and end locations (for this application, we are
# not interested in the start and end values).
for toks,strt,end in link.scanString(htmlText):
- print toks.startA.href,"->",toks.body
+ print(toks.startA.href,"->",toks.body)
# Create dictionary from list comprehension, assembled from each pair of tokens returned
# from a matched URL.
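Both URL-extractor examples now call urllib.request.urlopen(). One caveat the diff does not address: under Python 3, read() on the response returns bytes, so the HTML generally needs decoding to str before pyparsing's scanString can walk it. A hedged sketch, reusing the example's own URL (network access required):

    import urllib.request

    with urllib.request.urlopen("http://www.google.com") as page:
        raw = page.read()                              # bytes under Python 3
    htmlText = raw.decode("utf-8", errors="replace")   # str, suitable for scanString
    print(type(raw).__name__, type(htmlText).__name__) # -> bytes str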
diff --git a/src/examples/verilogParse.py b/src/examples/verilogParse.py
index cc7772d..5eaf14f 100644
--- a/src/examples/verilogParse.py
+++ b/src/examples/verilogParse.py
@@ -93,7 +93,7 @@ if usePsyco:
import psyco
psyco.full()
except:
- print "failed to import psyco Python optimizer"
+ print("failed to import psyco Python optimizer")
else:
psycoOn = True
@@ -614,10 +614,10 @@ def test( strng ):
tokens = []
try:
tokens = Verilog_BNF().parseString( strng )
- except ParseException, err:
- print err.line
- print " "*(err.column-1) + "^"
- print err
+ except ParseException as err:
+ print(err.line)
+ print(" "*(err.column-1) + "^")
+ print(err)
return tokens
@@ -634,12 +634,12 @@ if 0:
else:
def main():
- print "Verilog parser test (V %s)" % __version__
- print " - using pyparsing version", pyparsing.__version__
- print " - using Python version", sys.version
- if packratOn: print " - using packrat parsing"
- if psycoOn: print " - using psyco runtime optimization"
- print
+ print("Verilog parser test (V %s)" % __version__)
+ print(" - using pyparsing version", pyparsing.__version__)
+ print(" - using Python version", sys.version)
+ if packratOn: print(" - using packrat parsing")
+ if psycoOn: print(" - using psyco runtime optimization")
+ print()
import os
import gc
@@ -652,7 +652,7 @@ else:
#~ fileDir = "verilog/new"
#~ fileDir = "verilog/new2"
#~ fileDir = "verilog/new3"
- allFiles = filter( lambda f : f.endswith(".v"), os.listdir(fileDir) )
+ allFiles = [f for f in os.listdir(fileDir) if f.endswith(".v")]
#~ allFiles = [ "list_path_delays_test.v" ]
#~ allFiles = [ "escapedIdent.v" ]
#~ allFiles = filter( lambda f : f.startswith("a") and f.endswith(".v"), os.listdir(fileDir) )
@@ -664,10 +664,10 @@ else:
for vfile in allFiles:
gc.collect()
fnam = fileDir + "/"+vfile
- infile = file(fnam)
+ infile = open(fnam)
filelines = infile.readlines()
infile.close()
- print fnam, len(filelines),
+ print(fnam, len(filelines), end=' ')
numlines += len(filelines)
teststr = "".join(filelines)
time1 = time.clock()
@@ -676,13 +676,13 @@ else:
elapsed = time2-time1
totalTime += elapsed
if ( len( tokens ) ):
- print "OK", elapsed
+ print("OK", elapsed)
#~ print "tokens="
#~ pp.pprint( tokens.asList() )
#~ print
ofnam = fileDir + "/parseOutput/" + vfile + ".parsed.txt"
- outfile = file(ofnam,"w")
+ outfile = open(ofnam,"w")
outfile.write( teststr )
outfile.write("\n")
outfile.write("\n")
@@ -690,18 +690,18 @@ else:
outfile.write("\n")
outfile.close()
else:
- print "failed", elapsed
+ print("failed", elapsed)
failCount += 1
for i,line in enumerate(filelines,1):
- print "%4d: %s" % (i,line.rstrip())
+ print("%4d: %s" % (i,line.rstrip()))
endTime = time.clock()
- print "Total parse time:", totalTime
- print "Total source lines:", numlines
- print "Average lines/sec:", ( "%.1f" % (float(numlines)/(totalTime+.05 ) ) )
+ print("Total parse time:", totalTime)
+ print("Total source lines:", numlines)
+ print("Average lines/sec:", ( "%.1f" % (float(numlines)/(totalTime+.05 ) ) ))
if failCount:
- print "FAIL - %d files failed to parse" % failCount
+ print("FAIL - %d files failed to parse" % failCount)
else:
- print "SUCCESS - all files parsed"
+ print("SUCCESS - all files parsed")
return 0
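verilogParse.py's conversions include the trailing-comma print (now `print(..., end=' ')`), `file()` replaced by `open()`, and a `filter(lambda ...)` call rewritten as a list comprehension. A tiny sketch of the print and filter changes, with made-up file names:

    files = ["top.v", "notes.txt", "alu.v"]

    # Python 2: allFiles = filter(lambda f: f.endswith(".v"), os.listdir(fileDir))
    allFiles = [f for f in files if f.endswith(".v")]

    for name in allFiles:
        # Python 2: print name,   (the trailing comma suppressed the newline)
        print(name, end=' ')
    print()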
diff --git a/src/examples/withAttribute.py b/src/examples/withAttribute.py
index af7edf3..062c9ae 100644
--- a/src/examples/withAttribute.py
+++ b/src/examples/withAttribute.py
@@ -21,4 +21,4 @@ patt = tdS + fontS + NBSP + realNum("value") + NBSP + fontE + tdE
tdS.setParseAction( withAttribute(align="right",width="80") )
for s in patt.searchString(data):
- print s.value
+ print(s.value)
diff --git a/src/examples/wordsToNum.py b/src/examples/wordsToNum.py
index e40adb2..10bfbe2 100644
--- a/src/examples/wordsToNum.py
+++ b/src/examples/wordsToNum.py
@@ -5,6 +5,7 @@
#
from pyparsing import *
from operator import mul
+from functools import reduce
def makeLit(s,val):
ret = CaselessLiteral(s).setName(s)
@@ -79,30 +80,30 @@ numWords.ignore(CaselessLiteral("and"))
def test(s,expected):
try:
val = numWords.parseString(s)[0]
- except ParseException, pe:
- print "Parsing failed:"
- print s
- print "%s^" % (' '*(pe.col-1))
- print pe.msg
+ except ParseException as pe:
+ print("Parsing failed:")
+ print(s)
+ print("%s^" % (' '*(pe.col-1)))
+ print(pe.msg)
else:
- print "'%s' -> %d" % (s, val),
+ print("'%s' -> %d" % (s, val), end=' ')
if val == expected:
- print "CORRECT"
+ print("CORRECT")
else:
- print "***WRONG***, expected %d" % expected
+ print("***WRONG***, expected %d" % expected)
-test("one hundred twenty hundred", 120)
-test("one hundred and twennty", 120)
-test("one hundred and twenty", 120)
-test("one hundred and three", 103)
-test("one hundred twenty-three", 123)
-test("one hundred and twenty three", 123)
-test("one hundred twenty three million", 123000000)
-test("one hundred and twenty three million", 123000000)
-test("one hundred twenty three million and three", 123000003)
-test("fifteen hundred and sixty five", 1565)
-test("seventy-seven thousand eight hundred and nineteen", 77819)
-test("seven hundred seventy-seven thousand seven hundred and seventy-seven", 777777)
+test("one hundred twenty hundred", 120)
+test("one hundred and twennty", 120)
+test("one hundred and twenty", 120)
+test("one hundred and three", 103)
+test("one hundred twenty-three", 123)
+test("one hundred and twenty three", 123)
+test("one hundred twenty three million", 123000000)
+test("one hundred and twenty three million", 123000000)
+test("one hundred twenty three million and three", 123000003)
+test("fifteen hundred and sixty five", 1565)
+test("seventy-seven thousand eight hundred and nineteen", 77819)
+test("seven hundred seventy-seven thousand seven hundred and seventy-seven", 777777)
test("zero", 0)
test("forty two", 42)
test("fourty two", 42) \ No newline at end of file