Diffstat (limited to 'Parser/pgen/token.py')
 -rw-r--r--  Parser/pgen/token.py | 38 ----------------------------------
 1 file changed, 0 insertions(+), 38 deletions(-)
diff --git a/Parser/pgen/token.py b/Parser/pgen/token.py
deleted file mode 100644
index 2cff62ce3b..0000000000
--- a/Parser/pgen/token.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import itertools
-
-
-def generate_tokens(tokens):
- numbers = itertools.count(0)
- for line in tokens:
- line = line.strip()
-
- if not line or line.startswith("#"):
- continue
-
- name = line.split()[0]
- yield (name, next(numbers))
-
- yield ("N_TOKENS", next(numbers))
- yield ("NT_OFFSET", 256)
-
-
-def generate_opmap(tokens):
- for line in tokens:
- line = line.strip()
-
- if not line or line.startswith("#"):
- continue
-
- pieces = line.split()
-
- if len(pieces) != 2:
- continue
-
- name, op = pieces
- yield (op.strip("'"), name)
-
- # Yield independently <>. This is needed so it does not collide
- # with the token generation in "generate_tokens" because if this
- # symbol is included in Grammar/Tokens, it will collide with !=
- # as it has the same name (NOTEQUAL).
- yield ("<>", "NOTEQUAL")