summary | refs | log | tree | commit | diff
path: root/Python/compile.c
diff options
context:
space:
mode:
author: T. Wouters <thomas@python.org> 2019-09-12 07:05:33 -0700
committer: Gregory P. Smith <greg@krypto.org> 2019-09-12 15:05:33 +0100
commit: 99b54d68172ad64ba3d0fdc0137f0df88c28ea2b (patch)
tree: c7fe3a8530b78060e81db3f7948545d9e076e8ed /Python/compile.c
parent: 355f3e1e5caf16198255df573a1f5e8b98b30105 (diff)
download: cpython-git-99b54d68172ad64ba3d0fdc0137f0df88c28ea2b.tar.gz
Revert "Fix depth-first-search computation in compile.c (GH-16042)" (GH-16050)
This reverts commit 355f3e1e5caf16198255df573a1f5e8b98b30105. bpo-38135
Diffstat (limited to 'Python/compile.c')
-rw-r--r--Python/compile.c54
1 file changed, 30 insertions(+), 24 deletions(-)
diff --git a/Python/compile.c b/Python/compile.c
index 4906ea8fac..edd8625cba 100644
--- a/Python/compile.c
+++ b/Python/compile.c
@@ -5373,7 +5373,7 @@ compiler_visit_slice(struct compiler *c, slice_ty s, expr_context_ty ctx)
/* End of the compiler section, beginning of the assembler section */
/* do depth-first search of basic block graph, starting with block.
- a_order records the block indices in order.
+ post records the block indices in post-order.
XXX must handle implicit jumps from one block to next
*/
@@ -5382,7 +5382,7 @@ struct assembler {
PyObject *a_bytecode; /* string containing bytecode */
int a_offset; /* offset into bytecode */
int a_nblocks; /* number of reachable blocks */
- basicblock **a_order; /* list of blocks in dfs order */
+ basicblock **a_postorder; /* list of blocks in dfs postorder */
PyObject *a_lnotab; /* string containing lnotab */
int a_lnotab_off; /* offset into lnotab */
int a_lineno; /* last lineno of emitted instruction */
@@ -5390,21 +5390,28 @@ struct assembler {
};
static void
-dfs(struct compiler *c, basicblock *entry, struct assembler *a)
+dfs(struct compiler *c, basicblock *b, struct assembler *a, int end)
{
- /* Avoid excessive recursion by following 'next' links to the
- * end of the chain before handling any branches */
- for (basicblock *b = entry; b; b = b->b_next) {
+ int i, j;
+
+ /* Get rid of recursion for normal control flow.
+ Since the number of blocks is limited, unused space in a_postorder
+ (from a_nblocks to end) can be used as a stack for still not ordered
+ blocks. */
+ for (j = end; b && !b->b_seen; b = b->b_next) {
b->b_seen = 1;
- a->a_order[a->a_nblocks++] = b;
+ assert(a->a_nblocks < j);
+ a->a_postorder[--j] = b;
}
- for (basicblock *b = entry; b; b = b->b_next) {
- for (int i = 0; i < b->b_iused; i++) {
- basicblock *target = b->b_instr[i].i_target;
- if (target && !target->b_seen) {
- dfs(c, target, a);
- }
+ while (j < end) {
+ b = a->a_postorder[j++];
+ for (i = 0; i < b->b_iused; i++) {
+ struct instr *instr = &b->b_instr[i];
+ if (instr->i_jrel || instr->i_jabs)
+ dfs(c, instr->i_target, a, j);
}
+ assert(a->a_nblocks < j);
+ a->a_postorder[a->a_nblocks++] = b;
}
}
@@ -5510,9 +5517,9 @@ assemble_init(struct assembler *a, int nblocks, int firstlineno)
PyErr_NoMemory();
return 0;
}
- a->a_order = (basicblock **)PyObject_Malloc(
+ a->a_postorder = (basicblock **)PyObject_Malloc(
sizeof(basicblock *) * nblocks);
- if (!a->a_order) {
+ if (!a->a_postorder) {
PyErr_NoMemory();
return 0;
}
@@ -5524,8 +5531,8 @@ assemble_free(struct assembler *a)
{
Py_XDECREF(a->a_bytecode);
Py_XDECREF(a->a_lnotab);
- if (a->a_order)
- PyObject_Free(a->a_order);
+ if (a->a_postorder)
+ PyObject_Free(a->a_postorder);
}
static int
@@ -5686,8 +5693,8 @@ assemble_jump_offsets(struct assembler *a, struct compiler *c)
Replace block pointer with position in bytecode. */
do {
totsize = 0;
- for (i = 0; i < a->a_nblocks; i++) {
- b = a->a_order[i];
+ for (i = a->a_nblocks - 1; i >= 0; i--) {
+ b = a->a_postorder[i];
bsize = blocksize(b);
b->b_offset = totsize;
totsize += bsize;
@@ -5991,15 +5998,14 @@ assemble(struct compiler *c, int addNone)
}
if (!assemble_init(&a, nblocks, c->u->u_firstlineno))
goto error;
- dfs(c, entryblock, &a);
- assert(a.a_nblocks <= nblocks);
+ dfs(c, entryblock, &a, nblocks);
/* Can't modify the bytecode after computing jump offsets. */
assemble_jump_offsets(&a, c);
- /* Emit code in order from dfs. */
- for (i = 0; i < a.a_nblocks; i++) {
- b = a.a_order[i];
+ /* Emit code in reverse postorder from dfs. */
+ for (i = a.a_nblocks - 1; i >= 0; i--) {
+ b = a.a_postorder[i];
for (j = 0; j < b->b_iused; j++)
if (!assemble_emit(&a, &b->b_instr[j]))
goto error;