author     Paul Eggert <eggert@cs.ucla.edu>   2020-08-24 13:12:51 -0700
committer  Paul Eggert <eggert@cs.ucla.edu>   2020-08-25 11:25:21 -0700
commit     eb77572257bfa4e649c0c8852d2d0a58ad63eaa5 (patch)
tree       de34a3b16d789fd51d61a8c0e0036fe91ed4196c /src
parent     a142bbd288a814822ba63194c690552f8c0ce425 (diff)
Fix replace-region-contents performance bug
Backport from master.

* src/editfns.c (rbc_quitcounter): Remove; the quitcounter is now
part of the context.
(EXTRA_CONTEXT_FIELDS): Remove unused member early_abort_tests.
Add jmp, quitcounter.
(Freplace_buffer_contents): Use setjmp/longjmp to recover from a
compareseq that runs too long.  Omit unnecessary rarely_quit call.
(buffer_chars_equal): Occasionally check for early abort and
longjmp out if so (Bug#43016).
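The interesting move here is letting compareseq (Gnulib's diff driver,
lib/diffseq.h) run with no abort path threaded through its internals, and
instead longjmp-ing out of the per-character comparison callback when the
time budget expires. A minimal, self-contained sketch of that
setjmp/longjmp early-abort pattern (plain ISO C setjmp rather than Emacs's
sys_setjmp wrapper; all names and the budget test are illustrative, not
from editfns.c):

#include <setjmp.h>
#include <stdbool.h>
#include <stdio.h>

static jmp_buf escape;            /* jump target for bailing out */

/* Called deep inside the computation; unwinds past every caller
   once the (illustrative) budget is exhausted.  */
static void
check_budget (int spent)
{
  if (spent > 1000)               /* stand-in for the time-limit test */
    longjmp (escape, 1);
}

static bool
long_computation (void)
{
  for (int spent = 0; spent < 5000; spent++)
    check_budget (spent);
  return false;                   /* finished without aborting */
}

int
main (void)
{
  bool early_abort;
  if (! setjmp (escape))
    early_abort = long_computation ();  /* normal path */
  else
    early_abort = true;                 /* longjmp lands here */
  printf ("early_abort = %d\n", early_abort);  /* prints 1 */
}

This mirrors the shape the patch installs around compareseq below: the
setjmp caller owns early_abort, and the nested callback can bail out
without an error value being threaded through every stack frame between.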
Diffstat (limited to 'src')
 -rw-r--r--  src/editfns.c  31  +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/src/editfns.c b/src/editfns.c
index fe1feaf1e77..f660513b2a4 100644
--- a/src/editfns.c
+++ b/src/editfns.c
@@ -1913,9 +1913,6 @@ determines whether case is significant or ignored. */)
#undef EQUAL
#define USE_HEURISTIC
-/* Counter used to rarely_quit in replace-buffer-contents. */
-static unsigned short rbc_quitcounter;
-
#define XVECREF_YVECREF_EQUAL(ctx, xoff, yoff) \
buffer_chars_equal ((ctx), (xoff), (yoff))
@@ -1936,7 +1933,8 @@ static unsigned short rbc_quitcounter;
unsigned char *deletions; \
unsigned char *insertions; \
struct timespec time_limit; \
- unsigned int early_abort_tests;
+ sys_jmp_buf jmp; \
+ unsigned short quitcounter;
#define NOTE_DELETE(ctx, xoff) set_bit ((ctx)->deletions, (xoff))
#define NOTE_INSERT(ctx, yoff) set_bit ((ctx)->insertions, (yoff))
@@ -2065,14 +2063,17 @@ nil. */)
.heuristic = true,
.too_expensive = XFIXNUM (max_costs),
.time_limit = time_limit,
- .early_abort_tests = 0
};
memclear (ctx.deletions, del_bytes);
memclear (ctx.insertions, ins_bytes);
/* compareseq requires indices to be zero-based. We add BEGV back
later. */
- bool early_abort = compareseq (0, size_a, 0, size_b, false, &ctx);
+ bool early_abort;
+ if (! sys_setjmp (ctx.jmp))
+ early_abort = compareseq (0, size_a, 0, size_b, false, &ctx);
+ else
+ early_abort = true;
if (early_abort)
{
@@ -2082,8 +2083,6 @@ nil. */)
return Qnil;
}
- rbc_quitcounter = 0;
-
Fundo_boundary ();
bool modification_hooks_inhibited = false;
record_unwind_protect_excursion ();
@@ -2107,8 +2106,7 @@ nil. */)
walk backwards, we don’t have to keep the positions in sync. */
while (i >= 0 || j >= 0)
{
- /* Allow the user to quit if this gets too slow. */
- rarely_quit (++rbc_quitcounter);
+ rarely_quit (++ctx.quitcounter);
/* Check whether there is a change (insertion or deletion)
before the current position. */
@@ -2123,8 +2121,6 @@ nil. */)
while (j > 0 && bit_is_set (ctx.insertions, j - 1))
--j;
- rarely_quit (rbc_quitcounter++);
-
ptrdiff_t beg_a = min_a + i;
ptrdiff_t beg_b = min_b + j;
eassert (beg_a <= end_a);
@@ -2144,7 +2140,6 @@ nil. */)
}
SAFE_FREE_UNBIND_TO (count, Qnil);
- rbc_quitcounter = 0;
if (modification_hooks_inhibited)
{
@@ -2191,12 +2186,16 @@ static bool
buffer_chars_equal (struct context *ctx,
ptrdiff_t pos_a, ptrdiff_t pos_b)
{
+ if (!++ctx->quitcounter)
+ {
+ maybe_quit ();
+ if (compareseq_early_abort (ctx))
+ sys_longjmp (ctx->jmp, 1);
+ }
+
pos_a += ctx->beg_a;
pos_b += ctx->beg_b;
- /* Allow the user to escape out of a slow compareseq call. */
- rarely_quit (++rbc_quitcounter);
-
ptrdiff_t bpos_a =
ctx->a_unibyte ? pos_a : buf_charpos_to_bytepos (ctx->buffer_a, pos_a);
ptrdiff_t bpos_b =
ctx->b_unibyte ? pos_b : buf_charpos_to_bytepos (ctx->buffer_b, pos_b);
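A note on the new test at the top of buffer_chars_equal: ctx->quitcounter
is an unsigned short, so !++ctx->quitcounter becomes true only when the
increment wraps around to zero, i.e. once every 65536 calls. The hot
per-character comparison therefore stays cheap, and maybe_quit plus the
compareseq_early_abort time check run only rarely. A standalone
illustration of the wraparound trick (counts and names are illustrative,
not from editfns.c):

#include <stdio.h>

int
main (void)
{
  unsigned short quitcounter = 0;
  long expensive_checks = 0;

  /* Ten million cheap iterations; the expensive path is entered
     only when the 16-bit counter wraps around to zero.  */
  for (long i = 0; i < 10000000; i++)
    if (!++quitcounter)
      expensive_checks++;       /* stand-in for maybe_quit () etc. */

  /* Expect 10000000 / 65536, i.e. about 152.  */
  printf ("expensive checks: %ld\n", expensive_checks);
  return 0;
}

Moving the counter from the old file-scope rbc_quitcounter into the
context also ties its lifetime to a single call, so it no longer needs
the manual resets the patch deletes.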