Diffstat (limited to 'rts/Linker.c')
-rw-r--r--  rts/Linker.c | 61
1 file changed, 54 insertions(+), 7 deletions(-)
diff --git a/rts/Linker.c b/rts/Linker.c
index 544e7675f0..7bd2e67278 100644
--- a/rts/Linker.c
+++ b/rts/Linker.c
@@ -1020,7 +1020,7 @@ mmap_again:
map_addr = mmap_32bit_base;
}
- const int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
+ const int prot = PROT_READ | PROT_WRITE;
IF_DEBUG(linker,
debugBelch("mmapForLinker: \tprotection %#0x\n", prot));
IF_DEBUG(linker,
@@ -1091,6 +1091,40 @@ mmap_again:
return result;
}
+
+/* Note [Memory protection in the linker]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * For many years the linker would simply map all of its memory
+ * with PROT_READ|PROT_WRITE|PROT_EXEC. However, operating systems have
+ * become increasingly reluctant to accept this practice (e.g. #17353,
+ * #12657), and for good reason: writable code is ripe for exploitation.
+ *
+ * Consequently mmapForLinker now maps its memory with PROT_READ|PROT_WRITE.
+ * After the linker has finished filling/relocating the mapping it must then
+ * call mmapForLinkerMarkExecutable on the sections of the mapping which
+ * contain executable code.
+ *
+ * Note that the m32 allocator handles protection of its allocations. For this
+ * reason the caller of m32_alloc() must choose the appropriate allocator for
+ * the allocation: one created with m32_allocator_new(true) for executable
+ * code, or one created with m32_allocator_new(false) for writable data. The
+ * caller must then ensure that they call m32_allocator_flush() after they are
+ * finished filling the region, which will cause the allocator to change the
+ * protection bits of executable allocations to PROT_READ|PROT_EXEC.
+ *
+ */
+
+/*
+ * Mark a portion of a mapping previously reserved by mmapForLinker
+ * as executable (but not writable).
+ */
+void mmapForLinkerMarkExecutable(void *start, size_t len)
+{
+ IF_DEBUG(linker,
+ debugBelch("mmapForLinkerMarkExecutable: protecting %" FMT_Word
+ " bytes starting at %p\n", (W_)len, start));
+ if (mprotect(start, len, PROT_READ|PROT_EXEC) == -1) {
+ barf("mmapForLinkerMarkExecutable: mprotect: %s\n", strerror(errno));
+ }
+}
#endif
/*
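
The hunks above implement the two-phase protocol described in Note [Memory protection
in the linker]: map writable memory, fill and relocate it, then flip it to read/execute.
As a rough illustration (not part of this patch), the same write-then-protect pattern can
be expressed with plain POSIX mmap/mprotect; the helper name and error handling below are
illustrative only.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>

    /* Illustrative only: map writable memory, fill it, then flip it to
     * read/execute -- the same ordering the linker now follows. */
    static void *copyToExecutablePage(const unsigned char *code, size_t len)
    {
        /* Phase 1: writable but not executable, as mmapForLinker now does. */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            fprintf(stderr, "mmap: %s\n", strerror(errno));
            exit(EXIT_FAILURE);
        }
        memcpy(p, code, len);

        /* Phase 2: once filling is done, drop the write permission and add
         * execute permission, as mmapForLinkerMarkExecutable does. */
        if (mprotect(p, len, PROT_READ | PROT_EXEC) == -1) {
            fprintf(stderr, "mprotect: %s\n", strerror(errno));
            exit(EXIT_FAILURE);
        }
        return p;
    }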
@@ -1184,8 +1218,7 @@ void freeObjectCode (ObjectCode *oc)
IF_DEBUG(zero_on_gc,
memset(oc->sections[i].start,
0x00, oc->sections[i].size));
- m32_free(oc->sections[i].start,
- oc->sections[i].size);
+ // Freed by m32_allocator_free
break;
#endif
case SECTION_MALLOC:
@@ -1215,7 +1248,7 @@ void freeObjectCode (ObjectCode *oc)
if (RTS_LINKER_USE_MMAP) {
if (!USE_CONTIGUOUS_MMAP && !RtsFlags.MiscFlags.linkerAlwaysPic &&
oc->symbol_extras != NULL) {
- m32_free(oc->symbol_extras, sizeof(SymbolExtra) * oc->n_symbol_extras);
+ // Freed by m32_allocator_free
}
}
else {
@@ -1230,7 +1263,11 @@ void freeObjectCode (ObjectCode *oc)
ocDeinit_ELF(oc);
#endif
- m32_allocator_free(oc->m32);
+#if RTS_LINKER_USE_MMAP == 1
+ m32_allocator_free(oc->rx_m32);
+ m32_allocator_free(oc->rw_m32);
+#endif
+
stgFree(oc->fileName);
stgFree(oc->archiveMemberName);
@@ -1310,7 +1347,8 @@ mkOc( pathchar *path, char *image, int imageSize,
oc->next = NULL;
#if RTS_LINKER_USE_MMAP
- oc->m32 = m32_allocator_new();
+ oc->rw_m32 = m32_allocator_new(false);
+ oc->rx_m32 = m32_allocator_new(true);
#endif
IF_DEBUG(linker, debugBelch("mkOc: done\n"));
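
With two allocators per object, callers route each allocation to the allocator whose
protection matches its use. A hedged sketch of that choice follows; the helper name is
hypothetical, and the m32_alloc(allocator, size, alignment) signature is assumed here
rather than shown in this diff.

    /* Hypothetical helper, not part of this patch: route a section's storage
     * to the allocator whose protection matches its use.  Assumes
     * m32_alloc(allocator, size, alignment), which this diff does not show. */
    static void *allocateSectionMemory(ObjectCode *oc, size_t size,
                                       size_t align, bool is_code)
    {
        /* Code goes to the allocator created with m32_allocator_new(true);
         * its pages end up PROT_READ|PROT_EXEC after the flush in ocTryLoad.
         * Data goes to the read/write allocator and stays writable. */
        m32_allocator *alloc = is_code ? oc->rx_m32 : oc->rw_m32;
        return m32_alloc(alloc, size, align);
    }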
@@ -1631,7 +1669,16 @@ int ocTryLoad (ObjectCode* oc) {
# endif
if (!r) { return r; }
- m32_allocator_flush(oc->m32);
+#if defined(NEED_SYMBOL_EXTRAS)
+ ocProtectExtras(oc);
+#endif
+
+ // We have finished loading and relocating; flush the m32 allocators to
+ // setup page protections.
+#if RTS_LINKER_USE_MMAP
+ m32_allocator_flush(oc->rx_m32);
+ m32_allocator_flush(oc->rw_m32);
+#endif
// run init/init_array/ctors/mod_init_func
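
Taken together, the mkOc, ocTryLoad, and freeObjectCode hunks give each object a pair of
allocators with the lifecycle sketched below. The sketch only restates the calls visible
in this patch; the function name is invented for illustration.

    /* Illustration only: the allocator lifecycle implied by this patch. */
    static void m32LifecycleSketch(ObjectCode *oc)
    {
        /* mkOc: one allocator for writable data, one for executable code. */
        oc->rw_m32 = m32_allocator_new(false);
        oc->rx_m32 = m32_allocator_new(true);

        /* ... load sections and apply relocations, allocating small blocks
         * from rw_m32 or rx_m32 as appropriate ... */

        /* ocTryLoad: after relocation, flushing applies the final protection
         * bits (rx_m32 pages become PROT_READ|PROT_EXEC). */
        m32_allocator_flush(oc->rx_m32);
        m32_allocator_flush(oc->rw_m32);

        /* freeObjectCode: freeing the allocators releases every allocation
         * made from them, which is why the per-section m32_free calls were
         * removed. */
        m32_allocator_free(oc->rx_m32);
        m32_allocator_free(oc->rw_m32);
    }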