author     David Carlier <devnexen@gmail.com>       2018-11-28 10:29:34 +0000
committer  Nikita Popov <nikita.ppv@gmail.com>      2019-01-30 12:01:44 +0100
commit     6a8260a0ac09792bf07138efbff85e7730a9750c (patch)
tree       4ff463cc0da54a07be89c87db2388d96a8d20b5d /ext
parent     a45361949934c580f2798c653169c406cf415768 (diff)
download   php-git-6a8260a0ac09792bf07138efbff85e7730a9750c.tar.gz
opcache/FreeBSD huge code page pragma support
Following up on the earlier super pages support, this change also detects existing page mappings that may be eligible for promotion to super pages.
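For context: the FreeBSD path added below walks the process's own address space through the kern.proc.vmmap sysctl and picks out read-only, executable, file-backed entries as remap candidates. The following stand-alone sketch (not part of the patch; buffer sizing, error handling and output are simplified and illustrative) shows that enumeration on its own:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid() };
	size_t len = 0;

	/* First call only reports how many bytes of entries are available. */
	if (sysctl(mib, 4, NULL, &len, NULL, 0) != 0)
		return 1;

	len = len * 4 / 3;              /* headroom, the map can grow meanwhile */
	char *buf = malloc(len);
	if (buf == NULL || sysctl(mib, 4, buf, &len, NULL, 0) != 0)
		return 1;

	/* Entries are variable-sized; kve_structsize gives the stride. */
	for (char *p = buf, *end = buf + len; p < end; ) {
		struct kinfo_vmentry *e = (struct kinfo_vmentry *)p;
		if (e->kve_structsize == 0)
			break;
		if ((e->kve_protection & KVME_PROT_READ) &&
		    !(e->kve_protection & KVME_PROT_WRITE) &&
		    (e->kve_protection & KVME_PROT_EXEC) &&
		    e->kve_path[0] != '\0')
			printf("candidate: %#jx-%#jx %s\n",
			       (uintmax_t)e->kve_start, (uintmax_t)e->kve_end,
			       e->kve_path);
		p += e->kve_structsize;
	}
	free(buf);
	return 0;
}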
Diffstat (limited to 'ext')
-rw-r--r--  ext/opcache/ZendAccelerator.c    40
-rw-r--r--  ext/opcache/shared_alloc_mmap.c   9
2 files changed, 48 insertions(+), 1 deletion(-)
diff --git a/ext/opcache/ZendAccelerator.c b/ext/opcache/ZendAccelerator.c
index 5e6092f471..4b73c6a119 100644
--- a/ext/opcache/ZendAccelerator.c
+++ b/ext/opcache/ZendAccelerator.c
@@ -2624,6 +2624,12 @@ static void accel_gen_system_id(void)
# ifndef MAP_FAILED
# define MAP_FAILED ((void*)-1)
# endif
+# ifdef MAP_ALIGNED_SUPER
+# include <sys/types.h>
+# include <sys/sysctl.h>
+# include <sys/user.h>
+# define MAP_HUGETLB MAP_ALIGNED_SUPER
+# endif
# endif
# if defined(MAP_HUGETLB) || defined(MADV_HUGEPAGE)
@@ -2689,6 +2695,7 @@ static int accel_remap_huge_pages(void *start, size_t size, const char *name, si
static void accel_move_code_to_huge_pages(void)
{
+#if defined(__linux__)
FILE *f;
long unsigned int huge_page_size = 2 * 1024 * 1024;
@@ -2710,6 +2717,39 @@ static void accel_move_code_to_huge_pages(void)
}
fclose(f);
}
+#elif defined(__FreeBSD__)
+ size_t s = 0;
+ int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
+ long unsigned int huge_page_size = 2 * 1024 * 1024;
+ if(sysctl(mib, 4, NULL, &s, NULL, 0) == 0) {
+ void *addr = mmap(NULL, s * sizeof (struct kinfo_vmentry), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+ if (addr != MAP_FAILED) {
+ s = s * 4 / 3;
+ if (sysctl(mib, 4, addr, &s, NULL, 0) == 0) {
+ uintptr_t start = (uintptr_t)addr;
+ uintptr_t end = start + s;
+ while (start < end) {
+ struct kinfo_vmentry *entry = (struct kinfo_vmentry *)start;
+ size_t sz = entry->kve_structsize;
+ if (sz == 0) {
+ break;
+ }
+ int permflags = entry->kve_protection;
+ if ((permflags & KVME_PROT_READ) && !(permflags & KVME_PROT_WRITE) &&
+ (permflags & KVME_PROT_EXEC) && entry->kve_path[0] != '\0') {
+ long unsigned int seg_start = ZEND_MM_ALIGNED_SIZE_EX(start, huge_page_size);
+ long unsigned int seg_end = (end & ~(huge_page_size-1L));
+ if (seg_end > seg_start) {
+ zend_accel_error(ACCEL_LOG_DEBUG, "remap to huge page %lx-%lx %s \n", seg_start, seg_end, entry->kve_path);
+ accel_remap_huge_pages((void*)seg_start, seg_end - seg_start, entry->kve_path, entry->kve_offset + seg_start - start);
+ }
+ }
+ start += sz;
+ }
+ }
+ }
+ }
+#endif
}
# else
static void accel_move_code_to_huge_pages(void)
diff --git a/ext/opcache/shared_alloc_mmap.c b/ext/opcache/shared_alloc_mmap.c
index 121a2e4a40..6d72e5b4ab 100644
--- a/ext/opcache/shared_alloc_mmap.c
+++ b/ext/opcache/shared_alloc_mmap.c
@@ -32,6 +32,9 @@
#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
# define MAP_ANONYMOUS MAP_ANON
#endif
+#if defined(MAP_ALIGNED_SUPER)
+# define MAP_HUGETLB MAP_ALIGNED_SUPER
+#endif
static int create_segments(size_t requested_size, zend_shared_segment ***shared_segments_p, int *shared_segments_count, char **error_in)
{
@@ -48,10 +51,14 @@ static int create_segments(size_t requested_size, zend_shared_segment ***shared_
#ifdef MAP_HUGETLB
/* Try to allocate huge pages first to reduce dTLB misses.
- * OS has to be configured properly
+ * OSes have to be configured properly
+ * on Linux
* (e.g. https://wiki.debian.org/Hugepages#Enabling_HugeTlbPage)
* You may verify huge page usage with the following command:
* `grep "Huge" /proc/meminfo`
+ * on FreeBSD
+ * check the vm.pmap.pg_ps_enabled sysctl entry
+ * (boot-time config only, but enabled by default on most arches).
*/
shared_segment->p = mmap(0, requested_size, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS|MAP_HUGETLB, -1, 0);
if (shared_segment->p != MAP_FAILED) {
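For reference, here is a stand-alone sketch (not part of the patch; FreeBSD-oriented, segment size and fallback purely illustrative) of what the allocation above amounts to once MAP_HUGETLB is aliased to MAP_ALIGNED_SUPER, with a regular mapping as fallback when the aligned request fails:

#include <sys/mman.h>
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t requested_size = 32 * 1024 * 1024;   /* illustrative segment size */
	void *p = MAP_FAILED;

#ifdef MAP_ALIGNED_SUPER
	/* Ask for superpage-aligned memory first (what MAP_HUGETLB maps to here). */
	p = mmap(NULL, requested_size, PROT_READ | PROT_WRITE,
	         MAP_SHARED | MAP_ANON | MAP_ALIGNED_SUPER, -1, 0);
#endif
	if (p == MAP_FAILED) {
		/* Fall back to a regular anonymous shared mapping. */
		p = mmap(NULL, requested_size, PROT_READ | PROT_WRITE,
		         MAP_SHARED | MAP_ANON, -1, 0);
	}
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("shared segment at %p\n", p);
	return 0;
}

Whether the kernel actually promoted such a mapping can be checked afterwards with `procstat -v <pid>` (superpage-backed entries are reported with an S flag on recent FreeBSD releases), alongside the vm.pmap.pg_ps_enabled sysctl mentioned in the comment above.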