From cd16dd03737c30608b78d235b17b3ab935ed18db Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Wed, 25 May 2022 15:52:20 -0400
Subject: mm/x86: remove dead code for hugetlbpage.c

This code seems to have existed since the old days and has never been
used. Remove it.

Link: https://lkml.kernel.org/r/20220525195220.10241-1-peterx@redhat.com
Signed-off-by: Peter Xu
Acked-by: Muchun Song
Signed-off-by: Andrew Morton
---
 arch/x86/mm/hugetlbpage.c | 39 ---------------------------------------
 1 file changed, 39 deletions(-)
(limited to 'arch')

diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index a0d023cb4292..509408da0da1 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -19,44 +19,6 @@
 #include
 #include
 
-#if 0	/* This is just for testing */
-struct page *
-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
-{
-	unsigned long start = address;
-	int length = 1;
-	int nr;
-	struct page *page;
-	struct vm_area_struct *vma;
-
-	vma = find_vma(mm, addr);
-	if (!vma || !is_vm_hugetlb_page(vma))
-		return ERR_PTR(-EINVAL);
-
-	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
-
-	/* hugetlb should be locked, and hence, prefaulted */
-	WARN_ON(!pte || pte_none(*pte));
-
-	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
-
-	WARN_ON(!PageHead(page));
-
-	return page;
-}
-
-int pmd_huge(pmd_t pmd)
-{
-	return 0;
-}
-
-int pud_huge(pud_t pud)
-{
-	return 0;
-}
-
-#else
-
 /*
  * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
  * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
@@ -72,7 +34,6 @@ int pud_huge(pud_t pud)
 {
 	return !!(pud_val(pud) & _PAGE_PSE);
 }
-#endif
 
 #ifdef CONFIG_HUGETLB_PAGE
 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
-- cgit v1.2.1

From d92725256b4f22d084b813b37ddc394da79aacab Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Mon, 30 May 2022 14:34:50 -0400
Subject: mm: avoid unnecessary page fault retries on shared memory types

I observed that for shared file-backed page faults, we are very likely to
retry one more time for the first write fault upon a missing page. That
is because we need to release the mmap lock for dirty rate limiting via
balance_dirty_pages_ratelimited() (in fault_dirty_shared_page()), and
after that throttling we return VM_FAULT_RETRY.

We did that probably because VM_FAULT_RETRY is the only way we can return
to the fault handler at that time telling it we've released the mmap lock.

However that's not ideal, because the fault very likely does not need to
be retried at all: the pgtable entry was already installed before the
throttling, so the follow-up fault (taking the mmap read lock, walking
the pgtable, etc.) is in most cases unnecessary. This not only slows down
page faults on shared file-backed memory, but also adds mmap lock
contention that is in most cases not needed at all.

To observe this, one can write to some shmem pages and look at the
"pgfault" value in /proc/vmstat: expect 2 counts for each first shmem
write, simply because we retried and the "pgfault" vm event captures
both attempts.

To make this more efficient, add a new VM_FAULT_COMPLETED return code to
show that we've completed the whole fault and released the lock. It's
also a hint that we should very possibly not need another fault
immediately on this page, because we've just completed it.
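The doubled count can be observed from userspace with a minimal C sketch
like the one below (hypothetical test code, not part of the patch: it
assumes memfd_create() is available and samples the system-wide "pgfault"
counter, so it should run on an otherwise idle machine). On a pre-patch
kernel the reported delta is roughly twice the page count; with this patch
it drops to roughly the page count:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

static unsigned long read_pgfault(void)
{
	char line[128];
	unsigned long val = 0;
	FILE *f = fopen("/proc/vmstat", "r");

	while (f && fgets(line, sizeof(line), f))
		if (sscanf(line, "pgfault %lu", &val) == 1)
			break;
	if (f)
		fclose(f);
	return val;
}

int main(void)
{
	size_t len = 400UL << 20;	/* 400MB shmem file, as in the test below */
	int fd = memfd_create("shmem-test", 0);
	unsigned long before, after;
	size_t off;
	char *p;

	if (fd < 0 || ftruncate(fd, len))
		return 1;
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	before = read_pgfault();
	for (off = 0; off < len; off += 4096)
		p[off] = 1;		/* 1st write fault on each page */
	after = read_pgfault();

	printf("pages: %zu, pgfault delta: %lu\n", len / 4096, after - before);
	return 0;
}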
This patch provides a ~12% perf boost on my aarch64 test VM with a simple program sequentially dirtying 400MB shmem file being mmap()ed and these are the time it needs: Before: 650.980 ms (+-1.94%) After: 569.396 ms (+-1.38%) I believe it could help more than that. We need some special care on GUP and the s390 pgfault handler (for gmap code before returning from pgfault), the rest changes in the page fault handlers should be relatively straightforward. Another thing to mention is that mm_account_fault() does take this new fault as a generic fault to be accounted, unlike VM_FAULT_RETRY. I explicitly didn't touch hmm_vma_fault() and break_ksm() because they do not handle VM_FAULT_RETRY even with existing code, so I'm literally keeping them as-is. Link: https://lkml.kernel.org/r/20220530183450.42886-1-peterx@redhat.com Signed-off-by: Peter Xu Acked-by: Geert Uytterhoeven Acked-by: Peter Zijlstra (Intel) Acked-by: Johannes Weiner Acked-by: Vineet Gupta Acked-by: Guo Ren Acked-by: Max Filippov Acked-by: Christian Borntraeger Acked-by: Michael Ellerman (powerpc) Acked-by: Catalin Marinas Reviewed-by: Alistair Popple Reviewed-by: Ingo Molnar Acked-by: Russell King (Oracle) [arm part] Acked-by: Heiko Carstens Cc: Vasily Gorbik Cc: Stafford Horne Cc: David S. Miller Cc: Johannes Berg Cc: Brian Cain Cc: Richard Henderson Cc: Richard Weinberger Cc: Benjamin Herrenschmidt Cc: Thomas Gleixner Cc: Janosch Frank Cc: Albert Ou Cc: Anton Ivanov Cc: Dave Hansen Cc: Borislav Petkov Cc: Sven Schnelle Cc: Andrea Arcangeli Cc: James Bottomley Cc: Al Viro Cc: Alexander Gordeev Cc: Jonas Bonn Cc: Will Deacon Cc: Vlastimil Babka Cc: Michal Simek Cc: Matt Turner Cc: Paul Mackerras Cc: David Hildenbrand Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Stefan Kristiansson Cc: Paul Walmsley Cc: Ivan Kokshaysky Cc: Chris Zankel Cc: Hugh Dickins Cc: Dinh Nguyen Cc: Rich Felker Cc: H. 
Peter Anvin Cc: Andy Lutomirski Cc: Thomas Bogendoerfer Cc: Helge Deller Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/alpha/mm/fault.c | 4 ++++ arch/arc/mm/fault.c | 4 ++++ arch/arm/mm/fault.c | 4 ++++ arch/arm64/mm/fault.c | 4 ++++ arch/csky/mm/fault.c | 4 ++++ arch/hexagon/mm/vm_fault.c | 4 ++++ arch/ia64/mm/fault.c | 4 ++++ arch/m68k/mm/fault.c | 4 ++++ arch/microblaze/mm/fault.c | 4 ++++ arch/mips/mm/fault.c | 4 ++++ arch/nios2/mm/fault.c | 4 ++++ arch/openrisc/mm/fault.c | 4 ++++ arch/parisc/mm/fault.c | 4 ++++ arch/powerpc/mm/copro_fault.c | 5 +++++ arch/powerpc/mm/fault.c | 5 +++++ arch/riscv/mm/fault.c | 4 ++++ arch/s390/mm/fault.c | 12 ++++++++++++ arch/sh/mm/fault.c | 4 ++++ arch/sparc/mm/fault_32.c | 4 ++++ arch/sparc/mm/fault_64.c | 5 +++++ arch/um/kernel/trap.c | 4 ++++ arch/x86/mm/fault.c | 4 ++++ arch/xtensa/mm/fault.c | 4 ++++ 23 files changed, 103 insertions(+) (limited to 'arch') diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index ec20c1004abf..ef427a6bdd1a 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -155,6 +155,10 @@ retry: if (fault_signal_pending(fault, regs)) return; + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index dad27e4d69ff..5ca59a482632 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -146,6 +146,10 @@ retry: return; } + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + /* * Fault retry nuances, mmap_lock already relinquished by core mm */ diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index a062e07516dd..46cccd6bf705 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -322,6 +322,10 @@ retry: return 0; } + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return 0; + if (!(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index c5e11768e5c1..de166cdeb89a 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -608,6 +608,10 @@ retry: return 0; } + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return 0; + if (fault & VM_FAULT_RETRY) { mm_flags |= FAULT_FLAG_TRIED; goto retry; diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c index 7215a46b6b8e..e15f736cca4b 100644 --- a/arch/csky/mm/fault.c +++ b/arch/csky/mm/fault.c @@ -285,6 +285,10 @@ good_area: return; } + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c index 4fac4b9eb316..f73c7cbfe326 100644 --- a/arch/hexagon/mm/vm_fault.c +++ b/arch/hexagon/mm/vm_fault.c @@ -96,6 +96,10 @@ good_area: if (fault_signal_pending(fault, regs)) return; + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + /* The most common case -- we are done. 
*/ if (likely(!(fault & VM_FAULT_ERROR))) { if (fault & VM_FAULT_RETRY) { diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 07379d1a227f..ef78c2d66cdd 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -139,6 +139,10 @@ retry: if (fault_signal_pending(fault, regs)) return; + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { /* * We ran out of memory, or some other thing happened diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 71aa9f6315dc..4d2837eb3e2a 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -141,6 +141,10 @@ good_area: if (fault_signal_pending(fault, regs)) return 0; + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return 0; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index a9626e6a68af..5c40c3ebe52f 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -222,6 +222,10 @@ good_area: if (fault_signal_pending(fault, regs)) return; + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index b08bc556d30d..a27045f5a556 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -162,6 +162,10 @@ good_area: return; } + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index a32f14cd72f2..edaca0a6c1c1 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -139,6 +139,10 @@ good_area: if (fault_signal_pending(fault, regs)) return; + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index 53b760af3bb7..b4762d66e9ef 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -165,6 +165,10 @@ good_area: if (fault_signal_pending(fault, regs)) return; + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 84bc437be5cd..9ad80d4d3389 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -311,6 +311,10 @@ good_area: if (fault_signal_pending(fault, regs)) return; + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { /* * We hit a shared mapping outside of the file, or some diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index c1cb21a00884..7c507fb48182 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c @@ -65,6 +65,11 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, ret = 0; *flt = handle_mm_fault(vma, ea, is_write ? 
FAULT_FLAG_WRITE : 0, NULL); + + /* The fault is fully completed (including releasing mmap lock) */ + if (*flt & VM_FAULT_COMPLETED) + return 0; + if (unlikely(*flt & VM_FAULT_ERROR)) { if (*flt & VM_FAULT_OOM) { ret = -ENOMEM; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index d53fed4eccbd..014005428687 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -511,6 +511,10 @@ retry: if (fault_signal_pending(fault, regs)) return user_mode(regs) ? 0 : SIGBUS; + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + goto out; + /* * Handle the retry right now, the mmap_lock has been released in that * case. @@ -525,6 +529,7 @@ retry: if (unlikely(fault & VM_FAULT_ERROR)) return mm_fault_error(regs, address, fault); +out: /* * Major/minor page fault accounting. */ diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 40694f0cab9e..f2fbd1400b7c 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -326,6 +326,10 @@ good_area: if (fault_signal_pending(fault, regs)) return; + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely(fault & VM_FAULT_RETRY)) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index e173b6187ad5..973dcd05c293 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -433,6 +433,17 @@ retry: goto out_up; goto out; } + + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) { + if (gmap) { + mmap_read_lock(mm); + goto out_gmap; + } + fault = 0; + goto out; + } + if (unlikely(fault & VM_FAULT_ERROR)) goto out_up; @@ -452,6 +463,7 @@ retry: mmap_read_lock(mm); goto retry; } +out_gmap: if (IS_ENABLED(CONFIG_PGSTE) && gmap) { address = __gmap_link(gmap, current->thread.gmap_addr, address); diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index e175667b1363..acd2f5e50bfc 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -485,6 +485,10 @@ good_area: if (mm_fault_error(regs, error_code, address, fault)) return; + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index ad569d9bd124..91259f291c54 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -190,6 +190,10 @@ good_area: if (fault_signal_pending(fault, regs)) return; + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 253e07043298..4acc12eafbf5 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -427,6 +427,10 @@ good_area: if (fault_signal_pending(fault, regs)) goto exit_exception; + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + goto lock_released; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -449,6 +453,7 @@ good_area: } mmap_read_unlock(mm); +lock_released: mm_rss = get_mm_rss(mm); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE)); diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index d1d5d0be0308..d3ce21c4ca32 100644 --- a/arch/um/kernel/trap.c 
+++ b/arch/um/kernel/trap.c @@ -76,6 +76,10 @@ good_area: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) goto out_nosemaphore; + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return 0; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) { goto out_of_memory; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index fad8faa29d04..fe10c6d76bac 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1408,6 +1408,10 @@ good_area: return; } + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + /* * If we need to retry the mmap_lock has already been released, * and if there is a fatal signal pending there is no guarantee diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 16f0a5ff5799..8c781b05c0bd 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -172,6 +172,10 @@ good_area: return; } + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; -- cgit v1.2.1 From ee65728e103bb7dd99d8604bf6c7aa89c7d7e446 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Mon, 27 Jun 2022 09:00:26 +0300 Subject: docs: rename Documentation/vm to Documentation/mm so it will be consistent with code mm directory and with Documentation/admin-guide/mm and won't be confused with virtual machines. Signed-off-by: Mike Rapoport Suggested-by: Matthew Wilcox Tested-by: Ira Weiny Acked-by: Jonathan Corbet Acked-by: Wu XiangCheng --- arch/loongarch/Kconfig | 2 +- arch/powerpc/include/asm/book3s/64/pgtable.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 1920d52653b4..db2838cf8c02 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -410,7 +410,7 @@ config ARCH_SPARSEMEM_ENABLE Say Y to support efficient handling of sparse physical memory, for architectures which are either NUMA (Non-Uniform Memory Access) or have huge holes in the physical address space for other reasons. - See for more. + See for more. config ARCH_ENABLE_THP_MIGRATION def_bool y diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index cb9d5fd39d7f..392ff48f77df 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1273,7 +1273,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, * should return true. * We should not call this on a hugetlb entry. We should check for HugeTLB * entry using vma->vm_flags - * The page table walk rule is explained in Documentation/vm/transhuge.rst + * The page table walk rule is explained in Documentation/mm/transhuge.rst */ static inline int pmd_trans_huge(pmd_t pmd) { -- cgit v1.2.1 From e33c267ab70de4249d22d7eab1cc7d68a889bac2 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Tue, 31 May 2022 20:22:24 -0700 Subject: mm: shrinkers: provide shrinkers with names Currently shrinkers are anonymous objects. For debugging purposes they can be identified by count/scan function names, but it's not always useful: e.g. for superblock's shrinkers it's nice to have at least an idea of to which superblock the shrinker belongs. This commit adds names to shrinkers. 
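A hypothetical caller-side sketch of the resulting API (the "demo"
identifiers and no-op callbacks are invented for illustration; the exact
interface is described next):

static unsigned long demo_count_objects(struct shrinker *s,
					struct shrink_control *sc)
{
	return 0;	/* nothing to reclaim in this demo */
}

static unsigned long demo_scan_objects(struct shrinker *s,
				       struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count_objects,
	.scan_objects	= demo_scan_objects,
	.seeks		= DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
	/* was: register_shrinker(&demo_shrinker);
	 * now a printf-style name is passed as well; it becomes the
	 * entry's directory name under /sys/kernel/debug/shrinker/ */
	return register_shrinker(&demo_shrinker, "demo-%s", "cache");
}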
register_shrinker() and prealloc_shrinker() functions are extended to take
a format string and arguments to construct a name. In some cases it's not
possible to determine a good name at the time when a shrinker is
allocated; for such cases shrinker_debugfs_rename() is provided.

The expected format is:
<subsystem>-<name>[:<instance>]-<id>

For some shrinkers an instance can be encoded as a (MAJOR:MINOR) pair.

After this change the shrinker debugfs directory looks like:

$ cd /sys/kernel/debug/shrinker/
$ ls
dquota-cache-16     sb-devpts-28     sb-proc-47       sb-tmpfs-42
mm-shadow-18        sb-devtmpfs-5    sb-proc-48       sb-tmpfs-43
mm-zspool:zram0-34  sb-hugetlbfs-17  sb-pstore-31     sb-tmpfs-44
rcu-kfree-0         sb-hugetlbfs-33  sb-rootfs-2      sb-tmpfs-49
sb-aio-20           sb-iomem-12      sb-securityfs-6  sb-tracefs-13
sb-anon_inodefs-15  sb-mqueue-21     sb-selinuxfs-22  sb-xfs:vda1-36
sb-bdev-3           sb-nsfs-4        sb-sockfs-8      sb-zsmalloc-19
sb-bpf-32           sb-pipefs-14     sb-sysfs-26      thp-deferred_split-10
sb-btrfs:vda2-24    sb-proc-25       sb-tmpfs-1       thp-zero-9
sb-cgroup2-30       sb-proc-39       sb-tmpfs-27      xfs-buf:vda1-37
sb-configfs-23      sb-proc-41       sb-tmpfs-29      xfs-inodegc:vda1-38
sb-dax-11           sb-proc-45       sb-tmpfs-35
sb-debugfs-7        sb-proc-46       sb-tmpfs-40

[roman.gushchin@linux.dev: fix build warnings]
Link: https://lkml.kernel.org/r/Yr+ZTnLb9lJk6fJO@castle
Reported-by: kernel test robot
Link: https://lkml.kernel.org/r/20220601032227.4076670-4-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin
Cc: Christophe JAILLET
Cc: Dave Chinner
Cc: Hillf Danton
Cc: Kent Overstreet
Cc: Muchun Song
Signed-off-by: Andrew Morton
---
 arch/x86/kvm/mmu/mmu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch')

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 17252f39bd7c..797d3286ecc1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6317,7 +6317,7 @@ int kvm_mmu_vendor_module_init(void)
 	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
 		goto out;
 
-	ret = register_shrinker(&mmu_shrinker);
+	ret = register_shrinker(&mmu_shrinker, "x86-mmu");
 	if (ret)
 		goto out;
-- cgit v1.2.1

From 18f3962953e40401b7ed98e8524167282c3e626e Mon Sep 17 00:00:00 2001
From: Qi Zheng
Date: Sun, 26 Jun 2022 22:57:17 +0800
Subject: mm: hugetlb: kill set_huge_swap_pte_at()

Commit e5251fd43007 ("mm/hugetlb: introduce set_huge_swap_pte_at()
helper") added set_huge_swap_pte_at() to handle swap entries on
architectures that support hugepages consisting of contiguous ptes, and
currently it is only overridden by arm64.

set_huge_swap_pte_at() provides a sz parameter to help determine the
number of entries to be updated. But in fact, all hugetlb swap entries
contain pfn information, so we can find the corresponding folio through
the pfn recorded in the swap entry, and folio_size() then gives us the
size from which the number of entries to update can be derived. Moreover,
users can easily introduce bugs by overlooking the difference between
set_huge_swap_pte_at() and set_huge_pte_at().

Let's handle swap entries in set_huge_pte_at() and remove
set_huge_swap_pte_at(); then we can call set_huge_pte_at() anywhere,
which simplifies the code.
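The caller-visible effect can be sketched on a hypothetical call site,
where swp_pte holds a hugetlb swap (migration or hwpoison) entry:

	/* before: callers had to pick the swap-aware helper and pass a size */
	set_huge_swap_pte_at(mm, addr, ptep, swp_pte, huge_page_size(h));

	/* after: one helper covers present and swap entries alike; the
	 * number of contiguous ptes is derived from the pfn stored in
	 * the swap entry itself */
	set_huge_pte_at(mm, addr, ptep, swp_pte);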
Link: https://lkml.kernel.org/r/20220626145717.53572-1-zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Acked-by: Muchun Song Cc: Mike Kravetz Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/arm64/include/asm/hugetlb.h | 3 --- arch/arm64/mm/hugetlbpage.c | 34 +++++++++++++++++----------------- 2 files changed, 17 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h index 1fd2846dbefe..d20f5da2d76f 100644 --- a/arch/arm64/include/asm/hugetlb.h +++ b/arch/arm64/include/asm/hugetlb.h @@ -46,9 +46,6 @@ extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long sz); #define __HAVE_ARCH_HUGE_PTEP_GET extern pte_t huge_ptep_get(pte_t *ptep); -extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte, unsigned long sz); -#define set_huge_swap_pte_at set_huge_swap_pte_at void __init arm64_hugetlb_cma_reserve(void); diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index e2a5ec9fdc0d..3be8f25aa5be 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -238,6 +238,13 @@ static void clear_flush(struct mm_struct *mm, flush_tlb_range(&vma, saddr, addr); } +static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry) +{ + VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry)); + + return page_folio(pfn_to_page(swp_offset(entry))); +} + void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { @@ -247,11 +254,16 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, unsigned long pfn, dpfn; pgprot_t hugeprot; - /* - * Code needs to be expanded to handle huge swap and migration - * entries. Needed for HUGETLB and MEMORY_FAILURE. - */ - WARN_ON(!pte_present(pte)); + if (!pte_present(pte)) { + struct folio *folio; + + folio = hugetlb_swap_entry_to_folio(pte_to_swp_entry(pte)); + ncontig = num_contig_ptes(folio_size(folio), &pgsize); + + for (i = 0; i < ncontig; i++, ptep++) + set_pte_at(mm, addr, ptep, pte); + return; + } if (!pte_cont(pte)) { set_pte_at(mm, addr, ptep, pte); @@ -269,18 +281,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); } -void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte, unsigned long sz) -{ - int i, ncontig; - size_t pgsize; - - ncontig = num_contig_ptes(sz, &pgsize); - - for (i = 0; i < ncontig; i++, ptep++) - set_pte(ptep, pte); -} - pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { -- cgit v1.2.1 From 1bcdb769f9e0ad7a17891772e6b5414a2945714f Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 21 Jun 2022 16:56:18 -0700 Subject: arm64/hugetlb: implement arm64 specific hugetlb_mask_last_page The HugeTLB address ranges are linearly scanned during fork, unmap and remap operations, and the linear scan can skip to the end of range mapped by the page table page if hitting a non-present entry, which can help to speed linear scanning of the HugeTLB address ranges. So hugetlb_mask_last_page() is introduced to help to update the address in the loop of HugeTLB linear scanning with getting the last huge page mapped by the associated page table page[1], when a non-present entry is encountered. Considering ARM64 specific cont-pte/pmd size HugeTLB, this patch implemented an ARM64 specific hugetlb_mask_last_page() to help this case. 
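For context, a simplified sketch of how a HugeTLB range walker consumes
the mask (this mirrors the generic pattern from [1] below, but is not the
exact kernel loop): when huge_pte_offset() finds no page table, OR-ing the
mask into the address jumps the cursor to the last huge-page slot covered
by the missing table, so the loop's usual increment steps over the whole
hole. On arm64 with 4K granule, for example, a missing table of PMD-sized
hugepages yields PUD_SIZE - PMD_SIZE, letting one miss skip the rest of a
1GB region.

	unsigned long sz = huge_page_size(h);
	unsigned long last_addr_mask = hugetlb_mask_last_page(h);
	unsigned long address;
	pte_t *ptep;

	for (address = start; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address, sz);
		if (!ptep) {
			/* no page table: skip to the last entry the
			 * missing table would have mapped */
			address |= last_addr_mask;
			continue;
		}
		/* ... process the huge pte at this address ... */
	}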
[1] https://lore.kernel.org/linux-mm/20220527225849.284839-1-mike.kravetz@oracle.com/ [baolin.wang@linux.alibaba.com: fix build] Link: https://lkml.kernel.org/r/a14e7b39-6a8a-4609-b4a1-84ac574f5c96@linux.alibaba.com Link: https://lkml.kernel.org/r/20220621235620.291305-3-mike.kravetz@oracle.com Signed-off-by: Baolin Wang Signed-off-by: Mike Kravetz Acked-by: Muchun Song Cc: "Aneesh Kumar K.V" Cc: Anshuman Khandual Cc: Catalin Marinas Cc: Christian Borntraeger Cc: David Hildenbrand Cc: James Houghton Cc: kernel test robot Cc: Michal Hocko Cc: Mina Almasry Cc: Naoya Horiguchi Cc: Paul Walmsley Cc: Peter Xu Cc: Rolf Eike Beer Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/arm64/mm/hugetlbpage.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'arch') diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 3be8f25aa5be..7430060cb0d6 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -368,6 +368,28 @@ pte_t *huge_pte_offset(struct mm_struct *mm, return NULL; } +unsigned long hugetlb_mask_last_page(struct hstate *h) +{ + unsigned long hp_size = huge_page_size(h); + + switch (hp_size) { +#ifndef __PAGETABLE_PMD_FOLDED + case PUD_SIZE: + return PGDIR_SIZE - PUD_SIZE; +#endif + case CONT_PMD_SIZE: + return PUD_SIZE - CONT_PMD_SIZE; + case PMD_SIZE: + return PUD_SIZE - PMD_SIZE; + case CONT_PTE_SIZE: + return PMD_SIZE - CONT_PTE_SIZE; + default: + break; + } + + return 0UL; +} + pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) { size_t pagesize = 1UL << shift; -- cgit v1.2.1 From 6eac1eaf2105dd8a2daf3b47634c24fc956bc77a Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:37 +0530 Subject: powerpc/mm: move protection_map[] inside the platform This moves protection_map[] inside the platform and while here, also enable ARCH_HAS_VM_GET_PAGE_PROT on 32 bit and nohash 64 (aka book3e/64) platforms via DECLARE_VM_GET_PAGE_PROT. Link: https://lkml.kernel.org/r/20220711070600.2378316-4-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Reviewed-by: Christophe Leroy Cc: Michael Ellerman Cc: Paul Mackerras Cc: Nicholas Piggin Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. 
Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michal Simek Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/powerpc/Kconfig | 2 +- arch/powerpc/include/asm/pgtable.h | 20 +------------------- arch/powerpc/mm/pgtable.c | 24 ++++++++++++++++++++++++ 3 files changed, 26 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index c2ce2e60c8f0..1035d172c7dd 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -140,7 +140,7 @@ config PPC select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_HAS_VM_GET_PAGE_PROT if PPC_BOOK3S_64 + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_KEEP_MEMBLOCK select ARCH_MIGHT_HAVE_PC_PARPORT diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index d564d0ecd4cd..33f4bf8d22b0 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -20,25 +20,6 @@ struct mm_struct; #include #endif /* !CONFIG_PPC_BOOK3S */ -/* Note due to the way vm flags are laid out, the bits are XWR */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY_X -#define __P101 PAGE_READONLY_X -#define __P110 PAGE_COPY_X -#define __P111 PAGE_COPY_X - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_X -#define __S101 PAGE_READONLY_X -#define __S110 PAGE_SHARED_X -#define __S111 PAGE_SHARED_X - #ifndef __ASSEMBLY__ #ifndef MAX_PTRS_PER_PGD @@ -79,6 +60,7 @@ extern void paging_init(void); void poking_init(void); extern unsigned long ioremap_bot; +extern const pgprot_t protection_map[16]; /* * kern_addr_valid is intended to indicate whether an address is a valid diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index e6166b71d36d..cb2dcdb18f8e 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -472,3 +472,27 @@ out: return ret_pte; } EXPORT_SYMBOL_GPL(__find_linux_pte); + +/* Note due to the way vm flags are laid out, the bits are XWR */ +const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_READONLY_X, + [VM_EXEC | VM_READ] = PAGE_READONLY_X, + [VM_EXEC | VM_WRITE] = PAGE_COPY_X, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_READONLY_X, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_X, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X +}; + +#ifndef CONFIG_PPC_BOOK3S_64 +DECLARE_VM_GET_PAGE_PROT +#endif -- cgit v1.2.1 From 25740d31ee5b6e99ab674eaa3ecbbe3f8a6d3b8c Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:38 +0530 Subject: sparc/mm: move protection_map[] inside the platform This moves protection_map[] inside the platform and while here, also enable ARCH_HAS_VM_GET_PAGE_PROT on 32 bit platforms 
via DECLARE_VM_GET_PAGE_PROT. Link: https://lkml.kernel.org/r/20220711070600.2378316-5-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Reviewed-by: Sam Ravnborg Cc: "David S. Miller" Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/sparc/Kconfig | 2 +- arch/sparc/include/asm/pgtable_32.h | 19 ------------------- arch/sparc/include/asm/pgtable_64.h | 19 ------------------- arch/sparc/mm/init_32.c | 20 ++++++++++++++++++++ arch/sparc/mm/init_64.c | 3 +++ 5 files changed, 24 insertions(+), 39 deletions(-) (limited to 'arch') diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index ba449c47effd..09f868613a4d 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -13,6 +13,7 @@ config 64BIT config SPARC bool default y + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI select ARCH_MIGHT_HAVE_PC_SERIO select DMA_OPS @@ -84,7 +85,6 @@ config SPARC64 select PERF_USE_VMALLOC select ARCH_HAVE_NMI_SAFE_CMPXCHG select HAVE_C_RECORDMCOUNT - select ARCH_HAS_VM_GET_PAGE_PROT select HAVE_ARCH_AUDITSYSCALL select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_DEBUG_PAGEALLOC diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index 4866625da314..8ff549004fac 100644 --- a/arch/sparc/include/asm/pgtable_32.h +++ b/arch/sparc/include/asm/pgtable_32.h @@ -64,25 +64,6 @@ void paging_init(void); extern unsigned long ptr_in_current_pgd; -/* xwr */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED - /* First physical page can be anywhere, the following is needed so that * va-->pa and vice versa conversions work properly without performance * hit for all __pa()/__va() operations. 
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 4679e45c8348..a779418ceba9 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -187,25 +187,6 @@ bool kern_addr_valid(unsigned long addr); #define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U #define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V -/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */ -#define __P000 __pgprot(0) -#define __P001 __pgprot(0) -#define __P010 __pgprot(0) -#define __P011 __pgprot(0) -#define __P100 __pgprot(0) -#define __P101 __pgprot(0) -#define __P110 __pgprot(0) -#define __P111 __pgprot(0) - -#define __S000 __pgprot(0) -#define __S001 __pgprot(0) -#define __S010 __pgprot(0) -#define __S011 __pgprot(0) -#define __S100 __pgprot(0) -#define __S101 __pgprot(0) -#define __S110 __pgprot(0) -#define __S111 __pgprot(0) - #ifndef __ASSEMBLY__ pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long); diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 1e9f577f084d..d88e774c8eb4 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -302,3 +302,23 @@ void sparc_flush_page_to_ram(struct page *page) __flush_page_to_ram(vaddr); } EXPORT_SYMBOL(sparc_flush_page_to_ram); + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_READONLY, + [VM_EXEC | VM_READ] = PAGE_READONLY, + [VM_EXEC | VM_WRITE] = PAGE_COPY, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED +}; +DECLARE_VM_GET_PAGE_PROT diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index f6174df2d5af..d6faee23c77d 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2634,6 +2634,9 @@ void vmemmap_free(unsigned long start, unsigned long end, } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ +/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */ +static pgprot_t protection_map[16] __ro_after_init; + static void prot_init_common(unsigned long page_none, unsigned long page_shared, unsigned long page_copy, -- cgit v1.2.1 From 42251045cc741adae5ffc645c8b9ee906c14d013 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:39 +0530 Subject: arm64/mm: move protection_map[] inside the platform This moves protection_map[] inside the platform and makes it a static. Link: https://lkml.kernel.org/r/20220711070600.2378316-6-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Reviewed-by: Catalin Marinas Cc: Catalin Marinas Cc: Will Deacon Cc: Arnd Bergmann Cc: Brian Cain Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. 
Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/arm64/include/asm/pgtable-prot.h | 18 ------------------ arch/arm64/mm/mmap.c | 21 +++++++++++++++++++++ 2 files changed, 21 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 62e0ebeed720..9b165117a454 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -89,24 +89,6 @@ extern bool arm64_use_ng_mappings; #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN) #define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN) -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_READONLY -#define __P011 PAGE_READONLY -#define __P100 PAGE_READONLY_EXEC /* PAGE_EXECONLY if Enhanced PAN */ -#define __P101 PAGE_READONLY_EXEC -#define __P110 PAGE_READONLY_EXEC -#define __P111 PAGE_READONLY_EXEC - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_EXEC /* PAGE_EXECONLY if Enhanced PAN */ -#define __S101 PAGE_READONLY_EXEC -#define __S110 PAGE_SHARED_EXEC -#define __S111 PAGE_SHARED_EXEC - #endif /* __ASSEMBLY__ */ #endif /* __ASM_PGTABLE_PROT_H */ diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 78e9490f748d..8f5b7ce857ed 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -13,6 +13,27 @@ #include #include +static pgprot_t protection_map[16] __ro_after_init = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_READONLY, + [VM_WRITE | VM_READ] = PAGE_READONLY, + /* PAGE_EXECONLY if Enhanced PAN */ + [VM_EXEC] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_WRITE] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_READONLY_EXEC, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + /* PAGE_EXECONLY if Enhanced PAN */ + [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC +}; + /* * You really shouldn't be using read() or write() on /dev/mem. This might go * away in the future. -- cgit v1.2.1 From 4867fbbdd6b362400d154417e08ce76b14200ba1 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:40 +0530 Subject: x86/mm: move protection_map[] inside the platform This moves protection_map[] inside the platform and makes it a static. This also defines a helper function add_encrypt_protection_map() that can update the protection_map[] array with pgprot_encrypted(). Link: https://lkml.kernel.org/r/20220711070600.2378316-7-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Reviewed-by: Christoph Hellwig Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. 
Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: "James E.J. Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/x86/include/asm/mem_encrypt.h | 2 ++ arch/x86/include/asm/pgtable_types.h | 19 ------------------- arch/x86/mm/mem_encrypt_amd.c | 6 ++---- arch/x86/mm/pgprot.c | 28 ++++++++++++++++++++++++++++ 4 files changed, 32 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 88ceaf3648b3..72ca90552b6a 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -89,6 +89,8 @@ static inline void mem_encrypt_free_decrypted_mem(void) { } /* Architecture __weak replacement functions */ void __init mem_encrypt_init(void); +void add_encrypt_protection_map(void); + /* * The __sme_pa() and __sme_pa_nodebug() macros are meant for use when * writing to or comparing values from the cr3 register. Having the diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index bdaf8391e2e0..aa174fed3a71 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -230,25 +230,6 @@ enum page_cache_mode { #endif /* __ASSEMBLY__ */ -/* xwr */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY_EXEC -#define __P101 PAGE_READONLY_EXEC -#define __P110 PAGE_COPY_EXEC -#define __P111 PAGE_COPY_EXEC - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_EXEC -#define __S101 PAGE_READONLY_EXEC -#define __S110 PAGE_SHARED_EXEC -#define __S111 PAGE_SHARED_EXEC - /* * early identity mapping pte attrib macros. 
*/ diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index f6d038e2cd8e..5c3c3ed46f5a 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -486,8 +487,6 @@ void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, boo void __init sme_early_init(void) { - unsigned int i; - if (!sme_me_mask) return; @@ -496,8 +495,7 @@ void __init sme_early_init(void) __supported_pte_mask = __sme_set(__supported_pte_mask); /* Update the protection map with memory encryption mask */ - for (i = 0; i < ARRAY_SIZE(protection_map); i++) - protection_map[i] = pgprot_encrypted(protection_map[i]); + add_encrypt_protection_map(); x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare; x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish; diff --git a/arch/x86/mm/pgprot.c b/arch/x86/mm/pgprot.c index 763742782286..c84bd9540b16 100644 --- a/arch/x86/mm/pgprot.c +++ b/arch/x86/mm/pgprot.c @@ -3,6 +3,34 @@ #include #include #include +#include + +static pgprot_t protection_map[16] __ro_after_init = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC +}; + +void add_encrypt_protection_map(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(protection_map); i++) + protection_map[i] = pgprot_encrypted(protection_map[i]); +} pgprot_t vm_get_page_prot(unsigned long vm_flags) { -- cgit v1.2.1 From fa3f9f4a912c22aadf0510bf7f4bd113da442a10 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:42 +0530 Subject: microblaze/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220711070600.2378316-9-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Michal Simek Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. 
Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/microblaze/Kconfig | 1 + arch/microblaze/include/asm/pgtable.h | 17 ----------------- arch/microblaze/mm/init.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 8cf429ad1c84..15f91ba8a0c4 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -7,6 +7,7 @@ config MICROBLAZE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_TABLE_SORT diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index 0c72646370e1..ba348e997dbb 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -204,23 +204,6 @@ extern pte_t *va_to_pte(unsigned long address); * We consider execute permission the same as read. * Also, write permissions imply read permissions. */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY_X -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY_X -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY_X -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY_X - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY_X -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED_X -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY_X -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED_X #ifndef __ASSEMBLY__ /* diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index f4e503461d24..353fabdfcbc5 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -285,3 +285,23 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask) return p; } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY_X, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY_X, + [VM_EXEC] = PAGE_READONLY, + [VM_EXEC | VM_READ] = PAGE_READONLY_X, + [VM_EXEC | VM_WRITE] = PAGE_COPY, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY_X, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X, + [VM_SHARED | VM_EXEC] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X +}; +DECLARE_VM_GET_PAGE_PROT -- cgit v1.2.1 From f6d1e19c20a44cd4d2bb5451cb472648707d9b97 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:43 +0530 Subject: loongarch/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. 
Link: https://lkml.kernel.org/r/20220711070600.2378316-10-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Huacai Chen Cc: WANG Xuerui Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/loongarch/Kconfig | 1 + arch/loongarch/include/asm/pgtable-bits.h | 19 ------------- arch/loongarch/mm/cache.c | 46 +++++++++++++++++++++++++++++++ 3 files changed, 47 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index db2838cf8c02..adf8cf6ec5d5 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -9,6 +9,7 @@ config LOONGARCH select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_INLINE_READ_LOCK if !PREEMPTION select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h index 3badd112d9ab..9ca147a29bab 100644 --- a/arch/loongarch/include/asm/pgtable-bits.h +++ b/arch/loongarch/include/asm/pgtable-bits.h @@ -83,25 +83,6 @@ _PAGE_GLOBAL | _PAGE_KERN | _CACHE_SUC) #define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ _PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC) - -#define __P000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ) -#define __P001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC) -#define __P010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC) -#define __P011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC) -#define __P100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT) -#define __P101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT) -#define __P110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT) -#define __P111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT) - -#define __S000 __pgprot(_CACHE_CC | _PAGE_USER | _PAGE_PROTNONE | _PAGE_NO_EXEC | _PAGE_NO_READ) -#define __S001 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC) -#define __S010 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE) -#define __S011 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE) -#define __S100 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT) -#define __S101 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT) -#define __S110 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE) -#define __S111 __pgprot(_CACHE_CC | _PAGE_VALID | _PAGE_USER | _PAGE_PRESENT | _PAGE_WRITE) - #ifndef __ASSEMBLY__ #define pgprot_noncached pgprot_noncached diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c index 9e5ce5aa73f7..e8c68dcf6ab2 100644 --- 
a/arch/loongarch/mm/cache.c +++ b/arch/loongarch/mm/cache.c @@ -139,3 +139,49 @@ void cpu_cache_init(void) shm_align_mask = PAGE_SIZE - 1; } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = __pgprot(_CACHE_CC | _PAGE_USER | + _PAGE_PROTNONE | _PAGE_NO_EXEC | + _PAGE_NO_READ), + [VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC), + [VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC), + [VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC), + [VM_EXEC] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_EXEC | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_EXEC | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_SHARED] = __pgprot(_CACHE_CC | _PAGE_USER | + _PAGE_PROTNONE | _PAGE_NO_EXEC | + _PAGE_NO_READ), + [VM_SHARED | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC), + [VM_SHARED | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC | _PAGE_WRITE), + [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_NO_EXEC | _PAGE_WRITE), + [VM_SHARED | VM_EXEC] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT), + [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_WRITE), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID | + _PAGE_USER | _PAGE_PRESENT | + _PAGE_WRITE) +}; +DECLARE_VM_GET_PAGE_PROT -- cgit v1.2.1 From e4e4b99b80aae3aeb6827cf0b7e3ca31b1cd892e Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:44 +0530 Subject: openrisc/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220711070600.2378316-11-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Acked-by: Stafford Horne Cc: Jonas Bonn Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. 
Bottomley" Cc: Jeff Dike Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/openrisc/Kconfig | 1 + arch/openrisc/include/asm/pgtable.h | 18 ------------------ arch/openrisc/mm/init.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index e814df4c483c..fe0dfb50eb86 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -10,6 +10,7 @@ config OPENRISC select ARCH_HAS_DMA_SET_UNCACHED select ARCH_HAS_DMA_CLEAR_UNCACHED select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_HAS_VM_GET_PAGE_PROT select COMMON_CLK select OF select OF_EARLY_FLATTREE diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h index c3abbf71e09f..dcae8aea132f 100644 --- a/arch/openrisc/include/asm/pgtable.h +++ b/arch/openrisc/include/asm/pgtable.h @@ -176,24 +176,6 @@ extern void paging_init(void); __pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \ | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI) -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY_X -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY_X -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY_X -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY_X - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY_X -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED_X -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY_X -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED_X - /* zero page used for uninitialized stuff */ extern unsigned long empty_zero_page[2048]; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index 3a021ab6f1ae..d531ab82be12 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -208,3 +208,23 @@ void __init mem_init(void) mem_init_done = 1; return; } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY_X, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY_X, + [VM_EXEC] = PAGE_READONLY, + [VM_EXEC | VM_READ] = PAGE_READONLY_X, + [VM_EXEC | VM_WRITE] = PAGE_COPY, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY_X, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X, + [VM_SHARED | VM_EXEC] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X +}; +DECLARE_VM_GET_PAGE_PROT -- cgit v1.2.1 From 2de9eae10d11906a989d0e9241854dc8d2729d9e Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:45 +0530 Subject: xtensa/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. 
Link: https://lkml.kernel.org/r/20220711070600.2378316-12-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Chris Zankel Cc: Guo Ren Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/xtensa/Kconfig | 1 + arch/xtensa/include/asm/pgtable.h | 18 ------------------ arch/xtensa/mm/init.c | 22 ++++++++++++++++++++++ 3 files changed, 23 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 0b0f0172cced..4c0d83520ff1 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -11,6 +11,7 @@ config XTENSA select ARCH_HAS_DMA_SET_UNCACHED if MMU select ARCH_HAS_STRNCPY_FROM_USER if !KASAN select ARCH_HAS_STRNLEN_USER + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 0a91376131c5..e0d5531ae00d 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -200,24 +200,6 @@ * What follows is the closest we can get by reasonable means.. * See linux/mm/mmap.c for protection_map[] array that uses these definitions. 
*/ -#define __P000 PAGE_NONE /* private --- */ -#define __P001 PAGE_READONLY /* private --r */ -#define __P010 PAGE_COPY /* private -w- */ -#define __P011 PAGE_COPY /* private -wr */ -#define __P100 PAGE_READONLY_EXEC /* private x-- */ -#define __P101 PAGE_READONLY_EXEC /* private x-r */ -#define __P110 PAGE_COPY_EXEC /* private xw- */ -#define __P111 PAGE_COPY_EXEC /* private xwr */ - -#define __S000 PAGE_NONE /* shared --- */ -#define __S001 PAGE_READONLY /* shared --r */ -#define __S010 PAGE_SHARED /* shared -w- */ -#define __S011 PAGE_SHARED /* shared -wr */ -#define __S100 PAGE_READONLY_EXEC /* shared x-- */ -#define __S101 PAGE_READONLY_EXEC /* shared x-r */ -#define __S110 PAGE_SHARED_EXEC /* shared xw- */ -#define __S111 PAGE_SHARED_EXEC /* shared xwr */ - #ifndef __ASSEMBLY__ #define pte_ERROR(e) \ diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 6a32b2cf2718..b2587a1a7c46 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -216,3 +216,25 @@ static int __init parse_memmap_opt(char *str) return 0; } early_param("memmap", parse_memmap_opt); + +#ifdef CONFIG_MMU +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, + [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC +}; +DECLARE_VM_GET_PAGE_PROT +#endif -- cgit v1.2.1 From b2022dcf45540b1f08ce2ef761d9e9110d22e929 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:46 +0530 Subject: hexagon/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220711070600.2378316-13-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Acked-by: Brian Cain Cc: Arnd Bergmann Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. 
Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/hexagon/Kconfig | 1 + arch/hexagon/include/asm/pgtable.h | 27 ------------------------ arch/hexagon/mm/init.c | 42 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 43 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 54eadf265178..bc4ceecd0588 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -6,6 +6,7 @@ config HEXAGON def_bool y select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_NO_PREEMPT select DMA_GLOBAL_POOL # Other pending projects/to-do items. diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h index 0610724d6a28..f7048c18b6f9 100644 --- a/arch/hexagon/include/asm/pgtable.h +++ b/arch/hexagon/include/asm/pgtable.h @@ -126,33 +126,6 @@ extern unsigned long _dflt_cache_att; */ #define CACHEDEF (CACHE_DEFAULT << 6) -/* Private (copy-on-write) page protections. */ -#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF) -#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF) -#define __P010 __P000 /* Write-only copy-on-write */ -#define __P011 __P001 /* Read/Write copy-on-write */ -#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \ - _PAGE_EXECUTE | CACHEDEF) -#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \ - _PAGE_READ | CACHEDEF) -#define __P110 __P100 /* Write/execute copy-on-write */ -#define __P111 __P101 /* Read/Write/Execute, copy-on-write */ - -/* Shared page protections. 
*/ -#define __S000 __P000 -#define __S001 __P001 -#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \ - _PAGE_WRITE | CACHEDEF) -#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \ - _PAGE_WRITE | CACHEDEF) -#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \ - _PAGE_EXECUTE | CACHEDEF) -#define __S101 __P101 -#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \ - _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF) -#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \ - _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF) - extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */ /* HUGETLB not working currently */ diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index 3167a3b5c97b..146115c9de61 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -234,3 +234,45 @@ void __init setup_arch_memory(void) * which is called by start_kernel() later on in the process */ } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + CACHEDEF), + [VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_READ | CACHEDEF), + [VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + CACHEDEF), + [VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_READ | CACHEDEF), + [VM_EXEC] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_EXECUTE | CACHEDEF), + [VM_EXEC | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_EXECUTE | _PAGE_READ | + CACHEDEF), + [VM_EXEC | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_EXECUTE | CACHEDEF), + [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_EXECUTE | _PAGE_READ | + CACHEDEF), + [VM_SHARED] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + CACHEDEF), + [VM_SHARED | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_READ | CACHEDEF), + [VM_SHARED | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_WRITE | CACHEDEF), + [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_READ | _PAGE_WRITE | + CACHEDEF), + [VM_SHARED | VM_EXEC] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_EXECUTE | CACHEDEF), + [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_EXECUTE | _PAGE_READ | + CACHEDEF), + [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_EXECUTE | _PAGE_WRITE | + CACHEDEF), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER | + _PAGE_READ | _PAGE_EXECUTE | + _PAGE_WRITE | CACHEDEF) +}; +DECLARE_VM_GET_PAGE_PROT -- cgit v1.2.1 From 252358f1a118d8e9b0dd4f2f67bb3cd2b742e854 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:47 +0530 Subject: parisc/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220711070600.2378316-14-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: "James E.J. Bottomley" Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. 
Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/parisc/Kconfig | 1 + arch/parisc/include/asm/pgtable.h | 18 ------------------ arch/parisc/mm/init.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index fa400055b2d5..891d82393957 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -12,6 +12,7 @@ config PARISC select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_PTE_SPECIAL select ARCH_NO_SG_CHAIN select ARCH_SUPPORTS_HUGETLBFS if PA20 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 69765a6dbe89..6a1899a9b420 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -271,24 +271,6 @@ extern void __update_cache(pte_t pte); */ /*xwr*/ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 __P000 /* copy on write */ -#define __P011 __P001 /* copy on write */ -#define __P100 PAGE_EXECREAD -#define __P101 PAGE_EXECREAD -#define __P110 __P100 /* copy on write */ -#define __P111 __P101 /* copy on write */ - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_WRITEONLY -#define __S011 PAGE_SHARED -#define __S100 PAGE_EXECREAD -#define __S101 PAGE_EXECREAD -#define __S110 PAGE_RWX -#define __S111 PAGE_RWX - extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */ diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 0a81499dd35e..f03e0961fa25 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -871,3 +871,23 @@ void flush_tlb_all(void) spin_unlock(&sid_lock); } #endif + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_NONE, + [VM_WRITE | VM_READ] = PAGE_READONLY, + [VM_EXEC] = PAGE_EXECREAD, + [VM_EXEC | VM_READ] = PAGE_EXECREAD, + [VM_EXEC | VM_WRITE] = PAGE_EXECREAD, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_EXECREAD, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_WRITEONLY, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_EXECREAD, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX +}; +DECLARE_VM_GET_PAGE_PROT -- cgit v1.2.1 From 4975f604feb63c7b6b6ec56cb6013026d6aaa9f8 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:48 +0530 Subject: alpha/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. 
Link: https://lkml.kernel.org/r/20220711070600.2378316-15-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Richard Henderson Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/alpha/Kconfig | 1 + arch/alpha/include/asm/pgtable.h | 17 ----------------- arch/alpha/mm/init.c | 22 ++++++++++++++++++++++ 3 files changed, 23 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 7d0d26b5b3f5..db1c8b329461 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -2,6 +2,7 @@ config ALPHA bool default y + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_32BIT_USTAT_F_TINODE select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h index 170451fde043..3ea9661c09ff 100644 --- a/arch/alpha/include/asm/pgtable.h +++ b/arch/alpha/include/asm/pgtable.h @@ -116,23 +116,6 @@ struct vm_area_struct; * arch/alpha/mm/fault.c) */ /* xwr */ -#define __P000 _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR) -#define __P001 _PAGE_P(_PAGE_FOE | _PAGE_FOW) -#define __P010 _PAGE_P(_PAGE_FOE) -#define __P011 _PAGE_P(_PAGE_FOE) -#define __P100 _PAGE_P(_PAGE_FOW | _PAGE_FOR) -#define __P101 _PAGE_P(_PAGE_FOW) -#define __P110 _PAGE_P(0) -#define __P111 _PAGE_P(0) - -#define __S000 _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR) -#define __S001 _PAGE_S(_PAGE_FOE | _PAGE_FOW) -#define __S010 _PAGE_S(_PAGE_FOE) -#define __S011 _PAGE_S(_PAGE_FOE) -#define __S100 _PAGE_S(_PAGE_FOW | _PAGE_FOR) -#define __S101 _PAGE_S(_PAGE_FOW) -#define __S110 _PAGE_S(0) -#define __S111 _PAGE_S(0) /* * pgprot_noncached() is only for infiniband pci support, and a real diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 7511723b7669..a155180d7a83 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -280,3 +280,25 @@ mem_init(void) high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); memblock_free_all(); } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = _PAGE_P(_PAGE_FOE | _PAGE_FOW | + _PAGE_FOR), + [VM_READ] = _PAGE_P(_PAGE_FOE | _PAGE_FOW), + [VM_WRITE] = _PAGE_P(_PAGE_FOE), + [VM_WRITE | VM_READ] = _PAGE_P(_PAGE_FOE), + [VM_EXEC] = _PAGE_P(_PAGE_FOW | _PAGE_FOR), + [VM_EXEC | VM_READ] = _PAGE_P(_PAGE_FOW), + [VM_EXEC | VM_WRITE] = _PAGE_P(0), + [VM_EXEC | VM_WRITE | VM_READ] = _PAGE_P(0), + [VM_SHARED] = _PAGE_S(_PAGE_FOE | _PAGE_FOW | + _PAGE_FOR), + [VM_SHARED | VM_READ] = _PAGE_S(_PAGE_FOE | _PAGE_FOW), + [VM_SHARED | VM_WRITE] = _PAGE_S(_PAGE_FOE), + [VM_SHARED | VM_WRITE | VM_READ] = _PAGE_S(_PAGE_FOE), + [VM_SHARED | VM_EXEC] = _PAGE_S(_PAGE_FOW | _PAGE_FOR), + [VM_SHARED | VM_EXEC | VM_READ] = _PAGE_S(_PAGE_FOW), + [VM_SHARED | VM_EXEC | VM_WRITE] = _PAGE_S(0), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = _PAGE_S(0) +}; +DECLARE_VM_GET_PAGE_PROT -- cgit v1.2.1 From 53e2fdee5f0efdf5bd099a29bfaae0b0ec90e70e Mon Sep 17 00:00:00 2001 From: Anshuman 
Khandual Date: Mon, 11 Jul 2022 12:35:49 +0530 Subject: nios2/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220711070600.2378316-16-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Acked-by: Dinh Nguyen Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/nios2/Kconfig | 1 + arch/nios2/include/asm/pgtable.h | 16 ---------------- arch/nios2/mm/init.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index 4167f1eb4cd8..e0459dffd218 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -6,6 +6,7 @@ config NIOS2 select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_DMA_SET_UNCACHED + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_NO_SWAP select COMMON_CLK select TIMER_OF diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h index 262d0609268c..470516d4555e 100644 --- a/arch/nios2/include/asm/pgtable.h +++ b/arch/nios2/include/asm/pgtable.h @@ -40,24 +40,8 @@ struct mm_struct; */ /* Remove W bit on private pages for COW support */ -#define __P000 MKP(0, 0, 0) -#define __P001 MKP(0, 0, 1) -#define __P010 MKP(0, 0, 0) /* COW */ -#define __P011 MKP(0, 0, 1) /* COW */ -#define __P100 MKP(1, 0, 0) -#define __P101 MKP(1, 0, 1) -#define __P110 MKP(1, 0, 0) /* COW */ -#define __P111 MKP(1, 0, 1) /* COW */ /* Shared pages can have exact HW mapping */ -#define __S000 MKP(0, 0, 0) -#define __S001 MKP(0, 0, 1) -#define __S010 MKP(0, 1, 0) -#define __S011 MKP(0, 1, 1) -#define __S100 MKP(1, 0, 0) -#define __S101 MKP(1, 0, 1) -#define __S110 MKP(1, 1, 0) -#define __S111 MKP(1, 1, 1) /* Used all over the kernel */ #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \ diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index 613fcaa5988a..ae24687d12ad 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -124,3 +124,23 @@ const char *arch_vma_name(struct vm_area_struct *vma) { return (vma->vm_start == KUSER_BASE) ? 
"[kuser]" : NULL; } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = MKP(0, 0, 0), + [VM_READ] = MKP(0, 0, 1), + [VM_WRITE] = MKP(0, 0, 0), + [VM_WRITE | VM_READ] = MKP(0, 0, 1), + [VM_EXEC] = MKP(1, 0, 0), + [VM_EXEC | VM_READ] = MKP(1, 0, 1), + [VM_EXEC | VM_WRITE] = MKP(1, 0, 0), + [VM_EXEC | VM_WRITE | VM_READ] = MKP(1, 0, 1), + [VM_SHARED] = MKP(0, 0, 0), + [VM_SHARED | VM_READ] = MKP(0, 0, 1), + [VM_SHARED | VM_WRITE] = MKP(0, 1, 0), + [VM_SHARED | VM_WRITE | VM_READ] = MKP(0, 1, 1), + [VM_SHARED | VM_EXEC] = MKP(1, 0, 0), + [VM_SHARED | VM_EXEC | VM_READ] = MKP(1, 0, 1), + [VM_SHARED | VM_EXEC | VM_WRITE] = MKP(1, 1, 0), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = MKP(1, 1, 1) +}; +DECLARE_VM_GET_PAGE_PROT -- cgit v1.2.1 From 4147b5e2d5691078c9f8eaa3644ef11f5351a9db Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:50 +0530 Subject: riscv/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220711070600.2378316-17-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Paul Walmsley Cc: Palmer Dabbelt Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Paul Mackerras Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/riscv/Kconfig | 1 + arch/riscv/include/asm/pgtable.h | 20 -------------------- arch/riscv/mm/init.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 32ffef9f6e5b..583389d4e43a 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -32,6 +32,7 @@ config RISCV select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT select ARCH_STACKWALK diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 1d1be9d9419c..23e643db6575 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -186,26 +186,6 @@ extern struct pt_alloc_ops pt_ops __initdata; extern pgd_t swapper_pg_dir[]; -/* MAP_PRIVATE permissions: xwr (copy-on-write) */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READ -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_EXEC -#define __P101 PAGE_READ_EXEC -#define __P110 PAGE_COPY_EXEC -#define __P111 PAGE_COPY_READ_EXEC - -/* MAP_SHARED permissions: xwr */ -#define __S000 PAGE_NONE -#define __S001 PAGE_READ -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_EXEC -#define __S101 PAGE_READ_EXEC -#define __S110 PAGE_SHARED_EXEC -#define __S111 
PAGE_SHARED_EXEC - #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmd_present(pmd_t pmd) { diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index d466ec670e1f..a88b7dc31a68 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -288,6 +288,26 @@ static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAG #define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir)) #endif /* CONFIG_XIP_KERNEL */ +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READ, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_EXEC, + [VM_EXEC | VM_READ] = PAGE_READ_EXEC, + [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_READ_EXEC, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READ, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_EXEC, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC +}; +DECLARE_VM_GET_PAGE_PROT + void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) { unsigned long addr = __fix_to_virt(idx); -- cgit v1.2.1 From 0d70836013f2da08fa190e99a1d50ebf28a09ef5 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:51 +0530 Subject: csky/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220711070600.2378316-18-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Acked-by: Guo Ren Cc: Geert Uytterhoeven Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. 
Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/csky/Kconfig | 1 + arch/csky/include/asm/pgtable.h | 18 ------------------ arch/csky/mm/init.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 21d72b078eef..588b8a9c68ed 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -6,6 +6,7 @@ config CSKY select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_QUEUED_RWLOCKS select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace) diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index bbe245117777..229a5f4ad7fc 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h @@ -77,24 +77,6 @@ #define MAX_SWAPFILES_CHECK() \ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5) -#define __P000 PAGE_NONE -#define __P001 PAGE_READ -#define __P010 PAGE_READ -#define __P011 PAGE_READ -#define __P100 PAGE_READ -#define __P101 PAGE_READ -#define __P110 PAGE_READ -#define __P111 PAGE_READ - -#define __S000 PAGE_NONE -#define __S001 PAGE_READ -#define __S010 PAGE_WRITE -#define __S011 PAGE_WRITE -#define __S100 PAGE_READ -#define __S101 PAGE_READ -#define __S110 PAGE_WRITE -#define __S111 PAGE_WRITE - extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index bf2004aa811a..bde7cabd23df 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -197,3 +197,23 @@ void __init fixaddr_init(void) vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir); } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READ, + [VM_WRITE] = PAGE_READ, + [VM_WRITE | VM_READ] = PAGE_READ, + [VM_EXEC] = PAGE_READ, + [VM_EXEC | VM_READ] = PAGE_READ, + [VM_EXEC | VM_WRITE] = PAGE_READ, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_READ, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READ, + [VM_SHARED | VM_WRITE] = PAGE_WRITE, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_WRITE, + [VM_SHARED | VM_EXEC] = PAGE_READ, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_WRITE, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_WRITE +}; +DECLARE_VM_GET_PAGE_PROT -- cgit v1.2.1 From fd5d210fa66beeae6ee296eb06d55c1c6faea912 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:52 +0530 Subject: s390/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. 
Link: https://lkml.kernel.org/r/20220711070600.2378316-19-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Heiko Carstens Cc: Vasily Gorbik Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/s390/Kconfig | 1 + arch/s390/include/asm/pgtable.h | 17 ----------------- arch/s390/mm/mmap.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 91c0b80a8bf0..c4481377ca83 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -81,6 +81,7 @@ config S390 select ARCH_HAS_SYSCALL_WRAPPER select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAS_VDSO_DATA + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_INLINE_READ_LOCK select ARCH_INLINE_READ_LOCK_BH diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index a397b072a580..c63a05b5368a 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -424,23 +424,6 @@ static inline int is_module_addr(void *addr) * implies read permission. */ /*xwr*/ -#define __P000 PAGE_NONE -#define __P001 PAGE_RO -#define __P010 PAGE_RO -#define __P011 PAGE_RO -#define __P100 PAGE_RX -#define __P101 PAGE_RX -#define __P110 PAGE_RX -#define __P111 PAGE_RX - -#define __S000 PAGE_NONE -#define __S001 PAGE_RO -#define __S010 PAGE_RW -#define __S011 PAGE_RW -#define __S100 PAGE_RX -#define __S101 PAGE_RX -#define __S110 PAGE_RWX -#define __S111 PAGE_RWX /* * Segment entry (large page) protection definitions. diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index d545f5c39f7e..5980ce348832 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -188,3 +188,23 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_RO, + [VM_WRITE] = PAGE_RO, + [VM_WRITE | VM_READ] = PAGE_RO, + [VM_EXEC] = PAGE_RX, + [VM_EXEC | VM_READ] = PAGE_RX, + [VM_EXEC | VM_WRITE] = PAGE_RX, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_RX, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_RO, + [VM_SHARED | VM_WRITE] = PAGE_RW, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_RW, + [VM_SHARED | VM_EXEC] = PAGE_RX, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_RX, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX +}; +DECLARE_VM_GET_PAGE_PROT -- cgit v1.2.1 From c619b82c55b730191e19f13e119603c702e11f0f Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:53 +0530 Subject: ia64/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. 
Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220711070600.2378316-20-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/ia64/Kconfig | 1 + arch/ia64/include/asm/pgtable.h | 18 ------------------ arch/ia64/mm/init.c | 28 +++++++++++++++++++++++++++- 3 files changed, 28 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index cb93769a9f2a..0510a5737711 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -12,6 +12,7 @@ config IA64 select ARCH_HAS_DMA_MARK_CLEAN select ARCH_HAS_STRNCPY_FROM_USER select ARCH_HAS_STRNLEN_USER + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select ACPI diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 7aa8f2330fb1..6925e28ae61d 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -161,24 +161,6 @@ * attempts to write to the page. */ /* xwr */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */ -#define __P011 PAGE_READONLY /* ditto */ -#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX) -#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) -#define __P110 PAGE_COPY_EXEC -#define __P111 PAGE_COPY_EXEC - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */ -#define __S011 PAGE_SHARED -#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX) -#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) -#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX) -#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX) - #define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) #if CONFIG_PGTABLE_LEVELS == 4 #define pud_ERROR(e) printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e)) diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 855d949d81df..fc4e4217e87f 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -273,7 +273,7 @@ static int __init gate_vma_init(void) gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; - gate_vma.vm_page_prot = __P101; + gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX); return 0; } @@ -490,3 +490,29 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) __remove_pages(start_pfn, nr_pages, altmap); } #endif + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_READONLY, + [VM_WRITE | VM_READ] = PAGE_READONLY, + [VM_EXEC] = 
__pgprot(__ACCESS_BITS | _PAGE_PL_3 | + _PAGE_AR_X_RX), + [VM_EXEC | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | + _PAGE_AR_RX), + [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | + _PAGE_AR_X_RX), + [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | + _PAGE_AR_RX), + [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | + _PAGE_AR_RWX), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | + _PAGE_AR_RWX) +}; +DECLARE_VM_GET_PAGE_PROT -- cgit v1.2.1 From 499c1dd92ea1087b98924ebe88cbf48ac0198775 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:54 +0530 Subject: mips/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220711070600.2378316-21-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Thomas Bogendoerfer Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/mips/Kconfig | 1 + arch/mips/include/asm/pgtable.h | 22 ---------------------- arch/mips/mm/cache.c | 3 +++ 3 files changed, 4 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index db09d45d59ec..d0b7eb11ec81 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -14,6 +14,7 @@ config MIPS select ARCH_HAS_STRNLEN_USER select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_KEEP_MEMBLOCK select ARCH_SUPPORTS_UPROBES diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 374c6322775d..6caec386ad2f 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -41,28 +41,6 @@ struct vm_area_struct; * by reasonable means.. 
*/ -/* - * Dummy values to fill the table in mmap.c - * The real values will be generated at runtime - */ -#define __P000 __pgprot(0) -#define __P001 __pgprot(0) -#define __P010 __pgprot(0) -#define __P011 __pgprot(0) -#define __P100 __pgprot(0) -#define __P101 __pgprot(0) -#define __P110 __pgprot(0) -#define __P111 __pgprot(0) - -#define __S000 __pgprot(0) -#define __S001 __pgprot(0) -#define __S010 __pgprot(0) -#define __S011 __pgprot(0) -#define __S100 __pgprot(0) -#define __S101 __pgprot(0) -#define __S110 __pgprot(0) -#define __S111 __pgprot(0) - extern unsigned long _page_cachable_default; extern void __update_cache(unsigned long address, pte_t pte); diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 7be7240f7703..11b3e7ddafd5 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -159,6 +159,9 @@ EXPORT_SYMBOL(_page_cachable_default); #define PM(p) __pgprot(_page_cachable_default | (p)) +static pgprot_t protection_map[16] __ro_after_init; +DECLARE_VM_GET_PAGE_PROT + static inline void setup_protection_map(void) { protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); -- cgit v1.2.1 From 6d0b92254510cd7e12358028e85f99f0453006fa Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:55 +0530 Subject: m68k/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220711070600.2378316-22-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Reviewed-by: Geert Uytterhoeven Acked-by: Geert Uytterhoeven Cc: Thomas Bogendoerfer Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. 
Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/m68k/Kconfig | 1 + arch/m68k/include/asm/mcf_pgtable.h | 59 -------------------------------- arch/m68k/include/asm/motorola_pgtable.h | 29 ---------------- arch/m68k/include/asm/sun3_pgtable.h | 23 ------------- arch/m68k/mm/mcfmmu.c | 55 +++++++++++++++++++++++++++++ arch/m68k/mm/motorola.c | 29 ++++++++++++++++ arch/m68k/mm/sun3mmu.c | 20 +++++++++++ 7 files changed, 105 insertions(+), 111 deletions(-) (limited to 'arch') diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 936cce42ae9a..49aa0cf13e96 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -7,6 +7,7 @@ config M68K select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS select ARCH_MIGHT_HAVE_PC_PARPORT if ISA select ARCH_NO_PREEMPT if !COLDFIRE diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h index 94f38d76e278..b619b22823f8 100644 --- a/arch/m68k/include/asm/mcf_pgtable.h +++ b/arch/m68k/include/asm/mcf_pgtable.h @@ -86,65 +86,6 @@ | CF_PAGE_READABLE \ | CF_PAGE_DIRTY) -/* - * Page protections for initialising protection_map. See mm/mmap.c - * for use. In general, the bit positions are xwr, and P-items are - * private, the S-items are shared. - */ -#define __P000 PAGE_NONE -#define __P001 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_READABLE) -#define __P010 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_WRITABLE) -#define __P011 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_READABLE \ - | CF_PAGE_WRITABLE) -#define __P100 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_EXEC) -#define __P101 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_READABLE \ - | CF_PAGE_EXEC) -#define __P110 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_WRITABLE \ - | CF_PAGE_EXEC) -#define __P111 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_READABLE \ - | CF_PAGE_WRITABLE \ - | CF_PAGE_EXEC) - -#define __S000 PAGE_NONE -#define __S001 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_READABLE) -#define __S010 PAGE_SHARED -#define __S011 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_SHARED \ - | CF_PAGE_READABLE) -#define __S100 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_EXEC) -#define __S101 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_READABLE \ - | CF_PAGE_EXEC) -#define __S110 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_SHARED \ - | CF_PAGE_EXEC) -#define __S111 __pgprot(CF_PAGE_VALID \ - | CF_PAGE_ACCESSED \ - | CF_PAGE_SHARED \ - | CF_PAGE_READABLE \ - | CF_PAGE_EXEC) - #define PTE_MASK PAGE_MASK #define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY) diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h index 7c9b56e2a750..7ac3d64c6b33 100644 --- a/arch/m68k/include/asm/motorola_pgtable.h +++ b/arch/m68k/include/asm/motorola_pgtable.h @@ -76,35 +76,6 @@ extern unsigned long mm_cachebits; #define PAGE_READONLY 
__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits) #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | mm_cachebits) -/* Alternate definitions that are compile time constants, for - initializing protection_map. The cachebits are fixed later. */ -#define PAGE_NONE_C __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) -#define PAGE_SHARED_C __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) -#define PAGE_COPY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED) -#define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED) - -/* - * The m68k can't do page protection for execute, and considers that the same are read. - * Also, write permissions imply read permissions. This is the closest we can get.. - */ -#define __P000 PAGE_NONE_C -#define __P001 PAGE_READONLY_C -#define __P010 PAGE_COPY_C -#define __P011 PAGE_COPY_C -#define __P100 PAGE_READONLY_C -#define __P101 PAGE_READONLY_C -#define __P110 PAGE_COPY_C -#define __P111 PAGE_COPY_C - -#define __S000 PAGE_NONE_C -#define __S001 PAGE_READONLY_C -#define __S010 PAGE_SHARED_C -#define __S011 PAGE_SHARED_C -#define __S100 PAGE_READONLY_C -#define __S101 PAGE_READONLY_C -#define __S110 PAGE_SHARED_C -#define __S111 PAGE_SHARED_C - #define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd)) /* diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h index 5e4e753f0d24..90d57e537eb1 100644 --- a/arch/m68k/include/asm/sun3_pgtable.h +++ b/arch/m68k/include/asm/sun3_pgtable.h @@ -66,29 +66,6 @@ | SUN3_PAGE_SYSTEM \ | SUN3_PAGE_NOCACHE) -/* - * Page protections for initialising protection_map. The sun3 has only two - * protection settings, valid (implying read and execute) and writeable. These - * are as close as we can get... - */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED - /* Use these fake page-protections on PMDs. 
*/ #define SUN3_PMD_VALID (0x00000001) #define SUN3_PMD_MASK (0x0000003F) diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c index 6f1f25125294..70aa0979e027 100644 --- a/arch/m68k/mm/mcfmmu.c +++ b/arch/m68k/mm/mcfmmu.c @@ -234,3 +234,58 @@ void steal_context(void) destroy_context(mm); } +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE), + [VM_WRITE] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_WRITABLE), + [VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE | + CF_PAGE_WRITABLE), + [VM_EXEC] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_EXEC), + [VM_EXEC | VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE | + CF_PAGE_EXEC), + [VM_EXEC | VM_WRITE] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_WRITABLE | + CF_PAGE_EXEC), + [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE | + CF_PAGE_WRITABLE | + CF_PAGE_EXEC), + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE), + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE | + CF_PAGE_SHARED), + [VM_SHARED | VM_EXEC] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_EXEC), + [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE | + CF_PAGE_EXEC), + [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_SHARED | + CF_PAGE_EXEC), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(CF_PAGE_VALID | + CF_PAGE_ACCESSED | + CF_PAGE_READABLE | + CF_PAGE_SHARED | + CF_PAGE_EXEC) +}; +DECLARE_VM_GET_PAGE_PROT diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index df7f797c908a..2a375637e007 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -382,6 +382,35 @@ static void __init map_node(int node) #endif } +/* + * Alternate definitions that are compile time constants, for + * initializing protection_map. The cachebits are fixed later. + */ +#define PAGE_NONE_C __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) +#define PAGE_SHARED_C __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) +#define PAGE_COPY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED) +#define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED) + +static pgprot_t protection_map[16] __ro_after_init = { + [VM_NONE] = PAGE_NONE_C, + [VM_READ] = PAGE_READONLY_C, + [VM_WRITE] = PAGE_COPY_C, + [VM_WRITE | VM_READ] = PAGE_COPY_C, + [VM_EXEC] = PAGE_READONLY_C, + [VM_EXEC | VM_READ] = PAGE_READONLY_C, + [VM_EXEC | VM_WRITE] = PAGE_COPY_C, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_C, + [VM_SHARED] = PAGE_NONE_C, + [VM_SHARED | VM_READ] = PAGE_READONLY_C, + [VM_SHARED | VM_WRITE] = PAGE_SHARED_C, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_C, + [VM_SHARED | VM_EXEC] = PAGE_READONLY_C, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_C, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_C, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_C +}; +DECLARE_VM_GET_PAGE_PROT + /* * paging_init() continues the virtual memory environment setup which * was begun by the code in arch/head.S. 
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c index dad494224497..b619d0d4319c 100644 --- a/arch/m68k/mm/sun3mmu.c +++ b/arch/m68k/mm/sun3mmu.c @@ -95,3 +95,23 @@ void __init paging_init(void) } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_READONLY, + [VM_EXEC | VM_READ] = PAGE_READONLY, + [VM_EXEC | VM_WRITE] = PAGE_COPY, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED +}; +DECLARE_VM_GET_PAGE_PROT -- cgit v1.2.1 From 5d260625b1f2affc00c596a2c8d6314865060b11 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:56 +0530 Subject: arc/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220711070600.2378316-23-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Vineet Gupta Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. 
Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/arc/Kconfig | 1 + arch/arc/include/asm/pgtable-bits-arcv2.h | 18 ------------------ arch/arc/mm/mmap.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 9e3653253ef2..8be56a5d8a9b 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -13,6 +13,7 @@ config ARC select ARCH_HAS_SETUP_DMA_OPS select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC select ARCH_32BIT_OFF_T select BUILDTIME_TABLE_SORT diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h index 183d23bc1e00..b23be557403e 100644 --- a/arch/arc/include/asm/pgtable-bits-arcv2.h +++ b/arch/arc/include/asm/pgtable-bits-arcv2.h @@ -72,24 +72,6 @@ * This is to enable COW mechanism */ /* xwr */ -#define __P000 PAGE_U_NONE -#define __P001 PAGE_U_R -#define __P010 PAGE_U_R /* Pvt-W => !W */ -#define __P011 PAGE_U_R /* Pvt-W => !W */ -#define __P100 PAGE_U_X_R /* X => R */ -#define __P101 PAGE_U_X_R -#define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */ -#define __P111 PAGE_U_X_R /* Pvt-W => !W */ - -#define __S000 PAGE_U_NONE -#define __S001 PAGE_U_R -#define __S010 PAGE_U_W_R /* W => R */ -#define __S011 PAGE_U_W_R -#define __S100 PAGE_U_X_R /* X => R */ -#define __S101 PAGE_U_X_R -#define __S110 PAGE_U_X_W_R /* X => R */ -#define __S111 PAGE_U_X_W_R - #ifndef __ASSEMBLY__ #define pte_write(pte) (pte_val(pte) & _PAGE_WRITE) diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c index 722d26b94307..fce5fa2b4f52 100644 --- a/arch/arc/mm/mmap.c +++ b/arch/arc/mm/mmap.c @@ -74,3 +74,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, info.align_offset = pgoff << PAGE_SHIFT; return vm_unmapped_area(&info); } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_U_NONE, + [VM_READ] = PAGE_U_R, + [VM_WRITE] = PAGE_U_R, + [VM_WRITE | VM_READ] = PAGE_U_R, + [VM_EXEC] = PAGE_U_X_R, + [VM_EXEC | VM_READ] = PAGE_U_X_R, + [VM_EXEC | VM_WRITE] = PAGE_U_X_R, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_R, + [VM_SHARED] = PAGE_U_NONE, + [VM_SHARED | VM_READ] = PAGE_U_R, + [VM_SHARED | VM_WRITE] = PAGE_U_W_R, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_U_W_R, + [VM_SHARED | VM_EXEC] = PAGE_U_X_R, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_U_X_R, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_U_X_W_R, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_W_R +}; +DECLARE_VM_GET_PAGE_PROT -- cgit v1.2.1 From ca26f936f51b8c9219ede32b1a1f76c4924897aa Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:57 +0530 Subject: arm/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. 
Link: https://lkml.kernel.org/r/20220711070600.2378316-24-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Russell King Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/arm/Kconfig | 1 + arch/arm/include/asm/pgtable.h | 17 ----------------- arch/arm/lib/uaccess_with_memcpy.c | 2 +- arch/arm/mm/mmu.c | 20 ++++++++++++++++++++ 4 files changed, 22 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 7630ba9cb6cc..e153b6d4fc5b 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -24,6 +24,7 @@ config ARM select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU select ARCH_HAS_TEARDOWN_DMA_OPS if MMU select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAVE_NMI_SAFE_CMPXCHG if CPU_V7 || CPU_V7M || CPU_V6K select ARCH_HAS_GCOV_PROFILE_ALL diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index cd1f84bb40ae..78a532068fec 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -137,23 +137,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, * 2) If we could do execute protection, then read is implied * 3) write implies read permissions */ -#define __P000 __PAGE_NONE -#define __P001 __PAGE_READONLY -#define __P010 __PAGE_COPY -#define __P011 __PAGE_COPY -#define __P100 __PAGE_READONLY_EXEC -#define __P101 __PAGE_READONLY_EXEC -#define __P110 __PAGE_COPY_EXEC -#define __P111 __PAGE_COPY_EXEC - -#define __S000 __PAGE_NONE -#define __S001 __PAGE_READONLY -#define __S010 __PAGE_SHARED -#define __S011 __PAGE_SHARED -#define __S100 __PAGE_READONLY_EXEC -#define __S101 __PAGE_READONLY_EXEC -#define __S110 __PAGE_SHARED_EXEC -#define __S111 __PAGE_SHARED_EXEC #ifndef __ASSEMBLY__ /* diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index c30b689bec2e..14eecaaf295f 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -237,7 +237,7 @@ static int __init test_size_treshold(void) if (!dst_page) goto no_dst; kernel_ptr = page_address(src_page); - user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010)); + user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY)); if (!user_ptr) goto no_vmap; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 5e2be37a198e..2722abddd725 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -405,6 +405,26 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); } +static pgprot_t protection_map[16] __ro_after_init = { + [VM_NONE] = __PAGE_NONE, + [VM_READ] = __PAGE_READONLY, + [VM_WRITE] = __PAGE_COPY, + [VM_WRITE | VM_READ] = __PAGE_COPY, + [VM_EXEC] = __PAGE_READONLY_EXEC, + [VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC, + [VM_EXEC | VM_WRITE] = __PAGE_COPY_EXEC, + 
[VM_EXEC | VM_WRITE | VM_READ] = __PAGE_COPY_EXEC, + [VM_SHARED] = __PAGE_NONE, + [VM_SHARED | VM_READ] = __PAGE_READONLY, + [VM_SHARED | VM_WRITE] = __PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = __PAGE_SHARED, + [VM_SHARED | VM_EXEC] = __PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE] = __PAGE_SHARED_EXEC, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __PAGE_SHARED_EXEC +}; +DECLARE_VM_GET_PAGE_PROT + /* * Adjust the PMD section entries according to the CPU in use. */ -- cgit v1.2.1 From 91a8da021c4d767704086386b08bf276f926ebfd Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:58 +0530 Subject: um/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220711070600.2378316-25-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Jeff Dike Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/um/Kconfig | 1 + arch/um/include/asm/pgtable.h | 17 ----------------- arch/um/kernel/mem.c | 20 ++++++++++++++++++++ arch/x86/um/mem_32.c | 2 +- 4 files changed, 22 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 4ec22e156a2e..7fb43654e5b5 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -10,6 +10,7 @@ config UML select ARCH_HAS_KCOV select ARCH_HAS_STRNCPY_FROM_USER select ARCH_HAS_STRNLEN_USER + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_NO_PREEMPT select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_SECCOMP_FILTER diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h index 167e236d9bb8..66bc3f99d9be 100644 --- a/arch/um/include/asm/pgtable.h +++ b/arch/um/include/asm/pgtable.h @@ -68,23 +68,6 @@ extern unsigned long end_iomem; * Also, write permissions imply read permissions. This is the closest we can * get.. 
*/ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED /* * ZERO_PAGE is a global shared page that is always zero: used diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 15295c3237a0..5b259f0a1f94 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -197,3 +197,23 @@ void *uml_kmalloc(int size, int flags) { return kmalloc(size, flags); } + +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_READONLY, + [VM_EXEC | VM_READ] = PAGE_READONLY, + [VM_EXEC | VM_WRITE] = PAGE_COPY, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED +}; +DECLARE_VM_GET_PAGE_PROT diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c index 19c5dbd46770..cafd01f730da 100644 --- a/arch/x86/um/mem_32.c +++ b/arch/x86/um/mem_32.c @@ -17,7 +17,7 @@ static int __init gate_vma_init(void) gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; - gate_vma.vm_page_prot = __P101; + gate_vma.vm_page_prot = PAGE_READONLY; return 0; } -- cgit v1.2.1 From 34516fd83fa122e54a261b74e419c16aad5317f0 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:35:59 +0530 Subject: sh/mm: enable ARCH_HAS_VM_GET_PAGE_PROT This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT, which looks up a private and static protection_map[] array. Subsequently all __SXXX and __PXXX macros can be dropped which are no longer needed. Link: https://lkml.kernel.org/r/20220711070600.2378316-26-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Cc: Yoshinori Sato Cc: Rich Felker Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christophe Leroy Cc: Christoph Hellwig Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Geert Uytterhoeven Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. 
Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/sh/Kconfig | 1 + arch/sh/include/asm/pgtable.h | 17 ----------------- arch/sh/mm/mmap.c | 20 ++++++++++++++++++++ 3 files changed, 21 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 5f220e903e5a..91f3ea325388 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -12,6 +12,7 @@ config SUPERH select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST + select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HIBERNATION_POSSIBLE if MMU select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_WANT_IPC_PARSE_VERSION diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index d7ddb1ec86a0..6fb9ec54cf9b 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -89,23 +89,6 @@ static inline unsigned long phys_addr_mask(void) * completely separate permission bits for user and kernel space. */ /*xwr*/ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_EXECREAD -#define __P101 PAGE_EXECREAD -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_WRITEONLY -#define __S011 PAGE_SHARED -#define __S100 PAGE_EXECREAD -#define __S101 PAGE_EXECREAD -#define __S110 PAGE_RWX -#define __S111 PAGE_RWX typedef pte_t *pte_addr_t; diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 6a1a1297baae..b82199878b45 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -19,6 +19,26 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ EXPORT_SYMBOL(shm_align_mask); #ifdef CONFIG_MMU +static const pgprot_t protection_map[16] = { + [VM_NONE] = PAGE_NONE, + [VM_READ] = PAGE_READONLY, + [VM_WRITE] = PAGE_COPY, + [VM_WRITE | VM_READ] = PAGE_COPY, + [VM_EXEC] = PAGE_EXECREAD, + [VM_EXEC | VM_READ] = PAGE_EXECREAD, + [VM_EXEC | VM_WRITE] = PAGE_COPY, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY, + [VM_SHARED] = PAGE_NONE, + [VM_SHARED | VM_READ] = PAGE_READONLY, + [VM_SHARED | VM_WRITE] = PAGE_WRITEONLY, + [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, + [VM_SHARED | VM_EXEC] = PAGE_EXECREAD, + [VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD, + [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX, + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX +}; +DECLARE_VM_GET_PAGE_PROT + /* * To avoid cache aliases, we map the shared page with same color. */ -- cgit v1.2.1 From 3d923c5f1e21ad491acd4c0d62bf2481ce94016c Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 11 Jul 2022 12:36:00 +0530 Subject: mm/mmap: drop ARCH_HAS_VM_GET_PAGE_PROT Now all the platforms enable ARCH_HAS_GET_PAGE_PROT. They define and export own vm_get_page_prot() whether custom or standard DECLARE_VM_GET_PAGE_PROT. Hence there is no need for default generic fallback for vm_get_page_prot(). Just drop this fallback and also ARCH_HAS_GET_PAGE_PROT mechanism. 
Link: https://lkml.kernel.org/r/20220711070600.2378316-27-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Reviewed-by: Geert Uytterhoeven Reviewed-by: Christoph Hellwig Reviewed-by: Christophe Leroy Acked-by: Geert Uytterhoeven Cc: Arnd Bergmann Cc: Brian Cain Cc: Catalin Marinas Cc: Christoph Hellwig Cc: Chris Zankel Cc: "David S. Miller" Cc: Dinh Nguyen Cc: Guo Ren Cc: Heiko Carstens Cc: Huacai Chen Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Jeff Dike Cc: Jonas Bonn Cc: Michael Ellerman Cc: Michal Simek Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Mackerras Cc: Paul Walmsley Cc: Richard Henderson Cc: Rich Felker Cc: Russell King Cc: Sam Ravnborg Cc: Stafford Horne Cc: Thomas Bogendoerfer Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: WANG Xuerui Cc: Will Deacon Cc: Yoshinori Sato Signed-off-by: Andrew Morton --- arch/alpha/Kconfig | 1 - arch/arc/Kconfig | 1 - arch/arm/Kconfig | 1 - arch/arm64/Kconfig | 1 - arch/csky/Kconfig | 1 - arch/hexagon/Kconfig | 1 - arch/ia64/Kconfig | 1 - arch/loongarch/Kconfig | 1 - arch/m68k/Kconfig | 1 - arch/microblaze/Kconfig | 1 - arch/mips/Kconfig | 1 - arch/nios2/Kconfig | 1 - arch/openrisc/Kconfig | 1 - arch/parisc/Kconfig | 1 - arch/powerpc/Kconfig | 1 - arch/riscv/Kconfig | 1 - arch/s390/Kconfig | 1 - arch/sh/Kconfig | 1 - arch/sparc/Kconfig | 1 - arch/um/Kconfig | 1 - arch/x86/Kconfig | 1 - arch/xtensa/Kconfig | 1 - 22 files changed, 22 deletions(-) (limited to 'arch') diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index db1c8b329461..7d0d26b5b3f5 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -2,7 +2,6 @@ config ALPHA bool default y - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_32BIT_USTAT_F_TINODE select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 8be56a5d8a9b..9e3653253ef2 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -13,7 +13,6 @@ config ARC select ARCH_HAS_SETUP_DMA_OPS select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC select ARCH_32BIT_OFF_T select BUILDTIME_TABLE_SORT diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e153b6d4fc5b..7630ba9cb6cc 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -24,7 +24,6 @@ config ARM select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU select ARCH_HAS_TEARDOWN_DMA_OPS if MMU select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAVE_NMI_SAFE_CMPXCHG if CPU_V7 || CPU_V7M || CPU_V6K select ARCH_HAS_GCOV_PROFILE_ALL diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1652a9800ebe..7030bf3f8d6f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -45,7 +45,6 @@ config ARM64 select ARCH_HAS_SYSCALL_WRAPPER select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_ZONE_DMA_SET if EXPERT select ARCH_HAVE_ELF_PROT select ARCH_HAVE_NMI_SAFE_CMPXCHG diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 588b8a9c68ed..21d72b078eef 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -6,7 +6,6 @@ config CSKY select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_QUEUED_RWLOCKS select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && 
$(cc-option,-mbacktrace) diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index bc4ceecd0588..54eadf265178 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -6,7 +6,6 @@ config HEXAGON def_bool y select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_NO_PREEMPT select DMA_GLOBAL_POOL # Other pending projects/to-do items. diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 0510a5737711..cb93769a9f2a 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -12,7 +12,6 @@ config IA64 select ARCH_HAS_DMA_MARK_CLEAN select ARCH_HAS_STRNCPY_FROM_USER select ARCH_HAS_STRNLEN_USER - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select ACPI diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index adf8cf6ec5d5..db2838cf8c02 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -9,7 +9,6 @@ config LOONGARCH select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_PTE_SPECIAL - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_INLINE_READ_LOCK if !PREEMPTION select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 49aa0cf13e96..936cce42ae9a 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -7,7 +7,6 @@ config M68K select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS select ARCH_MIGHT_HAVE_PC_PARPORT if ISA select ARCH_NO_PREEMPT if !COLDFIRE diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 15f91ba8a0c4..8cf429ad1c84 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -7,7 +7,6 @@ config MICROBLAZE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_TABLE_SORT diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index d0b7eb11ec81..db09d45d59ec 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -14,7 +14,6 @@ config MIPS select ARCH_HAS_STRNLEN_USER select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_KEEP_MEMBLOCK select ARCH_SUPPORTS_UPROBES diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index e0459dffd218..4167f1eb4cd8 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -6,7 +6,6 @@ config NIOS2 select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_DMA_SET_UNCACHED - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_NO_SWAP select COMMON_CLK select TIMER_OF diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index fe0dfb50eb86..e814df4c483c 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -10,7 +10,6 @@ config OPENRISC select ARCH_HAS_DMA_SET_UNCACHED select ARCH_HAS_DMA_CLEAR_UNCACHED select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_VM_GET_PAGE_PROT select COMMON_CLK select OF select OF_EARLY_FLATTREE diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 891d82393957..fa400055b2d5 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -12,7 +12,6 @@ config PARISC select ARCH_HAS_STRICT_KERNEL_RWX select 
ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_PTE_SPECIAL select ARCH_NO_SG_CHAIN select ARCH_SUPPORTS_HUGETLBFS if PA20 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 1035d172c7dd..250b8658b2d4 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -140,7 +140,6 @@ config PPC select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_KEEP_MEMBLOCK select ARCH_MIGHT_HAVE_PC_PARPORT diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 583389d4e43a..32ffef9f6e5b 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -32,7 +32,6 @@ config RISCV select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT select ARCH_STACKWALK diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c4481377ca83..91c0b80a8bf0 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -81,7 +81,6 @@ config S390 select ARCH_HAS_SYSCALL_WRAPPER select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAS_VDSO_DATA - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_INLINE_READ_LOCK select ARCH_INLINE_READ_LOCK_BH diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 91f3ea325388..5f220e903e5a 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -12,7 +12,6 @@ config SUPERH select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HIBERNATION_POSSIBLE if MMU select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_WANT_IPC_PARSE_VERSION diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 09f868613a4d..9c1cce74953a 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -13,7 +13,6 @@ config 64BIT config SPARC bool default y - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI select ARCH_MIGHT_HAVE_PC_SERIO select DMA_OPS diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 7fb43654e5b5..4ec22e156a2e 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -10,7 +10,6 @@ config UML select ARCH_HAS_KCOV select ARCH_HAS_STRNCPY_FROM_USER select ARCH_HAS_STRNLEN_USER - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_NO_PREEMPT select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_SECCOMP_FILTER diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index be0b95e51df6..841e4843d0c4 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -94,7 +94,6 @@ config X86 select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE select ARCH_HAS_SYSCALL_WRAPPER select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_DEBUG_WX select ARCH_HAS_ZONE_DMA_SET if EXPERT select ARCH_HAVE_NMI_SAFE_CMPXCHG diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 4c0d83520ff1..0b0f0172cced 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -11,7 +11,6 @@ config XTENSA select ARCH_HAS_DMA_SET_UNCACHED if MMU select ARCH_HAS_STRNCPY_FROM_USER if !KASAN select ARCH_HAS_STRNLEN_USER - select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS -- cgit v1.2.1 From a008a3004340887370aea38b5cd441b1db110041 Mon Sep 17 00:00:00 2001 
From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:50 +0300 Subject: csky: drop definition of PTE_ORDER Patch series "arch: make PxD_ORDER generically available", v2. The question of what PxD_ORDER actually defines comes up from time to time, and there is still a conflict between the MIPS and DAX definitions. Some time ago Matthew Wilcox suggested using PMD_TABLE_ORDER to define the order of the page table allocation: [1] https://lore.kernel.org/linux-arch/YPCJftSTUBEnq2lI@casper.infradead.org/ The parisc patch made it in, but the mips one didn't. Since then, the mips defines from asm/include/pgtable.h were copied to loongarch, which made things worse. Let's deal with it once and for all: rename the PxD_ORDER defines to PxD_TABLE_ORDER, or just drop them where the only possible page table order is 0. This patch (of 15): This is the order of the page table allocation, not the order of a PTE. Since it's always hardwired to 0, simply drop it. Link: https://lkml.kernel.org/r/20220705154708.181258-1-rppt@kernel.org Link: https://lkml.kernel.org/r/20220703141203.147893-1-rppt@kernel.org Link: https://lkml.kernel.org/r/20220703141203.147893-2-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: Matthew Wilcox Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/csky/include/asm/pgtable.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index 229a5f4ad7fc..349e03dfb9ba 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h @@ -19,11 +19,10 @@ * C-SKY is two-level paging structure: */ #define PGD_ORDER 0 -#define PTE_ORDER 0 #define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) #define PTRS_PER_PMD 1 -#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) +#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) #define pte_ERROR(e) \ pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) -- cgit v1.2.1 From f7536442353d1b6bb0c7c8ad1ec549d9fa215106 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:51 +0300 Subject: csky: drop definition of PGD_ORDER This is the order of the page table allocation, not the order of a PGD. Since it's always hardwired to 0, simply drop it.
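To make the "hardwired to 0" point concrete, here is a runnable sketch of the identity the cleanup relies on; the page and entry sizes below are illustrative assumptions, not csky specifics:

#include <stdio.h>

/* Illustrative values only (assumed, not taken from csky headers). */
#define PAGE_SIZE 4096UL
#define PTE_ORDER 0          /* the constant being dropped: always 0 */
typedef unsigned long pte_t; /* stand-in for the arch's PTE type */

int main(void)
{
	/* (PAGE_SIZE << 0) == PAGE_SIZE, so both forms are identical */
	unsigned long old_form = (PAGE_SIZE << PTE_ORDER) / sizeof(pte_t);
	unsigned long new_form = PAGE_SIZE / sizeof(pte_t);

	printf("PTRS_PER_PTE: old=%lu new=%lu\n", old_form, new_form); /* 512 == 512 */
	return 0;
}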
Link: https://lkml.kernel.org/r/20220703141203.147893-3-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/csky/include/asm/pgalloc.h | 2 +- arch/csky/include/asm/pgtable.h | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h index bbbd0698b397..7d57e5da0914 100644 --- a/arch/csky/include/asm/pgalloc.h +++ b/arch/csky/include/asm/pgalloc.h @@ -44,7 +44,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_t *ret; pgd_t *init; - ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + ret = (pgd_t *) __get_free_page(GFP_KERNEL); if (ret) { init = pgd_offset(&init_mm, 0UL); pgd_init((unsigned long *)ret); diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index 349e03dfb9ba..c3d9b92cbe61 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h @@ -18,9 +18,8 @@ /* * C-SKY is two-level paging structure: */ -#define PGD_ORDER 0 - -#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) +#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t)) #define PTRS_PER_PMD 1 #define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) -- cgit v1.2.1 From c94b14bd1cff085df21125a3591378c5081f143a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sun, 3 Jul 2022 17:11:52 +0300 Subject: mips: rename PMD_ORDER to PMD_TABLE_ORDER This is the order of the page table allocation, not the order of a PMD. While at it, remove the unused definition of _PMD_ORDER in asm-offsets. Link: https://lkml.kernel.org/r/20220703141203.147893-4-rppt@kernel.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/mips/include/asm/pgalloc.h | 4 ++-- arch/mips/include/asm/pgtable-32.h | 2 +- arch/mips/include/asm/pgtable-64.h | 18 +++++++++--------- arch/mips/kernel/asm-offsets.c | 3 --- 4 files changed, 12 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 867e9c3db76e..0ef245cfcae9 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -67,12 +67,12 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) pmd_t *pmd; struct page *pg; - pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER); + pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_TABLE_ORDER); if (!pg) return NULL; if (!pgtable_pmd_page_ctor(pg)) { - __free_pages(pg, PMD_ORDER); + __free_pages(pg, PMD_TABLE_ORDER); return NULL; } diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index 95df9c293d8d..8d57bd5b0b94 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -82,7 +82,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, #define PGD_ORDER (__PGD_ORDER >= 0 ?
__PGD_ORDER : 0) #define PUD_ORDER aieeee_attempt_to_allocate_pud -#define PMD_ORDER aieeee_attempt_to_allocate_pmd +#define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd #define PTE_ORDER 0 #define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2) diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index 41921acdc9d8..ae0d5a09064d 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -51,12 +51,12 @@ #define PMD_MASK (~(PMD_SIZE-1)) # ifdef __PAGETABLE_PUD_FOLDED -# define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) +# define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_TABLE_ORDER - 3)) # endif #endif #ifndef __PAGETABLE_PUD_FOLDED -#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) +#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_TABLE_ORDER - 3)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) #define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3)) @@ -91,13 +91,13 @@ # define PGD_ORDER 1 # define PUD_ORDER aieeee_attempt_to_allocate_pud # endif -#define PMD_ORDER 0 +#define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_8KB #define PGD_ORDER 0 #define PUD_ORDER aieeee_attempt_to_allocate_pud -#define PMD_ORDER 0 +#define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_16KB @@ -107,22 +107,22 @@ #define PGD_ORDER 0 #endif #define PUD_ORDER aieeee_attempt_to_allocate_pud -#define PMD_ORDER 0 +#define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_32KB #define PGD_ORDER 0 #define PUD_ORDER aieeee_attempt_to_allocate_pud -#define PMD_ORDER 0 +#define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_64KB #define PGD_ORDER 0 #define PUD_ORDER aieeee_attempt_to_allocate_pud #ifdef CONFIG_MIPS_VA_BITS_48 -#define PMD_ORDER 0 +#define PMD_TABLE_ORDER 0 #else -#define PMD_ORDER aieeee_attempt_to_allocate_pmd +#define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd #endif #define PTE_ORDER 0 #endif @@ -132,7 +132,7 @@ #define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t)) #endif #ifndef __PAGETABLE_PMD_FOLDED -#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t)) +#define PTRS_PER_PMD ((PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t)) #endif #define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 04ca75278f02..ca7c5af7697d 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -197,9 +197,6 @@ void output_mm_defines(void) DEFINE(_PTE_T_LOG2, PTE_T_LOG2); BLANK(); DEFINE(_PGD_ORDER, PGD_ORDER); -#ifndef __PAGETABLE_PMD_FOLDED - DEFINE(_PMD_ORDER, PMD_ORDER); -#endif DEFINE(_PTE_ORDER, PTE_ORDER); BLANK(); DEFINE(_PMD_SHIFT, PMD_SHIFT); -- cgit v1.2.1 From 8e20a4decd43ddfa98c146673c3981b2260bc176 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:53 +0300 Subject: mips: rename PUD_ORDER to PUD_TABLE_ORDER This is the order of the page table allocation, not the order of a PUD. 
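As background for the *_TABLE_ORDER renames: on a 64-bit arch with 8-byte table entries, each paging level covers PAGE_SHIFT + order - 3 additional address bits, which is exactly the pattern in the mips shift definitions in these hunks. A worked example in sketch form, with assumed values (4 KiB pages, order-0 tables; not any particular mips config):

#include <stdio.h>

/* Assumed illustrative values (4 KiB pages, 8-byte entries, order-0 tables). */
#define PAGE_SHIFT      12
#define PMD_TABLE_ORDER 0
#define PUD_TABLE_ORDER 0

int main(void)
{
	/* each level adds log2(entries per table) = PAGE_SHIFT + order - 3 bits */
	int pmd_shift   = PAGE_SHIFT + (PAGE_SHIFT - 3);                  /* 21 */
	int pud_shift   = pmd_shift + (PAGE_SHIFT + PMD_TABLE_ORDER - 3); /* 30 */
	int pgdir_shift = pud_shift + (PAGE_SHIFT + PUD_TABLE_ORDER - 3); /* 39 */

	printf("PMD_SHIFT=%d PUD_SHIFT=%d PGDIR_SHIFT=%d\n",
	       pmd_shift, pud_shift, pgdir_shift);
	return 0;
}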
Link: https://lkml.kernel.org/r/20220703141203.147893-5-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/mips/include/asm/pgalloc.h | 2 +- arch/mips/include/asm/pgtable-32.h | 2 +- arch/mips/include/asm/pgtable-64.h | 16 ++++++++-------- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 0ef245cfcae9..1ef8e86ae565 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -91,7 +91,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) { pud_t *pud; - pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER); + pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_TABLE_ORDER); if (pud) pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table); return pud; diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index 8d57bd5b0b94..d9ae244a4fce 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -81,7 +81,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, #endif #define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0) -#define PUD_ORDER aieeee_attempt_to_allocate_pud +#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd #define PTE_ORDER 0 diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index ae0d5a09064d..7daf9a6509d8 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -59,7 +59,7 @@ #define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_TABLE_ORDER - 3)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) -#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3)) +#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_TABLE_ORDER - 3)) #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) @@ -86,17 +86,17 @@ #ifdef CONFIG_PAGE_SIZE_4KB # ifdef CONFIG_MIPS_VA_BITS_48 # define PGD_ORDER 0 -# define PUD_ORDER 0 +# define PUD_TABLE_ORDER 0 # else # define PGD_ORDER 1 -# define PUD_ORDER aieeee_attempt_to_allocate_pud +# define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud # endif #define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_8KB #define PGD_ORDER 0 -#define PUD_ORDER aieeee_attempt_to_allocate_pud +#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif @@ -106,19 +106,19 @@ #else #define PGD_ORDER 0 #endif -#define PUD_ORDER aieeee_attempt_to_allocate_pud +#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_32KB #define PGD_ORDER 0 -#define PUD_ORDER aieeee_attempt_to_allocate_pud +#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 #define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_64KB #define PGD_ORDER 0 -#define PUD_ORDER aieeee_attempt_to_allocate_pud +#define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #ifdef CONFIG_MIPS_VA_BITS_48 #define PMD_TABLE_ORDER 0 #else @@ -129,7 +129,7 @@ #define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) #ifndef __PAGETABLE_PUD_FOLDED -#define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t)) +#define PTRS_PER_PUD ((PAGE_SIZE << 
PUD_TABLE_ORDER) / sizeof(pud_t)) #endif #ifndef __PAGETABLE_PMD_FOLDED #define PTRS_PER_PMD ((PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t)) -- cgit v1.2.1 From 6963c72d9046eab0f023e1d91960550e764ad7b9 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:54 +0300 Subject: mips: drop definitions of PTE_ORDER This is the order of the page table allocation, not the order of a PTE. Since it's always hardwired to 0, simply drop it. Link: https://lkml.kernel.org/r/20220703141203.147893-6-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/mips/include/asm/pgtable-32.h | 9 ++++----- arch/mips/include/asm/pgtable-64.h | 15 +++++---------- arch/mips/kernel/asm-offsets.c | 1 - arch/mips/mm/tlbex.c | 2 +- 4 files changed, 10 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index d9ae244a4fce..35bd519a1078 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -62,9 +62,9 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, /* PGDIR_SHIFT determines what a third-level page table entry can map */ #if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT) -# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2 - 1) +# define PGDIR_SHIFT (2 * PAGE_SHIFT - PTE_T_LOG2 - 1) #else -# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2) +# define PGDIR_SHIFT (2 * PAGE_SHIFT - PTE_T_LOG2) #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) @@ -83,13 +83,12 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, #define PGD_ORDER (__PGD_ORDER >= 0 ?
__PGD_ORDER : 0) #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd -#define PTE_ORDER 0 #define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2) #if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT) -# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t) / 2) +# define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t) / 2) #else -# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) +# define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) #endif #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index 7daf9a6509d8..dbf7e461d360 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -42,11 +42,11 @@ /* PGDIR_SHIFT determines what a third-level page table entry can map */ #ifdef __PAGETABLE_PMD_FOLDED -#define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3) +#define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) #else /* PMD_SHIFT determines the size of the area a second-level page table can map */ -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3)) +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) @@ -86,19 +86,17 @@ #ifdef CONFIG_PAGE_SIZE_4KB # ifdef CONFIG_MIPS_VA_BITS_48 # define PGD_ORDER 0 -# define PUD_TABLE_ORDER 0 +# define PUD_TABLE_ORDER 0 # else # define PGD_ORDER 1 -# define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud +# define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud # endif #define PMD_TABLE_ORDER 0 -#define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_8KB #define PGD_ORDER 0 #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 -#define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_16KB #ifdef CONFIG_MIPS_VA_BITS_48 @@ -108,13 +106,11 @@ #endif #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 -#define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_32KB #define PGD_ORDER 0 #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 -#define PTE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_64KB #define PGD_ORDER 0 @@ -124,7 +120,6 @@ #else #define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd #endif -#define PTE_ORDER 0 #endif #define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) @@ -134,7 +129,7 @@ #ifndef __PAGETABLE_PMD_FOLDED #define PTRS_PER_PMD ((PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t)) #endif -#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) +#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) #define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1) diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index ca7c5af7697d..0c97f755e256 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -197,7 +197,6 @@ void output_mm_defines(void) DEFINE(_PTE_T_LOG2, PTE_T_LOG2); BLANK(); DEFINE(_PGD_ORDER, PGD_ORDER); - DEFINE(_PTE_ORDER, PTE_ORDER); BLANK(); DEFINE(_PMD_SHIFT, PMD_SHIFT); DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 8dbbd99fc7e8..6e8e71f12fab 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -2065,7 +2065,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, UASM_i_MFC0(p, wr.r1, C0_BADVADDR); UASM_i_LW(p, wr.r2, 0, wr.r2); - UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); + UASM_i_SRL(p, wr.r1, wr.r1, 
PAGE_SHIFT - PTE_T_LOG2); uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1); -- cgit v1.2.1 From bb5af4f67a567e723ac83956167efb57687b0793 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:55 +0300 Subject: mips: rename PGD_ORDER to PGD_TABLE_ORDER This is the order of the page table allocation, not the order of a PGD. While at it, remove the unused definition of _PGD_ORDER in asm-offsets. Link: https://lkml.kernel.org/r/20220703141203.147893-7-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/mips/include/asm/pgalloc.h | 2 +- arch/mips/include/asm/pgtable-32.h | 6 +++--- arch/mips/include/asm/pgtable-64.h | 16 ++++++++-------- arch/mips/kernel/asm-offsets.c | 1 - arch/mips/kvm/mmu.c | 2 +- arch/mips/mm/pgtable.c | 2 +- arch/mips/mm/tlbex.c | 12 ++++++------ 7 files changed, 20 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 1ef8e86ae565..796035784c73 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -51,7 +51,7 @@ extern pgd_t *pgd_alloc(struct mm_struct *mm); static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - free_pages((unsigned long)pgd, PGD_ORDER); + free_pages((unsigned long)pgd, PGD_TABLE_ORDER); } #define __pte_free_tlb(tlb,pte,address) \ diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index 35bd519a1078..495c603c1a30 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -75,12 +75,12 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, * we don't really have any PUD/PMD directory physically. */ #if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT) -# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1) +# define __PGD_TABLE_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1) #else -# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2) +# define __PGD_TABLE_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2) #endif -#define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0) +#define PGD_TABLE_ORDER (__PGD_TABLE_ORDER >= 0 ?
__PGD_TABLE_ORDER : 0) #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER aieeee_attempt_to_allocate_pmd diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index dbf7e461d360..a259ca4d1272 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -85,35 +85,35 @@ */ #ifdef CONFIG_PAGE_SIZE_4KB # ifdef CONFIG_MIPS_VA_BITS_48 -# define PGD_ORDER 0 +# define PGD_TABLE_ORDER 0 # define PUD_TABLE_ORDER 0 # else -# define PGD_ORDER 1 +# define PGD_TABLE_ORDER 1 # define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud # endif #define PMD_TABLE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_8KB -#define PGD_ORDER 0 +#define PGD_TABLE_ORDER 0 #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_16KB #ifdef CONFIG_MIPS_VA_BITS_48 -#define PGD_ORDER 1 +#define PGD_TABLE_ORDER 1 #else -#define PGD_ORDER 0 +#define PGD_TABLE_ORDER 0 #endif #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_32KB -#define PGD_ORDER 0 +#define PGD_TABLE_ORDER 0 #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #define PMD_TABLE_ORDER 0 #endif #ifdef CONFIG_PAGE_SIZE_64KB -#define PGD_ORDER 0 +#define PGD_TABLE_ORDER 0 #define PUD_TABLE_ORDER aieeee_attempt_to_allocate_pud #ifdef CONFIG_MIPS_VA_BITS_48 #define PMD_TABLE_ORDER 0 @@ -122,7 +122,7 @@ #endif #endif -#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) +#define PTRS_PER_PGD ((PAGE_SIZE << PGD_TABLE_ORDER) / sizeof(pgd_t)) #ifndef __PAGETABLE_PUD_FOLDED #define PTRS_PER_PUD ((PAGE_SIZE << PUD_TABLE_ORDER) / sizeof(pud_t)) #endif diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 0c97f755e256..c4501897b870 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -196,7 +196,6 @@ void output_mm_defines(void) #endif DEFINE(_PTE_T_LOG2, PTE_T_LOG2); BLANK(); - DEFINE(_PGD_ORDER, PGD_ORDER); BLANK(); DEFINE(_PMD_SHIFT, PMD_SHIFT); DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 1bfd1b501d82..db17e870bdff 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -80,7 +80,7 @@ pgd_t *kvm_pgd_alloc(void) { pgd_t *ret; - ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER); + ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER); if (ret) kvm_pgd_init(ret); diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c index 05560b042d82..3b7590660a04 100644 --- a/arch/mips/mm/pgtable.c +++ b/arch/mips/mm/pgtable.c @@ -12,7 +12,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *ret, *init; - ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER); if (ret) { init = pgd_offset(&init_mm, 0UL); pgd_init((unsigned long)ret); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 6e8e71f12fab..a57519ae96b1 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -818,7 +818,7 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, * everything but the lower xuseg addresses goes down * the module_alloc/vmalloc path. 
*/ - uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); + uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); uasm_il_bnez(p, r, ptr, label_vmalloc); } else { uasm_il_bltz(p, r, tmp, label_vmalloc); @@ -1127,7 +1127,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, UASM_i_SW(p, scratch, scratchpad_offset(0), 0); uasm_i_dsrl_safe(p, scratch, tmp, - PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); + PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); uasm_il_bnez(p, r, scratch, label_vmalloc); if (pgd_reg == -1) { @@ -1493,12 +1493,12 @@ static void setup_pw(void) #endif pgd_i = PGDIR_SHIFT; /* 1st level PGD */ #ifndef __PAGETABLE_PMD_FOLDED - pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER; + pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_TABLE_ORDER; pmd_i = PMD_SHIFT; /* 2nd level PMD */ pmd_w = PMD_SHIFT - PAGE_SHIFT; #else - pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER; + pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_TABLE_ORDER; #endif pt_i = PAGE_SHIFT; /* 3rd level PTE */ @@ -1536,7 +1536,7 @@ static void build_loongson3_tlb_refill_handler(void) if (check_for_high_segbits) { uasm_i_dmfc0(&p, K0, C0_BADVADDR); - uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); + uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); uasm_il_beqz(&p, &r, K1, label_vmalloc); uasm_i_nop(&p); @@ -2611,7 +2611,7 @@ void build_tlb_refill_handler(void) check_pabits(); #ifdef CONFIG_64BIT - check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); + check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); #endif if (cpu_has_3kex) { -- cgit v1.2.1 From bf0dc119c51ffdcfa6db0786ea877eec85985b7e Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:56 +0300 Subject: nios2: drop definition of PTE_ORDER This is the order of the page table allocation, not the order of a PTE. Since it's always hardwired to 0, simply drop it.
Link: https://lkml.kernel.org/r/20220703141203.147893-8-rppt@kernel.org Signed-off-by: Mike Rapoport Acked-by: Dinh Nguyen Cc: Arnd Bergmann Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/nios2/include/asm/pgtable.h | 3 +-- arch/nios2/mm/init.c | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h index 470516d4555e..1af8dbfe1793 100644 --- a/arch/nios2/include/asm/pgtable.h +++ b/arch/nios2/include/asm/pgtable.h @@ -53,10 +53,9 @@ struct mm_struct; #define PAGE_COPY MKP(0, 0, 1) #define PGD_ORDER 0 -#define PTE_ORDER 0 #define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) -#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) +#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) #define USER_PTRS_PER_PGD \ (CONFIG_NIOS2_KERNEL_MMU_REGION_BASE / PGDIR_SIZE) diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index ae24687d12ad..7eaba31cea98 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -80,7 +80,7 @@ void __init mmu_init(void) #define __page_aligned(order) __aligned(PAGE_SIZE << (order)) pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER); -pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER); +pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE); static struct page *kuser_page[1]; static int alloc_kuser_page(void) -- cgit v1.2.1 From a6714e720b5e8438ca7f779342ad91b39094f548 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:57 +0300 Subject: nios2: drop definition of PGD_ORDER This is the order of the page table allocation, not the order of a PGD. Since it's always hardwired to 0, simply drop it.
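The allocation-side simplification in these patches leans on __get_free_page() being the order-0 shorthand for __get_free_pages(); to the best of my understanding, include/linux/gfp.h defines it roughly as sketched below (paraphrased, not quoted from the patch):

/* Order-0 shorthand, approximately as in include/linux/gfp.h: */
#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

/*
 * So with PGD_ORDER hardwired to 0, the rewrite seen in these diffs,
 *   ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
 * becomes the exactly equivalent
 *   ret = (pgd_t *) __get_free_page(GFP_KERNEL);
 */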
Link: https://lkml.kernel.org/r/20220703141203.147893-9-rppt@kernel.org Signed-off-by: Mike Rapoport Acked-by: Dinh Nguyen Cc: Arnd Bergmann Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/nios2/include/asm/pgtable.h | 4 +--- arch/nios2/mm/init.c | 3 +-- arch/nios2/mm/pgtable.c | 2 +- 3 files changed, 3 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h index 1af8dbfe1793..b3d45e815295 100644 --- a/arch/nios2/include/asm/pgtable.h +++ b/arch/nios2/include/asm/pgtable.h @@ -52,9 +52,7 @@ struct mm_struct; #define PAGE_COPY MKP(0, 0, 1) -#define PGD_ORDER 0 - -#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) +#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t)) #define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) #define USER_PTRS_PER_PGD \ diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index 7eaba31cea98..7bc82ee889c9 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -78,8 +78,7 @@ void __init mmu_init(void) flush_tlb_all(); } -#define __page_aligned(order) __aligned(PAGE_SIZE << (order)) -pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER); +pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE); pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE); static struct page *kuser_page[1]; diff --git a/arch/nios2/mm/pgtable.c b/arch/nios2/mm/pgtable.c index 9b587fd592dd..7c76e8a7447a 100644 --- a/arch/nios2/mm/pgtable.c +++ b/arch/nios2/mm/pgtable.c @@ -54,7 +54,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *ret, *init; - ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + ret = (pgd_t *) __get_free_page(GFP_KERNEL); if (ret) { init = pgd_offset(&init_mm, 0UL); pgd_init(ret); -- cgit v1.2.1 From 1721b412fc3391646e9cba35e74987516f6d0fce Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:58 +0300 Subject: loongarch: drop definition of PTE_ORDER This is the order of the page table allocation, not the order of a PTE. Since it's always hardwired to 0, simply drop it.
Link: https://lkml.kernel.org/r/20220703141203.147893-10-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/loongarch/include/asm/pgtable.h | 9 ++++----- arch/loongarch/kernel/asm-offsets.c | 1 - arch/loongarch/mm/tlbex.S | 6 +++--- 3 files changed, 7 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index d9e86cfa53e2..e0bbfc31fe72 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -24,17 +24,16 @@ #define PGD_ORDER 0 #define PUD_ORDER 0 #define PMD_ORDER 0 -#define PTE_ORDER 0 #if CONFIG_PGTABLE_LEVELS == 2 -#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3)) +#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) #elif CONFIG_PGTABLE_LEVELS == 3 -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3)) +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) #define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) #elif CONFIG_PGTABLE_LEVELS == 4 -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3)) +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) #define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) @@ -55,7 +54,7 @@ #if CONFIG_PGTABLE_LEVELS > 2 #define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) >> 3) #endif -#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) >> 3) +#define PTRS_PER_PTE (PAGE_SIZE >> 3) #define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1) diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index bfb65eb2844f..1a1166a7e61c 100644 --- a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -194,7 +194,6 @@ void output_mm_defines(void) #ifndef __PAGETABLE_PMD_FOLDED DEFINE(_PMD_ORDER, PMD_ORDER); #endif - DEFINE(_PTE_ORDER, PTE_ORDER); BLANK(); DEFINE(_PMD_SHIFT, PMD_SHIFT); DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S index 7eee40271577..e36c2c07dee3 100644 --- a/arch/loongarch/mm/tlbex.S +++ b/arch/loongarch/mm/tlbex.S @@ -83,7 +83,7 @@ vmalloc_done_load: bne t0, $r0, tlb_huge_update_load csrrd t0, LOONGARCH_CSR_BADV - srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER) + srli.d t0, t0, PAGE_SHIFT andi t0, t0, (PTRS_PER_PTE - 1) slli.d t0, t0, _PTE_T_LOG2 add.d t1, ra, t0 @@ -247,7 +247,7 @@ vmalloc_done_store: bne t0, $r0, tlb_huge_update_store csrrd t0, LOONGARCH_CSR_BADV - srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER) + srli.d t0, t0, PAGE_SHIFT andi t0, t0, (PTRS_PER_PTE - 1) slli.d t0, t0, _PTE_T_LOG2 add.d t1, ra, t0 @@ -414,7 +414,7 @@ vmalloc_done_modify: bne t0, $r0, tlb_huge_update_modify csrrd t0, LOONGARCH_CSR_BADV - srli.d t0, t0, (PAGE_SHIFT + PTE_ORDER) + srli.d t0, t0, PAGE_SHIFT andi t0, t0, (PTRS_PER_PTE - 1) slli.d t0, t0, _PTE_T_LOG2 add.d t1, ra, t0 -- cgit v1.2.1 From b7c0f2d454afbeaa9e6fcf5d62be2bd7708c73b0 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:11:59 +0300 Subject: loongarch: drop definition of PMD_ORDER This is the order of the page table allocation, not the order of a PMD. Since it's always hardwired to 0, simply drop it.
Link: https://lkml.kernel.org/r/20220703141203.147893-11-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/loongarch/include/asm/pgalloc.h | 4 ++-- arch/loongarch/include/asm/pgtable.h | 7 +++---- arch/loongarch/kernel/asm-offsets.c | 3 --- 3 files changed, 5 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h index b0a57b25c131..93e785f46639 100644 --- a/arch/loongarch/include/asm/pgalloc.h +++ b/arch/loongarch/include/asm/pgalloc.h @@ -66,12 +66,12 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) pmd_t *pmd; struct page *pg; - pg = alloc_pages(GFP_KERNEL_ACCOUNT, PMD_ORDER); + pg = alloc_page(GFP_KERNEL_ACCOUNT); if (!pg) return NULL; if (!pgtable_pmd_page_ctor(pg)) { - __free_pages(pg, PMD_ORDER); + __free_page(pg); return NULL; } diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index e0bbfc31fe72..f926537d2233 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -23,7 +23,6 @@ #define PGD_ORDER 0 #define PUD_ORDER 0 -#define PMD_ORDER 0 #if CONFIG_PGTABLE_LEVELS == 2 #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) @@ -31,12 +30,12 @@ #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) -#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) +#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3)) #elif CONFIG_PGTABLE_LEVELS == 4 #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) -#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3)) +#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) #define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3)) @@ -52,7 +51,7 @@ #define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) >> 3) #endif #if CONFIG_PGTABLE_LEVELS > 2 -#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) >> 3) +#define PTRS_PER_PMD (PAGE_SIZE >> 3) #endif #define PTRS_PER_PTE (PAGE_SIZE >> 3) diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index 1a1166a7e61c..aa4ef42d759f 100644 --- a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -191,9 +191,6 @@ void output_mm_defines(void) DEFINE(_PTE_T_LOG2, PTE_T_LOG2); BLANK(); DEFINE(_PGD_ORDER, PGD_ORDER); -#ifndef __PAGETABLE_PMD_FOLDED - DEFINE(_PMD_ORDER, PMD_ORDER); -#endif BLANK(); DEFINE(_PMD_SHIFT, PMD_SHIFT); DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); -- cgit v1.2.1 From f05ecc68690fcfc8724b3a3830e9157754273e27 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:12:00 +0300 Subject: loongarch: drop definition of PUD_ORDER This is the order of the page table allocation, not the order of a PUD. Since it's always hardwired to 0, simply drop it.
Link: https://lkml.kernel.org/r/20220703141203.147893-12-rppt@kernel.org Signed-off-by: Mike Rapoport Cc: Arnd Bergmann Cc: Dinh Nguyen Cc: Guo Ren Cc: Helge Deller Cc: Huacai Chen Cc: James Bottomley Cc: "Matthew Wilcox (Oracle)" Cc: Max Filippov Cc: Thomas Bogendoerfer Cc: Xuerui Wang Signed-off-by: Andrew Morton --- arch/loongarch/include/asm/pgalloc.h | 2 +- arch/loongarch/include/asm/pgtable.h | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h index 93e785f46639..4bfeb3c9c9ac 100644 --- a/arch/loongarch/include/asm/pgalloc.h +++ b/arch/loongarch/include/asm/pgalloc.h @@ -90,7 +90,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) { pud_t *pud; - pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER); + pud = (pud_t *) __get_free_page(GFP_KERNEL); if (pud) pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table); return pud; diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index f926537d2233..a97996fefaed 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -22,7 +22,6 @@ #endif #define PGD_ORDER 0 -#define PUD_ORDER 0 #if CONFIG_PGTABLE_LEVELS == 2 #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) @@ -38,7 +37,7 @@ #define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) -#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3)) +#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3)) #endif #define PGDIR_SIZE (1UL << PGDIR_SHIFT) @@ -48,7 +47,7 @@ #define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) >> 3) #if CONFIG_PGTABLE_LEVELS > 3 -#define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) >> 3) +#define PTRS_PER_PUD (PAGE_SIZE >> 3) #endif #if CONFIG_PGTABLE_LEVELS > 2 #define PTRS_PER_PMD (PAGE_SIZE >> 3) -- cgit v1.2.1 From 418d5dadaf9de9d230bfd3cdcb6263fc6de1a165 Mon Sep 17 00:00:00 2001 From: Mike Rapoport Date: Sun, 3 Jul 2022 17:12:01 +0300 Subject: loongarch: drop definition of PGD_ORDER This is the order of the page table allocation, not the order of a PGD. Since it's always hardwired to 0, simply drop it.
[rppt@linux.ibm.com: drop extra BLANK() line in arch/loongarch/kernel/asm-offsets.c]
Link: https://lkml.kernel.org/r/20220705154708.181258-13-rppt@kernel.org
Link: https://lkml.kernel.org/r/20220703141203.147893-13-rppt@kernel.org
Signed-off-by: Mike Rapoport
Acked-by: Huacai Chen
Cc: Arnd Bergmann
Cc: Dinh Nguyen
Cc: Guo Ren
Cc: Helge Deller
Cc: James Bottomley
Cc: "Matthew Wilcox (Oracle)"
Cc: Max Filippov
Cc: Thomas Bogendoerfer
Cc: Xuerui Wang
Signed-off-by: Andrew Morton
---
 arch/loongarch/include/asm/pgtable.h | 6 ++----
 arch/loongarch/kernel/asm-offsets.c  | 2 --
 arch/loongarch/mm/pgtable.c          | 2 +-
 3 files changed, 3 insertions(+), 7 deletions(-)

(limited to 'arch')

diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index a97996fefaed..e03443abaf7d 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -21,8 +21,6 @@
 #include
 #endif
 
-#define PGD_ORDER		0
-
 #if CONFIG_PGTABLE_LEVELS == 2
 #define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
 #elif CONFIG_PGTABLE_LEVELS == 3
@@ -43,9 +41,9 @@
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
-#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT + PGD_ORDER - 3))
+#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT - 3))
 
-#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) >> 3)
+#define PTRS_PER_PGD	(PAGE_SIZE >> 3)
 #if CONFIG_PGTABLE_LEVELS > 3
 #define PTRS_PER_PUD	(PAGE_SIZE >> 3)
 #endif
diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c
index aa4ef42d759f..4a3bb1b9aef3 100644
--- a/arch/loongarch/kernel/asm-offsets.c
+++ b/arch/loongarch/kernel/asm-offsets.c
@@ -190,8 +190,6 @@ void output_mm_defines(void)
 #endif
 	DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
 	BLANK();
-	DEFINE(_PGD_ORDER, PGD_ORDER);
-	BLANK();
 	DEFINE(_PMD_SHIFT, PMD_SHIFT);
 	DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
 	BLANK();
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
index 0569647152e9..ee179ccd3e3f 100644
--- a/arch/loongarch/mm/pgtable.c
+++ b/arch/loongarch/mm/pgtable.c
@@ -13,7 +13,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *ret, *init;
 
-	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+	ret = (pgd_t *) __get_free_page(GFP_KERNEL);
 	if (ret) {
 		init = pgd_offset(&init_mm, 0UL);
 		pgd_init((unsigned long)ret);
-- cgit v1.2.1

From 4501a7a03994f3d8bcf830cc42888868e88c8191 Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Sun, 3 Jul 2022 17:12:02 +0300
Subject: parisc: rename PGD_ORDER to PGD_TABLE_ORDER

This is the order of the page table allocation, not the order of a PGD.
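A worked instance of the BITS_PER_PGD formula touched by the hunks below may help; the concrete numbers (4KB pages, 4-byte PGD entries, and the 2-level PGD_TABLE_ORDER of 1) are assumptions chosen for illustration only, not values quoted from the patch:

	/* Worked instance of the renamed formula, under assumed 32-bit
	 * parisc parameters (illustration only):
	 *   PAGE_SHIFT == 12          4KB pages
	 *   BITS_PER_PGD_ENTRY == 2   log2 of an assumed 4-byte PGD entry
	 *   PGD_TABLE_ORDER == 1      the 2-level case in the hunk below
	 */
	#define BITS_PER_PGD	(12 + 1 - 2)	/* == 11 */
	/* 2^11 == 2048 entries of 4 bytes == 8KB: exactly the order-1,
	 * two-page block that pgd_alloc() obtains and memsets below. */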
Link: https://lkml.kernel.org/r/20220703141203.147893-14-rppt@kernel.org
Signed-off-by: Mike Rapoport
Cc: Arnd Bergmann
Cc: Dinh Nguyen
Cc: Guo Ren
Cc: Helge Deller
Cc: Huacai Chen
Cc: James Bottomley
Cc: "Matthew Wilcox (Oracle)"
Cc: Max Filippov
Cc: Thomas Bogendoerfer
Cc: Xuerui Wang
Signed-off-by: Andrew Morton
---
 arch/parisc/include/asm/pgalloc.h | 6 +++---
 arch/parisc/include/asm/pgtable.h | 8 ++++----
 2 files changed, 7 insertions(+), 7 deletions(-)

(limited to 'arch')

diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index 54b63374579b..e3e142b1c5c5 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -20,18 +20,18 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 
-	pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+	pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
 	if (unlikely(pgd == NULL))
 		return NULL;
 
-	memset(pgd, 0, PAGE_SIZE << PGD_ORDER);
+	memset(pgd, 0, PAGE_SIZE << PGD_TABLE_ORDER);
 
 	return pgd;
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	free_pages((unsigned long)pgd, PGD_ORDER);
+	free_pages((unsigned long)pgd, PGD_TABLE_ORDER);
 }
 
 #if CONFIG_PGTABLE_LEVELS == 3
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 6a1899a9b420..df7b931865d2 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -118,9 +118,9 @@ extern void __update_cache(pte_t pte);
 #if CONFIG_PGTABLE_LEVELS == 3
 #define PMD_TABLE_ORDER	1
-#define PGD_ORDER	0
+#define PGD_TABLE_ORDER	0
 #else
-#define PGD_ORDER	1
+#define PGD_TABLE_ORDER	1
 #endif
 
 /* Definitions for 3rd level (we use PLD here for Page Lower directory
@@ -144,10 +144,10 @@ extern void __update_cache(pte_t pte);
 /* Definitions for 1st level */
 #define PGDIR_SHIFT	(PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)
-#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
+#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
 #define BITS_PER_PGD	(BITS_PER_LONG - PGDIR_SHIFT)
 #else
-#define BITS_PER_PGD	(PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
+#define BITS_PER_PGD	(PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY)
 #endif
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
-- cgit v1.2.1

From 64c5ed22d6080f8ad07ed782087582a5f023f788 Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Sun, 3 Jul 2022 17:12:03 +0300
Subject: xtensa: drop definition of PGD_ORDER

This is the order of the page table allocation, not the order of a PGD.
Since it's always hardwired to 0, simply drop it.
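For xtensa the dropped constant was particularly clearly dead weight; a short sketch of the arithmetic (the 4KB page size and 4-byte entry size here are assumptions for illustration, not taken from the patch) shows why the PGD could never need more than an order-0 page:

	/* Assumed for illustration: 4KB pages and 4-byte PGD entries,
	 * plausible for a 32-bit architecture; verify against the tree. */
	#define XTENSA_PAGE_SIZE	4096
	#define XTENSA_PTRS_PER_PGD	1024	/* from the pgtable.h hunk below */
	#define XTENSA_PGD_ENTRY_SIZE	4
	/* 1024 entries * 4 bytes == 4096 bytes == exactly one page, so the
	 * PGD always fits an order-0 allocation and PGD_ORDER could only
	 * ever have been 0. */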
Link: https://lkml.kernel.org/r/20220703141203.147893-15-rppt@kernel.org
Signed-off-by: Mike Rapoport
Acked-by: Helge Deller
Acked-by: Max Filippov
Cc: Arnd Bergmann
Cc: Dinh Nguyen
Cc: Guo Ren
Cc: Huacai Chen
Cc: James Bottomley
Cc: "Matthew Wilcox (Oracle)"
Cc: Thomas Bogendoerfer
Cc: Xuerui Wang
Signed-off-by: Andrew Morton
---
 arch/xtensa/include/asm/pgalloc.h | 2 +-
 arch/xtensa/include/asm/pgtable.h | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index eeb2de3a89e5..7fc0f9126dd3 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -29,7 +29,7 @@
 
 static inline pgd_t* pgd_alloc(struct mm_struct *mm)
 {
-	return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
+	return (pgd_t*) __get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline void ptes_clear(pte_t *ptep)
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index e0d5531ae00d..54f577c13afa 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -57,7 +57,6 @@
 #define PTRS_PER_PTE		1024
 #define PTRS_PER_PTE_SHIFT	10
 #define PTRS_PER_PGD		1024
-#define PGD_ORDER		0
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
 #define FIRST_USER_PGD_NR	(FIRST_USER_ADDRESS >> PGDIR_SHIFT)
-- cgit v1.2.1

From 391145380f4b432403d8bdaf53ad7109104cb0df Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Tue, 5 Jul 2022 18:47:08 +0300
Subject: ARM: head.S: rename PMD_ORDER to PMD_ENTRY_ORDER

PMD_ORDER here denotes the order of magnitude of a PMD entry, i.e. PMD
entry size is 2 ^ PMD_ORDER: the definitions below give 2 for classic
4-byte entries and 3 for LPAE's 8-byte entries.

Rename PMD_ORDER to PMD_ENTRY_ORDER to allow a generic definition of
PMD_ORDER as the order of a PMD allocation: (PMD_SHIFT - PAGE_SHIFT).

Link: https://lkml.kernel.org/r/20220705154708.181258-16-rppt@kernel.org
Signed-off-by: Mike Rapoport
Acked-by: Russell King (Oracle)
Signed-off-by: Andrew Morton
---
 arch/arm/kernel/head.S | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

(limited to 'arch')

diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 500612d3da2e..29e2900178a1 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -38,10 +38,10 @@
 #ifdef CONFIG_ARM_LPAE
 /* LPAE requires an additional page for the PGD */
 #define PG_DIR_SIZE	0x5000
-#define PMD_ORDER	3
+#define PMD_ENTRY_ORDER	3	/* PMD entry size is 2^PMD_ENTRY_ORDER */
 #else
 #define PG_DIR_SIZE	0x4000
-#define PMD_ORDER	2
+#define PMD_ENTRY_ORDER	2
 #endif
 
 	.globl	swapper_pg_dir
@@ -240,7 +240,7 @@ __create_page_tables:
 	mov	r6, r6, lsr #SECTION_SHIFT
 
1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
-	str	r3, [r4, r5, lsl #PMD_ORDER]	@ identity mapping
+	str	r3, [r4, r5, lsl #PMD_ENTRY_ORDER]	@ identity mapping
 	cmp	r5, r6
 	addlo	r5, r5, #1			@ next section
 	blo	1b
 
@@ -250,7 +250,7 @@ __create_page_tables:
 	 * set two variables to indicate the physical start and end of the
 	 * kernel.
 	 */
-	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
+	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
 	ldr	r6, =(_end - 1)
 	adr_l	r5, kernel_sec_start		@ _pa(kernel_sec_start)
 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
@@ -259,8 +259,8 @@ __create_page_tables:
 	str	r8, [r5]			@ Save physical start of kernel (LE)
 #endif
 	orr	r3, r8, r7			@ Add the MMU flags
-	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
-1:	str	r3, [r0], #1 << PMD_ORDER
+	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
+1:	str	r3, [r0], #1 << PMD_ENTRY_ORDER
 	add	r3, r3, #1 << SECTION_SHIFT
 	cmp	r0, r6
 	bls	1b
@@ -280,14 +280,14 @@ __create_page_tables:
 	mov	r3, pc
 	mov	r3, r3, lsr #SECTION_SHIFT
 	orr	r3, r7, r3, lsl #SECTION_SHIFT
-	add	r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
-	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
+	add	r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
+	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ENTRY_ORDER]!
 	ldr	r6, =(_edata_loc - 1)
-	add	r0, r0, #1 << PMD_ORDER
-	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
+	add	r0, r0, #1 << PMD_ENTRY_ORDER
+	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
1:	cmp	r0, r6
 	add	r3, r3, #1 << SECTION_SHIFT
-	strls	r3, [r0], #1 << PMD_ORDER
+	strls	r3, [r0], #1 << PMD_ENTRY_ORDER
 	bls	1b
 #endif
 
@@ -297,10 +297,10 @@ __create_page_tables:
 	 */
 	mov	r0, r2, lsr #SECTION_SHIFT
 	cmp	r2, #0
-	ldrne	r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ORDER)
+	ldrne	r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
 	addne	r3, r3, r4
 	orrne	r6, r7, r0, lsl #SECTION_SHIFT
-	strne	r6, [r3], #1 << PMD_ORDER
+	strne	r6, [r3], #1 << PMD_ENTRY_ORDER
 	addne	r6, r6, #1 << SECTION_SHIFT
 	strne	r6, [r3]
 
@@ -319,7 +319,7 @@ __create_page_tables:
 	addruart r7, r3, r0
 
 	mov	r3, r3, lsr #SECTION_SHIFT
-	mov	r3, r3, lsl #PMD_ORDER
+	mov	r3, r3, lsl #PMD_ENTRY_ORDER
 
 	add	r0, r4, r3
 	mov	r3, r7, lsr #SECTION_SHIFT
@@ -349,7 +349,7 @@ __create_page_tables:
 	 * If we're using the NetWinder or CATS, we also need to map
 	 * in the 16550-type serial port for the debug messages
	 */
-	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
+	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
 	orr	r3, r7, #0x7c000000
 	str	r3, [r0]
 #endif
@@ -359,10 +359,10 @@ __create_page_tables:
 	 * Similar reasons here - for debug.  This is
 	 * only for Acorn RiscPC architectures.
 	 */
-	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
+	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
 	orr	r3, r7, #0x02000000
 	str	r3, [r0]
-	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
+	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
 	str	r3, [r0]
 #endif
 #endif
-- cgit v1.2.1

From 15d2ce7129f25c51d8a840a8a002c7ba0bb1509d Mon Sep 17 00:00:00 2001
From: "Liam R. Howlett"
Date: Fri, 29 Jul 2022 18:07:13 -0700
Subject: mips: rename mt_init to mips_mt_init

Move mt_init out of the way for the maple tree. Use the mips_mt prefix
to match the rest of the functions in the file.

Link: https://lkml.kernel.org/r/20220504002554.654642-2-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett
Cc: Catalin Marinas
Cc: David Howells
Cc: "Matthew Wilcox (Oracle)"
Cc: SeongJae Park
Cc: Vlastimil Babka
Cc: Will Deacon
Signed-off-by: Andrew Morton
---
 arch/mips/kernel/mips-mt.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index d5f7362e8c24..dc023a979803 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -230,7 +230,7 @@ void mips_mt_set_cpuoptions(void)
 
 struct class *mt_class;
 
-static int __init mt_init(void)
+static int __init mips_mt_init(void)
 {
 	struct class *mtc;
 
@@ -243,4 +243,4 @@ static int __init mt_init(void)
 	return 0;
 }
 
-subsys_initcall(mt_init);
+subsys_initcall(mips_mt_init);
-- cgit v1.2.1

From 450d0e74d886c172ac2f72518b797a18ee8d1327 Mon Sep 17 00:00:00 2001
From: Zhou Guanghui
Date: Wed, 15 Jun 2022 10:27:42 +0000
Subject: memblock,arm64: expand the static memblock memory table

In a system (Huawei Ascend ARM64 SoC) using HBM, a multi-bit ECC error
occurs, and the BIOS will mark the corresponding area (for example, 2 MB)
as unusable. When the system next restarts, these areas are either not
reported or are reported as EFI_UNUSABLE_MEMORY. Both cases lead to an
increase in the number of memblocks, with EFI_UNUSABLE_MEMORY leading to
the larger increase.

For example, if the EFI_UNUSABLE_MEMORY type is reported:
...
memory[0x92]	[0x0000200834a00000-0x0000200835bfffff], 0x0000000001200000 bytes on node 7 flags: 0x0
memory[0x93]	[0x0000200835c00000-0x0000200835dfffff], 0x0000000000200000 bytes on node 7 flags: 0x4
memory[0x94]	[0x0000200835e00000-0x00002008367fffff], 0x0000000000a00000 bytes on node 7 flags: 0x0
memory[0x95]	[0x0000200836800000-0x00002008369fffff], 0x0000000000200000 bytes on node 7 flags: 0x4
memory[0x96]	[0x0000200836a00000-0x0000200837bfffff], 0x0000000001200000 bytes on node 7 flags: 0x0
memory[0x97]	[0x0000200837c00000-0x0000200837dfffff], 0x0000000000200000 bytes on node 7 flags: 0x4
memory[0x98]	[0x0000200837e00000-0x000020087fffffff], 0x0000000048200000 bytes on node 7 flags: 0x0
memory[0x99]	[0x0000200880000000-0x0000200bcfffffff], 0x0000000350000000 bytes on node 6 flags: 0x0
memory[0x9a]	[0x0000200bd0000000-0x0000200bd01fffff], 0x0000000000200000 bytes on node 6 flags: 0x4
memory[0x9b]	[0x0000200bd0200000-0x0000200bd07fffff], 0x0000000000600000 bytes on node 6 flags: 0x0
memory[0x9c]	[0x0000200bd0800000-0x0000200bd09fffff], 0x0000000000200000 bytes on node 6 flags: 0x4
memory[0x9d]	[0x0000200bd0a00000-0x0000200fcfffffff], 0x00000003ff600000 bytes on node 6 flags: 0x0
memory[0x9e]	[0x0000200fd0000000-0x0000200fd01fffff], 0x0000000000200000 bytes on node 6 flags: 0x4
memory[0x9f]	[0x0000200fd0200000-0x0000200fffffffff], 0x000000002fe00000 bytes on node 6 flags: 0x0
...

The EFI memory map is parsed to construct the memblock arrays before the
memblock arrays can be resized. As a result, memory regions beyond
INIT_MEMBLOCK_REGIONS are lost.

Add a new macro INIT_MEMBLOCK_MEMORY_REGIONS to replace
INIT_MEMBLOCK_REGIONS as the size of the static memblock.memory array.
Allow overriding the memblock.memory array size with an
architecture-defined INIT_MEMBLOCK_MEMORY_REGIONS, and make arm64 set
INIT_MEMBLOCK_MEMORY_REGIONS to 1024 when CONFIG_EFI is enabled.
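The diff below is limited to 'arch', so it only shows the arm64 override. The generic side of the change lives in mm/memblock.c and is presumably along these lines (a sketch reproduced from memory; verify against the tree):

	/* mm/memblock.c side (sketch): fall back to the old size unless the
	 * architecture provides its own INIT_MEMBLOCK_MEMORY_REGIONS. */
	#ifndef INIT_MEMBLOCK_MEMORY_REGIONS
	#define INIT_MEMBLOCK_MEMORY_REGIONS	INIT_MEMBLOCK_REGIONS
	#endif

	static struct memblock_region
		memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;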
Link: https://lkml.kernel.org/r/20220615102742.96450-1-zhouguanghui1@huawei.com
Signed-off-by: Zhou Guanghui
Acked-by: Mike Rapoport
Tested-by: Darren Hart
Acked-by: Will Deacon [arm64]
Reviewed-by: Anshuman Khandual
Cc: Xu Qiang
Signed-off-by: Andrew Morton
---
 arch/arm64/include/asm/memory.h | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 0af70d9abede..ce8614fa376a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -364,6 +364,15 @@ void dump_mem_limit(void);
 # define INIT_MEMBLOCK_RESERVED_REGIONS	(INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
 #endif
 
+/*
+ * memory regions which marked with flag MEMBLOCK_NOMAP(for example, the memory
+ * of the EFI_UNUSABLE_MEMORY type) may divide a continuous memory block into
+ * multiple parts. As a result, the number of memory regions is large.
+ */
+#ifdef CONFIG_EFI
+#define INIT_MEMBLOCK_MEMORY_REGIONS	(INIT_MEMBLOCK_REGIONS * 8)
+#endif
+
 #include
 
 #endif /* __ASM_MEMORY_H */
-- cgit v1.2.1
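A quick sanity check on the 1024 figure quoted in the changelog, assuming the generic default of INIT_MEMBLOCK_REGIONS == 128 (an assumption worth verifying against mm/memblock.c):

	#define INIT_MEMBLOCK_REGIONS		128	/* assumed generic default */
	#define INIT_MEMBLOCK_MEMORY_REGIONS	(INIT_MEMBLOCK_REGIONS * 8)
	/* 128 * 8 == 1024, matching the value quoted in the changelog. */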