1616#include <linux/mm_types.h>
1717#include <linux/cpufeature.h>
1818#include <linux/page-flags.h>
19+ #include <linux/page_table_check.h>
1920#include <linux/radix-tree.h>
2021#include <linux/atomic.h>
22+ #include <linux/mmap_lock.h>
2123#include <asm/ctlreg.h>
2224#include <asm/bug.h>
2325#include <asm/page.h>
@@ -1190,6 +1192,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
11901192 /* At this point the reference through the mapping is still present */
11911193 if (mm_is_protected (mm ) && pte_present (res ))
11921194 WARN_ON_ONCE (uv_convert_from_secure_pte (res ));
1195+ page_table_check_pte_clear (mm , addr , res );
11931196 return res ;
11941197}
11951198
@@ -1208,6 +1211,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
12081211 /* At this point the reference through the mapping is still present */
12091212 if (mm_is_protected (vma -> vm_mm ) && pte_present (res ))
12101213 WARN_ON_ONCE (uv_convert_from_secure_pte (res ));
1214+ page_table_check_pte_clear (vma -> vm_mm , addr , res );
12111215 return res ;
12121216}
12131217
@@ -1231,6 +1235,9 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
12311235 } else {
12321236 res = ptep_xchg_lazy (mm , addr , ptep , __pte (_PAGE_INVALID ));
12331237 }
1238+
1239+ page_table_check_pte_clear (mm , addr , res );
1240+
12341241 /* Nothing to do */
12351242 if (!mm_is_protected (mm ) || !pte_present (res ))
12361243 return res ;
@@ -1327,6 +1334,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
13271334{
13281335 if (pte_present (entry ))
13291336 entry = clear_pte_bit (entry , __pgprot (_PAGE_UNUSED ));
1337+ page_table_check_ptes_set (mm , addr , ptep , entry , nr );
13301338 for (;;) {
13311339 set_pte (ptep , entry );
13321340 if (-- nr == 0 )
@@ -1703,6 +1711,7 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
/*
 * Install a pmd entry, notifying the page table check machinery first so
 * its accounting is updated before the new mapping becomes visible.
 */
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	page_table_check_pmd_set(mm, addr, pmdp, entry);
	set_pmd(pmdp, entry);
}
17081717
@@ -1717,20 +1726,29 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
17171726static inline pmd_t pmdp_huge_get_and_clear (struct mm_struct * mm ,
17181727 unsigned long addr , pmd_t * pmdp )
17191728{
1720- return pmdp_xchg_direct (mm , addr , pmdp , __pmd (_SEGMENT_ENTRY_EMPTY ));
1729+ pmd_t pmd ;
1730+
1731+ pmd = pmdp_xchg_direct (mm , addr , pmdp , __pmd (_SEGMENT_ENTRY_EMPTY ));
1732+ page_table_check_pmd_clear (mm , addr , pmd );
1733+ return pmd ;
17211734}
17221735
17231736#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
17241737static inline pmd_t pmdp_huge_get_and_clear_full (struct vm_area_struct * vma ,
17251738 unsigned long addr ,
17261739 pmd_t * pmdp , int full )
17271740{
1741+ pmd_t pmd ;
1742+
17281743 if (full ) {
1729- pmd_t pmd = * pmdp ;
1744+ pmd = * pmdp ;
17301745 set_pmd (pmdp , __pmd (_SEGMENT_ENTRY_EMPTY ));
1746+ page_table_check_pmd_clear (vma -> vm_mm , addr , pmd );
17311747 return pmd ;
17321748 }
1733- return pmdp_xchg_lazy (vma -> vm_mm , addr , pmdp , __pmd (_SEGMENT_ENTRY_EMPTY ));
1749+ pmd = pmdp_xchg_lazy (vma -> vm_mm , addr , pmdp , __pmd (_SEGMENT_ENTRY_EMPTY ));
1750+ page_table_check_pmd_clear (vma -> vm_mm , addr , pmd );
1751+ return pmd ;
17341752}
17351753
17361754#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
@@ -1744,11 +1762,16 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
/*
 * Mark a present pmd entry invalid (e.g. for THP splitting) and return
 * the previous entry.
 */
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	VM_WARN_ON_ONCE(!pmd_present(pmd));
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
#ifdef CONFIG_PAGE_TABLE_CHECK
	/*
	 * Drop the READ bit so pmd_user_accessible_page() does not regard
	 * the invalidated entry as a user-accessible mapping, which would
	 * make page_table_check_pmd_set() below account the page twice.
	 */
	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_READ));
#endif
	/* Report the (modified) entry before it is written back. */
	page_table_check_pmd_set(vma->vm_mm, addr, pmdp, pmd);
	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
	return pmd;
}
17531776
17541777#define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -1783,6 +1806,29 @@ static inline int has_transparent_hugepage(void)
17831806}
17841807#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
17851808
1809+ #ifdef CONFIG_PAGE_TABLE_CHECK
/*
 * Page table check helper: a present pte is considered to map a
 * user-accessible page. Must never be called on kernel page tables.
 */
static inline bool pte_user_accessible_page(struct mm_struct *mm, unsigned long addr, pte_t pte)
{
	VM_BUG_ON(mm == &init_mm);

	return pte_present(pte);
}
1816+
1817+ static inline bool pmd_user_accessible_page (struct mm_struct * mm , unsigned long addr , pmd_t pmd )
1818+ {
1819+ VM_BUG_ON (mm == & init_mm );
1820+
1821+ return pmd_leaf (pmd ) && (pmd_val (pmd ) & _SEGMENT_ENTRY_READ );
1822+ }
1823+
/*
 * Page table check helper: a leaf pud is considered to map a
 * user-accessible page. Must never be called on kernel page tables.
 * NOTE(review): unlike the pmd variant above, no READ-bit test is done
 * here — presumably intentional for s390 region/segment entries; confirm.
 */
static inline bool pud_user_accessible_page(struct mm_struct *mm, unsigned long addr, pud_t pud)
{
	VM_BUG_ON(mm == &init_mm);

	return pud_leaf(pud);
}
1830+ #endif
1831+
17861832/*
17871833 * 64 bit swap entry format:
17881834 * A page-table entry has some bits we have to treat in a special way.
0 commit comments