Skip to content

Commit a341223

Browse files
Peter Zijlstra (Intel) authored and ksacilotto committed
tlb: mmu_gather: add tlb_flush_*_range APIs
tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end, then set the corresponding cleared_* flags.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Zhenyu Ye <yezhenyu2@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20200625080314.230-5-yezhenyu2@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
(cherry picked from commit 2631ed0)
CVE-2021-4002
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Acked-by: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
1 parent 21dcffc commit a341223

1 file changed

Lines changed: 40 additions & 15 deletions

File tree

include/asm-generic/tlb.h

Lines changed: 40 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -495,6 +495,38 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
495495
}
496496
#endif
497497

498+
/*
499+
* tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
500+
* and set corresponding cleared_*.
501+
*/
502+
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
503+
unsigned long address, unsigned long size)
504+
{
505+
__tlb_adjust_range(tlb, address, size);
506+
tlb->cleared_ptes = 1;
507+
}
508+
509+
static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
510+
unsigned long address, unsigned long size)
511+
{
512+
__tlb_adjust_range(tlb, address, size);
513+
tlb->cleared_pmds = 1;
514+
}
515+
516+
static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
517+
unsigned long address, unsigned long size)
518+
{
519+
__tlb_adjust_range(tlb, address, size);
520+
tlb->cleared_puds = 1;
521+
}
522+
523+
static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
524+
unsigned long address, unsigned long size)
525+
{
526+
__tlb_adjust_range(tlb, address, size);
527+
tlb->cleared_p4ds = 1;
528+
}
529+
498530
/* Default no-op; architectures may override to track individual entries. */
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif
@@ -508,19 +540,17 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
508540
*/
509541
/*
 * Record that a PAGE_SIZE entry at @address was cleared: adjust the gather
 * range and cleared_ptes via tlb_flush_pte_range(), then let the
 * architecture hook track the individual entry.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)			\
	do {								\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);		\
		__tlb_remove_tlb_entry(tlb, ptep, address);		\
	} while (0)
515546

516547
/*
 * Huge-page variant of tlb_remove_tlb_entry(): pick the cleared_* level
 * from the hstate's page size (PMD- or PUD-sized mappings).
 */
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)		\
	do {								\
		unsigned long _sz = huge_page_size(h);			\
		if (_sz == PMD_SIZE)					\
			tlb_flush_pmd_range(tlb, address, _sz);		\
		else if (_sz == PUD_SIZE)				\
			tlb_flush_pud_range(tlb, address, _sz);		\
		__tlb_remove_tlb_entry(tlb, ptep, address);		\
	} while (0)
526556

@@ -534,8 +564,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
534564

535565
/*
 * Record clearing of a PMD-sized huge entry: adjust the gather range and
 * cleared_pmds, then invoke the architecture hook.
 */
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)
541570

@@ -549,8 +578,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
549578

550579
/*
 * Record clearing of a PUD-sized huge entry: adjust the gather range and
 * cleared_puds, then invoke the architecture hook.
 */
#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)
556584

@@ -575,19 +603,17 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
575603
#ifndef pte_free_tlb
/*
 * Free a PTE page table page. Walk caches for the table are keyed at the
 * PMD level, so use tlb_flush_pmd_range(), and flag that page tables
 * were freed so the flush also invalidates intermediate (walk-cache)
 * entries.
 */
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif
584611

585612
#ifndef pmd_free_tlb
/*
 * Free a PMD page table page. Walk caches for the table are keyed at the
 * PUD level, so use tlb_flush_pud_range(), and flag freed_tables for the
 * eventual flush.
 */
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif
@@ -596,9 +622,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
596622
#ifndef pud_free_tlb
/*
 * Free a PUD page table page. Walk caches for the table are keyed at the
 * P4D level, so use tlb_flush_p4d_range(), and flag freed_tables for the
 * eventual flush.
 */
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

0 commit comments

Comments (0)