--- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -565,8 +565,15 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) } #endif -extern void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep); +extern void __update_tlb(struct vm_area_struct *vma, unsigned long address, + pte_t pte); + +static inline void update_mmu_cache(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + pte_t pte = *ptep; + __update_tlb(vma, address, pte); +} #define __HAVE_ARCH_UPDATE_MMU_TLB #define update_mmu_tlb update_mmu_cache @@ -574,7 +581,9 @@ extern void update_mmu_cache(struct vm_area_struct *vma, static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { - update_mmu_cache(vma, address, (pte_t *)pmdp); + pte_t pte = *(pte_t *)pmdp; + + __update_tlb(vma, address, pte); } /* --- a/arch/mips/mm/tlb-r3k.c +++ b/arch/mips/mm/tlb-r3k.c @@ -176,8 +176,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) } } -void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep) +void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) { unsigned long asid_mask = cpu_asid_mask(&current_cpu_data); unsigned long flags; @@ -204,7 +203,7 @@ void update_mmu_cache(struct vm_area_struct *vma, BARRIER; tlb_probe(); idx = read_c0_index(); - write_c0_entrylo0(pte_val(*ptep)); + write_c0_entrylo0(pte_val(pte)); write_c0_entryhi(address | pid); if (idx < 0) { /* BARRIER */ tlb_write_random(); --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -290,16 +290,14 @@ void local_flush_tlb_one(unsigned long page) * updates the TLB with the new pte(s), and another which also checks * for the R4k "end of page" hardware bug and does the needy. 
*/ -void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep) +void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) { -#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT + unsigned long flags; pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; -#endif - unsigned long flags; + pte_t *ptep, *ptemap = NULL; int idx, pid; /* @@ -318,19 +316,20 @@ void update_mmu_cache(struct vm_area_struct *vma, pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data); write_c0_entryhi(address | pid); } + pgdp = pgd_offset(vma->vm_mm, address); mtc0_tlbw_hazard(); tlb_probe(); tlb_probe_hazard(); - idx = read_c0_index(); -#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT - pgdp = pgd_offset(vma->vm_mm, address); p4dp = p4d_offset(pgdp, address); pudp = pud_offset(p4dp, address); pmdp = pmd_offset(pudp, address); + idx = read_c0_index(); +#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT /* this could be a huge page */ - if (ptep == (pte_t *)pmdp) { + if (pmd_huge(*pmdp)) { unsigned long lo; write_c0_pagemask(PM_HUGE_MASK); + ptep = (pte_t *)pmdp; lo = pte_to_entrylo(pte_val(*ptep)); write_c0_entrylo0(lo); write_c0_entrylo1(lo + (HPAGE_SIZE >> 7)); @@ -345,6 +344,13 @@ void update_mmu_cache(struct vm_area_struct *vma, } else #endif { + ptemap = ptep = pte_offset_map(pmdp, address); + /* + * update_mmu_cache() is called between pte_offset_map_lock() + * and pte_unmap_unlock(), so we can assume that ptep is not + * NULL here: and what should be done below if it were NULL? + */ + #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) #ifdef CONFIG_XPA write_c0_entrylo0(pte_to_entrylo(ptep->pte_high)); @@ -372,6 +378,9 @@ void update_mmu_cache(struct vm_area_struct *vma, tlbw_use_hazard(); htw_start(); flush_micro_tlb_vm(vma); + + if (ptemap) + pte_unmap(ptemap); local_irq_restore(flags); }