patch-2.4.9 linux/arch/arm/mm/fault-armv.c

Next file: linux/arch/arm/mm/fault-common.c
Previous file: linux/arch/arm/mm/discontig.c
Back to the patch index
Back to the overall index

diff -u --recursive --new-file v2.4.8/linux/arch/arm/mm/fault-armv.c linux/arch/arm/mm/fault-armv.c
@@ -540,6 +540,21 @@
 	return 0;
 }
 
+/*
+ * Hook for things that need to trap external faults.  Note that
+ * we don't guarantee that this will be the final version of the
+ * interface.
+ */
+int (*external_fault)(unsigned long addr, struct pt_regs *regs);
+
+static int
+do_external_fault(unsigned long addr, int error_code, struct pt_regs *regs)
+{
+	if (external_fault)
+		return external_fault(addr, regs);
+	return 1;
+}
+
 static const struct fsr_info {
 	int	(*fn)(unsigned long addr, int error_code, struct pt_regs *regs);
 	int	sig;
@@ -549,13 +564,13 @@
 	{ do_alignment,		SIGILL,	 "alignment exception"		   },
 	{ NULL,			SIGKILL, "terminal exception"		   },
 	{ do_alignment,		SIGILL,	 "alignment exception"		   },
-	{ NULL,			SIGBUS,	 "external abort on linefetch"	   },
+	{ do_external_fault,	SIGBUS,	 "external abort on linefetch"	   },
 	{ do_translation_fault,	SIGSEGV, "section translation fault"	   },
-	{ NULL,			SIGBUS,	 "external abort on linefetch"	   },
+	{ do_external_fault,	SIGBUS,	 "external abort on linefetch"	   },
 	{ do_page_fault,	SIGSEGV, "page translation fault"	   },
-	{ NULL,			SIGBUS,	 "external abort on non-linefetch" },
+	{ do_external_fault,	SIGBUS,	 "external abort on non-linefetch" },
 	{ NULL,			SIGSEGV, "section domain fault"		   },
-	{ NULL,			SIGBUS,	 "external abort on non-linefetch" },
+	{ do_external_fault,	SIGBUS,	 "external abort on non-linefetch" },
 	{ NULL,			SIGSEGV, "page domain fault"		   },
 	{ NULL,			SIGBUS,	 "external abort on translation"   },
 	{ do_sect_fault,	SIGSEGV, "section permission fault"	   },
@@ -571,14 +586,10 @@
 {
 	const struct fsr_info *inf = fsr_info + (fsr & 15);
 
-#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
+#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100) || defined(CONFIG_DEBUG_ERRORS)
 	if (addr == regs->ARM_pc)
 		goto sa1_weirdness;
 #endif
-#if defined(CONFIG_CPU_ARM720) && defined(CONFIG_ALIGNMENT_TRAP)
-	if (addr & 3 && (fsr & 13) != 1)
-		goto arm720_weirdness;
-#endif
 
 	if (!inf->fn)
 		goto bad;
@@ -593,12 +604,16 @@
 	die_if_kernel("Oops", regs, 0);
 	return;
 
-#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
+#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100) || defined(CONFIG_DEBUG_ERRORS)
 sa1_weirdness:
 	if (user_mode(regs)) {
 		static int first = 1;
-		if (first)
-			printk(KERN_DEBUG "Weird data abort detected\n");
+		if (first) {
+			printk(KERN_DEBUG "Fixing up bad data abort at %08lx\n", addr);
+#ifdef CONFIG_DEBUG_ERRORS
+			show_pte(current->mm, addr);
+#endif
+		}
 		first = 0;
 		return;
 	}
@@ -607,37 +622,128 @@
 		goto bad;
 	return;
 #endif
-#if defined(CONFIG_CPU_ARM720) && defined(CONFIG_ALIGNMENT_TRAP)
-arm720_weirdness:
-	if (!user_mode(regs)) {
-		unsigned long instr;
-
-		instr = *(unsigned long *)instruction_pointer(regs);
-
-		if ((instr & 0x04400000) != 0x04400000) {
-			static int first = 1;
-			if (first)
-				printk("Mis-reported alignment fault at "
-					"0x%08lx, fsr 0x%02x, code 0x%02x, "
-					"PC = 0x%08lx, instr = 0x%08lx\n",
-					addr, fsr, error_code, regs->ARM_pc,
-					instr);
-			first = 0;
-			cpu_tlb_invalidate_all();
-			cpu_cache_clean_invalidate_all();
-			return;
-		}
+}
+
+asmlinkage void
+do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
+{
+	do_translation_fault(addr, 0, regs);
+}
+
+/*
+ * We take the easy way out of this problem - we make the
+ * PTE uncacheable.  However, we leave the write buffer on.
+ */
+static void adjust_pte(struct vm_area_struct *vma, unsigned long address)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte, entry;
+
+	pgd = pgd_offset(vma->vm_mm, address);
+	if (pgd_none(*pgd))
+		return;
+	if (pgd_bad(*pgd))
+		goto bad_pgd;
+
+	pmd = pmd_offset(pgd, address);
+	if (pmd_none(*pmd))
+		return;
+	if (pmd_bad(*pmd))
+		goto bad_pmd;
+
+	pte = pte_offset(pmd, address);
+	entry = *pte;
+
+	/*
+	 * If this page isn't present, or is already setup to
+	 * fault (i.e., is old), we can safely ignore any issues.
+	 */
+	if (pte_present(entry) && pte_val(entry) & L_PTE_CACHEABLE) {
+		flush_cache_page(vma, address);
+		pte_val(entry) &= ~L_PTE_CACHEABLE;
+		set_pte(pte, entry);
+		flush_tlb_page(vma, address);
 	}
+	return;
 
-	if (!inf->fn || inf->fn(addr, error_code, regs))
-		goto bad;
+bad_pgd:
+	pgd_ERROR(*pgd);
+	pgd_clear(pgd);
+	return;
+
+bad_pmd:
+	pmd_ERROR(*pmd);
+	pmd_clear(pmd);
 	return;
-#endif
 }
 
-asmlinkage int
-do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
+/*
+ * Take care of architecture specific things when placing a new PTE into
+ * a page table, or changing an existing PTE.  Basically, there are two
+ * things that we need to take care of:
+ *
+ *  1. If PG_dcache_dirty is set for the page, we need to ensure
+ *     that any cache entries for the kernels virtual memory
+ *     range are written back to the page.
+ *  2. If we have multiple shared mappings of the same space in
+ *     an object, we need to deal with the cache aliasing issues.
+ *
+ * Note that the page_table_lock will be held.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
-	do_translation_fault(addr, 0, regs);
-	return 1;
+	struct page *page = pte_page(pte);
+	struct vm_area_struct *mpnt;
+	struct mm_struct *mm;
+	unsigned long pgoff;
+	int aliases;
+
+	if (!VALID_PAGE(page) || !page->mapping)
+		return;
+
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
+		unsigned long kvirt = (unsigned long)page_address(page);
+		cpu_cache_clean_invalidate_range(kvirt, kvirt + PAGE_SIZE, 0);
+	}
+
+	mm = vma->vm_mm;
+	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
+	aliases = 0;
+
+	/*
+	 * If we have any shared mappings that are in the same mm
+	 * space, then we need to handle them specially to maintain
+	 * cache coherency.
+	 */
+	for (mpnt = page->mapping->i_mmap_shared; mpnt;
+	     mpnt = mpnt->vm_next_share) {
+		unsigned long off;
+
+		/*
+		 * If this VMA is not in our MM, we can ignore it.
+		 * Note that we intentionally don't mask out the VMA
+		 * that we are fixing up.
+		 */
+		if (mpnt->vm_mm != mm && mpnt != vma)
+			continue;
+
+		/*
+		 * If the page isn't in this VMA, we can also ignore it.
+		 */
+		if (pgoff < mpnt->vm_pgoff)
+			continue;
+
+		off = pgoff - mpnt->vm_pgoff;
+		if (off >= (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT)
+			continue;
+
+		/*
+		 * Ok, it is within mpnt.  Fix it up.
+		 */
+		adjust_pte(mpnt, mpnt->vm_start + (off << PAGE_SHIFT));
+		aliases ++;
+	}
+	if (aliases)
+		adjust_pte(vma, addr);
 }

FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)