patch-2.4.9 linux/include/asm-arm/proc-armv/cache.h

diff -u --recursive --new-file v2.4.8/linux/include/asm-arm/proc-armv/cache.h linux/include/asm-arm/proc-armv/cache.h
@@ -10,6 +10,12 @@
 #include <asm/mman.h>
 
 /*
+ * This flag is used to indicate that the page pointed to by a pte
+ * is dirty and requires cleaning before returning it to the user.
+ */
+#define PG_dcache_dirty PG_arch_1
+
+/*
  * Cache handling for 32-bit ARM processors.
  *
  * Note that on ARM, we have a more accurate specification than that
@@ -54,6 +60,33 @@
 /*
  * This flushes back any buffered write data.  We have to clean the entries
  * in the cache for this page.  This does not invalidate either I or D caches.
+ *
+ * Called from:
+ * 1. mm/filemap.c:filemap_nopage
+ * 2. mm/filemap.c:filemap_nopage
+ *    [via do_no_page - ok]
+ *
+ * 3. mm/memory.c:break_cow
+ *    [copy_cow_page doesn't do anything to the cache; insufficient cache
+ *     handling.  Need to add flush_dcache_page() here]
+ *
+ * 4. mm/memory.c:do_swap_page
+ *    [read_swap_cache_async doesn't do anything to the cache: insufficient
+ *     cache handling.  Need to add flush_dcache_page() here]
+ *
+ * 5. mm/memory.c:do_anonymous_page
+ *    [zero page, never written by kernel - ok]
+ *
+ * 6. mm/memory.c:do_no_page
+ *    [we will be calling update_mmu_cache, which will catch on PG_dcache_dirty]
+ *
+ * 7. mm/shmem.c:shmem_nopage
+ * 8. mm/shmem.c:shmem_nopage
+ *    [via do_no_page - ok]
+ *
+ * 9. fs/exec.c:put_dirty_page
+ *    [we call flush_dcache_page prior to this, which will flush out the
+ *     kernel virtual addresses from the dcache - ok]
  */
 static __inline__ void flush_page_to_ram(struct page *page)
 {
@@ -69,10 +102,28 @@
 #define flush_dcache_range(_s,_e)	cpu_cache_clean_invalidate_range((_s),(_e),0)
 
 /*
- * FIXME: We currently clean the dcache for this page.  Should we
- * also invalidate the Dcache?  And what about the Icache? -- rmk
+ * flush_dcache_page is used when the kernel has written to the page
+ * cache page at virtual address page->virtual.
+ *
+ * If this page isn't mapped (ie, page->mapping == NULL), or it has
+ * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
+ * then we _must_ always clean + invalidate the dcache entries associated
+ * with the kernel mapping.
+ *
+ * Otherwise we can defer the operation, and clean the cache when we are
+ * about to change to user space.  This is the same method as used on SPARC64.
+ * See update_mmu_cache for the user space part.
  */
-#define flush_dcache_page(page)		cpu_dcache_clean_page(page_address(page))
+static inline void flush_dcache_page(struct page *page)
+{
+	if (page->mapping && !(page->mapping->i_mmap) &&
+	     !(page->mapping->i_mmap_shared))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else {
+		unsigned long virt = (unsigned long)page_address(page);
+		cpu_cache_clean_invalidate_range(virt, virt + PAGE_SIZE, 0);
+	}
+}
 
 #define clean_dcache_entry(_s)		cpu_dcache_clean_entry((unsigned long)(_s))
 
@@ -84,11 +135,31 @@
 		cpu_icache_invalidate_range((_s), (_e));		\
 	} while (0)
 
-#define flush_icache_page(vma,pg)					\
-	do {								\
-		if ((vma)->vm_flags & PROT_EXEC)			\
-			cpu_icache_invalidate_page(page_address(pg));	\
-	} while (0)
+/*
+ * This function is misnamed IMHO.  There are three places where it
+ * is called, each of which is preceded immediately by a call to
+ * flush_page_to_ram:
+ *
+ *  1. kernel/ptrace.c:access_one_page
+ *     called after we have written to the kernel view of a user page.
+ *     The user page has been expunged from the cache by flush_cache_page.
+ *     [we don't need to do anything here if we add a call to
+ *      flush_dcache_page]
+ *
+ *  2. mm/memory.c:do_swap_page
+ *     called after we have (possibly) written to the kernel view of a
+ *     user page, which has previously been removed (ie, has been through
+ *     the swap cache).
+ *     [if the flush_page_to_ram() conditions are satisfied, then ok]
+ *
+ *  3. mm/memory.c:do_no_page
+ *     [if the flush_page_to_ram() conditions are satisfied, then ok]
+ *
+ * Invalidating the icache at the kernel's virtual page isn't really
+ * going to do us much good, since we wouldn't have executed any
+ * instructions there.
+ */
+#define flush_icache_page(vma,pg)	do { } while (0)
 
 /*
  * Old ARM MEMC stuff.  This supports the reversed mapping handling that
@@ -156,7 +227,8 @@
 	} while (0)
 
 /*
- * 32-bit ARM Processors don't have any MMU cache
+ * If PG_dcache_dirty is set for the page, we need to ensure that any
+ * cache entries for the kernel's virtual memory range are written
+ * back to the page.
  */
-#define update_mmu_cache(vma,address,pte) do { } while (0)
-
+extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
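
What update_mmu_cache() might look like under this scheme (a sketch only, not
part of the patch -- the real definition lives elsewhere in the ARM mm code
and may differ, e.g. it may also handle the I-cache for executable mappings;
pte_page(), VALID_PAGE() and test_and_clear_bit() are standard 2.4 helpers
assumed here, everything else comes from the header above):

	void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
	{
		struct page *page = pte_page(pte);

		/* Only page cache pages marked dirty by flush_dcache_page()
		 * need the deferred clean; vma/addr are unused because only
		 * the kernel-side mapping has dirty cache lines.  */
		if (VALID_PAGE(page) && page->mapping &&
		    test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
			unsigned long virt = (unsigned long)page_address(page);

			/* Write back and invalidate the kernel-mapping cache
			 * lines so the new user mapping sees the data.  */
			cpu_cache_clean_invalidate_range(virt, virt + PAGE_SIZE, 0);
		}
	}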

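To see how the two halves fit together, here is an illustrative sequence (not
taken from the patch; page, src, ptep, ptev, vma and address are placeholder
names) for a page cache page that is filled by the kernel and later faulted
into user space:

	void *kaddr = page_address(page);

	/* 1. The kernel writes the page through its own cacheable mapping,
	 *    e.g. a filesystem filling a page cache page for do_no_page.  */
	memcpy(kaddr, src, PAGE_SIZE);

	/* 2. The page has a mapping but no user mappings yet (i_mmap and
	 *    i_mmap_shared are empty), so flush_dcache_page() only sets
	 *    PG_dcache_dirty and defers the cache clean.                  */
	flush_dcache_page(page);

	/* 3. When the fault handler installs the user pte, update_mmu_cache()
	 *    notices PG_dcache_dirty and cleans the kernel-side cache lines
	 *    before user space can see stale data.                          */
	set_pte(ptep, ptev);
	update_mmu_cache(vma, address, ptev);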