patch-2.2.8 linux/include/asm-arm/proc-armv/pgtable.h


diff -u --recursive --new-file v2.2.7/linux/include/asm-arm/proc-armv/pgtable.h linux/include/asm-arm/proc-armv/pgtable.h
@@ -3,14 +3,15 @@
  *
  * Copyright (C) 1995, 1996, 1997 Russell King
  *
- * 12-01-1997	RMK	Altered flushing routines to use function pointers
+ * 12-Jan-1997	RMK	Altered flushing routines to use function pointers
  *			now possible to combine ARM6, ARM7 and StrongARM versions.
+ * 17-Apr-1999	RMK	Now pass an area size to clean_cache_area and
+ *			flush_icache_area.
  */
 #ifndef __ASM_PROC_PGTABLE_H
 #define __ASM_PROC_PGTABLE_H
 
-#include <asm/arch/mmu.h>
-#include <asm/arch/processor.h>		/* For TASK_SIZE */
+#include <asm/arch/memory.h>		/* For TASK_SIZE */
 
 #define LIBRARY_TEXT_START 0x0c000000
 
@@ -41,8 +42,23 @@
 				 ((_vma)->vm_flags & VM_EXEC) ? 1 : 0);	\
 	} while (0)
 
+#define clean_cache_range(_start,_end)					\
+	do {								\
+		unsigned long _s, _sz;					\
+		_s = (unsigned long)_start;				\
+		_sz = (unsigned long)_end - _s;				\
+		processor.u.armv3v4._clean_cache_area(_s, _sz);		\
+	} while (0)
+
+#define clean_cache_area(_start,_size)					\
+	do {								\
+		unsigned long _s;					\
+		_s = (unsigned long)_start;				\
+		processor.u.armv3v4._clean_cache_area(_s, _size);	\
+	} while (0)
+
 #define flush_icache_range(_start,_end)					\
-	processor.u.armv3v4._flush_icache_area((_start), (_end))
+	processor.u.armv3v4._flush_icache_area((_start), (_end) - (_start))
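
Both forms above funnel into the processor-specific _clean_cache_area() hook with an explicit byte count. A minimal usage sketch (ptep is just a placeholder pointer here), mirroring what the page-table allocators later in this patch do after writing a single 4-byte entry:

	clean_cache_area(ptep, sizeof(pte_t));	/* write the entry back to RAM before the MMU walks it */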
 
 /*
  * We don't have a MEMC chip...
@@ -60,12 +76,6 @@
 	processor.u.armv3v4._flush_ram_page ((_page) & PAGE_MASK);
 
 /*
- * Make the page uncacheable (must flush page beforehand).
- */
-#define uncache_page(_page)						\
-	processor.u.armv3v4._flush_ram_page ((_page) & PAGE_MASK);
-
-/*
  * TLB flushing:
  *
  *  - flush_tlb() flushes the current mm struct TLBs
@@ -106,22 +116,15 @@
 	} while (0)
 
 /*
- * Since the page tables are in cached memory, we need to flush the dirty
- * data cached entries back before we flush the tlb...  This is also useful
- * to flush out the SWI instruction for signal handlers...
+ * PMD_SHIFT determines the size of the area a second-level page table can map
  */
-#define __flush_entry_to_ram(entry)						\
-	processor.u.armv3v4._flush_cache_entry((unsigned long)(entry))
-
-#define __flush_pte_to_ram(entry)						\
-	processor.u.armv3v4._flush_cache_pte((unsigned long)(entry))
-
-/* PMD_SHIFT determines the size of the area a second-level page table can map */
 #define PMD_SHIFT       20
 #define PMD_SIZE        (1UL << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE-1))
 
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
+/*
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
 #define PGDIR_SHIFT     20
 #define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK      (~(PGDIR_SIZE-1))
@@ -135,6 +138,7 @@
 #define PTRS_PER_PGD    4096
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
 
+
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
  * physical memory until the kernel virtual memory starts.  That means that
@@ -147,87 +151,28 @@
 #define VMALLOC_VMADDR(x) ((unsigned long)(x))
 #define VMALLOC_END       (PAGE_OFFSET + 0x10000000)
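
Worked example (assuming the usual ARM PAGE_OFFSET of 0xc0000000, which is defined elsewhere):

	VMALLOC_END   = 0xc0000000 + 0x10000000 = 0xd0000000
	VMALLOC_START ~ 0xc2000000 + 8 MB       = 0xc2800000   (with 32 MB of RAM)

so the 8 MB hole sits between the end of the direct-mapped RAM and the start of the vmalloc area.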
 
-/* PMD types (actually level 1 descriptor) */
-#define PMD_TYPE_MASK		0x0003
-#define PMD_TYPE_FAULT		0x0000
-#define PMD_TYPE_TABLE		0x0001
-#define PMD_TYPE_SECT		0x0002
-#define PMD_UPDATABLE		0x0010
-#define PMD_SECT_CACHEABLE	0x0008
-#define PMD_SECT_BUFFERABLE	0x0004
-#define PMD_SECT_AP_WRITE	0x0400
-#define PMD_SECT_AP_READ	0x0800
-#define PMD_DOMAIN(x)		((x) << 5)
-
-/* PTE types (actially level 2 descriptor) */
-#define PTE_TYPE_MASK	0x0003
-#define PTE_TYPE_FAULT	0x0000
-#define PTE_TYPE_LARGE	0x0001
-#define PTE_TYPE_SMALL	0x0002
-#define PTE_AP_READ	0x0aa0
-#define PTE_AP_WRITE	0x0550
-#define PTE_CACHEABLE	0x0008
-#define PTE_BUFFERABLE	0x0004
 
-/* Domains */
+/*
+ * Domains
+ */
 #define DOMAIN_USER	0
 #define DOMAIN_KERNEL	1
 #define DOMAIN_TABLE	1
 #define DOMAIN_IO	2
 
-#define _PAGE_CHG_MASK  (0xfffff00c | PTE_TYPE_MASK)
 
-/*
- * We define the bits in the page tables as follows:
- *  PTE_BUFFERABLE	page is dirty
- *  PTE_AP_WRITE	page is writable
- *  PTE_AP_READ		page is a young (unsetting this causes faults for any access)
- *  PTE_CACHEABLE       page is readable
- *
- * A page will not be made writable without the dirty bit set.
- * It is not legal to have a writable non-dirty page though (it breaks).
- *
- * A readable page is marked as being cacheable.
- * Youngness is indicated by hardware read.  If the page is old,
- * then we will fault and make the page young again.
- */
-#define _PTE_YOUNG	PTE_AP_READ
-#define _PTE_DIRTY	PTE_BUFFERABLE
-#define _PTE_READ	PTE_CACHEABLE
-#define _PTE_WRITE	PTE_AP_WRITE
-
-#define PAGE_NONE       __pgprot(PTE_TYPE_SMALL | _PTE_YOUNG)
-#define PAGE_SHARED     __pgprot(PTE_TYPE_SMALL | _PTE_YOUNG | _PTE_READ | _PTE_WRITE)
-#define PAGE_COPY       __pgprot(PTE_TYPE_SMALL | _PTE_YOUNG | _PTE_READ)
-#define PAGE_READONLY   __pgprot(PTE_TYPE_SMALL | _PTE_YOUNG | _PTE_READ)
-#define PAGE_KERNEL     __pgprot(PTE_TYPE_SMALL | _PTE_READ  | _PTE_DIRTY | _PTE_WRITE)
 
-#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_USER))
-#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_KERNEL))
+#undef TEST_VERIFY_AREA
 
 /*
- * The arm can't do page protection for execute, and considers that the same are read.
- * Also, write permissions imply read permissions. This is the closest we can get..
+ * The sa110 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
  */
-#define __P000  PAGE_NONE
-#define __P001  PAGE_READONLY
-#define __P010  PAGE_COPY
-#define __P011  PAGE_COPY
-#define __P100  PAGE_READONLY
-#define __P101  PAGE_READONLY
-#define __P110  PAGE_COPY
-#define __P111  PAGE_COPY
-
-#define __S000  PAGE_NONE
-#define __S001  PAGE_READONLY
-#define __S010  PAGE_SHARED
-#define __S011  PAGE_SHARED
-#define __S100  PAGE_READONLY
-#define __S101  PAGE_READONLY
-#define __S110  PAGE_SHARED
-#define __S111  PAGE_SHARED
+extern __inline__ void update_mmu_cache(struct vm_area_struct * vma,
+	unsigned long address, pte_t pte)
+{
+}
 
-#undef TEST_VERIFY_AREA
 
 /*
  * BAD_PAGETABLE is used when we need a bogus page-table, while
@@ -240,97 +185,40 @@
 extern pte_t * __bad_pagetable(void);
 extern unsigned long *empty_zero_page;
 
-#define BAD_PAGETABLE __bad_pagetable()
-#define BAD_PAGE __bad_page()
-#define ZERO_PAGE ((unsigned long) empty_zero_page)
+#define BAD_PAGETABLE	__bad_pagetable()
+#define BAD_PAGE	__bad_page()
+#define ZERO_PAGE	((unsigned long) empty_zero_page)
 
 /* number of bits that fit into a memory pointer */
-#define BYTES_PER_PTR			(sizeof(unsigned long))
-#define BITS_PER_PTR                    (8*BYTES_PER_PTR)
+#define BYTES_PER_PTR	(sizeof(unsigned long))
+#define BITS_PER_PTR	(8*BYTES_PER_PTR)
 
 /* to align the pointer to a pointer address */
-#define PTR_MASK                        (~(sizeof(void*)-1))
+#define PTR_MASK	(~(sizeof(void*)-1))
 
 /* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
-#define SIZEOF_PTR_LOG2                 2
+#define SIZEOF_PTR_LOG2	2
 
 /* to find an entry in a page-table */
 #define PAGE_PTR(address) \
 ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
 
-/* to set the page-dir */
+/* to set the page-dir
+ * Note that we need to flush the cache and TLBs
+ * if we are affecting the current task.
+ */
 #define SET_PAGE_DIR(tsk,pgdir)					\
 do {								\
 	tsk->tss.memmap = __virt_to_phys((unsigned long)pgdir);	\
-	if ((tsk) == current)					\
+	if ((tsk) == current) {					\
+		flush_cache_all();				\
 		__asm__ __volatile__(				\
 		"mcr%?	p15, 0, %0, c2, c0, 0\n"		\
 		: : "r" (tsk->tss.memmap));			\
+		flush_tlb_all();				\
+	}							\
 } while (0)
 
-extern __inline__ int pte_none(pte_t pte)
-{
-	return !pte_val(pte);
-}
-
-#define pte_clear(ptep)	set_pte(ptep, __pte(0))
-
-extern __inline__ int pte_present(pte_t pte)
-{
-#if 0
-	/* This is what it really does, the else
-	   part is just to make it easier for the compiler */
-	switch (pte_val(pte) & PTE_TYPE_MASK) {
-	case PTE_TYPE_LARGE:
-	case PTE_TYPE_SMALL:
-		return 1;
-	default:
-		return 0;
-	}
-#else
-	return ((pte_val(pte) + 1) & 2);
-#endif
-}
-
-extern __inline__ int pmd_none(pmd_t pmd)
-{
-	return !pmd_val(pmd);
-}
-
-#define pmd_clear(pmdp) set_pmd(pmdp, __pmd(0))
-
-extern __inline__ int pmd_bad(pmd_t pmd)
-{
-#if 0
-	/* This is what it really does, the else
-	   part is just to make it easier for the compiler */
-	switch (pmd_val(pmd) & PMD_TYPE_MASK) {
-	case PMD_TYPE_FAULT:
-	case PMD_TYPE_TABLE:
-		return 0;
-	default:
-		return 1;
-	}
-#else
-	return pmd_val(pmd) & 2;
-#endif
-}
-
-extern __inline__ int pmd_present(pmd_t pmd)
-{
-#if 0
-	/* This is what it really does, the else
-	   part is just to make it easier for the compiler */
-	switch (pmd_val(pmd) & PMD_TYPE_MASK) {
-	case PMD_TYPE_TABLE:
-		return 1;
-	default:
-		return 0;
-	}
-#else
-	return ((pmd_val(pmd) + 1) & 2);
-#endif
-}
 
 /*
  * The "pgd_xxx()" functions here are trivial for a folded two-level
@@ -342,231 +230,224 @@
 #define pgd_present(pgd)	(1)
 #define pgd_clear(pgdp)
 
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-#define pte_read(pte)		(1)
-#define pte_exec(pte)		(1)
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
-extern __inline__ int pte_write(pte_t pte)
+/* to find an entry in a page-table-directory */
+extern __inline__ pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
 {
-	return pte_val(pte) & _PTE_WRITE;
+	return mm->pgd + (address >> PGDIR_SHIFT);
 }
 
-extern __inline__ int pte_dirty(pte_t pte)
-{
-	return pte_val(pte) & _PTE_DIRTY;
-}
+extern unsigned long get_page_2k(int priority);
+extern void free_page_2k(unsigned long page);
 
-extern __inline__ int pte_young(pte_t pte)
-{
-	return pte_val(pte) & _PTE_YOUNG;
-}
+/*
+ * Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any.
+ */
 
-extern __inline__ pte_t pte_wrprotect(pte_t pte)
-{
-	pte_val(pte) &= ~_PTE_WRITE;
-	return pte;
-}
+#ifndef __SMP__
+extern struct pgtable_cache_struct {
+	unsigned long *pgd_cache;
+	unsigned long *pte_cache;
+	unsigned long pgtable_cache_sz;
+} quicklists;
 
-extern __inline__ pte_t pte_nocache(pte_t pte)
-{
-	pte_val(pte) &= ~PTE_CACHEABLE;
-	return pte;
-}
+#define pgd_quicklist (quicklists.pgd_cache)
+#define pmd_quicklist ((unsigned long *)0)
+#define pte_quicklist (quicklists.pte_cache)
+#define pgtable_cache_size (quicklists.pgtable_cache_sz)
+#else
+#error Pgtable caches have to be per-CPU, so that no locking is needed.
+#endif
 
-extern __inline__ pte_t pte_mkclean(pte_t pte)
-{
-	pte_val(pte) &= ~_PTE_DIRTY;
-	return pte;
-}
+extern pgd_t *get_pgd_slow(void);
 
-extern __inline__ pte_t pte_mkold(pte_t pte)
+extern __inline__ pgd_t *get_pgd_fast(void)
 {
-	pte_val(pte) &= ~_PTE_YOUNG;
-	return pte;
+	unsigned long *ret;
+
+	if((ret = pgd_quicklist) != NULL) {
+		pgd_quicklist = (unsigned long *)(*ret);
+		ret[0] = ret[1];
+		clean_cache_area(ret, 4);
+		pgtable_cache_size--;
+	} else
+		ret = (unsigned long *)get_pgd_slow();
+	return (pgd_t *)ret;
 }
 
-extern __inline__ pte_t pte_mkwrite(pte_t pte)
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
 {
-	pte_val(pte) |= _PTE_WRITE;
-	return pte;
+	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+	pgd_quicklist = (unsigned long *) pgd;
+	pgtable_cache_size++;
 }
 
-extern __inline__ pte_t pte_mkdirty(pte_t pte)
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
 {
-	pte_val(pte) |= _PTE_DIRTY;
-	return pte;
+	free_pages((unsigned long) pgd, 2);
 }
 
-extern __inline__ pte_t pte_mkyoung(pte_t pte)
+#define pgd_free(pgd)		free_pgd_fast(pgd)
+#define pgd_alloc()		get_pgd_fast()
+
+extern __inline__ void set_pgdir(unsigned long address, pgd_t entry)
 {
-	pte_val(pte) |= _PTE_YOUNG;
-	return pte;
+	struct task_struct * p;
+	pgd_t *pgd;
+
+	read_lock(&tasklist_lock);
+	for_each_task(p) {
+		if (!p->mm)
+			continue;
+		*pgd_offset(p->mm,address) = entry;
+	}
+	read_unlock(&tasklist_lock);
+	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+		pgd[address >> PGDIR_SHIFT] = entry;
 }
 
-/*
- * The following are unable to be implemented on this MMU
- */
-#if 0
-extern __inline__ pte_t pte_rdprotect(pte_t pte)
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/****************
+* PMD functions *
+****************/
+
+/* PMD types (actually level 1 descriptor) */
+#define PMD_TYPE_MASK		0x0003
+#define PMD_TYPE_FAULT		0x0000
+#define PMD_TYPE_TABLE		0x0001
+#define PMD_TYPE_SECT		0x0002
+#define PMD_UPDATABLE		0x0010
+#define PMD_SECT_CACHEABLE	0x0008
+#define PMD_SECT_BUFFERABLE	0x0004
+#define PMD_SECT_AP_WRITE	0x0400
+#define PMD_SECT_AP_READ	0x0800
+#define PMD_DOMAIN(x)		((x) << 5)
+
+#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_USER))
+#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_KERNEL))
+
+#define pmd_none(pmd)		(!pmd_val(pmd))
+#define pmd_clear(pmdp)		set_pmd(pmdp, __pmd(0))
+#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
+#define mk_user_pmd(ptep)	__mk_pmd(ptep, _PAGE_USER_TABLE)
+#define mk_kernel_pmd(ptep)	__mk_pmd(ptep, _PAGE_KERNEL_TABLE)
+#define set_pmd(pmdp,pmd)	processor.u.armv3v4._set_pmd(pmdp,pmd)
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir, address) ((pmd_t *)(dir))
+
+extern __inline__ int pmd_present(pmd_t pmd)
 {
-	pte_val(pte) &= ~(PTE_CACHEABLE|PTE_AP_READ);
-	return pte;
+	return ((pmd_val(pmd) + 1) & 2);
 }
 
-extern __inline__ pte_t pte_exprotect(pte_t pte)
+/* We don't use pmd cache, so this is a dummy routine */
+extern __inline__ pmd_t *get_pmd_fast(void)
 {
-	pte_val(pte) &= ~(PTE_CACHEABLE|PTE_AP_READ);
-	return pte;
+	return (pmd_t *)0;
 }
 
-extern __inline__ pte_t pte_mkread(pte_t pte)
+extern __inline__ void free_pmd_fast(pmd_t *pmd)
 {
-	pte_val(pte) |= PTE_CACHEABLE;
-	return pte;
 }
 
-extern __inline__ pte_t pte_mkexec(pte_t pte)
+extern __inline__ void free_pmd_slow(pmd_t *pmd)
 {
-	pte_val(pte) |= PTE_CACHEABLE;
-	return pte;
 }
-#endif
+
+extern void __bad_pmd(pmd_t *pmd);
+extern void __bad_pmd_kernel(pmd_t *pmd);
 
 /*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
  */
-extern __inline__ pte_t mk_pte(unsigned long page, pgprot_t pgprot)
+extern __inline__ void pmd_free(pmd_t *pmd)
 {
-	pte_t pte;
-	pte_val(pte) = __virt_to_phys(page) | pgprot_val(pgprot);
-	return pte;
 }
 
-/* This takes a physical page address that is used by the remapping functions */
-extern __inline__ pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
+extern __inline__ pmd_t *pmd_alloc(pgd_t *pgd, unsigned long address)
 {
-	pte_t pte;
-	pte_val(pte) = physpage + pgprot_val(pgprot);
-	return pte;
+	return (pmd_t *) pgd;
 }
 
-extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
-	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
-	return pte;
-}
+#define pmd_free_kernel		pmd_free
+#define pmd_alloc_kernel	pmd_alloc
 
-extern __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
+extern __inline__ pmd_t __mk_pmd(pte_t *ptep, unsigned long prot)
 {
-	*pteptr = pteval;
-	__flush_pte_to_ram(pteptr);
-}
+	unsigned long pte_ptr = (unsigned long)ptep;
+	pmd_t pmd;
 
-extern __inline__ unsigned long pte_page(pte_t pte)
-{
-	return __phys_to_virt(pte_val(pte) & PAGE_MASK);
-}
+	pte_ptr -= PTRS_PER_PTE * BYTES_PER_PTR;
 
-extern __inline__ pmd_t mk_user_pmd(pte_t *ptep)
-{
-	pmd_t pmd;
-	pmd_val(pmd) = __virt_to_phys((unsigned long)ptep) | _PAGE_USER_TABLE;
-	return pmd;
-}
+	/*
+	 * The pmd must be loaded with the physical
+	 * address of the PTE table
+	 */
+	pmd_val(pmd) = __virt_to_phys(pte_ptr) | prot;
 
-extern __inline__ pmd_t mk_kernel_pmd(pte_t *ptep)
-{
-	pmd_t pmd;
-	pmd_val(pmd) = __virt_to_phys((unsigned long)ptep) | _PAGE_KERNEL_TABLE;
 	return pmd;
 }
 
-#if 1
-#define set_pmd(pmdp,pmd) processor.u.armv3v4._set_pmd(pmdp,pmd)
-#else
-extern __inline__ void set_pmd(pmd_t *pmdp, pmd_t pmd)
-{
-	*pmdp = pmd;
-	__flush_pte_to_ram(pmdp);
-}
-#endif
-
 extern __inline__ unsigned long pmd_page(pmd_t pmd)
 {
-	return __phys_to_virt(pmd_val(pmd) & 0xfffffc00);
-}
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+	unsigned long ptr;
 
-/* to find an entry in a page-table-directory */
-extern __inline__ pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
-{
-	return mm->pgd + (address >> PGDIR_SHIFT);
-}
+	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * BYTES_PER_PTR - 1);
 
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir, address) ((pmd_t *)(dir))
+	ptr += PTRS_PER_PTE * BYTES_PER_PTR;
 
-/* Find an entry in the third-level page table.. */
-extern __inline__ pte_t * pte_offset(pmd_t * dir, unsigned long address)
-{
-	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+	return __phys_to_virt(ptr);
 }
 
-extern unsigned long get_small_page(int priority);
-extern void free_small_page(unsigned long page);
 
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
-
-#ifndef __SMP__
-extern struct pgtable_cache_struct {
-	unsigned long *pgd_cache;
-	unsigned long *pte_cache;
-	unsigned long pgtable_cache_sz;
-} quicklists;
+/****************
+* PTE functions *
+****************/
 
-#define pgd_quicklist (quicklists.pgd_cache)
-#define pmd_quicklist ((unsigned long *)0)
-#define pte_quicklist (quicklists.pte_cache)
-#define pgtable_cache_size (quicklists.pgtable_cache_sz)
-#else
-#error Pgtable caches have to be per-CPU, so that no locking is needed.
-#endif
+/* PTE types (actually level 2 descriptor) */
+#define PTE_TYPE_MASK		0x0003
+#define PTE_TYPE_FAULT		0x0000
+#define PTE_TYPE_LARGE		0x0001
+#define PTE_TYPE_SMALL		0x0002
+#define PTE_AP_READ		0x0aa0
+#define PTE_AP_WRITE		0x0550
+#define PTE_CACHEABLE		0x0008
+#define PTE_BUFFERABLE		0x0004
 
-extern pgd_t *get_pgd_slow(void);
+#define pte_none(pte)		(!pte_val(pte))
+#define pte_clear(ptep)		set_pte(ptep, __pte(0))
 
-extern __inline__ pgd_t *get_pgd_fast(void)
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+extern __inline__ pte_t mk_pte(unsigned long page, pgprot_t pgprot)
 {
-	unsigned long *ret;
-
-	if((ret = pgd_quicklist) != NULL) {
-		pgd_quicklist = (unsigned long *)(*ret);
-		ret[0] = ret[1];
-		pgtable_cache_size--;
-	} else
-		ret = (unsigned long *)get_pgd_slow();
-	return (pgd_t *)ret;
+	pte_t pte;
+	pte_val(pte) = __virt_to_phys(page) | pgprot_val(pgprot);
+	return pte;
 }
 
-extern __inline__ void free_pgd_fast(pgd_t *pgd)
+/* This takes a physical page address that is used by the remapping functions */
+extern __inline__ pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 {
-	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
-	pgd_quicklist = (unsigned long *) pgd;
-	pgtable_cache_size++;
+	pte_t pte;
+	pte_val(pte) = physpage + pgprot_val(pgprot);
+	return pte;
 }
 
-extern __inline__ void free_pgd_slow(pgd_t *pgd)
+#define set_pte(ptep, pte)	processor.u.armv3v4._set_pte(ptep,pte)
+
+extern __inline__ unsigned long pte_page(pte_t pte)
 {
-	free_pages((unsigned long) pgd, 2);
+	return __phys_to_virt(pte_val(pte) & PAGE_MASK);
 }
 
 extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
@@ -579,6 +460,7 @@
 	if((ret = (unsigned long *)pte_quicklist) != NULL) {
 		pte_quicklist = (unsigned long *)(*ret);
 		ret[0] = ret[1];
+		clean_cache_area(ret, 4);
 		pgtable_cache_size--;
 	}
 	return (pte_t *)ret;
@@ -593,31 +475,124 @@
 
 extern __inline__ void free_pte_slow(pte_t *pte)
 {
-	free_small_page((unsigned long)pte);
+	free_page_2k((unsigned long)(pte - PTRS_PER_PTE));
 }
 
-/* We don't use pmd cache, so this is a dummy routine */
-extern __inline__ pmd_t *get_pmd_fast(void)
-{
-	return (pmd_t *)0;
-}
+#define pte_free_kernel(pte)	free_pte_fast(pte)
+#define pte_free(pte)		free_pte_fast(pte)
 
-extern __inline__ void free_pmd_fast(pmd_t *pmd)
+/*###############################################################################
+ * New PageTableEntry stuff...
+ */
+/* We now keep two sets of ptes - the physical and the linux version.
+ * This gives us many advantages, and allows us greater flexibility.
+ *
+ * The Linux pte's contain:
+ *  bit   meaning
+ *   0    page present
+ *   1    young
+ *   2    bufferable	- matches physical pte
+ *   3    cacheable	- matches physical pte
+ *   4    user
+ *   5    write
+ *   6    execute
+ *   7    dirty
+ *  8-11  unused
+ *  12-31 virtual page address
+ *
+ * These are stored at the pte pointer; the physical PTE is at -1024bytes
+ */
+#define L_PTE_PRESENT		(1 << 0)
+#define L_PTE_YOUNG		(1 << 1)
+#define L_PTE_BUFFERABLE	(1 << 2)
+#define L_PTE_CACHEABLE		(1 << 3)
+#define L_PTE_USER		(1 << 4)
+#define L_PTE_WRITE		(1 << 5)
+#define L_PTE_EXEC		(1 << 6)
+#define L_PTE_DIRTY		(1 << 7)
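
To make the "-1024 bytes" remark concrete: assuming the usual ARM values PTRS_PER_PTE == 256 and BYTES_PER_PTR == 4 (both defined outside this excerpt), each 2 KB allocation from get_page_2k() is laid out as

	offset    0 .. 1023 : 256 hardware level-2 descriptors (what the MMU walks)
	offset 1024 .. 2047 : 256 Linux ptes carrying the L_PTE_* bits above

and a pte_t * handed around the kernel points at the Linux half; __mk_pmd() subtracts PTRS_PER_PTE * BYTES_PER_PTR to reach the hardware table, and pmd_page() adds it back.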
+
+/*
+ * The following macros handle the cache and bufferable bits...
+ */
+#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG
+#define _L_PTE_READ	L_PTE_USER | L_PTE_CACHEABLE
+#define _L_PTE_EXEC	_L_PTE_READ | L_PTE_EXEC
+
+#define PAGE_NONE       __pgprot(_L_PTE_DEFAULT)
+#define PAGE_COPY       __pgprot(_L_PTE_DEFAULT | _L_PTE_READ  | L_PTE_BUFFERABLE)
+#define PAGE_SHARED     __pgprot(_L_PTE_DEFAULT | _L_PTE_READ  | L_PTE_BUFFERABLE | L_PTE_WRITE)
+#define PAGE_READONLY   __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
+#define PAGE_KERNEL     __pgprot(_L_PTE_DEFAULT | L_PTE_CACHEABLE | L_PTE_BUFFERABLE | L_PTE_DIRTY | L_PTE_WRITE)
+
+#define _PAGE_CHG_MASK		(PAGE_MASK | L_PTE_DIRTY | L_PTE_YOUNG)
+
+/*
+ * The table below defines the page protection levels that we insert into our
+ * Linux page table version.  These get translated into the best that the
+ * architecture can perform.  Note that on most ARM hardware:
+ *  1) We cannot do execute protection
+ *  2) If we could do execute protection, then read is implied
+ *  3) write implies read permissions
+ */
+#define __P000  PAGE_NONE
+#define __P001  PAGE_READONLY
+#define __P010  PAGE_COPY
+#define __P011  PAGE_COPY
+#define __P100  PAGE_READONLY
+#define __P101  PAGE_READONLY
+#define __P110  PAGE_COPY
+#define __P111  PAGE_COPY
+
+#define __S000  PAGE_NONE
+#define __S001  PAGE_READONLY
+#define __S010  PAGE_SHARED
+#define __S011  PAGE_SHARED
+#define __S100  PAGE_READONLY
+#define __S101  PAGE_READONLY
+#define __S110  PAGE_SHARED
+#define __S111  PAGE_SHARED
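
A sketch of how these tables are consumed: the generic code indexes protection_map[] (in mm/mmap.c) with the low vm_flags bits, so for example:

	prot = protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	/* a private PROT_READ|PROT_WRITE mapping selects __P011 == PAGE_COPY,
	 * so the first write faults and is satisfied by copy-on-write */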
+
+
+
+#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+#define pte_read(pte)			(pte_val(pte) & L_PTE_USER)
+#define pte_write(pte)			(pte_val(pte) & L_PTE_WRITE)
+#define pte_exec(pte)			(pte_val(pte) & L_PTE_EXEC)
+#define pte_dirty(pte)			(pte_val(pte) & L_PTE_DIRTY)
+#define pte_young(pte)			(pte_val(pte) & L_PTE_YOUNG)
+
+#define PTE_BIT_FUNC(fn,op)			\
+extern inline pte_t fn##(pte_t pte) { pte_val(pte) op##; return pte; }
+
+//PTE_BIT_FUNC(pte_rdprotect, &= ~L_PTE_USER);
+PTE_BIT_FUNC(pte_wrprotect, &= ~L_PTE_WRITE);
+PTE_BIT_FUNC(pte_exprotect, &= ~L_PTE_EXEC);
+PTE_BIT_FUNC(pte_mkclean,   &= ~L_PTE_DIRTY);
+PTE_BIT_FUNC(pte_mkold,     &= ~L_PTE_YOUNG);
+//PTE_BIT_FUNC(pte_mkread,    |= L_PTE_USER);
+PTE_BIT_FUNC(pte_mkwrite,   |= L_PTE_WRITE);
+PTE_BIT_FUNC(pte_mkexec,    |= L_PTE_EXEC);
+PTE_BIT_FUNC(pte_mkdirty,   |= L_PTE_DIRTY);
+PTE_BIT_FUNC(pte_mkyoung,   |= L_PTE_YOUNG);
+PTE_BIT_FUNC(pte_nocache,   &= ~L_PTE_CACHEABLE);
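
For example, PTE_BIT_FUNC(pte_mkdirty, |= L_PTE_DIRTY) expands to the equivalent of:

	extern inline pte_t pte_mkdirty(pte_t pte)
	{
		pte_val(pte) |= L_PTE_DIRTY;
		return pte;
	}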
+
+extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
+	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+	return pte;
 }
 
-extern __inline__ void free_pmd_slow(pmd_t *pmd)
+/* Find an entry in the third-level page table.. */
+extern __inline__ pte_t * pte_offset(pmd_t * dir, unsigned long address)
 {
+	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 }
 
-extern void __bad_pmd(pmd_t *pmd);
-extern void __bad_pmd_kernel(pmd_t *pmd);
-
-#define pte_free_kernel(pte)	free_pte_fast(pte)
-#define pte_free(pte)		free_pte_fast(pte)
-#define pgd_free(pgd)		free_pgd_fast(pgd)
-#define pgd_alloc()		get_pgd_fast()
-
 extern __inline__ pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 {
 	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
@@ -653,49 +628,6 @@
 		return NULL;
 	}
 	return (pte_t *) pmd_page(*pmd) + address;
-}
-
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-extern __inline__ void pmd_free(pmd_t *pmd)
-{
-}
-
-extern __inline__ pmd_t *pmd_alloc(pgd_t *pgd, unsigned long address)
-{
-	return (pmd_t *) pgd;
-}
-
-#define pmd_free_kernel		pmd_free
-#define pmd_alloc_kernel	pmd_alloc
-
-extern __inline__ void set_pgdir(unsigned long address, pgd_t entry)
-{
-	struct task_struct * p;
-	pgd_t *pgd;
-
-	read_lock(&tasklist_lock);
-	for_each_task(p) {
-		if (!p->mm)
-			continue;
-		*pgd_offset(p->mm,address) = entry;
-	}
-	read_unlock(&tasklist_lock);
-	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
-		pgd[address >> PGDIR_SHIFT] = entry;
-}
-
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
-/*
- * The sa110 doesn't have any external MMU info: the kernel page
- * tables contain all the necessary information.
- */
-extern __inline__ void update_mmu_cache(struct vm_area_struct * vma,
-	unsigned long address, pte_t pte)
-{
 }
 
 #define SWP_TYPE(entry) (((entry) >> 2) & 0x7f)
