patch-2.3.30 linux/include/asm-ppc/pgalloc.h

diff -u --recursive --new-file v2.3.29/linux/include/asm-ppc/pgalloc.h linux/include/asm-ppc/pgalloc.h
@@ -1,7 +1,81 @@
 #ifndef _PPC_PGALLOC_H
 #define _PPC_PGALLOC_H
 
+#include <linux/threads.h>
+#include <asm/processor.h>
 
+/*
+ * This is handled very differently on the PPC since our page tables
+ * are all 0's and I want to be able to use these zero'd pages elsewhere
+ * as well - it gives us quite a speedup.
+ *
+ * Note that the SMP/UP versions are the same, but we don't need a per-cpu
+ * list of zero pages because we do the zero-ing with the cache off and
+ * the access routines are lock-free.  The pgt cache stuff is per-cpu,
+ * though, since it isn't done with any lock-free access routines
+ * (although I think we need arch-specific routines so I can do lock-free).
+ *
+ * I need to generalize this so we can use it for other arch's as well.
+ * -- Cort
+ */
+#ifdef __SMP__
+#define quicklists	cpu_data[smp_processor_id()]
+#else
+extern struct pgtable_cache_struct {
+	unsigned long *pgd_cache;
+	unsigned long *pte_cache;
+	unsigned long pgtable_cache_sz;
+} quicklists;
+#endif
+
+#define pgd_quicklist 		(quicklists.pgd_cache)
+#define pmd_quicklist 		((unsigned long *)0)
+#define pte_quicklist 		(quicklists.pte_cache)
+#define pgtable_cache_size 	(quicklists.pgtable_cache_sz)
+
+extern unsigned long *zero_cache;    /* head linked list of pre-zero'd pages */
+extern atomic_t zero_sz;	     /* # currently pre-zero'd pages */
+extern atomic_t zeropage_hits;	     /* # zero'd page requests we've satisfied */
+extern atomic_t zeropage_calls;      /* # zero'd page requests that have been made */
+extern atomic_t zerototal;	     /* # pages zero'd over time */
+
+#define zero_quicklist     	(zero_cache)
+#define zero_cache_sz  	 	(zero_sz)
+#define zero_cache_calls 	(zeropage_calls)
+#define zero_cache_hits  	(zeropage_hits)
+#define zero_cache_total 	(zerototal)
+
+/* return a pre-zero'd page from the list, return NULL if none available -- Cort */
+extern unsigned long get_zero_page_fast(void);
+
+extern void __bad_pte(pmd_t *pmd);
+
+extern inline void set_pgdir(unsigned long address, pgd_t entry)
+{
+	struct task_struct * p;
+	pgd_t *pgd;
+#ifdef __SMP__
+	int i;
+#endif	
+        
+	read_lock(&tasklist_lock);
+	for_each_task(p) {
+		if (!p->mm)
+			continue;
+		*pgd_offset(p->mm,address) = entry;
+	}
+	read_unlock(&tasklist_lock);
+#ifndef __SMP__
+	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+		pgd[address >> PGDIR_SHIFT] = entry;
+#else
+	/* Callers of pgd_alloc/pgd_free hold the master kernel lock, and so does
+	   our callee, so we can modify the pgd caches of other CPUs as well. -jj */
+	for (i = 0; i < NR_CPUS; i++)
+		for (pgd = (pgd_t *)cpu_data[i].pgd_cache; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
+			pgd[address >> PGDIR_SHIFT] = entry;
+#endif
+}
 
 /* We don't use pmd cache, so this is a dummy routine */
 extern __inline__ pmd_t *get_pmd_fast(void)
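
The comment at the head of this hunk describes the zero-page cache; the body of
get_zero_page_fast() lives elsewhere in the tree, so what follows is a minimal
userspace sketch of its shape, not the kernel code. It assumes the cache threads
its freelist through the first word of each cached page (the same convention the
pgd/pte quicklists below use) and stands plain longs in for the kernel's atomic_t
counters and lock-free access routines.

	#include <stdlib.h>
	#include <string.h>

	#define PAGE_SIZE 4096UL

	static unsigned long *zero_cache;	/* head of list of pre-zero'd pages */
	static unsigned long zero_sz;		/* # currently pre-zero'd pages */
	static unsigned long zeropage_hits, zeropage_calls;

	/* Pop a pre-zero'd page, or return 0 (NULL) if the cache is empty. */
	unsigned long get_zero_page_fast(void)
	{
		unsigned long *ret = zero_cache;

		zeropage_calls++;
		if (ret == NULL)
			return 0;
		zero_cache = (unsigned long *)*ret;	/* advance head to next page */
		*ret = 0;				/* re-zero the link word */
		zero_sz--;
		zeropage_hits++;
		return (unsigned long)ret;
	}

	/* Refill step: zero a fresh page and push it onto the cache. */
	void zero_cache_refill(void)
	{
		unsigned long *page = malloc(PAGE_SIZE);

		if (page == NULL)
			return;
		memset(page, 0, PAGE_SIZE);
		*page = (unsigned long)zero_cache;	/* link word = old head */
		zero_cache = page;
		zero_sz++;
	}

Because the refill step writes the link into an otherwise zeroed page, clearing
that single word on pop is enough to hand back a fully zero page, and the hit
rate falls out as zeropage_hits / zeropage_calls.
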
@@ -52,9 +126,9 @@
 {
         unsigned long *ret;
 
-        if((ret = pgd_quicklist) != NULL) {
+        if ((ret = pgd_quicklist) != NULL) {
                 pgd_quicklist = (unsigned long *)(*ret);
-                ret[0] = ret[1];
+                ret[0] = 0;
                 pgtable_cache_size--;
         } else
                 ret = (unsigned long *)get_pgd_slow();
@@ -63,7 +137,7 @@
 
 extern __inline__ void free_pgd_fast(pgd_t *pgd)
 {
-        *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+        *(unsigned long **)pgd = pgd_quicklist;
         pgd_quicklist = (unsigned long *) pgd;
         pgtable_cache_size++;
 }
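
The two hunks above form an intrusive freelist: free_pgd_fast() stores the
current list head in the first word of the page being freed, and get_pgd_fast()
pops the head and clears that link word. That clear is what the ret[0] = 0
change fixes: the old ret[0] = ret[1] copied a stale entry from the page's
previous life into slot 0, while writing 0 leaves the slot empty, consistent
with the pre-zero'd page scheme above. The pte hunks below use the same
convention. A self-contained userspace demo of the push/pop pair, with
hypothetical ql_* names:

	#include <stdio.h>
	#include <stdlib.h>

	/* Intrusive freelist: the first word of each free block stores the
	 * address of the next free block, as in free_pgd_fast()/get_pgd_fast(). */
	static unsigned long *quicklist;	/* plays the role of pgd_quicklist */

	static void ql_push(unsigned long *block)
	{
		*(unsigned long **)block = quicklist;	/* link word = old head */
		quicklist = block;
	}

	static unsigned long *ql_pop(void)
	{
		unsigned long *ret = quicklist;

		if (ret != NULL) {
			quicklist = (unsigned long *)*ret;	/* next becomes head */
			ret[0] = 0;	/* clear the link word, as the patch now does */
		}
		return ret;
	}

	int main(void)
	{
		unsigned long *a = calloc(1024, sizeof(unsigned long));
		unsigned long *b = calloc(1024, sizeof(unsigned long));

		ql_push(a);
		ql_push(b);
		printf("pop %p, expect b %p\n", (void *)ql_pop(), (void *)b);
		printf("pop %p, expect a %p\n", (void *)ql_pop(), (void *)a);
		printf("pop %p, expect NULL\n", (void *)ql_pop());
		free(a);
		free(b);
		return 0;
	}

The other change in these hunks, casting through unsigned long ** instead of
round-tripping the pointer through unsigned long, stores the same bits but says
what is meant: the first word of a free pgd page holds a pointer to the next one.
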
@@ -79,9 +153,9 @@
 {
         unsigned long *ret;
 
-        if((ret = (unsigned long *)pte_quicklist) != NULL) {
+        if ((ret = pte_quicklist) != NULL) {
                 pte_quicklist = (unsigned long *)(*ret);
-                ret[0] = ret[1];
+                ret[0] = 0;
                 pgtable_cache_size--;
 	}
         return (pte_t *)ret;
@@ -89,7 +163,7 @@
 
 extern __inline__ void free_pte_fast(pte_t *pte)
 {
-        *(unsigned long *)pte = (unsigned long) pte_quicklist;
+        *(unsigned long **)pte = pte_quicklist;
         pte_quicklist = (unsigned long *) pte;
         pgtable_cache_size++;
 }
@@ -98,6 +172,7 @@
 {
 	free_page((unsigned long)pte);
 }
+
 #define pte_free_kernel(pte)    free_pte_fast(pte)
 #define pte_free(pte)           free_pte_fast(pte)
 #define pgd_free(pgd)           free_pgd_fast(pgd)
@@ -122,30 +197,5 @@
 }
 
 extern int do_check_pgt_cache(int, int);
-
-extern __inline__ pte_t *find_pte(struct mm_struct *mm,unsigned long va)
-{
-	pgd_t *dir;
-	pmd_t *pmd;
-	pte_t *pte;
-
-	va &= PAGE_MASK;
-	
-	dir = pgd_offset( mm, va );
-	if (dir)
-	{
-		pmd = pmd_offset(dir, va & PAGE_MASK);
-		if (pmd && pmd_present(*pmd))
-		{
-			pte = pte_offset(pmd, va);
-			if (pte && pte_present(*pte))
-			{			
-				pte_uncache(*pte);
-				flush_tlb_page(find_vma(mm,va),va);
-			}
-		}
-	}
-	return pte;
-}
 
 #endif /* _PPC_PGALLOC_H */
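
The prototype for do_check_pgt_cache(int, int) is kept by the last hunk; its two
arguments are low and high watermarks. In kernels of this era the routine trims
the quicklists once pgtable_cache_size climbs past the high mark, freeing pages
until it falls back to the low mark. The PPC body is not part of this patch, so
the sketch below is an assumption about its shape (again a userspace analogue,
repeating the intrusive-freelist pop from the previous sketch), not a copy of it:

	#include <stdlib.h>

	static unsigned long *quicklist;	/* freelist head, as above */
	static int pgtable_cache_size;		/* mirrors the kernel counter */

	/* Pop one block off the freelist (same convention as above). */
	static unsigned long *ql_pop(void)
	{
		unsigned long *ret = quicklist;

		if (ret != NULL) {
			quicklist = (unsigned long *)*ret;
			ret[0] = 0;
		}
		return ret;
	}

	/* Assumed shape of do_check_pgt_cache(low, high): when the cache has
	 * grown past the high watermark, free entries back down to the low
	 * one, and report how many pages were freed. */
	int check_pgt_cache(int low, int high)
	{
		int freed = 0;

		if (pgtable_cache_size > high) {
			do {
				unsigned long *page = ql_pop();

				if (page == NULL)
					break;
				free(page);
				pgtable_cache_size--;
				freed++;
			} while (pgtable_cache_size > low);
		}
		return freed;
	}
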
