patch-2.4.17 linux/include/asm-i386/pgalloc.h

diff -Naur -X /home/marcelo/lib/dontdiff linux-2.4.16/include/asm-i386/pgalloc.h linux/include/asm-i386/pgalloc.h
@@ -18,15 +18,21 @@
  * Allocate and free page tables.
  */
 
-#if CONFIG_X86_PAE
+#if defined (CONFIG_X86_PAE)
+/*
+ * We can't include <linux/slab.h> here, thus these uglinesses.
+ */
+struct kmem_cache_s;
+
+extern struct kmem_cache_s *pae_pgd_cachep;
+extern void *kmem_cache_alloc(struct kmem_cache_s *, int);
+extern void kmem_cache_free(struct kmem_cache_s *, void *);
 
-extern void *kmalloc(size_t, int);
-extern void kfree(const void *);
 
-static __inline__ pgd_t *get_pgd_slow(void)
+static inline pgd_t *get_pgd_slow(void)
 {
 	int i;
-	pgd_t *pgd = kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);
 
 	if (pgd) {
 		for (i = 0; i < USER_PTRS_PER_PGD; i++) {
@@ -36,32 +42,36 @@
 			clear_page(pmd);
 			set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
 		}
-		memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+		memcpy(pgd + USER_PTRS_PER_PGD,
+			swapper_pg_dir + USER_PTRS_PER_PGD,
+			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
 	}
 	return pgd;
 out_oom:
 	for (i--; i >= 0; i--)
 		free_page((unsigned long)__va(pgd_val(pgd[i])-1));
-	kfree(pgd);
+	kmem_cache_free(pae_pgd_cachep, pgd);
 	return NULL;
 }
 
 #else
 
-static __inline__ pgd_t *get_pgd_slow(void)
+static inline pgd_t *get_pgd_slow(void)
 {
 	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
 
 	if (pgd) {
 		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-		memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+		memcpy(pgd + USER_PTRS_PER_PGD,
+			swapper_pg_dir + USER_PTRS_PER_PGD,
+			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
 	}
 	return pgd;
 }
 
-#endif
+#endif /* CONFIG_X86_PAE */
 
-static __inline__ pgd_t *get_pgd_fast(void)
+static inline pgd_t *get_pgd_fast(void)
 {
 	unsigned long *ret;
 
@@ -74,21 +84,21 @@
 	return (pgd_t *)ret;
 }
 
-static __inline__ void free_pgd_fast(pgd_t *pgd)
+static inline void free_pgd_fast(pgd_t *pgd)
 {
 	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
 	pgd_quicklist = (unsigned long *) pgd;
 	pgtable_cache_size++;
 }
 
-static __inline__ void free_pgd_slow(pgd_t *pgd)
+static inline void free_pgd_slow(pgd_t *pgd)
 {
-#if CONFIG_X86_PAE
+#if defined(CONFIG_X86_PAE)
 	int i;
 
 	for (i = 0; i < USER_PTRS_PER_PGD; i++)
 		free_page((unsigned long)__va(pgd_val(pgd[i])-1));
-	kfree(pgd);
+	kmem_cache_free(pae_pgd_cachep, pgd);
 #else
 	free_page((unsigned long)pgd);
 #endif
@@ -104,7 +114,8 @@
 	return pte;
 }
 
-static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
+					unsigned long address)
 {
 	unsigned long *ret;
 
@@ -116,7 +127,7 @@
 	return (pte_t *)ret;
 }
 
-static __inline__ void pte_free_fast(pte_t *pte)
+static inline void pte_free_fast(pte_t *pte)
 {
 	*(unsigned long *)pte = (unsigned long) pte_quicklist;
 	pte_quicklist = (unsigned long *) pte;
@@ -128,14 +139,9 @@
 	free_page((unsigned long)pte);
 }
 
-#define pte_free(pte)		pte_free_fast(pte)
-#ifdef CONFIG_X86_PAE
-#define pgd_alloc(mm)		get_pgd_slow()
+#define pte_free(pte)		pte_free_slow(pte)
 #define pgd_free(pgd)		free_pgd_slow(pgd)
-#else
 #define pgd_alloc(mm)		get_pgd_fast()
-#define pgd_free(pgd)		free_pgd_fast(pgd)
-#endif
 
 /*
  * allocating and freeing a pmd is trivial: the 1-entry pmd is

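Note on the change above: this header now only declares pae_pgd_cachep; the cache itself still has to be created once at boot, before the first pgd_alloc(), presumably by a companion change elsewhere in the patch. As a rough illustration only (the hook name, flag choice and panic message below are assumptions, not taken from this file), a 2.4-style setup would look something like:

/* Illustrative sketch, not part of the diff above. */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>	/* panic() */
#include <asm/pgtable.h>	/* pgd_t, PTRS_PER_PGD */

struct kmem_cache_s *pae_pgd_cachep;

/* Called once during early boot (hook name assumed here). */
void __init pae_pgd_cache_init(void)
{
	/*
	 * A PAE pgd is PTRS_PER_PGD (4) eight-byte entries, i.e. only
	 * 32 bytes, and the page-directory-pointer table loaded into
	 * %cr3 has hardware alignment requirements; a dedicated,
	 * cache-aligned slab cache is a more reliable way to meet them
	 * than a generic kmalloc() bucket.
	 */
	pae_pgd_cachep = kmem_cache_create("pae_pgd",
			PTRS_PER_PGD * sizeof(pgd_t), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!pae_pgd_cachep)
		panic("could not create the pae_pgd slab cache");
}

With the cache in place, get_pgd_slow() and free_pgd_slow() hand the 32-byte pgd objects back and forth through kmem_cache_alloc()/kmem_cache_free() instead of kmalloc()/kfree(), which is exactly the switch the hunks above make.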