patch-2.3.35 linux/include/asm-sparc64/pgalloc.h

diff -u --recursive --new-file v2.3.34/linux/include/asm-sparc64/pgalloc.h linux/include/asm-sparc64/pgalloc.h
@@ -0,0 +1,301 @@
+/* $Id$ */
+#ifndef _SPARC64_PGALLOC_H
+#define _SPARC64_PGALLOC_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include <asm/page.h>
+
+/* Cache and TLB flush operations. */
+
+/* These are the same regardless of whether this is an SMP kernel or not. */
+#define flush_cache_mm(__mm) \
+	do { if ((__mm) == current->mm) flushw_user(); } while(0)
+#define flush_cache_range(mm, start, end) \
+	flush_cache_mm(mm)
+#define flush_cache_page(vma, page) \
+	flush_cache_mm((vma)->vm_mm)
+
+/* These operations are unnecessary on the SpitFire since D-CACHE is write-through. */
+#define flush_icache_range(start, end)		do { } while (0)
+#define flush_page_to_ram(page)			do { } while (0)
+
+extern void __flush_dcache_range(unsigned long start, unsigned long end);
+
+extern void __flush_cache_all(void);
+
+extern void __flush_tlb_all(void);
+extern void __flush_tlb_mm(unsigned long context, unsigned long r);
+extern void __flush_tlb_range(unsigned long context, unsigned long start,
+			      unsigned long r, unsigned long end,
+			      unsigned long pgsz, unsigned long size);
+extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
+
+#ifndef __SMP__
+
+#define flush_cache_all()	__flush_cache_all()
+#define flush_tlb_all()		__flush_tlb_all()
+
+#define flush_tlb_mm(__mm) \
+do { if(CTX_VALID((__mm)->context)) \
+	__flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
+} while(0)
+
+#define flush_tlb_range(__mm, start, end) \
+do { if(CTX_VALID((__mm)->context)) { \
+	unsigned long __start = (start)&PAGE_MASK; \
+	unsigned long __end = (end)&PAGE_MASK; \
+	__flush_tlb_range(CTX_HWBITS((__mm)->context), __start, \
+			  SECONDARY_CONTEXT, __end, PAGE_SIZE, \
+			  (__end - __start)); \
+     } \
+} while(0)
+
+#define flush_tlb_page(vma, page) \
+do { struct mm_struct *__mm = (vma)->vm_mm; \
+     if(CTX_VALID(__mm->context)) \
+	__flush_tlb_page(CTX_HWBITS(__mm->context), (page)&PAGE_MASK, \
+			 SECONDARY_CONTEXT); \
+} while(0)
+
+#else /* __SMP__ */
+
+extern void smp_flush_cache_all(void);
+extern void smp_flush_tlb_all(void);
+extern void smp_flush_tlb_mm(struct mm_struct *mm);
+extern void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
+				unsigned long end);
+extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
+
+#define flush_cache_all()	smp_flush_cache_all()
+#define flush_tlb_all()		smp_flush_tlb_all()
+
+extern __inline__ void flush_tlb_mm(struct mm_struct *mm)
+{
+	if (CTX_VALID(mm->context))
+		smp_flush_tlb_mm(mm);
+}
+
+extern __inline__ void flush_tlb_range(struct mm_struct *mm, unsigned long start,
+				       unsigned long end)
+{
+	if (CTX_VALID(mm->context))
+		smp_flush_tlb_range(mm, start, end);
+}
+
+extern __inline__ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (CTX_VALID(mm->context))
+		smp_flush_tlb_page(mm, page);
+}
+
+#endif /* ! __SMP__ */
+
+/* Page table allocation/freeing. */
+#ifdef __SMP__
+/* Sliiiicck */
+#define pgt_quicklists	cpu_data[smp_processor_id()]
+#else
+extern struct pgtable_cache_struct {
+	unsigned long *pgd_cache;
+	unsigned long *pte_cache[2];
+	unsigned int pgcache_size;
+	unsigned int pgdcache_size;
+} pgt_quicklists;
+#endif
+#define pgd_quicklist		(pgt_quicklists.pgd_cache)
+#define pmd_quicklist		((unsigned long *)0)
+#define pte_quicklist		(pgt_quicklists.pte_cache)
+#define pgtable_cache_size	(pgt_quicklists.pgcache_size)
+#define pgd_cache_size		(pgt_quicklists.pgdcache_size)
+
+#ifndef __SMP__
+
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
+{
+	struct page *page = mem_map + MAP_NR(pgd);
+
+	if (!page->pprev_hash) {
+		(unsigned long *)page->next_hash = pgd_quicklist;
+		pgd_quicklist = (unsigned long *)page;
+	}
+	(unsigned long)page->pprev_hash |=
+		(((unsigned long)pgd & (PAGE_SIZE / 2)) ? 2 : 1);
+	pgd_cache_size++;
+}
+
+extern __inline__ pgd_t *get_pgd_fast(void)
+{
+        struct page *ret;
+
+        if ((ret = (struct page *)pgd_quicklist) != NULL) {
+                unsigned long mask = (unsigned long)ret->pprev_hash;
+		unsigned long off = 0;
+
+		if (mask & 1)
+			mask &= ~1;
+		else {
+			off = PAGE_SIZE / 2;
+			mask &= ~2;
+		}
+		(unsigned long)ret->pprev_hash = mask;
+		if (!mask)
+			pgd_quicklist = (unsigned long *)ret->next_hash;
+                ret = (struct page *)(__page_address(ret) + off);
+                pgd_cache_size--;
+        } else {
+		ret = (struct page *) __get_free_page(GFP_KERNEL);
+		if(ret) {
+			struct page *page = mem_map + MAP_NR(ret);
+			
+			memset(ret, 0, PAGE_SIZE);
+			(unsigned long)page->pprev_hash = 2;
+			(unsigned long *)page->next_hash = pgd_quicklist;
+			pgd_quicklist = (unsigned long *)page;
+			pgd_cache_size++;
+		}
+        }
+        return (pgd_t *)ret;
+}
+
+#else /* __SMP__ */
+
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
+{
+	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
+	pgd_quicklist = (unsigned long *) pgd;
+	pgtable_cache_size++;
+}
+
+extern __inline__ pgd_t *get_pgd_fast(void)
+{
+	unsigned long *ret;
+
+	if((ret = pgd_quicklist) != NULL) {
+		pgd_quicklist = (unsigned long *)(*ret);
+		ret[0] = 0;
+		pgtable_cache_size--;
+	} else {
+		ret = (unsigned long *) __get_free_page(GFP_KERNEL);
+		if(ret)
+			memset(ret, 0, PAGE_SIZE);
+	}
+	return (pgd_t *)ret;
+}
+
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
+{
+	free_page((unsigned long)pgd);
+}
+
+#endif /* __SMP__ */
+
+extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long address_premasked);
+
+extern __inline__ pmd_t *get_pmd_fast(void)
+{
+	unsigned long *ret;
+	int color = 0;
+
+	if (pte_quicklist[color] == NULL)
+		color = 1;
+	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
+		pte_quicklist[color] = (unsigned long *)(*ret);
+		ret[0] = 0;
+		pgtable_cache_size--;
+	}
+	return (pmd_t *)ret;
+}
+
+extern __inline__ void free_pmd_fast(pmd_t *pmd)
+{
+	unsigned long color;
+
+	color = (((unsigned long)pmd >> PAGE_SHIFT) & 0x1UL);
+	*(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
+	pte_quicklist[color] = (unsigned long *) pmd;
+	pgtable_cache_size++;
+}
+
+extern __inline__ void free_pmd_slow(pmd_t *pmd)
+{
+	free_page((unsigned long)pmd);
+}
+
+extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted,
+			   unsigned long color);
+
+extern __inline__ pte_t *get_pte_fast(unsigned long color)
+{
+	unsigned long *ret;
+
+	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
+		pte_quicklist[color] = (unsigned long *)(*ret);
+		ret[0] = 0;
+		pgtable_cache_size--;
+	}
+	return (pte_t *)ret;
+}
+
+extern __inline__ void free_pte_fast(pte_t *pte)
+{
+	unsigned long color = (((unsigned long)pte >> PAGE_SHIFT) & 0x1);
+	*(unsigned long *)pte = (unsigned long) pte_quicklist[color];
+	pte_quicklist[color] = (unsigned long *) pte;
+	pgtable_cache_size++;
+}
+
+extern __inline__ void free_pte_slow(pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+#define pte_free_kernel(pte)	free_pte_fast(pte)
+#define pte_free(pte)		free_pte_fast(pte)
+#define pmd_free_kernel(pmd)	free_pmd_fast(pmd)
+#define pmd_free(pmd)		free_pmd_fast(pmd)
+#define pgd_free(pgd)		free_pgd_fast(pgd)
+#define pgd_alloc()		get_pgd_fast()
+
+extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
+{
+	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+	if (pmd_none(*pmd)) {
+		/* Be careful, address can be just about anything... */
+		unsigned long color = (((unsigned long)pmd)>>2UL) & 0x1UL;
+		pte_t *page = get_pte_fast(color);
+
+		if (!page)
+			return get_pte_slow(pmd, address, color);
+		pmd_set(pmd, page);
+		return page + address;
+	}
+	return (pte_t *) pmd_page(*pmd) + address;
+}
+
+extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
+{
+	address = (address >> PMD_SHIFT) & (REAL_PTRS_PER_PMD - 1);
+	if (pgd_none(*pgd)) {
+		pmd_t *page = get_pmd_fast();
+
+		if (!page)
+			return get_pmd_slow(pgd, address);
+		pgd_set(pgd, page);
+		return page + address;
+	}
+	return (pmd_t *) pgd_page(*pgd) + address;
+}
+
+#define pte_alloc_kernel(pmd, addr)	pte_alloc(pmd, addr)
+#define pmd_alloc_kernel(pgd, addr)	pmd_alloc(pgd, addr)
+
+extern int do_check_pgt_cache(int, int);
+
+/* Nothing to do on sparc64 :) */
+#define set_pgdir(address, entry)	do { } while(0)
+
+#endif /* _SPARC64_PGALLOC_H */
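
The quicklist allocators above (the __SMP__ get_pgd_fast/free_pgd_fast pair
and the pmd/pte variants) all share one intrusive free-list trick: a page
table sitting on the free list is dead storage anyway, so its first word is
reused as the link to the next free table, and no separate bookkeeping memory
is needed. Below is a minimal user-space sketch of that idea, assuming a
fixed TABLE_SIZE; the names quicklist_push/quicklist_pop are illustrative,
not kernel API:

	#include <stdlib.h>
	#include <string.h>

	#define TABLE_SIZE 8192			/* stand-in for PAGE_SIZE */

	static unsigned long *quicklist;	/* head of the free list */
	static unsigned int quicklist_size;	/* cf. pgtable_cache_size */

	static void quicklist_push(unsigned long *table)
	{
		*table = (unsigned long) quicklist;	/* word 0 is the link */
		quicklist = table;
		quicklist_size++;
	}

	static unsigned long *quicklist_pop(void)
	{
		unsigned long *ret = quicklist;

		if (ret != NULL) {
			quicklist = (unsigned long *) *ret;	/* unlink */
			ret[0] = 0;	/* scrub the link, as ret[0] = 0 does above */
			quicklist_size--;
		} else {
			ret = malloc(TABLE_SIZE);	/* slow path: fresh table */
			if (ret)
				memset(ret, 0, TABLE_SIZE);
		}
		return ret;
	}

do_check_pgt_cache(), declared at the bottom of the header, is the hook that
trims these caches back when they grow past their watermarks.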
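The UP-only get_pgd_fast/free_pgd_fast pair is subtler: a sparc64 pgd
occupies only half of an 8K page, so two pgds are packed into each page,
the page's otherwise-unused pprev_hash field is abused as a two-bit
occupancy mask (bit 0: lower half free, bit 1: upper half free), and
next_hash chains the pages that still have a free half. A rough standalone
sketch of that bookkeeping, with a hypothetical struct half_page standing
in for struct page:

	/* Two half-page objects per page; a 2-bit mask records which
	 * halves are free.  Illustration only. */
	#define PG_SIZE 8192			/* PAGE_SIZE stand-in */

	struct half_page {
		unsigned long free_mask;	/* bit 0: lower, bit 1: upper */
		struct half_page *next;		/* pages with a free half */
		char data[PG_SIZE];		/* the page itself */
	};

	static struct half_page *free_halves;

	static void put_half(struct half_page *pg, int upper)
	{
		if (!pg->free_mask) {		/* first free half: enter list */
			pg->next = free_halves;
			free_halves = pg;
		}
		pg->free_mask |= upper ? 2 : 1;
	}

	static void *get_half(void)
	{
		struct half_page *pg = free_halves;
		unsigned long off = 0;

		if (!pg)
			return 0;	/* caller allocates a fresh page */
		if (pg->free_mask & 1) {	/* prefer the lower half */
			pg->free_mask &= ~1UL;
		} else {
			off = PG_SIZE / 2;
			pg->free_mask &= ~2UL;
		}
		if (!pg->free_mask)	/* fully in use: unlink the page */
			free_halves = pg->next;
		return pg->data + off;
	}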
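Finally, the pte quicklist is really two lists indexed by a one-bit page
"color": free_pte_fast() files a table under bit PAGE_SHIFT of its address,
which (with 8K pages and the UltraSPARC's 16K direct-mapped, virtually
indexed D-cache) selects the half of the D-cache the table maps to, and
pte_alloc() asks for a preferred color derived from bit 2 of the pmd slot
address, so the tables for adjacent ranges tend to alternate colors rather
than alias each other. A one-line sketch of the color computation
(page_color is an illustrative name, not from the kernel):

	#define PG_SHIFT 13	/* 8K pages on sparc64 */

	/* Which D-cache half does this (page-aligned) table map to? */
	static inline unsigned long page_color(void *table)
	{
		return ((unsigned long) table >> PG_SHIFT) & 1UL;
	}

get_pmd_fast() simply takes whichever color happens to be populated, since
pmd pages have no alignment preference of their own.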
