patch-2.3.27 linux/include/asm-i386/highmem.h

diff -u --recursive --new-file v2.3.26/linux/include/asm-i386/highmem.h linux/include/asm-i386/highmem.h
@@ -18,7 +18,12 @@
 #ifndef _ASM_HIGHMEM_H
 #define _ASM_HIGHMEM_H
 
+#ifdef __KERNEL__
+
 #include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
 
 /* undef for production */
 #define HIGHMEM_DEBUG 1
@@ -28,58 +33,79 @@
 
 extern pte_t *kmap_pte;
 extern pgprot_t kmap_prot;
+extern pte_t *pkmap_page_table;
 
 extern void kmap_init(void) __init;
 
-/* kmap helper functions necessary to access the highmem pages in kernel */
-#include <asm/pgtable.h>
-#include <asm/kmap_types.h>
+#define PKMAP_BASE (0xff000000UL)
+
+extern unsigned long FASTCALL(kmap_high(struct page *page));
+extern void FASTCALL(kunmap_high(struct page *page));
 
-extern inline unsigned long kmap(struct page *page, enum km_type type)
+extern inline unsigned long kmap(struct page *page)
 {
+	if (in_interrupt())
+		BUG();
 	if (page < highmem_start_page)
 		return page_address(page);
-	{
-		enum fixed_addresses idx = type+KM_TYPE_NR*smp_processor_id();
-		unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN+idx);
-
-#if HIGHMEM_DEBUG
-		if (!pte_none(*(kmap_pte-idx)))
-		{
-			__label__ here;
-		here:
-			printk(KERN_ERR "not null pte on CPU %d from %p\n",
-			       smp_processor_id(), &&here);
-		}
-#endif
-		set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
-		__flush_tlb_one(vaddr);
+	return kmap_high(page);
+}
 
-		return vaddr;
-	}
+extern inline void kunmap(struct page *page)
+{
+	if (in_interrupt())
+		BUG();
+	if (page < highmem_start_page)
+		return;
+	kunmap_high(page);
 }
 
-extern inline void kunmap(unsigned long vaddr, enum km_type type)
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+extern inline unsigned long kmap_atomic(struct page *page, enum km_type type)
 {
+	enum fixed_addresses idx;
+	unsigned long vaddr;
+
+	if (page < highmem_start_page)
+		return page_address(page);
+
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #if HIGHMEM_DEBUG
-	enum fixed_addresses idx = type+KM_TYPE_NR*smp_processor_id();
-	if ((vaddr & PAGE_MASK) == __fix_to_virt(FIX_KMAP_BEGIN+idx))
-	{
-		/* force other mappings to Oops if they'll try to access
-		   this pte without first remap it */
-		pte_clear(kmap_pte-idx);
-		__flush_tlb_one(vaddr);
-	}
+	if (!pte_none(*(kmap_pte-idx)))
+		BUG();
 #endif
+	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+	__flush_tlb_one(vaddr);
+
+	return vaddr;
 }
 
-extern inline void kmap_check(void)
+extern inline void kunmap_atomic(unsigned long vaddr, enum km_type type)
 {
 #if HIGHMEM_DEBUG
-	int idx_base = KM_TYPE_NR*smp_processor_id(), i;
-	for (i = idx_base; i < idx_base+KM_TYPE_NR; i++)
-		if (!pte_none(*(kmap_pte-i)))
-			BUG();
+	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+	if (vaddr < FIXADDR_START) // FIXME
+		return;
+
+	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
+		BUG();
+
+	/*
+	 * force other mappings to Oops if they'll try to access
+	 * this pte without first remap it
+	 */
+	pte_clear(kmap_pte-idx);
+	__flush_tlb_one(vaddr);
 #endif
 }
+
+#endif /* __KERNEL__ */
+
 #endif /* _ASM_HIGHMEM_H */
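
Usage note (not part of the patch): after this change, kmap()/kunmap() are the process-context interface; kmap() may sleep in kmap_high() waiting for a free pkmap slot, which is why it BUG()s in interrupt context. A minimal sketch of a caller, assuming CONFIG_HIGHMEM on i386; zero_any_page() is a hypothetical helper, not part of the kernel:

	#include <linux/mm.h>
	#include <linux/string.h>
	#include <asm/page.h>
	#include <asm/highmem.h>

	/* Process context only: kmap() returns the direct address for a
	 * lowmem page and a pkmap virtual address for a highmem page. */
	static void zero_any_page(struct page *page)
	{
		unsigned long vaddr;

		vaddr = kmap(page);
		memset((void *) vaddr, 0, PAGE_SIZE);
		kunmap(page);		/* drop the pkmap reference */
	}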

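The kmap_atomic()/kunmap_atomic() pair added above covers the rare callers that cannot sleep (e.g. IRQ context): the mapping uses a fixmap slot private to the current CPU and km_type, so nothing blocks and nothing is shared. A sketch under the same assumptions; KM_BOUNCE_READ is assumed to be one of the slots enumerated in <asm/kmap_types.h>, and copy_into_page() is a hypothetical helper:

	#include <linux/mm.h>
	#include <linux/string.h>
	#include <asm/page.h>
	#include <asm/highmem.h>
	#include <asm/kmap_types.h>

	/* Safe in interrupt context: no sleeping between map and unmap,
	 * and no nesting of two mappings with the same km_type. */
	static void copy_into_page(struct page *page, const void *src,
				   unsigned int len)
	{
		unsigned long vaddr;

		vaddr = kmap_atomic(page, KM_BOUNCE_READ);
		memcpy((void *) vaddr, src, len);
		kunmap_atomic(vaddr, KM_BOUNCE_READ);
	}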