patch-2.4.23 linux-2.4.23/drivers/char/agp/agpgart_be.c


diff -urN linux-2.4.22/drivers/char/agp/agpgart_be.c linux-2.4.23/drivers/char/agp/agpgart_be.c
@@ -49,8 +49,8 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/page.h>
-#ifdef CONFIG_AGP_NVIDIA
-    #include <asm/msr.h>
+#ifdef CONFIG_X86
+#include <asm/msr.h>
 #endif
 
 #include <linux/agp_backend.h>
@@ -69,6 +69,7 @@
 EXPORT_SYMBOL(agp_backend_release);
 
 static void flush_cache(void);
+static int agp_init_one(struct pci_dev *dev);
 
 static struct agp_bridge_data agp_bridge;
 static int agp_try_unsupported __initdata = 0;
@@ -1427,51 +1428,101 @@
 
 #endif /* CONFIG_AGP_I810 */
  
- #ifdef CONFIG_AGP_INTEL
+#ifdef CONFIG_AGP_I460
 
-#endif /* CONFIG_AGP_I810 */
+/* BIOS configures the chipset so that one of two apbase registers is used */
+static u8 intel_i460_dynamic_apbase = 0x10;
 
-#ifdef CONFIG_AGP_INTEL
+/* 460 supports multiple GART page sizes, so GART pageshift is dynamic */ 
+static u8 intel_i460_pageshift = 12;
+static u32 intel_i460_pagesize;
 
-static int intel_fetch_size(void)
-{
-	int i;
-	u16 temp;
-	aper_size_info_16 *values;
+/* Keep track of which is larger, chipset or kernel page size. */
+static u32 intel_i460_cpk = 1;
 
-	pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp);
-	values = A_SIZE_16(agp_bridge.aperture_sizes);
+/* Structure for tracking partial use of 4MB GART pages */
+static u32 **i460_pg_detail = NULL;
+static u32 *i460_pg_count = NULL;
 
-	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-		if (temp == values[i].size_value) {
-			agp_bridge.previous_size =
-			    agp_bridge.current_size = (void *) (values + i);
-			agp_bridge.aperture_size_idx = i;
-			return values[i].size;
-		}
-	}
+#define I460_CPAGES_PER_KPAGE (PAGE_SIZE >> intel_i460_pageshift)
+#define I460_KPAGES_PER_CPAGE ((1 << intel_i460_pageshift) >> PAGE_SHIFT)
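
For reference, a standalone user-space sketch (not part of the patch; KPAGE_SHIFT and
gart_pageshift are stand-ins for the kernel's PAGE_SHIFT and intel_i460_pageshift) of how
these two ratios behave on an ia64 kernel with 16KB pages:

	/* Hypothetical check of the page-ratio macros above. */
	#include <stdio.h>

	#define KPAGE_SHIFT 14				/* assume 16KB kernel pages */
	#define KPAGE_SIZE  (1UL << KPAGE_SHIFT)

	static unsigned int gart_pageshift = 22;	/* 4MB GART pages */

	#define CPAGES_PER_KPAGE (KPAGE_SIZE >> gart_pageshift)
	#define KPAGES_PER_CPAGE ((1UL << gart_pageshift) >> KPAGE_SHIFT)

	int main(void)
	{
		/* 4MB GART pages: 0 chipset pages per kernel page, 256 kernel
		 * pages per chipset page -> the driver takes the kpc path. */
		printf("cpk=%lu kpc=%lu\n",
		       (unsigned long) CPAGES_PER_KPAGE,
		       (unsigned long) KPAGES_PER_CPAGE);

		gart_pageshift = 12;			/* 4KB GART pages */
		/* 4 chipset pages per kernel page -> the cpk path. */
		printf("cpk=%lu kpc=%lu\n",
		       (unsigned long) CPAGES_PER_KPAGE,
		       (unsigned long) KPAGES_PER_CPAGE);
		return 0;
	}
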
 
-	return 0;
+#define I460_SRAM_IO_DISABLE		(1 << 4)
+#define I460_BAPBASE_ENABLE		(1 << 3)
+#define I460_AGPSIZ_MASK		0x7
+#define I460_4M_PS			(1 << 1)
+
+#define log2(x)				ffz(~(x))
+
+static gatt_mask intel_i460_masks[] =
+{
+	{ INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT, 0 }
+};
+
+static unsigned long intel_i460_mask_memory(unsigned long addr, int type) 
+{
+	/* Make sure the returned address is a valid GATT entry */
+	return (agp_bridge.masks[0].mask | (((addr & 
+		     ~((1 << intel_i460_pageshift) - 1)) & 0xffffff000) >> 12));
 }
 
+static unsigned long intel_i460_unmask_memory(unsigned long addr)
+{
+	/* Turn a GATT entry into a physical address */
+	return ((addr & 0xffffff) << 12);
+}
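
A quick round trip through these two helpers, as a sketch; the valid and coherent bits are
assumed here to sit at bits 24 and 25 (check INTEL_I460_GATT_* in agp.h for the real values),
and 64-bit longs are assumed, as on ia64:

	/* Hypothetical round trip of the GATT entry encoding above. */
	#include <stdio.h>

	#define GATT_VALID	(1UL << 24)		/* assumed */
	#define GATT_COHERENT	(1UL << 25)		/* assumed */

	static unsigned long gart_pageshift = 12;	/* 4KB GART pages */

	static unsigned long mask_memory(unsigned long addr)
	{
		return (GATT_VALID | GATT_COHERENT) |
		       (((addr & ~((1UL << gart_pageshift) - 1)) & 0xffffff000UL) >> 12);
	}

	static unsigned long unmask_memory(unsigned long entry)
	{
		return (entry & 0xffffffUL) << 12;
	}

	int main(void)
	{
		unsigned long paddr = 0x12345000UL;

		/* prints "entry=0x3012345 back=0x12345000" */
		printf("entry=%#lx back=%#lx\n",
		       mask_memory(paddr), unmask_memory(mask_memory(paddr)));
		return 0;
	}

The entry keeps physical address bits 12 through 35 in its low 24 bits, with the control
bits stacked above them, which is why unmasking needs only a mask and a shift.
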
 
-static int intel_8xx_fetch_size(void)
+static int intel_i460_fetch_size(void)
 {
 	int i;
 	u8 temp;
 	aper_size_info_8 *values;
 
-	pci_read_config_byte(agp_bridge.dev, INTEL_APSIZE, &temp);
+	/* Determine the GART page size */
+	pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &temp);
+	intel_i460_pageshift = (temp & I460_4M_PS) ? 22 : 12;
+	intel_i460_pagesize = 1UL << intel_i460_pageshift;
 
-        /* Intel 815 chipsets have a _weird_ APSIZE register with only
-         * one non-reserved bit, so mask the others out ... */
-        if (agp_bridge.type == INTEL_I815) 
-		temp &= (1 << 3);
-        
 	values = A_SIZE_8(agp_bridge.aperture_sizes);
 
+	pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp);
+
+	/* Exit now if the IO drivers for the GART SRAMS are turned off */
+	if (temp & I460_SRAM_IO_DISABLE) {
+		printk(KERN_WARNING PFX "GART SRAMS disabled on 460GX chipset\n");
+		printk(KERN_WARNING PFX "AGPGART operation not possible\n");
+		return 0;
+	}
+
+	/* Make sure we don't try to create a 2^23-entry GATT */
+	if ((intel_i460_pageshift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) {
+		printk(KERN_WARNING PFX "We can't have a 32GB aperture with 4KB"
+			" GART pages\n");
+		return 0;
+	}
+
+	/* Determine the proper APBASE register */
+	if (temp & I460_BAPBASE_ENABLE)
+		intel_i460_dynamic_apbase = INTEL_I460_BAPBASE;
+	else
+		intel_i460_dynamic_apbase = INTEL_I460_APBASE;
+
 	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-		if (temp == values[i].size_value) {
+
+		/*
+		 * Dynamically calculate the proper num_entries and page_order 
+		 * values for the defined aperture sizes. Take care not to
+		 * shift off the end of values[i].size.
+		 */	
+		values[i].num_entries = (values[i].size << 8) >>
+						(intel_i460_pageshift - 12);
+		values[i].page_order = log2((sizeof(u32)*values[i].num_entries)
+						>> PAGE_SHIFT);
+	}
+
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		/* Neglect control bits when matching up size_value */
+		if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) {
 			agp_bridge.previous_size =
 			    agp_bridge.current_size = (void *) (values + i);
 			agp_bridge.aperture_size_idx = i;
@@ -1482,217 +1533,606 @@
 	return 0;
 }
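
Worked numbers for the sizing loop above, as a sketch assuming a 16KB-page ia64 kernel and
the 1024MB aperture entry: with 4KB GART pages the table needs (1024 << 8) = 262144 entries,
i.e. a 1MB GATT spanning 64 kernel pages, so page_order is 6. The local log2() macro works
because ffz(~x) is the index of the lowest set bit of x, which equals the base-2 logarithm
when x is a power of two.

	/* Hypothetical stand-alone version of the sizing arithmetic above;
	 * __builtin_ctzl() plays the role of the ffz(~(x)) log2() macro. */
	#include <stdio.h>

	#define KPAGE_SHIFT 14				/* assume 16KB kernel pages */

	int main(void)
	{
		unsigned long size = 1024;		/* aperture size in MB */
		unsigned long gart_pageshift = 12;	/* 4KB GART pages */

		unsigned long num_entries = (size << 8) >> (gart_pageshift - 12);
		unsigned long page_order =
			__builtin_ctzl((sizeof(unsigned int) * num_entries) >> KPAGE_SHIFT);

		/* prints "entries=262144 order=6" */
		printf("entries=%lu order=%lu\n", num_entries, page_order);
		return 0;
	}
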
 
-static void intel_tlbflush(agp_memory * mem)
-{
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200);
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
-}
-
-
-static void intel_8xx_tlbflush(agp_memory * mem)
+/* There isn't anything to do here since 460 has no GART TLB. */ 
+static void intel_i460_tlb_flush(agp_memory * mem)
 {
-  u32 temp;
-  pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp);
-  pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp & ~(1 << 7));
-  pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp);
-  pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp | (1 << 7));
+	return;
 }
 
-
-static void intel_cleanup(void)
+/*
+ * This utility function is needed to prevent corruption of the control bits
+ * which are stored along with the aperture size in 460's AGPSIZ register
+ */
+static void intel_i460_write_agpsiz(u8 size_value)
 {
-	u16 temp;
-	aper_size_info_16 *previous_size;
+	u8 temp;
 
-	previous_size = A_SIZE_16(agp_bridge.previous_size);
-	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
-	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
-	pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
-			      previous_size->size_value);
+	pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp);
+	pci_write_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ,
+		((temp & ~I460_AGPSIZ_MASK) | size_value));
 }
 
-
-static void intel_8xx_cleanup(void)
+static void intel_i460_cleanup(void)
 {
-	u16 temp;
 	aper_size_info_8 *previous_size;
 
 	previous_size = A_SIZE_8(agp_bridge.previous_size);
-	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
-	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
-	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-			      previous_size->size_value);
+	intel_i460_write_agpsiz(previous_size->size_value);
+
+	if (intel_i460_cpk == 0) {
+		vfree(i460_pg_detail);
+		vfree(i460_pg_count);
+	}
 }
 
 
-static int intel_configure(void)
+/* Control bits for Out-Of-GART coherency and Burst Write Combining */
+#define I460_GXBCTL_OOG		(1UL << 0)
+#define I460_GXBCTL_BWC		(1UL << 2)
+
+static int intel_i460_configure(void)
 {
-	u32 temp;
-	u16 temp2;
-	aper_size_info_16 *current_size;
+	union {
+		u32 small[2];
+		u64 large;
+	} temp;
+	u8 scratch;
+	int i;
 
-	current_size = A_SIZE_16(agp_bridge.current_size);
+	aper_size_info_8 *current_size;
 
-	/* aperture size */
-	pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
-			      current_size->size_value);
+	temp.large = 0;
 
-	/* address to map to */
-	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	current_size = A_SIZE_8(agp_bridge.current_size);
+	intel_i460_write_agpsiz(current_size->size_value);	
 
-	/* attbase - aperture base */
-	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
-			       agp_bridge.gatt_bus_addr);
+	/*
+	 * Do the necessary rigmarole to read all eight bytes of APBASE.
+	 * This has to be done since the AGP aperture can be above 4GB on
+	 * 460 based systems.
+	 */
+	pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase, 
+		&(temp.small[0]));
+	pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase + 4,
+		&(temp.small[1]));
+
+	/* Clear BAR control bits */
+	agp_bridge.gart_bus_addr = temp.large & ~((1UL << 3) - 1); 
+
+	pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &scratch);
+	pci_write_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL,
+			  (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);
+
+	/* 
+	 * Initialize partial allocation trackers if a GART page is bigger than
+	 * a kernel page.
+	 */
+	if (I460_CPAGES_PER_KPAGE >= 1) {
+		intel_i460_cpk = 1;
+	} else {
+		intel_i460_cpk = 0;
 
-	/* agpctrl */
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
+		i460_pg_detail = (void *) vmalloc(sizeof(*i460_pg_detail) *
+					current_size->num_entries);
+		i460_pg_count = (void *) vmalloc(sizeof(*i460_pg_count) *
+					current_size->num_entries);
+	
+		for (i = 0; i < current_size->num_entries; i++) {
+			i460_pg_count[i] = 0;
+			i460_pg_detail[i] = NULL;
+		}
+	}
 
-	/* paccfg/nbxcfg */
-	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
-	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
-			      (temp2 & ~(1 << 10)) | (1 << 9));
-	/* clear any possible error conditions */
-	pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7);
 	return 0;
 }
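
The union in intel_i460_configure() simply stitches the two 32-bit config reads of
APBASE/BAPBASE into one 64-bit aperture base; a user-space sketch with made-up register
values (little-endian layout, as on ia64):

	/* Hypothetical sketch of the two-dword BAR assembly above. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		union {
			uint32_t small[2];
			uint64_t large;
		} temp;
		uint64_t gart_bus_addr;

		/* Stand-ins for the two pci_read_config_dword() results. */
		temp.small[0] = 0xf8000004;	/* low dword: base plus BAR type bits */
		temp.small[1] = 0x00000004;	/* high dword: address bits 32-63 */

		/* Drop the low BAR control bits, as the driver does. */
		gart_bus_addr = temp.large & ~((uint64_t) (1 << 3) - 1);

		/* prints "aperture base = 0x4f8000000" */
		printf("aperture base = %#llx\n", (unsigned long long) gart_bus_addr);
		return 0;
	}
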
 
-static int intel_815_configure(void)
-{
-	u32 temp, addr;
-	u8 temp2;
-	aper_size_info_8 *current_size;
+static int intel_i460_create_gatt_table(void) {
 
-	current_size = A_SIZE_8(agp_bridge.current_size);
+	char *table;
+	int i;
+	int page_order;
+	int num_entries;
+	void *temp;
+	unsigned int read_back;
 
-	/* aperture size */
-	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-			      current_size->size_value); 
+	/*
+	 * Load up the fixed address of the GART SRAMS which hold our
+	 * GATT table.
+	 */
+	table = (char *) __va(INTEL_I460_ATTBASE);
 
-	/* address to map to */
-	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	temp = agp_bridge.current_size;
+	page_order = A_SIZE_8(temp)->page_order;
+	num_entries = A_SIZE_8(temp)->num_entries;
 
-	/* attbase - aperture base */
-        /* the Intel 815 chipset spec. says that bits 29-31 in the
-         * ATTBASE register are reserved -> try not to write them */
-        if (agp_bridge.gatt_bus_addr &  INTEL_815_ATTBASE_MASK)
-		panic("gatt bus addr too high");
-	pci_read_config_dword(agp_bridge.dev, INTEL_ATTBASE, &addr);
-	addr &= INTEL_815_ATTBASE_MASK;
-	addr |= agp_bridge.gatt_bus_addr;
-	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, addr);
+	agp_bridge.gatt_table_real = (u32 *) table;
+	agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
+		 			     (PAGE_SIZE * (1 << page_order)));
+	agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
 
-	/* agpctrl */
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
+	for (i = 0; i < num_entries; i++) {
+		agp_bridge.gatt_table[i] = 0;
+	}
+
+	/* 
+	 * The 460 spec says we have to read the last location written to 
+	 * make sure that all writes have taken effect
+	 */
+	read_back = agp_bridge.gatt_table[i - 1];
 
-	/* apcont */
-	pci_read_config_byte(agp_bridge.dev, INTEL_815_APCONT, &temp2);
-	pci_write_config_byte(agp_bridge.dev, INTEL_815_APCONT,
-			      temp2 | (1 << 1));
-	/* clear any possible error conditions */
-        /* Oddness : this chipset seems to have no ERRSTS register ! */
 	return 0;
 }
 
-static void intel_820_tlbflush(agp_memory * mem)
+static int intel_i460_free_gatt_table(void)
 {
-  return;
-}
+	int num_entries;
+	int i;
+	void *temp;
+	unsigned int read_back;
 
-static void intel_820_cleanup(void)
-{
-	u8 temp;
-	aper_size_info_8 *previous_size;
+	temp = agp_bridge.current_size;
 
-	previous_size = A_SIZE_8(agp_bridge.previous_size);
-	pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp);
-	pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, 
-			      temp & ~(1 << 1));
-	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-			      previous_size->size_value);
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	for (i = 0; i < num_entries; i++) {
+		agp_bridge.gatt_table[i] = 0;
+	}
+	
+	/* 
+	 * The 460 spec says we have to read the last location written to 
+	 * make sure that all writes have taken effect
+	 */
+	read_back = agp_bridge.gatt_table[i - 1];
+
+	iounmap(agp_bridge.gatt_table);
+
+	return 0;
 }
 
+/* These functions are called when PAGE_SIZE exceeds the GART page size */	
 
-static int intel_820_configure(void)
+static int intel_i460_insert_memory_cpk(agp_memory * mem,
+				     off_t pg_start, int type)
 {
-	u32 temp;
- 	u8 temp2; 
-	aper_size_info_8 *current_size;
+	int i, j, k, num_entries;
+	void *temp;
+	unsigned long paddr;
+	unsigned int read_back;
 
-	current_size = A_SIZE_8(agp_bridge.current_size);
+	/* 
+	 * The rest of the kernel will compute page offsets in terms of
+	 * PAGE_SIZE.
+	 */
+	pg_start = I460_CPAGES_PER_KPAGE * pg_start;
 
-	/* aperture size */
-	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-			      current_size->size_value); 
+	temp = agp_bridge.current_size;
+	num_entries = A_SIZE_8(temp)->num_entries;
 
-	/* address to map to */
-	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
-	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	if ((pg_start + I460_CPAGES_PER_KPAGE * mem->page_count) > num_entries) {
+		printk(KERN_WARNING PFX "Looks like we're out of AGP memory\n");
+		return -EINVAL;
+	}
 
-	/* attbase - aperture base */
-	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
-			       agp_bridge.gatt_bus_addr); 
+	j = pg_start;
+	while (j < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count)) {
+		if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
+			return -EBUSY;
+		}
+		j++;
+	}
 
-	/* agpctrl */
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
+	for (i = 0, j = pg_start; i < mem->page_count; i++) {
+
+		paddr = mem->memory[i];
+
+		for (k = 0; k < I460_CPAGES_PER_KPAGE; k++, j++, paddr += intel_i460_pagesize)
+			agp_bridge.gatt_table[j] = (unsigned int)
+			    agp_bridge.mask_memory(paddr, mem->type);
+	}
+
+	/* 
+	 * The 460 spec says we have to read the last location written to 
+	 * make sure that all writes have taken effect
+	 */
+	read_back = agp_bridge.gatt_table[j - 1];
 
-	/* global enable aperture access */
-	/* This flag is not accessed through MCHCFG register as in */
-	/* i850 chipset. */
-	pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp2);
-	pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, 
-			      temp2 | (1 << 1));
-	/* clear any possible AGP-related error conditions */
-	pci_write_config_word(agp_bridge.dev, INTEL_I820_ERRSTS, 0x001c); 
 	return 0;
 }
 
-static int intel_830mp_configure(void)
+static int intel_i460_remove_memory_cpk(agp_memory * mem, off_t pg_start,
+				     int type)
 {
-       u32 temp;
-       u16 temp2;
-       aper_size_info_8 *current_size;
+	int i;
+	unsigned int read_back;
 
-       current_size = A_SIZE_8(agp_bridge.current_size);
+	pg_start = I460_CPAGES_PER_KPAGE * pg_start;
 
-       /* aperture size */
-       pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-                             current_size->size_value);
+	for (i = pg_start; i < (pg_start + I460_CPAGES_PER_KPAGE * 
+							mem->page_count); i++) 
+		agp_bridge.gatt_table[i] = 0;
 
-       /* address to map to */
-       pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
-       agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	/* 
+	 * The 460 spec says we have to read the last location written to 
+	 * make sure that all writes have taken effect
+	 */
+	read_back = agp_bridge.gatt_table[i - 1];
 
-       /* attbase - aperture base */
-       pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
-                              agp_bridge.gatt_bus_addr);
+	return 0;
+}
 
-       /* agpctrl */
-       pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);
+/*
+ * These functions are called when the GART page size exceeds PAGE_SIZE.
+ *
+ * This situation is interesting since AGP memory allocations that are
+ * smaller than a single GART page are possible.  The structures i460_pg_count
+ * and i460_pg_detail track partial allocation of the large GART pages to
+ * work around this issue.
+ *
+ * i460_pg_count[pg_num] tracks the number of kernel pages in use within
+ * GART page pg_num.  i460_pg_detail[pg_num] is an array containing a
+ * pseudo-GART entry for each of the aforementioned kernel pages.  The whole
+ * of i460_pg_detail is equivalent to a giant GATT with page size equal to
+ * that of the kernel.
+ */	
 
-       /* gmch */
-       pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
-       pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
-                             temp2 | (1 << 9));
-       /* clear any possible AGP-related error conditions */
-       pci_write_config_word(agp_bridge.dev, INTEL_I830_ERRSTS, 0x1c);
-       return 0;
+static void *intel_i460_alloc_large_page(int pg_num)
+{
+	int i;
+	void *bp, *bp_end;
+	struct page *page;
+
+	i460_pg_detail[pg_num] = (void *) vmalloc(sizeof(u32) * 
+							I460_KPAGES_PER_CPAGE);
+	if (i460_pg_detail[pg_num] == NULL) {
+		printk(KERN_WARNING PFX "Out of memory, we're in trouble...\n");
+		return NULL;
+	}
+
+	for (i = 0; i < I460_KPAGES_PER_CPAGE; i++)
+		i460_pg_detail[pg_num][i] = 0;
+
+	bp = (void *) __get_free_pages(GFP_KERNEL, 
+					intel_i460_pageshift - PAGE_SHIFT);
+	if (bp == NULL) {
+		printk(KERN_WARNING PFX "Couldn't alloc 4M GART page...\n");
+		return NULL;
+	}
+
+	bp_end = bp + ((PAGE_SIZE * 
+			    (1 << (intel_i460_pageshift - PAGE_SHIFT))) - 1);
+
+	for (page = virt_to_page(bp); page <= virt_to_page(bp_end); page++) {
+		atomic_inc(&page->count);
+		set_bit(PG_locked, &page->flags);
+		atomic_inc(&agp_bridge.current_memory_agp);
+	}
+
+	return bp;		
 }
-       
 
+static void intel_i460_free_large_page(int pg_num, unsigned long addr)
+{
+	struct page *page;
+	void *bp, *bp_end;
+
+	bp = (void *) __va(addr);
+	bp_end = bp + (PAGE_SIZE * 
+			(1 << (intel_i460_pageshift - PAGE_SHIFT)));
 
-static int intel_840_configure(void)
+	vfree(i460_pg_detail[pg_num]);
+	i460_pg_detail[pg_num] = NULL;
+
+	for (page = virt_to_page(bp); page < virt_to_page(bp_end); page++) {
+		put_page(page);
+		UnlockPage(page);
+		atomic_dec(&agp_bridge.current_memory_agp);
+	}
+
+	free_pages((unsigned long) bp, intel_i460_pageshift - PAGE_SHIFT);
+}
+	
+static int intel_i460_insert_memory_kpc(agp_memory * mem,
+				     off_t pg_start, int type)
+{
+	int i, pg, start_pg, end_pg, start_offset, end_offset, idx;
+	int num_entries;	
+	void *temp;
+	unsigned int read_back;
+	unsigned long paddr;
+
+	temp = agp_bridge.current_size;
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	/* Figure out what pg_start means in terms of our large GART pages */
+	start_pg 	= pg_start / I460_KPAGES_PER_CPAGE;
+	start_offset 	= pg_start % I460_KPAGES_PER_CPAGE;
+	end_pg 		= (pg_start + mem->page_count - 1) / 
+							I460_KPAGES_PER_CPAGE;
+	end_offset 	= (pg_start + mem->page_count - 1) % 
+							I460_KPAGES_PER_CPAGE;
+
+	if (end_pg > num_entries) {
+		printk(KERN_WARNING PFX "Looks like we're out of AGP memory\n");
+		return -EINVAL;
+	}
+
+	/* Check if the requested region of the aperture is free */
+	for (pg = start_pg; pg <= end_pg; pg++) {
+		/* Allocate new GART pages if necessary */
+		if (i460_pg_detail[pg] == NULL) {
+			temp = intel_i460_alloc_large_page(pg);
+			if (temp == NULL)
+				return -ENOMEM;
+			agp_bridge.gatt_table[pg] = agp_bridge.mask_memory(
+			    			       (unsigned long) temp, 0);
+			read_back = agp_bridge.gatt_table[pg];
+		}
+
+		for (idx = ((pg == start_pg) ? start_offset : 0);
+		     idx < ((pg == end_pg) ? (end_offset + 1) 
+				       : I460_KPAGES_PER_CPAGE);
+		     idx++) {
+			if(i460_pg_detail[pg][idx] != 0)
+				return -EBUSY;
+		}
+	}
+		
+	for (pg = start_pg, i = 0; pg <= end_pg; pg++) {
+		paddr = intel_i460_unmask_memory(agp_bridge.gatt_table[pg]);
+		for (idx = ((pg == start_pg) ? start_offset : 0);
+		     idx < ((pg == end_pg) ? (end_offset + 1)
+				       : I460_KPAGES_PER_CPAGE);
+		     idx++, i++) {
+			mem->memory[i] = paddr + (idx * PAGE_SIZE);
+			i460_pg_detail[pg][idx] =
+			    agp_bridge.mask_memory(mem->memory[i], mem->type);
+			    
+			i460_pg_count[pg]++;
+		}
+	}
+
+	return 0;
+}
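
To make the start/end index arithmetic above concrete, a sketch with assumed numbers
(16KB kernel pages and 4MB GART pages give 256 kernel pages per chipset page):
pg_start = 300 and page_count = 20 land entirely inside large GART page 1, at offsets
44 through 63.

	/* Hypothetical numbers for the index arithmetic above. */
	#include <stdio.h>

	#define KPAGES_PER_CPAGE 256

	int main(void)
	{
		long pg_start = 300, page_count = 20;

		long start_pg     = pg_start / KPAGES_PER_CPAGE;
		long start_offset = pg_start % KPAGES_PER_CPAGE;
		long end_pg       = (pg_start + page_count - 1) / KPAGES_PER_CPAGE;
		long end_offset   = (pg_start + page_count - 1) % KPAGES_PER_CPAGE;

		/* prints "start_pg=1 start_offset=44 end_pg=1 end_offset=63" */
		printf("start_pg=%ld start_offset=%ld end_pg=%ld end_offset=%ld\n",
		       start_pg, start_offset, end_pg, end_offset);
		return 0;
	}
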
+	
+static int intel_i460_remove_memory_kpc(agp_memory * mem,
+				     off_t pg_start, int type)
+{
+	int i, pg, start_pg, end_pg, start_offset, end_offset, idx;
+	int num_entries;
+	void *temp;
+	unsigned int read_back;
+	unsigned long paddr;
+
+	temp = agp_bridge.current_size;
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	/* Figure out what pg_start means in terms of our large GART pages */
+	start_pg 	= pg_start / I460_KPAGES_PER_CPAGE;
+	start_offset 	= pg_start % I460_KPAGES_PER_CPAGE;
+	end_pg 		= (pg_start + mem->page_count - 1) / 
+						I460_KPAGES_PER_CPAGE;
+	end_offset 	= (pg_start + mem->page_count - 1) % 
+						I460_KPAGES_PER_CPAGE;
+
+	for (i = 0, pg = start_pg; pg <= end_pg; pg++) {
+		for (idx = ((pg == start_pg) ? start_offset : 0);
+		     idx < ((pg == end_pg) ? (end_offset + 1)
+				       : I460_KPAGES_PER_CPAGE);
+		     idx++, i++) {
+			mem->memory[i] = 0;
+			i460_pg_detail[pg][idx] = 0;
+			i460_pg_count[pg]--;
+		}
+
+		/* Free GART pages if they are unused */
+		if (i460_pg_count[pg] == 0) {
+			paddr = intel_i460_unmask_memory(agp_bridge.gatt_table[pg]);
+			agp_bridge.gatt_table[pg] = agp_bridge.scratch_page;
+			read_back = agp_bridge.gatt_table[pg];
+
+			intel_i460_free_large_page(pg, paddr);
+		}
+	}
+		
+	return 0;
+}
+
+/* Dummy routines to call the appropriate {cpk,kpc} function */
+
+static int intel_i460_insert_memory(agp_memory * mem,
+				     off_t pg_start, int type)
+{
+	if (intel_i460_cpk)
+		return intel_i460_insert_memory_cpk(mem, pg_start, type);
+	else
+		return intel_i460_insert_memory_kpc(mem, pg_start, type);
+}
+
+static int intel_i460_remove_memory(agp_memory * mem,
+				     off_t pg_start, int type)
+{
+	if (intel_i460_cpk)
+		return intel_i460_remove_memory_cpk(mem, pg_start, type);
+	else
+		return intel_i460_remove_memory_kpc(mem, pg_start, type);
+}
+
+/*
+ * If the kernel page size is smaller than the chipset page size, we don't
+ * want to allocate memory until we know where it is to be bound in the
+ * aperture (a multi-kernel-page alloc might fit inside of an already
+ * allocated GART page).  Consequently, don't allocate or free anything
+ * if i460_cpk (meaning chipset pages per kernel page) isn't set.
+ *
+ * Let's just hope nobody counts on the allocated AGP memory being there
+ * before bind time (I don't think current drivers do)...
+ */ 
+static unsigned long intel_i460_alloc_page(void)
+{
+	if (intel_i460_cpk)
+		return agp_generic_alloc_page();
+
+	/* Returning NULL would cause problems */
+	return ((unsigned long) ~0UL);
+}
+
+static void intel_i460_destroy_page(unsigned long page)
+{
+	if (intel_i460_cpk)
+		agp_generic_destroy_page(page);
+}
+
+static aper_size_info_8 intel_i460_sizes[3] =
+{
+	/* 
+	 * The 32GB aperture is only available with a 4M GART page size.
+	 * Due to the dynamic GART page size, we can't figure out page_order
+	 * or num_entries until runtime.
+	 */
+	{32768, 0, 0, 4},
+	{1024, 0, 0, 2},
+	{256, 0, 0, 1}
+};
+
+static int __init intel_i460_setup(struct pci_dev *pdev)
+{
+        agp_bridge.masks = intel_i460_masks;
+        agp_bridge.aperture_sizes = (void *) intel_i460_sizes;
+        agp_bridge.size_type = U8_APER_SIZE;
+        agp_bridge.num_aperture_sizes = 3;
+        agp_bridge.dev_private_data = NULL;
+        agp_bridge.needs_scratch_page = FALSE;
+        agp_bridge.configure = intel_i460_configure;
+        agp_bridge.fetch_size = intel_i460_fetch_size;
+        agp_bridge.cleanup = intel_i460_cleanup;
+        agp_bridge.tlb_flush = intel_i460_tlb_flush;
+        agp_bridge.mask_memory = intel_i460_mask_memory;
+        agp_bridge.agp_enable = agp_generic_agp_enable;
+        agp_bridge.cache_flush = global_cache_flush;
+        agp_bridge.create_gatt_table = intel_i460_create_gatt_table;
+        agp_bridge.free_gatt_table = intel_i460_free_gatt_table;
+        agp_bridge.insert_memory = intel_i460_insert_memory;
+        agp_bridge.remove_memory = intel_i460_remove_memory;
+        agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+        agp_bridge.free_by_type = agp_generic_free_by_type;
+        agp_bridge.agp_alloc_page = intel_i460_alloc_page;
+        agp_bridge.agp_destroy_page = intel_i460_destroy_page;
+        agp_bridge.suspend = agp_generic_suspend;
+        agp_bridge.resume = agp_generic_resume;
+        agp_bridge.cant_use_aperture = 1;
+
+        return 0;
+
+        (void) pdev; /* unused */
+}
+
+#endif		/* CONFIG_AGP_I460 */
+
+#ifdef CONFIG_AGP_INTEL
+
+static int intel_fetch_size(void)
+{
+	int i;
+	u16 temp;
+	aper_size_info_16 *values;
+
+	pci_read_config_word(agp_bridge.dev, INTEL_APSIZE, &temp);
+	values = A_SIZE_16(agp_bridge.aperture_sizes);
+
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge.previous_size =
+			    agp_bridge.current_size = (void *) (values + i);
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+
+static int intel_8xx_fetch_size(void)
+{
+	int i;
+	u8 temp;
+	aper_size_info_8 *values;
+
+	pci_read_config_byte(agp_bridge.dev, INTEL_APSIZE, &temp);
+
+        /* Intel 815 chipsets have a _weird_ APSIZE register with only
+         * one non-reserved bit, so mask the others out ... */
+        if (agp_bridge.type == INTEL_I815) 
+		temp &= (1 << 3);
+        
+	values = A_SIZE_8(agp_bridge.aperture_sizes);
+
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge.previous_size =
+			    agp_bridge.current_size = (void *) (values + i);
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+
+	return 0;
+}
+
+static void intel_tlbflush(agp_memory * mem)
+{
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2200);
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
+}
+
+
+static void intel_8xx_tlbflush(agp_memory * mem)
+{
+  u32 temp;
+  pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp);
+  pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp & ~(1 << 7));
+  pci_read_config_dword(agp_bridge.dev, INTEL_AGPCTRL, &temp);
+  pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, temp | (1 << 7));
+}
+
+
+static void intel_cleanup(void)
+{
+	u16 temp;
+	aper_size_info_16 *previous_size;
+
+	previous_size = A_SIZE_16(agp_bridge.previous_size);
+	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
+	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
+	pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
+			      previous_size->size_value);
+}
+
+
+static void intel_8xx_cleanup(void)
+{
+	u16 temp;
+	aper_size_info_8 *previous_size;
+
+	previous_size = A_SIZE_8(agp_bridge.previous_size);
+	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp);
+	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG, temp & ~(1 << 9));
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			      previous_size->size_value);
+}
+
+
+static int intel_configure(void)
 {
 	u32 temp;
 	u16 temp2;
-	aper_size_info_8 *current_size;
+	aper_size_info_16 *current_size;
 
-	current_size = A_SIZE_8(agp_bridge.current_size);
+	current_size = A_SIZE_16(agp_bridge.current_size);
 
 	/* aperture size */
-	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
-			      current_size->size_value); 
+	pci_write_config_word(agp_bridge.dev, INTEL_APSIZE,
+			      current_size->size_value);
 
 	/* address to map to */
 	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
@@ -1700,23 +2140,23 @@
 
 	/* attbase - aperture base */
 	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
-			       agp_bridge.gatt_bus_addr); 
+			       agp_bridge.gatt_bus_addr);
 
 	/* agpctrl */
-	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x2280);
 
-	/* mcgcfg */
-	pci_read_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, &temp2);
-	pci_write_config_word(agp_bridge.dev, INTEL_I840_MCHCFG,
-			      temp2 | (1 << 9));
+	/* paccfg/nbxcfg */
+	pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
+	pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
+			      (temp2 & ~(1 << 10)) | (1 << 9));
 	/* clear any possible error conditions */
-	pci_write_config_word(agp_bridge.dev, INTEL_I840_ERRSTS, 0xc000); 
+	pci_write_config_byte(agp_bridge.dev, INTEL_ERRSTS + 1, 7);
 	return 0;
 }
 
-static int intel_845_configure(void)
+static int intel_815_configure(void)
 {
-	u32 temp;
+	u32 temp, addr;
 	u8 temp2;
 	aper_size_info_8 *current_size;
 
@@ -1731,31 +2171,50 @@
 	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
 
 	/* attbase - aperture base */
-	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
-			       agp_bridge.gatt_bus_addr); 
+        /* the Intel 815 chipset spec. says that bits 29-31 in the
+         * ATTBASE register are reserved -> try not to write them */
+        if (agp_bridge.gatt_bus_addr &  INTEL_815_ATTBASE_MASK)
+		panic("gatt bus addr too high");
+	pci_read_config_dword(agp_bridge.dev, INTEL_ATTBASE, &addr);
+	addr &= INTEL_815_ATTBASE_MASK;
+	addr |= agp_bridge.gatt_bus_addr;
+	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE, addr);
 
 	/* agpctrl */
 	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
 
-	/* agpm */
-	pci_read_config_byte(agp_bridge.dev, INTEL_I845_AGPM, &temp2);
-	pci_write_config_byte(agp_bridge.dev, INTEL_I845_AGPM,
+	/* apcont */
+	pci_read_config_byte(agp_bridge.dev, INTEL_815_APCONT, &temp2);
+	pci_write_config_byte(agp_bridge.dev, INTEL_815_APCONT,
 			      temp2 | (1 << 1));
 	/* clear any possible error conditions */
-	pci_write_config_word(agp_bridge.dev, INTEL_I845_ERRSTS, 0x001c); 
+        /* Oddness : this chipset seems to have no ERRSTS register ! */
 	return 0;
 }
 
-static void intel_845_resume(void)
+static void intel_820_tlbflush(agp_memory * mem)
 {
-   intel_845_configure();
+  return;
 }
 
+static void intel_820_cleanup(void)
+{
+	u8 temp;
+	aper_size_info_8 *previous_size;
 
-static int intel_850_configure(void)
+	previous_size = A_SIZE_8(agp_bridge.previous_size);
+	pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp);
+	pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, 
+			      temp & ~(1 << 1));
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			      previous_size->size_value);
+}
+
+
+static int intel_820_configure(void)
 {
 	u32 temp;
-	u16 temp2;
+ 	u8 temp2; 
 	aper_size_info_8 *current_size;
 
 	current_size = A_SIZE_8(agp_bridge.current_size);
@@ -1775,12 +2234,150 @@
 	/* agpctrl */
 	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
 
-	/* mcgcfg */
-	pci_read_config_word(agp_bridge.dev, INTEL_I850_MCHCFG, &temp2);
-	pci_write_config_word(agp_bridge.dev, INTEL_I850_MCHCFG,
-			      temp2 | (1 << 9));
-	/* clear any possible AGP-related error conditions */
-	pci_write_config_word(agp_bridge.dev, INTEL_I850_ERRSTS, 0x001c); 
+	/* global enable aperture access */
+	/* This flag is not accessed through MCHCFG register as in */
+	/* i850 chipset. */
+	pci_read_config_byte(agp_bridge.dev, INTEL_I820_RDCR, &temp2);
+	pci_write_config_byte(agp_bridge.dev, INTEL_I820_RDCR, 
+			      temp2 | (1 << 1));
+	/* clear any possible AGP-related error conditions */
+	pci_write_config_word(agp_bridge.dev, INTEL_I820_ERRSTS, 0x001c); 
+	return 0;
+}
+
+static int intel_830mp_configure(void)
+{
+       u32 temp;
+       u16 temp2;
+       aper_size_info_8 *current_size;
+
+       current_size = A_SIZE_8(agp_bridge.current_size);
+
+       /* aperture size */
+       pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+                             current_size->size_value);
+
+       /* address to map to */
+       pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+       agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+       /* attbase - aperture base */
+       pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
+                              agp_bridge.gatt_bus_addr);
+
+       /* agpctrl */
+       pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);
+
+       /* gmch */
+       pci_read_config_word(agp_bridge.dev, INTEL_NBXCFG, &temp2);
+       pci_write_config_word(agp_bridge.dev, INTEL_NBXCFG,
+                             temp2 | (1 << 9));
+       /* clear any possible AGP-related error conditions */
+       pci_write_config_word(agp_bridge.dev, INTEL_I830_ERRSTS, 0x1c);
+       return 0;
+}
+       
+
+
+static int intel_840_configure(void)
+{
+	u32 temp;
+	u16 temp2;
+	aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			      current_size->size_value); 
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
+			       agp_bridge.gatt_bus_addr); 
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
+
+	/* mcgcfg */
+	pci_read_config_word(agp_bridge.dev, INTEL_I840_MCHCFG, &temp2);
+	pci_write_config_word(agp_bridge.dev, INTEL_I840_MCHCFG,
+			      temp2 | (1 << 9));
+	/* clear any possible error conditions */
+	pci_write_config_word(agp_bridge.dev, INTEL_I840_ERRSTS, 0xc000); 
+	return 0;
+}
+
+static int intel_845_configure(void)
+{
+	u32 temp;
+	u8 temp2;
+	aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			      current_size->size_value); 
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
+			       agp_bridge.gatt_bus_addr); 
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
+
+	/* agpm */
+	pci_read_config_byte(agp_bridge.dev, INTEL_I845_AGPM, &temp2);
+	pci_write_config_byte(agp_bridge.dev, INTEL_I845_AGPM,
+			      temp2 | (1 << 1));
+	/* clear any possible error conditions */
+	pci_write_config_word(agp_bridge.dev, INTEL_I845_ERRSTS, 0x001c); 
+	return 0;
+}
+
+static void intel_845_resume(void)
+{
+   intel_845_configure();
+}
+
+
+static int intel_850_configure(void)
+{
+	u32 temp;
+	u16 temp2;
+	aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			      current_size->size_value); 
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
+			       agp_bridge.gatt_bus_addr); 
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000); 
+
+	/* mcgcfg */
+	pci_read_config_word(agp_bridge.dev, INTEL_I850_MCHCFG, &temp2);
+	pci_write_config_word(agp_bridge.dev, INTEL_I850_MCHCFG,
+			      temp2 | (1 << 9));
+	/* clear any possible AGP-related error conditions */
+	pci_write_config_word(agp_bridge.dev, INTEL_I850_ERRSTS, 0x001c); 
 	return 0;
 }
 
@@ -1817,6 +2414,37 @@
 }
 
 
+static int intel_7505_configure(void)
+{
+	u32 temp;
+	u16 temp2;
+	aper_size_info_8 *current_size;
+
+	current_size = A_SIZE_8(agp_bridge.current_size);
+
+	/* aperture size */
+	pci_write_config_byte(agp_bridge.dev, INTEL_APSIZE,
+			      current_size->size_value);
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, INTEL_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* attbase - aperture base */
+	pci_write_config_dword(agp_bridge.dev, INTEL_ATTBASE,
+			       agp_bridge.gatt_bus_addr);
+
+	/* agpctrl */
+	pci_write_config_dword(agp_bridge.dev, INTEL_AGPCTRL, 0x0000);
+
+	/* mcgcfg */
+	pci_read_config_word(agp_bridge.dev, INTEL_I7505_MCHCFG, &temp2);
+	pci_write_config_word(agp_bridge.dev, INTEL_I7505_MCHCFG,
+			      temp2 | (1 << 9));
+	return 0;
+}
+
+
 static unsigned long intel_mask_memory(unsigned long addr, int type)
 {
 	/* Memory type is ignored */
@@ -2126,6 +2754,38 @@
 	(void) pdev; /* unused */
 }
 
+static int __init intel_7505_setup (struct pci_dev *pdev)
+{
+	agp_bridge.masks = intel_generic_masks;
+	agp_bridge.aperture_sizes = (void *) intel_8xx_sizes;
+	agp_bridge.size_type = U8_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = intel_7505_configure;
+	agp_bridge.fetch_size = intel_8xx_fetch_size;
+	agp_bridge.cleanup = intel_8xx_cleanup;
+	agp_bridge.tlb_flush = intel_8xx_tlbflush;
+	agp_bridge.mask_memory = intel_mask_memory;
+	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = agp_generic_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	return 0;
+
+	(void) pdev; /* unused */
+}
+
 #endif /* CONFIG_AGP_INTEL */
 
 #ifdef CONFIG_AGP_VIA
@@ -2744,38 +3404,11 @@
 
 #endif /* CONFIG_AGP_AMD */
 
-#ifdef CONFIG_AGP_AMD_8151
+#ifdef CONFIG_AGP_AMD_K8
 
 /* Begin AMD-8151 support */
 
-static u_int64_t pci_read64 (struct pci_dev *dev, int reg)
-{
-	union {
-		u64 full;
-		struct {
-			u32 high;
-			u32 low;
-		} split;
-	} tmp;
-	pci_read_config_dword(dev, reg, &tmp.split.high);
-	pci_read_config_dword(dev, reg+4, &tmp.split.low);
-	return tmp.full;
-}
-
-static void pci_write64 (struct pci_dev *dev, int reg, u64 value)
-{
-	union {
-		u64 full;
-		struct {
-			u32 high;
-			u32 low;
-		} split;
-	} tmp;
-	tmp.full = value;
-	pci_write_config_dword(dev, reg, tmp.split.high);
-	pci_write_config_dword(dev, reg+4, tmp.split.low);
-}
-
+/* This is misnamed. It is really a generic AMD K8 Northbridge driver. */
 
 static int x86_64_insert_memory(agp_memory * mem, off_t pg_start, int type)
 {
@@ -2783,7 +3416,6 @@
 	void *temp;
 	long tmp;
 	u32 pte;
-	u64 addr;
 
 	temp = agp_bridge.current_size;
 
@@ -2813,9 +3445,8 @@
 	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		addr = agp_bridge.mask_memory(mem->memory[i], mem->type);
+		tmp = agp_bridge.mask_memory(mem->memory[i], mem->type);
 
-		tmp = addr;
 		BUG_ON(tmp & 0xffffff0000000ffc);
 		pte = (tmp & 0x000000ff00000000) >> 28;
 		pte |=(tmp & 0x00000000fffff000);
@@ -2953,10 +3584,7 @@
 
 static int amd_8151_configure(void)
 {
-	struct pci_dev *dev, *hammer=NULL;
-	int current_size;
-	int tmp, tmp2, i;
-	u64 aperbar;
+	struct pci_dev *dev;
 	unsigned long gatt_bus = virt_to_phys(agp_bridge.gatt_table_real);
 
 	/* Configure AGP regs in each x86-64 host bridge. */
@@ -2965,54 +3593,8 @@
 			PCI_FUNC(dev->devfn)==3 &&
 			PCI_SLOT(dev->devfn)>=24 && PCI_SLOT(dev->devfn)<=31) {
 			agp_bridge.gart_bus_addr = amd_x86_64_configure(dev,gatt_bus);
-			hammer = dev;
-
-			/*
-			 * TODO: Cache pci_dev's of x86-64's in private struct to save us
-			 * having to scan the pci list each time.
-			 */
-		}
-	}
-
-	if (hammer == NULL) {
-		return -ENODEV;
 	}
-
-	/* Shadow x86-64 registers into 8151 registers. */
-
-	dev = agp_bridge.dev;
-	if (!dev) 
-		return -ENODEV;
-
-	current_size = amd_x86_64_fetch_size();
-
-	pci_read_config_dword(dev, AMD_8151_APERTURESIZE, &tmp);
-	tmp &= ~(0xfff);
-
-	/* translate x86-64 size bits to 8151 size bits*/
-	for (i=0 ; i<7; i++) {
-		if (amd_8151_sizes[i].size == current_size)
-			tmp |= (amd_8151_sizes[i].size_value) << 3;
 	}
-	pci_write_config_dword(dev, AMD_8151_APERTURESIZE, tmp);
-
-	pci_read_config_dword (hammer, AMD_X86_64_GARTAPERTUREBASE, &tmp);
-	aperbar = pci_read64 (dev, AMD_8151_VMAPERTURE);
-	aperbar |= (tmp & 0x7fff) <<25;
-	aperbar &= 0x000000ffffffffff;
-	aperbar |= 1<<2;	/* This address is a 64bit ptr FIXME: Make conditional in 32bit mode */
-	pci_write64 (dev, AMD_8151_VMAPERTURE, aperbar);
-
-	pci_read_config_dword(dev, AMD_8151_AGP_CTL , &tmp);
-	tmp &= ~(AMD_8151_GTLBEN | AMD_8151_APEREN);
-	
-	pci_read_config_dword(hammer, AMD_X86_64_GARTAPERTURECTL, &tmp2);
-	if (tmp2 & AMD_X86_64_GARTEN)
-		tmp |= AMD_8151_APEREN;
-	// FIXME: bit 7 of AMD_8151_AGP_CTL (GTLBEN) must be copied if set.
-	// But where is it set ?
-	pci_write_config_dword(dev, AMD_8151_AGP_CTL, tmp);
-
 	return 0;
 }
 
@@ -3031,15 +3613,6 @@
 			tmp &= ~(AMD_X86_64_GARTEN);
 			pci_write_config_dword (dev, AMD_X86_64_GARTAPERTURECTL, tmp);
 		}
-
-		/* Now shadow the disable in the 8151 */
-		if (dev->vendor == PCI_VENDOR_ID_AMD &&
-			dev->device == PCI_DEVICE_ID_AMD_8151_0) {
-
-			pci_read_config_dword (dev, AMD_8151_AGP_CTL, &tmp);
-			tmp &= ~(AMD_8151_APEREN);	
-			pci_write_config_dword (dev, AMD_8151_AGP_CTL, tmp);
-		}
 	}
 }
 
@@ -3053,7 +3626,7 @@
 
 static gatt_mask amd_8151_masks[] =
 {
-	{0x00000001, 0}
+	{0, 0}
 };
 
 
@@ -3088,7 +3661,7 @@
 				printk (KERN_INFO "AGP: Found AGPv3 capable device at %d:%d:%d\n",
 					device->bus->number, PCI_FUNC(device->devfn), PCI_SLOT(device->devfn));
 			} else {
-				printk (KERN_INFO "AGP: Meh. version %x AGP device found.\n", scratch);
+				printk (KERN_INFO "AGP: Version %x AGP device found.\n", scratch);
 			}
 		}
 	}
@@ -3207,28 +3780,184 @@
 	(void) pdev; /* unused */
 }
 
-#endif /* CONFIG_AGP_AMD_8151 */
+/* NVIDIA x86-64 chipset support */
+ 
 
-#ifdef CONFIG_AGP_ALI
+static struct _nvidia_x86_64_private {
+	struct pci_dev *dev_1;
+} nvidia_x86_64_private;
 
-static int ali_fetch_size(void)
+
+static aper_size_info_32 nvidia_x86_64_sizes[5] =
 {
-	int i;
-	u32 temp;
-	aper_size_info_32 *values;
+	{512,  131072, 7, 0x00000000 },
+	{256,  65536,  6, 0x00000008 },
+	{128,  32768,  5, 0x0000000C },
+	{64,   16384,  4, 0x0000000E },
+	{32,   8192,   3, 0x0000000F }
+};
 
-	pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
-	temp &= ~(0xfffffff0);
-	values = A_SIZE_32(agp_bridge.aperture_sizes);
 
-	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
-		if (temp == values[i].size_value) {
-			agp_bridge.previous_size =
-			    agp_bridge.current_size = (void *) (values + i);
-			agp_bridge.aperture_size_idx = i;
-			return values[i].size;
-		}
-	}
+static int nvidia_x86_64_configure(void)
+{
+	struct pci_dev *dev, *hammer=NULL;
+	int i, current_size;
+	u32 tmp, apbase, apbar, aplimit;
+	unsigned long gatt_bus = virt_to_phys(agp_bridge.gatt_table_real);
+
+	if (!agp_bridge.dev) 
+		return -ENODEV;
+
+	/* configure AGP regs in each x86-64 host bridge */
+	pci_for_each_dev(dev) {
+		if (dev->bus->number==0 &&
+			PCI_FUNC(dev->devfn)==3 &&
+			PCI_SLOT(dev->devfn)>=24 && PCI_SLOT(dev->devfn)<=31) {
+			agp_bridge.gart_bus_addr = amd_x86_64_configure(dev,gatt_bus);
+			hammer = dev;
+		}
+	}
+	if (hammer == NULL)
+		return -ENODEV;
+
+	/* translate x86-64 aperture size to NVIDIA aperture size */
+	current_size = amd_x86_64_fetch_size();
+	for (i = 0 ; i < agp_bridge.num_aperture_sizes; i++) {
+		if (nvidia_x86_64_sizes[i].size == current_size)
+			break;
+	}
+	/* if x86-64 size does not match any NVIDIA size, exit here */
+	if (i == agp_bridge.num_aperture_sizes)
+		return -ENODEV;
+	pci_read_config_dword(nvidia_x86_64_private.dev_1, NVIDIA_X86_64_1_APSIZE, &tmp);
+	tmp &= ~(0xf);
+	tmp |= nvidia_x86_64_sizes[i].size_value;
+	pci_write_config_dword(nvidia_x86_64_private.dev_1, NVIDIA_X86_64_1_APSIZE, tmp);
+
+	/* shadow x86-64 registers into NVIDIA registers */
+	pci_read_config_dword (hammer, AMD_X86_64_GARTAPERTUREBASE, &apbase);
+	/* if x86-64 aperture base is beyond 4G, exit here */
+	if ( (apbase & 0x7fff) >> (32 - 25) )
+		 return -ENODEV;
+	apbase = (apbase & 0x7fff) << 25;
+
+	pci_read_config_dword(agp_bridge.dev, NVIDIA_X86_64_0_APBASE, &apbar);
+	apbar &= ~PCI_BASE_ADDRESS_MEM_MASK;
+	apbar |= apbase;
+	pci_write_config_dword(agp_bridge.dev, NVIDIA_X86_64_0_APBASE, apbar);
+
+	/* Shadow into secondary device looks dubious, but we keep it for now.
+	   If these two could be dropped then the NForce3 code path could
+	   be just folded into the generic functions above. */
+
+	aplimit = apbase + (current_size * 1024 * 1024) - 1;
+	pci_write_config_dword(nvidia_x86_64_private.dev_1, NVIDIA_X86_64_1_APBASE1, apbase);
+	pci_write_config_dword(nvidia_x86_64_private.dev_1, NVIDIA_X86_64_1_APLIMIT1, aplimit);
+	pci_write_config_dword(nvidia_x86_64_private.dev_1, NVIDIA_X86_64_1_APBASE2, apbase);
+	pci_write_config_dword(nvidia_x86_64_private.dev_1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
+
+	/* Original driver updated the IORR here, but AMD documentation
+	   explicitly discourages this for something already covered by the GART. */
+	
+	return 0;
+}
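
The K8 GART aperture base register holds the base in 32MB units in its low 15 bits, so
shifting the masked value left by 25 produces the bus address; an index needing more than
7 bits would put the aperture at or above 4GB, which the 32-bit NVIDIA shadow registers
cannot express, hence the early exit. A sketch with a made-up register value:

	/* Hypothetical sketch of the K8 aperture base -> bus address step above. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t gartaperturebase = 0x00000060;		/* assumed register value */
		uint32_t idx = gartaperturebase & 0x7fff;	/* base index, 32MB units */

		if (idx >> (32 - 25)) {
			/* an index of 128 or more means the aperture sits at or
			 * above 4GB, beyond the 32-bit NVIDIA shadow registers */
			printf("aperture above 4GB, giving up\n");
			return 1;
		}

		/* prints "aperture base = 0xc0000000" (0x60 * 32MB) */
		printf("aperture base = %#lx\n", (unsigned long) idx << 25);
		return 0;
	}
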
+
+
+static void nvidia_x86_64_cleanup(void)
+{
+	struct pci_dev *dev;
+	u32 tmp;
+
+	pci_for_each_dev(dev) {
+		/* disable gart translation */
+		if (dev->bus->number==0 && PCI_FUNC(dev->devfn)==3 &&
+		    (PCI_SLOT(dev->devfn) >=24) && (PCI_SLOT(dev->devfn) <=31)) {
+
+			pci_read_config_dword (dev, AMD_X86_64_GARTAPERTURECTL, &tmp);
+			tmp &= ~(AMD_X86_64_GARTEN);
+			pci_write_config_dword (dev, AMD_X86_64_GARTAPERTURECTL, tmp);
+		}
+	}
+}
+
+
+static unsigned long nvidia_x86_64_mask_memory(unsigned long addr, int type)
+{
+	return addr | agp_bridge.masks[0].mask;
+}
+
+
+static gatt_mask nvidia_x86_64_masks[] =
+{
+	{0x00000001, 0}
+};
+
+
+static int __init nvidia_x86_64_setup (struct pci_dev *pdev)
+{
+	nvidia_x86_64_private.dev_1 =
+		pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(11, 0));
+
+	if (nvidia_x86_64_private.dev_1 == NULL) {
+		printk(KERN_INFO PFX "agpgart: Detected an NVIDIA "
+			"nForce3 chipset, but could not find "
+			"the secondary device.\n");
+		agp_bridge.type = NOT_SUPPORTED;
+		return -ENODEV;
+	}
+
+	agp_bridge.masks = nvidia_x86_64_masks;
+	agp_bridge.aperture_sizes = (void *) nvidia_x86_64_sizes;
+	agp_bridge.size_type = U32_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 5;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = nvidia_x86_64_configure;
+	agp_bridge.fetch_size = amd_x86_64_fetch_size;
+	agp_bridge.cleanup = nvidia_x86_64_cleanup;
+	agp_bridge.tlb_flush = amd_x86_64_tlbflush;
+	agp_bridge.mask_memory = nvidia_x86_64_mask_memory;
+	agp_bridge.agp_enable = agp_x86_64_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = x86_64_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+
+	return 0;
+	
+	(void) pdev; /* unused */
+}
+
+#endif /* CONFIG_AGP_AMD_K8 */
+
+#ifdef CONFIG_AGP_ALI
+
+static int ali_fetch_size(void)
+{
+	int i;
+	u32 temp;
+	aper_size_info_32 *values;
+
+	pci_read_config_dword(agp_bridge.dev, ALI_ATTBASE, &temp);
+	temp &= ~(0xfffffff0);
+	values = A_SIZE_32(agp_bridge.aperture_sizes);
+
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge.previous_size =
+			    agp_bridge.current_size = (void *) (values + i);
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
 
 	return 0;
 }
@@ -4130,7 +4859,8 @@
 
 static int nvidia_configure(void)
 {
-	int i, rc, num_dirs;
+	int err;
+	int i, num_dirs;
 	u32 apbase, aplimit;
 	aper_size_info_8 *current_size;
 	u32 temp;
@@ -4141,7 +4871,7 @@
 	pci_write_config_byte(agp_bridge.dev, NVIDIA_0_APSIZE,
 		current_size->size_value);
 
-    /* address to map to */
+	/* address to map to */
 	pci_read_config_dword(agp_bridge.dev, NVIDIA_0_APBASE, &apbase);
 	apbase &= PCI_BASE_ADDRESS_MEM_MASK;
 	agp_bridge.gart_bus_addr = apbase;
@@ -4150,8 +4880,9 @@
 	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APLIMIT, aplimit);
 	pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APBASE, apbase);
 	pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APLIMIT, aplimit);
-	if (0 != (rc = nvidia_init_iorr(apbase, current_size->size * 1024 * 1024)))
-		return rc;
+	err = nvidia_init_iorr(apbase, current_size->size * 1024 * 1024);
+	if (err) 
+		return err;
 
 	/* directory size is 64k */
 	num_dirs = current_size->size / 64;
@@ -4379,6 +5110,8 @@
 #define log2(x)		ffz(~(x))
 #endif
 
+#define HP_ZX1_IOC_OFFSET	0x1000  /* ACPI reports SBA, we want IOC */
+
 #define HP_ZX1_IOVA_BASE	GB(1UL)
 #define HP_ZX1_IOVA_SIZE	GB(1UL)
 #define HP_ZX1_GART_SIZE	(HP_ZX1_IOVA_SIZE / 2)
@@ -4388,19 +5121,16 @@
 #define HP_ZX1_IOVA_TO_PDIR(va)	((va - hp_private.iova_base) >> \
 					hp_private.io_tlb_shift)
 
+static int hp_zx1_gart_found;
+
 static aper_size_info_fixed hp_zx1_sizes[] =
 {
 	{0, 0, 0},		/* filled in by hp_zx1_fetch_size() */
 };
 
-static gatt_mask hp_zx1_masks[] =
-{
-	{HP_ZX1_PDIR_VALID_BIT, 0}
-};
-
 static struct _hp_private {
-	struct pci_dev *ioc;
-	volatile u8 *registers;
+	volatile u8 *ioc_regs;
+	volatile u8 *lba_regs;
 	u64 *io_pdir;		// PDIR for entire IOVA
 	u64 *gatt;		// PDIR just for GART (subset of above)
 	u64 gatt_entries;
@@ -4427,7 +5157,7 @@
 	 * 	- IOVA space is 1Gb in size
 	 * 	- first 512Mb is IOMMU, second 512Mb is GART
 	 */
-	hp->io_tlb_ps = INREG64(hp->registers, HP_ZX1_TCNFG);
+	hp->io_tlb_ps = INREG64(hp->ioc_regs, HP_ZX1_TCNFG);
 	switch (hp->io_tlb_ps) {
 		case 0: hp->io_tlb_shift = 12; break;
 		case 1: hp->io_tlb_shift = 13; break;
@@ -4443,13 +5173,13 @@
 	hp->io_page_size = 1 << hp->io_tlb_shift;
 	hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
 
-	hp->iova_base = INREG64(hp->registers, HP_ZX1_IBASE) & ~0x1;
+	hp->iova_base = INREG64(hp->ioc_regs, HP_ZX1_IBASE) & ~0x1;
 	hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
 
 	hp->gart_size = HP_ZX1_GART_SIZE;
 	hp->gatt_entries = hp->gart_size / hp->io_page_size;
 
-	hp->io_pdir = phys_to_virt(INREG64(hp->registers, HP_ZX1_PDIR_BASE));
+	hp->io_pdir = phys_to_virt(INREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE));
 	hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
 
 	if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
@@ -4463,7 +5193,7 @@
 	return 0;
 }
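
With the 1GB IOVA window defined above and the GART occupying its upper half,
HP_ZX1_IOVA_TO_PDIR() maps the GART base to PDIR slot 131072 when 4KB IO pages are in use;
a small sketch of that arithmetic:

	/* Hypothetical sketch of the IOVA -> PDIR index mapping above. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t iova_base = 1ULL << 30;	/* HP_ZX1_IOVA_BASE: 1GB */
		uint64_t iova_size = 1ULL << 30;	/* HP_ZX1_IOVA_SIZE: 1GB */
		uint64_t gart_base = iova_base + iova_size - iova_size / 2;
		unsigned int io_tlb_shift = 12;		/* 4KB IO pages */

		/* The GART is the upper half of the IOVA window, so its first
		 * PDIR slot is 512MB / 4KB = 131072. */
		printf("first GART PDIR index = %llu\n",
		       (unsigned long long) ((gart_base - iova_base) >> io_tlb_shift));
		return 0;
	}
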
 
-static int __init hp_zx1_ioc_owner(u8 ioc_rev)
+static int __init hp_zx1_ioc_owner(void)
 {
 	struct _hp_private *hp = &hp_private;
 
@@ -4498,44 +5228,21 @@
 	return 0;
 }
 
-static int __init hp_zx1_ioc_init(void)
+static int __init hp_zx1_ioc_init(u64 ioc_hpa, u64 lba_hpa)
 {
 	struct _hp_private *hp = &hp_private;
-	struct pci_dev *ioc;
-	int i;
-	u8 ioc_rev;
-
-	ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL);
-	if (!ioc) {
-		printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no IOC\n");
-		return -ENODEV;
-	}
-	hp->ioc = ioc;
 
-	pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev);
-
-	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-		if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) {
-			hp->registers = (u8 *) ioremap(pci_resource_start(ioc,
-									    i),
-						    pci_resource_len(ioc, i));
-			break;
-		}
-	}
-	if (!hp->registers) {
-		printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no CSRs\n");
-
-		return -ENODEV;
-	}
+	hp->ioc_regs = ioremap(ioc_hpa, 1024);
+	hp->lba_regs = ioremap(lba_hpa, 256);
 
 	/*
 	 * If the IOTLB is currently disabled, we can take it over.
 	 * Otherwise, we have to share with sba_iommu.
 	 */
-	hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0;
+	hp->io_pdir_owner = (INREG64(hp->ioc_regs, HP_ZX1_IBASE) & 0x1) == 0;
 
 	if (hp->io_pdir_owner)
-		return hp_zx1_ioc_owner(ioc_rev);
+		return hp_zx1_ioc_owner();
 
 	return hp_zx1_ioc_shared();
 }
@@ -4555,185 +5262,659 @@
 	struct _hp_private *hp = &hp_private;
 
 	agp_bridge.gart_bus_addr = hp->gart_base;
-	agp_bridge.capndx = pci_find_capability(agp_bridge.dev, PCI_CAP_ID_AGP);
-	pci_read_config_dword(agp_bridge.dev,
-		agp_bridge.capndx + PCI_AGP_STATUS, &agp_bridge.mode);
+	agp_bridge.mode = INREG32(hp->lba_regs, HP_ZX1_AGP_STATUS);
 
 	if (hp->io_pdir_owner) {
-		OUTREG64(hp->registers, HP_ZX1_PDIR_BASE,
+		OUTREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE,
 			virt_to_phys(hp->io_pdir));
-		OUTREG64(hp->registers, HP_ZX1_TCNFG, hp->io_tlb_ps);
-		OUTREG64(hp->registers, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
-		OUTREG64(hp->registers, HP_ZX1_IBASE, hp->iova_base | 0x1);
-		OUTREG64(hp->registers, HP_ZX1_PCOM,
+		OUTREG64(hp->ioc_regs, HP_ZX1_TCNFG, hp->io_tlb_ps);
+		OUTREG64(hp->ioc_regs, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
+		OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, hp->iova_base | 0x1);
+		OUTREG64(hp->ioc_regs, HP_ZX1_PCOM,
 			hp->iova_base | log2(HP_ZX1_IOVA_SIZE));
-		INREG64(hp->registers, HP_ZX1_PCOM);
+		INREG64(hp->ioc_regs, HP_ZX1_PCOM);
+	}
+
+	return 0;
+}
+
+static void hp_zx1_cleanup(void)
+{
+	struct _hp_private *hp = &hp_private;
+
+	if (hp->io_pdir_owner)
+		OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, 0);
+	iounmap((void *) hp->ioc_regs);
+	iounmap((void *) hp->lba_regs);
+}
+
+static void hp_zx1_tlbflush(agp_memory * mem)
+{
+	struct _hp_private *hp = &hp_private;
+
+	OUTREG64(hp->ioc_regs, HP_ZX1_PCOM, 
+		hp->gart_base | log2(hp->gart_size));
+	INREG64(hp->ioc_regs, HP_ZX1_PCOM);
+}
+
+static int hp_zx1_create_gatt_table(void)
+{
+	struct _hp_private *hp = &hp_private;
+	int i;
+
+	if (hp->io_pdir_owner) {
+		hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
+						get_order(hp->io_pdir_size));
+		if (!hp->io_pdir) {
+			printk(KERN_ERR PFX "Couldn't allocate contiguous "
+				"memory for I/O PDIR\n");
+			hp->gatt = 0;
+			hp->gatt_entries = 0;
+			return -ENOMEM;
+		}
+		memset(hp->io_pdir, 0, hp->io_pdir_size);
+
+		hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
+	}
+
+	for (i = 0; i < hp->gatt_entries; i++) {
+		hp->gatt[i] = (unsigned long) agp_bridge.scratch_page;
+	}
+
+	return 0;
+}
+
+static int hp_zx1_free_gatt_table(void)
+{
+	struct _hp_private *hp = &hp_private;
+	
+	if (hp->io_pdir_owner)
+		free_pages((unsigned long) hp->io_pdir,
+			    get_order(hp->io_pdir_size));
+	else
+		hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
+	return 0;
+}
+
+static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type)
+{
+	struct _hp_private *hp = &hp_private;
+	int i, k;
+	off_t j, io_pg_start;
+	int io_pg_count;
+
+	if (type != 0 || mem->type != 0) {
+		return -EINVAL;
+	}
+
+	io_pg_start = hp->io_pages_per_kpage * pg_start;
+	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
+	if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
+		return -EINVAL;
+	}
+
+	j = io_pg_start;
+	while (j < (io_pg_start + io_pg_count)) {
+		if (hp->gatt[j]) {
+			return -EBUSY;
+		}
+		j++;
+	}
+
+	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
+		unsigned long paddr;
+
+		paddr = mem->memory[i];
+		for (k = 0;
+		     k < hp->io_pages_per_kpage;
+		     k++, j++, paddr += hp->io_page_size) {
+			hp->gatt[j] = agp_bridge.mask_memory(paddr, type);
+		}
+	}
+
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+static int hp_zx1_remove_memory(agp_memory * mem, off_t pg_start, int type)
+{
+	struct _hp_private *hp = &hp_private;
+	int i, io_pg_start, io_pg_count;
+
+	if (type != 0 || mem->type != 0) {
+		return -EINVAL;
+	}
+
+	io_pg_start = hp->io_pages_per_kpage * pg_start;
+	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
+	for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
+		hp->gatt[i] = agp_bridge.scratch_page;
+	}
+
+	agp_bridge.tlb_flush(mem);
+	return 0;
+}
+
+static unsigned long hp_zx1_mask_memory(unsigned long addr, int type)
+{
+	return HP_ZX1_PDIR_VALID_BIT | addr;
+}
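
A ZX1 PDIR entry is just the IO-page-aligned physical address with a valid bit set high in
the 64-bit word (the bit position below is assumed; see HP_ZX1_PDIR_VALID_BIT in agp.h), and
hp_zx1_insert_memory() writes io_pages_per_kpage consecutive entries per kernel page. A sketch:

	/* Hypothetical sketch of the PDIR fill done by hp_zx1_insert_memory(). */
	#include <stdio.h>
	#include <stdint.h>

	#define PDIR_VALID_BIT	0x8000000000000000ULL	/* assumed */

	int main(void)
	{
		unsigned int io_page_size = 4096;	/* as reported by HP_ZX1_TCNFG */
		unsigned int kpage_size = 16384;	/* 16KB ia64 kernel page */
		unsigned int io_pages_per_kpage = kpage_size / io_page_size;
		uint64_t paddr = 0x40000000ULL;		/* one kernel page to map */
		uint64_t gatt[4];
		unsigned int k;

		/* One 16KB kernel page becomes four consecutive 4KB PDIR entries. */
		for (k = 0; k < io_pages_per_kpage; k++, paddr += io_page_size)
			gatt[k] = PDIR_VALID_BIT | paddr;

		for (k = 0; k < io_pages_per_kpage; k++)
			printf("gatt[%u] = %#llx\n", k, (unsigned long long) gatt[k]);
		return 0;
	}
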
+
+static void hp_zx1_agp_enable(u32 mode)
+{
+	struct _hp_private *hp = &hp_private;
+	u32 command;
+
+	command = INREG32(hp->lba_regs, HP_ZX1_AGP_STATUS);
+
+	command = agp_collect_device_status(mode, command);
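+	/* bit 8 of the AGP command word is the AGP enable bit */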
+	command |= 0x00000100;
+
+	OUTREG32(hp->lba_regs, HP_ZX1_AGP_COMMAND, command);
+
+	agp_device_command(command, 0);
+}
+
+static int __init hp_zx1_setup(u64 ioc_hpa, u64 lba_hpa)
+{
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.size_type = FIXED_APER_SIZE;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.configure = hp_zx1_configure;
+	agp_bridge.fetch_size = hp_zx1_fetch_size;
+	agp_bridge.cleanup = hp_zx1_cleanup;
+	agp_bridge.tlb_flush = hp_zx1_tlbflush;
+	agp_bridge.mask_memory = hp_zx1_mask_memory;
+	agp_bridge.agp_enable = hp_zx1_agp_enable;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = hp_zx1_create_gatt_table;
+	agp_bridge.free_gatt_table = hp_zx1_free_gatt_table;
+	agp_bridge.insert_memory = hp_zx1_insert_memory;
+	agp_bridge.remove_memory = hp_zx1_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.cant_use_aperture = 1;
+	agp_bridge.type = HP_ZX1;
+
+	fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
+	fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA;
+
+	return hp_zx1_ioc_init(ioc_hpa, lba_hpa);
+}
+
+static acpi_status __init hp_zx1_gart_probe(acpi_handle obj, u32 depth, void *context, void **ret)
+{
+	acpi_handle handle, parent;
+	acpi_status status;
+	struct acpi_buffer buffer;
+	struct acpi_device_info *info;
+	u64 lba_hpa, sba_hpa, length;
+	int match;
+
+	status = acpi_hp_csr_space(obj, &lba_hpa, &length);
+	if (ACPI_FAILURE(status))
+		return AE_OK;
+
+	/* Look for an enclosing IOC scope and find its CSR space */
+	handle = obj;
+	do {
+		buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+		status = acpi_get_object_info(handle, &buffer);
+		if (ACPI_SUCCESS(status)) {
+			/* TBD check _CID also */
+			info = buffer.pointer;
+			info->hardware_id.value[sizeof(info->hardware_id)-1] = '\0';
+			match = (strcmp(info->hardware_id.value, "HWP0001") == 0);
+			ACPI_MEM_FREE(info);
+			if (match) {
+				status = acpi_hp_csr_space(handle, &sba_hpa, &length);
+				if (ACPI_SUCCESS(status))
+					break;
+				else {
+					printk(KERN_ERR PFX "Detected HP ZX1 "
+					       "AGP LBA but no IOC.\n");
+					return AE_OK;
+				}
+			}
+		}
+
+		status = acpi_get_parent(handle, &parent);
+		handle = parent;
+	} while (ACPI_SUCCESS(status));
+
+	if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
+		return AE_OK;
+
+	printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset (ioc=%lx, lba=%lx)\n",
+		(char *) context, sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
+
+	hp_zx1_gart_found = 1;
+	return AE_CTRL_TERMINATE;
+}
+
+static int __init
+hp_zx1_gart_init(void)
+{
+	acpi_get_devices("HWP0003", hp_zx1_gart_probe, "HWP0003", NULL);
+	if (hp_zx1_gart_found)
+		return 0;
+
+	acpi_get_devices("HWP0007", hp_zx1_gart_probe, "HWP0007", NULL);
+	if (hp_zx1_gart_found)
+		return 0;
+
+	return -ENODEV;
+}
+
+#endif /* CONFIG_AGP_HP_ZX1 */
+
+#ifdef CONFIG_AGP_ATI
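+/* Each entry: { aperture size in MB, number of GATT entries, APSIZE value } */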
+static aper_size_info_lvl2 ati_generic_sizes[7] =
+{
+	{2048, 524288, 0x0000000c},
+	{1024, 262144, 0x0000000a},
+	{512, 131072, 0x00000008},
+	{256, 65536, 0x00000006},
+	{128, 32768, 0x00000004},
+	{64, 16384, 0x00000002},
+	{32, 8192, 0x00000000}
+};
+
+static gatt_mask ati_generic_masks[] =
+{
+	{0x00000001, 0}
+};
+
+typedef struct _ati_page_map {
+	unsigned long *real;
+	unsigned long *remapped;
+} ati_page_map;
+
+static struct _ati_generic_private {
+	volatile u8 *registers;
+	ati_page_map **gatt_pages;
+	int num_tables;
+} ati_generic_private;
+
+static int ati_create_page_map(ati_page_map *page_map)
+{
+	int i, err = 0;
+
+	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
+	if (page_map->real == NULL)
+		return -ENOMEM;
+
+	SetPageReserved(virt_to_page(page_map->real));
+	/*
+	 * fredi - WARNING: added after looking at the changes made
+	 * during 2.4.20. I don't know if it's needed, though.
+	 */
+#ifdef CONFIG_X86
+	err = change_page_attr(virt_to_page(page_map->real), 1, PAGE_KERNEL_NOCACHE);
+#endif
+	CACHE_FLUSH();
+	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
+					    PAGE_SIZE);
+	if (page_map->remapped == NULL || err) {
+		ClearPageReserved(virt_to_page(page_map->real));
+		free_page((unsigned long) page_map->real);
+		page_map->real = NULL;
+		return -ENOMEM;
+	}
+	CACHE_FLUSH();
+
+	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
+		page_map->remapped[i] = agp_bridge.scratch_page;
+
+	return 0;
+}
+
+static void ati_free_page_map(ati_page_map *page_map)
+{
+	/*
+	 * fredi - WARNING: added after looking at the changes made
+	 * during 2.4.20. I don't know if it's needed, though.
+	 */
+#ifdef CONFIG_X86
+	change_page_attr(virt_to_page(page_map->real), 1, PAGE_KERNEL);
+#endif
+	iounmap(page_map->remapped);
+	ClearPageReserved(virt_to_page(page_map->real));
+	free_page((unsigned long) page_map->real);
+}
+
+static void ati_free_gatt_pages(void)
+{
+	int i;
+	ati_page_map **tables;
+	ati_page_map *entry;
+
+	tables = ati_generic_private.gatt_pages;
+	for(i = 0; i < ati_generic_private.num_tables; i++) {
+		entry = tables[i];
+		if (entry != NULL) {
+			if (entry->real != NULL)
+				ati_free_page_map(entry);
+			kfree(entry);
+		}
+	}
+	kfree(tables);
+}
+
+static int ati_create_gatt_pages(int nr_tables)
+{
+	ati_page_map **tables;
+	ati_page_map *entry;
+	int retval = 0;
+	int i;
+
+	tables = kmalloc((nr_tables + 1) * sizeof(ati_page_map *),
+			 GFP_KERNEL);
+	if (tables == NULL)
+		return -ENOMEM;
+
+	memset(tables, 0, sizeof(ati_page_map *) * (nr_tables + 1));
+	for (i = 0; i < nr_tables; i++) {
+		entry = kmalloc(sizeof(ati_page_map), GFP_KERNEL);
+		if (entry == NULL) {
+			retval = -ENOMEM;
+			break;
+		}
+		memset(entry, 0, sizeof(ati_page_map));
+		tables[i] = entry;
+		retval = ati_create_page_map(entry);
+		if (retval != 0) break;
+	}
+	ati_generic_private.num_tables = nr_tables;
+	ati_generic_private.gatt_pages = tables;
+
+	if (retval != 0) ati_free_gatt_pages();
+
+	return retval;
+}
+
+/*
+ * Since we don't need contiguous memory we just try
+ * to get the gatt table once.
+ */
+
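+/*
+ * Two-level GATT: bits 31:22 of a GART bus address index the page
+ * directory, bits 21:12 index the 1024-entry GATT page it points to,
+ * so each directory entry covers 4MB of aperture.
+ */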
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
+	GET_PAGE_DIR_OFF(agp_bridge.gart_bus_addr))
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#undef  GET_GATT
+#define GET_GATT(addr) (ati_generic_private.gatt_pages[\
+	GET_PAGE_DIR_IDX(addr)]->remapped)
+
+static int ati_insert_memory(agp_memory * mem,
+			     off_t pg_start, int type)
+{
+	int i, j, num_entries;
+	unsigned long *cur_gatt;
+	unsigned long addr;
+
+	num_entries = A_SIZE_LVL2(agp_bridge.current_size)->num_entries;
+
+	if (type != 0 || mem->type != 0)
+		return -EINVAL;
+
+	if ((pg_start + mem->page_count) > num_entries)
+		return -EINVAL;
+
+	j = pg_start;
+	while (j < (pg_start + mem->page_count)) {
+		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		if (!PGE_EMPTY(cur_gatt[GET_GATT_OFF(addr)]))
+			return -EBUSY;
+		j++;
+	}
+
+	if (mem->is_flushed == FALSE) {
+		CACHE_FLUSH();
+		mem->is_flushed = TRUE;
+	}
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		cur_gatt[GET_GATT_OFF(addr)] =
+			agp_bridge.mask_memory(mem->memory[i], mem->type);
 	}
-
+	agp_bridge.tlb_flush(mem);
 	return 0;
 }
 
-static void hp_zx1_cleanup(void)
+static int ati_remove_memory(agp_memory * mem, off_t pg_start,
+			     int type)
 {
-	struct _hp_private *hp = &hp_private;
-
-	if (hp->io_pdir_owner)
-		OUTREG64(hp->registers, HP_ZX1_IBASE, 0);
-	iounmap((void *) hp->registers);
-}
+	int i;
+	unsigned long *cur_gatt;
+	unsigned long addr;
 
-static void hp_zx1_tlbflush(agp_memory * mem)
-{
-	struct _hp_private *hp = &hp_private;
+	if (type != 0 || mem->type != 0) {
+		return -EINVAL;
+	}
+	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+		addr = (i * PAGE_SIZE) + agp_bridge.gart_bus_addr;
+		cur_gatt = GET_GATT(addr);
+		cur_gatt[GET_GATT_OFF(addr)] =
+			(unsigned long) agp_bridge.scratch_page;
+	}
 
-	OUTREG64(hp->registers, HP_ZX1_PCOM, 
-		hp->gart_base | log2(hp->gart_size));
-	INREG64(hp->registers, HP_ZX1_PCOM);
+	agp_bridge.tlb_flush(mem);
+	return 0;
 }
 
-static int hp_zx1_create_gatt_table(void)
+static int ati_create_gatt_table(void)
 {
-	struct _hp_private *hp = &hp_private;
+	aper_size_info_lvl2 *value;
+	ati_page_map page_dir;
+	unsigned long addr;
+	int retval;
+	u32 temp;
 	int i;
+	aper_size_info_lvl2 *current_size;
 
-	if (hp->io_pdir_owner) {
-		hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
-						get_order(hp->io_pdir_size));
-		if (!hp->io_pdir) {
-			printk(KERN_ERR PFX "Couldn't allocate contiguous "
-				"memory for I/O PDIR\n");
-			hp->gatt = 0;
-			hp->gatt_entries = 0;
-			return -ENOMEM;
-		}
-		memset(hp->io_pdir, 0, hp->io_pdir_size);
+	value = A_SIZE_LVL2(agp_bridge.current_size);
+	retval = ati_create_page_map(&page_dir);
+	if (retval != 0)
+		return retval;
 
-		hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
+	retval = ati_create_gatt_pages(value->num_entries / 1024);
+	if (retval != 0) {
+		ati_free_page_map(&page_dir);
+		return retval;
 	}
 
-	for (i = 0; i < hp->gatt_entries; i++) {
-		hp->gatt[i] = (unsigned long) agp_bridge.scratch_page;
+	agp_bridge.gatt_table_real = (u32 *)page_dir.real;
+	agp_bridge.gatt_table = (u32 *)page_dir.remapped;
+	agp_bridge.gatt_bus_addr = virt_to_bus(page_dir.real);
+
+	/* Write out the size register */
+	current_size = A_SIZE_LVL2(agp_bridge.current_size);
+
+	if ((agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS100) ||
+	    (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS200) ||
+	    (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS250)) {
+		pci_read_config_dword(agp_bridge.dev, ATI_RS100_APSIZE, &temp);
+		temp = (((temp & ~(0x0000000e)) | current_size->size_value)
+			| 0x00000001);
+		pci_write_config_dword(agp_bridge.dev, ATI_RS100_APSIZE, temp);
+		pci_read_config_dword(agp_bridge.dev, ATI_RS100_APSIZE, &temp);
+	} else {
+		pci_read_config_dword(agp_bridge.dev, ATI_RS300_APSIZE, &temp);
+		temp = (((temp & ~(0x0000000e)) | current_size->size_value)
+			| 0x00000001);
+		pci_write_config_dword(agp_bridge.dev, ATI_RS300_APSIZE, temp);
+		pci_read_config_dword(agp_bridge.dev, ATI_RS300_APSIZE, &temp);
+	}
+
+	/*
+	 * Get the address for the gart region.
+	 * This is a bus address even on the Alpha, because it's
+	 * used to program the AGP master, not the CPU.
+	 */
+	pci_read_config_dword(agp_bridge.dev, ATI_APBASE, &temp);
+	addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	agp_bridge.gart_bus_addr = addr;
+
+	/* Fill in the page directory: one entry per 4MB of aperture,
+	 * with the low bit set to mark the entry valid */
+	for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
+		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] =
+			virt_to_bus(ati_generic_private.gatt_pages[i]->real);
+		page_dir.remapped[GET_PAGE_DIR_OFF(addr)] |= 0x00000001;
 	}
 
 	return 0;
 }
 
-static int hp_zx1_free_gatt_table(void)
+static int ati_free_gatt_table(void)
 {
-	struct _hp_private *hp = &hp_private;
-	
-	if (hp->io_pdir_owner)
-		free_pages((unsigned long) hp->io_pdir,
-			    get_order(hp->io_pdir_size));
-	else
-		hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
+	ati_page_map page_dir;
+
+	page_dir.real = (unsigned long *)agp_bridge.gatt_table_real;
+	page_dir.remapped = (unsigned long *)agp_bridge.gatt_table;
+
+	ati_free_gatt_pages();
+	ati_free_page_map(&page_dir);
 	return 0;
 }
 
-static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type)
+static int ati_fetch_size(void)
 {
-	struct _hp_private *hp = &hp_private;
-	int i, k;
-	off_t j, io_pg_start;
-	int io_pg_count;
+	int i;
+	u32 temp;
+	aper_size_info_lvl2 *values;
 
-	if (type != 0 || mem->type != 0) {
-		return -EINVAL;
+	if ((agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS100) ||
+	    (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS200) ||
+	    (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS250)) {
+		pci_read_config_dword(agp_bridge.dev, ATI_RS100_APSIZE, &temp);
+	} else {
+		pci_read_config_dword(agp_bridge.dev, ATI_RS300_APSIZE, &temp);
 	}
 
-	io_pg_start = hp->io_pages_per_kpage * pg_start;
-	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
-	if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
-		return -EINVAL;
-	}
+	temp = (temp & 0x0000000e);
+	values = A_SIZE_LVL2(agp_bridge.aperture_sizes);
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge.previous_size =
+			    agp_bridge.current_size = (void *) (values + i);
 
-	j = io_pg_start;
-	while (j < (io_pg_start + io_pg_count)) {
-		if (hp->gatt[j]) {
-			return -EBUSY;
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
 		}
-		j++;
 	}
 
-	if (mem->is_flushed == FALSE) {
-		CACHE_FLUSH();
-		mem->is_flushed = TRUE;
-	}
+	return 0;
+}
 
-	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
-		unsigned long paddr;
+static int ati_configure(void)
+{
+	u32 temp;
 
-		paddr = mem->memory[i];
-		for (k = 0;
-		     k < hp->io_pages_per_kpage;
-		     k++, j++, paddr += hp->io_page_size) {
-			hp->gatt[j] = agp_bridge.mask_memory(paddr, type);
-		}
+	/* Get the memory mapped registers */
+	pci_read_config_dword(agp_bridge.dev, ATI_GART_MMBASE_ADDR, &temp);
+	temp = (temp & 0xfffff000);
+	ati_generic_private.registers = (volatile u8 *) ioremap(temp, 4096);
+
+	if ((agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS100) ||
+	    (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS200) ||
+	    (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS250)) {
+        	pci_write_config_dword(agp_bridge.dev, ATI_RS100_IG_AGPMODE, 0x20000);
+	} else {
+		pci_write_config_dword(agp_bridge.dev, ATI_RS300_IG_AGPMODE, 0x20000);
 	}
 
-	agp_bridge.tlb_flush(mem);
+	/* address to map to */
+        /*
+	pci_read_config_dword(agp_bridge.dev, ATI_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	printk(KERN_INFO "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr);
+        */
+	OUTREG32(ati_generic_private.registers, ATI_GART_FEATURE_ID, 0x60000);
+
+	/* SIGNALED_SYSTEM_ERROR @ NB_STATUS */
+	pci_read_config_dword(agp_bridge.dev, 4, &temp);
+	pci_write_config_dword(agp_bridge.dev, 4, temp | (1<<14));
+
+	/* Write out the address of the gatt table */
+	OUTREG32(ati_generic_private.registers, ATI_GART_BASE,
+		 agp_bridge.gatt_bus_addr);
+
+	/* Flush the tlb */
+	OUTREG32(ati_generic_private.registers, ATI_GART_CACHE_CNTRL, 1);
 	return 0;
 }
 
-static int hp_zx1_remove_memory(agp_memory * mem, off_t pg_start, int type)
+static void ati_cleanup(void)
 {
-	struct _hp_private *hp = &hp_private;
-	int i, io_pg_start, io_pg_count;
+	aper_size_info_lvl2 *previous_size;
+	u32 temp;
 
-	if (type != 0 || mem->type != 0) {
-		return -EINVAL;
-	}
+	previous_size = A_SIZE_LVL2(agp_bridge.previous_size);
 
-	io_pg_start = hp->io_pages_per_kpage * pg_start;
-	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
-	for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
-		hp->gatt[i] = agp_bridge.scratch_page;
+	/* Write back the previous size and disable gart translation */
+	if ((agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS100) ||
+	    (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS200) ||
+	    (agp_bridge.dev->device == PCI_DEVICE_ID_ATI_RS250)) {
+		pci_read_config_dword(agp_bridge.dev, ATI_RS100_APSIZE, &temp);
+		temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+		pci_write_config_dword(agp_bridge.dev, ATI_RS100_APSIZE, temp);
+	} else {
+		pci_read_config_dword(agp_bridge.dev, ATI_RS300_APSIZE, &temp);
+		temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
+		pci_write_config_dword(agp_bridge.dev, ATI_RS300_APSIZE, temp);
 	}
-
-	agp_bridge.tlb_flush(mem);
-	return 0;
+	iounmap((void *) ati_generic_private.registers);
 }
 
-static unsigned long hp_zx1_mask_memory(unsigned long addr, int type)
+static void ati_tlbflush(agp_memory * mem)
 {
-	return HP_ZX1_PDIR_VALID_BIT | addr;
+	OUTREG32(ati_generic_private.registers, ATI_GART_CACHE_CNTRL, 1);
 }
 
-static unsigned long hp_zx1_unmask_memory(unsigned long addr)
+static unsigned long ati_mask_memory(unsigned long addr, int type)
 {
-	return addr & ~(HP_ZX1_PDIR_VALID_BIT);
+	/* Memory type is ignored */
+	return addr | agp_bridge.masks[0].mask;
 }
 
-static int __init hp_zx1_setup (struct pci_dev *pdev)
+static int __init ati_generic_setup (struct pci_dev *pdev)
 {
-	agp_bridge.masks = hp_zx1_masks;
-	agp_bridge.dev_private_data = NULL;
-	agp_bridge.size_type = FIXED_APER_SIZE;
+	agp_bridge.masks = ati_generic_masks;
+	agp_bridge.aperture_sizes = (void *) ati_generic_sizes;
+	agp_bridge.size_type = LVL2_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 7;
+	agp_bridge.dev_private_data = (void *) &ati_generic_private;
 	agp_bridge.needs_scratch_page = FALSE;
-	agp_bridge.configure = hp_zx1_configure;
-	agp_bridge.fetch_size = hp_zx1_fetch_size;
-	agp_bridge.cleanup = hp_zx1_cleanup;
-	agp_bridge.tlb_flush = hp_zx1_tlbflush;
-	agp_bridge.mask_memory = hp_zx1_mask_memory;
-	agp_bridge.unmask_memory = hp_zx1_unmask_memory;
+	agp_bridge.configure = ati_configure;
+	agp_bridge.fetch_size = ati_fetch_size;
+	agp_bridge.cleanup = ati_cleanup;
+	agp_bridge.tlb_flush = ati_tlbflush;
+	agp_bridge.mask_memory = ati_mask_memory;
 	agp_bridge.agp_enable = agp_generic_agp_enable;
 	agp_bridge.cache_flush = global_cache_flush;
-	agp_bridge.create_gatt_table = hp_zx1_create_gatt_table;
-	agp_bridge.free_gatt_table = hp_zx1_free_gatt_table;
-	agp_bridge.insert_memory = hp_zx1_insert_memory;
-	agp_bridge.remove_memory = hp_zx1_remove_memory;
+	agp_bridge.create_gatt_table = ati_create_gatt_table;
+	agp_bridge.free_gatt_table = ati_free_gatt_table;
+	agp_bridge.insert_memory = ati_insert_memory;
+	agp_bridge.remove_memory = ati_remove_memory;
 	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
 	agp_bridge.free_by_type = agp_generic_free_by_type;
 	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
 	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
-	agp_bridge.cant_use_aperture = 1;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
 
-	return hp_zx1_ioc_init();
+	return 0;
 
 	(void) pdev; /* unused */
 }
-
-#endif /* CONFIG_AGP_HP_ZX1 */
+#endif /* CONFIG_AGP_ATI */
 
 /* per-chipset initialization data.
  * note -- all chipsets for a single vendor MUST be grouped together
@@ -4829,6 +6010,20 @@
 		"AMD",
 		"761",
 		amd_irongate_setup },
+#endif
+#ifdef CONFIG_AGP_AMD_K8
+	{ PCI_DEVICE_ID_AMD_8151_0,
+		PCI_VENDOR_ID_AMD,
+		AMD_8151,
+		"AMD",
+		"On-CPU GART",
+		amd_8151_setup },
+
+	/* Note: when adding more PCI-IDs for 8151 compatible bridges
+	   add them to the end of their vendor lists, not here.
+	   This list has to be ordered by vendor. */
+#endif /* CONFIG_AGP_AMD_K8 */
+#ifdef CONFIG_AGP_AMD
 	{ 0,
 		PCI_VENDOR_ID_AMD,
 		AMD_GENERIC,
@@ -4837,15 +6032,6 @@
 		amd_irongate_setup },
 #endif /* CONFIG_AGP_AMD */
 
-#ifdef CONFIG_AGP_AMD_8151
-	{ PCI_DEVICE_ID_AMD_8151_0,
-		PCI_VENDOR_ID_AMD,
-		AMD_8151,
-		"AMD",
-		"8151",
-		amd_8151_setup },
-#endif /* CONFIG_AGP_AMD */
-
 #ifdef CONFIG_AGP_INTEL
 	{ PCI_DEVICE_ID_INTEL_82443LX_0,
 		PCI_VENDOR_ID_INTEL,
@@ -4942,6 +6128,18 @@
 		"Intel",
 		"i860",
 		intel_860_setup },
+	{ PCI_DEVICE_ID_INTEL_7205_0,
+		PCI_VENDOR_ID_INTEL,
+		INTEL_I7205,
+		"Intel",
+		"i7205",
+		intel_7505_setup },
+	{ PCI_DEVICE_ID_INTEL_7505_0,
+		PCI_VENDOR_ID_INTEL,
+		INTEL_I7505,
+		"Intel",
+		"i7505",
+		intel_7505_setup },
 	{ 0,
 		PCI_VENDOR_ID_INTEL,
 		INTEL_GENERIC,
@@ -4951,6 +6149,15 @@
 
 #endif /* CONFIG_AGP_INTEL */
 
+#ifdef CONFIG_AGP_I460
+	{ PCI_DEVICE_ID_INTEL_460GX,
+		PCI_VENDOR_ID_INTEL,
+		INTEL_460GX,
+		"Intel",
+		"460GX",
+		intel_i460_setup },
+#endif
+
 #ifdef CONFIG_AGP_SIS
 	{ PCI_DEVICE_ID_SI_740,
 		PCI_VENDOR_ID_SI,
@@ -5048,6 +6255,16 @@
 		"SiS",
                 "550",
 		sis_generic_setup },
+#endif
+#ifdef CONFIG_AGP_AMD_K8
+	{ PCI_DEVICE_ID_SI_755,
+		PCI_VENDOR_ID_SI,
+		AMD_8151,
+		"AMD",
+		"On-CPU GART",
+		amd_8151_setup },
+#endif
+#ifdef CONFIG_AGP_SIS
 	{ 0,
 		PCI_VENDOR_ID_SI,
 		SIS_GENERIC,
@@ -5123,12 +6340,36 @@
 		"Via",
 		"Apollo Pro KT400",
 		via_generic_setup },
+	{ PCI_DEVICE_ID_VIA_CLE266,
+		PCI_VENDOR_ID_VIA,
+		VIA_CLE266,
+		"Via",
+		"CLE266",
+		via_generic_setup },
 	{ PCI_DEVICE_ID_VIA_P4M266,
 		PCI_VENDOR_ID_VIA,
 		VIA_APOLLO_P4M266,
 		"Via",
 		"Apollo P4M266",
 		via_generic_setup },
+#endif
+#ifdef CONFIG_AGP_AMD_K8
+ 	{ PCI_DEVICE_ID_VIA_8380_0,
+ 		PCI_VENDOR_ID_VIA,
+ 		AMD_8151,
+ 		"AMD",
+ 		"On-CPU GART",
+ 		amd_8151_setup },
+
+ 	/* VIA K8T800 */
+ 	{ PCI_DEVICE_ID_VIA_8385_0,
+  		PCI_VENDOR_ID_VIA,
+  		AMD_8151,
+  		"AMD",
+  		"On-CPU GART",
+ 		amd_8151_setup },		
+#endif
+#ifdef CONFIG_AGP_VIA
 	{ 0,
 		PCI_VENDOR_ID_VIA,
 		VIA_GENERIC,
@@ -5150,6 +6391,22 @@
 		"NVIDIA",
 		"nForce2",
 		nvidia_generic_setup },
+#endif
+#ifdef CONFIG_AGP_AMD_K8
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE3,
+		PCI_VENDOR_ID_NVIDIA,
+		NVIDIA_NFORCE3,
+		"NVIDIA",
+		"nForce3/K8 On-CPU GART",
+		nvidia_x86_64_setup },
+	{ PCI_DEVICE_ID_NVIDIA_NFORCE3S,
+	  PCI_VENDOR_ID_NVIDIA,
+	  NVIDIA_NFORCE3,
+	  "NVIDIA",
+	  "nForce3S/K8 On-CPU GART",
+	  nvidia_x86_64_setup },
+#endif
+#ifdef CONFIG_AGP_NVIDIA
 	{ 0,
 		PCI_VENDOR_ID_NVIDIA,
 		NVIDIA_GENERIC,
@@ -5158,14 +6415,50 @@
 		nvidia_generic_setup },
 #endif /* CONFIG_AGP_NVIDIA */
 
-#ifdef CONFIG_AGP_HP_ZX1
-	{ PCI_DEVICE_ID_HP_ZX1_LBA,
-		PCI_VENDOR_ID_HP,
-		HP_ZX1,
-		"HP",
-		"ZX1",
-		hp_zx1_setup },
-#endif
+#ifdef CONFIG_AGP_ATI
+	{ PCI_DEVICE_ID_ATI_RS100,
+	  PCI_VENDOR_ID_ATI,
+	  ATI_RS100,
+	  "ATI",
+	  "IGP320/M",
+	  ati_generic_setup },
+	{ PCI_DEVICE_ID_ATI_RS200,
+	  PCI_VENDOR_ID_ATI,
+	  ATI_RS200,
+	  "ATI",
+	  "IGP330/340/345/350/M",
+	  ati_generic_setup },
+	{ PCI_DEVICE_ID_ATI_RS250,
+	  PCI_VENDOR_ID_ATI,
+	  ATI_RS250,
+	  "ATI",
+	  "IGP7000/M",
+	  ati_generic_setup },
+	{ PCI_DEVICE_ID_ATI_RS300_100,
+	  PCI_VENDOR_ID_ATI,
+	  ATI_RS300_100,
+	  "ATI",
+	  "IGP9100/M",
+	  ati_generic_setup },
+	{ PCI_DEVICE_ID_ATI_RS300_133,
+	  PCI_VENDOR_ID_ATI,
+	  ATI_RS300_133,
+	  "ATI",
+	  "IGP9100/M",
+	  ati_generic_setup },
+	{ PCI_DEVICE_ID_ATI_RS300_166,
+	  PCI_VENDOR_ID_ATI,
+	  ATI_RS300_166,
+	  "ATI",
+	  "IGP9100/M",
+	  ati_generic_setup },
+	{ PCI_DEVICE_ID_ATI_RS300_200,
+	  PCI_VENDOR_ID_ATI,
+	  ATI_RS300_200,
+	  "ATI",
+	  "IGP9100/M",
+	  ati_generic_setup },
+#endif /* CONFIG_AGP_ATI */
 
 	{ 0, }, /* dummy final entry, always present */
 };
@@ -5240,7 +6533,11 @@
 	}
 
 	printk(KERN_ERR PFX "Unsupported %s chipset (device id: %04x),"
+#ifdef MODULE
 	       " you might want to try agp_try_unsupported=1.\n",
+#else
+	       " you might want to boot with agp=try_unsupported\n",
+#endif
 	       agp_bridge_info[i].vendor_name, pdev->device);
 	return -ENODEV;
 }
@@ -5251,13 +6548,52 @@
 static int __init agp_find_supported_device(void)
 {
 	struct pci_dev *dev = NULL;
-	u8 cap_ptr = 0x00;
+	int ret = -ENODEV;
 
-	if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL)
-		return -ENODEV;
+#ifdef CONFIG_AGP_HP_ZX1
+	if (hp_zx1_gart_init() == 0)
+		return 0;
+#endif
+
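+	/* Walk every PCI host bridge until one of them yields a
+	 * supported device, rather than probing only the first. */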
+	while ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, dev)) != NULL) {
+		ret = agp_init_one(dev);
+		if (ret != -ENODEV)
+			break;
+	} 
+	return ret;
+}	
+
+static int __init agp_init_one(struct pci_dev *dev)
+{
+	u8 cap_ptr = 0x00;
 
 	agp_bridge.dev = dev;
 
+#ifdef CONFIG_AGP_AMD_K8
+	/* If there is any K8 northbridge in the system always use the K8 driver */
+	if (agp_try_unsupported
+	    && pci_find_device(PCI_VENDOR_ID_AMD, 0x1103, NULL)
+	    && !pci_find_device(PCI_VENDOR_ID_NVIDIA, 
+				PCI_DEVICE_ID_NVIDIA_NFORCE3,
+				NULL)) { 
+
+		/* find capndx */
+		cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP);
+		if (cap_ptr == 0x00)
+			return -ENODEV;
+		agp_bridge.capndx = cap_ptr;
+		
+		/* Fill in the mode register */
+		pci_read_config_dword(agp_bridge.dev,
+				      agp_bridge.capndx + 4,
+				      &agp_bridge.mode);
+		
+		printk(KERN_INFO PFX "Detected GART in AMD K8 Northbridge\n"); 
+		agp_bridge.type = AMD_8151; 
+		return amd_8151_setup(dev);
+	}					
+#endif
+
 	/* Need to test for I810 here */
 #ifdef CONFIG_AGP_I810
 	if (dev->vendor == PCI_VENDOR_ID_INTEL) {
@@ -5526,23 +6862,6 @@
 
 #endif	/* CONFIG_AGP_SWORKS */
 
-#ifdef CONFIG_AGP_HP_ZX1
-	if (dev->vendor == PCI_VENDOR_ID_HP) {
-		do {
-			/* ZX1 LBAs can be either PCI or AGP bridges */
-			if (pci_find_capability(dev, PCI_CAP_ID_AGP)) {
-				printk(KERN_INFO PFX "Detected HP ZX1 AGP "
-				       "chipset at %s\n", dev->slot_name);
-				agp_bridge.type = HP_ZX1;
-				agp_bridge.dev = dev;
-				return hp_zx1_setup(dev);
-			}
-			dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, dev);
-		} while (dev);
-		return -ENODEV;
-	}
-#endif	/* CONFIG_AGP_HP_ZX1 */
-
 	/* find capndx */
 	cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP);
 	if (cap_ptr == 0x00)
@@ -5732,10 +7051,26 @@
 	&agp_copy_info
 };
 
+static int __initdata agp_off = 0; 
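+/*
+ * Handle the "agp=" kernel boot option: "agp=off" disables the driver,
+ * "agp=try_unsupported" enables probing of unsupported chipsets.
+ */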
+int __init agp_setup(char *s) 
+{ 
+	if (!strcmp(s,"off"))
+		agp_off = 1;
+	if (!strcmp(s,"try_unsupported"))
+		agp_try_unsupported = 1;
+	return 0;		
+} 
+__setup("agp=", agp_setup); 
+
 int __init agp_init(void)
 {
 	int ret_val;
 
+	if (agp_off) { 
+		printk("AGP disabled\n"); 
+		return -1;
+	} 
+
 	printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Jeff Hartmann\n",
 	       AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
 
