patch-2.3.99-pre2 linux/arch/alpha/kernel/core_cia.c

diff -u --recursive --new-file v2.3.99-pre1/linux/arch/alpha/kernel/core_cia.c linux/arch/alpha/kernel/core_cia.c
@@ -6,7 +6,7 @@
  *
  *	Copyright (C) 1995  David A Rusling
  *	Copyright (C) 1997, 1998  Jay Estabrook
- *	Copyright (C) 1998, 1999  Richard Henderson
+ *	Copyright (C) 1998, 1999, 2000  Richard Henderson
  *
  * Code common to all CIA core logic chips.
  */
@@ -25,6 +25,8 @@
 #include <asm/core_cia.h>
 #undef __EXTERN_INLINE
 
+#include <linux/bootmem.h>
+
 #include "proto.h"
 #include "pci_impl.h"
 
@@ -35,27 +37,20 @@
  * handle the system transaction.  Another involves timing.  Ho hum.
  */
 
-/*
- * BIOS32-style PCI interface:
- */
-
 #define DEBUG_CONFIG 0
-#define DEBUG_DUMP_REGS 0
-
 #if DEBUG_CONFIG
 # define DBGC(args)	printk args
 #else
 # define DBGC(args)
 #endif
 
-#define vuip	volatile unsigned int  *
+#define vip	volatile int  *
 
 /*
  * Given a bus, device, and function number, compute resulting
- * configuration space address and setup the CIA_HAXR2 register
- * accordingly.  It is therefore not safe to have concurrent
- * invocations to configuration space access routines, but there
- * really shouldn't be any need for this.
+ * configuration space address.  Setting up a Type 1 access touches
+ * the shared CIA CFG register, so it is not safe to have concurrent
+ * invocations of configuration space access routines, but there
+ * really shouldn't be any need for this.
  *
  * Type 0:
  *
@@ -96,34 +91,16 @@
 mk_conf_addr(struct pci_dev *dev, int where, unsigned long *pci_addr,
 	     unsigned char *type1)
 {
-	unsigned long addr;
 	u8 bus = dev->bus->number;
 	u8 device_fn = dev->devfn;
 
-	DBGC(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
-	      "pci_addr=0x%p, type1=0x%p)\n",
-	      bus, device_fn, where, pci_addr, type1));
-
-	if (bus == 0) {
-		int device = device_fn >> 3;
-
-		/* Type 0 configuration cycle.  */
-
-		if (device > 20) {
-			DBGC(("mk_conf_addr: device (%d) > 20, returning -1\n",
-			      device));
-			return -1;
-		}
+	*type1 = (bus != 0);
+	*pci_addr = (bus << 16) | (device_fn << 8) | where;
+
+	DBGC(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x,"
+	      " returning address 0x%lx\n",
+	      bus, device_fn, where, *pci_addr));
 
-		*type1 = 0;
-		addr = (device_fn << 8) | (where);
-	} else {
-		/* Type 1 configuration cycle.  */
-		*type1 = 1;
-		addr = (bus << 16) | (device_fn << 8) | (where);
-	}
-	*pci_addr = addr;
-	DBGC(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
 	return 0;
 }
 
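As a worked example of the flat encoding mk_conf_addr now produces (a
standalone sketch, not part of the patch; the bus, slot, and register
offset are arbitrary):

	#include <stdio.h>

	int main(void)
	{
		unsigned char bus = 1;
		unsigned char device_fn = (4 << 3) | 0;	/* slot 4, function 0 */
		int where = 0x10;			/* a config-space offset */
		unsigned long pci_addr = (bus << 16) | (device_fn << 8) | where;

		/* Any bus other than 0 requires a type 1 config cycle.  */
		printf("pci_addr=0x%lx type1=%d\n", pci_addr, bus != 0);
		return 0;	/* prints pci_addr=0x12010 type1=1 */
	}
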
@@ -131,43 +108,37 @@
 conf_read(unsigned long addr, unsigned char type1)
 {
 	unsigned long flags;
-	unsigned int stat0, value;
-	unsigned int cia_cfg = 0;
+	int stat0, value;
+	int cia_cfg = 0;
 
-	value = 0xffffffffU;
-	mb();
-
-	__save_and_cli(flags);	/* avoid getting hit by machine check */
-
-	DBGC(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));
+	DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr, type1));
+	__save_and_cli(flags);
 
 	/* Reset status register to avoid losing errors.  */
-	stat0 = *(vuip)CIA_IOC_CIA_ERR;
-	*(vuip)CIA_IOC_CIA_ERR = stat0;
+	stat0 = *(vip)CIA_IOC_CIA_ERR;
+	*(vip)CIA_IOC_CIA_ERR = stat0;
 	mb();
-	DBGC(("conf_read: CIA ERR was 0x%x\n", stat0));
 
 	/* If Type1 access, must set CIA CFG. */
 	if (type1) {
-		cia_cfg = *(vuip)CIA_IOC_CFG;
-		*(vuip)CIA_IOC_CFG = cia_cfg | 1;
+		cia_cfg = *(vip)CIA_IOC_CFG;
+		*(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
 		mb();
-		DBGC(("conf_read: TYPE1 access\n"));
+		*(vip)CIA_IOC_CFG;
 	}
 
-	mb();
 	draina();
 	mcheck_expected(0) = 1;
 	mcheck_taken(0) = 0;
 	mb();
 
 	/* Access configuration space.  */
-	value = *(vuip)addr;
+	value = *(vip)addr;
 	mb();
 	mb();  /* magic */
 	if (mcheck_taken(0)) {
 		mcheck_taken(0) = 0;
-		value = 0xffffffffU;
+		value = 0xffffffff;
 		mb();
 	}
 	mcheck_expected(0) = 0;
@@ -175,13 +146,14 @@
 
 	/* If Type1 access, must reset IOC CFG so normal IO space ops work.  */
 	if (type1) {
-		*(vuip)CIA_IOC_CFG = cia_cfg & ~1;
+		*(vip)CIA_IOC_CFG = cia_cfg;
 		mb();
+		*(vip)CIA_IOC_CFG;
 	}
 
-	DBGC(("conf_read(): finished\n"));
-
 	__restore_flags(flags);
+	DBGC(("done\n"));
+
 	return value;
 }
 
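The structure of conf_read is the interesting part: arm the
expected-machine-check flag, issue the probing load, and if a check
was taken report 0xffffffff, the conventional "no device" value.  A
minimal user-space analogue of that pattern (probe_load and
conf_read_like are invented stand-ins for the MMIO load and the
kernel's per-CPU mcheck flags):

	#include <stdio.h>

	static int mcheck_expected, mcheck_taken;

	/* Stand-in for the config-space load; absent devices fault.  */
	static int probe_load(int present, int value)
	{
		if (!present) {
			mcheck_taken = mcheck_expected;	/* expected: just note it */
			return 0;
		}
		return value;
	}

	static int conf_read_like(int present, int value)
	{
		int result;

		mcheck_expected = 1;
		mcheck_taken = 0;
		result = probe_load(present, value);
		if (mcheck_taken) {
			mcheck_taken = 0;
			result = 0xffffffff;	/* "no device" */
		}
		mcheck_expected = 0;
		return result;
	}

	int main(void)
	{
		printf("present: 0x%x\n", conf_read_like(1, 0x71118086));
		printf("absent:  0x%x\n", conf_read_like(0, 0));
		return 0;
	}
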
@@ -189,31 +161,31 @@
 conf_write(unsigned long addr, unsigned int value, unsigned char type1)
 {
 	unsigned long flags;
-	unsigned int stat0;
-	unsigned int cia_cfg = 0;
+	int stat0, cia_cfg = 0;
 
-	__save_and_cli(flags);	/* avoid getting hit by machine check */
+	DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr, type1));
+	__save_and_cli(flags);
 
 	/* Reset status register to avoid losing errors.  */
-	stat0 = *(vuip)CIA_IOC_CIA_ERR;
-	*(vuip)CIA_IOC_CIA_ERR = stat0;
+	stat0 = *(vip)CIA_IOC_CIA_ERR;
+	*(vip)CIA_IOC_CIA_ERR = stat0;
 	mb();
-	DBGC(("conf_write: CIA ERR was 0x%x\n", stat0));
 
 	/* If Type1 access, must set CIA CFG.  */
 	if (type1) {
-		cia_cfg = *(vuip)CIA_IOC_CFG;
-		*(vuip)CIA_IOC_CFG = cia_cfg | 1;
+		cia_cfg = *(vip)CIA_IOC_CFG;
+		*(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
 		mb();
-		DBGC(("conf_write: TYPE1 access\n"));
+		*(vip)CIA_IOC_CFG;
 	}
 
 	draina();
 	mcheck_expected(0) = 1;
+	mcheck_taken(0) = 0;
 	mb();
 
 	/* Access configuration space.  */
-	*(vuip)addr = value;
+	*(vip)addr = value;
 	mb();
 	mb();  /* magic */
 
@@ -222,12 +194,13 @@
 
 	/* If Type1 access, must reset IOC CFG so normal IO space ops work.  */
 	if (type1) {
-		*(vuip)CIA_IOC_CFG = cia_cfg & ~1;
+		*(vip)CIA_IOC_CFG = cia_cfg;
 		mb();
+		*(vip)CIA_IOC_CFG;
 	}
 
-	DBGC(("conf_write(): finished\n"));
 	__restore_flags(flags);
+	DBGC(("done\n"));
 }
 
 static int
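
Note the bare *(vip)CIA_IOC_CFG reads added after the CFG writes in
both routines.  This is the same "re-read to force write" trick the
end of this file uses on CIA_ERR: CSR writes are posted, and a dummy
load from the register stalls the CPU until the write has actually
reached the chip.  A compilable sketch of the idiom (csr_write_flush
is a made-up name):

	/* Write a control/status register, then read it back so the
	   posted write is known to have landed before we continue.  */
	static inline void csr_write_flush(volatile int *csr, int value)
	{
		*csr = value;	/* may be sitting in a write buffer */
		/* in the kernel, mb() sits here to order the accesses */
		(void)*csr;	/* dummy load completes after the write */
	}

	int main(void)
	{
		volatile int fake_csr = 0;	/* plain memory as a stand-in CSR */
		csr_write_flush(&fake_csr, 0x3);
		return fake_csr != 0x3;
	}
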
@@ -314,158 +287,442 @@
 	write_dword:	cia_write_config_dword
 };
 
+/*
+ * CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb.
+ * It cannot be invalidated.  Rather than hard code the pass numbers,
+ * actually try the tbia to see if it works.
+ */
+
 void
 cia_pci_tbi(struct pci_controler *hose, dma_addr_t start, dma_addr_t end)
 {
 	wmb();
 	*(vip)CIA_IOC_PCI_TBIA = 3;	/* Flush all locked and unlocked.  */
 	mb();
+	*(vip)CIA_IOC_PCI_TBIA;
+}
+
+/*
+ * Fixup attempt number 1.
+ *
+ * Write zeros directly into the tag registers.
+ */
+
+static void
+cia_pci_tbi_try1(struct pci_controler *hose,
+		 dma_addr_t start, dma_addr_t end)
+{
+	wmb();
+	*(vip)CIA_IOC_TB_TAGn(0) = 0;
+	*(vip)CIA_IOC_TB_TAGn(1) = 0;
+	*(vip)CIA_IOC_TB_TAGn(2) = 0;
+	*(vip)CIA_IOC_TB_TAGn(3) = 0;
+	*(vip)CIA_IOC_TB_TAGn(4) = 0;
+	*(vip)CIA_IOC_TB_TAGn(5) = 0;
+	*(vip)CIA_IOC_TB_TAGn(6) = 0;
+	*(vip)CIA_IOC_TB_TAGn(7) = 0;
+	mb();
+	*(vip)CIA_IOC_TB_TAGn(0);
+}
+
+#if 0
+/*
+ * Fixup attempt number 2.  This is the method NT and NetBSD use.
+ *
+ * Allocate mappings, and put the chip into DMA loopback mode to read a
+ * garbage page.  This works by causing TLB misses, causing old entries to
+ * be purged to make room for the new entries coming in for the garbage page.
+ */
+
+#define CIA_BROKEN_TBI_TRY2_BASE	0xE0000000
+
+static void __init
+cia_enable_broken_tbi_try2(void)
+{
+	unsigned long *ppte, pte;
+	long i;
+
+	ppte = __alloc_bootmem(PAGE_SIZE, 32768, 0);
+	pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;
+
+	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); ++i)
+		ppte[i] = pte;
+
+	*(vip)CIA_IOC_PCI_W3_BASE = CIA_BROKEN_TBI_TRY2_BASE | 3;
+	*(vip)CIA_IOC_PCI_W3_MASK = (PAGE_SIZE - 1) & 0xfff00000;
+	*(vip)CIA_IOC_PCI_T3_BASE = virt_to_phys(ppte) >> 2;
+}
+
+static void
+cia_pci_tbi_try2(struct pci_controler *hose,
+		 dma_addr_t start, dma_addr_t end)
+{
+	unsigned long flags;
+	unsigned long bus_addr;
+	int ctrl;
+	long i;
+
+	__save_and_cli(flags);
+
+	/* Put the chip into PCI loopback mode.  */
+	mb();
+	ctrl = *(vip)CIA_IOC_CIA_CTRL;
+	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
+	mb();
+	*(vip)CIA_IOC_CIA_CTRL;
+	mb();
+
+	/* Read from PCI dense memory space at TBI_ADDR, skipping 32k on
+	   each read.  This forces SG TLB misses.  NetBSD claims that the
+	   TLB entries are not quite LRU, meaning that we need to read more
+	   times than there are actual tags.  The 2117x docs claim strict
+	   round-robin.  Oh well, we've come this far...  */
+
+	bus_addr = cia_ioremap(CIA_BROKEN_TBI_TRY2_BASE);
+	for (i = 0; i < 12; ++i, bus_addr += 32768)
+		cia_readl(bus_addr);
+
+	/* Restore normal PCI operation.  */
+	mb();
+	*(vip)CIA_IOC_CIA_CTRL = ctrl;
+	mb();
+	*(vip)CIA_IOC_CIA_CTRL;
+	mb();
+
+	__restore_flags(flags);
+}
+#endif
+
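+/*
+ * Probe for the documented SG TLB bugs on this chip revision: install
+ * a known tag/pte pair directly in the TLB registers, read through
+ * the window with the chip in PCI loopback mode, try the invalidation
+ * methods in turn, and check cache-snoop and pte-reload behavior,
+ * disabling the SG window entirely if nothing usable works.
+ */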
+static void __init
+verify_tb_operation(void)
+{
+	static int page[PAGE_SIZE/4]
+		__attribute__((aligned(PAGE_SIZE)))
+		__initlocaldata = { 0 };
+
+	struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
+	int ctrl, addr0, tag0, pte0, data0;
+	int temp;
+
+	/* Put the chip into PCI loopback mode.  */
+	mb();
+	ctrl = *(vip)CIA_IOC_CIA_CTRL;
+	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
+	mb();
+	*(vip)CIA_IOC_CIA_CTRL;
+	mb();
+
+	/* Write a valid entry directly into the TLB registers.  */
+
+	addr0 = arena->dma_base;
+	tag0 = addr0 | 1;
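+	/* pte format, per the usage here: the page frame number shifted
+	   left by one with the valid bit in bit 0; note that
+	   phys >> (PAGE_SHIFT - 1) is (phys >> PAGE_SHIFT) << 1.  */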
+	pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;
+
+	*(vip)CIA_IOC_TB_TAGn(0) = tag0;
+	*(vip)CIA_IOC_TB_TAGn(1) = 0;
+	*(vip)CIA_IOC_TB_TAGn(2) = 0;
+	*(vip)CIA_IOC_TB_TAGn(3) = 0;
+	*(vip)CIA_IOC_TB_TAGn(4) = 0;
+	*(vip)CIA_IOC_TB_TAGn(5) = 0;
+	*(vip)CIA_IOC_TB_TAGn(6) = 0;
+	*(vip)CIA_IOC_TB_TAGn(7) = 0;
+	*(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0;
+	*(vip)CIA_IOC_TBn_PAGEm(0,1) = 0;
+	*(vip)CIA_IOC_TBn_PAGEm(0,2) = 0;
+	*(vip)CIA_IOC_TBn_PAGEm(0,3) = 0;
+	mb();
+
+	/* First, verify we can read back what we've written.  If
+	   this fails, we can't be sure of any of the other testing
+	   we're going to do, so bail.  */
+	/* ??? Actually, we could do the work with machine checks.
+	   By passing this register update test, we pretty much
+	   guarantee that cia_pci_tbi_try1 works.  If this test
+	   fails, cia_pci_tbi_try2 might still work.  */
+
+	temp = *(vip)CIA_IOC_TB_TAGn(0);
+	if (temp != tag0) {
+		printk("pci: failed tb register update test "
+		       "(tag0 %#x != %#x)\n", temp, tag0);
+		goto failed;
+	}
+	temp = *(vip)CIA_IOC_TB_TAGn(1);
+	if (temp != 0) {
+		printk("pci: failed tb register update test "
+		       "(tag1 %#x != 0)\n", temp);
+		goto failed;
+	}
+	temp = *(vip)CIA_IOC_TBn_PAGEm(0,0);
+	if (temp != pte0) {
+		printk("pci: failed tb register update test "
+		       "(pte0 %#x != %#x)\n", temp, pte0);
+		goto failed;
+	}
+	printk("pci: passed tb register update test\n");
+
+	/* Second, verify we can actually do I/O through this entry.  */
+
+	data0 = 0xdeadbeef;
+	page[0] = data0;
+	mcheck_expected(0) = 1;
+	mcheck_taken(0) = 0;
+	mb();
+	temp = cia_readl(cia_ioremap(addr0));
+	mb();
+	mcheck_expected(0) = 0;
+	mb();
+	if (mcheck_taken(0)) {
+		printk("pci: failed sg loopback i/o read test (mcheck)\n");
+		goto failed;
+	}
+	if (temp != data0) {
+		printk("pci: failed sg loopback i/o read test "
+		       "(%#x != %#x)\n", temp, data0);
+		goto failed;
+	}
+	printk("pci: passed sg loopback i/o read test\n");
+
+	/* Third, try to invalidate the TLB.  */
+
+	cia_pci_tbi(arena->hose, 0, -1);
+	temp = *(vip)CIA_IOC_TB_TAGn(0);
+	if (temp & 1) {
+		cia_pci_tbi_try1(arena->hose, 0, -1);
+
+		temp = *(vip)CIA_IOC_TB_TAGn(0);
+		if (temp & 1) {
+			printk("pci: failed tbia test; "
+			       "no usable workaround\n");
+			goto failed;
+		}
+
+		alpha_mv.mv_pci_tbi = cia_pci_tbi_try1;
+		printk("pci: failed tbia test; workaround 1 succeeded\n");
+	} else {
+		printk("pci: passed tbia test\n");
+	}
+
+	/* Fourth, verify the TLB snoops the EV5's caches when
+	   doing a tlb fill.  */
+
+	data0 = 0x5adda15e;
+	page[0] = data0;
+	arena->ptes[4] = pte0;
+	mcheck_expected(0) = 1;
+	mcheck_taken(0) = 0;
+	mb();
+	temp = cia_readl(cia_ioremap(addr0 + 4*PAGE_SIZE));
+	mb();
+	mcheck_expected(0) = 0;
+	mb();
+	if (mcheck_taken(0)) {
+		printk("pci: failed pte write cache snoop test (mcheck)\n");
+		goto failed;
+	}
+	if (temp != data0) {
+		printk("pci: failed pte write cache snoop test "
+		       "(%#x != %#x)\n", temp, data0);
+		goto failed;
+	}
+	printk("pci: passed pte write cache snoop test\n");
+
+	/* Fifth, verify that a previously invalid PTE entry gets
+	   filled from the page table.  */
+
+	data0 = 0xabcdef12;
+	page[0] = data0;
+	arena->ptes[5] = pte0;
+	mcheck_expected(0) = 1;
+	mcheck_taken(0) = 0;
+	mb();
+	temp = cia_readl(cia_ioremap(addr0 + 5*PAGE_SIZE));
+	mb();
+	mcheck_expected(0) = 0;
+	mb();
+	if (mcheck_taken(0)) {
+		printk("pci: failed valid tag invalid pte reload test "
+		       "(mcheck; workaround available)\n");
+		/* Work around this bug by aligning new allocations
+		   on 4 page boundaries.  */
+		arena->align_entry = 4;
+	} else if (temp != data0) {
+		printk("pci: failed valid tag invalid pte reload test "
+		       "(%#x != %#x)\n", temp, data0);
+		goto failed;
+	} else {
+		printk("pci: passed valid tag invalid pte reload test\n");
+	}
+
+	/* Sixth, verify machine checks are working.  Test invalid
+	   pte under the same valid tag as we used above.  */
+
+	mcheck_expected(0) = 1;
+	mcheck_taken(0) = 0;
+	mb();
+	temp = cia_readl(cia_ioremap(addr0 + 6*PAGE_SIZE));
+	mb();
+	mcheck_expected(0) = 0;
+	mb();
+	printk("pci: %s pci machine check test\n",
+	       mcheck_taken(0) ? "passed" : "failed");
+
+	/* Clean up after the tests.  */
+	arena->ptes[4] = 0;
+	arena->ptes[5] = 0;
+	alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
+
+exit:
+	/* Restore normal PCI operation.  */
+	mb();
+	*(vip)CIA_IOC_CIA_CTRL = ctrl;
+	mb();
+	*(vip)CIA_IOC_CIA_CTRL;
+	mb();
+	return;
+
+failed:
+	printk("pci: disabling sg translation window\n");
+	*(vip)CIA_IOC_PCI_W0_BASE = 0;
+	alpha_mv.mv_pci_tbi = NULL;
+	goto exit;
 }
 
-void __init
-cia_init_arch(void)
+static void __init
+do_init_arch(int is_pyxis)
 {
 	struct pci_controler *hose;
-	struct resource *hae_mem;
-	unsigned int temp;
+	int temp;
+	int cia_rev;
 
-#if DEBUG_DUMP_REGS
-	temp = *(vuip)CIA_IOC_CIA_REV; mb();
-	printk("cia_init: CIA_REV was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_PCI_LAT; mb();
-	printk("cia_init: CIA_PCI_LAT was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_CIA_CTRL; mb();
-	printk("cia_init: CIA_CTRL was 0x%x\n", temp);
-	temp = *(vuip)0xfffffc8740000140UL; mb();
-	printk("cia_init: CIA_CTRL1 was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_HAE_MEM; mb();
-	printk("cia_init: CIA_HAE_MEM was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_HAE_IO; mb();
-	printk("cia_init: CIA_HAE_IO was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_CFG; mb();
-	printk("cia_init: CIA_CFG was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_CACK_EN; mb();
-	printk("cia_init: CIA_CACK_EN was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_CFG; mb();
-	printk("cia_init: CIA_CFG was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_CIA_DIAG; mb();
-	printk("cia_init: CIA_DIAG was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_DIAG_CHECK; mb();
-	printk("cia_init: CIA_DIAG_CHECK was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_PERF_MONITOR; mb();
-	printk("cia_init: CIA_PERF_MONITOR was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_PERF_CONTROL; mb();
-	printk("cia_init: CIA_PERF_CONTROL was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_CIA_ERR; mb();
-	printk("cia_init: CIA_ERR was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_CIA_STAT; mb();
-	printk("cia_init: CIA_STAT was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_MCR; mb();
-	printk("cia_init: CIA_MCR was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_CIA_CTRL; mb();
-	printk("cia_init: CIA_CTRL was 0x%x\n", temp);
-	temp = *(vuip)CIA_IOC_ERR_MASK; mb();
-	printk("cia_init: CIA_ERR_MASK was 0x%x\n", temp);
-	temp = *((vuip)CIA_IOC_PCI_W0_BASE); mb();
-	printk("cia_init: W0_BASE was 0x%x\n", temp);
-	temp = *((vuip)CIA_IOC_PCI_W1_BASE); mb();
-	printk("cia_init: W1_BASE was 0x%x\n", temp);
-	temp = *((vuip)CIA_IOC_PCI_W2_BASE); mb();
-	printk("cia_init: W2_BASE was 0x%x\n", temp);
-	temp = *((vuip)CIA_IOC_PCI_W3_BASE); mb();
-	printk("cia_init: W3_BASE was 0x%x\n", temp);
-#endif /* DEBUG_DUMP_REGS */
+	cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK;
+	printk("pci: cia revision %d%s\n",
+	       cia_rev, is_pyxis ? " (pyxis)" : "");
+
+	/* Set up error reporting.  */
+	temp = *(vip)CIA_IOC_ERR_MASK;
+	temp &= ~(CIA_ERR_CPU_PE | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV
+		  | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT);
+	*(vip)CIA_IOC_ERR_MASK = temp;
+
+	/* Clear all currently pending errors.  */
+	*(vip)CIA_IOC_CIA_ERR = 0;
+
+	/* Turn on mchecks.  */
+	temp = *(vip)CIA_IOC_CIA_CTRL;
+	temp |= CIA_CTRL_FILL_ERR_EN | CIA_CTRL_MCHK_ERR_EN;
+	*(vip)CIA_IOC_CIA_CTRL = temp;
+
+	/* Clear the CFG register, which gets used for PCI config space
+	   accesses.  That is the way we want to use it, and we do not
+	   want to depend on what ARC or SRM might have left behind.  */
+	*(vip)CIA_IOC_CFG = 0;
+ 
+	/* Zero the HAEs.  */
+	*(vip)CIA_IOC_HAE_MEM = 0;
+	*(vip)CIA_IOC_HAE_IO = 0;
+
+	/* For PYXIS, we always use BWX bus and i/o accesses.  To that end,
+	   make sure they're enabled on the controller.  */
+	if (is_pyxis) {
+		temp = *(vip)CIA_IOC_CIA_CNFG;
+		temp |= CIA_CNFG_IOA_BWEN;
+		*(vip)CIA_IOC_CIA_CNFG = temp;
+	}
+
+	/* Synchronize with all previous changes.  */
+	mb();
+	*(vip)CIA_IOC_CIA_REV;
 
 	/*
 	 * Create our single hose.
 	 */
 
 	pci_isa_hose = hose = alloc_pci_controler();
-	hae_mem = alloc_resource();
-
 	hose->io_space = &ioport_resource;
-	hose->mem_space = hae_mem;
+	hose->mem_space = &iomem_resource;
 	hose->config_space = CIA_CONF;
 	hose->index = 0;
 
-	hae_mem->start = 0;
-	hae_mem->end = CIA_MEM_R1_MASK;
-	hae_mem->name = pci_hae0_name;
-	hae_mem->flags = IORESOURCE_MEM;
+	if (! is_pyxis) {
+		struct resource *hae_mem = alloc_resource();
+		hose->mem_space = hae_mem;
+
+		hae_mem->start = 0;
+		hae_mem->end = CIA_MEM_R1_MASK;
+		hae_mem->name = pci_hae0_name;
+		hae_mem->flags = IORESOURCE_MEM;
 
-	if (request_resource(&iomem_resource, hae_mem) < 0)
-		printk(KERN_ERR "Failed to request HAE_MEM\n");
+		if (request_resource(&iomem_resource, hae_mem) < 0)
+			printk(KERN_ERR "Failed to request HAE_MEM\n");
+	}
 
 	/*
 	 * Set up the PCI to main memory translation windows.
 	 *
 	 * Window 0 is scatter-gather 8MB at 8MB (for isa)
-	 * Window 1 is scatter-gather 128MB at 1GB
-	 * Window 2 is direct access 2GB at 2GB
-	 * ??? We ought to scale window 1 with memory.
+	 * Window 1 is direct access 1GB at 1GB
+	 * Window 2 is direct access 1GB at 2GB
+	 *
+	 * We must actually use 2 windows to direct-map the 2GB space,
+	 * because of an idiot-syncrasy of the CYPRESS chip used on 
+	 * many PYXIS systems.  It may respond to a PCI bus address in
+	 * the last 1MB of the 4GB address range.
+	 *
+	 * ??? NetBSD hints that page tables must be aligned to 32K,
+	 * possibly due to a hardware bug.  This is over-aligned
+	 * from the 8K alignment one would expect for an 8MB window. 
+	 * No description of what revisions affected.
 	 */
 
-	/* ??? NetBSD hints that page tables must be aligned to 32K,
-	   possibly due to a hardware bug.  This is over-aligned
-	   from the 8K alignment one would expect for an 8MB window. 
-	   No description of what CIA revisions affected.  */
-	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0x8000);
-	hose->sg_pci = iommu_arena_new(hose, 0x40000000, 0x08000000, 0);
-	__direct_map_base = 0x80000000;
+	hose->sg_pci = NULL;
+	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 32768);
+	__direct_map_base = 0x40000000;
 	__direct_map_size = 0x80000000;
 
-	*(vuip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3;
-	*(vuip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
-	*(vuip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;
-
-	*(vuip)CIA_IOC_PCI_W1_BASE = hose->sg_pci->dma_base | 3;
-	*(vuip)CIA_IOC_PCI_W1_MASK = (hose->sg_pci->size - 1) & 0xfff00000;
-	*(vuip)CIA_IOC_PCI_T1_BASE = virt_to_phys(hose->sg_pci->ptes) >> 2;
+	*(vip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3;
+	*(vip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
+	*(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;
+
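+	/* Register encoding, per the usage here: W_BASE holds the PCI
+	   base address of the window, bit 0 enables it, and bit 1 (set
+	   for the SG window above) selects scatter-gather; W_MASK is
+	   (window size - 1) & 0xfff00000, so each 1GB window below gets
+	   0x3ff00000; T_BASE holds the translation target (page table
+	   base, or the direct-mapped physical base) shifted right 2.  */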
+	*(vip)CIA_IOC_PCI_W1_BASE = 0x40000000 | 1;
+	*(vip)CIA_IOC_PCI_W1_MASK = (0x40000000 - 1) & 0xfff00000;
+	*(vip)CIA_IOC_PCI_T1_BASE = 0;
+
+	*(vip)CIA_IOC_PCI_W2_BASE = 0x80000000 | 1;
+	*(vip)CIA_IOC_PCI_W2_MASK = (0x40000000 - 1) & 0xfff00000;
+	*(vip)CIA_IOC_PCI_T2_BASE = 0x40000000 >> 2;
 
-	*(vuip)CIA_IOC_PCI_W2_BASE = __direct_map_base | 1;
-	*(vuip)CIA_IOC_PCI_W2_MASK = (__direct_map_size - 1) & 0xfff00000;
-	*(vuip)CIA_IOC_PCI_T2_BASE = 0;
-
-	*(vuip)CIA_IOC_PCI_W3_BASE = 0;
+	*(vip)CIA_IOC_PCI_W3_BASE = 0;
+}
 
-	cia_pci_tbi(hose, 0, -1);
+void __init
+cia_init_arch(void)
+{
+	do_init_arch(0);
+}
 
-	/* 
-	 * Set up error reporting.
-	 */
-	temp = *(vuip)CIA_IOC_CIA_ERR;
-	temp |= 0x180;   /* master, target abort */
-	*(vuip)CIA_IOC_CIA_ERR = temp;
-
-	temp = *(vuip)CIA_IOC_CIA_CTRL;
-	temp |= 0x400;	/* turn on FILL_ERR to get mchecks */
-	*(vuip)CIA_IOC_CIA_CTRL = temp;
+void __init
+pyxis_init_arch(void)
+{
+	do_init_arch(1);
+}
 
-	/*
-	 * Next, clear the CIA_CFG register, which gets used
-	 * for PCI Config Space accesses. That is the way
-	 * we want to use it, and we do not want to depend on
-	 * what ARC or SRM might have left behind...
-	 */
-	*(vuip)CIA_IOC_CFG = 0;
- 
-	/*
-	 * Zero the HAEs. 
-	 */
-	*(vuip)CIA_IOC_HAE_MEM = 0;
-	*(vuip)CIA_IOC_HAE_IO = 0;
-	mb();
+void __init
+cia_init_pci(void)
+{
+	/* Must delay this from init_arch, as we need machine checks.  */
+	verify_tb_operation();
+	common_init_pci();
 }
 
 static inline void
 cia_pci_clr_err(void)
 {
-	unsigned int jd;
+	int jd;
 
-	jd = *(vuip)CIA_IOC_CIA_ERR;
-	*(vuip)CIA_IOC_CIA_ERR = jd;
+	jd = *(vip)CIA_IOC_CIA_ERR;
+	*(vip)CIA_IOC_CIA_ERR = jd;
 	mb();
-	*(vuip)CIA_IOC_CIA_ERR;		/* re-read to force write.  */
+	*(vip)CIA_IOC_CIA_ERR;		/* re-read to force write.  */
 }
 
 void

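One consequence of the new window layout worth spelling out: the two
1GB direct windows implement a single 2GB direct map in which bus
address = physical address + 0x40000000, window 1 covering physical
0-1GB via T1_BASE = 0 and window 2 covering physical 1-2GB via
T2_BASE.  A sketch (phys_to_bus is a hypothetical helper, not in the
patch):

	#include <stdio.h>

	/* Bus address for a CPU physical address under the direct map
	   (__direct_map_base 0x40000000, __direct_map_size 0x80000000). */
	static unsigned long phys_to_bus(unsigned long paddr)
	{
		return paddr + 0x40000000;
	}

	int main(void)
	{
		/* phys 0x1000 -> bus 0x40001000 (window 1);
		   phys 0x50000000 -> bus 0x90000000 (window 2) */
		printf("0x%lx 0x%lx\n",
		       phys_to_bus(0x1000), phys_to_bus(0x50000000));
		return 0;
	}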