patch-2.3.48 linux/arch/mips64/sgi-ip22/ip22-sc.c

diff -u --recursive --new-file v2.3.47/linux/arch/mips64/sgi-ip22/ip22-sc.c linux/arch/mips64/sgi-ip22/ip22-sc.c
@@ -0,0 +1,182 @@
+/* $Id: ip22-sc.c,v 1.2 1999/12/04 03:59:01 ralf Exp $
+ *
+ * indy_sc.c: Indy cache management functions.
+ *
+ * Copyright (C) 1997 Ralf Baechle (ralf@gnu.org),
+ * derived from r4xx0.c by David S. Miller (dm@engr.sgi.com).
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/autoconf.h>
+
+#include <asm/bcache.h>
+#include <asm/sgi/sgimc.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/bootinfo.h>
+#include <asm/sgialib.h>
+#include <asm/mmu_context.h>
+
+/* Secondary cache size in bytes, if present.  */
+static unsigned long scache_size;
+
+#undef DEBUG_CACHE
+
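+/* The IP22 secondary cache is 512KB (SC_SIZE) of 32-byte lines
+ * (SC_LINE).  SC_INDEX() masks an address down to the index of the
+ * cache line it falls in.  */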
+#define SC_SIZE 0x00080000
+#define SC_LINE 32
+#define CI_MASK (SC_SIZE - SC_LINE)
+#define SC_INDEX(n) ((n) & CI_MASK)
+
+static inline void indy_sc_wipe(unsigned long first, unsigned long last)
+{
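+	/* OR-ing in %4 turns the cache indices into uncached XKPHYS
+	 * addresses in the external scache flush area; a word store to
+	 * such an address flushes the corresponding line.  */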
+	__asm__ __volatile__(
+		".set\tnoreorder\n\t"
+		"or\t%0, %4\t\t# first line to flush\n\t"
+		"or\t%1, %4\t\t# last line to flush\n"
+		"1:\tsw\t$0, 0(%0)\n\t"
+		"bne\t%0, %1, 1b\n\t"
+		" daddu\t%0, 32\n\t"
+		".set\treorder"
+		: "=r" (first), "=r" (last)
+		: "0" (first), "1" (last), "r" (0x9000000080000000)
+		: "$1");
+}
+
+static void indy_sc_wback_invalidate(unsigned long addr, unsigned long size)
+{
+	unsigned long first_line, last_line;
+	unsigned long flags;
+
+#ifdef DEBUG_CACHE
+	printk("indy_sc_wback_invalidate[%08lx,%08lx]", addr, size);
+#endif
+	/* Which lines to flush?  */
+	first_line = SC_INDEX(addr);
+	last_line = SC_INDEX(addr + size - 1);
+
+	__save_and_cli(flags);
+	if (first_line <= last_line) {
+		indy_sc_wipe(first_line, last_line);
+		goto out;
+	}
+
+	/* Cache index wrap-around.  Due to the way the buddy system works
+	   this case should not happen, but we're prepared to handle it
+	   anyway.  */
+	indy_sc_wipe(first_line, SC_SIZE - SC_LINE);
+	indy_sc_wipe(0, last_line);
+out:
+	__restore_flags(flags);
+}
+
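+/* A store to the magic uncached address controls the external scache;
+ * the width of the store selects the operation: a byte store enables
+ * the cache, a halfword store disables it.  */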
+static inline void indy_sc_enable(void)
+{
+#ifdef DEBUG_CACHE
+	printk("Enabling R4600 SCACHE\n");
+#endif
+	*(volatile unsigned char *) 0x9000000080000000 = 0;
+}
+
+static void indy_sc_disable(void)
+{
+#ifdef DEBUG_CACHE
+	printk("Disabling R4600 SCACHE\n");
+#endif
+	*(volatile unsigned short *) 0x9000000080000000 = 0;
+}
+
+static __init int indy_sc_probe(void)
+{
+	volatile u32 *cpu_control;
+	unsigned short cmd = 0xc220;
+	unsigned long data = 0;
+	int i, n;
+
+#ifdef __MIPSEB__
+	cpu_control = (volatile u32 *) KSEG1ADDR(0x1fa00034);
+#else
+	cpu_control = (volatile u32 *) KSEG1ADDR(0x1fa00030);
+#endif
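+	/* cpu_control maps the SGIMC CPU control register; its EEPROM_*
+	 * bits bit-bang the serial EEPROM that holds the scache size.  */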
+#define DEASSERT(bit) (*(cpu_control) &= (~(bit)))
+#define ASSERT(bit) (*(cpu_control) |= (bit))
+#define DELAY  for(n = 0; n < 100000; n++) __asm__ __volatile__("")
+	DEASSERT(SGIMC_EEPROM_PRE);
+	DEASSERT(SGIMC_EEPROM_SDATAO);
+	DEASSERT(SGIMC_EEPROM_SECLOCK);
+	DEASSERT(SGIMC_EEPROM_PRE);
+	DELAY;
+	ASSERT(SGIMC_EEPROM_CSEL); ASSERT(SGIMC_EEPROM_SECLOCK);
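+	/* Clock out the top 11 bits of the read command, MSB first.  */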
+	for(i = 0; i < 11; i++) {
+		if(cmd & (1<<15))
+			ASSERT(SGIMC_EEPROM_SDATAO);
+		else
+			DEASSERT(SGIMC_EEPROM_SDATAO);
+		DEASSERT(SGIMC_EEPROM_SECLOCK);
+		ASSERT(SGIMC_EEPROM_SECLOCK);
+		cmd <<= 1;
+	}
+	DEASSERT(SGIMC_EEPROM_SDATAO);
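+	/* Clock in the 16-bit reply; each rising clock edge presents the
+	 * next data bit, MSB first, on SDATAI.  */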
+	for(i = 0; i < (sizeof(unsigned short) * 8); i++) {
+		unsigned int tmp;
+
+		DEASSERT(SGIMC_EEPROM_SECLOCK);
+		DELAY;
+		ASSERT(SGIMC_EEPROM_SECLOCK);
+		DELAY;
+		data <<= 1;
+		tmp = *cpu_control;
+		if(tmp & SGIMC_EEPROM_SDATAI)
+			data |= 1;
+	}
+	DEASSERT(SGIMC_EEPROM_SECLOCK);
+	DEASSERT(SGIMC_EEPROM_CSEL);
+	ASSERT(SGIMC_EEPROM_PRE);
+	ASSERT(SGIMC_EEPROM_SECLOCK);
+
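+	/* The EEPROM reports the scache size in pages; convert to bytes.  */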
+	data <<= PAGE_SHIFT;
+	if (data == 0)
+		return 0;
+
+	scache_size = data;
+
+	printk("R4600/R5000 SCACHE size %luK, linesize 32 bytes.\n",
+	       scache_size >> 10);
+
+	return 1;
+}
+
+/* XXX Check with wje if the Indy caches can differentiate between
+   writeback + invalidate and just invalidate.  */
+static struct bcache_ops indy_sc_ops = {
+	indy_sc_enable,
+	indy_sc_disable,
+	indy_sc_wback_invalidate,
+	indy_sc_wback_invalidate
+};
+
+void __init indy_sc_init(void)
+{
+	return;  /* Not for now, debugging ... */
+	if (indy_sc_probe()) {
+		indy_sc_enable();
+		bcops = &indy_sc_ops;
+	}
+}
