patch-2.3.99-pre9 linux/arch/mips64/sgi-ip27/ip27-init.c

diff -u --recursive --new-file v2.3.99-pre8/linux/arch/mips64/sgi-ip27/ip27-init.c linux/arch/mips64/sgi-ip27/ip27-init.c
@@ -1,5 +1,11 @@
+#include <linux/config.h>
 #include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/mmzone.h>	/* for numnodes */
+#include <linux/mm.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
 #include <asm/sn/types.h>
 #include <asm/sn/sn0/addrs.h>
 #include <asm/sn/sn0/hubni.h>
@@ -8,16 +14,33 @@
 #include <asm/ioc3.h>
 #include <asm/mipsregs.h>
 #include <asm/sn/gda.h>
+#include <asm/sn/intr.h>
+#include <asm/current.h>
+#include <asm/smp.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/sn/launch.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/sn0/ip27.h>
 
-typedef unsigned long cpumask_t;	/* into asm/sn/types.h */
-typedef unsigned long cpuid_t;
+#define CPU_NONE		(cpuid_t)-1
 
 #define	CPUMASK_CLRALL(p)	(p) = 0
 #define CPUMASK_SETB(p, bit)	(p) |= 1 << (bit)
+#define CPUMASK_CLRB(p, bit)	(p) &= ~(1ULL << (bit))
+#define CPUMASK_TSTB(p, bit)	((p) & (1ULL << (bit)))
+
+#define CNODEMASK_CLRALL(p)	(p) = 0
+#define CNODEMASK_TSTB(p, bit)	((p) & (1ULL << (bit)))
+#define CNODEMASK_SETB(p, bit)	((p) |= 1ULL << (bit))
 
 cpumask_t	boot_cpumask;
+static volatile cpumask_t boot_barrier;
 hubreg_t	region_mask = 0;
 static int	fine_mode = 0;
+int		maxcpus;
+static spinlock_t hub_mask_lock = SPIN_LOCK_UNLOCKED;
+static cnodemask_t hub_init_mask;
 
 cnodeid_t	nasid_to_compact_node[MAX_NASIDS];
 nasid_t		compact_to_nasid_node[MAX_COMPACT_NODES];
@@ -47,51 +70,6 @@
 		>> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
 }
 
-lboard_t * find_lboard_real(lboard_t *start, unsigned char brd_type)
-{
-	/* Search all boards stored on this node. */
-	while (start) {
-		if (start->brd_type == brd_type)
-			return start;
-		start = KLCF_NEXT(start);
-	}
-	/* Didn't find it. */
-	return (lboard_t *)NULL;
-}
-
-klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char struct_type)
-{
-	int index, j;
-
-	if (kli == (klinfo_t *)NULL) {
-		index = 0;
-	} else {
-		for (j = 0; j < KLCF_NUM_COMPS(brd); j++)
-			if (kli == KLCF_COMP(brd, j))
-				break;
-		index = j;
-		if (index == KLCF_NUM_COMPS(brd)) {
-			printk("find_component: Bad pointer: 0x%p\n", kli);
-			return (klinfo_t *)NULL;
-		}
-		index++;		/* next component */
-	}
-
-	for (; index < KLCF_NUM_COMPS(brd); index++) {
-		kli = KLCF_COMP(brd, index);
-		if (KLCF_COMP_TYPE(kli) == struct_type)
-			return kli;
-	}
-
-	/* Didn't find it. */
-	return (klinfo_t *)NULL;
-}
-
-klinfo_t *find_first_component(lboard_t *brd, unsigned char struct_type)
-{
-	return find_component(brd, (klinfo_t *)NULL, struct_type);
-}
-
 nasid_t get_actual_nasid(lboard_t *brd)
 {
 	klhub_t *hub;
@@ -117,7 +95,7 @@
 	int cpus_found = 0;
 	cpuid_t cpuid;
 
-	brd = find_lboard_real((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
+	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
 
 	do {
 		acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);
@@ -138,7 +116,7 @@
 		}
 		brd = KLCF_NEXT(brd);
 		if (brd)
-			brd = find_lboard_real(brd,KLTYPE_IP27);
+			brd = find_lboard(brd,KLTYPE_IP27);
 		else
 			break;
 	} while (brd);
@@ -182,9 +160,38 @@
 	return(highest + 1);
 }
 
+void alloc_cpupda(int i)
+{
+	cnodeid_t	node;
+	nasid_t		nasid;
+
+	node = get_cpu_cnode(i);
+	nasid = COMPACT_TO_NASID_NODEID(node);
+
+	cputonasid(i) = nasid;
+	cputocnode(i) = node;
+	cputoslice(i) = get_cpu_slice(i);
+}
+
+int cpu_enabled(cpuid_t cpu)
+{
+	if (cpu == CPU_NONE)
+		return 0;
+	return (CPUMASK_TSTB(boot_cpumask, cpu) != 0);
+}
+
+void initpdas(void)
+{
+	cpuid_t i;
+
+	for (i = 0; i < maxcpus; i++)
+		if (cpu_enabled(i))
+			alloc_cpupda(i);
+}
+
 void mlreset (void)
 {
-	int i, maxcpus;
+	int i;
 
 	fine_mode = is_fine_dirmode();
 
@@ -194,8 +201,11 @@
 	 */
 	CPUMASK_CLRALL(boot_cpumask);
 	maxcpus = cpu_node_probe(&boot_cpumask, &numnodes);
+	printk("Discovered %d cpus on %d nodes\n", maxcpus, numnodes);
+	initpdas();
 
 	gen_region_mask(&region_mask, numnodes);
+	CNODEMASK_CLRALL(hub_init_mask);
 
 	/*
 	 * Set all nodes' calias sizes to 8k
@@ -225,3 +235,257 @@
 	}
 }
 
+
+void intr_clear_bits(nasid_t nasid, volatile hubreg_t *pend, int base_level,
+							char *name)
+{
+	volatile hubreg_t bits;
+	int i;
+
+	/* Check pending interrupts */
+	if ((bits = HUB_L(pend)) != 0)
+		for (i = 0; i < N_INTPEND_BITS; i++)
+			if (bits & (1 << i))
+				LOCAL_HUB_CLR_INTR(base_level + i);
+}
+	
+void intr_clear_all(nasid_t nasid)
+{
+	REMOTE_HUB_S(nasid, PI_INT_MASK0_A, 0);
+	REMOTE_HUB_S(nasid, PI_INT_MASK0_B, 0);
+	REMOTE_HUB_S(nasid, PI_INT_MASK1_A, 0);
+	REMOTE_HUB_S(nasid, PI_INT_MASK1_B, 0);
+	intr_clear_bits(nasid, REMOTE_HUB_ADDR(nasid, PI_INT_PEND0),
+		INT_PEND0_BASELVL, "INT_PEND0");
+	intr_clear_bits(nasid, REMOTE_HUB_ADDR(nasid, PI_INT_PEND1),
+		INT_PEND1_BASELVL, "INT_PEND1");
+}
+
+void sn_mp_setup(void)
+{
+	cnodeid_t	cnode;
+#if 0
+	cpuid_t		cpu;
+#endif
+
+	for (cnode = 0; cnode < numnodes; cnode++) {
+#if 0
+		init_platform_nodepda();
+#endif
+		intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
+	}
+#if 0
+	for (cpu = 0; cpu < maxcpus; cpu++) {
+		init_platform_pda();
+	}
+#endif
+}
+
+void per_hub_init(cnodeid_t cnode)
+{
+	cnodemask_t	done;
+
+	spin_lock(&hub_mask_lock);
+	/* Test our bit. */
+	if (!(done = CNODEMASK_TSTB(hub_init_mask, cnode))) {
+		/* Turn our bit on in the mask. */
+		CNODEMASK_SETB(hub_init_mask, cnode);
+	}
+	spin_unlock(&hub_mask_lock);
+
+	/*
+	 * Do the actual initialization if it hasn't been done yet.
+	 * We don't need to hold a lock for this work.
+	 */
+	if (!done) {
+		hub_rtc_init(cnode);
+	}
+}
+
+/*
+ * This is similar to hard_smp_processor_id().
+ */
+cpuid_t getcpuid(void)
+{
+	klcpu_t *klcpu;
+
+	klcpu = nasid_slice_to_cpuinfo(get_nasid(),LOCAL_HUB_L(PI_CPU_NUM));
+	return klcpu->cpu_info.virtid;
+}
+
+void per_cpu_init(void)
+{
+	extern void install_cpu_nmi_handler(int slice);
+	extern void load_mmu(void);
+	static int is_slave = 0;
+	cpuid_t cpu = getcpuid();
+	cnodeid_t cnode = get_compact_nodeid();
+
+	current_cpu_data.asid_cache = ASID_FIRST_VERSION;
+#if 0
+	intr_init();
+#endif
+	set_cp0_status(ST0_IM, 0);
+	per_hub_init(cnode);
+	cpu_time_init();
+	if (smp_processor_id())	/* master can't do this early, no kmalloc */
+		install_cpuintr(cpu);
+	/* Install our NMI handler if symmon hasn't installed one. */
+	install_cpu_nmi_handler(cputoslice(smp_processor_id()));
+#if 0
+	install_tlbintr(cpu);
+#endif
+	set_cp0_status(SRB_DEV0 | SRB_DEV1, SRB_DEV0 | SRB_DEV1);
+	if (is_slave) {
+		set_cp0_status(ST0_BEV, 0);
+		if (mips4_available)
+			set_cp0_status(ST0_XX, ST0_XX);
+		set_cp0_status(ST0_KX|ST0_SX|ST0_UX, ST0_KX|ST0_SX|ST0_UX);
+		sti();
+		load_mmu();
+	}
+	if (is_slave == 0)
+		is_slave = 1;
+}
+
+cnodeid_t get_compact_nodeid(void)
+{
+	nasid_t nasid;
+
+	nasid = get_nasid();
+	/*
+	 * Map the physical node id to a virtual node id (virtual node ids
+	 * are contiguous).
+	 */
+	return NASID_TO_COMPACT_NODEID(nasid);
+}
+
+#ifdef CONFIG_SMP
+
+void __init smp_callin(void)
+{
+#if 0
+	calibrate_delay();
+	smp_store_cpu_info(cpuid);
+#endif
+}
+
+int __init start_secondary(void)
+{
+	extern int cpu_idle(void);
+	extern atomic_t smp_commenced;
+
+	smp_callin();
+	while (!atomic_read(&smp_commenced));
+	return cpu_idle();
+}
+
+static atomic_t numstarted = ATOMIC_INIT(0);
+void cboot(void)
+{
+	atomic_inc(&numstarted);
+	CPUMASK_CLRB(boot_barrier, getcpuid());	/* needs atomicity */
+	per_cpu_init();
+#if 0
+	ecc_init();
+	bte_lateinit();
+	init_mfhi_war();
+#endif
+	_flush_tlb_all();
+	flush_cache_all();
+	start_secondary();
+}
+
+void allowboot(void)
+{
+	int		num_cpus = 0;
+	cpuid_t		cpu;
+	cnodeid_t	cnode;
+	extern void	bootstrap(void);
+
+	sn_mp_setup();
+	/* Master has already done per_cpu_init() */
+	install_cpuintr(getcpuid());
+#if 0
+	bte_lateinit();
+	ecc_init();
+#endif
+
+	boot_barrier = boot_cpumask;
+	/* Launch slaves. */
+	for (cpu = 0; cpu < maxcpus; cpu++) {
+		if (cpu == smp_processor_id()) {
+			num_cpus++;
+			/* We're already started, clear our bit */
+			CPUMASK_CLRB(boot_barrier, cpu);
+			continue;
+		}
+
+		/* Skip holes in CPU space */
+		if (CPUMASK_TSTB(boot_cpumask, cpu)) {
+			struct task_struct *p;
+
+			/*
+			 * The following code is purely to make sure
+			 * Linux can schedule processes on this slave.
+			 */
+			kernel_thread(0, NULL, CLONE_PID);
+			p = init_task.prev_task;
+			sprintf(p->comm, "%s%d", "Idle", num_cpus);
+			init_tasks[num_cpus] = p;
+			p->processor = num_cpus;
+			p->has_cpu = 1; /* we schedule the first task manually */
+			del_from_runqueue(p);
+			unhash_process(p);
+			/* Attach to the address space of init_task. */
+			atomic_inc(&init_mm.mm_count);
+			p->active_mm = &init_mm;
+			
+			/*
+		 	 * Launch a slave into bootstrap().
+		 	 * It doesn't take an argument, and we
+			 * set sp to the kernel stack of the newly 
+			 * created idle process, gp to the proc struct
+			 * (so that current-> works).
+		 	 */
+			LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu), 
+				(launch_proc_t)bootstrap, 0, 
+				(void *)((unsigned long)p+KERNEL_STACK_SIZE - 32),
+				(void *)p);
+
+			/*
+			 * Now optimistically set the mapping arrays. We
+			 * need to wait here, verify the cpu booted up, then
+			 * fire up the next cpu.
+			 */
+			__cpu_number_map[cpu] = num_cpus;
+			__cpu_logical_map[num_cpus] = cpu;
+			num_cpus++;
+			/* smp_num_cpus++; Do after smp_send_reschedule works */
+		}
+	}
+
+	/* while(atomic_read(&numstarted) != (maxcpus - num_cpus)) */
+	if (maxcpus > 1) while(atomic_read(&numstarted) == 0);
+	printk("Holding %d cpus slave\n", atomic_read(&numstarted));
+
+#ifdef LATER
+	Wait logic goes here.
+#endif
+	for (cnode = 0; cnode < numnodes; cnode++) {
+#if 0
+		if (cnodetocpu(cnode) == -1) {
+			printk("Initializing headless hub,cnode %d", cnode);
+			per_hub_init(cnode);
+		}
+#endif
+	}
+#if 0
+	cpu_io_setup();
+	init_mfhi_war();
+#endif
+}
+
+#else /* CONFIG_SMP */
+void cboot(void) {}
+#endif /* CONFIG_SMP */
