patch-2.3.99-pre6 linux/arch/arm/mm/init.c

diff -u --recursive --new-file v2.3.99-pre5/linux/arch/arm/mm/init.c linux/arch/arm/mm/init.c
@@ -1,9 +1,8 @@
 /*
  *  linux/arch/arm/mm/init.c
  *
- *  Copyright (C) 1995-1999 Russell King
+ *  Copyright (C) 1995-2000 Russell King
  */
-
 #include <linux/config.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
@@ -32,9 +31,22 @@
 
 #include "map.h"
 
+#ifdef CONFIG_CPU_32
+#define TABLE_OFFSET	(PTRS_PER_PTE)
+#else
+#define TABLE_OFFSET	0
+#endif
+#define TABLE_SIZE	((TABLE_OFFSET + PTRS_PER_PTE) * sizeof(void *))
+
 static unsigned long totalram_pages;
-struct meminfo meminfo;
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern int _stext, _text, _etext, _edata, _end;
+
+/*
+ * The sole use of this is to pass memory configuration
+ * data from paging_init to mem_init.
+ */
+static struct meminfo __initdata meminfo;
 
 /*
  * empty_bad_page is the page that is used for page faults when
@@ -119,33 +131,36 @@
 void show_mem(void)
 {
 	int free = 0, total = 0, reserved = 0;
-	int shared = 0, cached = 0;
-	struct page *page, *end;
+	int shared = 0, cached = 0, node;
 
 	printk("Mem-info:\n");
 	show_free_areas();
 	printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
 
-	page = mem_map;
-	end  = mem_map + max_mapnr;
+	for (node = 0; node < numnodes; node++) {
+		struct page *page, *end;
 
-	do {
-		if (PageSkip(page)) {
-			page = page->next_hash;
-			if (page == NULL)
-				break;
-		}
-		total++;
-		if (PageReserved(page))
-			reserved++;
-		else if (PageSwapCache(page))
-			cached++;
-		else if (!page_count(page))
-			free++;
-		else
-			shared += atomic_read(&page->count) - 1;
-		page++;
-	} while (page < end);
+		page = NODE_MEM_MAP(node);
+		end  = page + NODE_DATA(node)->node_size;
+
+		do {
+			if (PageSkip(page)) {
+				page = page->next_hash;
+				if (page == NULL)
+					break;
+			}
+			total++;
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (!page_count(page))
+				free++;
+			else
+				shared += atomic_read(&page->count) - 1;
+			page++;
+		} while (page < end);
+	}
 
 	printk("%d pages of RAM\n", total);
 	printk("%d free pages\n", free);
@@ -158,24 +173,173 @@
 	show_buffers();
 }
 
+#define O_PFN_DOWN(x)	((x) >> PAGE_SHIFT)
+#define V_PFN_DOWN(x)	O_PFN_DOWN(__pa(x))
+
+#define O_PFN_UP(x)	(PAGE_ALIGN(x) >> PAGE_SHIFT)
+#define V_PFN_UP(x)	O_PFN_UP(__pa(x))
+
+#define PFN_SIZE(x)	((x) >> PAGE_SHIFT)
+#define PFN_RANGE(s,e)	PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \
+				(((unsigned long)(s)) & PAGE_MASK))
+
+static unsigned int __init
+find_bootmap_pfn(struct meminfo *mi, unsigned int bootmap_pages)
+{
+	unsigned int start_pfn, bank, bootmap_pfn;
+
+	start_pfn   = V_PFN_UP(&_end);
+	bootmap_pfn = 0;
+
+	/*
+	 * FIXME: We really want to avoid allocating the bootmap
+	 * over the top of the initrd.
+	 */
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (initrd_start) {
+		if (__pa(initrd_end) > mi->end) {
+			printk ("initrd extends beyond end of memory "
+				"(0x%08lx > 0x%08lx) - disabling initrd\n",
+				__pa(initrd_end), mi->end);
+			initrd_start = 0;
+			initrd_end   = 0;
+		}
+	}
+#endif
+
+	for (bank = 0; bank < mi->nr_banks; bank ++) {
+		unsigned int start, end;
+
+		if (mi->bank[bank].size == 0)
+			continue;
+
+		start = O_PFN_UP(mi->bank[bank].start);
+		end   = O_PFN_DOWN(mi->bank[bank].size +
+				   mi->bank[bank].start);
+
+		if (end < start_pfn)
+			continue;
+
+		if (start < start_pfn)
+			start = start_pfn;
+
+		if (end <= start)
+			continue;
+
+		if (end - start >= bootmap_pages) {
+			bootmap_pfn = start;
+			break;
+		}
+	}
+
+	if (bootmap_pfn == 0)
+		BUG();
+
+	return bootmap_pfn;
+}
+
+/*
+ * Initialise one node of the bootmem allocator.  For now, we
+ * only initialise node 0.  Notice that we have a bootmem
+ * bitmap per node.
+ */
+static void __init setup_bootmem_node(int node, struct meminfo *mi)
+{
+	unsigned int end_pfn, start_pfn, bootmap_pages, bootmap_pfn;
+	unsigned int i;
+
+	if (node != 0)	/* only initialise node 0 for now */
+		return;
+
+	start_pfn     = O_PFN_UP(PHYS_OFFSET);
+	end_pfn	      = O_PFN_DOWN(mi->end);
+	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
+	bootmap_pfn   = find_bootmap_pfn(mi, bootmap_pages);
+
+	/*
+	 * Initialise the boot-time allocator
+	 */
+	init_bootmem_node(node, bootmap_pfn, start_pfn, end_pfn);
+
+	/*
+	 * Register all available RAM with the bootmem allocator.
+	 */
+	for (i = 0; i < mi->nr_banks; i++)
+		if (mi->bank[i].size)
+			free_bootmem_node(node, mi->bank[i].start,
+					  PFN_SIZE(mi->bank[i].size) << PAGE_SHIFT);
+
+	reserve_bootmem_node(node, bootmap_pfn << PAGE_SHIFT,
+			     bootmap_pages << PAGE_SHIFT);
+}
+
+/*
+ * Initialise the bootmem allocator.
+ */
+void __init bootmem_init(struct meminfo *mi)
+{
+	unsigned int i, node;
+
+	/*
+	 * Calculate the physical address of the top of memory.
+	 * Note that there are no guarantees assumed about the
+	 * ordering of the bank information.
+	 */
+	mi->end = 0;
+	for (i = 0; i < mi->nr_banks; i++) {
+		unsigned long end;
+
+		if (mi->bank[i].size != 0) {
+			end = mi->bank[i].start + mi->bank[i].size;
+			if (mi->end < end)
+				mi->end = end;
+		}
+	}
+
+	max_low_pfn = O_PFN_DOWN(mi->end - PHYS_OFFSET);
+
+	/*
+	 * Setup each node
+	 */
+	for (node = 0; node < numnodes; node++)
+		setup_bootmem_node(node, mi);
+
+	/*
+	 * Register the kernel text and data with bootmem.
+	 * Note that this can only be in node 0.
+	 */
+	reserve_bootmem_node(0, V_PFN_DOWN(&_stext) << PAGE_SHIFT,
+			     PFN_RANGE(&_stext, &_end) << PAGE_SHIFT);
+
+#ifdef CONFIG_CPU_32
+	/*
+	 * Reserve the page tables.  These are already in use,
+	 * and can only be in node 0.
+	 */
+	reserve_bootmem_node(0, V_PFN_DOWN(swapper_pg_dir) << PAGE_SHIFT,
+			     PFN_SIZE(PTRS_PER_PGD * sizeof(void *)) << PAGE_SHIFT);
+#endif
+#ifdef CONFIG_BLK_DEV_INITRD
+	/*
+	 * This may be in any bank.  Currently, we assume that
+	 * it is in bank 0.
+	 */
+	if (initrd_start)
+		reserve_bootmem_node(0, V_PFN_DOWN(initrd_start) << PAGE_SHIFT,
+				     PFN_RANGE(initrd_start, initrd_end) << PAGE_SHIFT);
+#endif
+}
+
 /*
  * paging_init() sets up the page tables...
  */
 void __init paging_init(struct meminfo *mi)
 {
 	void *zero_page, *bad_page, *bad_table;
-	unsigned long zone_size[MAX_NR_ZONES];
-	int i;
+	int node;
 
 	memcpy(&meminfo, mi, sizeof(meminfo));
 
-#ifdef CONFIG_CPU_32
-#define TABLE_OFFSET	(PTRS_PER_PTE)
-#else
-#define TABLE_OFFSET	0
-#endif
-#define TABLE_SIZE	((TABLE_OFFSET + PTRS_PER_PTE) * sizeof(void *))
-
 	/*
 	 * allocate what we need for the bad pages
 	 */
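
The PFN helpers above encode deliberate rounding directions: bank starts are rounded up and bank ends rounded down (O_PFN_UP/O_PFN_DOWN), so a partial page of RAM is never handed to the allocator, while PFN_RANGE rounds outwards so that reservations such as the kernel image always cover their partial pages. A standalone sketch of the arithmetic, assuming 4kB pages (PAGE_SHIFT = 12) and omitting the V_PFN_* variants, which merely translate through __pa() first:

	/*
	 * Sketch of the PFN rounding helpers; the 4kB page size is
	 * an assumption for the example values only.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	#define O_PFN_DOWN(x)	((x) >> PAGE_SHIFT)		/* round down */
	#define O_PFN_UP(x)	(PAGE_ALIGN(x) >> PAGE_SHIFT)	/* round up */

	#define PFN_SIZE(x)	((x) >> PAGE_SHIFT)
	/* Whole pages covering the byte range [s, e) - rounds outwards. */
	#define PFN_RANGE(s,e)	PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \
					(((unsigned long)(s)) & PAGE_MASK))

	int main(void)
	{
		unsigned long addr = 0x8000100UL;  /* 0x100 bytes into a page */

		printf("O_PFN_DOWN: 0x%lx\n", O_PFN_DOWN(addr));  /* 0x8000 */
		printf("O_PFN_UP:   0x%lx\n", O_PFN_UP(addr));    /* 0x8001 */
		printf("PFN_RANGE:  %lu\n",
		       PFN_RANGE(addr, addr + 2 * PAGE_SIZE));    /* 3 */
		return 0;
	}
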
@@ -186,31 +350,42 @@
 	/*
 	 * initialise the page tables
 	 */
-	pagetable_init();
+	pagetable_init(mi);
 	flush_tlb_all();
 
 	/*
-	 * Initialise the zones and mem_map
+	 * initialise the zones within each node
 	 */
-	for (i = 0; i < MAX_NR_ZONES; i++)
-		zone_size[i] = 0;
+	for (node = 0; node < numnodes; node++) {
+		unsigned long zone_size[MAX_NR_ZONES];
+		unsigned long zhole_size[MAX_NR_ZONES];
+		struct bootmem_data *bdata;
+		pg_data_t *pgdat;
+		int i;
+
+		/*
+		 * Initialise the zone size information.
+		 */
+		for (i = 0; i < MAX_NR_ZONES; i++) {
+			zone_size[i]  = 0;
+			zhole_size[i] = 0;
+		}
 
-	/*
-	 * Calculate the size of the zones.  On ARM, we don't have
-	 * any problems with DMA or highmem, so all memory is
-	 * allocated to the DMA zone.
-	 */
-	for (i = 0; i < mi->nr_banks; i++) {
-		if (mi->bank[i].size) {
-			unsigned int end;
+		pgdat = NODE_DATA(node);
+		bdata = pgdat->bdata;
 
-			end = (mi->bank[i].start - PHYS_OFFSET +
-			       mi->bank[i].size) >> PAGE_SHIFT;
-			if (zone_size[0] < end)
-				zone_size[0] = end;
-		}
+		/*
+		 * The size of this node has already been determined.
+		 * If we need to do anything fancy with the allocation
+		 * of this memory to the zones, now is the time to do
+		 * it.  For now, we don't touch zhole_size.
+		 */
+		zone_size[0] = bdata->node_low_pfn -
+				(bdata->node_boot_start >> PAGE_SHIFT);
+
+		free_area_init_node(node, pgdat, zone_size,
+				bdata->node_boot_start, zhole_size);
 	}
-	free_area_init(zone_size);
 
 	/*
 	 * finish off the bad pages once
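
The per-node loop above sizes each node's zone 0 straight from its bootmem descriptor. With invented numbers for a single node (128MB of RAM at physical 0xc0000000, 4kB pages), the zone_size[0] arithmetic works out as follows:

	/*
	 * Sketch of the zone_size[0] computation; the addresses and
	 * the 4kB page size are assumptions for the example.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT	12

	int main(void)
	{
		unsigned long node_boot_start = 0xc0000000UL;	/* start of RAM */
		unsigned long node_low_pfn    = 0xc8000UL;	/* end-of-RAM pfn */

		/* Same computation as zone_size[0] in paging_init() above. */
		unsigned long pages = node_low_pfn -
				      (node_boot_start >> PAGE_SHIFT);

		printf("zone 0: %lu pages (%lu MB)\n",
		       pages, pages >> (20 - PAGE_SHIFT));  /* 32768, 128 */
		return 0;
	}
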
@@ -256,32 +431,33 @@
  */
 void __init mem_init(void)
 {
-	extern char __init_begin, __init_end, _text, _etext, _end;
+	extern char __init_begin, __init_end;
 	unsigned int codepages, datapages, initpages;
-	int i;
+	int i, node;
 
 	codepages = &_etext - &_text;
 	datapages = &_end - &_etext;
 	initpages = &__init_end - &__init_begin;
 
-	max_mapnr   = max_low_pfn;
-	high_memory = (void *)__va(PHYS_OFFSET + max_low_pfn * PAGE_SIZE);
+	high_memory = (void *)__va(meminfo.end);
+	max_mapnr   = MAP_NR(high_memory);
 
 	/*
 	 * We may have non-contiguous memory.  Setup the PageSkip stuff,
 	 * and mark the areas of mem_map which can be freed
 	 */
 	if (meminfo.nr_banks != 1)
-		create_memmap_holes();
+		create_memmap_holes(&meminfo);
 
 	/* this will put all unused low memory onto the freelists */
-	totalram_pages += free_all_bootmem();
+	for (node = 0; node < numnodes; node++)
+		totalram_pages += free_all_bootmem_node(node);
 
 	/*
 	 * Since our memory may not be contiguous, calculate the
 	 * real number of pages we have in this system
 	 */
-	printk("Memory:");
+	printk(KERN_INFO "Memory:");
 
 	num_physpages = 0;
 	for (i = 0; i < meminfo.nr_banks; i++) {
@@ -290,7 +466,8 @@
 	}
 
 	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
-	printk("Memory: %luKB available (%dK code, %dK data, %dK init)\n",
+	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
+		"%dK data, %dK init)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
 		codepages >> 10, datapages >> 10, initpages >> 10);
 

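Both bootmem_init() and mem_init() above iterate over the meminfo bank array, skipping empty slots and assuming nothing about bank ordering. A standalone sketch of the "highest end wins" computation of mi->end, using a cut-down stand-in for the real ARM struct meminfo and invented bank addresses:

	#include <stdio.h>

	#define NR_BANKS	4

	/* Cut-down stand-in for the ARM struct meminfo. */
	struct meminfo {
		int nr_banks;
		unsigned long end;		/* computed below */
		struct {
			unsigned long start;	/* physical base of bank */
			unsigned long size;	/* bytes; 0 = unused slot */
		} bank[NR_BANKS];
	};

	int main(void)
	{
		/* Two populated banks with a hole between them. */
		struct meminfo mi = { 2, 0, {
			{ 0xc0000000UL, 0x01000000UL },	/* 16MB */
			{ 0xc8000000UL, 0x00800000UL },	/*  8MB */
		} };
		int i;

		/* Same shape as the loop in bootmem_init() above:
		 * banks may appear in any order, so take the highest
		 * end address over all non-empty banks. */
		for (i = 0; i < mi.nr_banks; i++) {
			unsigned long end;

			if (mi.bank[i].size == 0)
				continue;
			end = mi.bank[i].start + mi.bank[i].size;
			if (mi.end < end)
				mi.end = end;
		}

		printf("top of memory: 0x%08lx\n", mi.end);  /* 0xc8800000 */
		return 0;
	}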