patch-2.3.99-pre7 linux/fs/buffer.c

diff -u --recursive --new-file v2.3.99-pre6/linux/fs/buffer.c linux/fs/buffer.c
@@ -789,7 +789,7 @@
 	/*
 	 * Run the hooks that have to be done when a page I/O has completed.
 	 */
-	if (test_and_clear_bit(PG_decr_after, &page->flags))
+	if (PageTestandClearDecrAfter(page))
 		atomic_dec(&nr_async_pages);
 
 	UnlockPage(page);
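
The hunk above is pure cleanup: the open-coded bit operation on page->flags
is replaced by the equivalent page-flag accessor macro, and the commented-out
clear_bit() in brw_page further down gets the same treatment with
ClearPageError(). Assuming the macros follow the usual accessor pattern of
<linux/mm.h> in this era, they expand to exactly the code they replace; a
sketch, not quoted from the patch:

    #define PageTestandClearDecrAfter(page) \
            test_and_clear_bit(PG_decr_after, &(page)->flags)
    #define ClearPageError(page) \
            clear_bit(PG_error, &(page)->flags)
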
@@ -1578,7 +1578,6 @@
 		nr++;
 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
 
-	++current->maj_flt;
 	if (nr) {
 		if (Page_Uptodate(page))
 			BUG();
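
This hunk, together with the matching removal in brw_page below, drops the
major-fault accounting from the block I/O paths: bumping current->maj_flt
here charged the calling task with a major fault on every page read through
the buffer cache, whether or not a page fault was involved at all. The
counter is conventionally maintained where the fault is actually taken,
along these lines (hypothetical placement and condition, modeled on the
fault-handling path rather than quoted from it):

    /* Sketch: charge the fault where it happens, not in the I/O path.
     * had_to_read_from_disk is a hypothetical condition. */
    if (had_to_read_from_disk)
            current->maj_flt++;     /* major fault: blocked on I/O */
    else
            current->min_flt++;     /* minor fault: page already resident */
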
@@ -1958,7 +1957,7 @@
 
 	if (!PageLocked(page))
 		panic("brw_page: page not locked for I/O");
-//	clear_bit(PG_error, &page->flags);
+//	ClearPageError(page);
 	/*
 	 * We pretty much rely on the page lock for this, because
 	 * create_page_buffers() might sleep.
@@ -2002,8 +2001,6 @@
 		}
 		bh = bh->b_this_page;
 	} while (bh != head);
-	if (rw == READ)
-		++current->maj_flt;
 	if ((rw == READ) && nr) {
 		if (Page_Uptodate(page))
 			BUG();
@@ -2115,6 +2112,29 @@
 }
 
 /*
+ * Sync all the buffers on one page..
+ *
+ * If we have old buffers that are locked, we'll
+ * wait on them, but we won't wait on the new ones
+ * we're writing out now.
+ *
+ * This all is required so that we can free up memory
+ * later.
+ */
+static void sync_page_buffers(struct buffer_head *bh)
+{
+	struct buffer_head * tmp;
+
+	tmp = bh;
+	do {
+		struct buffer_head *p = tmp;
+		tmp = tmp->b_this_page;
+		if (buffer_dirty(p) && !buffer_locked(p))
+			ll_rw_block(WRITE, 1, &p);
+	} while (tmp != bh);
+}
+
+/*
  * Can the buffer be thrown out?
  */
 #define BUFFER_BUSY_BITS	((1<<BH_Dirty) | (1<<BH_Lock) | (1<<BH_Protected))
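
The new sync_page_buffers() walks the circular list that links all of a
page's buffer_heads through b_this_page, and starts writeback on every
buffer that is dirty but not already locked for I/O; ll_rw_block() submits
the write and returns without waiting for completion, so the whole pass is
asynchronous. Note that tmp is advanced before p is acted on, so the walk
never depends on a buffer it has already handed to the block layer. The
same ring-walking idiom in isolation (stand-in types, just to show the
shape):

    struct ring_node {
            struct ring_node *next;         /* plays the role of b_this_page */
            int dirty;
    };

    static void walk_ring(struct ring_node *head)
    {
            struct ring_node *tmp = head;
            do {
                    struct ring_node *p = tmp;
                    tmp = tmp->next;        /* advance first, then act on p */
                    if (p->dirty)
                            p->dirty = 0;   /* stand-in for ll_rw_block(WRITE, 1, &p) */
            } while (tmp != head);
    }
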
@@ -2133,16 +2153,15 @@
  */
 int try_to_free_buffers(struct page * page)
 {
-	struct buffer_head * tmp, * p, * bh = page->buffers;
+	struct buffer_head * tmp, * bh = page->buffers;
 	int index = BUFSIZE_INDEX(bh->b_size);
-	int ret;
 
 	spin_lock(&lru_list_lock);
 	write_lock(&hash_table_lock);
 	spin_lock(&free_list[index].lock);
 	tmp = bh;
 	do {
-		p = tmp;
+		struct buffer_head *p = tmp;
 
 		tmp = tmp->b_this_page;
 		if (buffer_busy(p))
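
This hunk is tidying for the rewritten exit paths that follow: p becomes
local to the loop body, and the ret/out bookkeeping disappears because each
exit path now unlocks and returns directly. The locking discipline itself
is unchanged; the three locks are taken in one fixed order and dropped in
reverse on both exits, which is what keeps the rewrite below deadlock-free:

    /* Lock ordering in try_to_free_buffers(), as visible in the patch. */
    spin_lock(&lru_list_lock);              /* 1 */
    write_lock(&hash_table_lock);           /* 2 */
    spin_lock(&free_list[index].lock);      /* 3 */
    /* ... inspect, and possibly detach, the page's buffers ... */
    spin_unlock(&free_list[index].lock);    /* 3 */
    write_unlock(&hash_table_lock);         /* 2 */
    spin_unlock(&lru_list_lock);            /* 1 */
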
@@ -2172,19 +2191,18 @@
 	/* And free the page */
 	page->buffers = NULL;
 	__free_page(page);
-	ret = 1;
-out:
 	spin_unlock(&free_list[index].lock);
 	write_unlock(&hash_table_lock);
 	spin_unlock(&lru_list_lock);
-	return ret;
+	return 1;
 
 busy_buffer_page:
 	/* Uhhuh, start writeback so that we don't end up with all dirty pages */
-	if (buffer_dirty(p))
-		wakeup_bdflush(0);
-	ret = 0;
-	goto out;
+	spin_unlock(&free_list[index].lock);
+	write_unlock(&hash_table_lock);
+	spin_unlock(&lru_list_lock);	
+	sync_page_buffers(bh);
+	return 0;
 }
 
 /* ================== Debugging =================== */
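
The behavioural change of the patch is this failure path. Previously a busy
page with a dirty buffer only triggered wakeup_bdflush(0), leaving it to
bdflush to find the dirty buffers eventually; now try_to_free_buffers()
drops all three locks itself (it has to, since ll_rw_block() may sleep) and
starts writeback on exactly the page it failed to free, so that a later
retry has a real chance of succeeding. A caller on the page-reclaim side
would use the return value roughly like this (hypothetical caller,
illustrating the contract only):

    if (page->buffers) {
            if (!try_to_free_buffers(page)) {
                    /* Busy: writeback has been started on the page's
                     * dirty buffers; leave it and come back later. */
                    goto next_page;
            }
            /* Buffers are gone; the page itself may now be freeable. */
    }
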
@@ -2277,7 +2295,7 @@
 		    __get_free_pages(GFP_ATOMIC, order);
 	} while (hash_table == NULL && --order > 0);
 	printk("Buffer-cache hash table entries: %d (order: %d, %ld bytes)\n",
-	       nr_hash, order, (1UL<<order) * PAGE_SIZE);
+	       nr_hash, order, (PAGE_SIZE << order));
 
 	if (!hash_table)
 		panic("Failed to allocate buffer hash table\n");
