patch-2.4.2 linux/fs/smbfs/cache.c

diff -u --recursive --new-file v2.4.1/linux/fs/smbfs/cache.c linux/fs/smbfs/cache.c
@@ -4,8 +4,7 @@
  * Copyright (C) 1997 by Bill Hawes
  *
  * Routines to support directory cacheing using the page cache.
- * Right now this only works for smbfs, but will be generalized
- * for use with other filesystems.
+ * This cache code is almost directly taken from ncpfs.
  *
  * Please add a note about your changes to smbfs in the ChangeLog file.
  */
@@ -22,271 +21,222 @@
 
 #include "smb_debug.h"
 
-
-static inline struct address_space * 
-get_cache_inode(struct cache_head *cachep)
-{
-	return page_cache_entry((unsigned long) cachep)->mapping;
-}
-
 /*
- * Try to reassemble the old dircache. If we fail - set ->valid to 0.
- * In any case, get at least the page at offset 0 (with ->valid==0 if
- * the old one didn't make it, indeed).
+ * Force the next attempt to use the cache to be a timeout.
+ * If we can't find the page that's fine, it will cause a refresh.
  */
-struct cache_head *
-smb_get_dircache(struct dentry * dentry)
+void
+smb_invalid_dir_cache(struct inode * dir)
 {
-	struct address_space * mapping = &dentry->d_inode->i_data;
-	struct cache_head * cachep = NULL;
-	struct page *page;
-
-	page = find_lock_page(mapping, 0);
-	if (!page) {
-		/* Sorry, not even page 0 around */
-		page = grab_cache_page(mapping, 0);
-		if (!page)
-			goto out;
-		cachep = kmap(page);
-		memset((char*)cachep, 0, PAGE_SIZE);
+	struct smb_sb_info *server = server_from_inode(dir);
+	union  smb_dir_cache *cache = NULL;
+	struct page *page = NULL;
+
+	page = grab_cache_page(&dir->i_data, 0);
+	if (!page)
 		goto out;
-	}
-	cachep = kmap(page);
-	if (cachep->valid) {
-		/*
-		 * OK, at least the page 0 survived and seems to be promising.
-		 * Let's try to reassemble the rest.
-		 */
-		struct cache_index * index = cachep->index;
-		unsigned long offset;
-		int i;
-
-		for (offset = 0, i = 0; i < cachep->pages; i++, index++) {
-			offset += PAGE_SIZE;
-			page = find_lock_page(mapping,offset>>PAGE_CACHE_SHIFT);
-			if (!page) {
-				/* Alas, poor Yorick */
-				cachep->valid = 0;
-				goto out;
-			}
-			index->block = kmap(page);
-		}
-	}
-out:
-	return cachep;
-}
 
-/*
- * Unlock and release the data blocks.
- */
-static void
-smb_free_cache_blocks(struct cache_head * cachep)
-{
-	struct cache_index * index = cachep->index;
-	struct page * page;
-	int i;
-
-	VERBOSE("freeing %d blocks\n", cachep->pages);
-	for (i = 0; i < cachep->pages; i++, index++) {
-		if (!index->block)
-			continue;
-		page = page_cache_entry((unsigned long) index->block);
-		index->block = NULL;
-		kunmap(page);
-		UnlockPage(page);
-		page_cache_release(page);
-	}
-}
+	if (!Page_Uptodate(page))
+		goto out_unlock;
+
+	cache = kmap(page);
+	cache->head.time = jiffies - SMB_MAX_AGE(server);
 
-/*
- * Unlocks and releases the dircache.
- */
-void
-smb_free_dircache(struct cache_head * cachep)
-{
-	struct page *page;
-	VERBOSE("freeing cache\n");
-	smb_free_cache_blocks(cachep);
-	page = page_cache_entry((unsigned long) cachep);
 	kunmap(page);
+	SetPageUptodate(page);
+out_unlock:
 	UnlockPage(page);
 	page_cache_release(page);
+out:
 }
 
 /*
- * Initializes the dircache. We release any existing data blocks,
- * and then clear the cache_head structure.
+ * Mark all dentries for 'parent' as invalid, forcing them to be re-read
  */
 void
-smb_init_dircache(struct cache_head * cachep)
+smb_invalidate_dircache_entries(struct dentry *parent)
 {
-	VERBOSE("initializing cache, %d blocks\n", cachep->pages);
-	smb_free_cache_blocks(cachep);
-	memset(cachep, 0, sizeof(struct cache_head));
+	struct smb_sb_info *server = server_from_dentry(parent);
+	struct list_head *next;
+	struct dentry *dentry;
+
+	spin_lock(&dcache_lock);
+	next = parent->d_subdirs.next;
+	while (next != &parent->d_subdirs) {
+		dentry = list_entry(next, struct dentry, d_child);
+		dentry->d_fsdata = NULL;
+		smb_age_dentry(server, dentry);
+		next = next->next;
+	}
+	spin_unlock(&dcache_lock);
 }
 
-/*
- * Add a new entry to the cache.  This assumes that the
- * entries are coming in order and are added to the end.
- */
-void
-smb_add_to_cache(struct cache_head * cachep, struct cache_dirent *entry,
-			off_t fpos)
-{
-	struct address_space * mapping = get_cache_inode(cachep);
-	struct cache_index * index;
-	struct cache_block * block;
-	struct page *page;
-	unsigned long page_off;
-	unsigned int nent, offset, len = entry->len;
-	unsigned int needed = len + sizeof(struct cache_entry);
-
-	VERBOSE("cache %p, status %d, adding %.*s at %ld\n",
-		mapping, cachep->status, entry->len, entry->name, fpos);
 
+static int
+smb_d_validate(struct dentry *dentry)
+{
+	unsigned long dent_addr = (unsigned long) dentry;
+	unsigned long min_addr = PAGE_OFFSET;
+	unsigned long align_mask = 0x0F;
+	unsigned int len;
+	int valid = 0;
+
+	if (dent_addr < min_addr)
+		goto bad_addr;
+	if (dent_addr > (unsigned long)high_memory - sizeof(struct dentry))
+		goto bad_addr;
+	if ((dent_addr & ~align_mask) != dent_addr)
+		goto bad_align;
+	if ((!kern_addr_valid(dent_addr)) || (!kern_addr_valid(dent_addr -1 +
+						       sizeof(struct dentry))))
+		goto bad_addr;
 	/*
-	 * Don't do anything if we've had an error ...
+	 * Looks safe enough to dereference ...
 	 */
-	if (cachep->status)
+	len = dentry->d_name.len;
+	if (len > SMB_MAXPATHLEN)
 		goto out;
-
-	index = &cachep->index[cachep->idx];
-	if (!index->block)
-		goto get_block;
-
-	/* space available? */
-	if (needed < index->space) {
-	add_entry:
-		nent = index->num_entries;
-		index->num_entries++;
-		index->space -= needed;
-		offset = index->space + 
-			 index->num_entries * sizeof(struct cache_entry);
-		block = index->block;
-		memcpy(&block->cb_data.names[offset], entry->name, len);
-		block->cb_data.table[nent].namelen = len;
-		block->cb_data.table[nent].offset = offset;
-		block->cb_data.table[nent].ino = entry->ino;
-		cachep->entries++;
-
-		VERBOSE("added entry %.*s, len=%d, pos=%ld, entries=%d\n",
-			entry->len, entry->name, len, fpos, cachep->entries);
-		return;
-	}
-	/*
-	 * This block is full ... advance the index.
-	 */
-	cachep->idx++;
-	if (cachep->idx > NINDEX) /* not likely */
-		goto out_full;
-	index++;
 	/*
-	 * Get the next cache block. We don't care for its contents.
+	 * Note: d_validate doesn't dereference the parent pointer ...
+	 * just combines it with the name hash to find the hash chain.
 	 */
-get_block:
-	cachep->pages++;
-	page_off = PAGE_SIZE + (cachep->idx << PAGE_SHIFT);
-	page = grab_cache_page(mapping, page_off>>PAGE_CACHE_SHIFT);
-	if (page) {
-		block = kmap(page);
-		index->block = block;
-		index->space = PAGE_SIZE;
-		goto add_entry;
-	}
-	/*
-	 * On failure, just set the return status ...
-	 */
-out_full:
-	cachep->status = -ENOMEM;
+	valid = d_validate(dentry, dentry->d_parent, dentry->d_name.hash, len);
 out:
-	return;
-}
+	return valid;
 
-int
-smb_find_in_cache(struct cache_head * cachep, off_t pos, 
-		struct cache_dirent *entry)
-{
-	struct cache_index * index = cachep->index;
-	struct cache_block * block;
-	unsigned int i, nent, offset = 0;
-	off_t next_pos = 2;
-
-	VERBOSE("smb_find_in_cache: cache %p, looking for pos=%ld\n",
-		cachep, pos);
-	for (i = 0; i < cachep->pages; i++, index++)
-	{
-		if (pos < next_pos)
-			break;
-		nent = pos - next_pos;
-		next_pos += index->num_entries;
-		if (pos >= next_pos)
-			continue;
-		/*
-		 * The entry is in this block. Note: we return
-		 * then name as a reference with _no_ null byte.
-		 */
-		block = index->block;
-		entry->ino = block->cb_data.table[nent].ino;
-		entry->len = block->cb_data.table[nent].namelen;
-		offset = block->cb_data.table[nent].offset;
-		entry->name = &block->cb_data.names[offset];
-
-		VERBOSE("found %.*s, len=%d, pos=%ld\n",
-			entry->len, entry->name, entry->len, pos);
-		break;
-	}
-	return offset;
+bad_addr:
+	printk(KERN_ERR "smb_d_validate: invalid address %lx\n", dent_addr);
+	goto out;
+bad_align:
+	printk(KERN_ERR "smb_d_validate: unaligned address %lx\n", dent_addr);
+	goto out;
 }
 
-int
-smb_refill_dircache(struct cache_head * cachep, struct dentry *dentry)
+/*
+ * dget, but require that fpos and parent matches what the dentry contains.
+ * dentry is not known to be a valid pointer at entry.
+ */
+struct dentry *
+smb_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
 {
-	struct inode * inode = dentry->d_inode;
-	int result;
+	struct dentry *dent = dentry;
+	struct list_head *next;
 
-	VERBOSE("smb_refill_dircache: cache %s/%s, blocks=%d\n",
-		DENTRY_PATH(dentry), cachep->pages);
-	/*
-	 * Fill the cache, starting at position 2.
-	 */
-retry:
-	inode->u.smbfs_i.cache_valid |= SMB_F_CACHEVALID;
-	result = smb_proc_readdir(dentry, 2, cachep);
-	if (result < 0)
-	{
-		PARANOIA("readdir failed, result=%d\n", result);
-		goto out;
+	if (smb_d_validate(dent)) {
+		if (dent->d_parent == parent &&
+		    (unsigned long)dent->d_fsdata == fpos) {
+			if (!dent->d_inode) {
+				dput(dent);
+				dent = NULL;
+			}
+			return dent;
+		}
+		dput(dent);
 	}
 
-	/*
-	 * Check whether the cache was invalidated while
-	 * we were doing the scan ...
-	 */
-	if (!(inode->u.smbfs_i.cache_valid & SMB_F_CACHEVALID))
-	{
-		PARANOIA("cache invalidated, retrying\n");
-		goto retry;
+	/* If a pointer is invalid, we search the dentry. */
+	spin_lock(&dcache_lock);
+	next = parent->d_subdirs.next;
+	while (next != &parent->d_subdirs) {
+		dent = list_entry(next, struct dentry, d_child);
+		if ((unsigned long)dent->d_fsdata == fpos) {
+			if (dent->d_inode)
+				dget_locked(dent);
+			else
+				dent = NULL;
+			goto out_unlock;
+		}
+		next = next->next;
 	}
+	dent = NULL;
+out_unlock:
+	spin_unlock(&dcache_lock);
+	return dent;
+}
+
+
+/*
+ * Create dentry/inode for this file and add it to the dircache.
+ */
+int
+smb_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
+	       struct smb_cache_control *ctrl, struct qstr *qname,
+	       struct smb_fattr *entry)
+{
+	struct dentry *newdent, *dentry = filp->f_dentry;
+	struct inode *newino, *inode = dentry->d_inode;
+	struct smb_cache_control ctl = *ctrl;
+	int valid = 0;
+	ino_t ino = 0;
+
+	qname->hash = full_name_hash(qname->name, qname->len);
+
+	if (dentry->d_op && dentry->d_op->d_hash)
+		if (dentry->d_op->d_hash(dentry, qname) != 0)
+			goto end_advance;
+
+	newdent = d_lookup(dentry, qname);
+
+	if (!newdent) {
+		newdent = d_alloc(dentry, qname);
+		if (!newdent)
+			goto end_advance;
+	} else
+		memcpy((char *) newdent->d_name.name, qname->name,
+		       newdent->d_name.len);
+
+	if (!newdent->d_inode) {
+		smb_renew_times(newdent);
+		entry->f_ino = iunique(inode->i_sb, 2);
+		newino = smb_iget(inode->i_sb, entry);
+		if (newino) {
+			smb_new_dentry(newdent);
+			d_add(newdent, newino);
+		}
+	} else
+		smb_set_inode_attr(newdent->d_inode, entry);
 
-	result = cachep->status;
-	if (!result)
-	{
-		cachep->valid = 1;
+        if (newdent->d_inode) {
+		ino = newdent->d_inode->i_ino;
+		newdent->d_fsdata = (void *) ctl.fpos;
+		smb_new_dentry(newdent);
 	}
-	VERBOSE("cache %s/%s status=%d, entries=%d\n",
-		DENTRY_PATH(dentry), cachep->status, cachep->entries);
-out:
-	return result;
-}
 
-void
-smb_invalid_dir_cache(struct inode * dir)
-{
-	/*
-	 * Get rid of any unlocked pages, and clear the
-	 * 'valid' flag in case a scan is in progress.
-	 */
-	invalidate_inode_pages(dir);
-	dir->u.smbfs_i.cache_valid &= ~SMB_F_CACHEVALID;
-	dir->u.smbfs_i.oldmtime = 0;
+	if (ctl.idx >= SMB_DIRCACHE_SIZE) {
+		if (ctl.page) {
+			kunmap(ctl.page);
+			SetPageUptodate(ctl.page);
+			UnlockPage(ctl.page);
+			page_cache_release(ctl.page);
+		}
+		ctl.cache = NULL;
+		ctl.idx  -= SMB_DIRCACHE_SIZE;
+		ctl.ofs  += 1;
+		ctl.page  = grab_cache_page(&inode->i_data, ctl.ofs);
+		if (ctl.page)
+			ctl.cache = kmap(ctl.page);
+	}
+	if (ctl.cache) {
+		ctl.cache->dentry[ctl.idx] = newdent;
+		valid = 1;
+	}
+	dput(newdent);
+
+end_advance:
+	if (!valid)
+		ctl.valid = 0;
+	if (!ctl.filled && (ctl.fpos == filp->f_pos)) {
+		if (!ino)
+			ino = find_inode_number(dentry, qname);
+		if (!ino)
+			ino = iunique(inode->i_sb, 2);
+		ctl.filled = filldir(dirent, qname->name, qname->len,
+				     filp->f_pos, ino, DT_UNKNOWN);
+		if (!ctl.filled)
+			filp->f_pos += 1;
+	}
+	ctl.fpos += 1;
+	ctl.idx  += 1;
+	*ctrl = ctl;
+	return (ctl.valid || !ctl.filled);
 }

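The rewritten cache follows the ncpfs scheme named in the updated header comment: instead of the old hand-rolled cache_head/cache_index/cache_block layout, each page of the directory inode's page cache holds an array of dentry pointers, and page 0 additionally starts with a small header whose time stamp smb_invalid_dir_cache() back-dates. The definitions of union smb_dir_cache, struct smb_cache_control and SMB_DIRCACHE_SIZE live in the smbfs headers rather than in this file, so the user-space sketch below reconstructs the layout from how it is used here, with PAGE_CACHE_SIZE fixed at 4096 and the header reduced to the one field this file touches, to illustrate how smb_fill_cache() rolls over from one cache page to the next.

/*
 * User-space model of the dircache page layout implied by this patch.
 * ASSUMPTIONS: PAGE_CACHE_SIZE is taken as 4096 and the cache head is
 * reduced to the one field cache.c touches; the real definitions live
 * in the smbfs headers, not in this file.
 */
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096		/* assumed page size */

struct dentry;				/* opaque stand-in */

struct smb_cache_head {
	unsigned long time;		/* age stamp, back-dated by smb_invalid_dir_cache() */
	/* the real header carries a few more bookkeeping fields */
};

#define SMB_DIRCACHE_SIZE \
	((int)(PAGE_CACHE_SIZE / sizeof(struct dentry *)))

union smb_dir_cache {			/* one page of the directory's page cache */
	struct smb_cache_head head;	/* meaningful on page 0 only */
	struct dentry *dentry[SMB_DIRCACHE_SIZE];
};

int main(void)
{
	/*
	 * Mimic the rollover step in smb_fill_cache(): once idx runs off
	 * the end of the current cache page, advance to the next page of
	 * the directory inode's mapping.
	 */
	int idx = SMB_DIRCACHE_SIZE;	/* just walked past the last slot */
	unsigned long ofs = 0;		/* current page index in i_data */

	if (idx >= SMB_DIRCACHE_SIZE) {
		idx -= SMB_DIRCACHE_SIZE;
		ofs += 1;
	}
	printf("%d dentry slots per page; next entry goes to page %lu, slot %d\n",
	       SMB_DIRCACHE_SIZE, ofs, idx);
	return 0;
}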
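The new smb_invalid_dir_cache() no longer drops the inode's pages or clears an SMB_F_CACHEVALID flag; it simply ages the stamp on cache page 0 by SMB_MAX_AGE(server), so the next attempt to use the cache sees it as expired and refreshes from the server. A minimal model of that arithmetic, assuming the consumer's freshness test has the usual "age less than SMB_MAX_AGE" shape (the real test sits in the smbfs dir code, which this patch does not show):

/*
 * Model of the back-dating done by smb_invalid_dir_cache() above.
 * ASSUMPTION: the consumer treats the cache as stale once
 * (now - head.time) reaches SMB_MAX_AGE.
 */
#include <stdio.h>

int main(void)
{
	unsigned long jiffies = 100000;	/* pretend current tick count */
	unsigned long max_age = 500;	/* stand-in for SMB_MAX_AGE(server) */
	unsigned long head_time;

	head_time = jiffies;		/* a freshly filled cache */
	printf("fresh cache:      stale=%d\n", jiffies - head_time >= max_age);

	head_time = jiffies - max_age;	/* what smb_invalid_dir_cache() stores */
	printf("back-dated cache: stale=%d\n", jiffies - head_time >= max_age);
	return 0;
}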
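Both smb_invalidate_dircache_entries() and the fallback path of smb_dget_fpos() walk the parent's children through the circular d_subdirs/d_child lists under dcache_lock, using d_fsdata as the cached readdir position. A stripped-down user-space model of that traversal pattern, with simplified stand-ins for list_head and dentry and no locking:

/*
 * Minimal model of the d_subdirs walk used above; list_head and dentry
 * here are simplified stand-ins, not the kernel structures.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

struct dentry {
	struct list_head d_child;	/* link in the parent's child list */
	void *d_fsdata;			/* smbfs stores the readdir position here */
	const char *d_name;
};

/* container_of for this model: recover the dentry from its d_child link */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void add_child(struct list_head *parent, struct dentry *d)
{
	d->d_child.next = parent->next;
	d->d_child.prev = parent;
	parent->next->prev = &d->d_child;
	parent->next = &d->d_child;
}

int main(void)
{
	struct list_head d_subdirs = { &d_subdirs, &d_subdirs };
	struct dentry a = { .d_fsdata = (void *)2UL, .d_name = "a" };
	struct dentry b = { .d_fsdata = (void *)3UL, .d_name = "b" };
	struct list_head *next;

	add_child(&d_subdirs, &a);
	add_child(&d_subdirs, &b);

	/*
	 * Same shape as smb_invalidate_dircache_entries(): visit every
	 * child and forget its cached directory position.
	 */
	next = d_subdirs.next;
	while (next != &d_subdirs) {
		struct dentry *dent = list_entry(next, struct dentry, d_child);
		printf("invalidating %s (fpos was %lu)\n",
		       dent->d_name, (unsigned long)dent->d_fsdata);
		dent->d_fsdata = NULL;
		next = next->next;
	}
	return 0;
}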