patch-2.4.8 linux/mm/shmem.c

diff -u --recursive --new-file v2.4.7/linux/mm/shmem.c linux/mm/shmem.c
@@ -402,7 +402,7 @@
 	int error;
 
 	down (&inode->i_sem);
-	if (inode->i_size < (loff_t) idx * PAGE_CACHE_SIZE)
+	if (inode->i_size <= (loff_t) idx * PAGE_CACHE_SIZE)
 		goto sigbus;
 	*ptr = shmem_getpage_locked(inode, idx);
 	if (IS_ERR (*ptr))
@@ -537,6 +537,30 @@
 	return inode;
 }
 
+static int shmem_set_size(struct shmem_sb_info *info,
+			  unsigned long max_blocks, unsigned long max_inodes)
+{
+	int error;
+	unsigned long blocks, inodes;
+
+	spin_lock(&info->stat_lock);
+	blocks = info->max_blocks - info->free_blocks;
+	inodes = info->max_inodes - info->free_inodes;
+	error = -EINVAL;
+	if (max_blocks < blocks)
+		goto out;
+	if (max_inodes < inodes)
+		goto out;
+	error = 0;
+	info->max_blocks  = max_blocks;
+	info->free_blocks = max_blocks - blocks;
+	info->max_inodes  = max_inodes;
+	info->free_inodes = max_inodes - inodes;
+out:
+	spin_unlock(&info->stat_lock);
+	return error;
+}
+
 #ifdef CONFIG_TMPFS
 static ssize_t
 shmem_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
@@ -754,18 +778,8 @@
 	buf->f_type = TMPFS_MAGIC;
 	buf->f_bsize = PAGE_CACHE_SIZE;
 	spin_lock (&sb->u.shmem_sb.stat_lock);
-	if (sb->u.shmem_sb.max_blocks == ULONG_MAX) {
-		/*
-		 * This is only a guestimate and not honoured.
-		 * We need it to make some programs happy which like to
-		 * test the free space of a file system.
-		 */
-		buf->f_bavail = buf->f_bfree = nr_free_pages() + nr_swap_pages + atomic_read(&buffermem_pages);
-		buf->f_blocks = buf->f_bfree + ULONG_MAX - sb->u.shmem_sb.free_blocks;
-	} else {
-		buf->f_blocks = sb->u.shmem_sb.max_blocks;
-		buf->f_bavail = buf->f_bfree = sb->u.shmem_sb.free_blocks;
-	}
+	buf->f_blocks = sb->u.shmem_sb.max_blocks;
+	buf->f_bavail = buf->f_bfree = sb->u.shmem_sb.free_blocks;
 	buf->f_files = sb->u.shmem_sb.max_inodes;
 	buf->f_ffree = sb->u.shmem_sb.free_inodes;
 	spin_unlock (&sb->u.shmem_sb.stat_lock);
@@ -1015,32 +1029,13 @@
 
 static int shmem_remount_fs (struct super_block *sb, int *flags, char *data)
 {
-	int error;
-	unsigned long max_blocks, blocks;
-	unsigned long max_inodes, inodes;
 	struct shmem_sb_info *info = &sb->u.shmem_sb;
+	unsigned long max_blocks = info->max_blocks;
+	unsigned long max_inodes = info->max_inodes;
 
-	max_blocks = info->max_blocks;
-	max_inodes = info->max_inodes;
 	if (shmem_parse_options (data, NULL, &max_blocks, &max_inodes))
 		return -EINVAL;
-
-	spin_lock(&info->stat_lock);
-	blocks = info->max_blocks - info->free_blocks;
-	inodes = info->max_inodes - info->free_inodes;
-	error = -EINVAL;
-	if (max_blocks < blocks)
-		goto out;
-	if (max_inodes < inodes)
-		goto out;
-	error = 0;
-	info->max_blocks  = max_blocks;
-	info->free_blocks = max_blocks - blocks;
-	info->max_inodes  = max_inodes;
-	info->free_inodes = max_inodes - inodes;
-out:
-	spin_unlock(&info->stat_lock);
-	return error;
+	return shmem_set_size(info, max_blocks, max_inodes);
 }
 
 int shmem_sync_file(struct file * file, struct dentry *dentry, int datasync)
@@ -1053,9 +1048,16 @@
 {
 	struct inode * inode;
 	struct dentry * root;
-	unsigned long blocks = ULONG_MAX;	/* unlimited */
-	unsigned long inodes = ULONG_MAX;	/* unlimited */
+	unsigned long blocks, inodes;
 	int mode   = S_IRWXUGO | S_ISVTX;
+	struct sysinfo si;
+
+	/*
+	 * Per default we only allow half of the physical ram per
+	 * tmpfs instance
+	 */
+	si_meminfo(&si);
+	blocks = inodes = si.totalram / 2;
 
 #ifdef CONFIG_TMPFS
 	if (shmem_parse_options (data, &mode, &blocks, &inodes)) {
@@ -1179,6 +1181,10 @@
 		unregister_filesystem(&tmpfs_fs_type);
 		return PTR_ERR(res);
 	}
+
+	/* The internal instance should not do size checking */
+	if ((error = shmem_set_size(&res->mnt_sb->u.shmem_sb, ULONG_MAX, ULONG_MAX)))
+		printk (KERN_ERR "could not set limits on internal tmpfs\n");
 
 	return 0;
 }
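
The shmem_set_size() helper introduced above enforces a single rule: the new limits may never drop below what the filesystem is already using, otherwise the free counters would underflow. The following user-space sketch illustrates only that rule; the structure, function names and return values are simplified stand-ins, not the kernel's shmem_sb_info or its spinlocked version above.

	#include <stdio.h>

	struct size_info {		/* simplified stand-in for shmem_sb_info */
		unsigned long max_blocks, free_blocks;
		unsigned long max_inodes, free_inodes;
	};

	static int set_size(struct size_info *info,
			    unsigned long max_blocks, unsigned long max_inodes)
	{
		/* blocks/inodes currently in use */
		unsigned long blocks = info->max_blocks - info->free_blocks;
		unsigned long inodes = info->max_inodes - info->free_inodes;

		if (max_blocks < blocks || max_inodes < inodes)
			return -1;	/* the kernel version returns -EINVAL */

		info->max_blocks  = max_blocks;
		info->free_blocks = max_blocks - blocks;
		info->max_inodes  = max_inodes;
		info->free_inodes = max_inodes - inodes;
		return 0;
	}

	int main(void)
	{
		/* 24 blocks and 12 inodes currently in use */
		struct size_info sb = { 1024, 1000, 512, 500 };

		printf("grow:   %d\n", set_size(&sb, 2048, 1024));	/* 0: accepted  */
		printf("shrink: %d\n", set_size(&sb, 16, 8));		/* -1: rejected */
		return 0;
	}

Both callers in the patch go through this check: shmem_remount_fs() passes the limits parsed from the remount options, and the kernel-internal instance raises its limits to ULONG_MAX so that it is effectively never size-checked.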
