patch-2.1.20 linux/mm/memory.c
Next file: linux/mm/mmap.c
Previous file: linux/kernel/fork.c
Back to the patch index
Back to the overall index
- Lines: 102
- Date: Wed Jan 1 16:20:45 1997
- Orig file: v2.1.19/linux/mm/memory.c
- Orig date: Sun Dec 22 16:37:42 1996
diff -u --recursive --new-file v2.1.19/linux/mm/memory.c linux/mm/memory.c
@@ -137,10 +137,8 @@
printk("%s trying to clear kernel page-directory: not good\n", tsk->comm);
return;
}
- flush_cache_mm(tsk->mm);
for (i = 0 ; i < USER_PTRS_PER_PGD ; i++)
free_one_pgd(page_dir + i);
- flush_tlb_mm(tsk->mm);
}
/*
@@ -171,10 +169,8 @@
if (!(new_pg = pgd_alloc()))
return -ENOMEM;
page_dir = pgd_offset(&init_mm, 0);
- flush_cache_mm(tsk->mm);
memcpy(new_pg + USER_PTRS_PER_PGD, page_dir + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof (pgd_t));
- flush_tlb_mm(tsk->mm);
SET_PAGE_DIR(tsk, new_pg);
tsk->mm->pgd = new_pg;
return 0;
@@ -286,17 +282,12 @@
cow = (vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE;
src_pgd = pgd_offset(src, address);
dst_pgd = pgd_offset(dst, address);
- flush_cache_range(src, vma->vm_start, vma->vm_end);
- flush_cache_range(dst, vma->vm_start, vma->vm_end);
while (address < end) {
error = copy_pmd_range(dst_pgd++, src_pgd++, address, end - address, cow);
if (error)
break;
address = (address + PGDIR_SIZE) & PGDIR_MASK;
}
- /* Note that the src ptes get c-o-w treatment, so they change too. */
- flush_tlb_range(src, vma->vm_start, vma->vm_end);
- flush_tlb_range(dst, vma->vm_start, vma->vm_end);
return error;
}
@@ -380,20 +371,17 @@
/*
* remove user pages in a given range.
*/
-int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
+void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
{
pgd_t * dir;
unsigned long end = address + size;
dir = pgd_offset(mm, address);
- flush_cache_range(mm, end - size, end);
while (address < end) {
zap_pmd_range(dir, address, end - address);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
- flush_tlb_range(mm, end - size, end);
- return 0;
}
static inline void zeromap_pte_range(pte_t * pte, unsigned long address, unsigned long size, pte_t zero_pte)
@@ -580,7 +568,7 @@
}
flush_page_to_ram(page);
set_pte(pte, pte_mkwrite(pte_mkdirty(mk_pte(page, PAGE_COPY))));
-/* no need for invalidate */
+/* no need for flush_tlb */
return page;
}
@@ -731,13 +719,17 @@
return;
mpnt = inode->i_mmap;
do {
+ struct mm_struct *mm = mpnt->vm_mm;
unsigned long start = mpnt->vm_start;
- unsigned long len = mpnt->vm_end - start;
+ unsigned long end = mpnt->vm_end;
+ unsigned long len = end - start;
unsigned long diff;
/* mapping wholly truncated? */
if (mpnt->vm_offset >= offset) {
- zap_page_range(mpnt->vm_mm, start, len);
+ flush_cache_range(mm, start, end);
+ zap_page_range(mm, start, len);
+ flush_tlb_range(mm, start, end);
continue;
}
/* mapping wholly unaffected? */
@@ -751,7 +743,9 @@
partial_clear(mpnt, start);
start = (start + ~PAGE_MASK) & PAGE_MASK;
}
- zap_page_range(mpnt->vm_mm, start, len);
+ flush_cache_range(mm, start, end);
+ zap_page_range(mm, start, len);
+ flush_tlb_range(mm, start, end);
} while ((mpnt = mpnt->vm_next_share) != inode->i_mmap);
}
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov