D39029.diff

This revision renames the static page fault helper functions in sys/vm/vm_fault.c to carry a vm_fault_ prefix (for example, fault_page_release() becomes vm_fault_page_release() and unlock_map() becomes vm_fault_unlock_map()), and updates all of their call sites to match.
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -195,7 +195,7 @@
"the page fault handler");
static inline void
-fault_page_release(vm_page_t *mp)
+vm_fault_page_release(vm_page_t *mp)
{
vm_page_t m;
@@ -213,7 +213,7 @@
}
static inline void
-fault_page_free(vm_page_t *mp)
+vm_fault_page_free(vm_page_t *mp)
{
vm_page_t m;
@@ -235,7 +235,7 @@
* be checked quickly.
*/
static inline bool
-fault_object_needs_getpages(vm_object_t object)
+vm_fault_object_needs_getpages(vm_object_t object)
{
VM_OBJECT_ASSERT_LOCKED(object);
@@ -244,7 +244,7 @@
}
static inline void
-unlock_map(struct faultstate *fs)
+vm_fault_unlock_map(struct faultstate *fs)
{
if (fs->lookup_still_valid) {
@@ -254,7 +254,7 @@
}
static void
-unlock_vp(struct faultstate *fs)
+vm_fault_unlock_vp(struct faultstate *fs)
{
if (fs->vp != NULL) {
@@ -264,29 +264,29 @@
}
static void
-fault_deallocate(struct faultstate *fs)
+vm_fault_deallocate(struct faultstate *fs)
{
- fault_page_release(&fs->m_cow);
- fault_page_release(&fs->m);
+ vm_fault_page_release(&fs->m_cow);
+ vm_fault_page_release(&fs->m);
vm_object_pip_wakeup(fs->object);
if (fs->object != fs->first_object) {
VM_OBJECT_WLOCK(fs->first_object);
- fault_page_free(&fs->first_m);
+ vm_fault_page_free(&fs->first_m);
VM_OBJECT_WUNLOCK(fs->first_object);
vm_object_pip_wakeup(fs->first_object);
}
vm_object_deallocate(fs->first_object);
- unlock_map(fs);
- unlock_vp(fs);
+ vm_fault_unlock_map(fs);
+ vm_fault_unlock_vp(fs);
}
static void
-unlock_and_deallocate(struct faultstate *fs)
+vm_fault_unlock_and_deallocate(struct faultstate *fs)
{
VM_OBJECT_UNLOCK(fs->object);
- fault_deallocate(fs);
+ vm_fault_deallocate(fs);
}
static void
@@ -471,8 +471,8 @@
pager_first = OFF_TO_IDX(fs->entry->offset);
pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
- unlock_map(fs);
- unlock_vp(fs);
+ vm_fault_unlock_map(fs);
+ vm_fault_unlock_vp(fs);
res = FAULT_SUCCESS;
@@ -781,7 +781,7 @@
* Perform an unlock in case the desired vnode changed while
* the map was unlocked during a retry.
*/
- unlock_vp(fs);
+ vm_fault_unlock_vp(fs);
locked = VOP_ISLOCKED(vp);
if (locked != LK_EXCLUSIVE)
@@ -801,9 +801,9 @@
vhold(vp);
if (objlocked)
- unlock_and_deallocate(fs);
+ vm_fault_unlock_and_deallocate(fs);
else
- fault_deallocate(fs);
+ vm_fault_deallocate(fs);
error = vget(vp, locked | LK_RETRY | LK_CANRECURSE);
vdrop(vp);
fs->vp = vp;
@@ -875,7 +875,7 @@
VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
&fs->first_pindex, &fs->prot, &fs->wired);
if (result != KERN_SUCCESS) {
- unlock_vp(fs);
+ vm_fault_unlock_vp(fs);
return (result);
}
@@ -892,7 +892,7 @@
vm_map_lock(fs->map);
if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
(fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
- unlock_vp(fs);
+ vm_fault_unlock_vp(fs);
fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
vm_map_unlock_and_wait(fs->map, 0);
} else
@@ -1095,10 +1095,10 @@
} else if (fs->m != NULL) {
if (!vm_fault_object_ensure_wlocked(fs)) {
fs->can_read_lock = false;
- unlock_and_deallocate(fs);
+ vm_fault_unlock_and_deallocate(fs);
return (FAULT_NEXT_RESTART);
}
- fault_page_free(&fs->m);
+ vm_fault_page_free(&fs->m);
}
/*
@@ -1163,7 +1163,7 @@
{
struct timeval now;
- unlock_and_deallocate(fs);
+ vm_fault_unlock_and_deallocate(fs);
if (vm_pfault_oom_attempts < 0)
return (true);
if (!fs->oom_started) {
@@ -1203,7 +1203,7 @@
}
if (fs->pindex >= fs->object->size) {
- unlock_and_deallocate(fs);
+ vm_fault_unlock_and_deallocate(fs);
return (FAULT_OUT_OF_BOUNDS);
}
@@ -1215,7 +1215,7 @@
case FAULT_SUCCESS:
case FAULT_FAILURE:
case FAULT_RESTART:
- unlock_and_deallocate(fs);
+ vm_fault_unlock_and_deallocate(fs);
return (res);
case FAULT_CONTINUE:
/*
@@ -1248,7 +1248,7 @@
vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex);
#endif
if (!vm_pager_can_alloc_page(fs->object, fs->pindex)) {
- unlock_and_deallocate(fs);
+ vm_fault_unlock_and_deallocate(fs);
return (FAULT_FAILURE);
}
fs->m = vm_page_alloc(fs->object, fs->pindex,
@@ -1309,7 +1309,7 @@
* a shadow, then an earlier iteration of this loop
* may have already unlocked the map.)
*/
- unlock_map(fs);
+ vm_fault_unlock_map(fs);
status = vm_fault_lock_vnode(fs, false);
MPASS(status == FAULT_CONTINUE || status == FAULT_RESTART);
@@ -1364,8 +1364,8 @@
*/
if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
VM_OBJECT_WLOCK(fs->object);
- fault_page_free(&fs->m);
- unlock_and_deallocate(fs);
+ vm_fault_page_free(&fs->m);
+ vm_fault_unlock_and_deallocate(fs);
return (FAULT_OUT_OF_BOUNDS);
}
KASSERT(rv == VM_PAGER_FAIL,
@@ -1396,11 +1396,11 @@
*/
vm_page_aflag_set(fs->m, PGA_REFERENCED);
if (fs->object != fs->first_object) {
- fault_page_release(&fs->first_m);
+ vm_fault_page_release(&fs->first_m);
vm_object_pip_wakeup(fs->first_object);
}
vm_object_pip_wakeup(fs->object);
- unlock_map(fs);
+ vm_fault_unlock_map(fs);
if (fs->m != vm_page_lookup(fs->object, fs->pindex) ||
!vm_page_busy_sleep(fs->m, "vmpfw", 0))
VM_OBJECT_UNLOCK(fs->object);
@@ -1434,7 +1434,7 @@
*/
if ((fs->object->flags & OBJ_DEAD) != 0) {
dead = fs->object->type == OBJT_DEAD;
- unlock_and_deallocate(fs);
+ vm_fault_unlock_and_deallocate(fs);
if (dead)
return (FAULT_PROTECTION_FAILURE);
pause("vmf_de", 1);
@@ -1467,11 +1467,11 @@
* or this is the beginning of the search, allocate a new
* page.
*/
- if (fs->m == NULL && (fault_object_needs_getpages(fs->object) ||
+ if (fs->m == NULL && (vm_fault_object_needs_getpages(fs->object) ||
fs->object == fs->first_object)) {
if (!vm_fault_object_ensure_wlocked(fs)) {
fs->can_read_lock = false;
- unlock_and_deallocate(fs);
+ vm_fault_unlock_and_deallocate(fs);
return (FAULT_RESTART);
}
res = vm_fault_allocate(fs);
@@ -1484,7 +1484,7 @@
* If not, skip to the next object without dropping the lock to
* preserve atomicity of shadow faults.
*/
- if (fault_object_needs_getpages(fs->object)) {
+ if (vm_fault_object_needs_getpages(fs->object)) {
/*
* At this point, we have either allocated a new page
* or found an existing page that is only partially
@@ -1647,8 +1647,8 @@
MPASS(res_next == FAULT_NEXT_NOOBJ);
if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) {
if (fs.first_object == fs.object)
- fault_page_free(&fs.first_m);
- unlock_and_deallocate(&fs);
+ vm_fault_page_free(&fs.first_m);
+ vm_fault_unlock_and_deallocate(&fs);
return (KERN_OUT_OF_BOUNDS);
}
VM_OBJECT_UNLOCK(fs.object);
@@ -1698,7 +1698,7 @@
if (!fs.lookup_still_valid) {
rv = vm_fault_relookup(&fs);
if (rv != KERN_SUCCESS) {
- fault_deallocate(&fs);
+ vm_fault_deallocate(&fs);
if (rv == KERN_RESTART)
goto RetryFault;
return (rv);
@@ -1757,7 +1757,7 @@
/*
* Unlock everything, and return
*/
- fault_deallocate(&fs);
+ vm_fault_deallocate(&fs);
if (hardfault) {
VM_CNT_INC(v_io_faults);
curthread->td_ru.ru_majflt++;
@@ -1909,7 +1909,7 @@
if (!obj_locked)
VM_OBJECT_RLOCK(lobject);
while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
- !fault_object_needs_getpages(lobject) &&
+ !vm_fault_object_needs_getpages(lobject) &&
(backing_object = lobject->backing_object) != NULL) {
KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
0, ("vm_fault_prefault: unaligned object offset"));
