D49942.diff

diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -100,6 +100,7 @@
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
+#include <vm/vm_radix.h>
#include <vm/swap_pager.h>
struct shm_mapping {
@@ -195,6 +196,7 @@
static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
+ struct pctrie_iter pages;
vm_page_t m;
vm_pindex_t idx;
size_t tlen;
@@ -214,8 +216,9 @@
* page: use zero_region. This is intended to avoid instantiating
* pages on read from a sparse region.
*/
+ vm_page_iter_init(&pages, obj);
VM_OBJECT_WLOCK(obj);
- m = vm_page_lookup(obj, idx);
+ m = vm_radix_iter_lookup(&pages, idx);
if (uio->uio_rw == UIO_READ && m == NULL &&
!vm_pager_has_page(obj, idx, NULL, NULL)) {
VM_OBJECT_WUNLOCK(obj);
@@ -229,7 +232,7 @@
* lock to page out tobj's pages because tobj is a OBJT_SWAP
* type object.
*/
- rv = vm_page_grab_valid(&m, obj, idx,
+ rv = vm_page_grab_valid_iter(&m, obj, &pages, idx,
VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
if (rv != VM_PAGER_OK) {
VM_OBJECT_WUNLOCK(obj);
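
The uipc_shm.c hunk shows the intended caller pattern: a single pctrie_iter is initialized against the object, and the same iterator then serves both the radix-trie lookup and the subsequent grab under the object write lock, so the trie is not re-walked from the root for each operation. Below is a minimal sketch of that pattern, using only the KPIs that appear in this diff; the function name, the zero-region fast path, and the error handling are simplified placeholders, not the actual uiomove_object_page() implementation.

static int
shm_page_grab_sketch(vm_object_t obj, vm_pindex_t idx, struct uio *uio)
{
	struct pctrie_iter pages;
	vm_page_t m;
	int rv;

	/* Bind the iterator to the object's page radix trie. */
	vm_page_iter_init(&pages, obj);
	VM_OBJECT_WLOCK(obj);

	/* Look up through the iterator instead of vm_page_lookup(). */
	m = vm_radix_iter_lookup(&pages, idx);
	if (uio->uio_rw == UIO_READ && m == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		/* ... copy from zero_region; no page is instantiated ... */
		return (0);
	}

	/*
	 * Grab and validate the page, reusing the already-positioned
	 * iterator rather than starting a fresh lookup.
	 */
	rv = vm_page_grab_valid_iter(&m, obj, &pages, idx,
	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
	if (rv != VM_PAGER_OK) {
		VM_OBJECT_WUNLOCK(obj);
		return (EIO);
	}
	VM_OBJECT_WUNLOCK(obj);

	/* ... uiomove_fromphys() and vm_page_sunbusy(m) would follow ... */
	return (0);
}
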
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -641,6 +641,8 @@
int allocflags, vm_page_t *ma, int count);
int vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
int allocflags);
+int vm_page_grab_valid_iter(vm_page_t *mp, vm_object_t object,
+ struct pctrie_iter *, vm_pindex_t pindex, int allocflags);
int vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
vm_pindex_t pindex, int allocflags);
void vm_page_deactivate(vm_page_t);
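
The header change only adds the iterator-taking variant; the original prototype stays, and the final vm_page.c hunk below reimplements it as a thin wrapper that initializes a local iterator. Existing callers that do not carry an iterator therefore keep working unchanged, e.g.:

	/* No iterator on hand: the pre-existing entry point still applies. */
	rv = vm_page_grab_valid(&m, obj, idx,
	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
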
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -4946,17 +4946,17 @@
}
/*
- * Grab a page and make it valid, paging in if necessary. Pages missing from
- * their pager are zero filled and validated. If a VM_ALLOC_COUNT is supplied
- * and the page is not valid as many as VM_INITIAL_PAGEIN pages can be brought
- * in simultaneously. Additional pages will be left on a paging queue but
- * will neither be wired nor busy regardless of allocflags.
+ * Grab a page and make it valid, paging in if necessary. Use an iterator
+ * parameter. Pages missing from their pager are zero filled and validated. If
+ * a VM_ALLOC_COUNT is supplied and the page is not valid as many as
+ * VM_INITIAL_PAGEIN pages can be brought in simultaneously. Additional pages
+ * will be left on a paging queue but will neither be wired nor busy regardless
+ * of allocflags.
*/
int
-vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
- int allocflags)
+vm_page_grab_valid_iter(vm_page_t *mp, vm_object_t object,
+ struct pctrie_iter *pages, vm_pindex_t pindex, int allocflags)
{
- struct pctrie_iter pages;
vm_page_t m, mpred;
vm_page_t ma[VM_INITIAL_PAGEIN];
int after, i, pflags, rv;
@@ -4971,10 +4971,9 @@
pflags = allocflags & ~(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY |
VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY);
pflags |= VM_ALLOC_WAITFAIL;
- vm_page_iter_init(&pages, object);
retrylookup:
- if ((m = vm_radix_iter_lookup(&pages, pindex)) != NULL) {
+ if ((m = vm_radix_iter_lookup(pages, pindex)) != NULL) {
/*
* If the page is fully valid it can only become invalid
* with the object lock held. If it is not valid it can
@@ -4988,7 +4987,7 @@
vm_page_all_valid(m) ? allocflags : 0)) {
(void)vm_page_grab_sleep(object, m, pindex, "pgrbwt",
allocflags, true);
- pctrie_iter_reset(&pages);
+ pctrie_iter_reset(pages);
goto retrylookup;
}
if (vm_page_all_valid(m))
@@ -5002,13 +5001,14 @@
*mp = NULL;
return (VM_PAGER_FAIL);
} else {
- mpred = vm_radix_iter_lookup_lt(&pages, pindex);
- m = vm_page_alloc_after(object, &pages, pindex, pflags, mpred);
+ mpred = vm_radix_iter_lookup_lt(pages, pindex);
+ m = vm_page_alloc_after(object, pages, pindex, pflags, mpred);
if (m == NULL) {
if (!vm_pager_can_alloc_page(object, pindex)) {
*mp = NULL;
return (VM_PAGER_AGAIN);
}
+ pctrie_iter_reset(pages);
goto retrylookup;
}
}
@@ -5019,10 +5019,11 @@
after = MIN(after, allocflags >> VM_ALLOC_COUNT_SHIFT);
after = MAX(after, 1);
ma[0] = mpred = m;
+ pctrie_iter_reset(pages);
for (i = 1; i < after; i++) {
- m = vm_radix_iter_lookup(&pages, pindex + i);
+ m = vm_radix_iter_lookup(pages, pindex + i);
if (m == NULL) {
- m = vm_page_alloc_after(object, &pages,
+ m = vm_page_alloc_after(object, pages,
pindex + i, VM_ALLOC_NORMAL, mpred);
if (m == NULL)
break;
@@ -5054,6 +5055,7 @@
} else {
vm_page_zero_invalid(m, TRUE);
}
+ pctrie_iter_reset(pages);
out:
if ((allocflags & VM_ALLOC_WIRED) != 0)
vm_page_wire(m);
@@ -5065,6 +5067,25 @@
return (VM_PAGER_OK);
}
+/*
+ * Grab a page and make it valid, paging in if necessary. Pages missing from
+ * their pager are zero filled and validated. If a VM_ALLOC_COUNT is supplied
+ * and the page is not valid as many as VM_INITIAL_PAGEIN pages can be brought
+ * in simultaneously. Additional pages will be left on a paging queue but
+ * will neither be wired nor busy regardless of allocflags.
+ */
+int
+vm_page_grab_valid(vm_page_t *mp, vm_object_t object, vm_pindex_t pindex,
+ int allocflags)
+{
+ struct pctrie_iter pages;
+
+ VM_OBJECT_ASSERT_WLOCKED(object);
+ vm_page_iter_init(&pages, object);
+ return (vm_page_grab_valid_iter(mp, object, &pages, pindex,
+ allocflags));
+}
+
/*
* Grab a page. Keep on waiting, as long as the page exists in the object. If
* the page doesn't exist, and the pager has it, allocate it and zero part of
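
Because the iterator is now supplied by the caller, vm_page_grab_valid_iter() resets it with pctrie_iter_reset() at each point where the trie may have changed underneath it or the object lock may have been dropped: after a grab sleep, after a failed allocation before retrying, before the read-ahead lookup loop, and after the pager path. Read that way, the iterator handed back to the caller should be safe to keep using; the sketch below shows a hypothetical caller grabbing a run of consecutive pages with one iterator (first, last, and the per-page processing are placeholders).

	struct pctrie_iter pages;
	vm_page_t m;
	vm_pindex_t idx;
	int rv;

	vm_page_iter_init(&pages, obj);
	VM_OBJECT_WLOCK(obj);
	for (idx = first; idx <= last; idx++) {
		/*
		 * The function resets the iterator internally whenever it may
		 * have slept or modified the trie, so the same iterator can
		 * simply be passed in again on the next iteration.
		 */
		rv = vm_page_grab_valid_iter(&m, obj, &pages, idx,
		    VM_ALLOC_NORMAL);
		if (rv != VM_PAGER_OK)
			break;
		/* ... consume the exclusive-busied page ... */
		vm_page_xunbusy(m);
	}
	VM_OBJECT_WUNLOCK(obj);
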
