D49096: vm_page: define partial page invalidate
D49096.id.diff

diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -493,50 +493,11 @@
 tmpfs_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
     int end, boolean_t ignerr)
 {
-	vm_page_t m;
-	int rv, error;
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	KASSERT(base >= 0, ("%s: base %d", __func__, base));
-	KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
-	    end));
-	error = 0;
-
-retry:
-	m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
-	if (m != NULL) {
-		MPASS(vm_page_all_valid(m));
-	} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
-		m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL |
-		    VM_ALLOC_WAITFAIL);
-		if (m == NULL)
-			goto retry;
-		vm_object_pip_add(object, 1);
-		VM_OBJECT_WUNLOCK(object);
-		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
-		VM_OBJECT_WLOCK(object);
-		vm_object_pip_wakeup(object);
-		if (rv == VM_PAGER_OK) {
-			/*
-			 * Since the page was not resident, and therefore not
-			 * recently accessed, immediately enqueue it for
-			 * asynchronous laundering.  The current operation is
-			 * not regarded as an access.
-			 */
-			vm_page_launder(m);
-		} else {
-			vm_page_free(m);
-			m = NULL;
-			if (!ignerr)
-				error = EIO;
-		}
-	}
-	if (m != NULL) {
-		pmap_zero_page_area(m, base, end - base);
-		vm_page_set_dirty(m);
-		vm_page_xunbusy(m);
-	}
+	int error;
+
+	error = vm_page_grab_zero_partial(object, idx, base, end);
+	if (ignerr)
+		error = 0;
 	return (error);
 }

diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -697,51 +697,12 @@
 shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
     int end)
 {
-	vm_page_t m;
-	int rv;
+	int error;
 
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	KASSERT(base >= 0, ("%s: base %d", __func__, base));
-	KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
-	    end));
-
-retry:
-	m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
-	if (m != NULL) {
-		MPASS(vm_page_all_valid(m));
-	} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
-		m = vm_page_alloc(object, idx,
-		    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
-		if (m == NULL)
-			goto retry;
-		vm_object_pip_add(object, 1);
+	error = vm_page_grab_zero_partial(object, idx, base, end);
+	if (error == EIO)
 		VM_OBJECT_WUNLOCK(object);
-		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
-		VM_OBJECT_WLOCK(object);
-		vm_object_pip_wakeup(object);
-		if (rv == VM_PAGER_OK) {
-			/*
-			 * Since the page was not resident, and therefore not
-			 * recently accessed, immediately enqueue it for
-			 * asynchronous laundering.  The current operation is
-			 * not regarded as an access.
-			 */
-			vm_page_launder(m);
-		} else {
-			vm_page_free(m);
-			VM_OBJECT_WUNLOCK(object);
-			return (EIO);
-		}
-	}
-	if (m != NULL) {
-		pmap_zero_page_area(m, base, end - base);
-		KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid",
-		    __func__, m));
-		vm_page_set_dirty(m);
-		vm_page_xunbusy(m);
-	}
-
-	return (0);
+	return (error);
 }
 
 static int
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -627,6 +627,8 @@
     vm_memattr_t memattr);
 void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
 bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
+int vm_page_grab_zero_partial(vm_object_t object, vm_pindex_t pindex, int base,
+    int end);
 vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
 vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
 int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -5086,6 +5086,57 @@
 	return (VM_PAGER_OK);
 }
 
+/*
+ * Fill a partial page with zeroes.  The object write lock is held on entry and
+ * exit, but may be temporarily released.
+ */
+int
+vm_page_grab_zero_partial(vm_object_t object, vm_pindex_t pindex, int base,
+    int end)
+{
+	vm_page_t m;
+	int rv;
+
+	VM_OBJECT_ASSERT_WLOCKED(object);
+	KASSERT(base >= 0, ("%s: base %d", __func__, base));
+	KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
+	    end));
+
+retry:
+	m = vm_page_grab(object, pindex, VM_ALLOC_NOCREAT);
+	if (m != NULL) {
+		MPASS(vm_page_all_valid(m));
+	} else if (vm_pager_has_page(object, pindex, NULL, NULL)) {
+		m = vm_page_alloc(object, pindex,
+		    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
+		if (m == NULL)
+			goto retry;
+		vm_object_pip_add(object, 1);
+		VM_OBJECT_WUNLOCK(object);
+		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
+		VM_OBJECT_WLOCK(object);
+		vm_object_pip_wakeup(object);
+		if (rv != VM_PAGER_OK) {
+			vm_page_free(m);
+			return (EIO);
+		}
+
+		/*
+		 * Since the page was not resident, and therefore not recently
+		 * accessed, immediately enqueue it for asynchronous laundering.
+		 * The current operation is not regarded as an access.
+		 */
+		vm_page_launder(m);
+	} else
+		return (0);
+
+	pmap_zero_page_area(m, base, end - base);
+	KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid", __func__, m));
+	vm_page_set_dirty(m);
+	vm_page_xunbusy(m);
+	return (0);
+}
+
 /*
  * Locklessly grab a valid page.  If the page is not valid or not yet
  * allocated this will fall back to the object lock method.
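
Note on the new helper: vm_page_grab_zero_partial() centralizes the
grab/page-in/zero/dirty sequence that tmpfs_partial_page_invalidate() and
shm_partial_page_invalidate() previously open-coded, zeroing the byte range
[base, end) of the page at pindex.  A minimal sketch of the expected calling
pattern from a truncation path follows; truncate_tail_page() and its newsize
parameter are hypothetical illustrations, not part of this change.

/*
 * Sketch only: zero the tail of the now-partial last page after an object
 * has been shrunk to newsize.  Only vm_page_grab_zero_partial() is real;
 * the surrounding scaffolding is assumed caller code.
 */
static int
truncate_tail_page(vm_object_t object, vm_ooffset_t newsize)
{
	int base, error;

	base = (int)(newsize & PAGE_MASK);
	if (base == 0)
		return (0);	/* newsize is page-aligned; no partial page */

	VM_OBJECT_WLOCK(object);
	/*
	 * Zero from base to the end of the page.  The write lock may be
	 * dropped while the pager runs, but it is held again on return,
	 * even when EIO is returned.
	 */
	error = vm_page_grab_zero_partial(object, OFF_TO_IDX(newsize), base,
	    PAGE_SIZE);
	VM_OBJECT_WUNLOCK(object);
	return (error);
}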