D45409: vm_phys: reduce touching of page->pool fields
D45409.id143046.diff (23 KB)

Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c
+++ sys/vm/vm_kern.c
@@ -908,6 +908,8 @@
vm_dom[domain].vmd_kernel_nofree_arena =
vm_dom[domain].vmd_kernel_arena;
#endif
+ vm_dom[domain].vmd_free_page = NULL;
+ vm_dom[domain].vmd_num_free = 0;
}
/*
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -774,48 +774,27 @@
seg = &vm_phys_segs[segind];
/*
- * If lazy vm_page initialization is not enabled, simply
- * initialize all of the pages in the segment. Otherwise, we
- * only initialize:
- * 1. Pages not covered by phys_avail[], since they might be
- * freed to the allocator at some future point, e.g., by
- * kmem_bootstrap_free().
- * 2. The first page of each run of free pages handed to the
- * vm_phys allocator, which in turn defers initialization
- * of pages until they are needed.
- * This avoids blocking the boot process for long periods, which
- * may be relevant for VMs (which ought to boot as quickly as
- * possible) and/or systems with large amounts of physical
- * memory.
+ * Initialize pages not covered by phys_avail[], since they
+ * might be freed to the allocator at some future point, e.g.,
+ * by kmem_bootstrap_free().
*/
-#ifdef VM_FREEPOOL_LAZYINIT
- if (lazyinit) {
- startp = seg->start;
- for (i = 0; phys_avail[i + 1] != 0; i += 2) {
- if (startp >= seg->end)
- break;
-
- if (phys_avail[i + 1] < startp)
- continue;
- if (phys_avail[i] <= startp) {
- startp = phys_avail[i + 1];
- continue;
- }
-
- m = vm_phys_seg_paddr_to_vm_page(seg, startp);
- for (endp = MIN(phys_avail[i], seg->end);
- startp < endp; startp += PAGE_SIZE, m++) {
- vm_page_init_page(m, startp, segind,
- VM_FREEPOOL_DEFAULT);
- }
+ startp = seg->start;
+ for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+ if (startp >= seg->end)
+ break;
+ if (phys_avail[i + 1] < startp)
+ continue;
+ if (phys_avail[i] <= startp) {
+ startp = phys_avail[i + 1];
+ continue;
}
- } else
-#endif
- for (m = seg->first_page, pa = seg->start;
- pa < seg->end; m++, pa += PAGE_SIZE) {
- vm_page_init_page(m, pa, segind,
+ m = vm_phys_seg_paddr_to_vm_page(seg, startp);
+ for (endp = MIN(phys_avail[i], seg->end);
+ startp < endp; startp += PAGE_SIZE, m++) {
+ vm_page_init_page(m, startp, segind,
VM_FREEPOOL_DEFAULT);
}
+ }
/*
* Add the segment's pages that are covered by one of
@@ -832,16 +811,41 @@
if (pagecount == 0)
continue;
+ /*
+ * If lazy vm_page initialization is not enabled, simply
+ * initialize all of the pages in the segment covered by
+ * phys_avail. Otherwise, initialize only the first
+ * page of each run of free pages handed to the vm_phys
+ * allocator, which in turn defers initialization of
+ * pages until they are needed.
+ *
+ * This avoids blocking the boot process for long
+ * periods, which may be relevant for VMs (which ought
+ * to boot as quickly as possible) and/or systems with
+ * large amounts of physical memory.
+ */
m = vm_phys_seg_paddr_to_vm_page(seg, startp);
#ifdef VM_FREEPOOL_LAZYINIT
if (lazyinit) {
vm_page_init_page(m, startp, segind,
VM_FREEPOOL_LAZYINIT);
- }
+ } else
#endif
+ for (int j = 0; j < pagecount; j++) {
+ vm_page_init_page(&m[j],
+ startp + ptoa(j), segind,
+ VM_NFREEPOOL);
+ }
vmd = VM_DOMAIN(seg->domain);
vm_domain_free_lock(vmd);
- vm_phys_enqueue_contig(m, pagecount);
+#ifdef VM_FREEPOOL_LAZYINIT
+ if (lazyinit)
+ vm_phys_enqueue_contig(m, VM_FREEPOOL_LAZYINIT,
+ pagecount);
+ else
+#endif
+ vm_phys_enqueue_contig(m, VM_FREEPOOL_DEFAULT,
+ pagecount);
vm_domain_free_unlock(vmd);
vm_domain_freecnt_inc(vmd, pagecount);
vm_cnt.v_page_count += (u_int)pagecount;
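To make the comment above concrete, here is a hedged example (the run length is hypothetical) of how one run of free pages covered by phys_avail[] is handled under the two modes:

/*
 * Hypothetical example: a run of 1024 free pages within phys_avail[].
 *
 * With VM_FREEPOOL_LAZYINIT enabled, only the first vm_page is initialized
 * here (pool VM_FREEPOOL_LAZYINIT), and the run is recorded with
 *     vm_phys_enqueue_contig(m, VM_FREEPOOL_LAZYINIT, 1024);
 * the remaining 1023 page structures are filled in later, e.g. by
 * vm_phys_finish_init() or vm_phys_lazy_init_domain(), when the run is
 * first carved up.
 *
 * Without it, all 1024 pages are initialized in the loop above with pool
 * VM_NFREEPOOL and the run is enqueued with
 *     vm_phys_enqueue_contig(m, VM_FREEPOOL_DEFAULT, 1024);
 */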
@@ -2120,6 +2124,7 @@
m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DEFAULT].zone,
M_NOWAIT | M_NOVM);
if (m != NULL) {
+ m->pool = VM_FREEPOOL_DEFAULT;
flags |= PG_PCPU_CACHE;
goto found;
}
@@ -2446,6 +2451,7 @@
m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone,
M_NOWAIT | M_NOVM);
if (m != NULL) {
+ m->pool = VM_FREEPOOL_DIRECT;
flags |= PG_PCPU_CACHE;
goto found;
}
@@ -2697,16 +2703,11 @@
{
struct vm_domain *vmd;
struct vm_pgcache *pgcache;
- vm_page_t m;
- int i;
pgcache = arg;
vmd = VM_DOMAIN(pgcache->domain);
vm_domain_free_lock(vmd);
- for (i = 0; i < cnt; i++) {
- m = (vm_page_t)store[i];
- vm_phys_free_pages(m, 0);
- }
+ vm_phys_free_npages(pgcache->pool, cnt, (vm_page_t *)store);
vm_domain_free_unlock(vmd);
vm_domain_freecnt_inc(vmd, cnt);
}
@@ -4105,6 +4106,7 @@
vmd = vm_pagequeue_domain(m);
zone = vmd->vmd_pgcache[m->pool].zone;
if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) {
+ m->pool = VM_NFREEPOOL;
uma_zfree(zone, m);
return;
}
Index: sys/vm/vm_pagequeue.h
===================================================================
--- sys/vm/vm_pagequeue.h
+++ sys/vm/vm_pagequeue.h
@@ -297,6 +297,10 @@
/* Name for sysctl etc. */
struct sysctl_oid *vmd_oid;
char vmd_name[sizeof(__XSTRING(MAXMEMDOM))];
+
+ /* A deferred free page range. */
+ vm_page_t vmd_free_page;
+ int vmd_num_free;
} __aligned(CACHE_LINE_SIZE);
extern struct vm_domain vm_dom[MAXMEMDOM];
Index: sys/vm/vm_phys.h
===================================================================
--- sys/vm/vm_phys.h
+++ sys/vm/vm_phys.h
@@ -62,9 +62,10 @@
vm_page_t vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low,
vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
int vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[]);
+void vm_phys_free_npages(int pool, int npages, vm_page_t ma[]);
vm_page_t vm_phys_alloc_pages(int domain, int pool, int order);
int vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high);
-void vm_phys_enqueue_contig(vm_page_t m, u_long npages);
+void vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages);
int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
vm_memattr_t memattr);
void vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end);
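A hedged usage sketch (not part of the patch; vmd and domain are assumed to identify the NUMA domain) showing how a caller pairs the two interfaces: vm_phys_alloc_npages() now hands back pages with no valid pool field, so the caller remembers which pool it requested and passes the same value back to vm_phys_free_npages(), as the per-CPU page cache code in vm_page.c does below.

	vm_page_t ma[8];
	int n;

	vm_domain_free_lock(vmd);
	n = vm_phys_alloc_npages(domain, VM_FREEPOOL_DIRECT, 8, ma);
	vm_domain_free_unlock(vmd);

	/* ... use the n allocated pages; their pool fields are not valid ... */

	vm_domain_free_lock(vmd);
	vm_phys_free_npages(VM_FREEPOOL_DIRECT, n, ma);
	vm_domain_free_unlock(vmd);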
Index: sys/vm/vm_phys.c
===================================================================
--- sys/vm/vm_phys.c
+++ sys/vm/vm_phys.c
@@ -391,10 +391,12 @@
#endif
static void
-vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
+vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int pool,
+ int tail)
{
m->order = order;
+ m->pool = pool;
if (tail)
TAILQ_INSERT_TAIL(&fl[order].pl, m, listq);
else
@@ -690,6 +692,7 @@
int tail)
{
vm_page_t m_buddy;
+ int pool = m->pool;
while (oind > order) {
oind--;
@@ -697,19 +700,22 @@
KASSERT(m_buddy->order == VM_NFREEORDER,
("vm_phys_split_pages: page %p has unexpected order %d",
m_buddy, m_buddy->order));
- vm_freelist_add(fl, m_buddy, oind, tail);
+ KASSERT(m_buddy->pool == VM_NFREEPOOL,
+ ("vm_phys_split_pages: page %p has unexpected pool %d",
+ m_buddy, m_buddy->pool));
+ vm_freelist_add(fl, m_buddy, oind, pool, tail);
}
}
static void
-vm_phys_enq_chunk(struct vm_freelist *fl, vm_page_t m, int order, int tail)
+vm_phys_enq_chunk(struct vm_freelist *fl, vm_page_t m, int order, int pool,
+ int tail)
{
KASSERT(order >= 0 && order < VM_NFREEORDER,
("%s: invalid order %d", __func__, order));
- vm_freelist_add(fl, m, order, tail);
#ifdef VM_FREEPOOL_LAZYINIT
- if (__predict_false(m->pool == VM_FREEPOOL_LAZYINIT)) {
+ if (__predict_false(pool == VM_FREEPOOL_LAZYINIT)) {
vm_page_t m_next;
vm_paddr_t pa;
int npages;
@@ -723,6 +729,7 @@
}
}
#endif
+ vm_freelist_add(fl, m, order, pool, tail);
}
/*
@@ -738,7 +745,8 @@
* The physical page m's buddy must not be free.
*/
static void
-vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
+vm_phys_enq_beg(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
+ int tail)
{
int order;
@@ -754,7 +762,7 @@
order = ilog2(npages);
KASSERT(order < VM_NFREEORDER,
("%s: order %d is out of range", __func__, order));
- vm_phys_enq_chunk(fl, m, order, tail);
+ vm_phys_enq_chunk(fl, m, order, pool, tail);
m += 1 << order;
npages -= 1 << order;
}
@@ -774,7 +782,8 @@
* parameter m. Otherwise, the physical page m's buddy must not be free.
*/
static vm_page_t
-vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
+vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int pool,
+ int tail)
{
int order;
@@ -788,7 +797,7 @@
("vm_phys_enq_range: page %p has unexpected order %d",
m, m->order));
order = ffs(npages) - 1;
- vm_phys_enq_chunk(fl, m, order, tail);
+ vm_phys_enq_chunk(fl, m, order, pool, tail);
m += 1 << order;
npages -= 1 << order;
}
@@ -796,33 +805,45 @@
}
/*
- * Set the pool for a contiguous, power of two-sized set of physical pages.
+ * Complete initialization of a contiguous, power of two-sized set of physical
+ * pages.
*
* If the pages currently belong to the lazy init pool, then the corresponding
* page structures must be initialized. In this case it is assumed that the
* first page in the run has already been initialized.
*/
static void
-vm_phys_set_pool(int pool, vm_page_t m, int order)
+vm_phys_finish_init(vm_page_t m, int order)
{
#ifdef VM_FREEPOOL_LAZYINIT
if (__predict_false(m->pool == VM_FREEPOOL_LAZYINIT)) {
vm_paddr_t pa;
int segind;
- m->pool = pool;
-
TSENTER();
pa = m->phys_addr + PAGE_SIZE;
segind = m->segind;
for (vm_page_t m_tmp = m + 1; m_tmp < &m[1 << order];
m_tmp++, pa += PAGE_SIZE)
- vm_page_init_page(m_tmp, pa, segind, pool);
+ vm_page_init_page(m_tmp, pa, segind, VM_NFREEPOOL);
TSEXIT();
- } else
+ }
#endif
- for (vm_page_t m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
- m_tmp->pool = pool;
+ m->pool = VM_NFREEPOOL;
+}
+
+/*
+ * Immediately flush pages waiting to be freed.
+ */
+static void
+vm_phys_flush_freed_pages(struct vm_domain *vmd)
+{
+ vm_domain_free_assert_locked(vmd);
+ if (vmd->vmd_num_free > 0) {
+ vm_phys_free_contig(vmd->vmd_free_page, vmd->vmd_num_free);
+ vmd->vmd_free_page = NULL;
+ vmd->vmd_num_free = 0;
+ }
}
/*
@@ -833,7 +854,8 @@
* The returned pages may not be physically contiguous. However, in contrast
* to performing multiple, back-to-back calls to vm_phys_alloc_pages(..., 0),
* calling this function once to allocate the desired number of pages will
- * avoid wasted time in vm_phys_split_pages().
+ * avoid wasted time in vm_phys_split_pages(). The allocated pages have no
+ * valid pool field set.
*
* The free page queues for the specified domain must be locked.
*/
@@ -851,6 +873,7 @@
KASSERT(npages <= 1 << (VM_NFREEORDER - 1),
("vm_phys_alloc_npages: npages %d is out of range", npages));
vm_domain_free_assert_locked(VM_DOMAIN(domain));
+ vm_phys_flush_freed_pages(VM_DOMAIN(domain));
i = 0;
for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
flind = vm_freelist_to_flind[freelist];
@@ -860,6 +883,7 @@
for (oind = 0; oind < VM_NFREEORDER; oind++) {
while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
vm_freelist_rem(fl, m, oind);
+ m->pool = VM_NFREEPOOL;
avail = i + (1 << oind);
end = imin(npages, avail);
while (i < end)
@@ -869,7 +893,8 @@
* Return excess pages to fl. Its order
* [0, oind) queues are empty.
*/
- vm_phys_enq_range(m, avail - i, fl, 1);
+ vm_phys_enq_range(m, avail - i, fl,
+ pool, 1);
return (npages);
}
}
@@ -881,7 +906,7 @@
while ((m = TAILQ_FIRST(&alt[oind].pl)) !=
NULL) {
vm_freelist_rem(alt, m, oind);
- vm_phys_set_pool(pool, m, oind);
+ vm_phys_finish_init(m, oind);
avail = i + (1 << oind);
end = imin(npages, avail);
while (i < end)
@@ -893,7 +918,7 @@
* are empty.
*/
vm_phys_enq_range(m, avail - i,
- fl, 1);
+ fl, pool, 1);
return (npages);
}
}
@@ -906,7 +931,8 @@
/*
* Allocate a contiguous, power of two-sized set of physical pages from the
* specified free list. The free list must be specified using one of the
- * manifest constants VM_FREELIST_*.
+ * manifest constants VM_FREELIST_*. Sets the pool field in the first page
+ * only.
*
* The free page queues must be locked.
*/
@@ -957,7 +983,8 @@
m = TAILQ_FIRST(&alt[oind].pl);
if (m != NULL) {
vm_freelist_rem(alt, m, oind);
- vm_phys_set_pool(pool, m, oind);
+ vm_phys_finish_init(m, oind);
+ m->pool = pool;
/* The order [order, oind) queues are empty. */
vm_phys_split_pages(m, oind, fl, order, 1);
return (m);
@@ -969,7 +996,7 @@
/*
* Allocate a contiguous, power of two-sized set of physical pages
- * from the free lists.
+ * from the free lists. Sets the pool field in the first page only.
*
* The free page queues must be locked.
*/
@@ -979,6 +1006,7 @@
vm_page_t m;
int freelist;
+ vm_phys_flush_freed_pages(VM_DOMAIN(domain));
for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
m = vm_phys_alloc_freelist_pages(domain, freelist, pool, order);
if (m != NULL)
@@ -1197,12 +1225,13 @@
}
/*
- * Free a contiguous, power of two-sized set of physical pages.
+ * Free a contiguous, power of two-sized set of physical pages. Assumes that
+ * no page has a valid pool field.
*
* The free page queues must be locked.
*/
-void
-vm_phys_free_pages(vm_page_t m, int order)
+static void
+vm_phys_merge_pages(vm_page_t m, int order, int pool)
{
struct vm_freelist *fl;
struct vm_phys_seg *seg;
@@ -1210,13 +1239,15 @@
vm_page_t m_buddy;
KASSERT(m->order == VM_NFREEORDER,
- ("vm_phys_free_pages: page %p has unexpected order %d",
- m, m->order));
- KASSERT(vm_phys_pool_valid(m->pool),
- ("vm_phys_free_pages: page %p has unexpected pool %d",
- m, m->pool));
+ ("%s: page %p has unexpected order %d",
+ __func__, m, m->order));
+ KASSERT(vm_phys_pool_valid(pool),
+ ("%s: unexpected pool param %d", __func__, pool));
+ KASSERT(m->pool == VM_NFREEPOOL,
+ ("%s: page %p has unexpected pool %d",
+ __func__, m, m->pool));
KASSERT(order < VM_NFREEORDER,
- ("vm_phys_free_pages: order %d is out of range", order));
+ ("%s: order %d is out of range", __func__, order));
seg = &vm_phys_segs[m->segind];
vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
if (order < VM_NFREEORDER - 1) {
@@ -1230,15 +1261,65 @@
break;
fl = (*seg->free_queues)[m_buddy->pool];
vm_freelist_rem(fl, m_buddy, order);
- if (m_buddy->pool != m->pool)
- vm_phys_set_pool(m->pool, m_buddy, order);
+ vm_phys_finish_init(m_buddy, order);
order++;
pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
m = vm_phys_seg_paddr_to_vm_page(seg, pa);
} while (order < VM_NFREEORDER - 1);
}
- fl = (*seg->free_queues)[m->pool];
- vm_freelist_add(fl, m, order, 1);
+ fl = (*seg->free_queues)[pool];
+ vm_freelist_add(fl, m, order, pool, 1);
+}
+
+/*
+ * Free the pages allocated in an array by vm_phys_alloc_npages.
+ */
+void
+vm_phys_free_npages(int pool, int npages, vm_page_t ma[])
+{
+ for (int i = 0; i < npages; i++)
+ vm_phys_merge_pages(ma[i], 0, pool);
+}
+
+/*
+ * Schedule a contiguous, power of two-sized set of physical pages for freeing.
+ *
+ * The free page queues must be locked.
+ */
+void
+vm_phys_free_pages(vm_page_t m, int order)
+{
+ struct vm_domain *vmd;
+
+ KASSERT(m->order == VM_NFREEORDER,
+ ("vm_phys_free_pages: page %p has unexpected order %d",
+ m, m->order));
+ KASSERT(order < VM_NFREEORDER,
+ ("vm_phys_free_pages: order %d is out of range", order));
+ vmd = VM_DOMAIN(vm_phys_segs[m->segind].domain);
+ vm_domain_free_assert_locked(vmd);
+ if (vmd->vmd_num_free > 0) {
+ /* There is a deferred free range. */
+ if (m == vmd->vmd_free_page + vmd->vmd_num_free) {
+ /* This page could extend it. */
+ if (m->pool != VM_NFREEPOOL &&
+ vmd->vmd_free_page->pool == VM_NFREEPOOL) {
+ /* Move the pool from here to range start. */
+ vmd->vmd_free_page->pool = m->pool;
+ m->pool = VM_NFREEPOOL;
+ }
+ if (m->pool == VM_NFREEPOOL) {
+ /* Extend the free range. */
+ vmd->vmd_num_free += 1 << order;
+ return;
+ }
+ }
+ /* Free the deferred free range. */
+ vm_phys_free_contig(vmd->vmd_free_page, vmd->vmd_num_free);
+ }
+ /* Start a new deferred free range. */
+ vmd->vmd_free_page = m;
+ vmd->vmd_num_free = 1 << order;
}
#ifdef VM_FREEPOOL_LAZYINIT
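To make the new deferred-free path concrete, here is a hedged walk-through (the scenario is hypothetical) of three back-to-back frees of adjacent order-0 pages, followed by an allocation that flushes the pending run:

/*
 * Hypothetical trace, assuming m, m + 1 and m + 2 are physically adjacent
 * order-0 pages in the same domain and only m + 1 has a valid pool field:
 *
 *   vm_phys_free_pages(m, 0);
 *       vmd_num_free == 0, so start a new range:
 *       vmd_free_page = m, vmd_num_free = 1.
 *
 *   vm_phys_free_pages(m + 1, 0);
 *       m + 1 == vmd_free_page + vmd_num_free, so it can extend the range;
 *       its valid pool is migrated to the range start:
 *       vmd_free_page->pool = (m + 1)->pool, (m + 1)->pool = VM_NFREEPOOL,
 *       vmd_num_free = 2.
 *
 *   vm_phys_free_pages(m + 2, 0);
 *       adjacent again, pool already invalid: vmd_num_free = 3.
 *
 *   vm_phys_alloc_pages(domain, pool, order);
 *       calls vm_phys_flush_freed_pages(), which hands the whole run to
 *       vm_phys_free_contig(vmd_free_page, 3); the pool stored in the first
 *       page (or VM_FREEPOOL_DEFAULT if none) selects the free lists.
 */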
@@ -1290,12 +1371,13 @@
VM_ALLOC_NORMAL, 1 << oind);
if (unlocked)
vm_domain_free_unlock(vmd);
- vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
+ vm_phys_finish_init(m, oind);
if (unlocked) {
vm_domain_freecnt_inc(vmd, 1 << oind);
vm_domain_free_lock(vmd);
}
- vm_phys_free_pages(m, oind);
+ vm_phys_merge_pages(m, oind,
+ VM_FREEPOOL_DEFAULT);
}
}
}
@@ -1344,12 +1426,12 @@
/*
* Free a contiguous, arbitrarily sized set of physical pages, without
- * merging across set boundaries.
+ * merging across set boundaries. Assumes no pages have a valid pool field.
*
* The free page queues must be locked.
*/
void
-vm_phys_enqueue_contig(vm_page_t m, u_long npages)
+vm_phys_enqueue_contig(vm_page_t m, int pool, u_long npages)
{
struct vm_freelist *fl;
struct vm_phys_seg *seg;
@@ -1363,14 +1445,15 @@
*/
vm_domain_free_assert_locked(vm_pagequeue_domain(m));
seg = &vm_phys_segs[m->segind];
- fl = (*seg->free_queues)[m->pool];
+ fl = (*seg->free_queues)[pool];
m_end = m + npages;
/* Free blocks of increasing size. */
lo = atop(VM_PAGE_TO_PHYS(m));
if (m < m_end &&
(diff = lo ^ (lo + npages - 1)) != 0) {
order = min(ilog2(diff), VM_NFREEORDER - 1);
- m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl, 1);
+ m = vm_phys_enq_range(m, roundup2(lo, 1 << order) - lo, fl,
+ pool, 1);
}
/* Free blocks of maximum size. */
@@ -1379,15 +1462,26 @@
KASSERT(seg == &vm_phys_segs[m->segind],
("%s: page range [%p,%p) spans multiple segments",
__func__, m_end - npages, m));
- vm_phys_enq_chunk(fl, m, order, 1);
+ vm_phys_enq_chunk(fl, m, order, pool, 1);
m += 1 << order;
}
/* Free blocks of diminishing size. */
- vm_phys_enq_beg(m, m_end - m, fl, 1);
+ vm_phys_enq_beg(m, m_end - m, fl, pool, 1);
}
+static long vm_phys_free_contig_count;
+static long vm_phys_free_contig_avg, vm_phys_free_contig_extra;
+SYSCTL_LONG(_vm, OID_AUTO, contig_numer, CTLFLAG_RD,
+ &vm_phys_free_contig_extra, 0, "Numer of free_contig calls.");
+SYSCTL_LONG(_vm, OID_AUTO, contig_denom, CTLFLAG_RD,
+ &vm_phys_free_contig_count, 0, "Denom of free_contig calls.");
+SYSCTL_LONG(_vm, OID_AUTO, contig_pages, CTLFLAG_RD,
+ &vm_phys_free_contig_avg, 0, "Pages per free_contig call.");
+
/*
* Free a contiguous, arbitrarily sized set of physical pages.
+ * Assumes that every page but the first has no valid pool field.
+ * Uses the pool value in the first page if valid, otherwise default.
*
* The free page queues must be locked.
*/
@@ -1397,6 +1491,21 @@
vm_paddr_t lo;
vm_page_t m_start, m_end;
unsigned max_order, order_start, order_end;
+ int pool = m->pool;
+
+ m->pool = VM_NFREEPOOL;
+ if (pool == VM_NFREEPOOL)
+ pool = VM_FREEPOOL_DEFAULT;
+
+ vm_phys_free_contig_count++;
+ vm_phys_free_contig_extra += npages - vm_phys_free_contig_avg;
+ vm_phys_free_contig_avg +=
+ vm_phys_free_contig_extra / vm_phys_free_contig_count;
+ vm_phys_free_contig_extra %= vm_phys_free_contig_count;
+ if (vm_phys_free_contig_extra < 0) {
+ vm_phys_free_contig_avg--;
+ vm_phys_free_contig_extra += vm_phys_free_contig_count;
+ }
vm_domain_free_assert_locked(vm_pagequeue_domain(m));
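The instrumentation above keeps a running integer mean without storing the running total: after each call, vm_phys_free_contig_avg is floor(total pages / number of calls) and vm_phys_free_contig_extra is the remainder. A standalone sketch of the same update (hypothetical names, not part of the patch) with a short worked trace:

#include <stdio.h>

static long count, avg, extra;	/* calls, floor of mean, remainder */

/* Same update as above: maintains total == avg * count + extra, 0 <= extra < count. */
static void
record(long npages)
{
	count++;
	extra += npages - avg;
	avg += extra / count;
	extra %= count;
	if (extra < 0) {	/* C division truncates toward zero; fix up */
		avg--;
		extra += count;
	}
}

int
main(void)
{
	record(3);	/* total 3:  avg = 3, extra = 0 */
	record(8);	/* total 11: avg = 5, extra = 1 */
	record(1);	/* total 12: avg = 4, extra = 0 */
	printf("%ld calls, %ld pages per call (+%ld/%ld)\n",
	    count, avg, extra, count);
	return (0);
}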
@@ -1416,11 +1525,11 @@
* end of the range last.
*/
if (m_start < m_end)
- vm_phys_enqueue_contig(m_start, m_end - m_start);
+ vm_phys_enqueue_contig(m_start, pool, m_end - m_start);
if (order_start < max_order)
- vm_phys_free_pages(m, order_start);
+ vm_phys_merge_pages(m, order_start, pool);
if (order_end < max_order)
- vm_phys_free_pages(m_end, order_end);
+ vm_phys_merge_pages(m_end, order_end, pool);
}
/*
@@ -1474,15 +1583,16 @@
struct vm_phys_seg *seg;
vm_paddr_t pa_half;
vm_page_t m, m_set, m_tmp;
- int order;
+ int order, pool;
seg = vm_phys_paddr_to_seg(pa);
vm_domain_free_assert_locked(VM_DOMAIN(seg->domain));
+ vm_phys_flush_freed_pages(VM_DOMAIN(seg->domain));
+#ifdef VM_FREEPOOL_LAZYINIT
/*
* The pages on the free lists must be initialized.
*/
-#ifdef VM_FREEPOOL_LAZYINIT
vm_phys_lazy_init_domain(seg->domain, true);
#endif
@@ -1515,7 +1625,8 @@
* is larger than a page, shrink "m_set" by returning the half
* of "m_set" that does not contain "m" to the free lists.
*/
- fl = (*seg->free_queues)[m_set->pool];
+ pool = m_set->pool;
+ fl = (*seg->free_queues)[pool];
order = m_set->order;
vm_freelist_rem(fl, m_set, order);
while (order > 0) {
@@ -1527,8 +1638,9 @@
m_tmp = m_set;
m_set = vm_phys_seg_paddr_to_vm_page(seg, pa_half);
}
- vm_freelist_add(fl, m_tmp, order, 0);
+ vm_freelist_add(fl, m_tmp, order, pool, 0);
}
+ m_set->pool = pool;
KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
return (true);
}
@@ -1668,7 +1780,8 @@
* alignment of the first physical page in the set. If the given value
* "boundary" is non-zero, then the set of physical pages cannot cross
* any physical address boundary that is a multiple of that value. Both
- * "alignment" and "boundary" must be a power of two.
+ * "alignment" and "boundary" must be a power of two. Sets the pool
+ * field to DEFAULT in the first allocated page.
*/
vm_page_t
vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
@@ -1713,6 +1826,7 @@
*/
if (seg->free_queues == queues)
continue;
+ vm_phys_flush_freed_pages(VM_DOMAIN(seg->domain));
queues = seg->free_queues;
m_run = vm_phys_find_queues_contig(queues, npages,
low, high, alignment, boundary);
@@ -1727,12 +1841,13 @@
fl = (*queues)[m->pool];
oind = m->order;
vm_freelist_rem(fl, m, oind);
- if (m->pool != VM_FREEPOOL_DEFAULT)
- vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
+ vm_phys_finish_init(m, oind);
}
+ m_run->pool = VM_FREEPOOL_DEFAULT;
/* Return excess pages to the free lists. */
fl = (*queues)[VM_FREEPOOL_DEFAULT];
- vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
+ vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl,
+ VM_FREEPOOL_DEFAULT, 0);
/* Return page verified to satisfy conditions of request. */
pa_start = VM_PAGE_TO_PHYS(m_run);
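As a side note on the comment above, the alignment and boundary constraints reduce to mask arithmetic on power-of-two values; a hedged sketch with a hypothetical helper (not part of the patch):

#include <stdbool.h>

typedef unsigned long long vm_paddr_t;	/* stand-in for the kernel type */

/*
 * Return true if a run of 'size' bytes starting at physical address 'pa'
 * satisfies the vm_phys_alloc_contig() constraints: 'pa' is aligned to
 * 'alignment', and the run does not cross a multiple of 'boundary'
 * (boundary == 0 means no boundary restriction).  Both values are assumed
 * to be powers of two, as the function comment requires.
 */
static bool
run_satisfies(vm_paddr_t pa, vm_paddr_t size, vm_paddr_t alignment,
    vm_paddr_t boundary)
{
	if ((pa & (alignment - 1)) != 0)
		return (false);
	if (boundary != 0 &&
	    (pa & ~(boundary - 1)) != ((pa + size - 1) & ~(boundary - 1)))
		return (false);
	return (true);
}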
Index: sys/vm/vm_reserv.c
===================================================================
--- sys/vm/vm_reserv.c
+++ sys/vm/vm_reserv.c
@@ -943,7 +943,7 @@
vm_reserv_break(vm_reserv_t rv)
{
vm_page_t m;
- int hi, lo, pos;
+ int pool, pos, pos0, pos1;
vm_reserv_assert_locked(rv);
CTR5(KTR_VM, "%s: rv %p object %p popcnt %d inpartpop %d",
@@ -954,23 +954,28 @@
for (; m < rv->pages + VM_LEVEL_0_NPAGES; m += VM_SUBLEVEL_0_NPAGES)
#endif
m->psind = 0;
- hi = lo = -1;
- pos = 0;
- for (;;) {
- bit_ff_at(rv->popmap, pos, VM_LEVEL_0_NPAGES, lo != hi, &pos);
- if (lo == hi) {
- if (pos == -1)
- break;
- lo = pos;
- continue;
- }
+ pool = rv->pages->pool;
+ rv->pages->pool = VM_NFREEPOOL;
+ pos0 = bit_test(rv->popmap, 0) ? -1 : 0;
+ pos1 = -1 - pos0;
+ for (pos = 0; pos < VM_LEVEL_0_NPAGES; ) {
+ /* Find the first different bit after pos. */
+ bit_ff_at(rv->popmap, pos + 1, VM_LEVEL_0_NPAGES,
+ pos1 < pos0, &pos);
if (pos == -1)
pos = VM_LEVEL_0_NPAGES;
- hi = pos;
+ if (pos0 < pos1) {
+ /* Set pool for pages from pos1 to pos. */
+ pos0 = pos1;
+ while (pos0 < pos)
+ rv->pages[pos0++].pool = pool;
+ continue;
+ }
+ /* Free unused pages from pos0 to pos. */
+ pos1 = pos;
vm_domain_free_lock(VM_DOMAIN(rv->domain));
- vm_phys_enqueue_contig(&rv->pages[lo], hi - lo);
+ vm_phys_enqueue_contig(&rv->pages[pos0], pool, pos1 - pos0);
vm_domain_free_unlock(VM_DOMAIN(rv->domain));
- lo = hi;
}
bit_nclear(rv->popmap, 0, VM_LEVEL_0_NPAGES - 1);
rv->popcnt = 0;
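The rewritten loop above walks the reservation's popmap once, alternating between runs of populated pages (each of which inherits the pool value saved from the first page) and runs of clear bits (which are enqueued as free contiguous chunks). A hedged, standalone model of that alternation over a plain array, with hypothetical names and bit_ff_at() replaced by a trivial scan; it does not reproduce the pos0/pos1 encoding, only the run-by-run behavior:

#include <stdbool.h>
#include <stdio.h>

#define	NPAGES	16

/*
 * Simplified stand-in for bit_ff_at(): find the first index in
 * [from, npages) whose value equals 'match', or -1 if there is none.
 */
static int
find_first(const bool popmap[], int from, int npages, bool match)
{
	for (int i = from; i < npages; i++)
		if (popmap[i] == match)
			return (i);
	return (-1);
}

int
main(void)
{
	/* true = page still populated, false = page to be freed. */
	bool popmap[NPAGES] = {
		true, true, false, false, false, true, false, false,
		false, false, false, false, true, true, true, false,
	};
	int pos, run;

	for (pos = 0; pos < NPAGES; pos = run) {
		/* Find the end of the current run of equal bits. */
		run = find_first(popmap, pos + 1, NPAGES, !popmap[pos]);
		if (run == -1)
			run = NPAGES;
		if (popmap[pos])
			printf("pages [%d, %d): keep, inherit saved pool\n",
			    pos, run);
		else
			printf("pages [%d, %d): vm_phys_enqueue_contig()\n",
			    pos, run);
	}
	return (0);
}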