D45863.id141388.diff

diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -163,6 +163,7 @@
 static uma_zone_t fakepg_zone;
 
 static void vm_page_alloc_check(vm_page_t m);
+static vm_page_t vm_page_alloc_nofree_domain(int domain, int req);
 static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
     vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
@@ -2099,6 +2100,11 @@
 	if (!vm_pager_can_alloc_page(object, pindex))
 		return (NULL);
 again:
+	if (__predict_false((req & VM_ALLOC_NOFREE) != 0)) {
+		m = vm_page_alloc_nofree_domain(domain, req);
+		if (m != NULL)
+			goto found;
+	}
 #if VM_NRESERVLEVEL > 0
 	/*
 	 * Can we allocate the page from a reservation?
@@ -2431,6 +2437,11 @@
 	    ((req & VM_ALLOC_NOFREE) != 0 ? PG_NOFREE : 0);
 	vmd = VM_DOMAIN(domain);
 again:
+	if (__predict_false((req & VM_ALLOC_NOFREE) != 0)) {
+		m = vm_page_alloc_nofree_domain(domain, req);
+		if (m != NULL)
+			goto found;
+	}
 	if (freelist == VM_NFREELIST &&
 	    vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone != NULL) {
 		m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone,
@@ -2513,6 +2524,56 @@
 	return (_vm_page_alloc_noobj_domain(domain, freelist, req));
 }
 
+#if VM_NRESERVLEVEL > 1
+#define	VM_NOFREE_IMPORT_ORDER	(VM_LEVEL_1_ORDER + VM_LEVEL_0_ORDER)
+#elif VM_NRESERVLEVEL > 0
+#define	VM_NOFREE_IMPORT_ORDER	VM_LEVEL_0_ORDER
+#else
+#define	VM_NOFREE_IMPORT_ORDER	8
+#endif
+
+/*
+ * Allocate a single NOFREE page.
+ *
+ * This routine hands out NOFREE pages from higher-order
+ * physical memory blocks in order to reduce memory fragmentation.
+ * When a NOFREE chunk for a given domain is used up,
+ * the routine will try to fetch a new one from the freelists
+ * and discard the old one.
+ */
+static vm_page_t
+vm_page_alloc_nofree_domain(int domain, int req)
+{
+	vm_page_t m;
+	struct vm_domain *vmd;
+	struct vm_nofreeq *nqp;
+
+	KASSERT((req & VM_ALLOC_NOFREE) != 0, ("invalid request %#x", req));
+
+	vmd = VM_DOMAIN(domain);
+	nqp = &vmd->vmd_nofreeq;
+	vm_domain_free_lock(vmd);
+	if (nqp->offs >= (1 << VM_NOFREE_IMPORT_ORDER) || nqp->ma == NULL) {
+		if (!vm_domain_allocate(vmd, req,
+		    1 << VM_NOFREE_IMPORT_ORDER)) {
+			vm_domain_free_unlock(vmd);
+			return (NULL);
+		}
+		nqp->ma = vm_phys_alloc_pages(domain, VM_FREEPOOL_DEFAULT,
+		    VM_NOFREE_IMPORT_ORDER);
+		if (nqp->ma == NULL) {
+			vm_domain_freecnt_inc(vmd, 1 << VM_NOFREE_IMPORT_ORDER);
+			vm_domain_free_unlock(vmd);
+			return (NULL);
+		}
+		nqp->offs = 0;
+	}
+	m = &nqp->ma[nqp->offs++];
+	vm_domain_free_unlock(vmd);
+
+	return (m);
+}
+
 vm_page_t
 vm_page_alloc_noobj(int req)
 {
diff --git a/sys/vm/vm_pagequeue.h b/sys/vm/vm_pagequeue.h
--- a/sys/vm/vm_pagequeue.h
+++ b/sys/vm/vm_pagequeue.h
@@ -246,6 +246,10 @@
 	u_int vmd_domain;		/* (c) Domain number. */
 	u_int vmd_page_count;		/* (c) Total page count. */
 	long vmd_segs;			/* (c) bitmask of the segments */
+	struct vm_nofreeq {
+		vm_page_t ma;
+		int offs;
+	} vmd_nofreeq;			/* (f) NOFREE page bump allocator. */
 	u_int __aligned(CACHE_LINE_SIZE) vmd_free_count; /* (a,f) free page count */
 	u_int vmd_pageout_deficit;	/* (a) Estimated number of pages deficit */
 	uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];
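For scale: assuming amd64's VM_NRESERVLEVEL of 1 and VM_LEVEL_0_ORDER of 9 (neither constant is visible in this diff), VM_NOFREE_IMPORT_ORDER evaluates to 9, so each import takes 1 << 9 = 512 pages (2 MB of 4 KB pages, one reservation-sized block) off the free lists, and only one NOFREE allocation in 512 has to reach vm_phys_alloc_pages().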

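To make the pattern concrete, here is a minimal userspace sketch of the same bump allocator. All names in it (nofreeq, chunk_import, nofree_alloc, CHUNK_ORDER) are hypothetical; the kernel version additionally holds the per-domain free lock and charges each import against the domain's free page count via vm_domain_allocate().

/*
 * Userspace sketch of the NOFREE bump-allocator pattern.
 */
#include <stdio.h>
#include <stdlib.h>

#define	CHUNK_ORDER	4		/* import 1 << 4 = 16 items at a time */
#define	CHUNK_SIZE	(1 << CHUNK_ORDER)

struct nofreeq {
	int	*ma;			/* current chunk, NULL before first import */
	int	offs;			/* index of the next unused item */
};

/* Stand-in for vm_phys_alloc_pages(): import one contiguous chunk. */
static int *
chunk_import(void)
{
	return (calloc(CHUNK_SIZE, sizeof(int)));
}

/*
 * Hand out one item, importing a fresh chunk once the current one is
 * exhausted.  An exhausted chunk is simply abandoned: since nothing is
 * ever freed, a bump index is all the bookkeeping required.
 */
static int *
nofree_alloc(struct nofreeq *nqp)
{
	if (nqp->ma == NULL || nqp->offs >= CHUNK_SIZE) {
		nqp->ma = chunk_import();
		if (nqp->ma == NULL)
			return (NULL);
		nqp->offs = 0;
	}
	return (&nqp->ma[nqp->offs++]);
}

int
main(void)
{
	struct nofreeq nq = { NULL, 0 };
	int i;

	for (i = 0; i < 40; i++)	/* 40 items span three 16-item chunks */
		(void)nofree_alloc(&nq);
	printf("offs after 40 allocations: %d\n", nq.offs);	/* prints 8 */
	return (0);
}

A kernel consumer would simply pass the new flag, e.g. m = vm_page_alloc_noobj(VM_ALLOC_NOFREE | VM_ALLOC_WIRED | VM_ALLOC_ZERO); the page comes back tagged PG_NOFREE (see the flags computation in the second vm_page.c hunk), the expectation being that it is never handed back to vm_page_free().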