Page MenuHomeFreeBSD

D23048.id66477.diff
No OneTemporary

D23048.id66477.diff

Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -258,8 +258,10 @@
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static int zero_init(void *, int, int);
+#if 0
static void keg_small_init(uma_keg_t keg);
static void keg_large_init(uma_keg_t keg);
+#endif
static void zone_foreach(void (*zfunc)(uma_zone_t, void *), void *);
static void zone_timeout(uma_zone_t zone, void *);
static int hash_alloc(struct uma_hash *, u_int);
@@ -1669,25 +1671,207 @@
return (UMA_SLAB_SIZE - slab_sizeof(nitems));
}
/*
 * Unsigned fixed-point arithmetic with 31 fractional bits, used to compare
 * slab layout memory efficiencies without floating point.
 */
#define	UMA_FIXPT_SHIFT	31
/* n / d as a fixed-point fraction; caller ensures the quotient fits. */
#define	UMA_FRAC_FIXPT(n, d)						\
	((uint32_t)(((uint64_t)(n) << UMA_FIXPT_SHIFT) / (d)))
/* Convert a fixed-point fraction to a whole percentage, truncating. */
#define	UMA_FIXPT_PCT(f)						\
	((u_int)(((uint64_t)100 * (f)) >> UMA_FIXPT_SHIFT))
/* Convert a whole percentage to a fixed-point fraction. */
#define	UMA_PCT_FIXPT(pct)	UMA_FRAC_FIXPT((pct), 100)
/* Minimum acceptable efficiency for an inline slab layout (fixed point). */
#define	UMA_MIN_EFF	UMA_PCT_FIXPT(100 - UMA_MAX_WASTE)
+
/*
 * Compute the number of items that will fit in a slab.  If hdr is true, the
 * item count may be limited to provide space in the slab for an inline slab
 * header.  Otherwise, all slab space will be provided for item storage.
 */
static u_int
slab_ipers_hdr(u_int size, u_int rsize, u_int slabsize, bool hdr)
{
	u_int ipers;
	u_int padpi;

	/* The padding between items is not needed after the last item. */
	padpi = rsize - size;

	if (hdr) {
		/*
		 * Start with the maximum item count and remove items until
		 * the slab header fits alongside the allocatable memory.
		 * slab_sizeof() depends on the item count, so the bound
		 * must be found iteratively rather than in closed form.
		 */
		for (ipers = MIN(SLAB_MAX_SETSIZE,
		    (slabsize + padpi - slab_sizeof(1)) / rsize);
		    ipers > 0 &&
		    ipers * rsize - padpi + slab_sizeof(ipers) > slabsize;
		    ipers--)
			continue;
	} else {
		/* No header: all slab space is available for items. */
		ipers = MIN((slabsize + padpi) / rsize, SLAB_MAX_SETSIZE);
	}

	return (ipers);
}
+
+/*
+ * Compute the number of items that will fit in a slab for a startup zone.
*/
int
slab_ipers(size_t size, int align)
{
int rsize;
- int nitems;
- /*
- * Compute the ideal number of items that will fit in a page and
- * then compute the actual number based on a bitset nitems wide.
- */
- rsize = roundup(size, align + 1);
- nitems = UMA_SLAB_SIZE / rsize;
- return (slab_space(nitems) / rsize);
+ rsize = roundup(size, align + 1); /* Assume no CACHESPREAD */
+ return (slab_ipers_hdr(size, rsize, UMA_SLAB_SIZE, true));
}
/*
 * Determine the format of a uma keg.  This determines where the slab header
 * will be placed (inline or offpage) and calculates ipers, rsize, and ppera.
 *
 * Arguments
 *	keg  The zone we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_layout(uma_keg_t keg)
{
	u_int alignsize;
	u_int eff;		/* inline layout efficiency, fixed point */
	u_int eff_nohdr;	/* offpage layout efficiency, fixed point */
	u_int format;		/* layout flags to merge into uk_flags */
	u_int ipers;		/* items per slab, inline layout */
	u_int ipers_nohdr;	/* items per slab, offpage layout */
	u_int pages;		/* pages per slab */
	u_int rsize;		/* rounded allocation size per item */
	u_int slabsize;		/* slab size in bytes */

	/* PCPU items must fit one per-CPU allocation unit, no CACHESPREAD. */
	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
	    (keg->uk_size <= UMA_PCPU_ALLOC_SIZE &&
	    (keg->uk_flags & UMA_ZONE_CACHESPREAD) == 0),
	    ("%s: cannot configure for PCPU: keg=%s, size=%u, flags=0x%b",
	    __func__, keg->uk_name, keg->uk_size, keg->uk_flags,
	    PRINT_UMA_ZFLAGS));
	/* Internal/cache-only kegs are incompatible with NOTOUCH and PCPU. */
	KASSERT((keg->uk_flags &
	    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY)) == 0 ||
	    (keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0,
	    ("%s: incompatible flags 0x%b", __func__, keg->uk_flags,
	    PRINT_UMA_ZFLAGS));

	alignsize = keg->uk_align + 1;
	format = 0;
	ipers = 0;

	/*
	 * Calculate the size of each allocation (rsize) according to
	 * alignment.  If the requested size is smaller than we have
	 * allocation bits for we round it up.
	 */
	rsize = MAX(keg->uk_size, UMA_SLAB_SIZE / SLAB_MAX_SETSIZE);
	rsize = roundup2(rsize, alignsize);

	if ((keg->uk_flags & UMA_ZONE_PCPU) != 0) {
		/* One allocation unit per CPU. */
		slabsize = UMA_PCPU_ALLOC_SIZE;
		pages = mp_maxid + 1;
	} else if ((keg->uk_flags & UMA_ZONE_CACHESPREAD) != 0) {
		/*
		 * We want one item to start on every align boundary in a page.
		 * To do this we will span pages.  We will also extend the item
		 * by the size of align if it is an even multiple of align.
		 * Otherwise, it would fall on the same boundary every time.
		 */
		if ((rsize & alignsize) == 0)
			rsize += alignsize;
		slabsize = rsize * (PAGE_SIZE / alignsize);
		slabsize = MIN(slabsize, rsize * SLAB_MAX_SETSIZE);
		slabsize = MIN(slabsize, UMA_CACHESPREAD_MAX_SIZE);
		pages = howmany(slabsize, PAGE_SIZE);
		slabsize = ptoa(pages);
	} else {
		/*
		 * Choose a slab size of as many pages as it takes to represent
		 * a single item.  We will then try to fit as many additional
		 * items into the slab as possible.  At some point, we may want
		 * to increase the slab size for awkward item sizes in order to
		 * increase efficiency.
		 */
		pages = howmany(keg->uk_size, PAGE_SIZE);
		slabsize = ptoa(pages);
	}

	/* Evaluate an inline slab layout. */
	if ((keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) == 0)
		ipers = slab_ipers_hdr(keg->uk_size, rsize, slabsize, true);

	/* TODO: vm_page-embedded slab. */

	/*
	 * We can't do OFFPAGE if we're internal or if we've been
	 * asked to not go to the VM for buckets.  If we do this we
	 * may end up going to the VM for slabs which we do not
	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
	 * of UMA_ZONE_VM, which clearly forbids it.
	 */
	if ((keg->uk_flags &
	    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY)) != 0) {
		if (ipers == 0) {
			/* We need an extra page for the slab header. */
			pages++;
			slabsize = ptoa(pages);
			ipers = slab_ipers_hdr(keg->uk_size, rsize, slabsize,
			    true);
		}
		goto out;
	}

	/*
	 * See if using an OFFPAGE slab will improve our efficiency.
	 * Only do this if we are below our efficiency threshold.
	 *
	 * XXX We could try growing slabsize to limit max waste as well.
	 * Historically this was not done because the VM could not
	 * efficiently handle contiguous allocations.
	 */
	eff = UMA_FRAC_FIXPT(ipers * rsize, slabsize);
	ipers_nohdr = slab_ipers_hdr(keg->uk_size, rsize, slabsize, false);
	/* Charge the offpage layout a conservatively-sized external header. */
	eff_nohdr = UMA_FRAC_FIXPT(ipers_nohdr * rsize,
	    slabsize + slab_sizeof(SLAB_MAX_SETSIZE));
	if (ipers == 0 || (eff < UMA_MIN_EFF && eff < eff_nohdr)) {
		CTR5(KTR_UMA, "UMA decided we need offpage slab headers for "
		    "keg: %s(%p), minimum efficiency allowed = %u%%, "
		    "old efficiency = %u%%, offpage efficiency = %u%%\n",
		    keg->uk_name, keg, UMA_FIXPT_PCT(UMA_MIN_EFF),
		    UMA_FIXPT_PCT(eff), UMA_FIXPT_PCT(eff_nohdr));
		format = UMA_ZFLAG_OFFPAGE;
		ipers = ipers_nohdr;
	}

out:
	/*
	 * How do we find the slab header if it is offpage or if not all item
	 * start addresses are in the same page?  We could solve the latter
	 * case with vaddr alignment, but we don't.
	 */
	if ((format & UMA_ZFLAG_OFFPAGE) != 0 ||
	    (ipers - 1) * rsize >= PAGE_SIZE) {
		if ((keg->uk_flags & UMA_ZONE_NOTPAGE) != 0)
			format |= UMA_ZFLAG_HASH;
		else
			format |= UMA_ZFLAG_VTOSLAB;
	}
	keg->uk_ipers = ipers;
	keg->uk_rsize = rsize;
	keg->uk_flags |= format;
	keg->uk_ppera = pages;
	CTR6(KTR_UMA, "%s: keg=%s, flags=%#x, rsize=%u, ipers=%u, ppera=%u\n",
	    __func__, keg->uk_name, keg->uk_flags, rsize, ipers, pages);
	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE,
	    ("%s: keg=%s, flags=0x%b, rsize=%u, ipers=%u, ppera=%u", __func__,
	    keg->uk_name, keg->uk_flags, PRINT_UMA_ZFLAGS, rsize, ipers,
	    pages));
}
+
+#if 0
/*
* Finish creating a small uma keg. This calculates ipers, and the keg size.
*
@@ -1890,6 +2074,7 @@
("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
keg->uk_ipers));
}
+#endif
/*
* Keg header ctor. This initializes all fields, locks, etc. And inserts
@@ -1942,6 +2127,7 @@
keg->uk_flags &= ~UMA_ZONE_PCPU;
#endif
+#if 0
if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
keg_cachespread_init(keg);
} else {
@@ -1950,6 +2136,9 @@
else
keg_small_init(keg);
}
+#else
+ keg_layout(keg);
+#endif
/*
* Use a first-touch NUMA policy for all kegs that pmap_extract()
Index: sys/vm/uma_int.h
===================================================================
--- sys/vm/uma_int.h
+++ sys/vm/uma_int.h
@@ -139,6 +139,9 @@
/* Max waste percentage before going to off page slab management */
#define UMA_MAX_WASTE 10
+/* Max size of a CACHESPREAD slab. */
+#define UMA_CACHESPREAD_MAX_SIZE (128 * 1024)
+
/*
* These flags must not overlap with the UMA_ZONE flags specified in uma.h.
*/

File Metadata

Mime Type
text/plain
Expires
Fri, May 2, 12:46 AM (8 h, 7 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
17894335
Default Alt Text
D23048.id66477.diff (8 KB)

Event Timeline