D49571.id152953.diff

diff --git a/sys/compat/linuxkpi/common/include/linux/slab.h b/sys/compat/linuxkpi/common/include/linux/slab.h
--- a/sys/compat/linuxkpi/common/include/linux/slab.h
+++ b/sys/compat/linuxkpi/common/include/linux/slab.h
@@ -93,7 +93,9 @@
extern void *lkpi_kmalloc(size_t size, gfp_t flags);
void *lkpi___kmalloc(size_t size, gfp_t flags);
+void *lkpi___kmalloc_node(size_t size, gfp_t flags, int node);
#define __kmalloc(_s, _f) lkpi___kmalloc(_s, _f)
+void *lkpi_krealloc(void *, size_t, gfp_t);
static inline gfp_t
linux_check_m_flags(gfp_t flags)
@@ -113,77 +115,79 @@
static inline void *
kmalloc_node(size_t size, gfp_t flags, int node)
{
- return (malloc_domainset(size, M_KMALLOC,
- linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
+ return (lkpi___kmalloc_node(size, flags, node));
}
static inline void *
-kcalloc(size_t n, size_t size, gfp_t flags)
+kmalloc_array(size_t n, size_t size, gfp_t flags)
{
- flags |= __GFP_ZERO;
- return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
+ if (WOULD_OVERFLOW(n, size))
+ panic("%s: %zu * %zu overflowed", __func__, n, size);
+
+ return (kmalloc(size * n, flags));
}
static inline void *
-kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+kcalloc(size_t n, size_t size, gfp_t flags)
{
flags |= __GFP_ZERO;
- return (mallocarray_domainset(n, size, M_KMALLOC,
- linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
+ return (kmalloc_array(n, size, linux_check_m_flags(flags)));
}
static inline void *
-__vmalloc(size_t size, gfp_t flags, int other)
+kmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
{
- return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
+ if (WOULD_OVERFLOW(n, size))
+ panic("%s: %zu * %zu overflowed", __func__, n, size);
+
+ return (kmalloc_node(size * n, flags, node));
}
static inline void *
-__vmalloc_node(size_t size, gfp_t flags, int node)
+kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
- return (malloc_domainset(size, M_KMALLOC,
- linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
+ flags |= __GFP_ZERO;
+ return (kmalloc_array_node(n, size, flags, node));
}
static inline void *
-vmalloc_32(size_t size)
+krealloc(void *ptr, size_t size, gfp_t flags)
{
- return (contigmalloc(size, M_KMALLOC, M_WAITOK, 0, UINT_MAX, 1, 1));
+ return (lkpi_krealloc(ptr, size, flags));
}
static inline void *
-kmalloc_array(size_t n, size_t size, gfp_t flags)
+krealloc_array(void *ptr, size_t n, size_t size, gfp_t flags)
{
- return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
+ if (WOULD_OVERFLOW(n, size))
+ return (NULL);
+
+ return (krealloc(ptr, n * size, flags));
}
static inline void *
-kmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
+__vmalloc(size_t size, gfp_t flags, int other)
{
- return (mallocarray_domainset(n, size, M_KMALLOC,
- linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
+ return (malloc(size, M_KMALLOC, linux_check_m_flags(flags)));
}
static inline void *
-kvmalloc_array(size_t n, size_t size, gfp_t flags)
+__vmalloc_node(size_t size, gfp_t flags, int node)
{
- return (mallocarray(n, size, M_KMALLOC, linux_check_m_flags(flags)));
+ return (malloc_domainset(size, M_KMALLOC,
+ linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
}
static inline void *
-krealloc(void *ptr, size_t size, gfp_t flags)
+vmalloc_32(size_t size)
{
- return (realloc(ptr, size, M_KMALLOC, linux_check_m_flags(flags)));
+ return (contigmalloc(size, M_KMALLOC, M_WAITOK, 0, UINT_MAX, 1, 1));
}
static inline void *
-krealloc_array(void *ptr, size_t n, size_t size, gfp_t flags)
+kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
- if (WOULD_OVERFLOW(n, size)) {
- return NULL;
- }
-
- return (realloc(ptr, n * size, M_KMALLOC, linux_check_m_flags(flags)));
+ return (kmalloc_array(n, size, linux_check_m_flags(flags)));
}
extern void linux_kfree_async(void *);
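
For illustration, a minimal usage sketch of the reworked helpers above (struct demo_entry and nentries are hypothetical, not part of this change):

	struct demo_entry *tbl, *ntbl;

	/*
	 * kcalloc() now routes through kmalloc_array(), which panics on
	 * n * size overflow; the buffer is zeroed via __GFP_ZERO.
	 */
	tbl = kcalloc(nentries, sizeof(*tbl), GFP_KERNEL);

	/*
	 * krealloc_array() instead returns NULL on overflow, and leaves
	 * the original buffer untouched on failure.
	 */
	ntbl = krealloc_array(tbl, 2 * nentries, sizeof(*tbl), GFP_KERNEL);
	if (ntbl == NULL)
		kfree(tbl);
	else
		tbl = ntbl;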
diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c b/sys/compat/linuxkpi/common/src/linux_compat.c
--- a/sys/compat/linuxkpi/common/src/linux_compat.c
+++ b/sys/compat/linuxkpi/common/src/linux_compat.c
@@ -2767,8 +2767,8 @@
boot_cpu_data.x86_model = CPUID_TO_MODEL(cpu_id);
boot_cpu_data.x86_vendor = x86_vendor;
- __cpu_data = mallocarray(mp_maxid + 1,
- sizeof(*__cpu_data), M_KMALLOC, M_WAITOK | M_ZERO);
+ __cpu_data = kmalloc_array(mp_maxid + 1,
+ sizeof(*__cpu_data), M_WAITOK | M_ZERO);
CPU_FOREACH(i) {
__cpu_data[i].x86_clflush_size = cpu_clflush_line_size;
__cpu_data[i].x86_max_cores = mp_ncpus;
@@ -2810,8 +2810,8 @@
* This is used by cpumask_of() (and possibly others in the future) for,
* e.g., drivers to pass hints to irq_set_affinity_hint().
*/
- static_single_cpu_mask = mallocarray(mp_maxid + 1,
- sizeof(static_single_cpu_mask), M_KMALLOC, M_WAITOK | M_ZERO);
+ static_single_cpu_mask = kmalloc_array(mp_maxid + 1,
+ sizeof(static_single_cpu_mask), M_WAITOK | M_ZERO);
/*
* When the number of CPUs reach a threshold, we start to save memory
@@ -2830,9 +2830,9 @@
* (_BITSET_BITS / 8)' bytes (for comparison with the
* overlapping scheme).
*/
- static_single_cpu_mask_lcs = mallocarray(mp_ncpus,
+ static_single_cpu_mask_lcs = kmalloc_array(mp_ncpus,
sizeof(*static_single_cpu_mask_lcs),
- M_KMALLOC, M_WAITOK | M_ZERO);
+ M_WAITOK | M_ZERO);
sscm_ptr = static_single_cpu_mask_lcs;
CPU_FOREACH(i) {
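
A quick size sanity check on these call sites (numbers purely illustrative): with mp_maxid + 1 == 256 and sizeof(*__cpu_data) == 64, the array is 16384 bytes, well above a 4096-byte PAGE_SIZE, so kmalloc_array() now hands the allocation to lkpi___kmalloc() and its contigmalloc(9) branch instead of plain malloc(9):

	/*
	 * Hypothetical arithmetic, for illustration only:
	 *   (mp_maxid + 1) * sizeof(*__cpu_data) = 256 * 64 = 16384
	 *   16384 > PAGE_SIZE (4096) => contigmalloc(9)-backed kmalloc
	 */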
diff --git a/sys/compat/linuxkpi/common/src/linux_slab.c b/sys/compat/linuxkpi/common/src/linux_slab.c
--- a/sys/compat/linuxkpi/common/src/linux_slab.c
+++ b/sys/compat/linuxkpi/common/src/linux_slab.c
@@ -207,6 +207,18 @@
free(c, M_KMALLOC);
}
+void *
+lkpi___kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ if (size <= PAGE_SIZE)
+ return (malloc_domainset(size, M_KMALLOC,
+ linux_get_vm_domain_set(node), linux_check_m_flags(flags)));
+ else
+ return (contigmalloc_domainset(size, M_KMALLOC,
+ linux_get_vm_domain_set(node), linux_check_m_flags(flags),
+ 0, -1UL, PAGE_SIZE, 0));
+}
+
void *
lkpi___kmalloc(size_t size, gfp_t flags)
{
@@ -222,6 +234,40 @@
0, -1UL, PAGE_SIZE, 0));
}
+void *
+lkpi_krealloc(void *ptr, size_t size, gfp_t flags)
+{
+ void *nptr;
+ size_t osize;
+
+ /*
+ * First handle the special cases implied by the function arguments.
+ */
+ if (ptr == NULL)
+ return (kmalloc(size, flags));
+
+ osize = ksize(ptr);
+ if (size <= osize)
+ return (ptr);
+
+ /*
+ * We know the new size > original size. realloc(9) does not (and cannot)
+ * know about our requirement for physically contiguous memory, so we can
+ * only call it for sizes up to and including PAGE_SIZE and otherwise must
+ * replicate its functionality using kmalloc to get contigmalloc(9) backing.
+ */
+ if (size <= PAGE_SIZE)
+ return (realloc(ptr, size, M_KMALLOC, linux_check_m_flags(flags)));
+
+ nptr = kmalloc(size, flags);
+ if (nptr == NULL)
+ return (NULL);
+
+ memcpy(nptr, ptr, osize);
+ kfree(ptr);
+ return (nptr);
+}
+
struct lkpi_kmalloc_ctx {
size_t size;
gfp_t flags;
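
To make the size-threshold logic in lkpi_krealloc() concrete, here is an illustrative growth sequence as seen by a caller (sizes assume PAGE_SIZE == 4096):

	void *p;

	p = kmalloc(512, GFP_KERNEL);		/* malloc(9)-backed */
	p = krealloc(p, 2048, GFP_KERNEL);	/* new size <= PAGE_SIZE: plain realloc(9) */
	p = krealloc(p, 8192, GFP_KERNEL);	/* new size > PAGE_SIZE: kmalloc() +
						   memcpy() + kfree() to get the
						   contigmalloc(9) backing */
	kfree(p);

Shrinking requests simply return the original pointer, since ksize() reports the usable (already sufficient) allocation size.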
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -1142,6 +1142,9 @@
case SLAB_COOKIE_MALLOC_LARGE:
size = malloc_large_size(slab);
break;
+ case SLAB_COOKIE_CONTIG_MALLOC:
+ size = round_page(contigmalloc_size(slab));
+ break;
default:
__assert_unreachable();
size = 0;
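
Finally, the kern_malloc.c hunk teaches the slab-cookie size switch (as used by malloc_usable_size(9), on which the LinuxKPI ksize() is built) about contigmalloc(9)-backed allocations; without it, the large-kmalloc pointers introduced above would hit __assert_unreachable(). A sketch of the observable effect (again assuming PAGE_SIZE == 4096):

	void *p = kmalloc(5000, GFP_KERNEL);	/* > PAGE_SIZE: contigmalloc(9)-backed */
	size_t sz = ksize(p);			/* returns round_page(5000) == 8192 */
	kfree(p);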
