Page Menu · Home · FreeBSD

D30760.diff
Assigned to: No One · Visibility: Temporary

D30760.diff

diff --git a/sys/compat/linuxkpi/common/include/linux/slab.h b/sys/compat/linuxkpi/common/include/linux/slab.h
--- a/sys/compat/linuxkpi/common/include/linux/slab.h
+++ b/sys/compat/linuxkpi/common/include/linux/slab.h
@@ -31,12 +31,9 @@
#ifndef _LINUX_SLAB_H_
#define _LINUX_SLAB_H_
-#include <sys/param.h>
-#include <sys/systm.h>
+#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/limits.h>
-#include <sys/proc.h>
-#include <vm/uma.h>
#include <linux/compat.h>
#include <linux/types.h>
@@ -65,8 +62,9 @@
*/
#define kmem_cache linux_kmem_cache
#define kmem_cache_create(...) linux_kmem_cache_create(__VA_ARGS__)
-#define kmem_cache_alloc(...) linux_kmem_cache_alloc(__VA_ARGS__)
-#define kmem_cache_free(...) linux_kmem_cache_free(__VA_ARGS__)
+#define kmem_cache_alloc(...) lkpi_kmem_cache_alloc(__VA_ARGS__)
+#define kmem_cache_zalloc(...) lkpi_kmem_cache_zalloc(__VA_ARGS__)
+#define kmem_cache_free(...) lkpi_kmem_cache_free(__VA_ARGS__)
#define kmem_cache_destroy(...) linux_kmem_cache_destroy(__VA_ARGS__)
#define KMEM_CACHE(__struct, flags) \
@@ -75,12 +73,7 @@
typedef void linux_kmem_ctor_t (void *);
-struct linux_kmem_cache {
- uma_zone_t cache_zone;
- linux_kmem_ctor_t *cache_ctor;
- unsigned cache_flags;
- unsigned cache_size;
-};
+struct linux_kmem_cache;
#define SLAB_HWCACHE_ALIGN (1 << 0)
#define SLAB_TYPESAFE_BY_RCU (1 << 1)
@@ -212,32 +205,9 @@
extern struct linux_kmem_cache *linux_kmem_cache_create(const char *name,
size_t size, size_t align, unsigned flags, linux_kmem_ctor_t *ctor);
-
-static inline void *
-linux_kmem_cache_alloc(struct linux_kmem_cache *c, gfp_t flags)
-{
- return (uma_zalloc_arg(c->cache_zone, c,
- linux_check_m_flags(flags)));
-}
-
-static inline void *
-kmem_cache_zalloc(struct linux_kmem_cache *c, gfp_t flags)
-{
- return (uma_zalloc_arg(c->cache_zone, c,
- linux_check_m_flags(flags | M_ZERO)));
-}
-
-extern void linux_kmem_cache_free_rcu(struct linux_kmem_cache *, void *);
-
-static inline void
-linux_kmem_cache_free(struct linux_kmem_cache *c, void *m)
-{
- if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU))
- linux_kmem_cache_free_rcu(c, m);
- else
- uma_zfree(c->cache_zone, m);
-}
-
+extern void *lkpi_kmem_cache_alloc(struct linux_kmem_cache *, gfp_t);
+extern void *lkpi_kmem_cache_zalloc(struct linux_kmem_cache *, gfp_t);
+extern void lkpi_kmem_cache_free(struct linux_kmem_cache *, void *);
extern void linux_kmem_cache_destroy(struct linux_kmem_cache *);
void linux_kfree_async(void *);
diff --git a/sys/compat/linuxkpi/common/src/linux_rcu.c b/sys/compat/linuxkpi/common/src/linux_rcu.c
--- a/sys/compat/linuxkpi/common/src/linux_rcu.c
+++ b/sys/compat/linuxkpi/common/src/linux_rcu.c
@@ -48,6 +48,8 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/compat.h>
+#include <linux/llist.h>
+#include <linux/irq_work.h>
/*
* By defining CONFIG_NO_RCU_SKIP LinuxKPI RCU locks and asserts will
@@ -60,13 +62,15 @@
#endif
struct callback_head {
- STAILQ_ENTRY(callback_head) entry;
+ union {
+ STAILQ_ENTRY(callback_head) entry;
+ struct llist_node node;
+ };
rcu_callback_t func;
};
struct linux_epoch_head {
- STAILQ_HEAD(, callback_head) cb_head;
- struct mtx lock;
+ struct llist_head cb_head;
struct task task;
} __aligned(CACHE_LINE_SIZE);
@@ -120,9 +124,8 @@
head = &linux_epoch_head[j];
- mtx_init(&head->lock, "LRCU-HEAD", NULL, MTX_DEF);
TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, head);
- STAILQ_INIT(&head->cb_head);
+ init_llist_head(&head->cb_head);
CPU_FOREACH(i) {
struct linux_epoch_record *record;
@@ -139,37 +142,22 @@
}
SYSINIT(linux_rcu_runtime, SI_SUB_CPU, SI_ORDER_ANY, linux_rcu_runtime_init, NULL);
-static void
-linux_rcu_runtime_uninit(void *arg __unused)
-{
- struct linux_epoch_head *head;
- int j;
-
- for (j = 0; j != RCU_TYPE_MAX; j++) {
- head = &linux_epoch_head[j];
-
- mtx_destroy(&head->lock);
- }
-}
-SYSUNINIT(linux_rcu_runtime, SI_SUB_LOCK, SI_ORDER_SECOND, linux_rcu_runtime_uninit, NULL);
-
static void
linux_rcu_cleaner_func(void *context, int pending __unused)
{
- struct linux_epoch_head *head;
+ struct linux_epoch_head *head = context;
struct callback_head *rcu;
STAILQ_HEAD(, callback_head) tmp_head;
+ struct llist_node *node, *next;
uintptr_t offset;
- linux_set_current(curthread);
-
- head = context;
-
/* move current callbacks into own queue */
- mtx_lock(&head->lock);
STAILQ_INIT(&tmp_head);
- STAILQ_CONCAT(&tmp_head, &head->cb_head);
- mtx_unlock(&head->lock);
+ llist_for_each_safe(node, next, llist_del_all(&head->cb_head)) {
+ rcu = container_of(node, struct callback_head, node);
+ /* re-reverse list to restore chronological order */
+ STAILQ_INSERT_HEAD(&tmp_head, rcu, entry);
+ }
/* synchronize */
linux_synchronize_rcu(head - linux_epoch_head);
@@ -384,7 +372,7 @@
head = &linux_epoch_head[type];
/* wait for callbacks to complete */
- taskqueue_drain(taskqueue_fast, &head->task);
+ taskqueue_drain(linux_irq_work_tq, &head->task);
}
void
@@ -398,11 +386,9 @@
rcu = (struct callback_head *)context;
head = &linux_epoch_head[type];
- mtx_lock(&head->lock);
rcu->func = func;
- STAILQ_INSERT_TAIL(&head->cb_head, rcu, entry);
- taskqueue_enqueue(taskqueue_fast, &head->task);
- mtx_unlock(&head->lock);
+ llist_add(&rcu->node, &head->cb_head);
+ taskqueue_enqueue(linux_irq_work_tq, &head->task);
}
int
diff --git a/sys/compat/linuxkpi/common/src/linux_slab.c b/sys/compat/linuxkpi/common/src/linux_slab.c
--- a/sys/compat/linuxkpi/common/src/linux_slab.c
+++ b/sys/compat/linuxkpi/common/src/linux_slab.c
@@ -35,12 +35,22 @@
#include <sys/param.h>
#include <sys/taskqueue.h>
+#include <vm/uma.h>
struct linux_kmem_rcu {
struct rcu_head rcu_head;
struct linux_kmem_cache *cache;
};
+struct linux_kmem_cache {
+ uma_zone_t cache_zone;
+ linux_kmem_ctor_t *cache_ctor;
+ unsigned cache_flags;
+ unsigned cache_size;
+ struct llist_head cache_items;
+ struct task cache_task;
+};
+
#define LINUX_KMEM_TO_RCU(c, m) \
((struct linux_kmem_rcu *)((char *)(m) + \
(c)->cache_size - sizeof(struct linux_kmem_rcu)))
@@ -51,6 +61,22 @@
static LLIST_HEAD(linux_kfree_async_list);
+static void lkpi_kmem_cache_free_async_fn(void *, int);
+
+void *
+lkpi_kmem_cache_alloc(struct linux_kmem_cache *c, gfp_t flags)
+{
+ return (uma_zalloc_arg(c->cache_zone, c,
+ linux_check_m_flags(flags)));
+}
+
+void *
+lkpi_kmem_cache_zalloc(struct linux_kmem_cache *c, gfp_t flags)
+{
+ return (uma_zalloc_arg(c->cache_zone, c,
+ linux_check_m_flags(flags | M_ZERO)));
+}
+
static int
linux_kmem_ctor(void *mem, int size, void *arg, int flags)
{
@@ -102,6 +128,9 @@
linux_kmem_ctor, NULL, NULL, NULL,
align, UMA_ZONE_ZINIT);
} else {
+ /* make room for async task list items */
+ size = MAX(size, sizeof(struct llist_node));
+
/* create cache_zone */
c->cache_zone = uma_zcreate(name, size,
ctor ? linux_kmem_ctor : NULL, NULL,
@@ -111,17 +140,56 @@
c->cache_flags = flags;
c->cache_ctor = ctor;
c->cache_size = size;
+ init_llist_head(&c->cache_items);
+ TASK_INIT(&c->cache_task, 0, lkpi_kmem_cache_free_async_fn, c);
return (c);
}
-void
-linux_kmem_cache_free_rcu(struct linux_kmem_cache *c, void *m)
+static inline void
+lkpi_kmem_cache_free_rcu(struct linux_kmem_cache *c, void *m)
{
struct linux_kmem_rcu *rcu = LINUX_KMEM_TO_RCU(c, m);
call_rcu(&rcu->rcu_head, linux_kmem_cache_free_rcu_callback);
}
+static inline void
+lkpi_kmem_cache_free_sync(struct linux_kmem_cache *c, void *m)
+{
+ uma_zfree(c->cache_zone, m);
+}
+
+static void
+lkpi_kmem_cache_free_async_fn(void *context, int pending)
+{
+ struct linux_kmem_cache *c = context;
+ struct llist_node *freed, *next;
+
+ llist_for_each_safe(freed, next, llist_del_all(&c->cache_items))
+ lkpi_kmem_cache_free_sync(c, freed);
+}
+
+static inline void
+lkpi_kmem_cache_free_async(struct linux_kmem_cache *c, void *m)
+{
+ if (m == NULL)
+ return;
+
+ llist_add(m, &c->cache_items);
+ taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
+}
+
+void
+lkpi_kmem_cache_free(struct linux_kmem_cache *c, void *m)
+{
+ if (unlikely(c->cache_flags & SLAB_TYPESAFE_BY_RCU))
+ lkpi_kmem_cache_free_rcu(c, m);
+ else if (unlikely(curthread->td_critnest != 0))
+ lkpi_kmem_cache_free_async(c, m);
+ else
+ lkpi_kmem_cache_free_sync(c, m);
+}
+
void
linux_kmem_cache_destroy(struct linux_kmem_cache *c)
{
@@ -130,6 +198,9 @@
rcu_barrier();
}
+ if (!llist_empty(&c->cache_items))
+ taskqueue_enqueue(linux_irq_work_tq, &c->cache_task);
+ taskqueue_drain(linux_irq_work_tq, &c->cache_task);
uma_zdestroy(c->cache_zone);
free(c, M_KMALLOC);
}

File Metadata

Mime Type
text/plain
Expires
Tue, Oct 1, 9:31 AM (21 h, 52 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
13243258
Default Alt Text
D30760.diff (8 KB)

Event Timeline