D21254.diff

Index: head/sys/kern/kern_synch.c
===================================================================
--- head/sys/kern/kern_synch.c
+++ head/sys/kern/kern_synch.c
@@ -52,6 +52,7 @@
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
+#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
@@ -331,6 +332,75 @@
}
return (_sleep(&pause_wchan[curcpu], NULL,
(flags & C_CATCH) ? PCATCH : 0, wmesg, sbt, pr, flags));
+}
+
+/*
+ * Potentially release the last reference for refcount. Check for
+ * unlikely conditions and signal the caller as to whether it was
+ * the final ref.
+ */
+bool
+refcount_release_last(volatile u_int *count, u_int n, u_int old)
+{
+ u_int waiter;
+
+ waiter = old & REFCOUNT_WAITER;
+ old = REFCOUNT_COUNT(old);
+ if (__predict_false(n > old || REFCOUNT_SATURATED(old))) {
+ /*
+ * Avoid multiple destructor invocations if underflow occurred.
+ * This is not perfect since the memory backing the containing
+ * object may already have been reallocated.
+ */
+ _refcount_update_saturated(count);
+ return (false);
+ }
+
+ /*
+ * Attempt to atomically clear the waiter bit. Wakeup waiters
+ * if we are successful.
+ */
+ if (waiter != 0 && atomic_cmpset_int(count, REFCOUNT_WAITER, 0))
+ wakeup(__DEVOLATILE(u_int *, count));
+
+ /*
+ * Last reference. Signal the user to call the destructor.
+ *
+ * Ensure that the destructor sees all updates. The fence_rel
+ * at the start of refcount_releasen synchronizes with this fence.
+ */
+ atomic_thread_fence_acq();
+ return (true);
+}
+
+/*
+ * Wait for a refcount wakeup. This does not guarantee that the ref is still
+ * zero on return and may be subject to transient wakeups. Callers wanting
+ * a precise answer should use refcount_wait().
+ */
+void
+refcount_sleep(volatile u_int *count, const char *wmesg, int pri)
+{
+ void *wchan;
+ u_int old;
+
+ if (REFCOUNT_COUNT(*count) == 0)
+ return;
+ wchan = __DEVOLATILE(void *, count);
+ sleepq_lock(wchan);
+ old = *count;
+ for (;;) {
+ if (REFCOUNT_COUNT(old) == 0) {
+ sleepq_release(wchan);
+ return;
+ }
+ if (old & REFCOUNT_WAITER)
+ break;
+ if (atomic_fcmpset_int(count, &old, old | REFCOUNT_WAITER))
+ break;
+ }
+ sleepq_add(wchan, NULL, wmesg, 0, 0);
+ sleepq_wait(wchan, pri);
}
/*
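
The sleep half above pairs with the wakeup issued by refcount_release_last(): a sleeper publishes REFCOUNT_WAITER under the sleepqueue lock, and the thread dropping the last reference clears the bit and calls wakeup(). A minimal sketch of the intended drain pattern follows; struct obj, obj_op(), and obj_teardown() are hypothetical names for illustration, not part of this change.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/refcount.h>

/* Hypothetical object tracking in-flight operations. */
struct obj {
	volatile u_int	busy;		/* count of operations in flight */
};

static void
obj_op(struct obj *o)
{
	refcount_acquire(&o->busy);	/* mark an operation in flight */
	/* ... perform the operation ... */
	refcount_release(&o->busy);	/* final release wakes any waiter */
}

static void
obj_teardown(struct obj *o)
{
	/*
	 * Block until every in-flight operation has drained.
	 * refcount_wait() (added to sys/sys/refcount.h below) retries
	 * refcount_sleep(), so transient wakeups are harmless here.
	 */
	refcount_wait(&o->busy, "objdrn", PWAIT);
	/* o->busy is zero; the object can be torn down safely. */
}
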
Index: head/sys/sys/refcount.h
===================================================================
--- head/sys/sys/refcount.h
+++ head/sys/sys/refcount.h
@@ -39,9 +39,15 @@
#define KASSERT(exp, msg) /* */
#endif
-#define REFCOUNT_SATURATED(val) (((val) & (1U << 31)) != 0)
-#define REFCOUNT_SATURATION_VALUE (3U << 30)
+#define REFCOUNT_WAITER (1U << 31) /* Refcount has waiter. */
+#define REFCOUNT_SATURATION_VALUE (3U << 29)
+#define REFCOUNT_SATURATED(val) (((val) & (1U << 30)) != 0)
+#define REFCOUNT_COUNT(x) ((x) & ~REFCOUNT_WAITER)
+
+bool refcount_release_last(volatile u_int *count, u_int n, u_int old);
+void refcount_sleep(volatile u_int *count, const char *wmesg, int prio);
+
/*
* Attempt to handle reference count overflow and underflow. Force the counter
* to stay at the saturation value so that a counter overflow cannot trigger
@@ -76,6 +82,19 @@
_refcount_update_saturated(count);
}
+static __inline void
+refcount_acquiren(volatile u_int *count, u_int n)
+{
+ u_int old;
+
+ KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
+ ("refcount_acquiren: n %d too large", n));
+ old = atomic_fetchadd_int(count, n);
+ if (__predict_false(REFCOUNT_SATURATED(old)))
+ _refcount_update_saturated(count);
+}
+
static __inline __result_use_check bool
refcount_acquire_checked(volatile u_int *count)
{
@@ -91,34 +110,35 @@
}
static __inline bool
-refcount_release(volatile u_int *count)
+refcount_releasen(volatile u_int *count, u_int n)
{
u_int old;
+ KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
+ ("refcount_releasen: n %d too large", n));
atomic_thread_fence_rel();
- old = atomic_fetchadd_int(count, -1);
- if (__predict_false(old == 0 || REFCOUNT_SATURATED(old))) {
- /*
- * Avoid multiple destructor invocations if underflow occurred.
- * This is not perfect since the memory backing the containing
- * object may already have been reallocated.
- */
- _refcount_update_saturated(count);
- return (false);
- }
- if (old > 1)
- return (false);
+ old = atomic_fetchadd_int(count, -n);
+ if (__predict_false(n >= REFCOUNT_COUNT(old) ||
+ REFCOUNT_SATURATED(old)))
+ return (refcount_release_last(count, n, old));
+ return (false);
+}
- /*
- * Last reference. Signal the user to call the destructor.
- *
- * Ensure that the destructor sees all updates. The fence_rel
- * at the start of the function synchronizes with this fence.
- */
- atomic_thread_fence_acq();
- return (true);
+static __inline bool
+refcount_release(volatile u_int *count)
+{
+
+ return (refcount_releasen(count, 1));
}
+static __inline void
+refcount_wait(volatile u_int *count, const char *wmesg, int prio)
+{
+
+ while (*count != 0)
+ refcount_sleep(count, wmesg, prio);
+}
+
/*
* This functions returns non-zero if the refcount was
* incremented. Else zero is returned.
@@ -130,7 +150,7 @@
old = *count;
for (;;) {
- if (old == 0)
+ if (REFCOUNT_COUNT(old) == 0)
return (false);
if (__predict_false(REFCOUNT_SATURATED(old)))
return (true);
@@ -146,7 +166,7 @@
old = *count;
for (;;) {
- if (old == 1)
+ if (REFCOUNT_COUNT(old) == 1)
return (false);
if (__predict_false(REFCOUNT_SATURATED(old)))
return (true);
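
Note the bit layout the new macros define: the count proper lives in bits 0..30, bit 31 carries the REFCOUNT_WAITER flag, and saturation is now detected at bit 30 (with REFCOUNT_SATURATION_VALUE lowered to 3U << 29 to match). The n-ary entry points let a caller take or return several references with a single atomic operation; a short sketch of that pattern, again with hypothetical names (struct obj, obj_process(), obj_destroy()):

/* Hypothetical reference-counted object and helpers. */
struct obj {
	volatile u_int	ref;
};
void obj_process(struct obj *o, u_int i);	/* consumes no references */
void obj_destroy(struct obj *o);

static void
obj_process_batch(struct obj *o, u_int n)
{
	u_int i;

	/* One atomic_fetchadd for n references instead of n of them. */
	refcount_acquiren(&o->ref, n);
	for (i = 0; i < n; i++)
		obj_process(o, i);
	/*
	 * Drop all n in one atomic op; refcount_releasen() reports
	 * whether this was the final reference (another holder may have
	 * released its own reference while the batch ran).
	 */
	if (refcount_releasen(&o->ref, n))
		obj_destroy(o);
}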
