D48007.id154481.diff
File F116019457, 19 KB, attached to D48007: vm_object: drop memq field
Index: sys/vm/swap_pager.c
===================================================================
--- sys/vm/swap_pager.c
+++ sys/vm/swap_pager.c
@@ -1935,11 +1935,9 @@
if (!vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL))
break;
} else {
- m = vm_radix_iter_lookup_lt(&pages,
- blks.index + i);
- m = vm_page_alloc_after(
+ m = vm_page_alloc_iter(
object, &pages, blks.index + i,
- VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL, m);
+ VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
if (m == NULL)
break;
}
Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -1291,9 +1291,8 @@
vm_fault_unlock_and_deallocate(fs);
return (FAULT_FAILURE);
}
- fs->m = vm_page_alloc_after(fs->object, pages, fs->pindex,
- P_KILLED(curproc) ? VM_ALLOC_SYSTEM : 0,
- vm_radix_iter_lookup_lt(pages, fs->pindex));
+ fs->m = vm_page_alloc_iter(fs->object, pages, fs->pindex,
+ P_KILLED(curproc) ? VM_ALLOC_SYSTEM : 0);
}
if (fs->m == NULL) {
if (vm_fault_allocate_oom(fs))
@@ -2103,7 +2102,7 @@
vm_pindex_t dst_pindex, pindex, src_pindex;
vm_prot_t access, prot;
vm_offset_t vaddr;
- vm_page_t dst_m, mpred;
+ vm_page_t dst_m;
vm_page_t src_m;
bool upgrade;
@@ -2176,11 +2175,9 @@
* regardless of whether they can be written.
*/
vm_page_iter_init(&pages, dst_object);
- mpred = (src_object == dst_object) ?
- vm_page_mpred(src_object, src_pindex) : NULL;
for (vaddr = dst_entry->start, dst_pindex = 0;
vaddr < dst_entry->end;
- vaddr += PAGE_SIZE, dst_pindex++, mpred = dst_m) {
+ vaddr += PAGE_SIZE, dst_pindex++) {
again:
/*
* Find the page in the source object, and copy it in.
@@ -2220,15 +2217,14 @@
*/
pindex = (src_object == dst_object ? src_pindex : 0) +
dst_pindex;
- dst_m = vm_page_alloc_after(dst_object, &pages, pindex,
- VM_ALLOC_NORMAL, mpred);
+ dst_m = vm_page_alloc_iter(dst_object, &pages, pindex,
+ VM_ALLOC_NORMAL);
if (dst_m == NULL) {
VM_OBJECT_WUNLOCK(dst_object);
VM_OBJECT_RUNLOCK(object);
vm_wait(dst_object);
VM_OBJECT_WLOCK(dst_object);
pctrie_iter_reset(&pages);
- mpred = vm_radix_iter_lookup_lt(&pages, pindex);
goto again;
}
Index: sys/vm/vm_glue.c
===================================================================
--- sys/vm/vm_glue.c
+++ sys/vm/vm_glue.c
@@ -615,7 +615,7 @@
struct pctrie_iter pages;
vm_object_t obj = vm_thread_kstack_size_to_obj(npages);
vm_pindex_t pindex;
- vm_page_t m, mpred;
+ vm_page_t m;
int n;
pindex = vm_kstack_pindex(ks, npages);
@@ -627,10 +627,8 @@
VM_ALLOC_NOCREAT | VM_ALLOC_WIRED);
if (m != NULL)
continue;
- mpred = (n > 0) ? ma[n - 1] :
- vm_radix_iter_lookup_lt(&pages, pindex);
- m = vm_page_alloc_domain_after(obj, &pages, pindex + n,
- domain, req_class | VM_ALLOC_WIRED, mpred);
+ m = vm_page_alloc_domain_iter(obj, &pages, pindex + n,
+ domain, req_class | VM_ALLOC_WIRED);
if (m != NULL)
continue;
for (int i = 0; i < n; i++) {
Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c
+++ sys/vm/vm_kern.c
@@ -532,7 +532,7 @@
{
struct pctrie_iter pages;
vm_offset_t offset, i;
- vm_page_t m, mpred;
+ vm_page_t m;
vm_prot_t prot;
int pflags;
@@ -550,10 +550,9 @@
vm_page_iter_init(&pages, object);
VM_OBJECT_WLOCK(object);
retry:
- mpred = vm_radix_iter_lookup_lt(&pages, atop(offset + i));
- for (; i < size; i += PAGE_SIZE, mpred = m) {
- m = vm_page_alloc_domain_after(object, &pages, atop(offset + i),
- domain, pflags, mpred);
+ for (; i < size; i += PAGE_SIZE) {
+ m = vm_page_alloc_domain_iter(object, &pages, atop(offset + i),
+ domain, pflags);
/*
* Ran out of space, free everything up and return. Don't need
Index: sys/vm/vm_object.h
===================================================================
--- sys/vm/vm_object.h
+++ sys/vm/vm_object.h
@@ -98,7 +98,6 @@
TAILQ_ENTRY(vm_object) object_list; /* list of all objects */
LIST_HEAD(, vm_object) shadow_head; /* objects that this is a shadow for */
LIST_ENTRY(vm_object) shadow_list; /* chain of shadow objects */
- struct pglist memq; /* list of resident pages */
struct vm_radix rtree; /* root of the resident page radix trie*/
vm_pindex_t size; /* Object size */
struct domainset_ref domain; /* NUMA policy. */
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c
+++ sys/vm/vm_object.c
@@ -181,8 +181,6 @@
object = (vm_object_t)mem;
KASSERT(object->ref_count == 0,
("object %p ref_count = %d", object, object->ref_count));
- KASSERT(TAILQ_EMPTY(&object->memq),
- ("object %p has resident pages in its memq", object));
KASSERT(vm_radix_is_empty(&object->rtree),
("object %p has resident pages in its trie", object));
#if VM_NRESERVLEVEL > 0
@@ -235,8 +233,6 @@
_vm_object_allocate(objtype_t type, vm_pindex_t size, u_short flags,
vm_object_t object, void *handle)
{
-
- TAILQ_INIT(&object->memq);
LIST_INIT(&object->shadow_head);
object->type = type;
@@ -922,7 +918,6 @@
vm_radix_reclaim_callback(&object->rtree,
vm_object_terminate_single_page, object);
- TAILQ_INIT(&object->memq);
object->resident_page_count = 0;
if (object->type == OBJT_VNODE)
vdrop(object->handle);
@@ -2293,10 +2288,9 @@
mpred = vm_radix_iter_lookup_lt(&pages, pindex);
*rbehind = MIN(*rbehind,
pindex - (mpred != NULL ? mpred->pindex + 1 : 0));
- /* Stepping backward from pindex, mpred doesn't change. */
for (int i = 0; i < *rbehind; i++) {
- m = vm_page_alloc_after(object, &pages, pindex - i - 1,
- VM_ALLOC_NORMAL, mpred);
+ m = vm_page_alloc_iter(object, &pages, pindex - i - 1,
+ VM_ALLOC_NORMAL);
if (m == NULL) {
/* Shift the array. */
for (int j = 0; j < i; j++)
@@ -2316,15 +2310,14 @@
msucc = vm_radix_iter_lookup_ge(&pages, pindex);
*rahead = MIN(*rahead,
(msucc != NULL ? msucc->pindex : object->size) - pindex);
- mpred = m;
for (int i = 0; i < *rahead; i++) {
- m = vm_page_alloc_after(object, &pages, pindex + i,
- VM_ALLOC_NORMAL, mpred);
+ m = vm_page_alloc_iter(object, &pages, pindex + i,
+ VM_ALLOC_NORMAL);
if (m == NULL) {
*rahead = i;
break;
}
- ma_dst[*rbehind + count + i] = mpred = m;
+ ma_dst[*rbehind + count + i] = m;
}
}
}
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -78,10 +78,6 @@
* A radix tree used to quickly
* perform object/offset lookups
*
- * A list of all pages for a given object,
- * so they can be quickly deactivated at
- * time of deallocation.
- *
* An ordered list of pages due for pageout.
*
* In addition, the structure contains the object
@@ -606,12 +602,11 @@
void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
-vm_page_t vm_page_mpred(vm_object_t, vm_pindex_t);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
-vm_page_t vm_page_alloc_after(vm_object_t, struct pctrie_iter *, vm_pindex_t,
- int, vm_page_t);
-vm_page_t vm_page_alloc_domain_after(vm_object_t, struct pctrie_iter *,
- vm_pindex_t, int, int, vm_page_t);
+vm_page_t vm_page_alloc_iter(vm_object_t, struct pctrie_iter *, vm_pindex_t,
+ int);
+vm_page_t vm_page_alloc_domain_iter(vm_object_t, struct pctrie_iter *,
+ vm_pindex_t, int, int);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
vm_paddr_t boundary, vm_memattr_t memattr);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -171,8 +171,7 @@
static bool vm_page_free_prep(vm_page_t m);
static void vm_page_free_toq(vm_page_t m);
static void vm_page_init(void *dummy);
-static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
- vm_page_t mpred);
+static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object);
static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
const uint16_t nflag);
static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
@@ -1470,18 +1469,14 @@
}
/*
- * Insert the given page into the given object at the given pindex. mpred is
- * used for memq linkage. From vm_page_insert, iter is false, mpred is
- * initially NULL, and this procedure looks it up. From vm_page_iter_insert,
- * iter is true and mpred is known to the caller to be valid, and may be NULL if
- * this will be the page with the lowest pindex.
+ * Insert the given page into the given object at the given pindex.
*
* The procedure is marked __always_inline to suggest to the compiler to
* eliminate the lookup parameter and the associated alternate branch.
*/
static __always_inline int
vm_page_insert_lookup(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
- struct pctrie_iter *pages, bool iter, vm_page_t mpred)
+ struct pctrie_iter *pages, bool iter)
{
int error;
@@ -1497,13 +1492,12 @@
m->ref_count |= VPRC_OBJREF;
/*
- * Add this page to the object's radix tree, and look up mpred if
- * needed.
+ * Add this page to the object's radix tree.
*/
if (iter)
error = vm_radix_iter_insert(pages, m);
else
- error = vm_radix_insert_lookup_lt(&object->rtree, m, &mpred);
+ error = vm_radix_insert(&object->rtree, m);
if (__predict_false(error != 0)) {
m->object = NULL;
m->pindex = 0;
@@ -1514,7 +1508,7 @@
/*
* Now link into the object's ordered list of backed pages.
*/
- vm_page_insert_radixdone(m, object, mpred);
+ vm_page_insert_radixdone(m, object);
vm_pager_page_inserted(object, m);
return (0);
}
@@ -1529,7 +1523,7 @@
int
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
- return (vm_page_insert_lookup(m, object, pindex, NULL, false, NULL));
+ return (vm_page_insert_lookup(m, object, pindex, NULL, false));
}
/*
@@ -1539,16 +1533,13 @@
* "pindex" using the iterator "pages". Returns 0 if the insertion was
* successful.
*
- * The page "mpred" must immediately precede the offset "pindex" within
- * the specified object.
- *
* The object must be locked.
*/
static int
vm_page_iter_insert(struct pctrie_iter *pages, vm_page_t m, vm_object_t object,
- vm_pindex_t pindex, vm_page_t mpred)
+ vm_pindex_t pindex)
{
- return (vm_page_insert_lookup(m, object, pindex, pages, true, mpred));
+ return (vm_page_insert_lookup(m, object, pindex, pages, true));
}
/*
@@ -1557,13 +1548,10 @@
* Complete page "m" insertion into the specified object after the
* radix trie hooking.
*
- * The page "mpred" must precede the offset "m->pindex" within the
- * specified object.
- *
* The object must be locked.
*/
static void
-vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
+vm_page_insert_radixdone(vm_page_t m, vm_object_t object)
{
VM_OBJECT_ASSERT_WLOCKED(object);
@@ -1571,24 +1559,6 @@
("vm_page_insert_radixdone: page %p has inconsistent object", m));
KASSERT((m->ref_count & VPRC_OBJREF) != 0,
("vm_page_insert_radixdone: page %p is missing object ref", m));
- if (mpred != NULL) {
- KASSERT(mpred->object == object,
- ("vm_page_insert_radixdone: object doesn't contain mpred"));
- KASSERT(mpred->pindex < m->pindex,
- ("vm_page_insert_radixdone: mpred doesn't precede pindex"));
- KASSERT(TAILQ_NEXT(mpred, listq) == NULL ||
- m->pindex < TAILQ_NEXT(mpred, listq)->pindex,
- ("vm_page_insert_radixdone: pindex doesn't precede msucc"));
- } else {
- KASSERT(TAILQ_EMPTY(&object->memq) ||
- m->pindex < TAILQ_FIRST(&object->memq)->pindex,
- ("vm_page_insert_radixdone: no mpred but not first page"));
- }
-
- if (mpred != NULL)
- TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
- else
- TAILQ_INSERT_HEAD(&object->memq, m, listq);
/*
* Show that the object has one more resident page.
@@ -1636,11 +1606,6 @@
vm_pager_page_removed(object, m);
m->object = NULL;
- /*
- * Now remove from the object's list of backed pages.
- */
- TAILQ_REMOVE(&object->memq, m, listq);
-
/*
* And show that the object has one fewer resident page.
*/
@@ -1914,9 +1879,6 @@
(mnew->oflags & VPO_UNMANAGED),
("vm_page_replace: mismatched VPO_UNMANAGED"));
- /* Keep the resident page list in sorted order. */
- TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
- TAILQ_REMOVE(&object->memq, mold, listq);
mold->object = NULL;
/*
@@ -1964,7 +1926,6 @@
vm_page_iter_rename(struct pctrie_iter *old_pages, vm_page_t m,
vm_object_t new_object, vm_pindex_t new_pindex)
{
- vm_page_t mpred;
vm_pindex_t opidx;
KASSERT((m->ref_count & VPRC_OBJREF) != 0,
@@ -1979,15 +1940,11 @@
*/
opidx = m->pindex;
m->pindex = new_pindex;
- if (vm_radix_insert_lookup_lt(&new_object->rtree, m, &mpred) != 0) {
+ if (vm_radix_insert(&new_object->rtree, m) != 0) {
m->pindex = opidx;
return (false);
}
- /*
- * The operation cannot fail anymore. The removal must happen before
- * the listq iterator is tainted.
- */
m->pindex = opidx;
vm_radix_iter_remove(old_pages);
vm_page_remove_radixdone(m);
@@ -1996,25 +1953,13 @@
m->pindex = new_pindex;
m->object = new_object;
- vm_page_insert_radixdone(m, new_object, mpred);
+ vm_page_insert_radixdone(m, new_object);
if (vm_page_any_valid(m))
vm_page_dirty(m);
vm_pager_page_inserted(new_object, m);
return (true);
}
-/*
- * vm_page_mpred:
- *
- * Return the greatest page of the object with index <= pindex,
- * or NULL, if there is none. Assumes object lock is held.
- */
-vm_page_t
-vm_page_mpred(vm_object_t object, vm_pindex_t pindex)
-{
- return (vm_radix_lookup_le(&object->rtree, pindex));
-}
-
/*
* vm_page_alloc:
*
@@ -2043,8 +1988,7 @@
struct pctrie_iter pages;
vm_page_iter_init(&pages, object);
- return (vm_page_alloc_after(object, &pages, pindex, req,
- vm_page_mpred(object, pindex)));
+ return (vm_page_alloc_iter(object, &pages, pindex, req));
}
/*
@@ -2054,8 +1998,8 @@
* page index, or NULL if no such page exists.
*/
vm_page_t
-vm_page_alloc_after(vm_object_t object, struct pctrie_iter *pages,
- vm_pindex_t pindex, int req, vm_page_t mpred)
+vm_page_alloc_iter(vm_object_t object, struct pctrie_iter *pages,
+ vm_pindex_t pindex, int req)
{
struct vm_domainset_iter di;
vm_page_t m;
@@ -2063,8 +2007,8 @@
vm_domainset_iter_page_init(&di, object, pindex, &domain, &req);
do {
- m = vm_page_alloc_domain_after(object, pages, pindex, domain,
- req, mpred);
+ m = vm_page_alloc_domain_iter(object, pages, pindex, domain,
+ req);
if (m != NULL)
break;
} while (vm_domainset_iter_page(&di, object, &domain) == 0);
@@ -2126,8 +2070,8 @@
}
vm_page_t
-vm_page_alloc_domain_after(vm_object_t object, struct pctrie_iter *pages,
- vm_pindex_t pindex, int domain, int req, vm_page_t mpred)
+vm_page_alloc_domain_iter(vm_object_t object, struct pctrie_iter *pages,
+ vm_pindex_t pindex, int domain, int req)
{
struct vm_domain *vmd;
vm_page_t m;
@@ -2143,9 +2087,6 @@
KASSERT(((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
(VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
("invalid request %#x", req));
- KASSERT(mpred == NULL || mpred->pindex < pindex,
- ("mpred %p doesn't precede pindex 0x%jx", mpred,
- (uintmax_t)pindex));
VM_OBJECT_ASSERT_WLOCKED(object);
flags = 0;
@@ -2233,7 +2174,7 @@
}
m->a.act_count = 0;
- if (vm_page_insert_lookup(m, object, pindex, pages, true, mpred)) {
+ if (vm_page_iter_insert(pages, m, object, pindex)) {
if (req & VM_ALLOC_WIRED) {
vm_wire_sub(1);
m->ref_count = 0;
@@ -2441,7 +2382,7 @@
m->a.act_count = 0;
m->oflags = oflags;
m->pool = VM_FREEPOOL_DEFAULT;
- if (vm_page_iter_insert(&pages, m, object, pindex, mpred)) {
+ if (vm_page_iter_insert(&pages, m, object, pindex)) {
if ((req & VM_ALLOC_WIRED) != 0)
vm_wire_sub(npages);
KASSERT(m->object == NULL,
@@ -4806,7 +4747,7 @@
vm_page_grab_iter(vm_object_t object, struct pctrie_iter *pages,
vm_pindex_t pindex, int allocflags)
{
- vm_page_t m, mpred;
+ vm_page_t m;
bool found;
VM_OBJECT_ASSERT_WLOCKED(object);
@@ -4819,9 +4760,8 @@
if (found &&
(allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
return (NULL);
- mpred = vm_radix_iter_lookup_lt(pages, pindex);
- m = vm_page_alloc_after(object, pages, pindex,
- vm_page_grab_pflags(allocflags), mpred);
+ m = vm_page_alloc_iter(object, pages, pindex,
+ vm_page_grab_pflags(allocflags));
if (m != NULL) {
if ((allocflags & VM_ALLOC_ZERO) != 0 &&
(m->flags & PG_ZERO) == 0)
@@ -4983,7 +4923,7 @@
return (VM_PAGER_FAIL);
} else {
mpred = vm_radix_iter_lookup_lt(pages, pindex);
- m = vm_page_alloc_after(object, pages, pindex, pflags, mpred);
+ m = vm_page_alloc_iter(object, pages, pindex, pflags);
if (m == NULL) {
if (!vm_pager_can_alloc_page(object, pindex)) {
*mp = NULL;
@@ -5004,8 +4944,8 @@
for (i = 1; i < after; i++) {
m = vm_radix_iter_lookup(pages, pindex + i);
if (m == NULL) {
- m = vm_page_alloc_after(object, pages,
- pindex + i, VM_ALLOC_NORMAL, mpred);
+ m = vm_page_alloc_iter(object, pages,
+ pindex + i, VM_ALLOC_NORMAL);
if (m == NULL)
break;
} else if (vm_page_any_valid(m) || !vm_page_tryxbusy(m))
@@ -5080,7 +5020,7 @@
int end)
{
struct pctrie_iter pages;
- vm_page_t m, mpred;
+ vm_page_t m;
int allocflags, rv;
bool found;
@@ -5095,9 +5035,8 @@
&pages, object, pindex, allocflags, &found)) == NULL) {
if (!vm_pager_has_page(object, pindex, NULL, NULL))
return (0);
- mpred = vm_radix_iter_lookup_lt(&pages, pindex);
- m = vm_page_alloc_after(object, &pages, pindex,
- vm_page_grab_pflags(allocflags), mpred);
+ m = vm_page_alloc_iter(object, &pages, pindex,
+ vm_page_grab_pflags(allocflags));
if (m != NULL) {
vm_object_pip_add(object, 1);
VM_OBJECT_WUNLOCK(object);
@@ -5208,7 +5147,7 @@
vm_page_t *ma, int count)
{
struct pctrie_iter pages;
- vm_page_t m, mpred;
+ vm_page_t m;
int pflags;
int i;
@@ -5223,7 +5162,6 @@
i = 0;
vm_page_iter_init(&pages, object);
retrylookup:
- mpred = vm_radix_iter_lookup_lt(&pages, pindex + i);
for (; i < count; i++) {
m = vm_radix_iter_lookup(&pages, pindex + i);
if (m != NULL) {
@@ -5238,8 +5176,8 @@
} else {
if ((allocflags & VM_ALLOC_NOCREAT) != 0)
break;
- m = vm_page_alloc_after(object, &pages, pindex + i,
- pflags | VM_ALLOC_COUNT(count - i), mpred);
+ m = vm_page_alloc_iter(object, &pages, pindex + i,
+ pflags | VM_ALLOC_COUNT(count - i));
if (m == NULL) {
if ((allocflags & (VM_ALLOC_NOWAIT |
VM_ALLOC_WAITFAIL)) != 0)
@@ -5254,7 +5192,7 @@
vm_page_valid(m);
}
vm_page_grab_release(m, allocflags);
- ma[i] = mpred = m;
+ ma[i] = m;
}
return (i);
}
Index: sys/vm/vm_radix.h
===================================================================
--- sys/vm/vm_radix.h
+++ sys/vm/vm_radix.h
@@ -79,26 +79,6 @@
return (VM_RADIX_PCTRIE_ITER_INSERT(pages, page));
}
-/*
- * Insert the page into the vm_radix tree with its pindex as the key. Panic if
- * the pindex already exists. Return zero on success or a non-zero error on
- * memory allocation failure. Set the out parameter mpred to the previous page
- * in the tree as if found by a previous call to vm_radix_lookup_le with the
- * new page pindex.
- */
-static __inline int
-vm_radix_insert_lookup_lt(struct vm_radix *rtree, vm_page_t page,
- vm_page_t *mpred)
-{
- int error;
-
- error = VM_RADIX_PCTRIE_INSERT_LOOKUP_LE(&rtree->rt_trie, page, mpred);
- if (__predict_false(error == EEXIST))
- panic("vm_radix_insert_lookup_lt: page already present, %p",
- *mpred);
- return (error);
-}
-
/*
* Returns the value stored at the index assuming there is an external lock.
*
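
Editor's note (not part of the patch): the hunks above all make the same caller-side substitution, so a single before/after sketch may help when reading them. It is distilled from the vm_fault.c, vm_kern.c, and vm_page.c hunks; the object pointer "obj" and page index "pidx" are placeholder names, and the fragment is illustrative only, not meant to compile on its own.

	/* Common setup at an allocation site, per the hunks above. */
	struct pctrie_iter pages;
	vm_page_t m;

	vm_page_iter_init(&pages, obj);
	VM_OBJECT_WLOCK(obj);

	/*
	 * Before this change: the caller looked up the predecessor page so
	 * the allocator could keep obj->memq sorted when linking in the new
	 * page.
	 */
	vm_page_t mpred = vm_radix_iter_lookup_lt(&pages, pidx);
	m = vm_page_alloc_after(obj, &pages, pidx, VM_ALLOC_NORMAL, mpred);

	/*
	 * After this change: the radix trie is the only resident-page index,
	 * so the iterator alone is sufficient and mpred disappears.
	 */
	m = vm_page_alloc_iter(obj, &pages, pidx, VM_ALLOC_NORMAL);

This follows from the rest of the patch: with the memq TAILQ removed from struct vm_object, there is no list linkage for vm_page_insert_radixdone() to maintain, so allocation and insertion paths no longer need to compute or carry a predecessor page.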