#include <mach/mach_types.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_purgeable_internal.h>
#include <sys/kdebug.h>
#include <kern/sched_prim.h>
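/*
 * Purgeable object queues are aged with "tokens". Each time an object is made
 * volatile, a token is appended to that queue's token list; the token's count
 * records how many more page movements must happen before the token "ripens"
 * (count reaches zero). vm_purgeable_object_purge_one() only purges from a
 * queue whose head token is ripe, so recently volatile objects get a grace
 * period measured in paging activity rather than wall-clock time.
 */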
struct token {
    token_cnt_t count;
    token_idx_t next;
};
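/*
 * Tokens live in a single dynamically grown array; index 0 is reserved as the
 * list terminator, so the first usable slot is token_init_idx = 1. Freed
 * tokens are chained through token_free_idx. token_new_pagecount accumulates
 * page-count adjustments that have not yet been folded into the per-queue
 * new_pages counters, and available_for_purge counts ripe tokens across all
 * queues.
 */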
struct token *tokens;
token_idx_t token_q_max_cnt = 0;
vm_size_t token_q_cur_size = 0;
token_idx_t token_free_idx = 0;
token_idx_t token_init_idx = 1;
int32_t token_new_pagecount = 0;
int available_for_purge = 0;
static int token_q_allocating = 0;
struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
decl_lck_mtx_data(,vm_purgeable_queue_lock)
#define TOKEN_ADD 0x40
#define TOKEN_DELETE 0x41
#define TOKEN_RIPEN 0x42
#define OBJECT_ADD 0x48
#define OBJECT_REMOVE 0x49
#define OBJECT_PURGE 0x4a
#define OBJECT_PURGE_ALL 0x4b
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);
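/*
 * vm_purgeable_token_check_queue:  debug-only (MACH_ASSERT) consistency check.
 * Walks a queue's token list and verifies the unripe pointer, the tail pointer,
 * and the token count, and checks that the summed page counts match
 * vm_page_inactive_count (skipped for the obsolete queue).
 */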
#if MACH_ASSERT
static void
vm_purgeable_token_check_queue(purgeable_q_t queue)
{
    int token_cnt = 0, page_cnt = 0;
    token_idx_t token = queue->token_q_head;
    token_idx_t unripe = 0;
    int our_inactive_count;

    while (token) {
        if (tokens[token].count != 0) {
            assert(queue->token_q_unripe);
            if (unripe == 0) {
                assert(token == queue->token_q_unripe);
                unripe = token;
            }
            page_cnt += tokens[token].count;
        }
        if (tokens[token].next == 0)
            assert(queue->token_q_tail == token);

        token_cnt++;
        token = tokens[token].next;
    }

    if (unripe)
        assert(queue->token_q_unripe == unripe);
    assert(token_cnt == queue->debug_count_tokens);

    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) {
        our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
        assert(our_inactive_count >= 0);
        assert((uint32_t) our_inactive_count == vm_page_inactive_count);
    }
}
#endif
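/*
 * vm_purgeable_token_add:  append a new token to the queue's token list.
 * Called with the page queue lock held. The lock may be dropped (and the
 * search restarted) if the token array has to be grown with kmem_alloc /
 * kmem_realloc; only one thread grows the array at a time, gated by
 * token_q_allocating. The new token's count is initialized from
 * queue->new_pages (always zero for the obsolete queue, whose tokens are
 * immediately ripe).
 */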
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

    /* new token */
    token_idx_t token;
    enum purgeable_q_type i;

find_available_token:

    if (token_free_idx) {                               /* unused tokens available */
        token = token_free_idx;
        token_free_idx = tokens[token_free_idx].next;
    } else if (token_init_idx < token_q_max_cnt) {      /* lazy token array init */
        token = token_init_idx;
        token_init_idx++;
    } else {                                            /* allocate more memory */
        /* Wait if another thread is inside the memory allocation block. */
        while (token_q_allocating) {
            wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
                                              LCK_SLEEP_DEFAULT,
                                              (event_t) &token_q_allocating,
                                              THREAD_UNINT);
            if (res != THREAD_AWAKENED)
                return KERN_ABORTED;
        }

        /* Check whether memory is still maxed out; if not, retry. */
        if (token_init_idx < token_q_max_cnt)
            goto find_available_token;

        /* Still no memory; we have to allocate it ourselves. */
        token_q_allocating = 1;

        /* Drop the page queue lock so that we can allocate memory. */
        vm_page_unlock_queues();

        struct token *new_loc;
        vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
        kern_return_t result;

        if (alloc_size / sizeof(struct token) > TOKEN_COUNT_MAX) {
            result = KERN_RESOURCE_SHORTAGE;
        } else {
            if (token_q_cur_size) {
                result = kmem_realloc(kernel_map,
                                      (vm_offset_t) tokens,
                                      token_q_cur_size,
                                      (vm_offset_t *) &new_loc,
                                      alloc_size);
            } else {
                result = kmem_alloc(kernel_map,
                                    (vm_offset_t *) &new_loc,
                                    alloc_size);
            }
        }

        vm_page_lock_queues();

        if (result) {
            /* Unblock waiting threads */
            token_q_allocating = 0;
            thread_wakeup((event_t) &token_q_allocating);
            return result;
        }

        /* Publish the new, larger token array. */
        struct token *old_tokens = tokens;
        tokens = new_loc;
        vm_size_t old_token_q_cur_size = token_q_cur_size;
        token_q_cur_size = alloc_size;
        token_q_max_cnt = (token_idx_t) (token_q_cur_size / sizeof(struct token));
        assert(token_init_idx < token_q_max_cnt);       /* we must have a free token now */

        if (old_token_q_cur_size) {                     /* free the old allocation */
            vm_page_unlock_queues();
            kmem_free(kernel_map, (vm_offset_t) old_tokens, old_token_q_cur_size);
            vm_page_lock_queues();
        }

        /* Unblock waiting threads */
        token_q_allocating = 0;
        thread_wakeup((event_t) &token_q_allocating);

        goto find_available_token;
    }

    assert(token);

    /*
     * Apply the pending global page count to every queue except the
     * obsolete queue.
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
        assert(pages >= 0);
        assert(pages <= TOKEN_COUNT_MAX);
        purgeable_queues[i].new_pages = (int32_t) pages;
        assert(purgeable_queues[i].new_pages == pages);
    }
    token_new_pagecount = 0;

    /* Set the token counter value. */
    if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
        tokens[token].count = queue->new_pages;
    else
        tokens[token].count = 0;        /* obsolete items are ripe immediately */
    queue->new_pages = 0;

    /* Put the token on the queue's token list. */
    tokens[token].next = 0;
    if (queue->token_q_tail == 0) {
        assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
        queue->token_q_head = token;
    } else {
        tokens[queue->token_q_tail].next = token;
    }
    if (queue->token_q_unripe == 0) {           /* only ripe tokens (count == 0) in queue */
        if (tokens[token].count > 0)
            queue->token_q_unripe = token;      /* first unripe token */
        else
            available_for_purge++;              /* added a ripe token */
    }
    queue->token_q_tail = token;

#if MACH_ASSERT
    queue->debug_count_tokens++;
    /* Check both queues, since new_pages was modified on each. */
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
    vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)),
                          queue->type,
                          tokens[token].count,          /* num pages on new token */
                          queue->debug_count_tokens,
                          0,
                          0);
#endif

    return KERN_SUCCESS;
}
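/*
 * vm_purgeable_token_remove_first:  unlink the first token from the queue and
 * return its index (0 if the queue is empty). The token is not put on the free
 * list; the caller either frees it or re-inserts it into another queue. Its
 * page count is folded into the next token, or into queue->new_pages if the
 * queue becomes empty. Page queue lock must be held.
 */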
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

    token_idx_t token;
    token = queue->token_q_head;

    assert(token);

    if (token) {
        assert(queue->token_q_tail);
        if (queue->token_q_head == queue->token_q_unripe) {
            /* first token is unripe; the next one becomes the first unripe */
            queue->token_q_unripe = tokens[token].next;
        } else {
            /* first token was ripe, so one fewer is available for purging */
            available_for_purge--;
            assert(available_for_purge >= 0);
        }

        if (queue->token_q_tail == queue->token_q_head)
            assert(tokens[token].next == 0);

        queue->token_q_head = tokens[token].next;
        if (queue->token_q_head) {
            /* carry the removed token's page count onto the new head */
            tokens[queue->token_q_head].count += tokens[token].count;
        } else {
            /* queue is now empty; carry the count in the queue itself */
            queue->new_pages += tokens[token].count;
            queue->token_q_tail = 0;
        }

#if MACH_ASSERT
        queue->debug_count_tokens--;
        vm_purgeable_token_check_queue(queue);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
                              queue->type,
                              tokens[queue->token_q_head].count,    /* num pages on new first token */
                              token_new_pagecount,                  /* num pages waiting for next token */
                              available_for_purge,
                              0);
#endif
    }

    return token;
}
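/*
 * vm_purgeable_token_delete_first:  remove the first token from the queue and
 * return it to the token free list. Page queue lock must be held.
 */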
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
    token_idx_t token = vm_purgeable_token_remove_first(queue);

    if (token) {
        tokens[token].next = token_free_idx;
        token_free_idx = token;
    }
}
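/*
 * vm_purgeable_q_advance_all:  age the token queues by one page. The first
 * unripe token of each queue (or queue->new_pages if a queue has no unripe
 * token) is decremented; a token whose count reaches zero becomes ripe and
 * bumps available_for_purge. If token_new_pagecount has grown very large, it
 * is first folded back into the per-queue new_pages counters. Page queue lock
 * must be held.
 */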
void
vm_purgeable_q_advance_all(void)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif

    /*
     * Check the global counter; if it gets very large, scale it back into the
     * per-queue counters so it cannot overflow.
     */
    int i;
    if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) {
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
            int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
            assert(pages >= 0);
            assert(pages <= TOKEN_COUNT_MAX);
            purgeable_queues[i].new_pages = (int32_t) pages;
            assert(purgeable_queues[i].new_pages == pages);
        }
        token_new_pagecount = 0;
    }

    /*
     * Decrement token counters. A counter reaching zero means the token is
     * ripe and its object may be purged; the object is not purged here, only
     * marked available, so purging can proceed one object at a time.
     */
    for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue = &purgeable_queues[i];
        uint32_t num_pages = 1;

        /* Iterate over tokens as long as there are unripe tokens. */
        while (queue->token_q_unripe) {
            if (tokens[queue->token_q_unripe].count && num_pages) {
                tokens[queue->token_q_unripe].count -= 1;
                num_pages -= 1;
            }

            if (tokens[queue->token_q_unripe].count == 0) {
                queue->token_q_unripe = tokens[queue->token_q_unripe].next;
                available_for_purge++;
                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
                                      queue->type,
                                      tokens[queue->token_q_head].count,    /* num pages on new first token */
                                      0,
                                      available_for_purge,
                                      0);
                continue;       /* one token ripened; check the next one too */
            }
            if (num_pages == 0)
                break;          /* current token not ripe and no pages left */
        }

        /*
         * If there are no unripe tokens in the queue, decrement the new_pages
         * counter instead of a token's counter.
         */
        if (!queue->token_q_unripe) {
            queue->new_pages -= num_pages;
            assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
        }
#if MACH_ASSERT
        vm_purgeable_token_check_queue(queue);
#endif
    }
}
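/*
 * vm_purgeable_token_remove_ripe:  free the ripe (count == 0) token at the
 * head of the queue and decrement available_for_purge. Page queue lock must
 * be held.
 */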
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
    assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);

    /* Return the token to the free list and advance the queue head. */
    token_idx_t new_head = tokens[queue->token_q_head].next;
    tokens[queue->token_q_head].next = token_free_idx;
    token_free_idx = queue->token_q_head;
    queue->token_q_head = new_head;
    if (new_head == 0)
        queue->token_q_tail = 0;

#if MACH_ASSERT
    queue->debug_count_tokens--;
    vm_purgeable_token_check_queue(queue);
#endif

    available_for_purge--;
    assert(available_for_purge >= 0);
}
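/*
 * vm_purgeable_token_choose_and_delete_ripe:  consume a ripe token on behalf
 * of 'queue'. If queue's own head token is ripe it is simply freed; otherwise
 * a ripe token is freed from 'queue2' and queue's first (unripe) token is
 * migrated into queue2 at the position matching its page count, splitting the
 * count with the token it is inserted before. Page queue lock must be held.
 */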
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
    assert(queue->token_q_head);

    if (tokens[queue->token_q_head].count == 0) {
        /* This queue has a ripe token at its head; just remove it. */
        vm_purgeable_token_remove_ripe(queue);
    } else {
        assert(queue2);
        /* Remove a ripe token from the other queue ... */
        vm_purgeable_token_remove_ripe(queue2);
        /* ... and migrate the first token from this queue into it. */
        token_idx_t token;
        token_cnt_t count;

        /* The head token is unripe here, so it must be the first unripe one. */
        assert(queue->token_q_unripe == queue->token_q_head);

        token = vm_purgeable_token_remove_first(queue);
        assert(token);

        count = tokens[token].count;

        /*
         * Find the insertion point in queue2: walk the list until the
         * accumulated page counts reach the migrated token's count.
         */
        token_idx_t *token_in_queue2 = &queue2->token_q_head;
        while (*token_in_queue2 && count > tokens[*token_in_queue2].count) {
            count -= tokens[*token_in_queue2].count;
            token_in_queue2 = &tokens[*token_in_queue2].next;
        }

        if ((*token_in_queue2 == queue2->token_q_unripe) ||     /* becomes the first unripe token */
            (queue2->token_q_unripe == 0))                      /* only ripe tokens in queue2 */
            queue2->token_q_unripe = token;

        tokens[token].count = count;
        tokens[token].next = *token_in_queue2;

        /*
         * If inserting at the end, reduce new_pages by the remaining count;
         * otherwise, reduce the counter of the following token.
         */
        if (*token_in_queue2 == 0) {    /* insertion at end of queue2 */
            queue2->token_q_tail = token;
            assert(queue2->new_pages >= (int32_t) count);
            queue2->new_pages -= count;
        } else {
            assert(tokens[*token_in_queue2].count >= count);
            tokens[*token_in_queue2].count -= count;
        }
        *token_in_queue2 = token;

#if MACH_ASSERT
        queue2->debug_count_tokens++;
        vm_purgeable_token_check_queue(queue2);
#endif
    }
}
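/*
 * vm_purgeable_object_find_and_lock:  walk the object queue of the given
 * volatile group and return the first object whose lock can be taken with
 * vm_object_lock_try(), removing it from the queue. Returns 0 if no object
 * could be locked. Purgeable queue lock must be held.
 */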
static vm_object_t
vm_purgeable_object_find_and_lock(purgeable_q_t queue, int group)
{
    lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);

    vm_object_t object;
    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq)) {
        if (vm_object_lock_try(object)) {
            queue_remove(&queue->objq[group], object,
                         vm_object_t, objq);
            object->objq.next = 0;
            object->objq.prev = 0;
#if MACH_ASSERT
            queue->debug_count_objects--;
#endif
            return object;
        }
    }

    return 0;
}
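/*
 * vm_purgeable_object_purge_all:  purge every volatile object on every
 * purgeable queue, removing one token per purged object. The scan restarts
 * from the top after every purge (and after a lock collision, with a
 * mutex_pause() back-off), because the purgeable queue lock is dropped in
 * between.
 */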
void
vm_purgeable_object_purge_all(void)
{
    enum purgeable_q_type i;
    int group;
    vm_object_t object;
    unsigned int purged_count;
    uint32_t collisions;

    purged_count = 0;
    collisions = 0;

restart:
    lck_mtx_lock(&vm_purgeable_queue_lock);
    /* Cycle through all queues. */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue;

        queue = &purgeable_queues[i];

        /* Purge all objects in all volatile groups of this queue. */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            while (!queue_empty(&queue->objq[group])) {
                object = vm_purgeable_object_find_and_lock(queue, group);
                if (object == VM_OBJECT_NULL) {
                    /* Could not lock any object in this group; back off and retry. */
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    mutex_pause(collisions++);
                    goto restart;
                }

                lck_mtx_unlock(&vm_purgeable_queue_lock);

                /* Remove one token for the object we are about to purge. */
                vm_page_lock_queues();
                vm_purgeable_token_remove_first(queue);
                vm_page_unlock_queues();

                assert(object->purgable == VM_PURGABLE_VOLATILE);
                (void) vm_object_purge(object);
                vm_object_unlock(object);
                purged_count++;
                goto restart;
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
                          purged_count,         /* number of purged objects */
                          0,
                          available_for_purge,
                          0,
                          0);
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return;
}
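/*
 * vm_purgeable_object_purge_one:  purge a single volatile object. A queue is
 * only eligible if its head token is ripe; within an eligible queue the
 * lowest volatile group is tried first, falling back to the sibling
 * FIFO/LIFO queue for the same group. Returns TRUE if an object was purged,
 * FALSE if nothing was eligible. Called with the page queue lock held; the
 * lock is dropped around the call to vm_object_purge().
 */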
boolean_t
vm_purgeable_object_purge_one(void)
{
    enum purgeable_q_type i;
    int group;
    vm_object_t object = 0;
    purgeable_q_t queue, queue2;

#if MACH_ASSERT
    lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
#endif
    lck_mtx_lock(&vm_purgeable_queue_lock);

    /* Cycle through all queues. */
    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        queue = &purgeable_queues[i];

        /* Only queues with a ripe head token are eligible. */
        if (!(queue->token_q_head && tokens[queue->token_q_head].count == 0))
            continue;   /* no ripe token; look at the next purgeable queue */

        /*
         * Look through all volatile groups, starting with the lowest, and try
         * to find an object to purge; also try the other non-obsolete queue
         * for the same group before moving on to the next group.
         */
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            if (!queue_empty(&queue->objq[group]) &&
                (object = vm_purgeable_object_find_and_lock(queue, group))) {
                lck_mtx_unlock(&vm_purgeable_queue_lock);
                vm_purgeable_token_choose_and_delete_ripe(queue, 0);
                goto purge_now;
            }
            if (i != PURGEABLE_Q_TYPE_OBSOLETE) {
                /* Token migration case: borrow from the sibling queue. */
                queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ?
                                           PURGEABLE_Q_TYPE_FIFO :
                                           PURGEABLE_Q_TYPE_LIFO];

                if (!queue_empty(&queue2->objq[group]) &&
                    (object = vm_purgeable_object_find_and_lock(queue2, group))) {
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
                    goto purge_now;
                }
            }
            assert(queue->debug_count_objects >= 0);
        }
    }
    /* Nothing to purge: either no volatile objects or no ripe tokens. */
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return FALSE;

purge_now:

    assert(object);
    assert(object->purgable == VM_PURGABLE_VOLATILE);
    vm_page_unlock_queues();    /* unlock for the call to vm_object_purge() */
    (void) vm_object_purge(object);
    vm_object_unlock(object);
    vm_page_lock_queues();

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
                          object,       /* purged object */
                          0,
                          available_for_purge,
                          0,
                          0);

    return TRUE;
}
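/*
 * vm_purgeable_object_add:  place a volatile object on the given purgeable
 * queue. FIFO and obsolete queues enqueue at the tail (purged last), the LIFO
 * queue at the head (purged first); the obsolete queue always uses group 0.
 */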
void
vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
{
    vm_object_lock_assert_exclusive(object);
    lck_mtx_lock(&vm_purgeable_queue_lock);

    if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
        group = 0;
    if (queue->type != PURGEABLE_Q_TYPE_LIFO)   /* fifo and obsolete queues */
        queue_enter(&queue->objq[group], object, vm_object_t, objq);            /* last to die */
    else
        queue_enter_first(&queue->objq[group], object, vm_object_t, objq);      /* first to die */

#if MACH_ASSERT
    queue->debug_count_objects++;
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
                          0,
                          tokens[queue->token_q_head].count,
                          queue->type,
                          group,
                          0);
#endif

    lck_mtx_unlock(&vm_purgeable_queue_lock);
}
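/*
 * vm_purgeable_object_remove:  search all purgeable queues and groups for the
 * object, unlink it, and return the queue it was found on (0 if it was not on
 * any queue). No token is removed here.
 */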
purgeable_q_t
vm_purgeable_object_remove(vm_object_t object)
{
    enum purgeable_q_type i;
    int group;

    vm_object_lock_assert_exclusive(object);
    lck_mtx_lock(&vm_purgeable_queue_lock);

    for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
        purgeable_q_t queue = &purgeable_queues[i];
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            vm_object_t o;
            for (o = (vm_object_t) queue_first(&queue->objq[group]);
                 !queue_end(&queue->objq[group], (queue_entry_t) o);
                 o = (vm_object_t) queue_next(&o->objq)) {
                if (o == object) {
                    queue_remove(&queue->objq[group], object,
                                 vm_object_t, objq);
#if MACH_ASSERT
                    queue->debug_count_objects--;
                    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
                                          0,
                                          tokens[queue->token_q_head].count,
                                          queue->type,
                                          group,
                                          0);
#endif
                    lck_mtx_unlock(&vm_purgeable_queue_lock);
                    object->objq.next = 0;
                    object->objq.prev = 0;
                    return &purgeable_queues[i];
                }
            }
        }
    }
    lck_mtx_unlock(&vm_purgeable_queue_lock);
    return 0;
}