#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/counter.h>
#include <kern/zalloc.h>
#include <machine/atomic.h>
#include <machine/machine_routines.h>
#include <machine/cpu_number.h>
/*
 * Zone backing dynamically allocated scalable (per-CPU) counters.
 * Sealed read-only after startup (SECURITY_READ_ONLY_LATE).
 */
SECURITY_READ_ONLY_LATE(zone_t) counters_zone;
/* One 64-bit slot per CPU; ZC_PERCPU gives each CPU its own copy. */
ZONE_INIT(&counters_zone, "per_cpu_counters", sizeof(uint64_t),
ZC_PERCPU | ZC_ALIGNMENT_REQUIRED, ZONE_ID_ANY, NULL);
/*
 * Count of statically declared scalable counters converted at boot by
 * scalable_counter_static_init(); incremented there, never decremented.
 */
uint64_t num_static_scalable_counters;
/*
 * Boot-time fixup for a statically declared scalable counter: rewrite the
 * counter handle with __zpcpu_mangle_for_boot() so that zpercpu accessors
 * resolve it correctly before the per-CPU zone subsystem is up.
 * Startup-only code (__startup_func); discarded after boot.
 */
__startup_func void
scalable_counter_static_boot_mangle(scalable_counter_t *counter)
{
	*counter = __zpcpu_mangle_for_boot(*counter);
}
/*
 * Convert a statically declared scalable counter from its boot-mangled
 * backing store to a permanent per-CPU allocation, carrying forward any
 * count accumulated during early boot.
 *
 * NOTE(review): the load and store both go through zpercpu_get(), i.e. the
 * calling CPU's slot — presumably only the boot CPU has counted at this
 * point, so that single slot holds the full value; confirm against startup
 * ordering.
 */
__startup_func void
scalable_counter_static_init(scalable_counter_t *counter)
{
	/* Snapshot the early-boot value before replacing the backing store. */
	uint64_t current_value = os_atomic_load_wide(zpercpu_get(*counter), relaxed);
	*counter = zalloc_percpu_permanent(sizeof(uint64_t), ZALIGN_64);
	/* Re-seed the new allocation with the preserved early-boot count. */
	os_atomic_store_wide(zpercpu_get(*counter), current_value, relaxed);
	num_static_scalable_counters++;
}
/*
 * Allocate a zero-initialized per-CPU scalable counter from counters_zone.
 * Z_WAITOK: may block; Z_NOFAIL: cannot return NULL, so the result needs
 * no error check. Pair with counter_free(scalable_counter_t *).
 */
OS_OVERLOADABLE
void
counter_alloc(scalable_counter_t *counter)
{
	*counter = zalloc_percpu(counters_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
}
/*
 * "Allocate" an atomic counter: no backing storage is needed, so this just
 * resets the value to zero with a relaxed wide atomic store.
 */
OS_OVERLOADABLE
void
counter_alloc(atomic_counter_t *counter)
{
	os_atomic_store_wide(counter, 0, relaxed);
}
/*
 * Release a scalable counter's per-CPU allocation back to counters_zone.
 * The handle is dangling afterwards; callers must not reuse it without a
 * fresh counter_alloc().
 */
OS_OVERLOADABLE
void
counter_free(scalable_counter_t *counter)
{
	zfree_percpu(counters_zone, *counter);
}
/*
 * Free an atomic counter: intentionally a no-op, since counter_alloc for
 * the atomic variant allocates nothing. Kept so both counter flavors share
 * a uniform alloc/free API.
 */
OS_OVERLOADABLE
void
counter_free(atomic_counter_t *counter)
{
	(void)counter;
}
/*
 * Add 'amount' to an atomic counter with a relaxed atomic RMW: the update
 * itself is atomic, but no memory ordering is implied for other accesses.
 */
OS_OVERLOADABLE
void
counter_add(atomic_counter_t *counter, uint64_t amount)
{
	os_atomic_add(counter, amount, relaxed);
}
/* Increment an atomic counter by one (relaxed atomic RMW). */
OS_OVERLOADABLE
void
counter_inc(atomic_counter_t *counter)
{
	os_atomic_inc(counter, relaxed);
}
/* Decrement an atomic counter by one (relaxed atomic RMW). */
OS_OVERLOADABLE
void
counter_dec(atomic_counter_t *counter)
{
	os_atomic_dec(counter, relaxed);
}
/*
 * Add 'amount' to an atomic counter while the caller has preemption
 * disabled. Atomic counters gain nothing from disabled preemption, so this
 * is the same relaxed atomic add that counter_add() performs.
 */
OS_OVERLOADABLE
void
counter_add_preemption_disabled(atomic_counter_t *counter, uint64_t amount)
{
	os_atomic_add(counter, amount, relaxed);
}
/*
 * Increment an atomic counter while the caller has preemption disabled.
 * The atomic variant does not need a special fast path, so this is the
 * same relaxed atomic increment that counter_inc() performs.
 */
OS_OVERLOADABLE
void
counter_inc_preemption_disabled(atomic_counter_t *counter)
{
	os_atomic_inc(counter, relaxed);
}
/*
 * Decrement an atomic counter while the caller has preemption disabled.
 * The atomic variant does not need a special fast path, so this is the
 * same relaxed atomic decrement that counter_dec() performs.
 */
OS_OVERLOADABLE
void
counter_dec_preemption_disabled(atomic_counter_t *counter)
{
	os_atomic_dec(counter, relaxed);
}
/*
 * Read an atomic counter's current value with a relaxed wide load. The
 * snapshot may race with concurrent updates; no ordering is implied.
 */
OS_OVERLOADABLE
uint64_t
counter_load(atomic_counter_t *counter)
{
	uint64_t snapshot = os_atomic_load_wide(counter, relaxed);
	return snapshot;
}
/*
 * Read a scalable counter by summing every CPU's per-CPU slot. This is not
 * a consistent snapshot: slots may be updated concurrently while the loop
 * runs, so the result is only approximate under contention.
 */
OS_OVERLOADABLE
uint64_t
counter_load(scalable_counter_t *counter)
{
	uint64_t total = 0;

	zpercpu_foreach(cpu_slot, *counter) {
		total += os_atomic_load_wide(cpu_slot, relaxed);
	}
	return total;
}