#include <cpus.h>
#include <platforms.h>
#include <mp_v1_1.h>
#include <mach_kdb.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/clock.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <machine/mach_param.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <i386/ipl.h>
#include <i386/pit.h>
#include <i386/pio.h>
#include <i386/misc_protos.h>
#include <i386/rtclock_entries.h>
#include <i386/hardclock_entries.h>
/* Forward declarations for the system (real-time) clock device. */
int		sysclk_config(void);
int		sysclk_init(void);
kern_return_t	sysclk_gettime(
	mach_timespec_t	*cur_time);
kern_return_t	sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count);
kern_return_t	sysclk_setattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	count);
void		sysclk_setalarm(
	mach_timespec_t	*alarm_time);

/* Hook for IOKit interrupt registration -- declared here but not
 * used in this file; presumably consumed by platform glue. */
extern void	(*IOKitRegisterInterruptHook)(void *, int irq, int isclock);

/*
 * Operations vector for the system clock.  The settime slot is 0:
 * the system (uptime) clock cannot be set.
 */
struct clock_ops sysclk_ops = {
	sysclk_config,		sysclk_init,
	sysclk_gettime,		0,
	sysclk_getattr,		sysclk_setattr,
	sysclk_setalarm,
};
/* Forward declarations for the calendar (wall) clock device. */
int		calend_config(void);
int		calend_init(void);
kern_return_t	calend_gettime(
	mach_timespec_t	*cur_time);
kern_return_t	calend_settime(
	mach_timespec_t	*cur_time);
kern_return_t	calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count);

/*
 * Operations vector for the calendar clock.  The setattr and
 * setalarm slots are 0: the calendar supports neither attribute
 * changes nor alarms.
 */
struct clock_ops calend_ops = {
	calend_config,		calend_init,
	calend_gettime,		calend_settime,
	calend_getattr,		0,
	0,
};
/* Published real-time clock state. */
mach_timespec_t	*RtcTime = (mach_timespec_t *)0;	/* -> rtclock.time once sysclk_init has run */
mach_timespec_t	*RtcAlrm;	/* pending alarm time, or 0 when none is armed */
clock_res_t	RtcDelt;	/* half a tick, in ns: slop for alarm matching */

/*
 * Core clock state.  Every field is protected by rtclock.lock,
 * taken at clock interrupt level via LOCK_RTC().
 */
struct {
	uint64_t	abstime;	/* free-running uptime, in ns (advanced each tick) */
	mach_timespec_t	time;		/* uptime as sec/nsec (advanced each tick) */
	mach_timespec_t	alarm_time;	/* saved copy of the armed alarm time */
	mach_timespec_t	calend_offset;	/* calendar time minus system uptime */
	boolean_t	calend_is_set;	/* TRUE once the calendar has been set */
	uint64_t	timer_deadline;	/* one-shot timer expiry, abstime units */
	boolean_t	timer_is_set;	/* TRUE while the one-shot timer is armed */
	clock_timer_func_t	timer_expire;	/* timer callout (registered once) */
	clock_res_t	new_ires;	/* pending resolution change, ns (0 = none) */
	clock_res_t	intr_nsec;	/* ns per clock interrupt (current resolution) */
	decl_simple_lock_data(,lock)	/* guards all of the above */
} rtclock;

/* Derived i8254 programming values, computed by rtc_setvals(). */
unsigned int	clknum;			/* PIT input clocks per second */
unsigned int	new_clknum;		/* clknum to apply with a pending new_ires */
unsigned int	time_per_clk;		/* ns per PIT clock, scaled by ZHZ */
unsigned int	clks_per_int;		/* PIT clocks per interrupt */
unsigned int	clks_per_int_99;	/* 99% of clks_per_int (rollover window) */
int		rtc_intr_count;		/* interrupts until next scheduler tick */
int		rtc_intr_hertz;		/* interrupts per scheduler tick */
int		rtc_intr_freq;		/* interrupts per second */
int		rtc_print_lost_tick;	/* non-zero: report missed ticks on console */

/* Raise IPL to clock level, then take the rtclock lock. */
#define LOCK_RTC(s) \
MACRO_BEGIN \
(s) = splclock(); \
simple_lock(&rtclock.lock); \
MACRO_END

/* Release the rtclock lock, then restore the saved IPL. */
#define UNLOCK_RTC(s) \
MACRO_BEGIN \
simple_unlock(&rtclock.lock); \
splx(s); \
MACRO_END

/* Coarsest supported resolution: one interrupt per scheduler tick. */
#define RTC_MINRES	(NSEC_PER_SEC / HZ)
/* Finest supported resolution: 20 interrupts per scheduler tick. */
#define RTC_MAXRES	(RTC_MINRES / 20)
#define ZANO	(1000000000)	/* nanoseconds per second */
#define ZHZ	(ZANO / (NSEC_PER_SEC / HZ))	/* fixed-point scale used for time_per_clk */

/*
 * Latch i8254 counter 0, then read its 16-bit count
 * (low byte first, then high byte).
 */
#define READ_8254(val) { \
outb(PITCTL_PORT, PIT_C0); \
(val) = inb(PITCTR0_PORT); \
(val) |= inb(PITCTR0_PORT) << 8 ; }

/* Software delay-loop calibration state (see calibrate_delay). */
unsigned int	delaycount = 100;	/* loop count yielding ~microdata microseconds */
unsigned int	microdata = 50;		/* microseconds used as the calibration unit */

extern int	measure_delay(int us);
void		rtc_setvals( unsigned int, clock_res_t );
/*
 * Recompute the PIT programming values for a given input clock
 * rate (new_clknum, clocks per second) and interrupt resolution
 * (new_ires, ns between interrupts).
 *
 * Derives:
 *	rtc_intr_freq / rtc_intr_hertz	interrupt rates
 *	clks_per_int(_99)		PIT count per interrupt
 *	time_per_clk			ns per PIT clock, scaled by ZHZ
 *	rtclock.intr_nsec		actual ns per interrupt
 *	RtcDelt				half-tick alarm slop
 *
 * div_scale()/mul_scale() appear to be fixed-point helpers that
 * return a value plus a binary scale factor -- TODO confirm
 * against their definitions.  The panics below fire when the
 * scales cannot be reconciled without overflow.
 */
void
rtc_setvals(
	unsigned int	new_clknum,
	clock_res_t	new_ires
)
{
	unsigned int	timeperclk;
	unsigned int	scale0;
	unsigned int	scale1;
	unsigned int	res;

	clknum = new_clknum;
	rtc_intr_freq = (NSEC_PER_SEC / new_ires);
	rtc_intr_hertz = rtc_intr_freq / HZ;
	/* round to the nearest whole PIT count */
	clks_per_int = (clknum + (rtc_intr_freq / 2)) / rtc_intr_freq;
	clks_per_int_99 = clks_per_int - clks_per_int/100;
	timeperclk = div_scale(ZANO, clknum, &scale0);
	time_per_clk = mul_scale(ZHZ, timeperclk, &scale1);
	if (scale0 > scale1)
		time_per_clk >>= (scale0 - scale1);
	else if (scale0 < scale1)
		panic("rtc_clock: time_per_clk overflow\n");
	/* ns per interrupt = clks_per_int * (ns per clock) */
	res = mul_scale(clks_per_int, timeperclk, &scale1);
	if (scale0 > scale1)
		rtclock.intr_nsec = res >> (scale0 - scale1);
	else
		panic("rtc_clock: rtclock.intr_nsec overflow\n");
	rtc_intr_count = 1;
	RtcDelt = rtclock.intr_nsec/2;
}
/*
 * Configure the system clock device.  On a multiprocessor only
 * the master CPU does the configuration; all other CPUs report
 * success immediately.  Returns 1 (configured).
 */
int
sysclk_config(void)
{
	int	RtcFlag;
	int	pic;

#if NCPUS > 1
	mp_disable_preemption();
	if (cpu_number() != master_cpu) {
		mp_enable_preemption();
		return(1);
	}
	mp_enable_preemption();
#endif
#if MP_V1_1
	{
		extern boolean_t	mp_v1_1_initialized;
		if (mp_v1_1_initialized)
			pic = 2;
		else
			pic = 0;
	}
#else
	pic = 0;
#endif
	/*
	 * NOTE(review): `pic` is computed but never consumed in this
	 * file -- presumably interrupt wiring elsewhere (or an earlier
	 * revision) used it; confirm before removing.
	 */
	RtcFlag = 1;
	printf("realtime clock configured\n");
	simple_lock_init(&rtclock.lock, ETAP_NO_TRACE);
	return (RtcFlag);
}
/*
 * Initialize the system clock device.  On a multiprocessor only
 * the master CPU performs the initialization; other CPUs return
 * immediately.  Publishes the running time record (RtcTime) and
 * programs the default tick resolution.  Returns 1 (success).
 *
 * (The unused local `vm_offset_t *vp;` from the original has been
 * removed; it was never referenced.)
 */
int
sysclk_init(void)
{
#if NCPUS > 1
	mp_disable_preemption();
	if (cpu_number() != master_cpu) {
		mp_enable_preemption();
		return(1);
	}
	mp_enable_preemption();
#endif
	RtcTime = &rtclock.time;
	rtc_setvals( CLKNUM, RTC_MINRES );
	return (1);
}
/* Intra-tick phase (ns) returned by the most recent gettime; the
 * phase should only move forward within a tick, so a smaller value
 * suggests a missed interrupt.  Reset to 0 by rtclock_intr(). */
static volatile unsigned int	last_ival = 0;

/*
 * Return the current system (uptime) clock value: the tick-counted
 * time in rtclock.time plus the fraction of the current tick read
 * from the i8254 down-counter.
 */
kern_return_t
sysclk_gettime(
	mach_timespec_t	*cur_time)
{
	mach_timespec_t	itime = {0, 0};
	unsigned int	val, val2;
	int		s;

	/* not initialized yet: report zero time */
	if (!RtcTime) {
		cur_time->tv_nsec = 0;
		cur_time->tv_sec = 0;
		return (KERN_SUCCESS);
	}
	LOCK_RTC(s);
	/*
	 * Sample the counter twice until the two reads are consistent
	 * (it counts down; tolerate a decrement of at most 10).
	 * NOTE(review): `val - 10` wraps when val < 10 (unsigned),
	 * forcing an extra re-read near zero -- appears benign since
	 * the counter reloads, but confirm.
	 */
	do {
		READ_8254(val);
		READ_8254(val2);
	} while ( val2 > val || val2 < val - 10 );
	/*
	 * Near the top of the count the tick interrupt may already be
	 * pending but not yet serviced; if so, credit one full tick.
	 * NOTE(review): the intent looks like PIC OCW3 "read IRR"
	 * (0x0a) sent to port 0x20 -- verify outb() argument order;
	 * other call sites in this file pass the port first.
	 */
	if ( val > clks_per_int_99 ) {
		outb( 0x0a, 0x20 );
		if ( inb( 0x20 ) & 1 )
			itime.tv_nsec = rtclock.intr_nsec;
	}
	/* ns elapsed within the current tick */
	itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ;
	if ( itime.tv_nsec < last_ival ) {
		if (rtc_print_lost_tick)
			printf( "rtclock: missed clock interrupt.\n" );
	}
	last_ival = itime.tv_nsec;
	cur_time->tv_sec = rtclock.time.tv_sec;
	cur_time->tv_nsec = rtclock.time.tv_nsec;
	UNLOCK_RTC(s);
	ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime));
	return (KERN_SUCCESS);
}
/*
 * As sysclk_gettime(), but the caller already holds rtclock.lock
 * at clock interrupt level.  (The counter-sampling logic is
 * duplicated from sysclk_gettime; keep the two in sync.)
 */
kern_return_t
sysclk_gettime_internal(
	mach_timespec_t	*cur_time)
{
	mach_timespec_t	itime = {0, 0};
	unsigned int	val, val2;

	/* not initialized yet: report zero time */
	if (!RtcTime) {
		cur_time->tv_nsec = 0;
		cur_time->tv_sec = 0;
		return (KERN_SUCCESS);
	}
	/* re-read until two successive samples are consistent */
	do {
		READ_8254(val);
		READ_8254(val2);
	} while ( val2 > val || val2 < val - 10 );
	/* credit a pending-but-unserviced tick near the reload point */
	if ( val > clks_per_int_99 ) {
		outb( 0x0a, 0x20 );
		if ( inb( 0x20 ) & 1 )
			itime.tv_nsec = rtclock.intr_nsec;
	}
	/* ns elapsed within the current tick */
	itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ;
	if ( itime.tv_nsec < last_ival ) {
		if (rtc_print_lost_tick)
			printf( "rtclock: missed clock interrupt.\n" );
	}
	last_ival = itime.tv_nsec;
	cur_time->tv_sec = rtclock.time.tv_sec;
	cur_time->tv_nsec = rtclock.time.tv_nsec;
	ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime));
	return (KERN_SUCCESS);
}
/*
 * As sysclk_gettime(), for callers that already run with
 * interrupts disabled: takes only the simple lock (no spl), and
 * a single counter sample suffices since no tick can intervene.
 */
void
sysclk_gettime_interrupts_disabled(
	mach_timespec_t	*cur_time)
{
	mach_timespec_t	itime = {0, 0};
	unsigned int	val;

	/* not initialized yet: report zero time */
	if (!RtcTime) {
		cur_time->tv_nsec = 0;
		cur_time->tv_sec = 0;
		return;
	}
	simple_lock(&rtclock.lock);
	READ_8254(val);
	/* credit a pending-but-unserviced tick near the reload point */
	if ( val > clks_per_int_99 ) {
		outb( 0x0a, 0x20 );
		if ( inb( 0x20 ) & 1 )
			itime.tv_nsec = rtclock.intr_nsec;
	}
	/* ns elapsed within the current tick */
	itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ;
	if ( itime.tv_nsec < last_ival ) {
		if (rtc_print_lost_tick)
			printf( "rtclock: missed clock interrupt.\n" );
	}
	last_ival = itime.tv_nsec;
	cur_time->tv_sec = rtclock.time.tv_sec;
	cur_time->tv_nsec = rtclock.time.tv_nsec;
	ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime));
	simple_unlock(&rtclock.lock);
}
/*
 * Return the nanoseconds elapsed within the current tick (plus a
 * full tick when the next interrupt is already pending), or 0 if
 * the clock is not initialized.  Caller must hold rtclock.lock;
 * used by clock_get_uptime().
 */
static
natural_t
get_uptime_ticks(void)
{
	natural_t	result = 0;
	unsigned int	val, val2;

	if (!RtcTime)
		return (result);
	/* re-read until two successive samples are consistent */
	do {
		READ_8254(val);
		READ_8254(val2);
	} while (val2 > val || val2 < val - 10);
	/* credit a pending-but-unserviced tick near the reload point */
	if (val > clks_per_int_99) {
		outb(0x0a, 0x20);
		if (inb(0x20) & 1)
			result = rtclock.intr_nsec;
	}
	result += ((clks_per_int - val) * time_per_clk) / ZHZ;
	if (result < last_ival) {
		if (rtc_print_lost_tick)
			printf( "rtclock: missed clock interrupt.\n" );
	}
	return (result);
}
/*
 * Get system clock attributes; *count must be 1 (one word out).
 *
 * Note the preprocessor-dependent fallthrough: when the #if below
 * is compiled out, CLOCK_GET_TIME_RES deliberately falls through
 * to CLOCK_ALARM_CURRES and reports the interrupt resolution.
 */
kern_return_t
sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count)
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:
#if (NCPUS == 1 || (MP_V1_1 && 0))
		LOCK_RTC(s);
		*(clock_res_t *) attr = 1000;
		UNLOCK_RTC(s);
		break;
#endif
	/* intentional fallthrough when the #if above is compiled out */
	case CLOCK_ALARM_CURRES:
		LOCK_RTC(s);
		*(clock_res_t *) attr = rtclock.intr_nsec;
		UNLOCK_RTC(s);
		break;

	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = RTC_MAXRES;
		break;

	case CLOCK_ALARM_MINRES:
		*(clock_res_t *) attr = RTC_MINRES;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}
	return (KERN_SUCCESS);
}
/*
 * Set system clock attributes; count must be 1.  Only
 * CLOCK_ALARM_CURRES may be set: the requested resolution must
 * lie within [RTC_MAXRES, RTC_MINRES] and divide the PIT clock
 * to within 0.1%.  The hardware reprogramming is deferred:
 * rtclock_intr() applies rtclock.new_ires at the end of the
 * current sub-tick cycle.
 */
kern_return_t
sysclk_setattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	count)
{
	spl_t		s;
	int		freq;
	int		adj;
	clock_res_t	new_ires;

	if (count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:
	case CLOCK_ALARM_MAXRES:
	case CLOCK_ALARM_MINRES:
		return (KERN_FAILURE);

	case CLOCK_ALARM_CURRES:
		new_ires = *(clock_res_t *) attr;
		/* range check (RTC_MAXRES is the smaller ns value) */
		if (new_ires < RTC_MAXRES || new_ires > RTC_MINRES)
			return (KERN_INVALID_VALUE);
		freq = (NSEC_PER_SEC / new_ires);
		/* rounding error per interrupt, in ns */
		adj = (((clknum % freq) * new_ires) / clknum);
		/* reject if the error exceeds 0.1% of the interval */
		if (adj > (new_ires / 1000))
			return (KERN_INVALID_VALUE);
		LOCK_RTC(s);
		if ( freq != rtc_intr_freq ) {
			/* deferred: applied by rtclock_intr() */
			rtclock.new_ires = new_ires;
			new_clknum = clknum;
		}
		UNLOCK_RTC(s);
		return (KERN_SUCCESS);

	default:
		return (KERN_INVALID_VALUE);
	}
}
/*
 * Arm a clock alarm: save the requested time and publish it to
 * the interrupt path by pointing RtcAlrm at the saved copy.
 * rtclock_intr() delivers and disarms the alarm when it comes due.
 */
void
sysclk_setalarm(
	mach_timespec_t	*alarm_time)
{
	spl_t	level;

	LOCK_RTC(level);
	rtclock.alarm_time = *alarm_time;
	RtcAlrm = &rtclock.alarm_time;
	UNLOCK_RTC(level);
}
/*
 * Configure the calendar clock: defer entirely to the
 * battery-backed clock probe.
 */
int
calend_config(void)
{
	int	probed;

	probed = bbc_config();
	return (probed);
}
/*
 * Initialize the calendar clock.  Nothing to do beyond reporting
 * success; the battery-backed clock was probed by calend_config().
 */
int
calend_init(void)
{
	return 1;
}
/*
 * Return the calendar (wall clock) time: system uptime plus the
 * recorded calendar offset.  Fails until the calendar is set.
 */
kern_return_t
calend_gettime(
	mach_timespec_t	*cur_time)
{
	spl_t	s;

	LOCK_RTC(s);
	if (!rtclock.calend_is_set) {
		UNLOCK_RTC(s);
		return (KERN_FAILURE);
	}
	/* lock already held, so use the internal variant */
	(void) sysclk_gettime_internal(cur_time);
	ADD_MACH_TIMESPEC(cur_time, &rtclock.calend_offset);
	UNLOCK_RTC(s);
	return (KERN_SUCCESS);
}
/*
 * Set the calendar time: record the offset between the requested
 * wall time and the current system uptime, then (outside the
 * lock) push the new time to the battery-backed clock.
 */
kern_return_t
calend_settime(
	mach_timespec_t	*new_time)
{
	mach_timespec_t	curr_time;
	spl_t		s;

	LOCK_RTC(s);
	(void) sysclk_gettime_internal(&curr_time);
	/* offset = new wall time - current uptime */
	rtclock.calend_offset = *new_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	UNLOCK_RTC(s);
	/* update the battery-backed clock outside the lock */
	(void) bbc_settime(new_time);
	return (KERN_SUCCESS);
}
/*
 * Get calendar clock attributes; *count must be 1.  The calendar
 * supports no alarms, so every alarm-resolution flavor reports 0.
 */
kern_return_t
calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count)
{
	spl_t	s;

	if (*count != 1)
		return (KERN_FAILURE);
	switch (flavor) {

	case CLOCK_GET_TIME_RES:
#if (NCPUS == 1 || (MP_V1_1 && 0))
		LOCK_RTC(s);
		*(clock_res_t *) attr = 1000;
		UNLOCK_RTC(s);
		break;
#else
		LOCK_RTC(s);
		*(clock_res_t *) attr = rtclock.intr_nsec;
		UNLOCK_RTC(s);
		break;
#endif
	case CLOCK_ALARM_CURRES:
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		*(clock_res_t *) attr = 0;
		break;

	default:
		return (KERN_INVALID_VALUE);
	}
	return (KERN_SUCCESS);
}
/*
 * Nudge the calendar by nsec nanoseconds (positive or negative).
 * No effect until the calendar has been set.
 */
void
clock_adjust_calendar(
	clock_res_t	nsec)
{
	spl_t	s;

	LOCK_RTC(s);
	if (rtclock.calend_is_set)
		ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec);
	UNLOCK_RTC(s);
}
/*
 * Initialize the calendar from the battery-backed clock, unless
 * it has already been set (e.g. via calend_settime).  Silently
 * does nothing when the battery-backed clock cannot be read.
 */
void
clock_initialize_calendar(void)
{
	mach_timespec_t	bbc_time, curr_time;
	spl_t		s;

	if (bbc_gettime(&bbc_time) != KERN_SUCCESS)
		return;
	LOCK_RTC(s);
	if (!rtclock.calend_is_set) {
		/* offset = battery-backed time - current uptime */
		(void) sysclk_gettime_internal(&curr_time);
		rtclock.calend_offset = bbc_time;
		SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
		rtclock.calend_is_set = TRUE;
	}
	UNLOCK_RTC(s);
}
/*
 * Return the current calendar offset (calendar time minus system
 * uptime), or MACH_TIMESPEC_ZERO when the calendar is not set.
 */
mach_timespec_t
clock_get_calendar_offset(void)
{
	mach_timespec_t	offset = MACH_TIMESPEC_ZERO;
	spl_t		level;

	LOCK_RTC(level);
	if (rtclock.calend_is_set)
		offset = rtclock.calend_offset;
	UNLOCK_RTC(level);

	return (offset);
}
/*
 * Report the absolute-time conversion ratio.  Absolute time on
 * this platform is nanoseconds, so the ratio is 1/1.
 */
void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	spl_t	level;

	LOCK_RTC(level);
	info->numer = 1;
	info->denom = 1;
	UNLOCK_RTC(level);
}
/*
 * Arm the one-shot timer: record the deadline (absolute-time
 * units) and mark the timer set.  rtclock_intr() fires the
 * registered callout once the deadline passes.
 */
void
clock_set_timer_deadline(
	uint64_t	deadline)
{
	spl_t	level;

	LOCK_RTC(level);
	rtclock.timer_deadline = deadline;
	rtclock.timer_is_set = TRUE;
	UNLOCK_RTC(level);
}
/*
 * Register the one-shot timer callout.  First registration wins;
 * subsequent calls are silently ignored.
 */
void
clock_set_timer_func(
	clock_timer_func_t	func)
{
	spl_t	level;

	LOCK_RTC(level);
	if (rtclock.timer_expire == NULL)
		rtclock.timer_expire = func;
	UNLOCK_RTC(level);
}
/*
 * Program i8254 counter 0 with the current clks_per_int count
 * (low byte, then high byte).  Caller must hold rtclock.lock.
 */
#define RTCLOCK_RESET() { \
outb(PITCTL_PORT, PIT_C0|PIT_NDIVMODE|PIT_READMODE); \
outb(PITCTR0_PORT, (clks_per_int & 0xff)); \
outb(PITCTR0_PORT, (clks_per_int >> 8)); \
}

/*
 * Reprogram the hardware tick source.  On a multiprocessor only
 * the master CPU touches the PIT; other CPUs return immediately.
 */
void
rtclock_reset(void)
{
	int	s;

#if NCPUS > 1 && !(MP_V1_1 && 0)
	mp_disable_preemption();
	if (cpu_number() != master_cpu) {
		mp_enable_preemption();
		return;
	}
	mp_enable_preemption();
#endif
	LOCK_RTC(s);
	RTCLOCK_RESET();
	UNLOCK_RTC(s);
}
/*
 * Real-time clock interrupt handler.  Advances the software clock
 * by one interrupt interval, then, with the lock dropped:
 *	- fires the one-shot timer callout if its deadline passed;
 *	- delivers a clock alarm if one is due (within RtcDelt ns).
 * When the sub-tick countdown reaches zero it applies any pending
 * resolution change (from sysclk_setattr) and reloads the count.
 *
 * Returns the decremented sub-tick count; 0 marks a full
 * scheduler-tick boundary -- presumably the interrupt glue runs
 * hardclock processing then (confirm against the caller).
 */
int
rtclock_intr(void)
{
	uint64_t	abstime;
	mach_timespec_t	clock_time;
	int		i;
	spl_t		s;

	LOCK_RTC(s);
	/* advance rtclock.time by one interrupt interval */
	i = rtclock.time.tv_nsec + rtclock.intr_nsec;
	if (i < NSEC_PER_SEC)
		rtclock.time.tv_nsec = i;
	else {
		rtclock.time.tv_nsec = i - NSEC_PER_SEC;
		rtclock.time.tv_sec++;
	}
	/* new tick: reset the missed-interrupt phase detector */
	last_ival = 0;
	/*
	 * NOTE(review): abstime advances by a full scheduler tick per
	 * interrupt, but intr_nsec may be finer than NSEC_PER_SEC/HZ
	 * after a resolution change -- verify the two stay consistent.
	 */
	rtclock.abstime += (NSEC_PER_SEC/HZ);
	abstime = rtclock.abstime;
	if (	rtclock.timer_is_set &&
			rtclock.timer_deadline <= abstime ) {
		rtclock.timer_is_set = FALSE;
		UNLOCK_RTC(s);
		/* callout runs unlocked; it may rearm the timer */
		(*rtclock.timer_expire)(abstime);
		LOCK_RTC(s);
	}
	/* alarm is due if past, or within RtcDelt ns of now */
	if (RtcAlrm && (RtcAlrm->tv_sec < RtcTime->tv_sec ||
			(RtcAlrm->tv_sec == RtcTime->tv_sec &&
			 RtcDelt >= RtcAlrm->tv_nsec - RtcTime->tv_nsec))) {
		clock_time.tv_sec = 0;
		clock_time.tv_nsec = RtcDelt;
		ADD_MACH_TIMESPEC (&clock_time, RtcTime);
		RtcAlrm = 0;
		UNLOCK_RTC(s);
		/* alarm delivery runs unlocked */
		clock_alarm_intr(SYSTEM_CLOCK, &clock_time);
		LOCK_RTC(s);
	}
	if ((i = --rtc_intr_count) == 0) {
		if (rtclock.new_ires) {
			/* apply the resolution deferred by sysclk_setattr */
			rtc_setvals(new_clknum, rtclock.new_ires);
			RTCLOCK_RESET();	/* reprogram the PIT */
			rtclock.new_ires = 0;
		}
		rtc_intr_count = rtc_intr_hertz;
	}
	UNLOCK_RTC(s);
	return (i);
}
/*
 * Return the free-running uptime in absolute (ns) units: the
 * tick-counted abstime plus the fraction of the current tick.
 * The fraction is folded in after the lock is dropped.
 */
void
clock_get_uptime(
	uint64_t	*result)
{
	uint32_t	ticks;
	spl_t		s;

	LOCK_RTC(s);
	ticks = get_uptime_ticks();
	*result = rtclock.abstime;
	UNLOCK_RTC(s);

	*result += ticks;
}
/*
 * Compute a deadline: the current uptime plus an interval given
 * as (interval * scale_factor) nanoseconds.
 */
void
clock_interval_to_deadline(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	uint64_t	span;

	clock_get_uptime(result);
	clock_interval_to_absolutetime_interval(interval, scale_factor, &span);
	*result += span;
}
/*
 * Convert an interval expressed as (interval * scale_factor)
 * nanoseconds into absolute-time units.  Absolute time on this
 * platform is nanoseconds, so this is a widening multiply.
 */
void
clock_interval_to_absolutetime_interval(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	uint64_t	value = interval;

	value *= scale_factor;
	*result = value;
}
/*
 * Compute a deadline: the current uptime plus an interval already
 * expressed in absolute-time (ns) units.
 */
void
clock_absolutetime_interval_to_deadline(
	uint64_t	abstime,
	uint64_t	*result)
{
	uint64_t	now;

	clock_get_uptime(&now);
	*result = now + abstime;
}
/*
 * Convert absolute time to nanoseconds.  The absolute timebase on
 * this platform is already nanoseconds, so the conversion is the
 * identity.
 */
void
absolutetime_to_nanoseconds(
	uint64_t	abstime,
	uint64_t	*result)
{
	uint64_t	nanos = abstime;

	*result = nanos;
}
/*
 * Convert nanoseconds to absolute time.  The absolute timebase on
 * this platform is already nanoseconds, so the conversion is the
 * identity.
 */
void
nanoseconds_to_absolutetime(
	uint64_t	nanoseconds,
	uint64_t	*result)
{
	uint64_t	abs = nanoseconds;

	*result = abs;
}
/*
 * Measure how long delay(us) actually takes, in microseconds,
 * using i8254 counter 0: load the counter with 0xffff, run the
 * delay, latch and read back, and convert the clocks consumed to
 * microseconds.  Used by calibrate_delay(); assumes the PIT is
 * free for this use at calibration time.
 *
 * The clocks-to-microseconds product is computed in 64 bits:
 * with up to 0xffff clocks consumed, clocks * 1000000 overflows
 * a 32-bit unsigned (the original computation silently wrapped
 * for delays longer than ~3.6 ms).
 */
int
measure_delay(
	int us)
{
	unsigned int	lsb, val;

	outb(PITCTL_PORT, PIT_C0|PIT_NDIVMODE|PIT_READMODE);
	outb(PITCTR0_PORT, 0xff);	/* load counter 0 with 0xffff */
	outb(PITCTR0_PORT, 0xff);
	delay(us);
	outb(PITCTL_PORT, PIT_C0);	/* latch counter 0 */
	lsb = inb(PITCTR0_PORT);
	val = (inb(PITCTR0_PORT) << 8) | lsb;
	val = 0xffff - val;		/* PIT clocks consumed while delaying */
	return ((int)(((uint64_t)val * 1000000) / CLKNUM));
}
/*
 * Calibrate the software delay loop: repeatedly measure how long
 * delay(microdata) actually takes and rescale delaycount so the
 * measured time converges on microdata microseconds.  Runs ten
 * rounds and prints each adjusted count.
 */
void
calibrate_delay(void)
{
	unsigned	val;
	int		prev = 0;
	register int	i;

	printf("adjusting delay count: %d", delaycount);
	for (i=0; i<10; i++) {
		prev = delaycount;
		val = measure_delay(microdata);
		if (val == 0) {
			/* too fast to measure: double and retry */
			delaycount *= 2;
		} else {
			/* rescale: delaycount *= microdata/val, rounding up */
			delaycount *= microdata;
			delaycount += val-1;
			delaycount /= val;
		}
		/* delaycount is unsigned, so this effectively tests == 0 */
		if (delaycount <= 0)
			delaycount = 1;
		if (delaycount != prev)
			printf(" %d", delaycount);
	}
	printf("\n");
}
#if MACH_KDB
void
test_delay(void);

/*
 * Debugger aid: print requested vs measured delay for a range of
 * microsecond values (0-9 by ones, then 10-100 by tens).
 *
 * The loop index was declared `register i;` -- implicit int is
 * invalid since C99, so it is now declared explicitly.
 */
void
test_delay(void)
{
	register int	i;

	for (i = 0; i < 10; i++)
		printf("%d, %d\n", i, measure_delay(i));
	for (i = 10; i <= 100; i+=10)
		printf("%d, %d\n", i, measure_delay(i));
}
#endif	/* MACH_KDB */