#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <mach_kdp.h>
#include <kdp/kdp_udp.h>
#if !MACH_KDP
#include <kdp/kdp_callout.h>
#endif
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/caches_internal.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <arm/misc_protos.h>
#include <sys/errno.h>
#define INT_SIZE (BYTE_SIZE * sizeof (int))
/*
 * bcopy_phys: copy `bytes` bytes from physical address `src` to physical
 * address `dst`.
 *
 * Fast path: when the destination is writable through the kernel's
 * physical aperture (mmu_kvtop_wpreflight) and both pages carry the
 * default (cacheable) WIMG attributes, copy directly via phystokv().
 *
 * Slow path: map each page through a per-CPU copy window using the
 * page's own cache attributes.  Preemption is disabled so the windows
 * cannot migrate to another CPU mid-copy.  The copy must not cross a
 * page boundary on either side (enforced by panic below).
 */
void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
unsigned int src_index;
unsigned int dst_index;
vm_offset_t src_offset;
vm_offset_t dst_offset;
unsigned int cpu_num;
unsigned int wimg_bits_src, wimg_bits_dst;
ppnum_t pn_src = (src >> PAGE_SHIFT);
ppnum_t pn_dst = (dst >> PAGE_SHIFT);
wimg_bits_src = pmap_cache_attributes(pn_src);
wimg_bits_dst = pmap_cache_attributes(pn_dst);
/* Fast path: both pages default-cacheable and dst writable via the
 * physical aperture — use the direct mapping. */
if (mmu_kvtop_wpreflight(phystokv((pmap_paddr_t) dst)) &&
((wimg_bits_src & VM_WIMG_MASK) == VM_WIMG_DEFAULT) &&
((wimg_bits_dst & VM_WIMG_MASK) == VM_WIMG_DEFAULT)) {
bcopy((char *)phystokv((pmap_paddr_t) src), (char *)phystokv((pmap_paddr_t) dst), bytes);
return;
}
src_offset = src & PAGE_MASK;
dst_offset = dst & PAGE_MASK;
/* Copy windows map a single page each; refuse spans that cross a page. */
if ((src_offset + bytes) > PAGE_SIZE || (dst_offset + bytes) > PAGE_SIZE)
panic("bcopy extends beyond copy windows");
/* Windows are per-CPU; block preemption so we stay on this CPU. */
mp_disable_preemption();
cpu_num = cpu_number();
src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ|VM_PROT_WRITE, wimg_bits_dst);
bcopy((char *)(pmap_cpu_windows_copy_addr(cpu_num, src_index)+src_offset),
(char *)(pmap_cpu_windows_copy_addr(cpu_num, dst_index)+dst_offset),
bytes);
pmap_unmap_cpu_windows_copy(src_index);
pmap_unmap_cpu_windows_copy(dst_index);
mp_enable_preemption();
}
/*
 * Non-cached physical bzero.  On this platform there is no separate
 * non-cached path; defer to bzero_phys(), which honors the page's
 * cache attributes itself.
 */
void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}
/*
 * bzero_phys: zero `bytes` bytes starting at physical address `src`.
 *
 * Default-cacheable pages are zeroed directly through the physical
 * aperture.  Other memory types are zeroed page-by-page through a
 * per-CPU copy window mapped with the page's cache attributes
 * (attributes are sampled once from the first page).
 */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
unsigned int wimg_bits;
ppnum_t pn = (src >> PAGE_SHIFT);
wimg_bits = pmap_cache_attributes(pn);
if ((wimg_bits & VM_WIMG_MASK) == VM_WIMG_DEFAULT) {
/* Fast path: direct mapping for normal cacheable memory. */
bzero((char *)phystokv((pmap_paddr_t) src), bytes);
} else {
/* Per-CPU windows: stay on this CPU for the duration. */
mp_disable_preemption();
unsigned int cpu_num = cpu_number();
while (bytes > 0) {
vm_offset_t offset = src & PAGE_MASK;
/* Clamp each pass to the remainder of the current page. */
uint32_t count = PAGE_SIZE - offset;
if (count > bytes)
count = bytes;
unsigned int index = pmap_map_cpu_windows_copy(src >> PAGE_SHIFT, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
bzero((char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset), count);
pmap_unmap_cpu_windows_copy(index);
src += count;
bytes -= count;
}
mp_enable_preemption();
}
}
/*
 * Read a 1-, 2-, or 4-byte datum from physical address `paddr` through a
 * per-CPU copy window mapped with the page's cache attributes.  Any
 * unrecognized size falls through to a 4-byte read.  Preemption is
 * disabled across the access so the copy window cannot migrate to
 * another CPU.
 *
 * Fix: removed a stray empty statement (doubled semicolon) after the
 * copywindow_vaddr assignment.
 */
static unsigned int
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	unsigned int index;
	unsigned int result;
	unsigned int wimg_bits;
	ppnum_t pn = (paddr >> PAGE_SHIFT);
	unsigned char s1;
	unsigned short s2;
	vm_offset_t copywindow_vaddr = 0;

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
	copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);

	switch (size) {
	case 1:
		s1 = *(volatile unsigned char *)(copywindow_vaddr);
		result = s1;
		break;
	case 2:
		s2 = *(volatile unsigned short *)(copywindow_vaddr);
		result = s2;
		break;
	case 4:
	default:
		result = *(volatile unsigned int *)(copywindow_vaddr);
		break;
	}

	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
	return result;
}
/*
 * Read a 64-bit datum from physical address `paddr` through a per-CPU
 * copy window.  Preemption is disabled across the access.
 *
 * Fix: `result` was declared `unsigned int`, silently truncating the
 * 64-bit load to 32 bits before it was returned.  It is now
 * `unsigned long long`, matching the load and the return type.
 */
static unsigned long long
ml_phys_read_long_long(pmap_paddr_t paddr)
{
	unsigned int index;
	unsigned long long result;
	unsigned int wimg_bits;
	ppnum_t pn = (paddr >> PAGE_SHIFT);

	mp_disable_preemption();
	wimg_bits = pmap_cache_attributes(pn);
	index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
	result = *(volatile unsigned long long *)(pmap_cpu_windows_copy_addr(cpu_number(), index)
	    | ((uint32_t)paddr & PAGE_MASK));
	pmap_unmap_cpu_windows_copy(index);
	mp_enable_preemption();
	return result;
}
/* Read a 32-bit word from 32-bit physical address `paddr`. */
unsigned int
ml_phys_read(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}
/* Read a 32-bit word from 32-bit physical address `paddr`. */
unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 4);
}
/* Read a 32-bit word from 64-bit physical address `paddr64`. */
unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}
/* Read a 32-bit word from 64-bit physical address `paddr64`. */
unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}
/* Read a 16-bit halfword from 32-bit physical address `paddr`. */
unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 2);
}
/* Read a 16-bit halfword from 64-bit physical address `paddr64`. */
unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}
/* Read one byte from 32-bit physical address `paddr`. */
unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 1);
}
/* Read one byte from 64-bit physical address `paddr64`. */
unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}
/* Read a 64-bit doubleword from 32-bit physical address `paddr`. */
unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr);
}
/* Read a 64-bit doubleword from 64-bit physical address `paddr64`. */
unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_long_long((pmap_paddr_t)paddr64);
}
/*
 * Write a 1-, 2-, or 4-byte datum to physical address `paddr` through a
 * per-CPU copy window mapped with the page's cache attributes.  Any
 * unrecognized size falls through to a 4-byte write.  Preemption is
 * disabled across the access so the window cannot migrate CPUs.
 */
static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
{
unsigned int index;
unsigned int wimg_bits;
ppnum_t pn = (paddr >> PAGE_SHIFT);
vm_offset_t copywindow_vaddr = 0;
mp_disable_preemption();
wimg_bits = pmap_cache_attributes(pn);
index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ|VM_PROT_WRITE, wimg_bits);
/* Window base OR page offset gives the access address. */
copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t) paddr & PAGE_MASK);
switch (size) {
case 1:
*(volatile unsigned char *)(copywindow_vaddr) = (unsigned char)data;
break;
case 2:
*(volatile unsigned short *)(copywindow_vaddr) = (unsigned short)data;
break;
case 4:
default:
*(volatile unsigned int *)(copywindow_vaddr) = (uint32_t)data;
break;
}
pmap_unmap_cpu_windows_copy(index);
mp_enable_preemption();
}
/*
 * Write a 64-bit datum to physical address `paddr` through a per-CPU
 * copy window.  Preemption is disabled across the access.
 */
static void
ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
{
unsigned int index;
unsigned int wimg_bits;
ppnum_t pn = (paddr >> PAGE_SHIFT);
mp_disable_preemption();
wimg_bits = pmap_cache_attributes(pn);
index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ|VM_PROT_WRITE, wimg_bits);
*(volatile unsigned long long *)(pmap_cpu_windows_copy_addr(cpu_number(), index)
| ((uint32_t)paddr & PAGE_MASK)) = data;
pmap_unmap_cpu_windows_copy(index);
mp_enable_preemption();
}
/* Write the low 8 bits of `data` to 32-bit physical address `paddr`. */
void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}
/* Write the low 8 bits of `data` to 64-bit physical address `paddr64`. */
void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}
/* Write the low 16 bits of `data` to 32-bit physical address `paddr`. */
void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}
/* Write the low 16 bits of `data` to 64-bit physical address `paddr64`. */
void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}
/* Write 32 bits of `data` to 32-bit physical address `paddr`. */
void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}
/* Write 32 bits of `data` to 64-bit physical address `paddr64`. */
void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}
/* Write 32 bits of `data` to 32-bit physical address `paddr`. */
void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}
/* Write 32 bits of `data` to 64-bit physical address `paddr64`. */
void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}
/* Write 64 bits of `data` to 32-bit physical address `paddr`. */
void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr, data);
}
/* Write 64 bits of `data` to 64-bit physical address `paddr64`. */
void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_long_long((pmap_paddr_t)paddr64, data);
}
/*
 * Set bit `bitno` (zero-based) in the bitmap `s`.
 *
 * Fix: shift an unsigned constant.  `1 << 31` (bitno % INT_SIZE ==
 * INT_SIZE - 1 on 32-bit int) left-shifts into the sign bit of a signed
 * int, which is undefined behavior in ISO C; `1U` makes it well-defined
 * while producing the same bit pattern.
 */
void
setbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] |= 1U << (bitno % INT_SIZE);
}
/*
 * Clear bit `bitno` (zero-based) in the bitmap `s`.
 *
 * Fix: shift an unsigned constant — left-shifting 1 into the sign bit
 * of a signed int is undefined behavior in ISO C.
 */
void
clrbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] &= ~(1U << (bitno % INT_SIZE));
}
/*
 * Test bit `bitno` (zero-based) in the bitmap `s`.  Returns nonzero
 * (the masked word) if the bit is set, 0 otherwise.
 *
 * Fix: compute the mask with an unsigned shift (left-shifting 1 into
 * the sign bit is UB) and cast back to int so callers still receive
 * the same bit pattern the original produced.
 */
int
testbit(int bitno, int *s)
{
	return s[bitno / INT_SIZE] & (int)(1U << (bitno % INT_SIZE));
}
/*
 * Return the index of the first (lowest-numbered) set bit in the
 * bitmap `s`.  Precondition: at least one bit must be set, otherwise
 * the scan walks past the end of the array.
 */
int
ffsbit(int *s)
{
	int base = 0;
	while (!*s) {
		++s;
		base += INT_SIZE;
	}
	return base + __builtin_ctz(*s);
}
/*
 * Find-first-set: return the 1-based index of the least-significant
 * set bit of `mask`, or 0 if no bit is set.
 */
int
ffs(unsigned int mask)
{
	return (mask == 0) ? 0 : __builtin_ctz(mask) + 1;
}
/*
 * 64-bit find-first-set: return the 1-based index of the
 * least-significant set bit of `mask`, or 0 if no bit is set.
 */
int
ffsll(unsigned long long mask)
{
	return (mask == 0) ? 0 : __builtin_ctzll(mask) + 1;
}
/*
 * Find-last-set: return the 1-based index of the most-significant set
 * bit of `mask`, or 0 if no bit is set.
 */
int
fls(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}
	return (int)(sizeof(mask) * 8) - __builtin_clz(mask);
}
/*
 * 64-bit find-last-set: return the 1-based index of the
 * most-significant set bit of `mask`, or 0 if no bit is set.
 */
int
flsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}
	return (int)(sizeof(mask) * 8) - __builtin_clzll(mask);
}
/*
 * Compare `len` bytes of pa and pb.  Returns 0 if the ranges are
 * equal; otherwise returns a nonzero value (the count of bytes not
 * yet verified equal when the first mismatch was found — callers
 * should test only for zero/nonzero).
 */
int
bcmp(
	const void *pa,
	const void *pb,
	size_t len)
{
	const char *x = (const char *) pa;
	const char *y = (const char *) pb;

	while (len != 0 && *x == *y) {
		x++;
		y++;
		len--;
	}
	return len;
}
/*
 * Compare `n` bytes of s1 and s2 as unsigned chars.  Returns 0 if
 * equal, otherwise the difference of the first mismatching byte pair
 * (negative if s1 < s2, positive if s1 > s2).
 */
int
memcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char *a = s1;
	const unsigned char *b = s2;

	for (; n != 0; n--, a++, b++) {
		if (*a != *b) {
			return *a - *b;
		}
	}
	return 0;
}
/*
 * copypv: copy `size` bytes between a source and sink that may each be
 * physical or virtual, per the `which` flag bits (cppvPsrc/cppvPsnk mark
 * physical addresses; cppvKmap marks kernel virtual; cppvFsrc/cppvFsnk
 * force a cache flush).  Returns KERN_SUCCESS or a copyin/copyout error.
 *
 * After the copy, source/sink caches are flushed either when explicitly
 * requested (cppvFsrc/cppvFsnk) or when a physical page's cache mode
 * makes the data potentially incoherent.
 */
kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
kern_return_t retval = KERN_SUCCESS;
void *from, *to;
unsigned int from_wimg_bits, to_wimg_bits;
from = CAST_DOWN(void *, source);
to = CAST_DOWN(void *, sink);
/* At least one side must be physical; otherwise both are virtual. */
if ((which & (cppvPsrc | cppvPsnk)) == 0)
panic("copypv: no more than 1 parameter may be virtual\n");
/* Physical addresses are accessed through the kernel direct map. */
if (which & cppvPsrc)
from = (void *)phystokv(from);
if (which & cppvPsnk)
to = (void *)phystokv(to);
/* Virtual, non-kernel source: copy in from user space. */
if ((which & (cppvPsrc | cppvKmap)) == 0)
retval = copyin((user_addr_t) from, to, size);
/* Virtual, non-kernel sink: copy out to user space. */
else if ((which & (cppvPsnk | cppvKmap)) == 0)
retval = copyout(from, (user_addr_t) to, size);
else
bcopy(from, to, size);
if (which & cppvFsrc) {
flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
} else if (which & cppvPsrc) {
/* Flush unless the page is already cache-coherent
 * (copyback or write-through). */
from_wimg_bits = pmap_cache_attributes(source >> PAGE_SHIFT);
if ((from_wimg_bits != VM_WIMG_COPYBACK) && (from_wimg_bits != VM_WIMG_WTHRU))
flush_dcache64(source, size, TRUE);
}
if (which & cppvFsnk) {
flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
} else if (which & cppvPsnk) {
/* NOTE(review): unlike the source path, write-through sinks ARE
 * flushed here — presumably intentional (writes must reach
 * memory); confirm against upstream. */
to_wimg_bits = pmap_cache_attributes(sink >> PAGE_SHIFT);
if (to_wimg_bits != VM_WIMG_COPYBACK)
flush_dcache64(sink, size, TRUE);
}
return retval;
}
/* Largest single user<->kernel copy permitted before copy_validate()
 * declares the request insane and panics (64 MB). */
const int copysize_limit_panic = (64 * 1024 * 1024);
/*
 * Validate a proposed user<->kernel copy of `nbytes` between
 * `user_addr` and `kernel_addr`.
 *
 * Returns 0 if the copy may proceed, EFAULT if the user range wraps or
 * extends into the kernel address range.  Panics (rather than failing)
 * when the kernel-side range is bad or the size exceeds
 * copysize_limit_panic — those indicate kernel bugs, not bad user input.
 */
static int
copy_validate(const user_addr_t user_addr,
uintptr_t kernel_addr, vm_size_t nbytes)
{
uintptr_t kernel_addr_last = kernel_addr + nbytes;
/* Kernel buffer must lie wholly within the kernel VA range and
 * must not wrap around. */
if (kernel_addr < VM_MIN_KERNEL_ADDRESS ||
kernel_addr > VM_MAX_KERNEL_ADDRESS ||
kernel_addr_last < kernel_addr ||
kernel_addr_last > VM_MAX_KERNEL_ADDRESS)
panic("%s(%p, %p, %u) - kaddr not in kernel", __func__,
(void *)user_addr, (void *)kernel_addr, nbytes);
user_addr_t user_addr_last = user_addr + nbytes;
/* User range must not wrap or reach the kernel VA base. */
if (user_addr_last < user_addr ||
user_addr_last > VM_MIN_KERNEL_ADDRESS)
return (EFAULT);
if (__improbable(nbytes > copysize_limit_panic))
panic("%s(%p, %p, %u) - transfer too large", __func__,
(void *)user_addr, (void *)kernel_addr, nbytes);
return (0);
}
/* Validate a user→kernel (copyin) transfer; returns 0 or EFAULT. */
int
copyin_validate(const user_addr_t ua, uintptr_t ka, vm_size_t nbytes)
{
	return copy_validate(ua, ka, nbytes);
}
/* Validate a kernel→user (copyout) transfer; returns 0 or EFAULT. */
int
copyout_validate(uintptr_t ka, const user_addr_t ua, vm_size_t nbytes)
{
	return copy_validate(ua, ka, nbytes);
}
#if MACH_ASSERT
extern int copyinframe(vm_address_t fp, char *frame);
/*
 * Capture up to `callstack_max` user-space return addresses for the
 * current thread into `buf`, zero-filling any unused slots.
 *
 * Records the user PC first, then walks the frame-pointer chain
 * starting at r7 (the ARM frame pointer), reading each frame record
 * with copyinframe().  The walk stops on a zero frame pointer, a
 * failed copyin, or a full buffer.
 */
void
machine_callstack(
uintptr_t * buf,
vm_size_t callstack_max)
{
uint32_t i=0;
uint32_t frame[2];
struct arm_saved_state* state = find_user_regs(current_thread());
if (!state) {
/* No user state (e.g. kernel-only thread): report all zeros. */
while (i<callstack_max)
buf[i++] = 0;
} else {
buf[i++] = (uintptr_t)state->pc;
frame[0] = state->r[7];
while (i<callstack_max && frame[0] != 0) {
/* copyinframe() refills frame[] from user memory:
 * frame[0] = next fp, frame[1] = return address
 * (presumed layout — confirm against copyinframe). */
if (copyinframe(frame[0], (void*) frame))
break;
buf[i++] = (uintptr_t)frame[1];
}
/* Zero-fill the remainder of the buffer. */
while (i<callstack_max)
buf[i++] = 0;
}
}
#endif
/* Big-endian mode is not supported on this platform; calling this is
 * a fatal error. */
int
clr_be_bit(void)
{
	panic("clr_be_bit");
	return 0;
}
/* Probed physical reads are unimplemented on this platform; calling
 * this is a fatal error. */
boolean_t
ml_probe_read(__unused vm_offset_t paddr, __unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}
/* 64-bit probed physical reads are unimplemented on this platform;
 * calling this is a fatal error. */
boolean_t
ml_probe_read_64(__unused addr64_t paddr, __unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}
/* Thread placement/policy hints are not implemented here; no-op. */
void
ml_thread_policy(__unused thread_t thread,
    __unused unsigned policy_id,
    __unused unsigned policy_info)
{
}
#if !MACH_KDP
/* Stub used when the kernel debugger (KDP) is compiled out: callouts
 * are silently discarded. */
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
	(void)fn;
	(void)arg;
}
#endif