#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include <kdebug.h>
#include <sys/kdebug.h>
#define DBG_UIO_COPYOUT 16
#define DBG_UIO_COPYIN 17
/*
 * uiomove: move 'n' bytes between the kernel buffer 'cp' and the memory
 * described by 'uio'.  Thin compatibility wrapper that zero-extends the
 * 32-bit kernel virtual address to 64 bits and defers to uiomove64().
 *
 * Returns 0 on success or an errno value from the underlying copy.
 */
int
uiomove(caddr_t cp, int n, struct uio *uio)
{
	/* Widen through unsigned int so the VA is zero-extended, not sign-extended. */
	return uiomove64((addr64_t)((unsigned int)cp), n, uio);
}
/*
 * uiomove64: move 'n' bytes between the kernel/physical address 'cp'
 * and the memory described by 'uio', in the direction given by
 * uio->uio_rw:
 *
 *   UIO_READ  - copy from 'cp' out to the uio's iovecs
 *   UIO_WRITE - copy from the uio's iovecs into 'cp'
 *
 * The iovec list, uio_resid, and uio_offset are advanced as data is
 * moved.  Returns 0 on success or an errno value (e.g. EFAULT) from
 * the underlying copy primitive; on error the uio is not advanced
 * past the failed chunk.
 */
int
uiomove64(addr64_t cp, int n, struct uio *uio)
{
	register struct iovec *iov;
	u_int cnt;
	int error = 0;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
		panic("uiomove: mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != current_proc())
		panic("uiomove proc");
#endif

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			/* This iovec is exhausted; advance to the next one. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					(caddr_t)cp, iov->iov_base, cnt, 0, 0);

				error = copyout((caddr_t)cp, iov->iov_base, cnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					(caddr_t)cp, iov->iov_base, cnt, 0, 0);
			} else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					iov->iov_base, (caddr_t)cp, cnt, 0, 0);

				error = copyin(iov->iov_base, (caddr_t)cp, cnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					iov->iov_base, (caddr_t)cp, cnt, 0, 0);
			}
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				error = copywithin((caddr_t)cp, iov->iov_base,
						cnt);
			else
				error = copywithin(iov->iov_base, (caddr_t)cp,
						cnt);
			/*
			 * Bail out on failure for consistency with the other
			 * segment cases; do not advance the uio past a copy
			 * that did not complete.
			 */
			if (error)
				return (error);
			break;

		case UIO_PHYS_USERSPACE:
			if (uio->uio_rw == UIO_READ) {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					cp, iov->iov_base, cnt, 1, 0);

				/* copypv failures are collapsed to EFAULT for callers. */
				if ((error = copypv((addr64_t)cp, (addr64_t)((unsigned int)iov->iov_base), cnt, cppvPsrc | cppvNoRefSrc)) != 0)
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					cp, iov->iov_base, cnt, 1, 0);
			} else {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					iov->iov_base, cp, cnt, 1, 0);

				if ((error = copypv((addr64_t)((unsigned int)iov->iov_base), (addr64_t)cp, cnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk)) != 0)
					error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					iov->iov_base, cp, cnt, 1, 0);
			}
			if (error)
				return (error);
			break;
		}

		/* Advance the iovec, the uio bookkeeping, and our own cursor. */
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	return (error);
}
/*
 * ureadc: "read" the single character 'c' into the area described by
 * 'uio' -- deliver one byte to the destination buffer and advance the
 * uio past it.
 *
 * Returns 0 on success, or EFAULT if the user-space store faults.
 * Panics if the uio has no residual count or no iovecs left.
 */
int
ureadc(int c, struct uio *uio)
{
	register struct iovec *iov;

	if (uio->uio_resid <= 0)
		panic("ureadc: non-positive resid");
again:
	/*
	 * Use <= 0 (not == 0) so a corrupted negative iovcnt is caught
	 * here too, matching the panic message and uwritec()'s check.
	 */
	if (uio->uio_iovcnt <= 0)
		panic("ureadc: non-positive iovcnt");
	iov = uio->uio_iov;
	if (iov->iov_len <= 0) {
		/* Skip exhausted iovecs. */
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		*iov->iov_base = c;
		break;

	case UIO_USERISPACE:
		if (suibyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;
	}
	iov->iov_base++;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}
#if defined(vax) || defined(ppc)
/*
 * uwritec: "write" a single character from the area described by
 * 'uio' -- fetch the next byte from the source buffer and advance the
 * uio past it.
 *
 * Returns the character read (0-255), or -1 if the uio is exhausted
 * or the user-space fetch faults.  Panics on a non-positive iovcnt
 * or an unrecognized uio_segflg.
 */
int
uwritec(struct uio *uio)
{
	register struct iovec *iov;
	register int c;

	if (uio->uio_resid <= 0)
		return (-1);
again:
	if (uio->uio_iovcnt <= 0)
		panic("uwritec: non-positive iovcnt");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		/* Skip exhausted iovecs; -1 if none remain. */
		uio->uio_iov++;
		if (--uio->uio_iovcnt == 0)
			return (-1);
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		c = fubyte(iov->iov_base);
		break;

	case UIO_SYSSPACE:
		c = *iov->iov_base & 0377;	/* mask to an unsigned byte */
		break;

	case UIO_USERISPACE:
		c = fuibyte(iov->iov_base);
		break;

	default:
		c = 0;	/* quiet uninitialized-use warnings; panic does not return */
		panic("uwritec: bogus uio_segflg");
		break;
	}
	if (c < 0)
		return (-1);
	iov->iov_base++;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (c);
}
#endif /* vax || ppc */
/*
 * hashinit: allocate and zero-initialize a hash table of list heads.
 *
 * The table size is the largest power of two that does not exceed
 * 'elements', so the returned mask can be ANDed with a hash value to
 * index the table.  'type' is the malloc type tag for the allocation
 * (M_WAITOK, so this may block but does not fail).
 *
 * On return, *hashmask holds (table size - 1) and the table pointer is
 * returned; the caller owns the allocation and frees it with FREE().
 * Panics if 'elements' is not positive.
 */
void *
hashinit(int elements, int type, u_long *hashmask)
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad cnt");
	/* Round down to the largest power of two <= elements. */
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	MALLOC(hashtbl, struct generic *,
	       (u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	bzero(hashtbl, (u_long)hashsize * sizeof(*hashtbl));
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*hashmask = hashsize - 1;
	return (hashtbl);
}