#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf_internal.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/file_internal.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/user.h>
#include <sys/malloc.h>
#include <sys/disk.h>
#include <sys/uio_internal.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <miscfs/specfs/specdev.h>
#include <vfs/vfs_support.h>
#include <kern/assert.h>
#include <kern/task.h>
#include <sys/kdebug.h>
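/*
 * Vnode operations for special files (character and block devices),
 * together with the low-priority I/O throttling machinery consulted by
 * spec_strategy() and exported to file systems as the throttle_info_*
 * KPI.
 */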
extern dev_t chrtoblk(dev_t dev);
extern int iskmemdev(dev_t dev);
extern int bpfkqfilter(dev_t dev, struct knote *kn);
extern int ptsd_kqfilter(dev_t dev, struct knote *kn);
extern int ignore_is_ssd;
struct vnode *speclisth[SPECHSZ];
char devopn[] = "devopn";
char devio[] = "devio";
char devwait[] = "devwait";
char devin[] = "devin";
char devout[] = "devout";
char devioc[] = "devioc";
char devcls[] = "devcls";
#define VOPFUNC int (*)(void *)
int (**spec_vnodeop_p)(void *);
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
{ &vnop_default_desc, (VOPFUNC)vn_default_error },
{ &vnop_lookup_desc, (VOPFUNC)spec_lookup },
{ &vnop_create_desc, (VOPFUNC)err_create },
{ &vnop_mknod_desc, (VOPFUNC)err_mknod },
{ &vnop_open_desc, (VOPFUNC)spec_open },
{ &vnop_close_desc, (VOPFUNC)spec_close },
{ &vnop_access_desc, (VOPFUNC)spec_access },
{ &vnop_getattr_desc, (VOPFUNC)spec_getattr },
{ &vnop_setattr_desc, (VOPFUNC)spec_setattr },
{ &vnop_read_desc, (VOPFUNC)spec_read },
{ &vnop_write_desc, (VOPFUNC)spec_write },
{ &vnop_ioctl_desc, (VOPFUNC)spec_ioctl },
{ &vnop_select_desc, (VOPFUNC)spec_select },
{ &vnop_revoke_desc, (VOPFUNC)nop_revoke },
{ &vnop_mmap_desc, (VOPFUNC)err_mmap },
{ &vnop_fsync_desc, (VOPFUNC)spec_fsync },
{ &vnop_remove_desc, (VOPFUNC)err_remove },
{ &vnop_link_desc, (VOPFUNC)err_link },
{ &vnop_rename_desc, (VOPFUNC)err_rename },
{ &vnop_mkdir_desc, (VOPFUNC)err_mkdir },
{ &vnop_rmdir_desc, (VOPFUNC)err_rmdir },
{ &vnop_symlink_desc, (VOPFUNC)err_symlink },
{ &vnop_readdir_desc, (VOPFUNC)err_readdir },
{ &vnop_readlink_desc, (VOPFUNC)err_readlink },
{ &vnop_inactive_desc, (VOPFUNC)nop_inactive },
{ &vnop_reclaim_desc, (VOPFUNC)nop_reclaim },
{ &vnop_strategy_desc, (VOPFUNC)spec_strategy },
{ &vnop_pathconf_desc, (VOPFUNC)spec_pathconf },
{ &vnop_advlock_desc, (VOPFUNC)err_advlock },
{ &vnop_bwrite_desc, (VOPFUNC)spec_bwrite },
{ &vnop_pagein_desc, (VOPFUNC)err_pagein },
{ &vnop_pageout_desc, (VOPFUNC)err_pageout },
{ &vnop_copyfile_desc, (VOPFUNC)err_copyfile },
{ &vnop_blktooff_desc, (VOPFUNC)spec_blktooff },
{ &vnop_offtoblk_desc, (VOPFUNC)spec_offtoblk },
{ &vnop_blockmap_desc, (VOPFUNC)spec_blockmap },
{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
{ &spec_vnodeop_p, spec_vnodeop_entries };
static void set_blocksize(vnode_t, dev_t);
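/*
 * I/O throttling tiers.  TIER0 is unthrottled; TIER1 and TIER2 are
 * progressively lower priority.  THROTTLE_LEVEL_THROTTLED marks the
 * first tier that is subject to throttling.
 */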
#define THROTTLE_LEVEL_NONE -1
#define THROTTLE_LEVEL_TIER0 0
#define THROTTLE_LEVEL_THROTTLED 1
#define THROTTLE_LEVEL_TIER1 1
#define THROTTLE_LEVEL_TIER2 2
#define THROTTLE_LEVEL_START 0
#define THROTTLE_LEVEL_END 2
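/*
 * Per-device throttling state.  A static entry per BSD disk unit lives
 * in _throttle_io_info[]; file systems may also allocate their own via
 * throttle_info_create(), in which case throttle_alloc is set and the
 * structure is freed when its refcount drops to zero.
 */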
struct _throttle_io_info_t {
struct timeval throttle_last_IO_timestamp[THROTTLE_LEVEL_END + 1];
struct timeval throttle_last_write_timestamp;
struct timeval throttle_start_IO_period_timestamp;
TAILQ_HEAD( , uthread) throttle_uthlist;
lck_mtx_t throttle_lock;
thread_call_t throttle_timer_call;
int32_t throttle_timer_running;
int32_t throttle_io_count;
int32_t throttle_io_count_begin;
int32_t throttle_io_period;
uint32_t throttle_io_period_num;
int32_t throttle_refcnt;
int32_t throttle_alloc;
};
struct _throttle_io_info_t _throttle_io_info[LOWPRI_MAX_NUM_DEV];
static void throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut, int policy, int flags, boolean_t isssd);
static int throttle_get_thread_throttle_level(uthread_t ut, int policy);
__private_extern__ int32_t throttle_legacy_process_count = 0;
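/*
 * Trivial lookup routine that always fails.
 */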
int
spec_lookup(struct vnop_lookup_args *ap)
{
*ap->a_vpp = NULL;
return (ENOTDIR);
}
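/*
 * Set the default block size for a device vnode: ask the driver via
 * its d_psize entry point when one exists, falling back to DEV_BSIZE.
 */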
static void
set_blocksize(struct vnode *vp, dev_t dev)
{
int (*size)(dev_t);
int rsize;
if ((major(dev) < nblkdev) && (size = bdevsw[major(dev)].d_psize)) {
rsize = (*size)(dev);
if (rsize <= 0)
vp->v_specsize = DEV_BSIZE;
else
vp->v_specsize = rsize;
}
else
vp->v_specsize = DEV_BSIZE;
}
void
set_fsblocksize(struct vnode *vp)
{
if (vp->v_type == VBLK) {
dev_t dev = (dev_t)vp->v_rdev;
int maj = major(dev);
if ((u_int)maj >= (u_int)nblkdev)
return;
vnode_lock(vp);
set_blocksize(vp, dev);
vnode_unlock(vp);
}
}
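/*
 * Open a special file.  Enforces securelevel restrictions on disk and
 * kernel-memory devices, calls the driver's open routine, and for disk
 * character devices probes the throttling parameters (throttle mask,
 * SSD-ness) so subsequent I/O can be throttled per device.
 */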
int
spec_open(struct vnop_open_args *ap)
{
struct proc *p = vfs_context_proc(ap->a_context);
kauth_cred_t cred = vfs_context_ucred(ap->a_context);
struct vnode *vp = ap->a_vp;
dev_t bdev, dev = (dev_t)vp->v_rdev;
int maj = major(dev);
int error;
if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
return (ENXIO);
switch (vp->v_type) {
case VCHR:
if ((u_int)maj >= (u_int)nchrdev)
return (ENXIO);
if (cred != FSCRED && (ap->a_mode & FWRITE)) {
if (securelevel >= 2 && isdisk(dev, VCHR))
return (EPERM);
if (securelevel >= 1) {
if ((bdev = chrtoblk(dev)) != NODEV && check_mountedon(bdev, VBLK, &error))
return (error);
if (iskmemdev(dev))
return (EPERM);
}
}
devsw_lock(dev, S_IFCHR);
error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
if (error == 0) {
vp->v_specinfo->si_opencount++;
}
devsw_unlock(dev, S_IFCHR);
if (error == 0 && (D_TYPEMASK & cdevsw[maj].d_type) == D_DISK && !vp->v_un.vu_specinfo->si_initted) {
int isssd = 0;
uint64_t throttle_mask = 0;
uint32_t devbsdunit = 0;
if (VNOP_IOCTL(vp, DKIOCGETTHROTTLEMASK, (caddr_t)&throttle_mask, 0, NULL) == 0) {
if (throttle_mask != 0 &&
VNOP_IOCTL(vp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ap->a_context) == 0) {
devbsdunit = num_trailing_0(throttle_mask);
vnode_lock(vp);
vp->v_un.vu_specinfo->si_isssd = isssd;
vp->v_un.vu_specinfo->si_devbsdunit = devbsdunit;
vp->v_un.vu_specinfo->si_throttle_mask = throttle_mask;
vp->v_un.vu_specinfo->si_throttleable = 1;
vp->v_un.vu_specinfo->si_initted = 1;
vnode_unlock(vp);
}
}
if (vp->v_un.vu_specinfo->si_initted == 0) {
vnode_lock(vp);
vp->v_un.vu_specinfo->si_initted = 1;
vnode_unlock(vp);
}
}
return (error);
case VBLK:
if ((u_int)maj >= (u_int)nblkdev)
return (ENXIO);
if (securelevel >= 2 && cred != FSCRED &&
(ap->a_mode & FWRITE) && isdisk(dev, VBLK))
return (EPERM);
if ( (error = vfs_mountedon(vp)) )
return (error);
devsw_lock(dev, S_IFBLK);
error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p);
if (!error) {
vp->v_specinfo->si_opencount++;
}
devsw_unlock(dev, S_IFBLK);
if (!error) {
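		/*
		 * Probe the device size: temporarily switch to 512-byte
		 * sectors so DKIOCGETBLOCKCOUNT reports 512-byte blocks,
		 * then restore the original block size.  If the original
		 * size cannot be restored, the open fails with ENXIO.
		 */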
u_int64_t blkcnt;
u_int32_t blksize;
int setsize = 0;
u_int32_t size512 = 512;
if (!VNOP_IOCTL(vp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, ap->a_context)) {
if (!VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, ap->a_context)) {
if (!VNOP_IOCTL(vp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, ap->a_context)) {
setsize = 1;
}
}
if (VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, ap->a_context))
error = ENXIO;
}
vnode_lock(vp);
set_blocksize(vp, dev);
if (setsize)
vp->v_specdevsize = blkcnt * (u_int64_t)size512;
else
vp->v_specdevsize = (u_int64_t)0;
vnode_unlock(vp);
}
		return (error);
default:
panic("spec_open type");
}
return (0);
}
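/*
 * Vnode op for read.  Character devices go straight to the driver;
 * block devices are read through the buffer cache a page at a time,
 * with read-ahead for sequential access.
 */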
int
spec_read(struct vnop_read_args *ap)
{
struct vnode *vp = ap->a_vp;
struct uio *uio = ap->a_uio;
struct buf *bp;
daddr64_t bn, nextbn;
long bsize, bscale;
int devBlockSize=0;
int n, on;
int error = 0;
dev_t dev;
#if DIAGNOSTIC
if (uio->uio_rw != UIO_READ)
panic("spec_read mode");
if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg))
panic("spec_read proc");
#endif
if (uio_resid(uio) == 0)
return (0);
switch (vp->v_type) {
case VCHR:
if ((D_TYPEMASK & cdevsw[major(vp->v_rdev)].d_type) == D_DISK && vp->v_un.vu_specinfo->si_throttleable) {
struct _throttle_io_info_t *throttle_info;
throttle_info = &_throttle_io_info[vp->v_un.vu_specinfo->si_devbsdunit];
throttle_info_update_internal(throttle_info, NULL, -1, 0, vp->v_un.vu_specinfo->si_isssd);
}
error = (*cdevsw[major(vp->v_rdev)].d_read)
(vp->v_rdev, uio, ap->a_ioflag);
return (error);
case VBLK:
if (uio->uio_offset < 0)
return (EINVAL);
dev = vp->v_rdev;
devBlockSize = vp->v_specsize;
if (devBlockSize > PAGE_SIZE)
return (EINVAL);
bscale = PAGE_SIZE / devBlockSize;
bsize = bscale * devBlockSize;
do {
on = uio->uio_offset % bsize;
bn = (daddr64_t)((uio->uio_offset / devBlockSize) &~ (bscale - 1));
if (vp->v_speclastr + bscale == bn) {
nextbn = bn + bscale;
error = buf_breadn(vp, bn, (int)bsize, &nextbn,
(int *)&bsize, 1, NOCRED, &bp);
} else
error = buf_bread(vp, bn, (int)bsize, NOCRED, &bp);
vnode_lock(vp);
vp->v_speclastr = bn;
vnode_unlock(vp);
n = bsize - buf_resid(bp);
if ((on > n) || error) {
if (!error)
error = EINVAL;
buf_brelse(bp);
return (error);
}
n = min((unsigned)(n - on), uio_resid(uio));
error = uiomove((char *)buf_dataptr(bp) + on, n, uio);
if (n + on == bsize)
buf_markaged(bp);
buf_brelse(bp);
} while (error == 0 && uio_resid(uio) > 0 && n != 0);
return (error);
default:
panic("spec_read type");
}
return (0);
}
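/*
 * Vnode op for write.  Character devices go straight to the driver;
 * block devices are written through the buffer cache a page at a time.
 */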
int
spec_write(struct vnop_write_args *ap)
{
struct vnode *vp = ap->a_vp;
struct uio *uio = ap->a_uio;
struct buf *bp;
daddr64_t bn;
int bsize, blkmask, bscale;
int io_sync;
int devBlockSize=0;
int n, on;
int error = 0;
dev_t dev;
#if DIAGNOSTIC
if (uio->uio_rw != UIO_WRITE)
panic("spec_write mode");
if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg))
panic("spec_write proc");
#endif
switch (vp->v_type) {
case VCHR:
if ((D_TYPEMASK & cdevsw[major(vp->v_rdev)].d_type) == D_DISK && vp->v_un.vu_specinfo->si_throttleable) {
struct _throttle_io_info_t *throttle_info;
throttle_info = &_throttle_io_info[vp->v_un.vu_specinfo->si_devbsdunit];
throttle_info_update_internal(throttle_info, NULL, -1, 0, vp->v_un.vu_specinfo->si_isssd);
microuptime(&throttle_info->throttle_last_write_timestamp);
}
error = (*cdevsw[major(vp->v_rdev)].d_write)
(vp->v_rdev, uio, ap->a_ioflag);
return (error);
case VBLK:
if (uio_resid(uio) == 0)
return (0);
if (uio->uio_offset < 0)
return (EINVAL);
io_sync = (ap->a_ioflag & IO_SYNC);
dev = (vp->v_rdev);
devBlockSize = vp->v_specsize;
if (devBlockSize > PAGE_SIZE)
			return (EINVAL);
bscale = PAGE_SIZE / devBlockSize;
blkmask = bscale - 1;
bsize = bscale * devBlockSize;
do {
bn = (daddr64_t)((uio->uio_offset / devBlockSize) &~ blkmask);
on = uio->uio_offset % bsize;
n = min((unsigned)(bsize - on), uio_resid(uio));
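			/*
			 * buf_getblk() is used only for a full, page-aligned
			 * block write within the known device size; anything
			 * else falls back on buf_bread() below.  Note that
			 * when the write would span the end of the device,
			 * this assigns the overshoot (offset + n minus
			 * v_specdevsize) to n rather than the clamped length;
			 * its practical effect is to make n != bsize and
			 * force the buf_bread() path.
			 */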
if (n == bsize &&
vp->v_specdevsize != (u_int64_t)0 &&
(uio->uio_offset + (u_int64_t)n) > vp->v_specdevsize) {
n = (uio->uio_offset + (u_int64_t)n) - vp->v_specdevsize;
}
if (n == bsize)
bp = buf_getblk(vp, bn, bsize, 0, 0, BLK_WRITE);
else
error = (int)buf_bread(vp, bn, bsize, NOCRED, &bp);
if (!error)
error = (int)buf_error(bp);
if (error) {
buf_brelse(bp);
return (error);
}
n = min(n, bsize - buf_resid(bp));
error = uiomove((char *)buf_dataptr(bp) + on, n, uio);
if (error) {
buf_brelse(bp);
return (error);
}
buf_markaged(bp);
if (io_sync)
error = buf_bwrite(bp);
else {
if ((n + on) == bsize)
error = buf_bawrite(bp);
else
error = buf_bdwrite(bp);
}
} while (error == 0 && uio_resid(uio) > 0 && n != 0);
return (error);
default:
panic("spec_write type");
}
return (0);
}
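/*
 * Device ioctl operation.  For block devices, DKIOCUNMAP extents are
 * additionally traced through kdebug when tracing is enabled.
 */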
int
spec_ioctl(struct vnop_ioctl_args *ap)
{
proc_t p = vfs_context_proc(ap->a_context);
dev_t dev = ap->a_vp->v_rdev;
int retval = 0;
KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_IOCTL, 0) | DBG_FUNC_START,
(unsigned int)dev, (unsigned int)ap->a_command, (unsigned int)ap->a_fflag, (unsigned int)ap->a_vp->v_type, 0);
switch (ap->a_vp->v_type) {
case VCHR:
retval = (*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
ap->a_fflag, p);
break;
case VBLK:
if (kdebug_enable) {
if (ap->a_command == DKIOCUNMAP) {
dk_unmap_t *unmap;
dk_extent_t *extent;
uint32_t i;
unmap = (dk_unmap_t *)ap->a_data;
extent = unmap->extents;
for (i = 0; i < unmap->extentsCount; i++, extent++) {
KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_IOCTL, 1) | DBG_FUNC_NONE, dev, extent->offset/ap->a_vp->v_specsize, extent->length, 0, 0);
}
}
}
retval = (*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data, ap->a_fflag, p);
break;
default:
panic("spec_ioctl");
}
KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_IOCTL, 0) | DBG_FUNC_END,
(unsigned int)dev, (unsigned int)ap->a_command, (unsigned int)ap->a_fflag, retval, 0);
return (retval);
}
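/*
 * Select/poll support: character devices are handed to the driver's
 * d_select routine; all other types always report ready.
 */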
int
spec_select(struct vnop_select_args *ap)
{
proc_t p = vfs_context_proc(ap->a_context);
dev_t dev;
switch (ap->a_vp->v_type) {
default:
return (1);
case VCHR:
dev = ap->a_vp->v_rdev;
return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_wql, p);
}
}
static int filt_specattach(struct knote *kn);
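/*
 * Attach a knote to a special vnode.  TTYs use the generic select-based
 * filter below; anything else is offered to bpf.
 */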
int
spec_kqfilter(vnode_t vp, struct knote *kn)
{
dev_t dev;
int err = EINVAL;
dev = vnode_specrdev(vp);
if (vnode_istty(vp)) {
err = filt_specattach(kn);
} else {
err = bpfkqfilter(dev, kn);
}
return err;
}
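/*
 * Synch buffers associated with a block device.  Character devices have
 * no cached buffers, so there is nothing to do.
 */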
int
spec_fsync_internal(vnode_t vp, int waitfor, __unused vfs_context_t context)
{
if (vp->v_type == VCHR)
return (0);
buf_flushdirtyblks(vp, (waitfor == MNT_WAIT || waitfor == MNT_DWAIT), 0, "spec_fsync");
return (0);
}
int
spec_fsync(struct vnop_fsync_args *ap)
{
return spec_fsync_internal(ap->a_vp, ap->a_waitfor, ap->a_context);
}
extern int hard_throttle_on_root;
void throttle_init(void);
#define LOWPRI_THROTTLE_WINDOW_MSECS 500
#define LOWPRI_LEGACY_THROTTLE_WINDOW_MSECS 200
#define LOWPRI_IO_PERIOD_MSECS 200
#define LOWPRI_IO_PERIOD_SSD_MSECS 20
#define LOWPRI_TIMER_PERIOD_MSECS 10
int lowpri_throttle_window_msecs = LOWPRI_THROTTLE_WINDOW_MSECS;
int lowpri_legacy_throttle_window_msecs = LOWPRI_LEGACY_THROTTLE_WINDOW_MSECS;
int lowpri_io_period_msecs = LOWPRI_IO_PERIOD_MSECS;
int lowpri_io_period_ssd_msecs = LOWPRI_IO_PERIOD_SSD_MSECS;
int lowpri_timer_period_msecs = LOWPRI_TIMER_PERIOD_MSECS;
#if CONFIG_EMBEDDED
#define THROTTLE_WINDOW (lowpri_throttle_window_msecs)
#else
#define THROTTLE_WINDOW (throttle_legacy_process_count == 0 ? lowpri_throttle_window_msecs : lowpri_legacy_throttle_window_msecs)
#endif
#if 0
#define DEBUG_ALLOC_THROTTLE_INFO(format, debug_info, args...) \
do { \
if ((debug_info)->alloc) \
printf("%s: "format, __FUNCTION__, ## args); \
} while(0)
#else
#define DEBUG_ALLOC_THROTTLE_INFO(format, debug_info, args...)
#endif
SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_window_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &lowpri_throttle_window_msecs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_legacy_throttle_window_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &lowpri_legacy_throttle_window_msecs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_io_period_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &lowpri_io_period_msecs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_io_period_ssd_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &lowpri_io_period_ssd_msecs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_timer_period_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &lowpri_timer_period_msecs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_legacy_process_count, CTLFLAG_RD | CTLFLAG_LOCKED, &throttle_legacy_process_count, 0, "");
static lck_grp_t *throttle_mtx_grp;
static lck_attr_t *throttle_mtx_attr;
static lck_grp_attr_t *throttle_mtx_grp_attr;
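/*
 * Count trailing zero bits.  Used to convert a one-bit-per-device
 * throttle mask into an index into _throttle_io_info[].
 */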
int
num_trailing_0(uint64_t n)
{
if (n == 0)
return sizeof(n) * 8;
int count = 0;
while (!ISSET(n, 1)) {
n >>= 1;
++count;
}
return count;
}
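/*
 * Release a reference on a throttle info structure, freeing it when the
 * last reference to a dynamically allocated structure goes away.
 */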
static int
throttle_info_rel(struct _throttle_io_info_t *info)
{
SInt32 oldValue = OSDecrementAtomic(&info->throttle_refcnt);
DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n",
info, (int)(oldValue -1), info );
if (oldValue == 0)
panic("throttle info ref cnt went negative!");
if ((info->throttle_refcnt == 0) && (info->throttle_alloc)) {
DEBUG_ALLOC_THROTTLE_INFO("Freeing info = %p\n", info);
lck_mtx_destroy(&info->throttle_lock, throttle_mtx_grp);
FREE(info, M_TEMP);
}
return oldValue;
}
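/*
 * Take a reference on a throttle info structure.
 */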
static SInt32
throttle_info_ref(struct _throttle_io_info_t *info)
{
SInt32 oldValue = OSIncrementAtomic(&info->throttle_refcnt);
DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n",
info, (int)(oldValue -1), info );
if (info->throttle_alloc && (oldValue == 0))
panic("Taking a reference without calling create throttle info!\n");
return oldValue;
}
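/*
 * (Re)arm the throttle timer.  Returns the first tier that has seen I/O
 * within THROTTLE_WINDOW, or THROTTLE_LEVEL_END if no tier has, in
 * which case the timer is left stopped.  The timer holds its own
 * reference on the info structure while it is scheduled.  Called with
 * the throttle_lock held.
 */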
static uint32_t
throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count)
{
struct timeval elapsed;
uint64_t elapsed_msecs;
int throttle_level;
uint64_t deadline;
if (update_io_count == TRUE) {
info->throttle_io_count_begin = info->throttle_io_count;
info->throttle_io_period_num++;
microuptime(&info->throttle_start_IO_period_timestamp);
}
for (throttle_level = THROTTLE_LEVEL_START; throttle_level < THROTTLE_LEVEL_END; throttle_level++) {
microuptime(&elapsed);
timevalsub(&elapsed, &info->throttle_last_IO_timestamp[throttle_level]);
elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000);
if (elapsed_msecs < (uint64_t)THROTTLE_WINDOW) {
break;
}
}
if (throttle_level >= THROTTLE_LEVEL_END) {
info->throttle_timer_running = 0;
return (THROTTLE_LEVEL_END);
}
if (info->throttle_timer_running == 0) {
throttle_info_ref(info);
info->throttle_timer_running = 1;
}
clock_interval_to_deadline(lowpri_timer_period_msecs, 1000000, &deadline);
thread_call_enter_delayed(info->throttle_timer_call, deadline);
return (throttle_level);
}
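/*
 * Throttle timer callback: wakes throttled threads whose tier is no
 * longer being held back and, once a full I/O period has elapsed,
 * additionally releases the waiter at the head of the list.
 */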
static void
throttle_timer(struct _throttle_io_info_t *info)
{
uthread_t ut, utlist;
struct timeval elapsed;
uint64_t elapsed_msecs;
int throttle_level;
boolean_t update_io_count = FALSE;
boolean_t need_wakeup = FALSE;
boolean_t need_release = FALSE;
lck_mtx_lock(&info->throttle_lock);
microuptime(&elapsed);
timevalsub(&elapsed, &info->throttle_start_IO_period_timestamp);
elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000);
if (elapsed_msecs >= (uint64_t)info->throttle_io_period) {
need_wakeup = TRUE;
update_io_count = TRUE;
}
if ((throttle_level = throttle_timer_start(info, update_io_count)) == THROTTLE_LEVEL_END) {
need_release = TRUE;
}
TAILQ_FOREACH_SAFE(ut, &info->throttle_uthlist, uu_throttlelist, utlist) {
if (throttle_level == THROTTLE_LEVEL_END || throttle_get_thread_throttle_level(ut, -1) <= throttle_level) {
TAILQ_REMOVE(&info->throttle_uthlist, ut, uu_throttlelist);
ut->uu_on_throttlelist = 0;
wakeup(&ut->uu_on_throttlelist);
}
}
if (need_wakeup && !TAILQ_EMPTY(&info->throttle_uthlist)) {
ut = (uthread_t)TAILQ_FIRST(&info->throttle_uthlist);
TAILQ_REMOVE(&info->throttle_uthlist, ut, uu_throttlelist);
ut->uu_on_throttlelist = 0;
wakeup(&ut->uu_on_throttlelist);
}
lck_mtx_unlock(&info->throttle_lock);
if (need_release == TRUE)
throttle_info_rel(info);
}
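/*
 * Initialize the static per-device throttle state: lock group, per-unit
 * mutexes, timer call-outs and waiter lists.  Called during startup.
 */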
void
throttle_init(void)
{
struct _throttle_io_info_t *info;
int i;
throttle_mtx_grp_attr = lck_grp_attr_alloc_init();
throttle_mtx_grp = lck_grp_alloc_init("throttle I/O", throttle_mtx_grp_attr);
throttle_mtx_attr = lck_attr_alloc_init();
for (i = 0; i < LOWPRI_MAX_NUM_DEV; i++) {
info = &_throttle_io_info[i];
lck_mtx_init(&info->throttle_lock, throttle_mtx_grp, throttle_mtx_attr);
info->throttle_timer_call = thread_call_allocate((thread_call_func_t)throttle_timer, (thread_call_param_t)info);
TAILQ_INIT(&info->throttle_uthlist);
}
}
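/*
 * Remove a thread from its device's throttle list and wake it if it is
 * no longer subject to throttling (e.g. after its I/O policy changed).
 */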
void
unthrottle_thread(uthread_t ut)
{
struct _throttle_io_info_t *info;
if ((info = ut->uu_throttle_info) == NULL)
return;
lck_mtx_lock(&info->throttle_lock);
if (ut->uu_on_throttlelist && throttle_get_thread_throttle_level(ut, -1) <= THROTTLE_LEVEL_THROTTLED) {
TAILQ_REMOVE(&info->throttle_uthlist, ut, uu_throttlelist);
ut->uu_on_throttlelist = 0;
wakeup(&ut->uu_on_throttlelist);
}
lck_mtx_unlock(&info->throttle_lock);
}
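/*
 * KPI: create and take a reference on a throttle info structure for a
 * file system to pass to throttle_info_update().  Every create must be
 * balanced by a throttle_info_release().
 *
 * A minimal usage sketch (the surrounding calls are illustrative, not
 * required):
 *
 *	void *ti = throttle_info_create();
 *	if (ti != NULL) {
 *		throttle_info_mount_ref(mp, ti);  // mount takes its own ref
 *		...
 *		throttle_info_update(ti, 0);      // on each I/O issued
 *		...
 *		throttle_info_mount_rel(mp);
 *		throttle_info_release(ti);
 *	}
 */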
void *
throttle_info_create(void)
{
struct _throttle_io_info_t *info;
MALLOC(info, struct _throttle_io_info_t *, sizeof(*info), M_TEMP, M_ZERO | M_WAITOK);
if (info == NULL)
return NULL;
DEBUG_ALLOC_THROTTLE_INFO("Creating info = %p\n", info, info );
info->throttle_alloc = TRUE;
lck_mtx_init(&info->throttle_lock, throttle_mtx_grp, throttle_mtx_attr);
info->throttle_timer_call = thread_call_allocate((thread_call_func_t)throttle_timer, (thread_call_param_t)info);
TAILQ_INIT(&info->throttle_uthlist);
OSIncrementAtomic(&info->throttle_refcnt);
return info;
}
void
throttle_info_release(void *throttle_info)
{
DEBUG_ALLOC_THROTTLE_INFO("Releaseing info = %p\n",
(struct _throttle_io_info_t *)throttle_info,
(struct _throttle_io_info_t *)throttle_info);
if (throttle_info)
throttle_info_rel(throttle_info);
}
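/*
 * KPI: associate a throttle info structure with a mount point.  Takes a
 * reference on behalf of the mount and drops any reference the mount
 * previously held.
 */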
void
throttle_info_mount_ref(mount_t mp, void *throttle_info)
{
if ((throttle_info == NULL) || (mp == NULL))
return;
throttle_info_ref(throttle_info);
if (mp->mnt_throttle_info)
throttle_info_rel(mp->mnt_throttle_info);
mp->mnt_throttle_info = throttle_info;
}
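/*
 * KPI: take a reference on the static throttle info entry for the
 * device identified by the throttle mask, returning it via the handle.
 */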
int
throttle_info_ref_by_mask(uint64_t throttle_mask, throttle_info_handle_t *throttle_info_handle)
{
int dev_index;
struct _throttle_io_info_t *info;
if (throttle_info_handle == NULL)
return EINVAL;
dev_index = num_trailing_0(throttle_mask);
info = &_throttle_io_info[dev_index];
throttle_info_ref(info);
*(struct _throttle_io_info_t**)throttle_info_handle = info;
return 0;
}
void
throttle_info_rel_by_mask(throttle_info_handle_t throttle_info_handle)
{
throttle_info_rel((struct _throttle_io_info_t*)throttle_info_handle);
}
void
throttle_info_mount_rel(mount_t mp)
{
if (mp->mnt_throttle_info)
throttle_info_rel(mp->mnt_throttle_info);
mp->mnt_throttle_info = NULL;
}
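/*
 * Map a mount point to its throttle info: prefer an info structure the
 * file system registered, then the entry for the mount's BSD disk unit,
 * and finally the catch-all last slot for NULL mounts.  The next two
 * routines share this lookup.
 */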
void
throttle_info_get_last_io_time(mount_t mp, struct timeval *tv)
{
struct _throttle_io_info_t *info;
if (mp == NULL)
info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
else if (mp->mnt_throttle_info == NULL)
info = &_throttle_io_info[mp->mnt_devbsdunit];
else
info = mp->mnt_throttle_info;
*tv = info->throttle_last_write_timestamp;
}
void
update_last_io_time(mount_t mp)
{
struct _throttle_io_info_t *info;
if (mp == NULL)
info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
else if (mp->mnt_throttle_info == NULL)
info = &_throttle_io_info[mp->mnt_devbsdunit];
else
info = mp->mnt_throttle_info;
microuptime(&info->throttle_last_write_timestamp);
}
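/*
 * Return the current thread's uthread and its effective disk I/O
 * policy.
 */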
int
throttle_get_io_policy(uthread_t *ut)
{
*ut = get_bsdthread_info(current_thread());
return (proc_get_task_selfdiskacc());
}
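/*
 * Map an I/O policy onto a throttle tier for the given thread.  A
 * policy of -1 means "look up the thread's current policy".
 */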
static int
throttle_get_thread_throttle_level(uthread_t ut, int policy)
{
int thread_throttle_level = THROTTLE_LEVEL_NONE;
if (ut == NULL)
ut = get_bsdthread_info(current_thread());
if (policy == -1)
policy = proc_get_diskacc(ut->uu_thread);
switch (policy) {
case IOPOL_DEFAULT:
case IOPOL_NORMAL:
thread_throttle_level = THROTTLE_LEVEL_TIER0;
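		/* FALLTHROUGH: NORMAL/DEFAULT share the uu_throttle_bc check below */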
case IOPOL_PASSIVE:
if (ut->uu_throttle_bc == TRUE)
thread_throttle_level = THROTTLE_LEVEL_TIER2;
break;
case IOPOL_THROTTLE:
thread_throttle_level = THROTTLE_LEVEL_TIER2;
break;
case IOPOL_UTILITY:
thread_throttle_level = THROTTLE_LEVEL_TIER1;
break;
default:
printf("unknown I/O policy %d", policy);
break;
}
return (thread_throttle_level);
}
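/*
 * Decide whether the current thread's next I/O against this device
 * should be throttled: returns 0 for no, 1 to sleep out whole throttle
 * periods, and 2 when I/O has already been issued during the current
 * period.
 */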
static int
throttle_io_will_be_throttled_internal(void * throttle_info)
{
struct _throttle_io_info_t *info = throttle_info;
struct timeval elapsed;
uint64_t elapsed_msecs;
int thread_throttle_level;
int throttle_level;
if ((thread_throttle_level = throttle_get_thread_throttle_level(NULL, -1)) < THROTTLE_LEVEL_THROTTLED)
return (0);
for (throttle_level = THROTTLE_LEVEL_START; throttle_level < thread_throttle_level; throttle_level++) {
microuptime(&elapsed);
timevalsub(&elapsed, &info->throttle_last_IO_timestamp[throttle_level]);
elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000);
if (elapsed_msecs < (uint64_t)THROTTLE_WINDOW)
break;
}
if (throttle_level >= thread_throttle_level) {
return (0);
}
if (info->throttle_io_count != info->throttle_io_count_begin) {
return (2);
}
return (1);
}
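/*
 * Mount-based wrapper for the throttle check above; the window argument
 * is no longer used.
 */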
int
throttle_io_will_be_throttled(__unused int lowpri_window_msecs, mount_t mp)
{
void *info;
if (mp == NULL)
info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
else if (mp->mnt_throttle_info == NULL)
info = &_throttle_io_info[mp->mnt_devbsdunit];
else
info = mp->mnt_throttle_info;
return throttle_io_will_be_throttled_internal(info);
}
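/*
 * Called when a thread with an open throttle window completes an I/O
 * request.  If the device is still seeing higher-priority I/O, the
 * thread sleeps on the device's throttle list until woken by the
 * throttle timer.  Clears the thread's window state and returns the
 * number of sleeps taken.
 */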
uint32_t
throttle_lowpri_io(int sleep_amount)
{
uthread_t ut;
struct _throttle_io_info_t *info;
int throttle_type = 0;
int sleep_cnt = 0;
int locked = 0;
uint32_t throttle_io_period_num = 0;
boolean_t insert_tail = TRUE;
ut = get_bsdthread_info(current_thread());
if (ut->uu_lowpri_window == 0)
return (0);
info = ut->uu_throttle_info;
if ((sleep_amount == 0) || (info == NULL))
goto done;
if (sleep_amount == 1 && ut->uu_throttle_bc == FALSE)
sleep_amount = 0;
throttle_io_period_num = info->throttle_io_period_num;
while ( (throttle_type = throttle_io_will_be_throttled_internal(info)) ) {
if (throttle_type == 1) {
if (sleep_amount == 0)
break;
if (info->throttle_io_period_num < throttle_io_period_num)
break;
if ((info->throttle_io_period_num - throttle_io_period_num) >= (uint32_t)sleep_amount)
break;
}
if (!locked) {
lck_mtx_lock(&info->throttle_lock);
locked = 1;
}
if (info->throttle_timer_running == 0) {
if (throttle_timer_start(info, TRUE) == THROTTLE_LEVEL_END)
goto done;
}
if (sleep_cnt == 0) {
KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START,
ut->uu_lowpri_window, info->throttle_io_period, info->throttle_io_count, 0, 0);
}
if (ut->uu_on_throttlelist == 0) {
if (insert_tail == TRUE)
TAILQ_INSERT_TAIL(&info->throttle_uthlist, ut, uu_throttlelist);
else
TAILQ_INSERT_HEAD(&info->throttle_uthlist, ut, uu_throttlelist);
ut->uu_on_throttlelist = 1;
}
msleep((caddr_t)&ut->uu_on_throttlelist, &info->throttle_lock, PRIBIO + 1, "throttle_lowpri_io", NULL);
sleep_cnt++;
if (sleep_amount == 0)
insert_tail = FALSE;
else if (info->throttle_io_period_num < throttle_io_period_num ||
(info->throttle_io_period_num - throttle_io_period_num) >= (uint32_t)sleep_amount) {
insert_tail = FALSE;
sleep_amount = 0;
}
}
done:
if (ut->uu_on_throttlelist) {
if (!locked) {
lck_mtx_lock(&info->throttle_lock);
locked = 1;
}
if (ut->uu_on_throttlelist) {
TAILQ_REMOVE(&info->throttle_uthlist, ut, uu_throttlelist);
ut->uu_on_throttlelist = 0;
}
}
if (locked)
lck_mtx_unlock(&info->throttle_lock);
if (sleep_cnt)
KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END,
ut->uu_lowpri_window, info->throttle_io_period, info->throttle_io_count, 0, 0);
if (info)
throttle_info_rel(info);
ut->uu_throttle_info = NULL;
ut->uu_throttle_bc = FALSE;
ut->uu_lowpri_window = 0;
return (sleep_cnt);
}
void throttle_set_thread_io_policy(int policy)
{
proc_apply_thread_selfdiskacc(policy);
}
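/*
 * Drop the thread's throttle window and its reference on the info.
 */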
static
void throttle_info_reset_window(uthread_t ut)
{
struct _throttle_io_info_t *info;
if ( (info = ut->uu_throttle_info) ) {
throttle_info_rel(info);
ut->uu_throttle_info = NULL;
ut->uu_lowpri_window = 0;
ut->uu_throttle_bc = FALSE;
}
}
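/*
 * Open a throttle window for the thread against this device, taking a
 * reference on the info structure.  BC_throttle is set when the lower
 * layer (the boot cache) asked for the throttling.
 */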
static
void throttle_info_set_initial_window(uthread_t ut, struct _throttle_io_info_t *info, boolean_t BC_throttle)
{
if (ut->uu_throttle_info == NULL) {
ut->uu_throttle_info = info;
throttle_info_ref(info);
DEBUG_ALLOC_THROTTLE_INFO("updating info = %p\n", info, info );
ut->uu_lowpri_window = THROTTLE_WINDOW;
ut->uu_throttle_bc = BC_throttle;
}
}
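/*
 * Record an I/O for throttling purposes: timestamp the issuing tier
 * and, for throttled tiers, choose the I/O period (shorter on SSDs),
 * count the I/O and open a throttle window for the thread.
 */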
static
void throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut, int policy, int flags, boolean_t isssd)
{
int thread_throttle_level;
if (THROTTLE_WINDOW == 0)
return;
if (ut == NULL)
ut = get_bsdthread_info(current_thread());
thread_throttle_level = throttle_get_thread_throttle_level(ut, policy);
if (thread_throttle_level == THROTTLE_LEVEL_TIER0 && ISSET(flags, B_PASSIVE))
thread_throttle_level = THROTTLE_LEVEL_NONE;
if (thread_throttle_level != THROTTLE_LEVEL_NONE)
microuptime(&info->throttle_last_IO_timestamp[thread_throttle_level]);
if (thread_throttle_level >= THROTTLE_LEVEL_THROTTLED) {
if (info->throttle_io_period == 0) {
if (isssd == TRUE)
info->throttle_io_period = lowpri_io_period_ssd_msecs;
else
info->throttle_io_period = lowpri_io_period_msecs;
if (info->throttle_io_period < lowpri_timer_period_msecs)
info->throttle_io_period = lowpri_timer_period_msecs;
}
OSAddAtomic(1, &info->throttle_io_count);
throttle_info_set_initial_window(ut, info, FALSE);
}
}
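/*
 * Set up the current thread's throttle window against the device
 * backing mp; unlike throttle_info_update_internal(), no I/O is
 * counted.
 */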
void throttle_info_update_by_mount(mount_t mp)
{
struct _throttle_io_info_t *info;
uthread_t ut;
boolean_t isssd = FALSE;
ut = get_bsdthread_info(current_thread());
if (ut->uu_lowpri_window)
return;
if (mp != NULL) {
if ((mp->mnt_kern_flag & MNTK_SSD) && !ignore_is_ssd)
isssd = TRUE;
info = &_throttle_io_info[mp->mnt_devbsdunit];
} else
info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
if (info->throttle_io_period == 0) {
if (isssd == TRUE)
info->throttle_io_period = lowpri_io_period_ssd_msecs;
else
info->throttle_io_period = lowpri_io_period_msecs;
if (info->throttle_io_period < lowpri_timer_period_msecs)
info->throttle_io_period = lowpri_timer_period_msecs;
}
throttle_info_set_initial_window(ut, info, FALSE);
}
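/*
 * KPI: update the throttle info for an I/O issued by the current
 * thread, using its current I/O policy.
 */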
void throttle_info_update(void *throttle_info, int flags)
{
if (throttle_info)
throttle_info_update_internal(throttle_info, NULL, -1, flags, FALSE);
}
void throttle_info_update_by_mask(void *throttle_info_handle, int flags)
{
void *throttle_info = throttle_info_handle;
throttle_info_update(throttle_info, flags);
}
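/*
 * KPI: report whether an I/O issued right now at the given policy
 * against this throttle info would be throttled.
 */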
int throttle_info_io_will_be_throttled(void * throttle_info, int policy)
{
struct _throttle_io_info_t *info = throttle_info;
struct timeval elapsed;
uint64_t elapsed_msecs;
int throttle_level;
int thread_throttle_level;
switch (policy) {
case IOPOL_THROTTLE:
thread_throttle_level = THROTTLE_LEVEL_TIER2;
break;
case IOPOL_UTILITY:
thread_throttle_level = THROTTLE_LEVEL_TIER1;
break;
default:
thread_throttle_level = THROTTLE_LEVEL_TIER0;
break;
}
for (throttle_level = THROTTLE_LEVEL_START; throttle_level < thread_throttle_level; throttle_level++) {
microuptime(&elapsed);
timevalsub(&elapsed, &info->throttle_last_IO_timestamp[throttle_level]);
elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000);
if (elapsed_msecs < (uint64_t)THROTTLE_WINDOW)
break;
}
if (throttle_level >= thread_throttle_level) {
return (0);
}
return (1);
}
void
throttle_legacy_process_incr(void)
{
OSIncrementAtomic(&throttle_legacy_process_count);
}
void
throttle_legacy_process_decr(void)
{
OSDecrementAtomic(&throttle_legacy_process_count);
}
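/*
 * Block device strategy entry point: tag the buffer with its throttle
 * and passive state, emit kdebug trace records, update the throttling
 * bookkeeping and pending-I/O counters, then hand the buffer to the
 * driver's strategy routine.
 */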
int
spec_strategy(struct vnop_strategy_args *ap)
{
buf_t bp;
int bflags;
int policy;
dev_t bdev;
uthread_t ut;
mount_t mp;
int strategy_ret;
struct _throttle_io_info_t *throttle_info;
boolean_t isssd = FALSE;
#if !CONFIG_EMBEDDED
proc_t curproc = current_proc();
#endif
bp = ap->a_bp;
bdev = buf_device(bp);
mp = buf_vnode(bp)->v_mount;
policy = throttle_get_io_policy(&ut);
if (bp->b_flags & B_META)
bp->b_attr.ba_flags |= BA_META;
if (policy == IOPOL_THROTTLE || policy == IOPOL_UTILITY) {
bp->b_flags |= B_THROTTLED_IO;
bp->b_attr.ba_flags |= BA_THROTTLED_IO;
bp->b_flags &= ~B_PASSIVE;
} else if (policy == IOPOL_PASSIVE)
bp->b_flags |= B_PASSIVE;
#if !CONFIG_EMBEDDED
if ((curproc != NULL) && ((curproc->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP))
bp->b_attr.ba_flags |= BA_DELAYIDLESLEEP;
#endif
bflags = bp->b_flags;
if (kdebug_enable) {
int code = 0;
if (bflags & B_READ)
code |= DKIO_READ;
if (bflags & B_ASYNC)
code |= DKIO_ASYNC;
if (bflags & B_META)
code |= DKIO_META;
else if (bflags & B_PAGEIO)
code |= DKIO_PAGING;
if (bflags & B_THROTTLED_IO)
code |= DKIO_THROTTLE;
else if (bflags & B_PASSIVE)
code |= DKIO_PASSIVE;
if (bp->b_attr.ba_flags & BA_NOCACHE)
code |= DKIO_NOCACHE;
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
bp, bdev, (int)buf_blkno(bp), buf_count(bp), 0);
}
if (((bflags & (B_THROTTLED_IO | B_PASSIVE | B_IOSTREAMING | B_PAGEIO | B_READ)) == (B_PAGEIO | B_READ)) &&
mp && (mp->mnt_kern_flag & MNTK_ROOTDEV))
hard_throttle_on_root = 1;
if (mp != NULL) {
if ((mp->mnt_kern_flag & MNTK_SSD) && !ignore_is_ssd)
isssd = TRUE;
throttle_info = &_throttle_io_info[mp->mnt_devbsdunit];
} else
throttle_info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
throttle_info_update_internal(throttle_info, ut, policy, bflags, isssd);
if ((bflags & B_READ) == 0) {
microuptime(&throttle_info->throttle_last_write_timestamp);
if (mp) {
INCR_PENDING_IO(buf_count(bp), mp->mnt_pending_write_size);
}
} else if (mp) {
INCR_PENDING_IO(buf_count(bp), mp->mnt_pending_read_size);
}
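	/*
	 * Lower layers (the boot cache) may return these magic values from
	 * their strategy routine to report how the I/O was disposed of.
	 */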
#define IO_SATISFIED_BY_CACHE ((int)0xcafefeed)
#define IO_SHOULD_BE_THROTTLED ((int)0xcafebeef)
typedef int strategy_fcn_ret_t(struct buf *bp);
strategy_ret = (*(strategy_fcn_ret_t*)bdevsw[major(bdev)].d_strategy)(bp);
if (IO_SATISFIED_BY_CACHE == strategy_ret) {
throttle_info_reset_window(ut);
} else if (IO_SHOULD_BE_THROTTLED == strategy_ret) {
throttle_info_set_initial_window(ut, throttle_info, TRUE);
}
return (0);
}
int
spec_blockmap(__unused struct vnop_blockmap_args *ap)
{
return (ENOTSUP);
}
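/*
 * Device close routine.  For a controlling TTY being closed for the
 * last time, the session's TTY references are torn down first.  The
 * driver's d_close is only called on the last close unless the device
 * requested D_TRACKCLOSE or the close is part of a revoke.
 */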
int
spec_close(struct vnop_close_args *ap)
{
struct vnode *vp = ap->a_vp;
dev_t dev = vp->v_rdev;
int error = 0;
int flags = ap->a_fflag;
struct proc *p = vfs_context_proc(ap->a_context);
struct session *sessp;
int do_rele = 0;
switch (vp->v_type) {
case VCHR:
sessp = proc_session(p);
if (sessp != SESSION_NULL) {
if (vp == sessp->s_ttyvp && vcount(vp) == 1) {
struct tty *tp;
session_lock(sessp);
if (vp == sessp->s_ttyvp) {
tp = SESSION_TP(sessp);
sessp->s_ttyvp = NULL;
sessp->s_ttyvid = 0;
sessp->s_ttyp = TTY_NULL;
sessp->s_ttypgrpid = NO_PID;
do_rele = 1;
}
session_unlock(sessp);
if (do_rele) {
vnode_rele(vp);
if (NULL != tp)
ttyfree(tp);
}
}
session_rele(sessp);
}
devsw_lock(dev, S_IFCHR);
if (--vp->v_specinfo->si_opencount < 0)
panic("negative open count (c, %u, %u)", major(dev), minor(dev));
if ((D_TRACKCLOSE & cdevsw[major(dev)].d_type) != 0 ||
vcount(vp) == 0 || (flags & IO_REVOKE) != 0)
error = cdevsw[major(dev)].d_close(dev, flags, S_IFCHR, p);
devsw_unlock(dev, S_IFCHR);
break;
case VBLK:
devsw_lock(dev, S_IFBLK);
if (vcount(vp) > 1) {
vp->v_specinfo->si_opencount--;
devsw_unlock(dev, S_IFBLK);
return (0);
}
devsw_unlock(dev, S_IFBLK);
if ((error = spec_fsync_internal(vp, MNT_WAIT, ap->a_context)))
return (error);
error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
if (error)
return (error);
devsw_lock(dev, S_IFBLK);
if (--vp->v_specinfo->si_opencount < 0)
panic("negative open count (b, %u, %u)", major(dev), minor(dev));
if (vcount(vp) == 0)
error = bdevsw[major(dev)].d_close(dev, flags, S_IFBLK, p);
devsw_unlock(dev, S_IFBLK);
break;
default:
panic("spec_close: not special");
return(EBADF);
}
return error;
}
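/*
 * Return POSIX pathconf information applicable to special devices.
 */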
int
spec_pathconf(struct vnop_pathconf_args *ap)
{
switch (ap->a_name) {
case _PC_LINK_MAX:
*ap->a_retval = LINK_MAX;
return (0);
case _PC_MAX_CANON:
*ap->a_retval = MAX_CANON;
return (0);
case _PC_MAX_INPUT:
*ap->a_retval = MAX_INPUT;
return (0);
case _PC_PIPE_BUF:
*ap->a_retval = PIPE_BUF;
return (0);
case _PC_CHOWN_RESTRICTED:
*ap->a_retval = 200112;
return (0);
case _PC_VDISABLE:
*ap->a_retval = _POSIX_VDISABLE;
return (0);
default:
return (EINVAL);
}
}
int
spec_ebadf(__unused void *dummy)
{
return (EBADF);
}
int
spec_blktooff(struct vnop_blktooff_args *ap)
{
struct vnode *vp = ap->a_vp;
switch (vp->v_type) {
case VCHR:
*ap->a_offset = (off_t)-1;
return (ENOTSUP);
case VBLK:
printf("spec_blktooff: not implemented for VBLK\n");
*ap->a_offset = (off_t)-1;
return (ENOTSUP);
default:
panic("spec_blktooff type");
}
return (0);
}
int
spec_offtoblk(struct vnop_offtoblk_args *ap)
{
struct vnode *vp = ap->a_vp;
switch (vp->v_type) {
case VCHR:
*ap->a_lblkno = (daddr64_t)-1;
return (ENOTSUP);
case VBLK:
printf("spec_offtoblk: not implemented for VBLK\n");
*ap->a_lblkno = (daddr64_t)-1;
return (ENOTSUP);
default:
panic("spec_offtoblk type");
}
return (0);
}
static void filt_specdetach(struct knote *kn);
static int filt_spec(struct knote *kn, long hint);
static unsigned filt_specpeek(struct knote *kn);
struct filterops spec_filtops = {
.f_isfd = 1,
.f_attach = filt_specattach,
.f_detach = filt_specdetach,
.f_event = filt_spec,
.f_peek = filt_specpeek
};
static int
filter_to_seltype(int16_t filter)
{
switch (filter) {
case EVFILT_READ:
return FREAD;
	case EVFILT_WRITE:
		return FWRITE;
default:
panic("filt_to_seltype(): invalid filter %d\n", filter);
return 0;
}
}
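/*
 * Attach a select-backed knote to a character device.  The device must
 * advertise CDEVSW_SELECT_KQUEUE; the knote is marked stay-queued so
 * its readiness is re-checked via filt_specpeek().
 */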
static int
filt_specattach(struct knote *kn)
{
vnode_t vp;
dev_t dev;
vp = (vnode_t)kn->kn_fp->f_fglob->fg_data;
assert(vnode_ischr(vp));
dev = vnode_specrdev(vp);
	if ((u_int)major(dev) >= (u_int)nchrdev) {
return ENXIO;
}
if ((cdevsw_flags[major(dev)] & CDEVSW_SELECT_KQUEUE) == 0) {
return EINVAL;
}
kn->kn_hook = wait_queue_link_allocate();
if (kn->kn_hook == NULL) {
return EAGAIN;
}
kn->kn_fop = &spec_filtops;
kn->kn_hookid = vnode_vid(vp);
knote_markstayqueued(kn);
return 0;
}
static void
filt_specdetach(struct knote *kn)
{
kern_return_t ret;
ret = wait_queue_set_unlink_one(kn->kn_kq->kq_wqs, kn->kn_hook);
if (ret != KERN_SUCCESS) {
panic("filt_specdetach(): failed to unlink wait queue link.");
}
(void)wait_queue_link_free(kn->kn_hook);
kn->kn_hook = NULL;
kn->kn_status &= ~KN_STAYQUEUED;
}
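/*
 * Evaluate a select-backed knote: temporarily point the thread's wait
 * queue set at the kqueue's so the driver's select routine links its
 * wait queue there, then derive kn_data from the select result.
 */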
static int
filt_spec(struct knote *kn, long hint)
{
vnode_t vp;
uthread_t uth;
wait_queue_set_t old_wqs;
vfs_context_t ctx;
int selres;
int error;
int use_offset;
dev_t dev;
uint64_t flags;
assert(kn->kn_hook != NULL);
if (hint != 0) {
panic("filt_spec(): nonzero hint?");
}
uth = get_bsdthread_info(current_thread());
ctx = vfs_context_current();
vp = (vnode_t)kn->kn_fp->f_fglob->fg_data;
error = vnode_getwithvid(vp, kn->kn_hookid);
if (error != 0) {
kn->kn_flags |= (EV_EOF | EV_ONESHOT);
return 1;
}
dev = vnode_specrdev(vp);
flags = cdevsw_flags[major(dev)];
use_offset = ((flags & CDEVSW_USE_OFFSET) != 0);
assert((flags & CDEVSW_SELECT_KQUEUE) != 0);
old_wqs = uth->uu_wqset;
uth->uu_wqset = kn->kn_kq->kq_wqs;
selres = VNOP_SELECT(vp, filter_to_seltype(kn->kn_filter), 0, kn->kn_hook, ctx);
uth->uu_wqset = old_wqs;
if (use_offset) {
if (kn->kn_fp->f_fglob->fg_offset >= (uint32_t)selres) {
kn->kn_data = 0;
} else {
kn->kn_data = ((uint32_t)selres) - kn->kn_fp->f_fglob->fg_offset;
}
} else {
kn->kn_data = selres;
}
vnode_put(vp);
return (kn->kn_data != 0);
}
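/*
 * Peek at readiness without updating kn_data; returns the raw select
 * result.
 */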
static unsigned
filt_specpeek(struct knote *kn)
{
vnode_t vp;
uthread_t uth;
wait_queue_set_t old_wqs;
vfs_context_t ctx;
int error, selres;
uth = get_bsdthread_info(current_thread());
ctx = vfs_context_current();
vp = (vnode_t)kn->kn_fp->f_fglob->fg_data;
error = vnode_getwithvid(vp, kn->kn_hookid);
if (error != 0) {
return 1;
}
old_wqs = uth->uu_wqset;
uth->uu_wqset = kn->kn_kq->kq_wqs;
selres = VNOP_SELECT(vp, filter_to_seltype(kn->kn_filter), 0, kn->kn_hook, ctx);
uth->uu_wqset = old_wqs;
vnode_put(vp);
return selres;
}