#include <sys/param.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf_internal.h>
#include <sys/mount_internal.h>
#include <sys/vnode_internal.h>
#include <sys/file_internal.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/user.h>
#include <sys/malloc.h>
#include <sys/disk.h>
#include <sys/uio_internal.h>
#include <sys/resource.h>
#include <machine/machine_routines.h>
#include <miscfs/specfs/specdev.h>
#include <vfs/vfs_support.h>
#include <vfs/vfs_disk_conditioner.h>
#include <kern/assert.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/policy_internal.h>
#include <kern/timer_call.h>
#include <kern/waitq.h>
#include <pexpert/pexpert.h>
#include <sys/kdebug.h>
#include <libkern/section_keywords.h>
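/*
 * Vnode operations for special files (character and block devices),
 * together with the low-priority I/O throttling ("lowpri") machinery
 * that spec_strategy() feeds for disk devices.
 */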
extern dev_t chrtoblk(dev_t dev);
extern boolean_t iskmemdev(dev_t dev);
extern int bpfkqfilter(dev_t dev, struct knote *kn);
extern int ptsd_kqfilter(dev_t, struct knote *);
extern int ptmx_kqfilter(dev_t, struct knote *);
struct vnode *speclisth[SPECHSZ];
char devopn[] = "devopn";
char devio[] = "devio";
char devwait[] = "devwait";
char devin[] = "devin";
char devout[] = "devout";
char devioc[] = "devioc";
char devcls[] = "devcls";
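/*
 * Vnode operation vector for special files.  Directory-style operations
 * are rejected via the err_* stubs; the nop_* entries succeed without
 * doing any work.
 */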
#define VOPFUNC int (*)(void *)
int (**spec_vnodeop_p)(void *);
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
{ &vnop_default_desc, (VOPFUNC)vn_default_error },
{ &vnop_lookup_desc, (VOPFUNC)spec_lookup },
{ &vnop_create_desc, (VOPFUNC)err_create },
{ &vnop_mknod_desc, (VOPFUNC)err_mknod },
{ &vnop_open_desc, (VOPFUNC)spec_open },
{ &vnop_close_desc, (VOPFUNC)spec_close },
{ &vnop_access_desc, (VOPFUNC)spec_access },
{ &vnop_getattr_desc, (VOPFUNC)spec_getattr },
{ &vnop_setattr_desc, (VOPFUNC)spec_setattr },
{ &vnop_read_desc, (VOPFUNC)spec_read },
{ &vnop_write_desc, (VOPFUNC)spec_write },
{ &vnop_ioctl_desc, (VOPFUNC)spec_ioctl },
{ &vnop_select_desc, (VOPFUNC)spec_select },
{ &vnop_revoke_desc, (VOPFUNC)nop_revoke },
{ &vnop_mmap_desc, (VOPFUNC)err_mmap },
{ &vnop_fsync_desc, (VOPFUNC)spec_fsync },
{ &vnop_remove_desc, (VOPFUNC)err_remove },
{ &vnop_link_desc, (VOPFUNC)err_link },
{ &vnop_rename_desc, (VOPFUNC)err_rename },
{ &vnop_mkdir_desc, (VOPFUNC)err_mkdir },
{ &vnop_rmdir_desc, (VOPFUNC)err_rmdir },
{ &vnop_symlink_desc, (VOPFUNC)err_symlink },
{ &vnop_readdir_desc, (VOPFUNC)err_readdir },
{ &vnop_readlink_desc, (VOPFUNC)err_readlink },
{ &vnop_inactive_desc, (VOPFUNC)nop_inactive },
{ &vnop_reclaim_desc, (VOPFUNC)nop_reclaim },
{ &vnop_strategy_desc, (VOPFUNC)spec_strategy },
{ &vnop_pathconf_desc, (VOPFUNC)spec_pathconf },
{ &vnop_advlock_desc, (VOPFUNC)err_advlock },
{ &vnop_bwrite_desc, (VOPFUNC)spec_bwrite },
{ &vnop_pagein_desc, (VOPFUNC)err_pagein },
{ &vnop_pageout_desc, (VOPFUNC)err_pageout },
{ &vnop_copyfile_desc, (VOPFUNC)err_copyfile },
{ &vnop_blktooff_desc, (VOPFUNC)spec_blktooff },
{ &vnop_offtoblk_desc, (VOPFUNC)spec_offtoblk },
{ &vnop_blockmap_desc, (VOPFUNC)spec_blockmap },
{ (struct vnodeop_desc*)NULL, (int(*)(void *))NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
{ &spec_vnodeop_p, spec_vnodeop_entries };
static void set_blocksize(vnode_t, dev_t);
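/*
 * Per-tier throttle parameters, in milliseconds.  A tier's "window" is
 * how long after higher-priority I/O a throttled thread may be held off;
 * the "IO period" is the minimum spacing enforced between batches of
 * throttled I/O, with shorter periods for SSDs.
 */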
#define LOWPRI_TIER1_WINDOW_MSECS 25
#define LOWPRI_TIER2_WINDOW_MSECS 100
#define LOWPRI_TIER3_WINDOW_MSECS 500
#define LOWPRI_TIER1_IO_PERIOD_MSECS 40
#define LOWPRI_TIER2_IO_PERIOD_MSECS 85
#define LOWPRI_TIER3_IO_PERIOD_MSECS 200
#define LOWPRI_TIER1_IO_PERIOD_SSD_MSECS 5
#define LOWPRI_TIER2_IO_PERIOD_SSD_MSECS 15
#define LOWPRI_TIER3_IO_PERIOD_SSD_MSECS 25
int throttle_windows_msecs[THROTTLE_LEVEL_END + 1] = {
0,
LOWPRI_TIER1_WINDOW_MSECS,
LOWPRI_TIER2_WINDOW_MSECS,
LOWPRI_TIER3_WINDOW_MSECS,
};
int throttle_io_period_msecs[THROTTLE_LEVEL_END + 1] = {
0,
LOWPRI_TIER1_IO_PERIOD_MSECS,
LOWPRI_TIER2_IO_PERIOD_MSECS,
LOWPRI_TIER3_IO_PERIOD_MSECS,
};
int throttle_io_period_ssd_msecs[THROTTLE_LEVEL_END + 1] = {
0,
LOWPRI_TIER1_IO_PERIOD_SSD_MSECS,
LOWPRI_TIER2_IO_PERIOD_SSD_MSECS,
LOWPRI_TIER3_IO_PERIOD_SSD_MSECS,
};
int throttled_count[THROTTLE_LEVEL_END + 1];
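/*
 * Per-device throttling state; _throttle_io_info[] is indexed by the
 * device's "devbsdunit".  The queues in throttle_uthlist hold the
 * uthreads currently blocked at each throttle level.
 */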
struct _throttle_io_info_t {
lck_mtx_t throttle_lock;
struct timeval throttle_last_write_timestamp;
struct timeval throttle_min_timer_deadline;
struct timeval throttle_window_start_timestamp[THROTTLE_LEVEL_END + 1];
struct timeval throttle_last_IO_timestamp[THROTTLE_LEVEL_END + 1];
pid_t throttle_last_IO_pid[THROTTLE_LEVEL_END + 1];
struct timeval throttle_start_IO_period_timestamp[THROTTLE_LEVEL_END + 1];
int32_t throttle_inflight_count[THROTTLE_LEVEL_END + 1];
TAILQ_HEAD( , uthread) throttle_uthlist[THROTTLE_LEVEL_END + 1];
int throttle_next_wake_level;
thread_call_t throttle_timer_call;
int32_t throttle_timer_ref;
int32_t throttle_timer_active;
int32_t throttle_io_count;
int32_t throttle_io_count_begin;
int *throttle_io_periods;
uint32_t throttle_io_period_num;
int32_t throttle_refcnt;
int32_t throttle_alloc;
int32_t throttle_disabled;
int32_t throttle_is_fusion_with_priority;
};
struct _throttle_io_info_t _throttle_io_info[LOWPRI_MAX_NUM_DEV];
int lowpri_throttle_enabled = 1;
static void throttle_info_end_io_internal(struct _throttle_io_info_t *info, int throttle_level);
static int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut, int flags, boolean_t isssd, boolean_t inflight, struct bufattr *bap);
static int throttle_get_thread_throttle_level(uthread_t ut);
static int throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier);
void throttle_info_mount_reset_period(mount_t mp, int isssd);
int
spec_lookup(struct vnop_lookup_args *ap)
{
*ap->a_vpp = NULL;
return (ENOTDIR);
}
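/*
 * Derive the natural block size for a device vnode: use the value
 * returned by the driver's bdevsw d_psize routine when one exists and
 * it reports a sane size, otherwise fall back to DEV_BSIZE.
 */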
static void
set_blocksize(struct vnode *vp, dev_t dev)
{
int (*size)(dev_t);
int rsize;
if ((major(dev) < nblkdev) && (size = bdevsw[major(dev)].d_psize)) {
rsize = (*size)(dev);
if (rsize <= 0)
vp->v_specsize = DEV_BSIZE;
else
vp->v_specsize = rsize;
}
else
vp->v_specsize = DEV_BSIZE;
}
void
set_fsblocksize(struct vnode *vp)
{
if (vp->v_type == VBLK) {
dev_t dev = (dev_t)vp->v_rdev;
int maj = major(dev);
if ((u_int)maj >= (u_int)nblkdev)
return;
vnode_lock(vp);
set_blocksize(vp, dev);
vnode_unlock(vp);
}
}
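/*
 * Open a special file.  Writes to disks and kernel memory devices are
 * restricted at elevated securelevels, then the device's d_open routine
 * is called.  On the first open of a disk character device the throttling
 * identity (devbsdunit, SSD-ness) is captured via DKIOC* ioctls; for
 * block devices the block size and capacity are probed so that
 * v_specsize and v_specdevsize are valid.
 */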
int
spec_open(struct vnop_open_args *ap)
{
struct proc *p = vfs_context_proc(ap->a_context);
kauth_cred_t cred = vfs_context_ucred(ap->a_context);
struct vnode *vp = ap->a_vp;
dev_t bdev, dev = (dev_t)vp->v_rdev;
int maj = major(dev);
int error;
if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
return (ENXIO);
switch (vp->v_type) {
case VCHR:
if ((u_int)maj >= (u_int)nchrdev)
return (ENXIO);
if (cred != FSCRED && (ap->a_mode & FWRITE)) {
if (securelevel >= 2 && isdisk(dev, VCHR))
return (EPERM);
if (iskmemdev(dev))
return (EPERM);
if (securelevel >= 1) {
if ((bdev = chrtoblk(dev)) != NODEV && check_mountedon(bdev, VBLK, &error))
return (error);
}
}
devsw_lock(dev, S_IFCHR);
error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
if (error == 0) {
vp->v_specinfo->si_opencount++;
}
devsw_unlock(dev, S_IFCHR);
if (error == 0 && cdevsw[maj].d_type == D_DISK && !vp->v_un.vu_specinfo->si_initted) {
int isssd = 0;
uint64_t throttle_mask = 0;
uint32_t devbsdunit = 0;
if (VNOP_IOCTL(vp, DKIOCGETTHROTTLEMASK, (caddr_t)&throttle_mask, 0, NULL) == 0) {
if (throttle_mask != 0 &&
VNOP_IOCTL(vp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ap->a_context) == 0) {
devbsdunit = num_trailing_0(throttle_mask);
vnode_lock(vp);
vp->v_un.vu_specinfo->si_isssd = isssd;
vp->v_un.vu_specinfo->si_devbsdunit = devbsdunit;
vp->v_un.vu_specinfo->si_throttle_mask = throttle_mask;
vp->v_un.vu_specinfo->si_throttleable = 1;
vp->v_un.vu_specinfo->si_initted = 1;
vnode_unlock(vp);
}
}
if (vp->v_un.vu_specinfo->si_initted == 0) {
vnode_lock(vp);
vp->v_un.vu_specinfo->si_initted = 1;
vnode_unlock(vp);
}
}
return (error);
case VBLK:
if ((u_int)maj >= (u_int)nblkdev)
return (ENXIO);
if (securelevel >= 2 && cred != FSCRED &&
(ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
return (EPERM);
if ( (error = vfs_mountedon(vp)) )
return (error);
devsw_lock(dev, S_IFBLK);
error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p);
if (!error) {
vp->v_specinfo->si_opencount++;
}
devsw_unlock(dev, S_IFBLK);
if (!error) {
u_int64_t blkcnt;
u_int32_t blksize;
int setsize = 0;
u_int32_t size512 = 512;
if (!VNOP_IOCTL(vp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, ap->a_context)) {
if (!VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, ap->a_context)) {
if (!VNOP_IOCTL(vp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, ap->a_context)) {
setsize = 1;
}
}
if (VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, ap->a_context))
error = ENXIO;
}
vnode_lock(vp);
set_blocksize(vp, dev);
if (setsize)
vp->v_specdevsize = blkcnt * (u_int64_t)size512;
else
vp->v_specdevsize = (u_int64_t)0;
vnode_unlock(vp);
}
		return (error);
default:
panic("spec_open type");
}
return (0);
}
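/*
 * Read from a special file.  Character devices call the driver's d_read
 * directly, bracketed by throttle accounting when the device is a disk;
 * block devices go through the buffer cache in page-sized chunks, with
 * one-block read-ahead when the access pattern looks sequential.
 */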
int
spec_read(struct vnop_read_args *ap)
{
struct vnode *vp = ap->a_vp;
struct uio *uio = ap->a_uio;
struct buf *bp;
daddr64_t bn, nextbn;
long bsize, bscale;
	int devBlockSize = 0;
int n, on;
int error = 0;
dev_t dev;
#if DIAGNOSTIC
if (uio->uio_rw != UIO_READ)
panic("spec_read mode");
if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg))
panic("spec_read proc");
#endif
if (uio_resid(uio) == 0)
return (0);
switch (vp->v_type) {
case VCHR:
{
struct _throttle_io_info_t *throttle_info = NULL;
int thread_throttle_level;
if (cdevsw[major(vp->v_rdev)].d_type == D_DISK && vp->v_un.vu_specinfo->si_throttleable) {
throttle_info = &_throttle_io_info[vp->v_un.vu_specinfo->si_devbsdunit];
thread_throttle_level = throttle_info_update_internal(throttle_info, NULL, 0, vp->v_un.vu_specinfo->si_isssd, TRUE, NULL);
}
error = (*cdevsw[major(vp->v_rdev)].d_read)
(vp->v_rdev, uio, ap->a_ioflag);
if (throttle_info) {
throttle_info_end_io_internal(throttle_info, thread_throttle_level);
}
return (error);
}
case VBLK:
if (uio->uio_offset < 0)
return (EINVAL);
dev = vp->v_rdev;
devBlockSize = vp->v_specsize;
if (devBlockSize > PAGE_SIZE)
return (EINVAL);
bscale = PAGE_SIZE / devBlockSize;
bsize = bscale * devBlockSize;
do {
on = uio->uio_offset % bsize;
bn = (daddr64_t)((uio->uio_offset / devBlockSize) &~ (bscale - 1));
if (vp->v_speclastr + bscale == bn) {
nextbn = bn + bscale;
error = buf_breadn(vp, bn, (int)bsize, &nextbn,
(int *)&bsize, 1, NOCRED, &bp);
} else
error = buf_bread(vp, bn, (int)bsize, NOCRED, &bp);
vnode_lock(vp);
vp->v_speclastr = bn;
vnode_unlock(vp);
n = bsize - buf_resid(bp);
if ((on > n) || error) {
if (!error)
error = EINVAL;
buf_brelse(bp);
return (error);
}
n = min((unsigned)(n - on), uio_resid(uio));
error = uiomove((char *)buf_dataptr(bp) + on, n, uio);
if (n + on == bsize)
buf_markaged(bp);
buf_brelse(bp);
} while (error == 0 && uio_resid(uio) > 0 && n != 0);
return (error);
default:
panic("spec_read type");
}
return (0);
}
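/*
 * Write to a special file.  Character devices call the driver's d_write
 * directly, with the same throttle bracketing as spec_read(); block
 * device writes go through the buffer cache and are issued synchronously,
 * asynchronously, or delayed depending on IO_SYNC and whether a full
 * block was filled.
 */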
int
spec_write(struct vnop_write_args *ap)
{
struct vnode *vp = ap->a_vp;
struct uio *uio = ap->a_uio;
struct buf *bp;
daddr64_t bn;
int bsize, blkmask, bscale;
int io_sync;
	int devBlockSize = 0;
int n, on;
int error = 0;
dev_t dev;
#if DIAGNOSTIC
if (uio->uio_rw != UIO_WRITE)
panic("spec_write mode");
if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg))
panic("spec_write proc");
#endif
switch (vp->v_type) {
case VCHR:
{
struct _throttle_io_info_t *throttle_info = NULL;
int thread_throttle_level;
if (cdevsw[major(vp->v_rdev)].d_type == D_DISK && vp->v_un.vu_specinfo->si_throttleable) {
throttle_info = &_throttle_io_info[vp->v_un.vu_specinfo->si_devbsdunit];
thread_throttle_level = throttle_info_update_internal(throttle_info, NULL, 0, vp->v_un.vu_specinfo->si_isssd, TRUE, NULL);
microuptime(&throttle_info->throttle_last_write_timestamp);
}
error = (*cdevsw[major(vp->v_rdev)].d_write)
(vp->v_rdev, uio, ap->a_ioflag);
if (throttle_info) {
throttle_info_end_io_internal(throttle_info, thread_throttle_level);
}
return (error);
}
case VBLK:
if (uio_resid(uio) == 0)
return (0);
if (uio->uio_offset < 0)
return (EINVAL);
io_sync = (ap->a_ioflag & IO_SYNC);
dev = (vp->v_rdev);
devBlockSize = vp->v_specsize;
if (devBlockSize > PAGE_SIZE)
			return (EINVAL);
bscale = PAGE_SIZE / devBlockSize;
blkmask = bscale - 1;
bsize = bscale * devBlockSize;
do {
bn = (daddr64_t)((uio->uio_offset / devBlockSize) &~ blkmask);
on = uio->uio_offset % bsize;
n = min((unsigned)(bsize - on), uio_resid(uio));
if (n == bsize &&
vp->v_specdevsize != (u_int64_t)0 &&
(uio->uio_offset + (u_int64_t)n) > vp->v_specdevsize) {
n = (uio->uio_offset + (u_int64_t)n) - vp->v_specdevsize;
}
if (n == bsize)
bp = buf_getblk(vp, bn, bsize, 0, 0, BLK_WRITE);
else
error = (int)buf_bread(vp, bn, bsize, NOCRED, &bp);
if (!error)
error = (int)buf_error(bp);
if (error) {
buf_brelse(bp);
return (error);
}
n = min(n, bsize - buf_resid(bp));
error = uiomove((char *)buf_dataptr(bp) + on, n, uio);
if (error) {
buf_brelse(bp);
return (error);
}
buf_markaged(bp);
if (io_sync)
error = buf_bwrite(bp);
else {
if ((n + on) == bsize)
error = buf_bawrite(bp);
else
error = buf_bdwrite(bp);
}
} while (error == 0 && uio_resid(uio) > 0 && n != 0);
return (error);
default:
panic("spec_write type");
}
return (0);
}
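/*
 * Pass an ioctl through to the device.  A successful DKIOCSETBLOCKSIZE
 * on a block device also updates v_specsize so subsequent buffered I/O
 * uses the new block size.
 */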
int
spec_ioctl(struct vnop_ioctl_args *ap)
{
proc_t p = vfs_context_proc(ap->a_context);
dev_t dev = ap->a_vp->v_rdev;
int retval = 0;
KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_IOCTL, 0) | DBG_FUNC_START,
dev, ap->a_command, ap->a_fflag, ap->a_vp->v_type, 0);
switch (ap->a_vp->v_type) {
case VCHR:
retval = (*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data,
ap->a_fflag, p);
break;
case VBLK:
retval = (*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data, ap->a_fflag, p);
if (!retval && ap->a_command == DKIOCSETBLOCKSIZE)
ap->a_vp->v_specsize = *(uint32_t *)ap->a_data;
break;
default:
panic("spec_ioctl");
}
KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_IOCTL, 0) | DBG_FUNC_END,
dev, ap->a_command, ap->a_fflag, retval, 0);
return (retval);
}
int
spec_select(struct vnop_select_args *ap)
{
proc_t p = vfs_context_proc(ap->a_context);
dev_t dev;
switch (ap->a_vp->v_type) {
default:
return (1);
case VCHR:
dev = ap->a_vp->v_rdev;
return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_wql, p);
}
}
static int filt_specattach(struct knote *kn, struct kevent_internal_s *kev);
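/*
 * Attach a knote to a character device.  bpf devices get first claim;
 * pty slave/master devices and kqueue-aware ttys are handed off to their
 * own filters; everything else falls through to the generic spec filter,
 * which is driven by the device's select entry point.
 */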
int
spec_kqfilter(vnode_t vp, struct knote *kn, struct kevent_internal_s *kev)
{
dev_t dev;
assert(vnode_ischr(vp));
dev = vnode_specrdev(vp);
#if NETWORKING
int32_t tmp_flags = kn->kn_flags;
int64_t tmp_data = kn->kn_data;
int res;
res = bpfkqfilter(dev, kn);
if ((kn->kn_flags & EV_ERROR) == 0) {
return res;
}
kn->kn_flags = tmp_flags;
kn->kn_data = tmp_data;
#endif
	if ((u_int)major(dev) >= (u_int)nchrdev) {
knote_set_error(kn, ENXIO);
return 0;
}
kn->kn_vnode_kqok = !!(cdevsw_flags[major(dev)] & CDEVSW_SELECT_KQUEUE);
kn->kn_vnode_use_ofst = !!(cdevsw_flags[major(dev)] & CDEVSW_USE_OFFSET);
if (cdevsw_flags[major(dev)] & CDEVSW_IS_PTS) {
kn->kn_filtid = EVFILTID_PTSD;
return ptsd_kqfilter(dev, kn);
} else if (cdevsw_flags[major(dev)] & CDEVSW_IS_PTC) {
kn->kn_filtid = EVFILTID_PTMX;
return ptmx_kqfilter(dev, kn);
} else if (cdevsw[major(dev)].d_type == D_TTY && kn->kn_vnode_kqok) {
kn->kn_filtid = EVFILTID_TTY;
return knote_fops(kn)->f_attach(kn, kev);
}
return filt_specattach(kn, kev);
}
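/*
 * fsync for a device vnode: flush dirty buffers for block devices.
 * Character devices have no buffer-cache state to push.
 */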
int
spec_fsync_internal(vnode_t vp, int waitfor, __unused vfs_context_t context)
{
if (vp->v_type == VCHR)
return (0);
buf_flushdirtyblks(vp, (waitfor == MNT_WAIT || waitfor == MNT_DWAIT), 0, "spec_fsync");
return (0);
}
int
spec_fsync(struct vnop_fsync_args *ap)
{
return spec_fsync_internal(ap->a_vp, ap->a_waitfor, ap->a_context);
}
void throttle_init(void);
#if 0
#define DEBUG_ALLOC_THROTTLE_INFO(format, debug_info, args...) \
do { \
if ((debug_info)->alloc) \
printf("%s: "format, __FUNCTION__, ## args); \
} while(0)
#else
#define DEBUG_ALLOC_THROTTLE_INFO(format, debug_info, args...)
#endif
SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier1_window_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_windows_msecs[THROTTLE_LEVEL_TIER1], 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier2_window_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_windows_msecs[THROTTLE_LEVEL_TIER2], 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier3_window_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_windows_msecs[THROTTLE_LEVEL_TIER3], 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier1_io_period_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_io_period_msecs[THROTTLE_LEVEL_TIER1], 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier2_io_period_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_io_period_msecs[THROTTLE_LEVEL_TIER2], 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier3_io_period_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_io_period_msecs[THROTTLE_LEVEL_TIER3], 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier1_io_period_ssd_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_io_period_ssd_msecs[THROTTLE_LEVEL_TIER1], 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier2_io_period_ssd_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_io_period_ssd_msecs[THROTTLE_LEVEL_TIER2], 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_tier3_io_period_ssd_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &throttle_io_period_ssd_msecs[THROTTLE_LEVEL_TIER3], 0, "");
SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &lowpri_throttle_enabled, 0, "");
static lck_grp_t *throttle_lock_grp;
static lck_attr_t *throttle_lock_attr;
static lck_grp_attr_t *throttle_lock_grp_attr;
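/*
 * Count trailing zero bits; used to turn a single-bit throttle mask into
 * an index (devbsdunit) into _throttle_io_info[].
 */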
int
num_trailing_0(uint64_t n)
{
if (n == 0)
return sizeof(n) * 8;
int count = 0;
while (!ISSET(n, 1)) {
n >>= 1;
++count;
}
return count;
}
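/*
 * Reference counting for throttle info.  A dynamically created structure
 * (throttle_alloc set) is destroyed and freed on last release.
 */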
static int
throttle_info_rel(struct _throttle_io_info_t *info)
{
SInt32 oldValue = OSDecrementAtomic(&info->throttle_refcnt);
	DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n",
	    info, (int)(oldValue - 1), info);
if (oldValue == 0)
panic("throttle info ref cnt went negative!");
if ((info->throttle_refcnt == 0) && (info->throttle_alloc)) {
DEBUG_ALLOC_THROTTLE_INFO("Freeing info = %p\n", info);
lck_mtx_destroy(&info->throttle_lock, throttle_lock_grp);
FREE(info, M_TEMP);
}
return oldValue;
}
static SInt32
throttle_info_ref(struct _throttle_io_info_t *info)
{
SInt32 oldValue = OSIncrementAtomic(&info->throttle_refcnt);
	DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n",
	    info, (int)(oldValue + 1), info);
if (info->throttle_alloc && (oldValue == 0))
panic("Taking a reference without calling create throttle info!\n");
return oldValue;
}
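/*
 * (Re)arm the throttle timer; called with the throttle_lock held.
 * Returns the level found to be causing throttling, or
 * THROTTLE_LEVEL_END if no waiting thread still needs to be held back.
 */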
static uint32_t
throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count, int wakelevel)
{
struct timeval elapsed;
struct timeval now;
struct timeval period;
uint64_t elapsed_msecs;
int throttle_level;
int level;
int msecs;
boolean_t throttled = FALSE;
boolean_t need_timer = FALSE;
microuptime(&now);
if (update_io_count == TRUE) {
info->throttle_io_count_begin = info->throttle_io_count;
info->throttle_io_period_num++;
while (wakelevel >= THROTTLE_LEVEL_THROTTLED)
info->throttle_start_IO_period_timestamp[wakelevel--] = now;
info->throttle_min_timer_deadline = now;
msecs = info->throttle_io_periods[THROTTLE_LEVEL_THROTTLED];
period.tv_sec = msecs / 1000;
period.tv_usec = (msecs % 1000) * 1000;
timevaladd(&info->throttle_min_timer_deadline, &period);
}
for (throttle_level = THROTTLE_LEVEL_START; throttle_level < THROTTLE_LEVEL_END; throttle_level++) {
elapsed = now;
timevalsub(&elapsed, &info->throttle_window_start_timestamp[throttle_level]);
elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000);
for (level = throttle_level + 1; level <= THROTTLE_LEVEL_END; level++) {
if (!TAILQ_EMPTY(&info->throttle_uthlist[level])) {
if (elapsed_msecs < (uint64_t)throttle_windows_msecs[level] || info->throttle_inflight_count[throttle_level]) {
throttled = TRUE;
}
break;
}
}
if (throttled == TRUE)
break;
}
if (throttled == TRUE) {
uint64_t deadline = 0;
struct timeval target;
struct timeval min_target;
for (level = throttle_level+1; level <= THROTTLE_LEVEL_END; level++) {
if (TAILQ_EMPTY(&info->throttle_uthlist[level]))
continue;
target = info->throttle_start_IO_period_timestamp[level];
msecs = info->throttle_io_periods[level];
period.tv_sec = msecs / 1000;
period.tv_usec = (msecs % 1000) * 1000;
timevaladd(&target, &period);
if (need_timer == FALSE || timevalcmp(&target, &min_target, <)) {
min_target = target;
need_timer = TRUE;
}
}
if (timevalcmp(&info->throttle_min_timer_deadline, &now, >)) {
if (timevalcmp(&info->throttle_min_timer_deadline, &min_target, >))
min_target = info->throttle_min_timer_deadline;
}
if (info->throttle_timer_active) {
if (thread_call_cancel(info->throttle_timer_call) == FALSE) {
need_timer = FALSE;
} else
info->throttle_timer_active = 0;
}
if (need_timer == TRUE) {
int target_msecs;
if (info->throttle_timer_ref == 0) {
throttle_info_ref(info);
info->throttle_timer_ref = 1;
}
elapsed = min_target;
timevalsub(&elapsed, &now);
target_msecs = elapsed.tv_sec * 1000 + elapsed.tv_usec / 1000;
if (target_msecs <= 0) {
target_msecs = 1;
}
clock_interval_to_deadline(target_msecs, 1000000, &deadline);
thread_call_enter_delayed(info->throttle_timer_call, deadline);
info->throttle_timer_active = 1;
}
}
return (throttle_level);
}
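/*
 * Throttle timer callout: once the current I/O period has expired, wake
 * the longest-waiting thread at the next eligible level, restart the
 * timer, and release every thread queued at a level that is no longer
 * being throttled.
 */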
static void
throttle_timer(struct _throttle_io_info_t *info)
{
uthread_t ut, utlist;
struct timeval elapsed;
struct timeval now;
uint64_t elapsed_msecs;
int throttle_level;
int level;
int wake_level;
caddr_t wake_address = NULL;
boolean_t update_io_count = FALSE;
boolean_t need_wakeup = FALSE;
boolean_t need_release = FALSE;
ut = NULL;
lck_mtx_lock(&info->throttle_lock);
info->throttle_timer_active = 0;
microuptime(&now);
elapsed = now;
timevalsub(&elapsed, &info->throttle_start_IO_period_timestamp[THROTTLE_LEVEL_THROTTLED]);
elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000);
if (elapsed_msecs >= (uint64_t)info->throttle_io_periods[THROTTLE_LEVEL_THROTTLED]) {
wake_level = info->throttle_next_wake_level;
for (level = THROTTLE_LEVEL_START; level < THROTTLE_LEVEL_END; level++) {
elapsed = now;
timevalsub(&elapsed, &info->throttle_start_IO_period_timestamp[wake_level]);
elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000);
if (elapsed_msecs >= (uint64_t)info->throttle_io_periods[wake_level] && !TAILQ_EMPTY(&info->throttle_uthlist[wake_level])) {
need_wakeup = TRUE;
update_io_count = TRUE;
info->throttle_next_wake_level = wake_level - 1;
if (info->throttle_next_wake_level == THROTTLE_LEVEL_START)
info->throttle_next_wake_level = THROTTLE_LEVEL_END;
break;
}
wake_level--;
if (wake_level == THROTTLE_LEVEL_START)
wake_level = THROTTLE_LEVEL_END;
}
}
if (need_wakeup == TRUE) {
if (!TAILQ_EMPTY(&info->throttle_uthlist[wake_level])) {
ut = (uthread_t)TAILQ_FIRST(&info->throttle_uthlist[wake_level]);
TAILQ_REMOVE(&info->throttle_uthlist[wake_level], ut, uu_throttlelist);
ut->uu_on_throttlelist = THROTTLE_LEVEL_NONE;
ut->uu_is_throttled = FALSE;
wake_address = (caddr_t)&ut->uu_on_throttlelist;
}
} else
wake_level = THROTTLE_LEVEL_START;
throttle_level = throttle_timer_start(info, update_io_count, wake_level);
if (wake_address != NULL)
wakeup(wake_address);
for (level = THROTTLE_LEVEL_THROTTLED; level <= throttle_level; level++) {
TAILQ_FOREACH_SAFE(ut, &info->throttle_uthlist[level], uu_throttlelist, utlist) {
TAILQ_REMOVE(&info->throttle_uthlist[level], ut, uu_throttlelist);
ut->uu_on_throttlelist = THROTTLE_LEVEL_NONE;
ut->uu_is_throttled = FALSE;
wakeup(&ut->uu_on_throttlelist);
}
}
if (info->throttle_timer_active == 0 && info->throttle_timer_ref) {
info->throttle_timer_ref = 0;
need_release = TRUE;
}
lck_mtx_unlock(&info->throttle_lock);
if (need_release == TRUE)
throttle_info_rel(info);
}
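/*
 * Queue a uthread on the throttle list for its level, starting the
 * throttle timer if the list was empty.  If the timer finds nothing to
 * wait for (THROTTLE_LEVEL_END) the thread is taken back off the list
 * and the caller skips the sleep.
 */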
static int
throttle_add_to_list(struct _throttle_io_info_t *info, uthread_t ut, int mylevel, boolean_t insert_tail)
{
boolean_t start_timer = FALSE;
int level = THROTTLE_LEVEL_START;
if (TAILQ_EMPTY(&info->throttle_uthlist[mylevel])) {
info->throttle_start_IO_period_timestamp[mylevel] = info->throttle_last_IO_timestamp[mylevel];
start_timer = TRUE;
}
if (insert_tail == TRUE)
TAILQ_INSERT_TAIL(&info->throttle_uthlist[mylevel], ut, uu_throttlelist);
else
TAILQ_INSERT_HEAD(&info->throttle_uthlist[mylevel], ut, uu_throttlelist);
ut->uu_on_throttlelist = mylevel;
if (start_timer == TRUE) {
level = throttle_timer_start(info, FALSE, THROTTLE_LEVEL_START);
if (level == THROTTLE_LEVEL_END) {
if (ut->uu_on_throttlelist >= THROTTLE_LEVEL_THROTTLED) {
TAILQ_REMOVE(&info->throttle_uthlist[ut->uu_on_throttlelist], ut, uu_throttlelist);
ut->uu_on_throttlelist = THROTTLE_LEVEL_NONE;
}
}
}
return (level);
}
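/*
 * Allow the per-tier windows and I/O periods to be overridden, first by
 * device-tree defaults (PE_get_default) and then by boot-args.
 */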
static void
throttle_init_throttle_window(void)
{
int throttle_window_size;
if (PE_get_default("kern.io_throttle_window_tier1", &throttle_window_size, sizeof(throttle_window_size)))
throttle_windows_msecs[THROTTLE_LEVEL_TIER1] = throttle_window_size;
if (PE_get_default("kern.io_throttle_window_tier2", &throttle_window_size, sizeof(throttle_window_size)))
throttle_windows_msecs[THROTTLE_LEVEL_TIER2] = throttle_window_size;
if (PE_get_default("kern.io_throttle_window_tier3", &throttle_window_size, sizeof(throttle_window_size)))
throttle_windows_msecs[THROTTLE_LEVEL_TIER3] = throttle_window_size;
if (PE_parse_boot_argn("io_throttle_window_tier1", &throttle_window_size, sizeof(throttle_window_size)))
throttle_windows_msecs[THROTTLE_LEVEL_TIER1] = throttle_window_size;
if (PE_parse_boot_argn("io_throttle_window_tier2", &throttle_window_size, sizeof(throttle_window_size)))
throttle_windows_msecs[THROTTLE_LEVEL_TIER2] = throttle_window_size;
if (PE_parse_boot_argn("io_throttle_window_tier3", &throttle_window_size, sizeof(throttle_window_size)))
throttle_windows_msecs[THROTTLE_LEVEL_TIER3] = throttle_window_size;
}
static void
throttle_init_throttle_period(struct _throttle_io_info_t *info, boolean_t isssd)
{
int throttle_period_size;
if ((isssd == TRUE) && (info->throttle_is_fusion_with_priority == 0))
info->throttle_io_periods = &throttle_io_period_ssd_msecs[0];
else
info->throttle_io_periods = &throttle_io_period_msecs[0];
if (PE_get_default("kern.io_throttle_period_tier1", &throttle_period_size, sizeof(throttle_period_size)))
info->throttle_io_periods[THROTTLE_LEVEL_TIER1] = throttle_period_size;
if (PE_get_default("kern.io_throttle_period_tier2", &throttle_period_size, sizeof(throttle_period_size)))
info->throttle_io_periods[THROTTLE_LEVEL_TIER2] = throttle_period_size;
if (PE_get_default("kern.io_throttle_period_tier3", &throttle_period_size, sizeof(throttle_period_size)))
info->throttle_io_periods[THROTTLE_LEVEL_TIER3] = throttle_period_size;
if (PE_parse_boot_argn("io_throttle_period_tier1", &throttle_period_size, sizeof(throttle_period_size)))
info->throttle_io_periods[THROTTLE_LEVEL_TIER1] = throttle_period_size;
if (PE_parse_boot_argn("io_throttle_period_tier2", &throttle_period_size, sizeof(throttle_period_size)))
info->throttle_io_periods[THROTTLE_LEVEL_TIER2] = throttle_period_size;
if (PE_parse_boot_argn("io_throttle_period_tier3", &throttle_period_size, sizeof(throttle_period_size)))
info->throttle_io_periods[THROTTLE_LEVEL_TIER3] = throttle_period_size;
}
#if CONFIG_IOSCHED
extern void vm_io_reprioritize_init(void);
int iosched_enabled = 1;
#endif
void
throttle_init(void)
{
struct _throttle_io_info_t *info;
int i;
int level;
#if CONFIG_IOSCHED
int iosched;
#endif
throttle_lock_grp_attr = lck_grp_attr_alloc_init();
throttle_lock_grp = lck_grp_alloc_init("throttle I/O", throttle_lock_grp_attr);
throttle_init_throttle_window();
throttle_lock_attr = lck_attr_alloc_init();
for (i = 0; i < LOWPRI_MAX_NUM_DEV; i++) {
info = &_throttle_io_info[i];
lck_mtx_init(&info->throttle_lock, throttle_lock_grp, throttle_lock_attr);
info->throttle_timer_call = thread_call_allocate((thread_call_func_t)throttle_timer, (thread_call_param_t)info);
for (level = 0; level <= THROTTLE_LEVEL_END; level++) {
TAILQ_INIT(&info->throttle_uthlist[level]);
info->throttle_last_IO_pid[level] = 0;
info->throttle_inflight_count[level] = 0;
}
info->throttle_next_wake_level = THROTTLE_LEVEL_END;
info->throttle_disabled = 0;
info->throttle_is_fusion_with_priority = 0;
}
#if CONFIG_IOSCHED
if (PE_parse_boot_argn("iosched", &iosched, sizeof(iosched))) {
iosched_enabled = iosched;
}
if (iosched_enabled) {
vm_io_reprioritize_init();
}
#endif
}
void
sys_override_io_throttle(int flag)
{
if (flag == THROTTLE_IO_ENABLE)
lowpri_throttle_enabled = 1;
if (flag == THROTTLE_IO_DISABLE)
lowpri_throttle_enabled = 0;
}
int rethrottle_wakeups = 0;
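/*
 * Called when a thread's effective I/O tier may have changed (e.g. a
 * policy update) while it could be blocked in throttle_lowpri_io().
 * Runs under a spinlock with interrupts disabled, so it only flags or
 * wakes the thread; the new level is re-evaluated after the wakeup.
 */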
void
rethrottle_thread(uthread_t ut)
{
if (ut->uu_throttle_info == NULL)
return;
boolean_t s = ml_set_interrupts_enabled(FALSE);
lck_spin_lock(&ut->uu_rethrottle_lock);
if (ut->uu_is_throttled == FALSE)
ut->uu_was_rethrottled = TRUE;
else {
int my_new_level = throttle_get_thread_throttle_level(ut);
if (my_new_level != ut->uu_on_throttlelist) {
ut->uu_is_throttled = FALSE;
wakeup(&ut->uu_on_throttlelist);
rethrottle_wakeups++;
KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 102)), thread_tid(ut->uu_thread), ut->uu_on_throttlelist, my_new_level, 0, 0);
}
}
lck_spin_unlock(&ut->uu_rethrottle_lock);
ml_set_interrupts_enabled(s);
}
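/*
 * KPI: create and take a reference on a throttle info structure for a
 * file system to use with throttle_info_update().  Every create must be
 * balanced by a throttle_info_release().  A typical mount-time pairing
 * looks roughly like this (a sketch, not verbatim from any caller):
 *
 *	void *ti = throttle_info_create();
 *	if (ti != NULL)
 *		throttle_info_mount_ref(mp, ti);	// mp: the mount_t
 *	...
 *	throttle_info_mount_rel(mp);
 *	throttle_info_release(ti);
 */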
void *
throttle_info_create(void)
{
struct _throttle_io_info_t *info;
int level;
MALLOC(info, struct _throttle_io_info_t *, sizeof(*info), M_TEMP, M_ZERO | M_WAITOK);
if (info == NULL)
return NULL;
DEBUG_ALLOC_THROTTLE_INFO("Creating info = %p\n", info, info );
info->throttle_alloc = TRUE;
lck_mtx_init(&info->throttle_lock, throttle_lock_grp, throttle_lock_attr);
info->throttle_timer_call = thread_call_allocate((thread_call_func_t)throttle_timer, (thread_call_param_t)info);
for (level = 0; level <= THROTTLE_LEVEL_END; level++) {
TAILQ_INIT(&info->throttle_uthlist[level]);
}
info->throttle_next_wake_level = THROTTLE_LEVEL_END;
OSIncrementAtomic(&info->throttle_refcnt);
return info;
}
void
throttle_info_release(void *throttle_info)
{
	DEBUG_ALLOC_THROTTLE_INFO("Releasing info = %p\n",
(struct _throttle_io_info_t *)throttle_info,
(struct _throttle_io_info_t *)throttle_info);
if (throttle_info)
throttle_info_rel(throttle_info);
}
void
throttle_info_mount_ref(mount_t mp, void *throttle_info)
{
if ((throttle_info == NULL) || (mp == NULL))
return;
throttle_info_ref(throttle_info);
if (mp->mnt_throttle_info)
throttle_info_rel(mp->mnt_throttle_info);
mp->mnt_throttle_info = throttle_info;
}
int
throttle_info_ref_by_mask(uint64_t throttle_mask, throttle_info_handle_t *throttle_info_handle)
{
int dev_index;
struct _throttle_io_info_t *info;
if (throttle_info_handle == NULL)
return EINVAL;
dev_index = num_trailing_0(throttle_mask);
info = &_throttle_io_info[dev_index];
throttle_info_ref(info);
*(struct _throttle_io_info_t**)throttle_info_handle = info;
return 0;
}
void
throttle_info_rel_by_mask(throttle_info_handle_t throttle_info_handle)
{
throttle_info_rel((struct _throttle_io_info_t*)throttle_info_handle);
}
void
throttle_info_mount_rel(mount_t mp)
{
if (mp->mnt_throttle_info)
throttle_info_rel(mp->mnt_throttle_info);
mp->mnt_throttle_info = NULL;
}
void
throttle_info_mount_reset_period(mount_t mp, int isssd)
{
struct _throttle_io_info_t *info;
if (mp == NULL)
info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
else if (mp->mnt_throttle_info == NULL)
info = &_throttle_io_info[mp->mnt_devbsdunit];
else
info = mp->mnt_throttle_info;
throttle_init_throttle_period(info, isssd);
}
void
throttle_info_get_last_io_time(mount_t mp, struct timeval *tv)
{
struct _throttle_io_info_t *info;
if (mp == NULL)
info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
else if (mp->mnt_throttle_info == NULL)
info = &_throttle_io_info[mp->mnt_devbsdunit];
else
info = mp->mnt_throttle_info;
*tv = info->throttle_last_write_timestamp;
}
void
update_last_io_time(mount_t mp)
{
struct _throttle_io_info_t *info;
if (mp == NULL)
info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
else if (mp->mnt_throttle_info == NULL)
info = &_throttle_io_info[mp->mnt_devbsdunit];
else
info = mp->mnt_throttle_info;
microuptime(&info->throttle_last_write_timestamp);
if (mp != NULL)
mp->mnt_last_write_completed_timestamp = info->throttle_last_write_timestamp;
}
int
throttle_get_io_policy(uthread_t *ut)
{
if (ut != NULL)
*ut = get_bsdthread_info(current_thread());
return (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO));
}
int
throttle_get_passive_io_policy(uthread_t *ut)
{
if (ut != NULL)
*ut = get_bsdthread_info(current_thread());
return (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_PASSIVE_IO));
}
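/*
 * Map the calling thread's effective I/O policy to a throttle level,
 * looking up the uthread if the caller did not supply one.
 */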
static int
throttle_get_thread_throttle_level(uthread_t ut)
{
uthread_t *ut_p = (ut == NULL) ? &ut : NULL;
int io_tier = throttle_get_io_policy(ut_p);
return throttle_get_thread_throttle_level_internal(ut, io_tier);
}
static int
throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier)
{
int thread_throttle_level = io_tier;
int user_idle_level;
assert(ut != NULL);
if (ut->uu_throttle_bc == TRUE)
thread_throttle_level = THROTTLE_LEVEL_TIER3;
if (thread_throttle_level >= THROTTLE_LEVEL_TIER3) {
user_idle_level = timer_get_user_idle_level();
if (user_idle_level > 0) {
thread_throttle_level--;
}
}
return (thread_throttle_level);
}
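/*
 * Decide whether the current thread's next I/O should be throttled:
 * THROTTLE_DISENGAGED if no more-favored level has recent or in-flight
 * I/O, THROTTLE_NOW if I/O has already been issued in the current
 * period, otherwise THROTTLE_ENGAGED.
 */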
static int
throttle_io_will_be_throttled_internal(void * throttle_info, int * mylevel, int * throttling_level)
{
struct _throttle_io_info_t *info = throttle_info;
struct timeval elapsed;
struct timeval now;
uint64_t elapsed_msecs;
int thread_throttle_level;
int throttle_level;
if ((thread_throttle_level = throttle_get_thread_throttle_level(NULL)) < THROTTLE_LEVEL_THROTTLED)
return (THROTTLE_DISENGAGED);
microuptime(&now);
for (throttle_level = THROTTLE_LEVEL_START; throttle_level < thread_throttle_level; throttle_level++) {
if (info->throttle_inflight_count[throttle_level]) {
break;
}
elapsed = now;
timevalsub(&elapsed, &info->throttle_window_start_timestamp[throttle_level]);
elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000);
if (elapsed_msecs < (uint64_t)throttle_windows_msecs[thread_throttle_level])
break;
}
if (throttle_level >= thread_throttle_level) {
return (THROTTLE_DISENGAGED);
}
if (mylevel)
*mylevel = thread_throttle_level;
if (throttling_level)
*throttling_level = throttle_level;
if (info->throttle_io_count != info->throttle_io_count_begin) {
return (THROTTLE_NOW);
}
return (THROTTLE_ENGAGED);
}
int
throttle_io_will_be_throttled(__unused int lowpri_window_msecs, mount_t mp)
{
struct _throttle_io_info_t *info;
if (mp == NULL)
info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
else if (mp->mnt_throttle_info == NULL)
info = &_throttle_io_info[mp->mnt_devbsdunit];
else
info = mp->mnt_throttle_info;
if (info->throttle_is_fusion_with_priority) {
uthread_t ut = get_bsdthread_info(current_thread());
if (ut->uu_lowpri_window == 0)
return (THROTTLE_DISENGAGED);
}
if (info->throttle_disabled)
return (THROTTLE_DISENGAGED);
else
return throttle_io_will_be_throttled_internal(info, NULL, NULL);
}
static void
throttle_update_proc_stats(pid_t throttling_pid, int count)
{
proc_t throttling_proc;
proc_t throttled_proc = current_proc();
OSAddAtomic64(count, &(throttled_proc->was_throttled));
throttling_proc = proc_find(throttling_pid);
if (throttling_proc != PROC_NULL) {
OSAddAtomic64(count, &(throttling_proc->did_throttle));
proc_rele(throttling_proc);
}
}
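/*
 * Park the calling thread if its recent I/O should be throttled.  A
 * sleep_amount of 0 only tears down the thread's low-pri window state;
 * otherwise the thread sleeps until enough throttle periods have
 * elapsed (or it is rethrottled to a different level).  Returns the
 * number of times the thread blocked.
 */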
uint32_t
throttle_lowpri_io(int sleep_amount)
{
uthread_t ut;
struct _throttle_io_info_t *info;
int throttle_type = 0;
int mylevel = 0;
int throttling_level = THROTTLE_LEVEL_NONE;
int sleep_cnt = 0;
uint32_t throttle_io_period_num = 0;
boolean_t insert_tail = TRUE;
boolean_t s;
ut = get_bsdthread_info(current_thread());
if (ut->uu_lowpri_window == 0)
return (0);
info = ut->uu_throttle_info;
if (info == NULL) {
ut->uu_throttle_bc = FALSE;
ut->uu_lowpri_window = 0;
return (0);
}
lck_mtx_lock(&info->throttle_lock);
assert(ut->uu_on_throttlelist < THROTTLE_LEVEL_THROTTLED);
if (sleep_amount == 0)
goto done;
if (sleep_amount == 1 && ut->uu_throttle_bc == FALSE)
sleep_amount = 0;
throttle_io_period_num = info->throttle_io_period_num;
ut->uu_was_rethrottled = FALSE;
while ( (throttle_type = throttle_io_will_be_throttled_internal(info, &mylevel, &throttling_level)) ) {
if (throttle_type == THROTTLE_ENGAGED) {
if (sleep_amount == 0)
break;
if (info->throttle_io_period_num < throttle_io_period_num)
break;
if ((info->throttle_io_period_num - throttle_io_period_num) >= (uint32_t)sleep_amount)
break;
}
if (ut->uu_on_throttlelist >= THROTTLE_LEVEL_THROTTLED && ut->uu_on_throttlelist != mylevel) {
TAILQ_REMOVE(&info->throttle_uthlist[ut->uu_on_throttlelist], ut, uu_throttlelist);
ut->uu_on_throttlelist = THROTTLE_LEVEL_NONE;
insert_tail = TRUE;
}
if (ut->uu_on_throttlelist < THROTTLE_LEVEL_THROTTLED) {
if (throttle_add_to_list(info, ut, mylevel, insert_tail) == THROTTLE_LEVEL_END)
goto done;
}
assert(throttling_level >= THROTTLE_LEVEL_START && throttling_level <= THROTTLE_LEVEL_END);
s = ml_set_interrupts_enabled(FALSE);
lck_spin_lock(&ut->uu_rethrottle_lock);
if (ut->uu_was_rethrottled == TRUE) {
lck_spin_unlock(&ut->uu_rethrottle_lock);
ml_set_interrupts_enabled(s);
lck_mtx_yield(&info->throttle_lock);
KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 103)), thread_tid(ut->uu_thread), ut->uu_on_throttlelist, 0, 0, 0);
ut->uu_was_rethrottled = FALSE;
continue;
}
KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_THROTTLE, PROCESS_THROTTLED)) | DBG_FUNC_NONE,
info->throttle_last_IO_pid[throttling_level], throttling_level, proc_selfpid(), mylevel, 0);
if (sleep_cnt == 0) {
KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START,
throttle_windows_msecs[mylevel], info->throttle_io_periods[mylevel], info->throttle_io_count, 0, 0);
throttled_count[mylevel]++;
}
ut->uu_wmesg = "throttle_lowpri_io";
assert_wait((caddr_t)&ut->uu_on_throttlelist, THREAD_UNINT);
ut->uu_is_throttled = TRUE;
lck_spin_unlock(&ut->uu_rethrottle_lock);
ml_set_interrupts_enabled(s);
lck_mtx_unlock(&info->throttle_lock);
thread_block(THREAD_CONTINUE_NULL);
ut->uu_wmesg = NULL;
ut->uu_is_throttled = FALSE;
ut->uu_was_rethrottled = FALSE;
lck_mtx_lock(&info->throttle_lock);
sleep_cnt++;
if (sleep_amount == 0)
insert_tail = FALSE;
else if (info->throttle_io_period_num < throttle_io_period_num ||
(info->throttle_io_period_num - throttle_io_period_num) >= (uint32_t)sleep_amount) {
insert_tail = FALSE;
sleep_amount = 0;
}
}
done:
if (ut->uu_on_throttlelist >= THROTTLE_LEVEL_THROTTLED) {
TAILQ_REMOVE(&info->throttle_uthlist[ut->uu_on_throttlelist], ut, uu_throttlelist);
ut->uu_on_throttlelist = THROTTLE_LEVEL_NONE;
}
lck_mtx_unlock(&info->throttle_lock);
if (sleep_cnt) {
KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END,
throttle_windows_msecs[mylevel], info->throttle_io_periods[mylevel], info->throttle_io_count, 0, 0);
throttle_update_proc_stats(info->throttle_last_IO_pid[throttling_level], sleep_cnt);
}
ut->uu_throttle_info = NULL;
ut->uu_throttle_bc = FALSE;
ut->uu_lowpri_window = 0;
throttle_info_rel(info);
return (sleep_cnt);
}
void throttle_set_thread_io_policy(int policy)
{
proc_set_thread_policy(current_thread(), TASK_POLICY_INTERNAL, TASK_POLICY_IOPOL, policy);
}
int throttle_get_thread_effective_io_policy(void)
{
return proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
}
void throttle_info_reset_window(uthread_t ut)
{
struct _throttle_io_info_t *info;
if (ut == NULL)
ut = get_bsdthread_info(current_thread());
if ( (info = ut->uu_throttle_info) ) {
throttle_info_rel(info);
ut->uu_throttle_info = NULL;
ut->uu_lowpri_window = 0;
ut->uu_throttle_bc = FALSE;
}
}
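/*
 * Associate the current uthread with a throttle info and open its
 * low-priority window, taking a reference that is dropped by
 * throttle_lowpri_io() or throttle_info_reset_window().
 */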
static
void throttle_info_set_initial_window(uthread_t ut, struct _throttle_io_info_t *info, boolean_t BC_throttle, boolean_t isssd)
{
if (lowpri_throttle_enabled == 0 || info->throttle_disabled)
return;
	if (info->throttle_io_periods == NULL) {
throttle_init_throttle_period(info, isssd);
}
if (ut->uu_throttle_info == NULL) {
ut->uu_throttle_info = info;
throttle_info_ref(info);
DEBUG_ALLOC_THROTTLE_INFO("updating info = %p\n", info, info );
ut->uu_lowpri_window = 1;
ut->uu_throttle_bc = BC_throttle;
}
}
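/*
 * I/O-completion hook for buffers marked BA_STRATEGY_TRACKED_IO by
 * throttle_info_update_internal(): drop the in-flight count for the
 * I/O's effective tier and restart that tier's throttle window.
 */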
void
throttle_info_end_io(buf_t bp)
{
mount_t mp;
struct bufattr *bap;
struct _throttle_io_info_t *info;
int io_tier;
bap = &bp->b_attr;
if (!ISSET(bap->ba_flags, BA_STRATEGY_TRACKED_IO)) {
return;
}
CLR(bap->ba_flags, BA_STRATEGY_TRACKED_IO);
mp = buf_vnode(bp)->v_mount;
if (mp != NULL) {
info = &_throttle_io_info[mp->mnt_devbsdunit];
} else {
info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
}
io_tier = GET_BUFATTR_IO_TIER(bap);
if (ISSET(bap->ba_flags, BA_IO_TIER_UPGRADE)) {
io_tier--;
}
throttle_info_end_io_internal(info, io_tier);
}
static
void throttle_info_end_io_internal(struct _throttle_io_info_t *info, int throttle_level)
{
if (throttle_level == THROTTLE_LEVEL_NONE) {
return;
}
microuptime(&info->throttle_window_start_timestamp[throttle_level]);
OSDecrementAtomic(&info->throttle_inflight_count[throttle_level]);
assert(info->throttle_inflight_count[throttle_level] >= 0);
}
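/*
 * Record an I/O against a throttle info: remember the issuing pid for
 * the effective tier, either count it in-flight (tagging the buffer
 * BA_STRATEGY_TRACKED_IO) or restart the tier's window, and open the
 * caller's low-pri window for throttled tiers.
 */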
static
int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut, int flags, boolean_t isssd, boolean_t inflight, struct bufattr *bap)
{
int thread_throttle_level;
if (lowpri_throttle_enabled == 0 || info->throttle_disabled)
return THROTTLE_LEVEL_NONE;
if (ut == NULL)
ut = get_bsdthread_info(current_thread());
if (bap && inflight && !ut->uu_throttle_bc) {
thread_throttle_level = GET_BUFATTR_IO_TIER(bap);
if (ISSET(bap->ba_flags, BA_IO_TIER_UPGRADE)) {
thread_throttle_level--;
}
} else {
thread_throttle_level = throttle_get_thread_throttle_level(ut);
}
if (thread_throttle_level != THROTTLE_LEVEL_NONE) {
		if (!ISSET(flags, B_PASSIVE)) {
info->throttle_last_IO_pid[thread_throttle_level] = proc_selfpid();
if (inflight && !ut->uu_throttle_bc) {
if (NULL != bap) {
SET(bap->ba_flags, BA_STRATEGY_TRACKED_IO);
}
OSIncrementAtomic(&info->throttle_inflight_count[thread_throttle_level]);
} else {
microuptime(&info->throttle_window_start_timestamp[thread_throttle_level]);
}
KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_THROTTLE, OPEN_THROTTLE_WINDOW)) | DBG_FUNC_NONE,
current_proc()->p_pid, thread_throttle_level, 0, 0, 0);
}
microuptime(&info->throttle_last_IO_timestamp[thread_throttle_level]);
}
if (thread_throttle_level >= THROTTLE_LEVEL_THROTTLED) {
OSAddAtomic(1, &info->throttle_io_count);
throttle_info_set_initial_window(ut, info, FALSE, isssd);
}
return thread_throttle_level;
}
void *throttle_info_update_by_mount(mount_t mp)
{
struct _throttle_io_info_t *info;
uthread_t ut;
boolean_t isssd = FALSE;
ut = get_bsdthread_info(current_thread());
if (mp != NULL) {
if (disk_conditioner_mount_is_ssd(mp))
isssd = TRUE;
info = &_throttle_io_info[mp->mnt_devbsdunit];
} else
info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
if (!ut->uu_lowpri_window)
throttle_info_set_initial_window(ut, info, FALSE, isssd);
return info;
}
void throttle_info_update(void *throttle_info, int flags)
{
if (throttle_info)
throttle_info_update_internal(throttle_info, NULL, flags, FALSE, FALSE, NULL);
}
void throttle_info_update_by_mask(void *throttle_info_handle, int flags)
{
void *throttle_info = throttle_info_handle;
throttle_info_update(throttle_info, flags);
}
void throttle_info_disable_throttle(int devno, boolean_t isfusion)
{
struct _throttle_io_info_t *info;
if (devno < 0 || devno >= LOWPRI_MAX_NUM_DEV)
panic("Illegal devno (%d) passed into throttle_info_disable_throttle()", devno);
info = &_throttle_io_info[devno];
if (isfusion) {
info->throttle_is_fusion_with_priority = isfusion;
throttle_init_throttle_period(info, FALSE);
}
info->throttle_disabled = !info->throttle_is_fusion_with_priority;
return;
}
int throttle_info_io_will_be_throttled(void * throttle_info, int policy)
{
struct _throttle_io_info_t *info = throttle_info;
struct timeval elapsed;
uint64_t elapsed_msecs;
int throttle_level;
int thread_throttle_level;
switch (policy) {
case IOPOL_THROTTLE:
thread_throttle_level = THROTTLE_LEVEL_TIER3;
break;
case IOPOL_UTILITY:
thread_throttle_level = THROTTLE_LEVEL_TIER2;
break;
case IOPOL_STANDARD:
thread_throttle_level = THROTTLE_LEVEL_TIER1;
break;
default:
thread_throttle_level = THROTTLE_LEVEL_TIER0;
break;
}
for (throttle_level = THROTTLE_LEVEL_START; throttle_level < thread_throttle_level; throttle_level++) {
if (info->throttle_inflight_count[throttle_level]) {
break;
}
microuptime(&elapsed);
timevalsub(&elapsed, &info->throttle_window_start_timestamp[throttle_level]);
elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000);
if (elapsed_msecs < (uint64_t)throttle_windows_msecs[thread_throttle_level])
break;
}
if (throttle_level >= thread_throttle_level) {
return (THROTTLE_DISENGAGED);
}
return (THROTTLE_ENGAGED);
}
int throttle_lowpri_window(void)
{
struct uthread *ut = get_bsdthread_info(current_thread());
return ut->uu_lowpri_window;
}
#if CONFIG_IOSCHED
int upl_get_cached_tier(void *);
#endif
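/*
 * spec_strategy(): compute the effective I/O tier (honoring a tier
 * cached in the UPL, metadata promotion, and passive I/O), emit kdebug
 * and per-thread I/O accounting, update throttle state, then call the
 * driver's d_strategy.  Drivers may return the IO_SATISFIED_BY_CACHE /
 * IO_SHOULD_BE_THROTTLED hints defined below.
 */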
int
spec_strategy(struct vnop_strategy_args *ap)
{
buf_t bp;
int bflags;
int io_tier;
int passive;
dev_t bdev;
uthread_t ut;
mount_t mp;
struct bufattr *bap;
int strategy_ret;
struct _throttle_io_info_t *throttle_info;
boolean_t isssd = FALSE;
boolean_t inflight = FALSE;
boolean_t upgrade = FALSE;
int code = 0;
#if !CONFIG_EMBEDDED
proc_t curproc = current_proc();
#endif
bp = ap->a_bp;
bdev = buf_device(bp);
mp = buf_vnode(bp)->v_mount;
bap = &bp->b_attr;
#if CONFIG_IOSCHED
if (bp->b_flags & B_CLUSTER) {
io_tier = upl_get_cached_tier(bp->b_upl);
if (io_tier == -1)
io_tier = throttle_get_io_policy(&ut);
#if DEVELOPMENT || DEBUG
else {
int my_io_tier = throttle_get_io_policy(&ut);
if (io_tier != my_io_tier)
KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_THROTTLE, IO_TIER_UPL_MISMATCH)) | DBG_FUNC_NONE, buf_kernel_addrperm_addr(bp), my_io_tier, io_tier, 0, 0);
}
#endif
} else
io_tier = throttle_get_io_policy(&ut);
#else
io_tier = throttle_get_io_policy(&ut);
#endif
passive = throttle_get_passive_io_policy(&ut);
if (mp && io_tier > throttle_get_thread_throttle_level_internal(ut, io_tier)) {
#if CONFIG_IOSCHED
if (!(mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED)) {
upgrade = TRUE;
}
#else
upgrade = TRUE;
#endif
}
if (bp->b_flags & B_META)
bap->ba_flags |= BA_META;
#if CONFIG_IOSCHED
if (bap->ba_flags & BA_META) {
if (mp && (mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED)) {
if (bp->b_flags & B_READ) {
if (io_tier > IOSCHED_METADATA_TIER) {
io_tier = IOSCHED_METADATA_TIER;
passive = 1;
}
} else {
io_tier = IOSCHED_METADATA_TIER;
passive = 1;
}
}
}
#endif
SET_BUFATTR_IO_TIER(bap, io_tier);
if (passive) {
bp->b_flags |= B_PASSIVE;
bap->ba_flags |= BA_PASSIVE;
}
#if !CONFIG_EMBEDDED
if ((curproc != NULL) && ((curproc->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP))
bap->ba_flags |= BA_DELAYIDLESLEEP;
#endif
bflags = bp->b_flags;
if (((bflags & B_READ) == 0) && ((bflags & B_ASYNC) == 0))
bufattr_markquickcomplete(bap);
if (bflags & B_READ)
code |= DKIO_READ;
if (bflags & B_ASYNC)
code |= DKIO_ASYNC;
if (bap->ba_flags & BA_META)
code |= DKIO_META;
else if (bflags & B_PAGEIO)
code |= DKIO_PAGING;
if (io_tier != 0)
code |= DKIO_THROTTLE;
code |= ((io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK);
if (bflags & B_PASSIVE)
code |= DKIO_PASSIVE;
if (bap->ba_flags & BA_NOCACHE)
code |= DKIO_NOCACHE;
if (upgrade) {
code |= DKIO_TIER_UPGRADE;
SET(bap->ba_flags, BA_IO_TIER_UPGRADE);
}
if (kdebug_enable) {
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
buf_kernel_addrperm_addr(bp), bdev, (int)buf_blkno(bp), buf_count(bp), 0);
}
thread_update_io_stats(current_thread(), buf_count(bp), code);
if (mp != NULL) {
if (disk_conditioner_mount_is_ssd(mp))
isssd = TRUE;
if (mp->mnt_devbsdunit || (mp->mnt_throttle_mask != LOWPRI_MAX_NUM_DEV - 1 && mp->mnt_throttle_mask & 0x1)) {
inflight = TRUE;
}
throttle_info = &_throttle_io_info[mp->mnt_devbsdunit];
} else
throttle_info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
throttle_info_update_internal(throttle_info, ut, bflags, isssd, inflight, bap);
if ((bflags & B_READ) == 0) {
microuptime(&throttle_info->throttle_last_write_timestamp);
if (mp) {
mp->mnt_last_write_issued_timestamp = throttle_info->throttle_last_write_timestamp;
INCR_PENDING_IO(buf_count(bp), mp->mnt_pending_write_size);
}
} else if (mp) {
INCR_PENDING_IO(buf_count(bp), mp->mnt_pending_read_size);
}
#define IO_SATISFIED_BY_CACHE ((int)0xcafefeed)
#define IO_SHOULD_BE_THROTTLED ((int)0xcafebeef)
typedef int strategy_fcn_ret_t(struct buf *bp);
strategy_ret = (*(strategy_fcn_ret_t*)bdevsw[major(bdev)].d_strategy)(bp);
microuptime(&bp->b_timestamp_tv);
if (IO_SATISFIED_BY_CACHE == strategy_ret) {
throttle_info_reset_window(ut);
} else if (IO_SHOULD_BE_THROTTLED == strategy_ret) {
throttle_info_set_initial_window(ut, throttle_info, TRUE, isssd);
}
return (0);
}
int
spec_blockmap(__unused struct vnop_blockmap_args *ap)
{
return (ENOTSUP);
}
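/*
 * Close a special file.  When the controlling terminal is closed for
 * the last time, the session's tty references are cleared first.  The
 * device's d_close is only called when the last open instance goes away
 * (or the vnode is being revoked); block devices also flush and
 * invalidate their buffers before the final close.
 */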
int
spec_close(struct vnop_close_args *ap)
{
struct vnode *vp = ap->a_vp;
dev_t dev = vp->v_rdev;
int error = 0;
int flags = ap->a_fflag;
struct proc *p = vfs_context_proc(ap->a_context);
struct session *sessp;
switch (vp->v_type) {
case VCHR:
sessp = proc_session(p);
devsw_lock(dev, S_IFCHR);
if (sessp != SESSION_NULL) {
if (vp == sessp->s_ttyvp && vcount(vp) == 1) {
struct tty *tp = TTY_NULL;
devsw_unlock(dev, S_IFCHR);
session_lock(sessp);
if (vp == sessp->s_ttyvp) {
tp = SESSION_TP(sessp);
sessp->s_ttyvp = NULL;
sessp->s_ttyvid = 0;
sessp->s_ttyp = TTY_NULL;
sessp->s_ttypgrpid = NO_PID;
}
session_unlock(sessp);
if (tp != TTY_NULL) {
tty_lock(tp);
ttyclrpgrphup(tp);
tty_unlock(tp);
ttyfree(tp);
}
devsw_lock(dev, S_IFCHR);
}
session_rele(sessp);
}
if (--vp->v_specinfo->si_opencount < 0)
panic("negative open count (c, %u, %u)", major(dev), minor(dev));
if (vcount(vp) == 0 || (flags & IO_REVOKE) != 0)
error = cdevsw[major(dev)].d_close(dev, flags, S_IFCHR, p);
devsw_unlock(dev, S_IFCHR);
break;
case VBLK:
devsw_lock(dev, S_IFBLK);
if (vcount(vp) > 1) {
vp->v_specinfo->si_opencount--;
devsw_unlock(dev, S_IFBLK);
return (0);
}
devsw_unlock(dev, S_IFBLK);
if ((error = spec_fsync_internal(vp, MNT_WAIT, ap->a_context)))
return (error);
error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
if (error)
return (error);
devsw_lock(dev, S_IFBLK);
if (--vp->v_specinfo->si_opencount < 0)
panic("negative open count (b, %u, %u)", major(dev), minor(dev));
if (vcount(vp) == 0)
error = bdevsw[major(dev)].d_close(dev, flags, S_IFBLK, p);
devsw_unlock(dev, S_IFBLK);
break;
default:
panic("spec_close: not special");
		return (EBADF);
}
return error;
}
int
spec_pathconf(struct vnop_pathconf_args *ap)
{
switch (ap->a_name) {
case _PC_LINK_MAX:
*ap->a_retval = LINK_MAX;
return (0);
case _PC_MAX_CANON:
*ap->a_retval = MAX_CANON;
return (0);
case _PC_MAX_INPUT:
*ap->a_retval = MAX_INPUT;
return (0);
case _PC_PIPE_BUF:
*ap->a_retval = PIPE_BUF;
return (0);
case _PC_CHOWN_RESTRICTED:
*ap->a_retval = 200112;
return (0);
case _PC_VDISABLE:
*ap->a_retval = _POSIX_VDISABLE;
return (0);
default:
return (EINVAL);
}
}
int
spec_ebadf(__unused void *dummy)
{
return (EBADF);
}
int
spec_blktooff(struct vnop_blktooff_args *ap)
{
struct vnode *vp = ap->a_vp;
switch (vp->v_type) {
case VCHR:
*ap->a_offset = (off_t)-1;
return (ENOTSUP);
case VBLK:
printf("spec_blktooff: not implemented for VBLK\n");
*ap->a_offset = (off_t)-1;
return (ENOTSUP);
default:
panic("spec_blktooff type");
}
return (0);
}
int
spec_offtoblk(struct vnop_offtoblk_args *ap)
{
struct vnode *vp = ap->a_vp;
switch (vp->v_type) {
case VCHR:
*ap->a_lblkno = (daddr64_t)-1;
return (ENOTSUP);
case VBLK:
printf("spec_offtoblk: not implemented for VBLK\n");
*ap->a_lblkno = (daddr64_t)-1;
return (ENOTSUP);
default:
panic("spec_offtoblk type");
}
return (0);
}
static void filt_specdetach(struct knote *kn);
static int filt_specevent(struct knote *kn, long hint);
static int filt_spectouch(struct knote *kn, struct kevent_internal_s *kev);
static int filt_specprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
static unsigned filt_specpeek(struct knote *kn);
SECURITY_READ_ONLY_EARLY(struct filterops) spec_filtops = {
.f_isfd = 1,
.f_attach = filt_specattach,
.f_detach = filt_specdetach,
.f_event = filt_specevent,
.f_touch = filt_spectouch,
.f_process = filt_specprocess,
.f_peek = filt_specpeek
};
#define selinfo_from_waitq(wq) \
qe_element((wq), struct selinfo, si_waitq)
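/*
 * Run the device's select routine on behalf of a knote, temporarily
 * pointing the uthread's waitq set at the kqueue so the driver's
 * selinfo waitq gets linked to it.  Returns the select result; a
 * consumed link reservation tells us which selinfo we attached to.
 */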
static int
spec_knote_select_and_link(struct knote *kn)
{
uthread_t uth;
vfs_context_t ctx;
vnode_t vp;
struct waitq_set *old_wqs;
uint64_t rsvd, rsvd_arg;
uint64_t *rlptr = NULL;
struct selinfo *si = NULL;
int selres = 0;
uth = get_bsdthread_info(current_thread());
ctx = vfs_context_current();
vp = (vnode_t)kn->kn_fp->f_fglob->fg_data;
int error = vnode_getwithvid(vp, kn->kn_hookid);
if (error != 0) {
knote_set_error(kn, ENOENT);
return 0;
}
rsvd = rsvd_arg = waitq_link_reserve(NULL);
rlptr = (void *)&rsvd_arg;
old_wqs = uth->uu_wqset;
uth->uu_wqset = &(knote_get_kq(kn)->kq_wqs);
selres = VNOP_SELECT(vp, knote_get_seltype(kn), 0, rlptr, ctx);
uth->uu_wqset = old_wqs;
waitq_link_release(rsvd);
if (rsvd != rsvd_arg) {
struct waitq *wq;
memcpy(&wq, rlptr, sizeof(void *));
si = selinfo_from_waitq(wq);
kn->kn_hook_data = waitq_get_prepost_id(wq);
} else if (selres == 0) {
knote_set_error(kn, ENODEV);
}
vnode_put(vp);
return selres;
}
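/*
 * Translate a select-style result into kn_data.  Drivers flagged
 * CDEVSW_USE_OFFSET report an absolute size, so the remaining byte
 * count is computed relative to the file offset.
 */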
static void filt_spec_common(struct knote *kn, int selres)
{
if (kn->kn_vnode_use_ofst) {
if (kn->kn_fp->f_fglob->fg_offset >= (uint32_t)selres) {
kn->kn_data = 0;
} else {
kn->kn_data = ((uint32_t)selres) - kn->kn_fp->f_fglob->fg_offset;
}
} else {
kn->kn_data = selres;
}
}
static int
filt_specattach(struct knote *kn, __unused struct kevent_internal_s *kev)
{
vnode_t vp;
dev_t dev;
vp = (vnode_t)kn->kn_fp->f_fglob->fg_data;
assert(vnode_ischr(vp));
dev = vnode_specrdev(vp);
if (!kn->kn_vnode_kqok &&
((kn->kn_sfflags & NOTE_LOWAT) == 0 || kn->kn_sdata != 1)) {
knote_set_error(kn, EINVAL);
return 0;
}
if (cdevsw_flags[major(dev)] & CDEVSW_IS_PTC) {
kn->kn_vnode_kqok = 0;
}
kn->kn_filtid = EVFILTID_SPEC;
kn->kn_hook_data = 0;
kn->kn_hookid = vnode_vid(vp);
knote_markstayactive(kn);
return spec_knote_select_and_link(kn);
}
static void
filt_specdetach(struct knote *kn)
{
knote_clearstayactive(kn);
if (kn->kn_hook_data) {
waitq_unlink_by_prepost_id(kn->kn_hook_data, &(knote_get_kq(kn)->kq_wqs));
kn->kn_hook_data = 0;
}
}
static int
filt_specevent(struct knote *kn, __unused long hint)
{
panic("filt_specevent(%p)", kn);
return 0;
}
static int
filt_spectouch(struct knote *kn, struct kevent_internal_s *kev)
{
kn->kn_sdata = kev->data;
kn->kn_sfflags = kev->fflags;
if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
kn->kn_udata = kev->udata;
if (kev->flags & EV_ENABLE) {
return spec_knote_select_and_link(kn);
}
return 0;
}
static int
filt_specprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
{
#pragma unused(data)
vnode_t vp;
uthread_t uth;
vfs_context_t ctx;
int res;
int selres;
int error;
uth = get_bsdthread_info(current_thread());
ctx = vfs_context_current();
vp = (vnode_t)kn->kn_fp->f_fglob->fg_data;
error = vnode_getwithvid(vp, kn->kn_hookid);
if (error != 0) {
kn->kn_flags |= (EV_EOF | EV_ONESHOT);
*kev = kn->kn_kevent;
return 1;
}
selres = spec_knote_select_and_link(kn);
filt_spec_common(kn, selres);
vnode_put(vp);
res = ((kn->kn_sfflags & NOTE_LOWAT) != 0) ?
(kn->kn_data >= kn->kn_sdata) : kn->kn_data;
if (res) {
*kev = kn->kn_kevent;
if (kn->kn_flags & EV_CLEAR) {
kn->kn_fflags = 0;
kn->kn_data = 0;
}
}
return res;
}
static unsigned
filt_specpeek(struct knote *kn)
{
int selres = 0;
selres = spec_knote_select_and_link(kn);
filt_spec_common(kn, selres);
return kn->kn_data;
}