#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <libkern/libkern.h>
#include <machine/machine_routines.h>
#include <sys/ubc.h>
#include <vm/vm_pageout.h>
#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <sys/kdebug.h>
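/*
 * Internal flags passed to cluster_io() to describe the type of
 * transfer being set up.  These values are private to this file.
 */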
#define CL_READ 0x01
#define CL_ASYNC 0x02
#define CL_COMMIT 0x04
#define CL_PAGEOUT 0x10
#define CL_AGE 0x20
#define CL_DUMP 0x40
#define CL_NOZERO 0x80
#define CL_PAGEIN 0x100
#define CL_DEV_MEMORY 0x200
#define CL_PRESERVE 0x400
#define CL_THROTTLE 0x800
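/*
 * Per-request state shared between an issuer of asynchronous cluster
 * I/O and cluster_iodone().  io_issued and io_completed track byte
 * counts for the request; io_wanted flags that the issuer is asleep
 * waiting on this structure.
 */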
struct clios {
u_int io_completed;
u_int io_issued;
int io_error;
int io_wanted;
};
static void cluster_zero(upl_t upl, vm_offset_t upl_offset,
int size, struct buf *bp);
static int cluster_read_x(struct vnode *vp, struct uio *uio,
off_t filesize, int devblocksize, int flags);
static int cluster_write_x(struct vnode *vp, struct uio *uio,
off_t oldEOF, off_t newEOF, off_t headOff,
off_t tailOff, int devblocksize, int flags);
static int cluster_nocopy_read(struct vnode *vp, struct uio *uio,
off_t filesize, int devblocksize, int flags);
static int cluster_nocopy_write(struct vnode *vp, struct uio *uio,
off_t newEOF, int devblocksize, int flags);
static int cluster_phys_read(struct vnode *vp, struct uio *uio,
off_t filesize, int devblocksize, int flags);
static int cluster_phys_write(struct vnode *vp, struct uio *uio,
off_t newEOF, int devblocksize, int flags);
static int cluster_align_phys_io(struct vnode *vp, struct uio *uio,
addr64_t usr_paddr, int xsize, int devblocksize, int flags);
static int cluster_push_x(struct vnode *vp, off_t EOF, unsigned int first, unsigned int last, int can_delay);
static int cluster_try_push(struct vnode *vp, off_t EOF, int can_delay, int push_all);
static int sparse_cluster_switch(struct vnode *vp, off_t EOF);
static int sparse_cluster_push(struct vnode *vp, off_t EOF, int push_all);
static int sparse_cluster_add(struct vnode *vp, off_t EOF, daddr_t first, daddr_t last);
static kern_return_t vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, int *setcountp);
static kern_return_t vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length);
static kern_return_t vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp);
static kern_return_t vfs_drt_control(void **cmapp, int op_type);
int ubc_page_op_with_control __P((memory_object_control_t, off_t, int, ppnum_t *, int *));
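/*
 * Throttling parameters: ASYNC_THROTTLE bounds the number of async
 * buffers outstanding per vnode (checked against v_numoutput); the
 * HARD_THROTTLE values clamp the count and size of I/Os issued
 * against the root device while priority I/O is in flight.
 */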
#define ASYNC_THROTTLE 18
#define HARD_THROTTLE_MAXCNT 1
#define HARD_THROTTLE_MAXSIZE (64 * 1024)
int hard_throttle_on_root = 0;
struct timeval priority_IO_timestamp_for_root;
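/*
 * Decide whether I/O against this vnode should be hard-throttled.
 * Returns 1 if the vnode lives on the root device and either the
 * hard_throttle_on_root override is set or priority I/O to the root
 * has been seen within the last 300 msec; 0 otherwise.
 */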
static int
cluster_hard_throttle_on(vp)
struct vnode *vp;
{
static struct timeval hard_throttle_maxelapsed = { 0, 300000 };
if (vp->v_mount->mnt_kern_flag & MNTK_ROOTDEV) {
struct timeval elapsed;
if (hard_throttle_on_root)
return(1);
elapsed = time;
timevalsub(&elapsed, &priority_IO_timestamp_for_root);
if (timevalcmp(&elapsed, &hard_throttle_maxelapsed, <))
return(1);
}
return(0);
}
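/*
 * Completion handler for a transaction of cluster bufs.  Returns
 * early until every buf in the chain is marked B_DONE, then gathers
 * error and resid totals, frees the chain, wakes any throttled
 * writers and clios waiters, finishes the caller's real buf if one
 * was attached, and commits or aborts the UPL range according to how
 * the I/O completed.
 */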
static int
cluster_iodone(bp)
struct buf *bp;
{
int b_flags;
int error;
int total_size;
int total_resid;
int upl_offset;
int zero_offset;
upl_t upl;
struct buf *cbp;
struct buf *cbp_head;
struct buf *cbp_next;
struct buf *real_bp;
struct vnode *vp;
struct clios *iostate;
int commit_size;
int pg_offset;
cbp_head = (struct buf *)(bp->b_trans_head);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_START,
(int)cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) {
if ( !(cbp->b_flags & B_DONE)) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
(int)cbp_head, (int)cbp, cbp->b_bcount, cbp->b_flags, 0);
return 0;
}
}
error = 0;
total_size = 0;
total_resid = 0;
cbp = cbp_head;
upl_offset = cbp->b_uploffset;
upl = cbp->b_pagelist;
b_flags = cbp->b_flags;
real_bp = cbp->b_real_bp;
vp = cbp->b_vp;
zero_offset = cbp->b_validend;
iostate = (struct clios *)cbp->b_iostate;
while (cbp) {
if ((cbp->b_flags & B_ERROR) && error == 0)
error = cbp->b_error;
total_resid += cbp->b_resid;
total_size += cbp->b_bcount;
cbp_next = cbp->b_trans_next;
free_io_buf(cbp);
cbp = cbp_next;
}
if (zero_offset)
cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp);
if ((vp->v_flag & VTHROTTLED) && (vp->v_numoutput <= (ASYNC_THROTTLE / 3))) {
vp->v_flag &= ~VTHROTTLED;
wakeup((caddr_t)&vp->v_numoutput);
}
if (iostate) {
if (error && iostate->io_error == 0)
iostate->io_error = error;
iostate->io_completed += total_size;
if (iostate->io_wanted) {
iostate->io_wanted = 0;
wakeup((caddr_t)&iostate->io_wanted);
}
}
if ((b_flags & B_NEED_IODONE) && real_bp) {
if (error) {
real_bp->b_flags |= B_ERROR;
real_bp->b_error = error;
}
real_bp->b_resid = total_resid;
biodone(real_bp);
}
if (error == 0 && total_resid)
error = EIO;
if (b_flags & B_COMMIT_UPL) {
pg_offset = upl_offset & PAGE_MASK;
commit_size = (pg_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
if (error || (b_flags & B_NOCACHE)) {
int upl_abort_code;
if ((b_flags & B_PAGEOUT) && (error != ENXIO))
upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
else if (b_flags & B_PGIN)
upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
else
upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
ubc_upl_abort_range(upl, upl_offset - pg_offset, commit_size,
upl_abort_code);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
(int)upl, upl_offset - pg_offset, commit_size,
0x80000000|upl_abort_code, 0);
} else {
int upl_commit_flags = UPL_COMMIT_FREE_ON_EMPTY;
if (b_flags & B_PHYS) {
if (b_flags & B_READ)
upl_commit_flags |= UPL_COMMIT_SET_DIRTY;
} else if ( !(b_flags & B_PAGEOUT))
upl_commit_flags |= UPL_COMMIT_CLEAR_DIRTY;
if (b_flags & B_AGE)
upl_commit_flags |= UPL_COMMIT_INACTIVATE;
ubc_upl_commit_range(upl, upl_offset - pg_offset, commit_size,
upl_commit_flags);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
(int)upl, upl_offset - pg_offset, commit_size,
upl_commit_flags, 0);
}
} else
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END,
(int)upl, upl_offset, 0, error, 0);
return (error);
}
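/*
 * Zero-fill 'size' bytes of the UPL starting at upl_offset.  If the
 * caller's buf has a kernel mapping (bp->b_data), zero through that
 * mapping; otherwise zero the physical pages directly with
 * bzero_phys().
 */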
static void
cluster_zero(upl, upl_offset, size, bp)
upl_t upl;
vm_offset_t upl_offset;
int size;
struct buf *bp;
{
upl_page_info_t *pl;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_START,
upl_offset, size, (int)bp, 0, 0);
if (bp == NULL || bp->b_data == NULL) {
pl = ubc_upl_pageinfo(upl);
while (size) {
int page_offset;
int page_index;
addr64_t zero_addr;
int zero_cnt;
page_index = upl_offset / PAGE_SIZE;
page_offset = upl_offset & PAGE_MASK;
zero_addr = ((addr64_t)upl_phys_page(pl, page_index) << 12) + page_offset;
zero_cnt = min(PAGE_SIZE - page_offset, size);
bzero_phys(zero_addr, zero_cnt);
size -= zero_cnt;
upl_offset += zero_cnt;
}
} else
bzero((caddr_t)((vm_offset_t)bp->b_data + upl_offset), size);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_END,
upl_offset, size, 0, 0, 0);
}
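/*
 * cluster_io() is the common engine for all cluster transfers: it
 * carves the request into device-block-rounded chunks, maps each
 * chunk to disk with VOP_CMAP, builds a chain of I/O bufs over the
 * UPL, and issues them via VOP_STRATEGY.  Synchronous callers reap
 * completion through cluster_iodone(); CL_ASYNC callers complete via
 * the B_CALL/b_iodone hook.  Holes encountered on reads are
 * zero-filled rather than read from disk.
 */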
static int
cluster_io(vp, upl, upl_offset, f_offset, non_rounded_size, devblocksize, flags, real_bp, iostate)
struct vnode *vp;
upl_t upl;
vm_offset_t upl_offset;
off_t f_offset;
int non_rounded_size;
int devblocksize;
int flags;
struct buf *real_bp;
struct clios *iostate;
{
struct buf *cbp;
u_int size;
u_int io_size;
int io_flags;
int error = 0;
int retval = 0;
struct buf *cbp_head = 0;
struct buf *cbp_tail = 0;
int buf_count = 0;
int pg_count;
int pg_offset;
u_int max_iosize;
u_int max_vectors;
int priv;
int zero_offset = 0;
int async_throttle;
if (devblocksize)
size = (non_rounded_size + (devblocksize - 1)) & ~(devblocksize - 1);
else
size = non_rounded_size;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START,
(int)f_offset, size, upl_offset, flags, 0);
if (flags & CL_READ) {
io_flags = (B_VECTORLIST | B_READ);
vfs_io_attributes(vp, B_READ, &max_iosize, &max_vectors);
} else {
io_flags = (B_VECTORLIST | B_WRITEINPROG);
vfs_io_attributes(vp, B_WRITE, &max_iosize, &max_vectors);
}
max_iosize &= ~PAGE_MASK;
if (flags & CL_THROTTLE) {
if ( !(flags & CL_PAGEOUT) && cluster_hard_throttle_on(vp)) {
if (max_iosize > HARD_THROTTLE_MAXSIZE)
max_iosize = HARD_THROTTLE_MAXSIZE;
async_throttle = HARD_THROTTLE_MAXCNT;
} else
async_throttle = ASYNC_THROTTLE;
}
if (flags & CL_AGE)
io_flags |= B_AGE;
if (flags & CL_DUMP)
io_flags |= B_NOCACHE;
if (flags & CL_PAGEIN)
io_flags |= B_PGIN;
if (flags & CL_PAGEOUT)
io_flags |= B_PAGEOUT;
if (flags & CL_COMMIT)
io_flags |= B_COMMIT_UPL;
if (flags & CL_PRESERVE)
io_flags |= B_PHYS;
if ((flags & CL_READ) && ((upl_offset + non_rounded_size) & PAGE_MASK) && (!(flags & CL_NOZERO))) {
zero_offset = upl_offset + non_rounded_size;
}
while (size) {
int vsize;
int i;
int pg_resid;
int num_contig;
daddr_t lblkno;
daddr_t blkno;
if (size > max_iosize)
io_size = max_iosize;
else
io_size = size;
if (error = VOP_CMAP(vp, f_offset, io_size, &blkno, (size_t *)&io_size, NULL)) {
if (error == EOPNOTSUPP)
panic("VOP_CMAP Unimplemented");
break;
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 24)) | DBG_FUNC_NONE,
(int)f_offset, (int)blkno, io_size, zero_offset, 0);
if ( (!(flags & CL_READ) && (long)blkno == -1) || io_size == 0) {
if (flags & CL_PAGEOUT) {
error = EINVAL;
break;
}
ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
if (ubc_pushdirty_range(vp, f_offset, PAGE_SIZE_64) == 0) {
error = EINVAL;
break;
}
f_offset += PAGE_SIZE_64;
upl_offset += PAGE_SIZE;
size -= PAGE_SIZE;
continue;
}
lblkno = (daddr_t)(f_offset / PAGE_SIZE_64);
pg_offset = upl_offset & PAGE_MASK;
if (flags & CL_DEV_MEMORY) {
if ((long)blkno == -1) {
error = EINVAL;
break;
}
pg_count = 1;
} else
pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;
if ((flags & CL_READ) && (long)blkno == -1) {
int bytes_to_zero;
if (zero_offset && io_size == size) {
bytes_to_zero = (((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset;
zero_offset = 0;
} else
bytes_to_zero = io_size;
cluster_zero(upl, upl_offset, bytes_to_zero, real_bp);
if (cbp_head)
pg_count = (io_size - pg_offset) / PAGE_SIZE;
else {
pg_count = (io_size + pg_offset) / PAGE_SIZE;
pg_offset = 0;
}
if (io_size == size && ((upl_offset + io_size) & PAGE_MASK))
pg_count++;
if (pg_count) {
if (pg_offset)
pg_resid = PAGE_SIZE - pg_offset;
else
pg_resid = 0;
if (flags & CL_COMMIT)
ubc_upl_commit_range(upl,
(upl_offset + pg_resid) & ~PAGE_MASK,
pg_count * PAGE_SIZE,
UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY);
}
upl_offset += io_size;
f_offset += io_size;
size -= io_size;
if (cbp_head && pg_count)
goto start_io;
continue;
} else if (real_bp && (real_bp->b_blkno == real_bp->b_lblkno)) {
real_bp->b_blkno = blkno;
}
if (pg_count > max_vectors) {
io_size -= (pg_count - max_vectors) * PAGE_SIZE;
/*
 * io_size is unsigned, so the underflow check must be made on
 * a signed copy; comparing a u_int against 0 with '<' would
 * never fire.
 */
if ((int)io_size < 0) {
io_size = PAGE_SIZE - pg_offset;
pg_count = 1;
} else
pg_count = max_vectors;
}
if ( !(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV))
priv = 1;
else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT))
priv = 0;
else
priv = 1;
cbp = alloc_io_buf(vp, priv);
if (flags & CL_PAGEOUT) {
for (i = 0; i < pg_count; i++) {
int s;
struct buf *bp;
s = splbio();
if (bp = incore(vp, lblkno + i)) {
if (!ISSET(bp->b_flags, B_BUSY)) {
bremfree(bp);
SET(bp->b_flags, (B_BUSY | B_INVAL));
splx(s);
brelse(bp);
} else
panic("BUSY bp found in cluster_io");
}
splx(s);
}
}
if (flags & CL_ASYNC) {
cbp->b_flags |= (B_CALL | B_ASYNC);
cbp->b_iodone = (void *)cluster_iodone;
}
cbp->b_flags |= io_flags;
cbp->b_lblkno = lblkno;
cbp->b_blkno = blkno;
cbp->b_bcount = io_size;
cbp->b_pagelist = upl;
cbp->b_uploffset = upl_offset;
cbp->b_trans_next = (struct buf *)0;
if (cbp->b_iostate = (void *)iostate)
iostate->io_issued += io_size;
if (flags & CL_READ)
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 26)) | DBG_FUNC_NONE,
cbp->b_lblkno, cbp->b_blkno, upl_offset, io_size, 0);
else
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 27)) | DBG_FUNC_NONE,
cbp->b_lblkno, cbp->b_blkno, upl_offset, io_size, 0);
if (cbp_head) {
cbp_tail->b_trans_next = cbp;
cbp_tail = cbp;
} else {
cbp_head = cbp;
cbp_tail = cbp;
}
(struct buf *)(cbp->b_trans_head) = cbp_head;
buf_count++;
upl_offset += io_size;
f_offset += io_size;
size -= io_size;
if ( (!(upl_offset & PAGE_MASK) && !(flags & CL_DEV_MEMORY) && ((flags & CL_ASYNC) || buf_count > 8)) || size == 0) {
start_io:
if (real_bp) {
cbp_head->b_flags |= B_NEED_IODONE;
cbp_head->b_real_bp = real_bp;
} else
cbp_head->b_real_bp = (struct buf *)NULL;
if (size == 0) {
cbp_head->b_validend = zero_offset;
} else
cbp_head->b_validend = 0;
if (flags & CL_THROTTLE) {
while (vp->v_numoutput >= async_throttle) {
vp->v_flag |= VTHROTTLED;
tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "cluster_io", 0);
}
}
for (cbp = cbp_head; cbp;) {
struct buf * cbp_next;
if (io_flags & B_WRITEINPROG)
cbp->b_vp->v_numoutput++;
cbp_next = cbp->b_trans_next;
(void) VOP_STRATEGY(cbp);
cbp = cbp_next;
}
if ( !(flags & CL_ASYNC)) {
for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next)
biowait(cbp);
if (error = cluster_iodone(cbp_head)) {
if ((flags & CL_PAGEOUT) && (error == ENXIO))
retval = 0;
else
retval = error;
error = 0;
}
}
cbp_head = (struct buf *)0;
cbp_tail = (struct buf *)0;
buf_count = 0;
}
}
if (error) {
int abort_size;
io_size = 0;
for (cbp = cbp_head; cbp;) {
struct buf * cbp_next;
upl_offset -= cbp->b_bcount;
size += cbp->b_bcount;
io_size += cbp->b_bcount;
cbp_next = cbp->b_trans_next;
free_io_buf(cbp);
cbp = cbp_next;
}
if (iostate) {
if (iostate->io_error == 0)
iostate->io_error = error;
iostate->io_issued -= io_size;
if (iostate->io_wanted) {
iostate->io_wanted = 0;
wakeup((caddr_t)&iostate->io_wanted);
}
}
pg_offset = upl_offset & PAGE_MASK;
abort_size = (size + pg_offset + (PAGE_SIZE - 1)) & ~PAGE_MASK;
if (flags & CL_COMMIT) {
int upl_abort_code;
if (flags & CL_PRESERVE) {
ubc_upl_commit_range(upl, upl_offset - pg_offset, abort_size,
UPL_COMMIT_FREE_ON_EMPTY);
} else {
if ((flags & CL_PAGEOUT) && (error != ENXIO))
upl_abort_code = UPL_ABORT_FREE_ON_EMPTY;
else if (flags & CL_PAGEIN)
upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR;
else
upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES;
ubc_upl_abort_range(upl, upl_offset - pg_offset, abort_size,
upl_abort_code);
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE,
(int)upl, upl_offset - pg_offset, abort_size, error, 0);
}
if (real_bp) {
real_bp->b_flags |= B_ERROR;
real_bp->b_error = error;
biodone(real_bp);
}
if (retval == 0)
retval = error;
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_END,
(int)f_offset, size, upl_offset, retval, 0);
return (retval);
}
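/*
 * Issue an advisory read of up to MAX_UPL_TRANSFER pages ahead of
 * the current request, clipped to the end of file.  Returns the
 * number of pages submitted for prefetch (0 if already past EOF).
 */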
static int
cluster_rd_prefetch(vp, f_offset, size, filesize, devblocksize)
struct vnode *vp;
off_t f_offset;
u_int size;
off_t filesize;
int devblocksize;
{
int pages_in_prefetch;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START,
(int)f_offset, size, (int)filesize, 0, 0);
if (f_offset >= filesize) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
(int)f_offset, 0, 0, 0, 0);
return(0);
}
if (size > (MAX_UPL_TRANSFER * PAGE_SIZE))
size = (MAX_UPL_TRANSFER * PAGE_SIZE);
else
size = (size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
if ((off_t)size > (filesize - f_offset))
size = filesize - f_offset;
pages_in_prefetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
advisory_read(vp, filesize, f_offset, size, devblocksize);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END,
(int)f_offset + size, pages_in_prefetch, 0, 1, 0);
return (pages_in_prefetch);
}
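/*
 * Sequential read-ahead driver.  Detects whether the current request
 * extends the previous one; on a miss it resets the read-ahead
 * window, and on sequential access it doubles v_ralen (capped at
 * MAX_UPL_TRANSFER pages) and prefetches beyond the larger of the
 * request and the previous read-ahead high-water mark (v_maxra).
 */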
static void
cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize)
struct vnode *vp;
daddr_t b_lblkno;
daddr_t e_lblkno;
off_t filesize;
int devblocksize;
{
daddr_t r_lblkno;
off_t f_offset;
int size_of_prefetch;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START,
b_lblkno, e_lblkno, vp->v_lastr, 0, 0);
if (b_lblkno == vp->v_lastr && b_lblkno == e_lblkno) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
vp->v_ralen, vp->v_maxra, vp->v_lastr, 0, 0);
return;
}
if (vp->v_lastr == -1 || (b_lblkno != vp->v_lastr && b_lblkno != (vp->v_lastr + 1) &&
(b_lblkno != (vp->v_maxra + 1) || vp->v_ralen == 0))) {
vp->v_ralen = 0;
vp->v_maxra = 0;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
vp->v_ralen, vp->v_maxra, vp->v_lastr, 1, 0);
return;
}
if (e_lblkno < vp->v_maxra) {
if ((vp->v_maxra - e_lblkno) > (MAX_UPL_TRANSFER / 4)) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
vp->v_ralen, vp->v_maxra, vp->v_lastr, 2, 0);
return;
}
}
r_lblkno = max(e_lblkno, vp->v_maxra) + 1;
f_offset = (off_t)r_lblkno * PAGE_SIZE_64;
size_of_prefetch = 0;
ubc_range_op(vp, f_offset, f_offset + PAGE_SIZE_64, UPL_ROP_PRESENT, &size_of_prefetch);
if (size_of_prefetch) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
vp->v_ralen, vp->v_maxra, vp->v_lastr, 3, 0);
return;
}
if (f_offset < filesize) {
vp->v_ralen = vp->v_ralen ? min(MAX_UPL_TRANSFER, vp->v_ralen << 1) : 1;
if (((e_lblkno + 1) - b_lblkno) > vp->v_ralen)
vp->v_ralen = min(MAX_UPL_TRANSFER, (e_lblkno + 1) - b_lblkno);
size_of_prefetch = cluster_rd_prefetch(vp, f_offset, vp->v_ralen * PAGE_SIZE, filesize, devblocksize);
if (size_of_prefetch)
vp->v_maxra = (r_lblkno + size_of_prefetch) - 1;
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
vp->v_ralen, vp->v_maxra, vp->v_lastr, 4, 0);
}
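/*
 * VOP_PAGEOUT back end: validate the request against EOF and page
 * alignment, trim the transfer to the file size, and push the dirty
 * pages through cluster_io() with CL_PAGEOUT (throttled unless the
 * file system sits on a virtual device).
 */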
int
cluster_pageout(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flags)
struct vnode *vp;
upl_t upl;
vm_offset_t upl_offset;
off_t f_offset;
int size;
off_t filesize;
int devblocksize;
int flags;
{
int io_size;
int rounded_size;
off_t max_size;
int local_flags;
if (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV)
local_flags = CL_PAGEOUT;
else
local_flags = CL_PAGEOUT | CL_THROTTLE;
if ((flags & UPL_IOSYNC) == 0)
local_flags |= CL_ASYNC;
if ((flags & UPL_NOCOMMIT) == 0)
local_flags |= CL_COMMIT;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 52)) | DBG_FUNC_NONE,
(int)f_offset, size, (int)filesize, local_flags, 0);
if (size <= 0)
return (EINVAL);
if (vp->v_mount->mnt_flag & MNT_RDONLY) {
if (local_flags & CL_COMMIT)
ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
return (EROFS);
}
if (f_offset < 0 || f_offset >= filesize ||
(f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) {
if (local_flags & CL_COMMIT)
ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
return (EINVAL);
}
max_size = filesize - f_offset;
if (size < max_size)
io_size = size;
else
io_size = max_size;
rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
if (size > rounded_size) {
if (local_flags & CL_COMMIT)
ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size,
UPL_ABORT_FREE_ON_EMPTY);
}
vp->v_flag |= VHASBEENPAGED;
return (cluster_io(vp, upl, upl_offset, f_offset, io_size, devblocksize,
local_flags, (struct buf *)0, (struct clios *)0));
}
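/*
 * VOP_PAGEIN back end: validate alignment and EOF, abort any pages
 * beyond the rounded transfer, read the remainder through
 * cluster_io() with CL_PAGEIN, and kick off read-ahead for
 * single-page sequential faults.
 */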
int
cluster_pagein(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flags)
struct vnode *vp;
upl_t upl;
vm_offset_t upl_offset;
off_t f_offset;
int size;
off_t filesize;
int devblocksize;
int flags;
{
u_int io_size;
int rounded_size;
off_t max_size;
int retval;
int local_flags = 0;
if (upl == NULL || size < 0)
panic("cluster_pagein: NULL upl passed in");
if ((flags & UPL_IOSYNC) == 0)
local_flags |= CL_ASYNC;
if ((flags & UPL_NOCOMMIT) == 0)
local_flags |= CL_COMMIT;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 56)) | DBG_FUNC_NONE,
(int)f_offset, size, (int)filesize, local_flags, 0);
if (f_offset < 0 || f_offset >= filesize ||
(f_offset & PAGE_MASK_64) || (size & PAGE_MASK) || (upl_offset & PAGE_MASK)) {
if (local_flags & CL_COMMIT)
ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
return (EINVAL);
}
max_size = filesize - f_offset;
if (size < max_size)
io_size = size;
else
io_size = max_size;
rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
if (size > rounded_size && (local_flags & CL_COMMIT))
ubc_upl_abort_range(upl, upl_offset + rounded_size,
size - rounded_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
retval = cluster_io(vp, upl, upl_offset, f_offset, io_size, devblocksize,
local_flags | CL_READ | CL_PAGEIN, (struct buf *)0, (struct clios *)0);
if (retval == 0) {
int b_lblkno;
int e_lblkno;
b_lblkno = (int)(f_offset / PAGE_SIZE_64);
e_lblkno = (int)((f_offset + ((off_t)io_size - 1)) / PAGE_SIZE_64);
if (!(flags & UPL_NORDAHEAD) && !(vp->v_flag & VRAOFF) && rounded_size == PAGE_SIZE) {
cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize);
}
vp->v_lastr = e_lblkno;
}
return (retval);
}
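/*
 * Issue the I/O described by an already-constructed buf whose pages
 * are attached as a UPL; lets a file system push a buf through the
 * cluster layer asynchronously.
 */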
int
cluster_bp(bp)
struct buf *bp;
{
off_t f_offset;
int flags;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 19)) | DBG_FUNC_START,
(int)bp, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0);
if (bp->b_pagelist == (upl_t) 0)
panic("cluster_bp: can't handle NULL upl yet\n");
if (bp->b_flags & B_READ)
flags = CL_ASYNC | CL_READ;
else
flags = CL_ASYNC;
f_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno);
return (cluster_io(bp->b_vp, bp->b_pagelist, 0, f_offset, bp->b_bcount, 0, flags, bp, (struct clios *)0));
}
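/*
 * Top-level cluster write.  Cached files (and non-user requests) go
 * straight to cluster_write_x().  For VNOCACHE_DATA files the user
 * buffer is probed with vm_map_get_upl(): physically contiguous
 * buffers take cluster_phys_write(), page-aligned buffers take the
 * zero-copy cluster_nocopy_write() path, and misaligned pieces are
 * clipped and written through the cached path so the remainder
 * becomes page aligned.  Note that the tail zero-fill after a
 * contiguous write reuses the head-zero-fill mechanism with a NULL
 * uio.
 */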
int
cluster_write(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
struct vnode *vp;
struct uio *uio;
off_t oldEOF;
off_t newEOF;
off_t headOff;
off_t tailOff;
int devblocksize;
int flags;
{
int prev_resid;
int clip_size;
off_t max_io_size;
struct iovec *iov;
int upl_size;
int upl_flags;
upl_t upl;
int retval = 0;
if (vp->v_flag & VHASBEENPAGED)
{
cluster_push(vp);
vp->v_flag &= ~VHASBEENPAGED;
}
if ( (!(vp->v_flag & VNOCACHE_DATA)) || (!uio) || (uio->uio_segflg != UIO_USERSPACE))
{
return (cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags));
}
while (uio->uio_resid && uio->uio_offset < newEOF && retval == 0)
{
iov = uio->uio_iov;
while (iov->iov_len == 0) {
uio->uio_iov++;
uio->uio_iovcnt--;
iov = uio->uio_iov;
}
upl_size = PAGE_SIZE;
upl_flags = UPL_QUERY_OBJECT_TYPE;
if ((vm_map_get_upl(current_map(),
(vm_offset_t)iov->iov_base & ~PAGE_MASK,
&upl_size, &upl, NULL, NULL, &upl_flags, 0)) != KERN_SUCCESS)
{
return (EFAULT);
}
if (upl_flags & UPL_PHYS_CONTIG)
{
if (flags & IO_HEADZEROFILL)
{
flags &= ~IO_HEADZEROFILL;
if (retval = cluster_write_x(vp, (struct uio *)0, 0, uio->uio_offset, headOff, 0, devblocksize, IO_HEADZEROFILL))
return(retval);
}
retval = cluster_phys_write(vp, uio, newEOF, devblocksize, flags);
if (uio->uio_resid == 0 && (flags & IO_TAILZEROFILL))
{
return (cluster_write_x(vp, (struct uio *)0, 0, tailOff, uio->uio_offset, 0, devblocksize, IO_HEADZEROFILL));
}
}
else if ((uio->uio_resid < PAGE_SIZE) || (flags & (IO_TAILZEROFILL | IO_HEADZEROFILL)))
{
return (cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags));
}
else if (((int)uio->uio_offset & PAGE_MASK) || ((int)iov->iov_base & PAGE_MASK))
{
if (((int)uio->uio_offset & PAGE_MASK) == ((int)iov->iov_base & PAGE_MASK))
{
clip_size = (PAGE_SIZE - (uio->uio_offset & PAGE_MASK_64));
prev_resid = uio->uio_resid;
uio->uio_resid = clip_size;
retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
}
else
{
clip_size = iov->iov_len;
prev_resid = uio->uio_resid;
uio->uio_resid = clip_size;
retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
}
}
else
{
max_io_size = newEOF - uio->uio_offset;
clip_size = uio->uio_resid;
if (iov->iov_len < clip_size)
clip_size = iov->iov_len;
if (max_io_size < clip_size)
clip_size = max_io_size;
if (clip_size < PAGE_SIZE)
{
prev_resid = uio->uio_resid;
uio->uio_resid = clip_size;
retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
}
else
{
clip_size = clip_size & ~(PAGE_MASK);
prev_resid = uio->uio_resid;
uio->uio_resid = clip_size;
retval = cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags);
if ((retval == 0) && uio->uio_resid)
retval = cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
}
}
}
return(retval);
}
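/*
 * Zero-copy write: wire the user's pages with vm_map_get_upl()
 * (retrying with increasing force_data_sync up to 3 times), dump any
 * cached pages over the range, and issue the I/O directly from the
 * user buffer, allowing up to two MAX_UPL_TRANSFER-sized requests in
 * flight before sleeping on the clios state.
 */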
static int
cluster_nocopy_write(vp, uio, newEOF, devblocksize, flags)
struct vnode *vp;
struct uio *uio;
off_t newEOF;
int devblocksize;
int flags;
{
upl_t upl;
upl_page_info_t *pl;
off_t upl_f_offset;
vm_offset_t upl_offset;
off_t max_io_size;
int io_size;
int io_flag;
int upl_size;
int upl_needed_size;
int pages_in_pl;
int upl_flags;
kern_return_t kret;
struct iovec *iov;
int i;
int force_data_sync;
int error = 0;
struct clios iostate;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
(int)uio->uio_offset, (int)uio->uio_resid,
(int)newEOF, devblocksize, 0);
cluster_try_push(vp, newEOF, 0, 1);
iostate.io_completed = 0;
iostate.io_issued = 0;
iostate.io_error = 0;
iostate.io_wanted = 0;
iov = uio->uio_iov;
while (uio->uio_resid && uio->uio_offset < newEOF && error == 0) {
io_size = uio->uio_resid;
if (io_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
io_size = MAX_UPL_TRANSFER * PAGE_SIZE;
upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK;
upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
(int)upl_offset, upl_needed_size, (int)iov->iov_base, io_size, 0);
for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
pages_in_pl = 0;
upl_size = upl_needed_size;
upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
kret = vm_map_get_upl(current_map(),
(vm_offset_t)iov->iov_base & ~PAGE_MASK,
&upl_size,
&upl,
NULL,
&pages_in_pl,
&upl_flags,
force_data_sync);
if (kret != KERN_SUCCESS) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
0, 0, 0, kret, 0);
goto wait_for_writes;
}
pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
pages_in_pl = upl_size / PAGE_SIZE;
for (i = 0; i < pages_in_pl; i++) {
if (!upl_valid_page(pl, i))
break;
}
if (i == pages_in_pl)
break;
ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size,
UPL_ABORT_FREE_ON_EMPTY);
}
if (force_data_sync >= 3) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
i, pages_in_pl, upl_size, kret, 0);
goto wait_for_writes;
}
if (upl_size != upl_needed_size)
io_size = (upl_size - (int)upl_offset) & ~PAGE_MASK;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
(int)upl_offset, upl_size, (int)iov->iov_base, io_size, 0);
if (io_size == 0) {
ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size,
UPL_ABORT_FREE_ON_EMPTY);
goto wait_for_writes;
}
ubc_range_op(vp, uio->uio_offset, uio->uio_offset + io_size, UPL_ROP_DUMP, NULL);
while ((iostate.io_issued - iostate.io_completed) > (2 * MAX_UPL_TRANSFER * PAGE_SIZE)) {
iostate.io_wanted = 1;
tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_nocopy_write", 0);
}
if (iostate.io_error) {
ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size,
UPL_ABORT_FREE_ON_EMPTY);
goto wait_for_writes;
}
io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
(int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);
error = cluster_io(vp, upl, upl_offset, uio->uio_offset,
io_size, devblocksize, io_flag, (struct buf *)0, &iostate);
iov->iov_len -= io_size;
iov->iov_base += io_size;
uio->uio_resid -= io_size;
uio->uio_offset += io_size;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
(int)upl_offset, (int)uio->uio_offset, (int)uio->uio_resid, error, 0);
}
wait_for_writes:
while (iostate.io_issued != iostate.io_completed) {
iostate.io_wanted = 1;
tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_nocopy_write", 0);
}
if (iostate.io_error)
error = iostate.io_error;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
(int)uio->uio_offset, (int)uio->uio_resid, error, 4, 0);
return (error);
}
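/*
 * Write from a physically contiguous user buffer (e.g. a mapped
 * device region).  Head and tail fragments that are unaligned with
 * respect to the device block size go through
 * cluster_align_phys_io(); the aligned middle is issued as a single
 * CL_DEV_MEMORY transfer.
 */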
static int
cluster_phys_write(vp, uio, newEOF, devblocksize, flags)
struct vnode *vp;
struct uio *uio;
off_t newEOF;
int devblocksize;
int flags;
{
upl_page_info_t *pl;
addr64_t src_paddr;
upl_t upl;
vm_offset_t upl_offset;
int tail_size;
int io_size;
int upl_size;
int upl_needed_size;
int pages_in_pl;
int upl_flags;
kern_return_t kret;
struct iovec *iov;
int error = 0;
cluster_try_push(vp, newEOF, 0, 1);
iov = uio->uio_iov;
io_size = iov->iov_len;
upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK;
upl_needed_size = upl_offset + io_size;
pages_in_pl = 0;
upl_size = upl_needed_size;
upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC |
UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
kret = vm_map_get_upl(current_map(),
(vm_offset_t)iov->iov_base & ~PAGE_MASK,
&upl_size, &upl, NULL, &pages_in_pl, &upl_flags, 0);
if (kret != KERN_SUCCESS) {
return(EINVAL);
}
if (upl_size < upl_needed_size) {
kernel_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
return(EINVAL);
}
pl = ubc_upl_pageinfo(upl);
src_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + ((addr64_t)((u_int)iov->iov_base & PAGE_MASK));
while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
int head_size;
head_size = devblocksize - (int)(uio->uio_offset & (devblocksize - 1));
if (head_size > io_size)
head_size = io_size;
error = cluster_align_phys_io(vp, uio, src_paddr, head_size, devblocksize, 0);
if (error) {
ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
return(EINVAL);
}
upl_offset += head_size;
src_paddr += head_size;
io_size -= head_size;
}
tail_size = io_size & (devblocksize - 1);
io_size -= tail_size;
if (io_size) {
error = cluster_io(vp, upl, upl_offset, uio->uio_offset,
io_size, 0, CL_DEV_MEMORY, (struct buf *)0, (struct clios *)0);
}
if (error == 0) {
uio->uio_resid -= io_size;
iov->iov_len -= io_size;
iov->iov_base += io_size;
uio->uio_offset += io_size;
src_paddr += io_size;
if (tail_size)
error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, devblocksize, 0);
}
ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
return (error);
}
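/*
 * Cached write path.  Each pass maps a UPL over the affected file
 * range, reads in any partially valid edge pages, zero-fills head
 * and tail regions as requested, and copies the user data in.  For
 * IO_SYNC the pages are pushed immediately; otherwise the dirty
 * range is recorded either in the vnode's small cluster array or,
 * once writes stop looking sequential, in the sparse (VHASDIRTY)
 * dirty-region map, with cluster_try_push() and
 * sparse_cluster_push() draining clusters as they fill.
 */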
static int
cluster_write_x(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
struct vnode *vp;
struct uio *uio;
off_t oldEOF;
off_t newEOF;
off_t headOff;
off_t tailOff;
int devblocksize;
int flags;
{
upl_page_info_t *pl;
upl_t upl;
vm_offset_t upl_offset;
int upl_size;
off_t upl_f_offset;
int pages_in_upl;
int start_offset;
int xfer_resid;
int io_size;
int io_flags;
int io_offset;
int bytes_to_zero;
int bytes_to_move;
kern_return_t kret;
int retval = 0;
int uio_resid;
long long total_size;
long long zero_cnt;
off_t zero_off;
long long zero_cnt1;
off_t zero_off1;
daddr_t start_blkno;
daddr_t last_blkno;
int intersection;
if (uio) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
(int)uio->uio_offset, uio->uio_resid, (int)oldEOF, (int)newEOF, 0);
uio_resid = uio->uio_resid;
} else {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
0, 0, (int)oldEOF, (int)newEOF, 0);
uio_resid = 0;
}
zero_cnt = 0;
zero_cnt1 = 0;
if (flags & IO_HEADZEROFILL) {
if (uio) {
if (headOff < uio->uio_offset) {
zero_cnt = uio->uio_offset - headOff;
zero_off = headOff;
}
} else if (headOff < newEOF) {
zero_cnt = newEOF - headOff;
zero_off = headOff;
}
}
if (flags & IO_TAILZEROFILL) {
if (uio) {
zero_off1 = uio->uio_offset + uio->uio_resid;
if (zero_off1 < tailOff)
zero_cnt1 = tailOff - zero_off1;
}
}
if (zero_cnt == 0 && uio == (struct uio *) 0) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
retval, 0, 0, 0, 0);
return (0);
}
while ((total_size = (uio_resid + zero_cnt + zero_cnt1)) && retval == 0) {
if (zero_cnt) {
start_offset = (int)(zero_off & PAGE_MASK_64);
upl_f_offset = zero_off - start_offset;
} else if (uio_resid) {
start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
upl_f_offset = uio->uio_offset - start_offset;
} else {
start_offset = (int)(zero_off1 & PAGE_MASK_64);
upl_f_offset = zero_off1 - start_offset;
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE,
(int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0);
if (total_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
total_size = MAX_UPL_TRANSFER * PAGE_SIZE;
start_blkno = (daddr_t)(upl_f_offset / PAGE_SIZE_64);
if (uio && !(vp->v_flag & VNOCACHE_DATA) &&
(flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0) {
if ((start_offset + total_size) > (MAX_UPL_TRANSFER * PAGE_SIZE))
total_size -= start_offset;
xfer_resid = total_size;
retval = cluster_copy_ubc_data(vp, uio, &xfer_resid, 1);
if (retval)
break;
uio_resid -= (total_size - xfer_resid);
total_size = xfer_resid;
start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
upl_f_offset = uio->uio_offset - start_offset;
if (total_size == 0) {
if (start_offset) {
upl_f_offset += PAGE_SIZE_64;
}
upl_size = 0;
goto check_cluster;
}
}
upl_size = (start_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
upl_size = MAX_UPL_TRANSFER * PAGE_SIZE;
pages_in_upl = upl_size / PAGE_SIZE;
io_size = upl_size - start_offset;
if ((long long)io_size > total_size)
io_size = total_size;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);
kret = ubc_create_upl(vp,
upl_f_offset,
upl_size,
&upl,
&pl,
UPL_SET_LITE);
if (kret != KERN_SUCCESS)
panic("cluster_write: failed to get pagelist");
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END,
(int)upl, (int)upl_f_offset, start_offset, 0, 0);
if (start_offset && !upl_valid_page(pl, 0)) {
int read_size;
read_size = PAGE_SIZE;
if ((upl_f_offset + read_size) > newEOF)
read_size = newEOF - upl_f_offset;
retval = cluster_io(vp, upl, 0, upl_f_offset, read_size, devblocksize,
CL_READ, (struct buf *)0, (struct clios *)0);
if (retval) {
ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES);
ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
(int)upl, 0, 0, retval, 0);
break;
}
}
if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
upl_offset = upl_size - PAGE_SIZE;
if ((upl_f_offset + start_offset + io_size) < oldEOF &&
!upl_valid_page(pl, upl_offset / PAGE_SIZE)) {
int read_size;
read_size = PAGE_SIZE;
if ((upl_f_offset + upl_offset + read_size) > newEOF)
read_size = newEOF - (upl_f_offset + upl_offset);
retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size, devblocksize,
CL_READ, (struct buf *)0, (struct clios *)0);
if (retval) {
ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES);
ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
(int)upl, 0, 0, retval, 0);
break;
}
}
}
xfer_resid = io_size;
io_offset = start_offset;
while (zero_cnt && xfer_resid) {
if (zero_cnt < (long long)xfer_resid)
bytes_to_zero = zero_cnt;
else
bytes_to_zero = xfer_resid;
if ( !(flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
cluster_zero(upl, io_offset, bytes_to_zero, NULL);
} else {
int zero_pg_index;
bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64));
zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64);
if ( !upl_valid_page(pl, zero_pg_index)) {
cluster_zero(upl, io_offset, bytes_to_zero, NULL);
} else if ((flags & (IO_NOZERODIRTY | IO_NOZEROVALID)) == IO_NOZERODIRTY &&
!upl_dirty_page(pl, zero_pg_index)) {
cluster_zero(upl, io_offset, bytes_to_zero, NULL);
}
}
xfer_resid -= bytes_to_zero;
zero_cnt -= bytes_to_zero;
zero_off += bytes_to_zero;
io_offset += bytes_to_zero;
}
if (xfer_resid && uio_resid) {
bytes_to_move = min(uio_resid, xfer_resid);
retval = cluster_copy_upl_data(uio, upl, io_offset, bytes_to_move);
if (retval) {
ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
(int)upl, 0, 0, retval, 0);
} else {
uio_resid -= bytes_to_move;
xfer_resid -= bytes_to_move;
io_offset += bytes_to_move;
}
}
while (xfer_resid && zero_cnt1 && retval == 0) {
if (zero_cnt1 < (long long)xfer_resid)
bytes_to_zero = zero_cnt1;
else
bytes_to_zero = xfer_resid;
if ( !(flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
cluster_zero(upl, io_offset, bytes_to_zero, NULL);
} else {
int zero_pg_index;
bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off1 & PAGE_MASK_64));
zero_pg_index = (int)((zero_off1 - upl_f_offset) / PAGE_SIZE_64);
if ( !upl_valid_page(pl, zero_pg_index)) {
cluster_zero(upl, io_offset, bytes_to_zero, NULL);
} else if ((flags & (IO_NOZERODIRTY | IO_NOZEROVALID)) == IO_NOZERODIRTY &&
!upl_dirty_page(pl, zero_pg_index)) {
cluster_zero(upl, io_offset, bytes_to_zero, NULL);
}
}
xfer_resid -= bytes_to_zero;
zero_cnt1 -= bytes_to_zero;
zero_off1 += bytes_to_zero;
io_offset += bytes_to_zero;
}
if (retval == 0) {
int cl_index;
int can_delay;
io_size += start_offset;
if ((upl_f_offset + io_size) >= newEOF && io_size < upl_size) {
cluster_zero(upl, io_size, upl_size - io_size, NULL);
}
if (flags & IO_SYNC)
goto issue_io;
check_cluster:
last_blkno = (upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64;
if (vp->v_flag & VHASDIRTY) {
if ( !(vp->v_flag & VNOCACHE_DATA)) {
if (upl_size)
ubc_upl_commit_range(upl, 0, upl_size,
UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
sparse_cluster_add(vp, newEOF, start_blkno, last_blkno);
continue;
}
if (upl_size) {
ubc_upl_commit_range(upl, 0, upl_size,
UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
upl_size = 0;
}
sparse_cluster_push(vp, ubc_getsize(vp), 1);
goto start_new_cluster;
}
upl_offset = 0;
if (vp->v_clen == 0)
goto start_new_cluster;
for (cl_index = 0; cl_index < vp->v_clen; cl_index++) {
if (start_blkno >= vp->v_clusters[cl_index].start_pg) {
if (last_blkno <= (vp->v_clusters[cl_index].start_pg + MAX_UPL_TRANSFER)) {
if (last_blkno > vp->v_clusters[cl_index].last_pg)
vp->v_clusters[cl_index].last_pg = last_blkno;
break;
}
if (start_blkno < (vp->v_clusters[cl_index].start_pg + MAX_UPL_TRANSFER)) {
vp->v_clusters[cl_index].last_pg = vp->v_clusters[cl_index].start_pg + MAX_UPL_TRANSFER;
if (upl_size) {
int start_pg_in_upl;
start_pg_in_upl = upl_f_offset / PAGE_SIZE_64;
if (start_pg_in_upl < vp->v_clusters[cl_index].last_pg) {
intersection = (vp->v_clusters[cl_index].last_pg - start_pg_in_upl) * PAGE_SIZE;
ubc_upl_commit_range(upl, upl_offset, intersection,
UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
upl_f_offset += intersection;
upl_offset += intersection;
upl_size -= intersection;
}
}
start_blkno = vp->v_clusters[cl_index].last_pg;
}
} else {
if ((vp->v_clusters[cl_index].last_pg - start_blkno) <= MAX_UPL_TRANSFER) {
vp->v_clusters[cl_index].start_pg = start_blkno;
if (last_blkno > vp->v_clusters[cl_index].last_pg) {
vp->v_clusters[cl_index].last_pg = last_blkno;
}
break;
}
if (last_blkno > vp->v_clusters[cl_index].last_pg - MAX_UPL_TRANSFER) {
vp->v_clusters[cl_index].start_pg = vp->v_clusters[cl_index].last_pg - MAX_UPL_TRANSFER;
if (upl_size) {
intersection = (last_blkno - vp->v_clusters[cl_index].start_pg) * PAGE_SIZE;
if (intersection > upl_size)
intersection = upl_size;
ubc_upl_commit_range(upl, upl_offset + (upl_size - intersection), intersection,
UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
upl_size -= intersection;
}
last_blkno = vp->v_clusters[cl_index].start_pg;
}
}
}
if (cl_index < vp->v_clen)
goto delay_io;
if (vp->v_clen < MAX_CLUSTERS && !(vp->v_flag & VNOCACHE_DATA))
goto start_new_cluster;
if (vp->v_flag & VNOCACHE_DATA)
can_delay = 0;
else
can_delay = 1;
if (cluster_try_push(vp, newEOF, can_delay, 0) == 0) {
if (upl_size)
ubc_upl_commit_range(upl, upl_offset, upl_size,
UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
sparse_cluster_switch(vp, newEOF);
sparse_cluster_add(vp, newEOF, start_blkno, last_blkno);
continue;
}
while (vp->v_clen && (vp->v_numoutput <= (ASYNC_THROTTLE / 2)))
cluster_try_push(vp, newEOF, 0, 0);
start_new_cluster:
if (vp->v_clen == 0)
vp->v_ciosiz = devblocksize;
vp->v_clusters[vp->v_clen].start_pg = start_blkno;
vp->v_clusters[vp->v_clen].last_pg = last_blkno;
vp->v_clen++;
delay_io:
if (upl_size)
ubc_upl_commit_range(upl, upl_offset, upl_size,
UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
continue;
issue_io:
io_size = upl_size;
if ((upl_f_offset + io_size) > newEOF) {
io_size = newEOF - upl_f_offset;
io_size = (io_size + (devblocksize - 1)) & ~(devblocksize - 1);
}
if (flags & IO_SYNC)
io_flags = CL_THROTTLE | CL_COMMIT | CL_AGE;
else
io_flags = CL_THROTTLE | CL_COMMIT | CL_AGE | CL_ASYNC;
if (vp->v_flag & VNOCACHE_DATA)
io_flags |= CL_DUMP;
retval = cluster_io(vp, upl, 0, upl_f_offset, io_size, devblocksize,
io_flags, (struct buf *)0, (struct clios *)0);
}
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
retval, 0, uio_resid, 0, 0);
return (retval);
}
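/*
 * Top-level cluster read, mirroring cluster_write(): cached vnodes
 * use cluster_read_x(); VNOCACHE_DATA user reads are steered to
 * cluster_phys_read() for contiguous buffers, to the zero-copy
 * cluster_nocopy_read() for page-aligned buffers, and are otherwise
 * clipped so the misaligned portion goes through the cached path.
 */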
int
cluster_read(vp, uio, filesize, devblocksize, flags)
struct vnode *vp;
struct uio *uio;
off_t filesize;
int devblocksize;
int flags;
{
int prev_resid;
int clip_size;
off_t max_io_size;
struct iovec *iov;
int upl_size;
int upl_flags;
upl_t upl;
int retval = 0;
if (!((vp->v_flag & VNOCACHE_DATA) && (uio->uio_segflg == UIO_USERSPACE)))
{
return (cluster_read_x(vp, uio, filesize, devblocksize, flags));
}
while (uio->uio_resid && uio->uio_offset < filesize && retval == 0)
{
iov = uio->uio_iov;
while (iov->iov_len == 0) {
uio->uio_iov++;
uio->uio_iovcnt--;
iov = uio->uio_iov;
}
upl_size = PAGE_SIZE;
upl_flags = UPL_QUERY_OBJECT_TYPE;
if ((vm_map_get_upl(current_map(),
(vm_offset_t)iov->iov_base & ~PAGE_MASK,
&upl_size, &upl, NULL, NULL, &upl_flags, 0)) != KERN_SUCCESS)
{
return (EFAULT);
}
if (upl_flags & UPL_PHYS_CONTIG)
{
retval = cluster_phys_read(vp, uio, filesize, devblocksize, flags);
}
else if (uio->uio_resid < PAGE_SIZE)
{
return (cluster_read_x(vp, uio, filesize, devblocksize, flags));
}
else if (((int)uio->uio_offset & PAGE_MASK) || ((int)iov->iov_base & PAGE_MASK))
{
if (((int)uio->uio_offset & PAGE_MASK) == ((int)iov->iov_base & PAGE_MASK))
{
clip_size = (PAGE_SIZE - (int)(uio->uio_offset & PAGE_MASK_64));
prev_resid = uio->uio_resid;
uio->uio_resid = clip_size;
retval = cluster_read_x(vp, uio, filesize, devblocksize, flags);
uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
}
else
{
clip_size = iov->iov_len;
prev_resid = uio->uio_resid;
uio->uio_resid = clip_size;
retval = cluster_read_x(vp, uio, filesize, devblocksize, flags);
uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
}
}
else
{
max_io_size = filesize - uio->uio_offset;
clip_size = uio->uio_resid;
if (iov->iov_len < clip_size)
clip_size = iov->iov_len;
if (max_io_size < clip_size)
clip_size = (int)max_io_size;
if (clip_size < PAGE_SIZE)
{
prev_resid = uio->uio_resid;
uio->uio_resid = clip_size;
retval = cluster_read_x(vp, uio, filesize, devblocksize, flags);
uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
}
else
{
clip_size = clip_size & ~(PAGE_MASK);
prev_resid = uio->uio_resid;
uio->uio_resid = clip_size;
retval = cluster_nocopy_read(vp, uio, filesize, devblocksize, flags);
if ((retval==0) && uio->uio_resid)
retval = cluster_read_x(vp, uio, filesize, devblocksize, flags);
uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
}
}
}
return(retval);
}
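/*
 * Cached read path.  Data already valid in the UBC is copied out via
 * cluster_copy_ubc_data() while prefetch keeps the pipeline ahead of
 * the request; misses map a UPL, issue an async read for the invalid
 * run, start further prefetch, and then copy out whatever became
 * valid.  Hard throttling shrinks the maximum read size and disables
 * read-ahead.
 */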
static int
cluster_read_x(vp, uio, filesize, devblocksize, flags)
struct vnode *vp;
struct uio *uio;
off_t filesize;
int devblocksize;
int flags;
{
upl_page_info_t *pl;
upl_t upl;
vm_offset_t upl_offset;
int upl_size;
off_t upl_f_offset;
int start_offset;
int start_pg;
int last_pg;
int uio_last;
int pages_in_upl;
off_t max_size;
off_t last_ioread_offset;
off_t last_request_offset;
u_int size_of_prefetch;
int io_size;
kern_return_t kret;
int error = 0;
int retval = 0;
u_int b_lblkno;
u_int e_lblkno;
struct clios iostate;
u_int max_rd_size = MAX_UPL_TRANSFER * PAGE_SIZE;
u_int rd_ahead_enabled = 1;
u_int prefetch_enabled = 1;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START,
(int)uio->uio_offset, uio->uio_resid, (int)filesize, devblocksize, 0);
if (cluster_hard_throttle_on(vp)) {
rd_ahead_enabled = 0;
prefetch_enabled = 0;
max_rd_size = HARD_THROTTLE_MAXSIZE;
}
if (vp->v_flag & (VRAOFF|VNOCACHE_DATA))
rd_ahead_enabled = 0;
last_request_offset = uio->uio_offset + uio->uio_resid;
if (last_request_offset > filesize)
last_request_offset = filesize;
b_lblkno = (u_int)(uio->uio_offset / PAGE_SIZE_64);
e_lblkno = (u_int)((last_request_offset - 1) / PAGE_SIZE_64);
if (vp->v_ralen && (vp->v_lastr == b_lblkno || (vp->v_lastr + 1) == b_lblkno)) {
last_ioread_offset = (vp->v_maxra * PAGE_SIZE_64) + PAGE_SIZE_64;
if (last_ioread_offset < uio->uio_offset)
last_ioread_offset = (off_t)0;
else if (last_ioread_offset > last_request_offset)
last_ioread_offset = last_request_offset;
} else
last_ioread_offset = (off_t)0;
while (uio->uio_resid && uio->uio_offset < filesize && retval == 0) {
start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
upl_f_offset = uio->uio_offset - (off_t)start_offset;
max_size = filesize - uio->uio_offset;
if ((off_t)((unsigned int)uio->uio_resid) < max_size)
io_size = uio->uio_resid;
else
io_size = max_size;
if (!(vp->v_flag & VNOCACHE_DATA)) {
while (io_size) {
u_int io_resid;
u_int io_requested;
if (last_request_offset && last_ioread_offset && (size_of_prefetch = (last_request_offset - last_ioread_offset))) {
if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) {
if (size_of_prefetch > max_rd_size)
size_of_prefetch = max_rd_size;
size_of_prefetch = cluster_rd_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, devblocksize);
last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
if (last_ioread_offset > last_request_offset)
last_ioread_offset = last_request_offset;
}
}
if (last_ioread_offset && io_size > ((MAX_UPL_TRANSFER * PAGE_SIZE) / 4))
io_resid = ((MAX_UPL_TRANSFER * PAGE_SIZE) / 4);
else
io_resid = io_size;
io_requested = io_resid;
retval = cluster_copy_ubc_data(vp, uio, &io_resid, 0);
io_size -= (io_requested - io_resid);
if (retval || io_resid)
break;
if ((io_size == 0 || last_ioread_offset == last_request_offset) && rd_ahead_enabled) {
cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize);
}
}
if (retval)
break;
if (io_size == 0) {
if (e_lblkno < vp->v_lastr)
vp->v_maxra = 0;
vp->v_lastr = e_lblkno;
break;
}
start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
upl_f_offset = uio->uio_offset - (off_t)start_offset;
max_size = filesize - uio->uio_offset;
}
if (io_size > max_rd_size)
io_size = max_rd_size;
upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE) / 4)
upl_size = (MAX_UPL_TRANSFER * PAGE_SIZE) / 4;
pages_in_upl = upl_size / PAGE_SIZE;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START,
(int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
kret = ubc_create_upl(vp,
upl_f_offset,
upl_size,
&upl,
&pl,
UPL_SET_LITE);
if (kret != KERN_SUCCESS)
panic("cluster_read: failed to get pagelist");
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_END,
(int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
for (start_pg = 0; start_pg < pages_in_upl; start_pg++) {
if (!upl_valid_page(pl, start_pg))
break;
}
for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
if (upl_valid_page(pl, last_pg))
break;
}
iostate.io_completed = 0;
iostate.io_issued = 0;
iostate.io_error = 0;
iostate.io_wanted = 0;
if (start_pg < last_pg) {
upl_offset = start_pg * PAGE_SIZE;
io_size = (last_pg - start_pg) * PAGE_SIZE;
if ((upl_f_offset + upl_offset + io_size) > filesize)
io_size = filesize - (upl_f_offset + upl_offset);
error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
io_size, devblocksize, CL_READ | CL_ASYNC, (struct buf *)0, &iostate);
}
if (error == 0) {
u_int val_size;
for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) {
if (!upl_valid_page(pl, uio_last))
break;
}
val_size = (uio_last * PAGE_SIZE) - start_offset;
if (val_size > max_size)
val_size = max_size;
if (val_size > uio->uio_resid)
val_size = uio->uio_resid;
if (last_ioread_offset == 0)
last_ioread_offset = uio->uio_offset + val_size;
if ((size_of_prefetch = (last_request_offset - last_ioread_offset)) && prefetch_enabled) {
size_of_prefetch = cluster_rd_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, devblocksize);
last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE);
if (last_ioread_offset > last_request_offset)
last_ioread_offset = last_request_offset;
} else if ((uio->uio_offset + val_size) == last_request_offset) {
if (rd_ahead_enabled)
cluster_rd_ahead(vp, b_lblkno, e_lblkno, filesize, devblocksize);
if (e_lblkno < vp->v_lastr)
vp->v_maxra = 0;
vp->v_lastr = e_lblkno;
}
while (iostate.io_issued != iostate.io_completed) {
iostate.io_wanted = 1;
tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_read_x", 0);
}
if (iostate.io_error)
error = iostate.io_error;
else
retval = cluster_copy_upl_data(uio, upl, start_offset, val_size);
}
if (start_pg < last_pg) {
io_size = (last_pg - start_pg) * PAGE_SIZE;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
(int)upl, start_pg * PAGE_SIZE, io_size, error, 0);
if (error || (vp->v_flag & VNOCACHE_DATA))
ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
else
ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size,
UPL_COMMIT_CLEAR_DIRTY |
UPL_COMMIT_FREE_ON_EMPTY |
UPL_COMMIT_INACTIVATE);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END,
(int)upl, start_pg * PAGE_SIZE, io_size, error, 0);
}
if ((last_pg - start_pg) < pages_in_upl) {
int cur_pg;
int commit_flags;
if (error)
ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
else {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START,
(int)upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);
if (start_pg) {
for (cur_pg = 0; cur_pg < start_pg; cur_pg++) {
commit_flags = UPL_COMMIT_FREE_ON_EMPTY
| UPL_COMMIT_INACTIVATE;
if (upl_dirty_page(pl, cur_pg))
commit_flags |= UPL_COMMIT_SET_DIRTY;
if ( !(commit_flags & UPL_COMMIT_SET_DIRTY) && (vp->v_flag & VNOCACHE_DATA))
ubc_upl_abort_range(upl, cur_pg * PAGE_SIZE, PAGE_SIZE,
UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
else
ubc_upl_commit_range(upl, cur_pg * PAGE_SIZE,
PAGE_SIZE, commit_flags);
}
}
if (last_pg < uio_last) {
for (cur_pg = last_pg; cur_pg < uio_last; cur_pg++) {
commit_flags = UPL_COMMIT_FREE_ON_EMPTY
| UPL_COMMIT_INACTIVATE;
if (upl_dirty_page(pl, cur_pg))
commit_flags |= UPL_COMMIT_SET_DIRTY;
if ( !(commit_flags & UPL_COMMIT_SET_DIRTY) && (vp->v_flag & VNOCACHE_DATA))
ubc_upl_abort_range(upl, cur_pg * PAGE_SIZE, PAGE_SIZE,
UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
else
ubc_upl_commit_range(upl, cur_pg * PAGE_SIZE,
PAGE_SIZE, commit_flags);
}
}
if (uio_last < pages_in_upl) {
ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
(pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END,
(int)upl, -1, -1, 0, 0);
}
}
if (retval == 0)
retval = error;
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
(int)uio->uio_offset, uio->uio_resid, vp->v_lastr, retval, 0);
return (retval);
}
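/*
 * Zero-copy read: any part of the request already cached is copied
 * through the UBC first; for the uncached remainder the user pages
 * are wired and read into directly, with up to max_rd_ahead bytes
 * outstanding before the issuer sleeps on the clios state.
 */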
static int
cluster_nocopy_read(vp, uio, filesize, devblocksize, flags)
struct vnode *vp;
struct uio *uio;
off_t filesize;
int devblocksize;
int flags;
{
upl_t upl;
upl_page_info_t *pl;
vm_offset_t upl_offset;
off_t max_io_size;
int io_size;
int upl_size;
int upl_needed_size;
int pages_in_pl;
int upl_flags;
kern_return_t kret;
struct iovec *iov;
int i;
int force_data_sync;
int retval = 0;
struct clios iostate;
u_int max_rd_size = MAX_UPL_TRANSFER * PAGE_SIZE;
u_int max_rd_ahead = MAX_UPL_TRANSFER * PAGE_SIZE * 2;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START,
(int)uio->uio_offset, uio->uio_resid, (int)filesize, devblocksize, 0);
iostate.io_completed = 0;
iostate.io_issued = 0;
iostate.io_error = 0;
iostate.io_wanted = 0;
iov = uio->uio_iov;
if (cluster_hard_throttle_on(vp)) {
max_rd_size = HARD_THROTTLE_MAXSIZE;
max_rd_ahead = HARD_THROTTLE_MAXSIZE - 1;
}
while (uio->uio_resid && uio->uio_offset < filesize && retval == 0) {
max_io_size = filesize - uio->uio_offset;
if (max_io_size < (off_t)((unsigned int)uio->uio_resid))
io_size = max_io_size;
else
io_size = uio->uio_resid;
retval = cluster_copy_ubc_data(vp, uio, &io_size, 0);
if (retval) {
goto wait_for_reads;
}
if (io_size == 0) {
goto wait_for_reads;
}
max_io_size = io_size;
if (max_io_size > max_rd_size)
max_io_size = max_rd_size;
io_size = 0;
ubc_range_op(vp, uio->uio_offset, uio->uio_offset + max_io_size, UPL_ROP_ABSENT, &io_size);
if (io_size == 0)
goto wait_for_reads;
upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK;
upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
(int)upl_offset, upl_needed_size, (int)iov->iov_base, io_size, 0);
for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
pages_in_pl = 0;
upl_size = upl_needed_size;
upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
kret = vm_map_get_upl(current_map(),
(vm_offset_t)iov->iov_base & ~PAGE_MASK,
&upl_size, &upl, NULL, &pages_in_pl, &upl_flags, force_data_sync);
if (kret != KERN_SUCCESS) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
(int)upl_offset, upl_size, io_size, kret, 0);
goto wait_for_reads;
}
pages_in_pl = upl_size / PAGE_SIZE;
pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
for (i = 0; i < pages_in_pl; i++) {
if (!upl_valid_page(pl, i))
break;
}
if (i == pages_in_pl)
break;
ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size,
UPL_ABORT_FREE_ON_EMPTY);
}
if (force_data_sync >= 3) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
(int)upl_offset, upl_size, io_size, kret, 0);
goto wait_for_reads;
}
if (upl_size != upl_needed_size)
io_size = (upl_size - (int)upl_offset) & ~PAGE_MASK;
if (io_size == 0) {
ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size,
UPL_ABORT_FREE_ON_EMPTY);
goto wait_for_reads;
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
(int)upl_offset, upl_size, io_size, kret, 0);
while ((iostate.io_issued - iostate.io_completed) > max_rd_ahead) {
iostate.io_wanted = 1;
tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_nocopy_read", 0);
}
if (iostate.io_error) {
ubc_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size,
UPL_ABORT_FREE_ON_EMPTY);
goto wait_for_reads;
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START,
(int)upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
io_size, devblocksize,
CL_PRESERVE | CL_COMMIT | CL_READ | CL_ASYNC | CL_NOZERO,
(struct buf *)0, &iostate);
iov->iov_base += io_size;
iov->iov_len -= io_size;
uio->uio_resid -= io_size;
uio->uio_offset += io_size;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END,
(int)upl, (int)uio->uio_offset, (int)uio->uio_resid, retval, 0);
}
wait_for_reads:
while (iostate.io_issued != iostate.io_completed) {
iostate.io_wanted = 1;
tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_nocopy_read", 0);
}
if (iostate.io_error)
retval = iostate.io_error;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END,
(int)uio->uio_offset, (int)uio->uio_resid, 6, retval, 0);
return (retval);
}
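/*
 * Read into a physically contiguous user buffer.  As with
 * cluster_phys_write(), device-block-unaligned head and tail pieces
 * use cluster_align_phys_io(); the aligned middle is streamed with
 * CL_DEV_MEMORY | CL_ASYNC requests throttled against the clios
 * counters.
 */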
static int
cluster_phys_read(vp, uio, filesize, devblocksize, flags)
struct vnode *vp;
struct uio *uio;
off_t filesize;
int devblocksize;
int flags;
{
upl_page_info_t *pl;
upl_t upl;
vm_offset_t upl_offset;
addr64_t dst_paddr;
off_t max_size;
int io_size;
int tail_size;
int upl_size;
int upl_needed_size;
int pages_in_pl;
int upl_flags;
kern_return_t kret;
struct iovec *iov;
struct clios iostate;
int error;
iov = uio->uio_iov;
max_size = filesize - uio->uio_offset;
if (max_size > (off_t)((unsigned int)iov->iov_len))
io_size = iov->iov_len;
else
io_size = max_size;
upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK;
upl_needed_size = upl_offset + io_size;
error = 0;
pages_in_pl = 0;
upl_size = upl_needed_size;
upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
kret = vm_map_get_upl(current_map(),
(vm_offset_t)iov->iov_base & ~PAGE_MASK,
&upl_size, &upl, NULL, &pages_in_pl, &upl_flags, 0);
if (kret != KERN_SUCCESS) {
return(EINVAL);
}
if (upl_size < upl_needed_size) {
ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
return(EINVAL);
}
pl = ubc_upl_pageinfo(upl);
dst_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + ((addr64_t)((u_int)iov->iov_base & PAGE_MASK));
while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
int head_size;
head_size = devblocksize - (int)(uio->uio_offset & (devblocksize - 1));
if (head_size > io_size)
head_size = io_size;
error = cluster_align_phys_io(vp, uio, dst_paddr, head_size, devblocksize, CL_READ);
if (error) {
ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
return(EINVAL);
}
upl_offset += head_size;
dst_paddr += head_size;
io_size -= head_size;
}
tail_size = io_size & (devblocksize - 1);
io_size -= tail_size;
iostate.io_completed = 0;
iostate.io_issued = 0;
iostate.io_error = 0;
iostate.io_wanted = 0;
while (io_size && error == 0) {
int xsize;
if (io_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
xsize = MAX_UPL_TRANSFER * PAGE_SIZE;
else
xsize = io_size;
while ((iostate.io_issued - iostate.io_completed) > (2 * MAX_UPL_TRANSFER * PAGE_SIZE)) {
iostate.io_wanted = 1;
tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_phys_read", 0);
}
error = cluster_io(vp, upl, upl_offset, uio->uio_offset, xsize, 0,
CL_READ | CL_NOZERO | CL_DEV_MEMORY | CL_ASYNC,
(struct buf *)0, &iostate);
if (error == 0) {
uio->uio_resid -= xsize;
iov->iov_len -= xsize;
iov->iov_base += xsize;
uio->uio_offset += xsize;
dst_paddr += xsize;
upl_offset += xsize;
io_size -= xsize;
}
}
while (iostate.io_issued != iostate.io_completed) {
iostate.io_wanted = 1;
tsleep((caddr_t)&iostate.io_wanted, PRIBIO + 1, "cluster_phys_read", 0);
}
if (iostate.io_error) {
error = iostate.io_error;
}
if (error == 0 && tail_size)
error = cluster_align_phys_io(vp, uio, dst_paddr, tail_size, devblocksize, CL_READ);
ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
return (error);
}
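/*
 * advisory_read
 *
 * Issue speculative read-ahead for the range [f_offset, f_offset + resid)
 * without returning any data.  Sub-ranges already resident are skipped via
 * ubc_range_op; the remaining absent pages are gathered into a UPL and read
 * asynchronously with CL_AGE so they are recycled quickly if never used.
 */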
int
advisory_read(vp, filesize, f_offset, resid, devblocksize)
struct vnode *vp;
off_t filesize;
off_t f_offset;
int resid;
int devblocksize;
{
upl_page_info_t *pl;
upl_t upl;
vm_offset_t upl_offset;
int upl_size;
off_t upl_f_offset;
int start_offset;
int start_pg;
int last_pg;
int pages_in_upl;
off_t max_size;
int io_size;
kern_return_t kret;
int retval = 0;
int issued_io;
int skip_range;
if (!UBCINFOEXISTS(vp))
return(EINVAL);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_START,
(int)f_offset, resid, (int)filesize, devblocksize, 0);
while (resid && f_offset < filesize && retval == 0) {
start_offset = (int)(f_offset & PAGE_MASK_64);
upl_f_offset = f_offset - (off_t)start_offset;
max_size = filesize - f_offset;
if (resid < max_size)
io_size = resid;
else
io_size = max_size;
upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
upl_size = MAX_UPL_TRANSFER * PAGE_SIZE;
skip_range = 0;
ubc_range_op(vp, upl_f_offset, upl_f_offset + upl_size, UPL_ROP_PRESENT, &skip_range);
if (skip_range) {
io_size = skip_range - start_offset;
f_offset += io_size;
resid -= io_size;
if (skip_range == upl_size)
continue;
start_offset = 0;
upl_f_offset += skip_range;
upl_size -= skip_range;
}
pages_in_upl = upl_size / PAGE_SIZE;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_START,
(int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
kret = ubc_create_upl(vp,
upl_f_offset,
upl_size,
&upl,
&pl,
UPL_RET_ONLY_ABSENT | UPL_SET_LITE);
if (kret != KERN_SUCCESS)
return(retval);
issued_io = 0;
for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
if (upl_page_present(pl, last_pg))
break;
}
pages_in_upl = last_pg + 1;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_END,
(int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
for (last_pg = 0; last_pg < pages_in_upl; ) {
for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
if (upl_page_present(pl, start_pg))
break;
}
for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
if (!upl_page_present(pl, last_pg))
break;
}
if (last_pg > start_pg) {
upl_offset = start_pg * PAGE_SIZE;
io_size = (last_pg - start_pg) * PAGE_SIZE;
if ((upl_f_offset + upl_offset + io_size) > filesize)
io_size = filesize - (upl_f_offset + upl_offset);
retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size, devblocksize,
CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE, (struct buf *)0, (struct clios *)0);
issued_io = 1;
}
}
if (issued_io == 0)
ubc_upl_abort(upl, 0);
io_size = upl_size - start_offset;
if (io_size > resid)
io_size = resid;
f_offset += io_size;
resid -= io_size;
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_END,
(int)f_offset, resid, retval, 0, 0);
return(retval);
}
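/*
 * cluster_push
 *
 * Flush the delayed writes held on this vnode.  If the vnode is using the
 * sparse dirty-region map (VHASDIRTY), every dirty region is pushed and 1
 * is returned; otherwise the fixed cluster array is pushed and the number
 * of cluster slots now free is returned.
 */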
int
cluster_push(vp)
struct vnode *vp;
{
int retval;
if (!UBCINFOEXISTS(vp) || (vp->v_clen == 0 && !(vp->v_flag & VHASDIRTY)))
return(0);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_START,
vp->v_flag & VHASDIRTY, vp->v_clen, 0, 0, 0);
if (vp->v_flag & VHASDIRTY) {
sparse_cluster_push(vp, ubc_getsize(vp), 1);
vp->v_clen = 0;
retval = 1;
} else
retval = cluster_try_push(vp, ubc_getsize(vp), 0, 1);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_END,
vp->v_flag & VHASDIRTY, vp->v_clen, retval, 0, 0);
return (retval);
}
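/*
 * cluster_release
 *
 * Throw away the sparse dirty-page map attached to this vnode, if any.
 */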
int
cluster_release(vp)
struct vnode *vp;
{
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, (int)vp, (int)vp->v_scmap, vp->v_scdirty, 0, 0);
if (vp->v_flag & VHASDIRTY) {
vfs_drt_control(&(vp->v_scmap), 0);
vp->v_flag &= ~VHASDIRTY;
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_END, (int)vp, (int)vp->v_scmap, vp->v_scdirty, 0, 0);
return(0);
}
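/*
 * cluster_try_push
 *
 * Selection-sort the vnode's live clusters into a local array ordered by
 * starting page, then push them in that order.  When can_delay is set and
 * the array holds MAX_CLUSTERS fully contiguous, maximal-sized clusters,
 * pushing is skipped in the hope of absorbing more sequential writes.
 * Clusters that were not pushed are put back on the vnode; if they no
 * longer fit, the vnode is switched to the sparse map representation.
 * Returns the number of cluster slots now free.
 */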
static int
cluster_try_push(vp, EOF, can_delay, push_all)
struct vnode *vp;
off_t EOF;
int can_delay;
int push_all;
{
int cl_index;
int cl_index1;
int min_index;
int cl_len;
int cl_total;
int cl_pushed = 0;
struct v_cluster l_clusters[MAX_CLUSTERS];
for (cl_index = 0; cl_index < vp->v_clen; cl_index++) {
for (min_index = -1, cl_index1 = 0; cl_index1 < vp->v_clen; cl_index1++) {
if (vp->v_clusters[cl_index1].start_pg == vp->v_clusters[cl_index1].last_pg)
continue;
if (min_index == -1)
min_index = cl_index1;
else if (vp->v_clusters[cl_index1].start_pg < vp->v_clusters[min_index].start_pg)
min_index = cl_index1;
}
if (min_index == -1)
break;
l_clusters[cl_index].start_pg = vp->v_clusters[min_index].start_pg;
l_clusters[cl_index].last_pg = vp->v_clusters[min_index].last_pg;
vp->v_clusters[min_index].start_pg = vp->v_clusters[min_index].last_pg;
}
cl_len = cl_index;
vp->v_clen = 0;
if (can_delay && cl_len == MAX_CLUSTERS) {
int i;
for (i = 0; i < MAX_CLUSTERS - 1; i++) {
if ((l_clusters[i].last_pg - l_clusters[i].start_pg) != MAX_UPL_TRANSFER)
goto dont_try;
if (l_clusters[i].last_pg != l_clusters[i+1].start_pg)
goto dont_try;
}
}
for (cl_index = 0; cl_index < cl_len; cl_index++) {
if (cluster_push_x(vp, EOF, l_clusters[cl_index].start_pg, l_clusters[cl_index].last_pg, can_delay)) {
l_clusters[cl_index].start_pg = 0;
l_clusters[cl_index].last_pg = 0;
cl_pushed++;
if (push_all == 0)
break;
}
}
dont_try:
if (cl_len > cl_pushed) {
if ((MAX_CLUSTERS - vp->v_clen) < (cl_len - cl_pushed)) {
sparse_cluster_switch(vp, EOF);
for (cl_index = 0, cl_index1 = 0; cl_index < cl_len; cl_index++) {
if (l_clusters[cl_index].start_pg == l_clusters[cl_index].last_pg)
continue;
vp->v_clusters[cl_index1].start_pg = l_clusters[cl_index].start_pg;
vp->v_clusters[cl_index1].last_pg = l_clusters[cl_index].last_pg;
cl_index1++;
}
vp->v_clen = cl_index1;
sparse_cluster_switch(vp, EOF);
} else {
for (cl_index = 0, cl_index1 = vp->v_clen; cl_index < cl_len; cl_index++) {
if (l_clusters[cl_index].start_pg == l_clusters[cl_index].last_pg)
continue;
vp->v_clusters[cl_index1].start_pg = l_clusters[cl_index].start_pg;
vp->v_clusters[cl_index1].last_pg = l_clusters[cl_index].last_pg;
cl_index1++;
}
vp->v_clen = cl_index1;
}
}
return(MAX_CLUSTERS - vp->v_clen);
}
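/*
 * cluster_push_x
 *
 * Push a single cluster covering pages [first, last).  The range is
 * trimmed to EOF, a UPL of the dirty pages is created, and one async
 * write is issued per run of contiguous dirty pages; present-but-clean
 * pages are simply released back to the VM system.  Always returns 1.
 */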
static int
cluster_push_x(vp, EOF, first, last, can_delay)
struct vnode *vp;
off_t EOF;
unsigned int first;
unsigned int last;
int can_delay;
{
upl_page_info_t *pl;
upl_t upl;
vm_offset_t upl_offset;
int upl_size;
off_t upl_f_offset;
int pages_in_upl;
int start_pg;
int last_pg;
int io_size;
int io_flags;
int upl_flags;
int size;
kern_return_t kret;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_START,
vp->v_clen, first, last, EOF, 0);
if ((pages_in_upl = last - first) == 0) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 0, 0, 0, 0);
return (1);
}
upl_size = pages_in_upl * PAGE_SIZE;
upl_f_offset = (off_t)((unsigned long long)first * PAGE_SIZE_64);
if (upl_f_offset + upl_size >= EOF) {
if (upl_f_offset >= EOF) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 1, 0, 0, 0);
return(1);
}
size = EOF - upl_f_offset;
upl_size = (size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
pages_in_upl = upl_size / PAGE_SIZE;
} else
size = upl_size;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, size, 0, 0, 0);
if (vp->v_flag & VNOCACHE_DATA)
upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE | UPL_WILL_BE_DUMPED;
else
upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE;
kret = ubc_create_upl(vp,
upl_f_offset,
upl_size,
&upl,
&pl,
upl_flags);
if (kret != KERN_SUCCESS)
panic("cluster_push: failed to get pagelist");
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, (int)upl, upl_f_offset, 0, 0, 0);
for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
if (upl_page_present(pl, last_pg))
break;
}
pages_in_upl = last_pg + 1;
if (pages_in_upl == 0) {
ubc_upl_abort(upl, 0);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 2, 0, 0, 0);
return(1);
}
for (last_pg = 0; last_pg < pages_in_upl; ) {
for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) {
if (upl_dirty_page(pl, start_pg))
break;
if (upl_page_present(pl, start_pg))
ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
}
if (start_pg >= pages_in_upl)
break;
if (start_pg > last_pg)
size -= ((start_pg - last_pg) * PAGE_SIZE);
for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) {
if (!upl_dirty_page(pl, last_pg))
break;
}
upl_offset = start_pg * PAGE_SIZE;
io_size = min(size, (last_pg - start_pg) * PAGE_SIZE);
if (vp->v_flag & VNOCACHE_DATA)
io_flags = CL_THROTTLE | CL_COMMIT | CL_ASYNC | CL_DUMP;
else
io_flags = CL_THROTTLE | CL_COMMIT | CL_ASYNC;
cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size, vp->v_ciosiz, io_flags, (struct buf *)0, (struct clios *)0);
size -= io_size;
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 3, 0, 0, 0);
return(1);
}
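/*
 * sparse_cluster_switch
 *
 * Convert this vnode from the fixed cluster array to the sparse
 * dirty-region map: walk every page of every existing cluster and
 * record in the map each page the VM system reports dirty.
 */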
static int
sparse_cluster_switch(struct vnode *vp, off_t EOF)
{
int cl_index;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, (int)vp, (int)vp->v_scmap, vp->v_scdirty, 0, 0);
if ( !(vp->v_flag & VHASDIRTY)) {
vp->v_flag |= VHASDIRTY;
vp->v_scdirty = 0;
vp->v_scmap = 0;
}
for (cl_index = 0; cl_index < vp->v_clen; cl_index++) {
int flags;
int start_pg;
int last_pg;
for (start_pg = vp->v_clusters[cl_index].start_pg; start_pg < vp->v_clusters[cl_index].last_pg; start_pg++) {
if (ubc_page_op(vp, (off_t)(((off_t)start_pg) * PAGE_SIZE_64), 0, 0, &flags) == KERN_SUCCESS) {
if (flags & UPL_POP_DIRTY)
sparse_cluster_add(vp, EOF, start_pg, start_pg + 1);
}
}
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, (int)vp, (int)vp->v_scmap, vp->v_scdirty, 0, 0);
return(0);
}
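/*
 * sparse_cluster_push
 *
 * Pull dirty regions out of the sparse map and push each one through
 * cluster_push_x.  With push_all set, the scan is first rewound via
 * vfs_drt_control and regions are pushed until the map is exhausted
 * (at which point vfs_drt_get_cluster frees it).
 */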
static int
sparse_cluster_push(struct vnode *vp, off_t EOF, int push_all)
{
unsigned int first;
unsigned int last;
off_t offset;
u_int length;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_START, (int)vp, (int)vp->v_scmap, vp->v_scdirty, push_all, 0);
if (push_all)
vfs_drt_control(&(vp->v_scmap), 1);
for (;;) {
if (vfs_drt_get_cluster(&(vp->v_scmap), &offset, &length) != KERN_SUCCESS) {
vp->v_flag &= ~VHASDIRTY;
vp->v_clen = 0;
break;
}
first = (unsigned int)(offset / PAGE_SIZE_64);
last = (unsigned int)((offset + length) / PAGE_SIZE_64);
cluster_push_x(vp, EOF, first, last, 0);
vp->v_scdirty -= (last - first);
if (push_all == 0)
break;
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, (int)vp, (int)vp->v_scmap, vp->v_scdirty, 0, 0);
return(0);
}
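/*
 * sparse_cluster_add
 *
 * Record pages [first, last) as dirty in the sparse map.  If the map
 * cannot absorb the whole range, push a region to make room and retry
 * with whatever portion was not yet recorded.
 */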
static int
sparse_cluster_add(struct vnode *vp, off_t EOF, daddr_t first, daddr_t last)
{
u_int new_dirty;
u_int length;
off_t offset;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_START, (int)vp->v_scmap, vp->v_scdirty, first, last, 0);
offset = (off_t)first * PAGE_SIZE_64;
length = (last - first) * PAGE_SIZE;
while (vfs_drt_mark_pages(&(vp->v_scmap), offset, length, &new_dirty) != KERN_SUCCESS) {
vp->v_scdirty += new_dirty;
sparse_cluster_push(vp, EOF, 0);
offset += (new_dirty * PAGE_SIZE_64);
length -= (new_dirty * PAGE_SIZE);
}
vp->v_scdirty += new_dirty;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_END, (int)vp, (int)vp->v_scmap, vp->v_scdirty, 0, 0);
return(0);
}
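/*
 * cluster_align_phys_io
 *
 * Transfer a fragment that is smaller than, or misaligned with, the
 * device block size.  The cache page containing the fragment is read in
 * if not already valid, the bytes are copied between the user's physical
 * page and the cache page with copypv, and for writes (or reads that hit
 * an already-dirty page) the page is written back out synchronously.
 */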
static int
cluster_align_phys_io(struct vnode *vp, struct uio *uio, addr64_t usr_paddr, int xsize, int devblocksize, int flags)
{
struct iovec *iov;
upl_page_info_t *pl;
upl_t upl;
addr64_t ubc_paddr;
kern_return_t kret;
int error = 0;
iov = uio->uio_iov;
kret = ubc_create_upl(vp,
uio->uio_offset & ~PAGE_MASK_64,
PAGE_SIZE,
&upl,
&pl,
UPL_SET_LITE);
if (kret != KERN_SUCCESS)
return(EINVAL);
if (!upl_valid_page(pl, 0)) {
error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE, devblocksize,
CL_READ, (struct buf *)0, (struct clios *)0);
if (error) {
ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
return(error);
}
}
ubc_paddr = ((addr64_t)upl_phys_page(pl, 0) << 12) + (addr64_t)(uio->uio_offset & PAGE_MASK_64);
if (flags & CL_READ)
copypv(ubc_paddr, usr_paddr, xsize, 2 | 1 | 4); /* Copy physical to physical and flush the destination */
else
copypv(usr_paddr, ubc_paddr, xsize, 2 | 1 | 8); /* Copy physical to physical and flush the source */
if ( !(flags & CL_READ) || (upl_valid_page(pl, 0) && upl_dirty_page(pl, 0))) {
error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE, devblocksize,
0, (struct buf *)0, (struct clios *)0);
}
if (error == 0) {
uio->uio_offset += xsize;
iov->iov_base += xsize;
iov->iov_len -= xsize;
uio->uio_resid -= xsize;
}
ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
return (error);
}
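/*
 * cluster_copy_upl_data
 *
 * Copy xsize bytes between a UPL and the user's iovecs a page at a time,
 * addressing the UPL pages physically so the UPL never has to be mapped.
 * uio_segflg is temporarily switched to the matching UIO_PHYS_* value,
 * and the kernel funnel is dropped around large (>= 16KB) copies.
 */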
int
cluster_copy_upl_data(struct uio *uio, upl_t upl, int upl_offset, int xsize)
{
int pg_offset;
int pg_index;
int csize;
int segflg;
int retval = 0;
upl_page_info_t *pl;
boolean_t funnel_state = FALSE;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
(int)uio->uio_offset, uio->uio_resid, upl_offset, xsize, 0);
if (xsize >= (16 * 1024))
funnel_state = thread_funnel_set(kernel_flock, FALSE);
segflg = uio->uio_segflg;
switch(segflg) {
case UIO_USERSPACE:
case UIO_USERISPACE:
uio->uio_segflg = UIO_PHYS_USERSPACE;
break;
case UIO_SYSSPACE:
uio->uio_segflg = UIO_PHYS_SYSSPACE;
break;
}
pl = ubc_upl_pageinfo(upl);
pg_index = upl_offset / PAGE_SIZE;
pg_offset = upl_offset & PAGE_MASK;
csize = min(PAGE_SIZE - pg_offset, xsize);
while (xsize && retval == 0) {
addr64_t paddr;
paddr = ((addr64_t)upl_phys_page(pl, pg_index) << 12) + pg_offset;
retval = uiomove64(paddr, csize, uio);
pg_index += 1;
pg_offset = 0;
xsize -= csize;
csize = min(PAGE_SIZE, xsize);
}
uio->uio_segflg = segflg;
if (funnel_state == TRUE)
thread_funnel_set(kernel_flock, TRUE);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
(int)uio->uio_offset, uio->uio_resid, retval, segflg, 0);
return (retval);
}
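/*
 * cluster_copy_ubc_data
 *
 * Satisfy as much of the request as possible from pages already resident
 * in the UBC: each page is looked up and marked busy, copied through its
 * physical address, then unbusied (and marked dirty on writes when
 * mark_dirty is set).  The walk stops at the first absent page; the
 * bytes left undone are returned in *io_resid.
 */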
int
cluster_copy_ubc_data(struct vnode *vp, struct uio *uio, int *io_resid, int mark_dirty)
{
int segflg;
int io_size;
int xsize;
int start_offset;
off_t f_offset;
int retval = 0;
memory_object_control_t control;
int op_flags = UPL_POP_SET | UPL_POP_BUSY;
boolean_t funnel_state = FALSE;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START,
(int)uio->uio_offset, uio->uio_resid, 0, *io_resid, 0);
control = ubc_getobject(vp, UBC_FLAGS_NONE);
if (control == MEMORY_OBJECT_CONTROL_NULL) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
(int)uio->uio_offset, uio->uio_resid, retval, 3, 0);
return(0);
}
if (mark_dirty)
op_flags |= UPL_POP_DIRTY;
segflg = uio->uio_segflg;
switch(segflg) {
case UIO_USERSPACE:
case UIO_USERISPACE:
uio->uio_segflg = UIO_PHYS_USERSPACE;
break;
case UIO_SYSSPACE:
uio->uio_segflg = UIO_PHYS_SYSSPACE;
break;
}
io_size = *io_resid;
start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
f_offset = uio->uio_offset - start_offset;
xsize = min(PAGE_SIZE - start_offset, io_size);
while (io_size && retval == 0) {
ppnum_t pgframe;
if (ubc_page_op_with_control(control, f_offset, op_flags, &pgframe, 0) != KERN_SUCCESS)
break;
if (funnel_state == FALSE && io_size >= (16 * 1024))
funnel_state = thread_funnel_set(kernel_flock, FALSE);
retval = uiomove64((addr64_t)(((addr64_t)pgframe << 12) + start_offset), xsize, uio);
ubc_page_op_with_control(control, f_offset, UPL_POP_CLR | UPL_POP_BUSY, 0, 0);
io_size -= xsize;
start_offset = 0;
f_offset = uio->uio_offset;
xsize = min(PAGE_SIZE, io_size);
}
uio->uio_segflg = segflg;
*io_resid = io_size;
if (funnel_state == TRUE)
thread_funnel_set(kernel_flock, TRUE);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END,
(int)uio->uio_offset, uio->uio_resid, retval, 0x80000000 | segflg, 0);
return(retval);
}
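/*
 * is_file_clean
 *
 * Walk every page of the file and ask the VM system whether it is dirty.
 * Returns 0 when the file is clean and EINVAL when any dirty page exists.
 */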
int
is_file_clean(struct vnode *vp, off_t filesize)
{
off_t f_offset;
int flags;
int total_dirty = 0;
for (f_offset = 0; f_offset < filesize; f_offset += PAGE_SIZE_64) {
if (ubc_page_op(vp, f_offset, 0, 0, &flags) == KERN_SUCCESS) {
if (flags & UPL_POP_DIRTY) {
total_dirty++;
}
}
}
if (total_dirty)
return(EINVAL);
return (0);
}
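/*
 * Dirty Region Tracking (DRT)
 *
 * Sparse bookkeeping for a file's dirty pages.  The map is an
 * open-addressing hash table keyed on 1MB-aligned file offsets
 * (DRT_BITVECTOR_PAGES == 256 pages at 4KB).  Each entry packs the
 * aligned offset and a count of set bits into a 64-bit control word
 * and carries a 256-bit dirty bitmap; a count equal to
 * DRT_HASH_COUNT_MASK marks the entry vacant.  The table starts at
 * the small modulus and is re-allocated at the large modulus when it
 * approaches full.
 */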
#define DRT_BITVECTOR_PAGES 256
#define DRT_ADDRESS_MASK (~((1 << 20) - 1))
#define DRT_ALIGN_ADDRESS(addr) ((addr) & DRT_ADDRESS_MASK)
#define DRT_HASH_GET_ADDRESS(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK)
#define DRT_HASH_SET_ADDRESS(scm, i, a) \
do { \
(scm)->scm_hashtable[(i)].dhe_control = \
((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a); \
} while (0)
#define DRT_HASH_COUNT_MASK 0x1ff
#define DRT_HASH_GET_COUNT(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK)
#define DRT_HASH_SET_COUNT(scm, i, c) \
do { \
(scm)->scm_hashtable[(i)].dhe_control = \
((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK); \
} while (0)
#define DRT_HASH_CLEAR(scm, i) \
do { \
(scm)->scm_hashtable[(i)].dhe_control = 0; \
} while (0)
#define DRT_HASH_VACATE(scm, i) DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK)
#define DRT_HASH_VACANT(scm, i) (DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK)
#define DRT_HASH_COPY(oscm, oi, scm, i) \
do { \
(scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control; \
DRT_BITVECTOR_COPY(oscm, oi, scm, i); \
} while (0)
#define DRT_HASH_SMALL_MODULUS 23
#define DRT_HASH_LARGE_MODULUS 401
#define DRT_SMALL_ALLOCATION 1024
#define DRT_LARGE_ALLOCATION 16384
#define DRT_HASH_SET_BIT(scm, i, bit) \
(scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] |= (1 << ((bit) % 32))
#define DRT_HASH_CLEAR_BIT(scm, i, bit) \
(scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] &= ~(1 << ((bit) % 32))
#define DRT_HASH_TEST_BIT(scm, i, bit) \
((scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] & (1 << ((bit) % 32)))
#define DRT_BITVECTOR_CLEAR(scm, i) \
bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
#define DRT_BITVECTOR_COPY(oscm, oi, scm, i) \
bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0], \
&(scm)->scm_hashtable[(i)].dhe_bitvector[0], \
(DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t))
struct vfs_drt_hashentry {
u_int64_t dhe_control;
u_int32_t dhe_bitvector[DRT_BITVECTOR_PAGES / 32];
};
struct vfs_drt_clustermap {
u_int32_t scm_magic;
#define DRT_SCM_MAGIC 0x12020003
u_int32_t scm_modulus;
u_int32_t scm_buckets;
u_int32_t scm_lastclean;
u_int32_t scm_iskips;
struct vfs_drt_hashentry scm_hashtable[0];
};
#define DRT_HASH(scm, addr) ((addr) % (scm)->scm_modulus)
#define DRT_HASH_NEXT(scm, addr) (((addr) + 1) % (scm)->scm_modulus)
#define DRT_DEBUG_EMPTYFREE (FSDBG_CODE(DBG_FSRW, 82))
#define DRT_DEBUG_RETCLUSTER (FSDBG_CODE(DBG_FSRW, 83))
#define DRT_DEBUG_ALLOC (FSDBG_CODE(DBG_FSRW, 84))
#define DRT_DEBUG_INSERT (FSDBG_CODE(DBG_FSRW, 85))
#define DRT_DEBUG_MARK (FSDBG_CODE(DBG_FSRW, 86))
#define DRT_DEBUG_6 (FSDBG_CODE(DBG_FSRW, 87))
#define DRT_DEBUG_SCMDATA (FSDBG_CODE(DBG_FSRW, 88))
static void vfs_drt_sanity(struct vfs_drt_clustermap *cmap);
static kern_return_t vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp);
static kern_return_t vfs_drt_free_map(struct vfs_drt_clustermap *cmap);
static kern_return_t vfs_drt_search_index(struct vfs_drt_clustermap *cmap,
u_int64_t offset, int *indexp);
static kern_return_t vfs_drt_get_index(struct vfs_drt_clustermap **cmapp,
u_int64_t offset,
int *indexp,
int recursed);
static kern_return_t vfs_drt_do_mark_pages(
void **cmapp,
u_int64_t offset,
u_int length,
int *setcountp,
int dirty);
static void vfs_drt_trace(
struct vfs_drt_clustermap *cmap,
int code,
int arg1,
int arg2,
int arg3,
int arg4);
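/*
 * vfs_drt_alloc_map
 *
 * Allocate (or grow) a cluster map.  When an existing map is passed in,
 * the new modulus is chosen from its occupancy -- growing to the large
 * modulus once the small table is nearly full -- and every live entry is
 * rehashed into the new table before the old one is freed.
 */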
static kern_return_t
vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp)
{
struct vfs_drt_clustermap *cmap, *ocmap;
kern_return_t kret;
u_int64_t offset;
int nsize, i, active_buckets, index, copycount;
ocmap = NULL;
if (cmapp != NULL)
ocmap = *cmapp;
if (ocmap == NULL) {
nsize = DRT_HASH_SMALL_MODULUS;
} else {
active_buckets = 0;
for (i = 0; i < ocmap->scm_modulus; i++) {
if (!DRT_HASH_VACANT(ocmap, i) &&
(DRT_HASH_GET_COUNT(ocmap, i) != 0))
active_buckets++;
}
if (ocmap->scm_modulus == DRT_HASH_SMALL_MODULUS) {
if (active_buckets > (DRT_HASH_SMALL_MODULUS - 5)) {
nsize = DRT_HASH_LARGE_MODULUS;
} else {
nsize = DRT_HASH_SMALL_MODULUS;
}
} else {
nsize = DRT_HASH_LARGE_MODULUS;
if (active_buckets >= DRT_HASH_LARGE_MODULUS)
return(KERN_SUCCESS);
}
}
kret = kmem_alloc(kernel_map, (vm_offset_t *)&cmap,
(nsize == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION);
if (kret != KERN_SUCCESS)
return(kret);
cmap->scm_magic = DRT_SCM_MAGIC;
cmap->scm_modulus = nsize;
cmap->scm_buckets = 0;
cmap->scm_lastclean = 0;
cmap->scm_iskips = 0;
for (i = 0; i < cmap->scm_modulus; i++) {
DRT_HASH_CLEAR(cmap, i);
DRT_HASH_VACATE(cmap, i);
DRT_BITVECTOR_CLEAR(cmap, i);
}
copycount = 0;
if (ocmap != NULL) {
for (i = 0; i < ocmap->scm_modulus; i++) {
if (DRT_HASH_VACANT(ocmap, i) ||
(DRT_HASH_GET_COUNT(ocmap, i) == 0))
continue;
offset = DRT_HASH_GET_ADDRESS(ocmap, i);
kret = vfs_drt_get_index(&cmap, offset, &index, 1);
if (kret != KERN_SUCCESS) {
panic("vfs_drt: new cluster map mysteriously too small");
}
DRT_HASH_COPY(ocmap, i, cmap, index);
copycount++;
}
}
vfs_drt_trace(cmap, DRT_DEBUG_ALLOC, copycount, 0, 0, 0);
*cmapp = cmap;
if (ocmap != NULL) {
vfs_drt_trace(ocmap, DRT_DEBUG_SCMDATA,
ocmap->scm_modulus,
ocmap->scm_buckets,
ocmap->scm_lastclean,
ocmap->scm_iskips);
vfs_drt_free_map(ocmap);
}
return(KERN_SUCCESS);
}
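/*
 * vfs_drt_free_map
 *
 * Free the memory backing a cluster map.
 */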
static kern_return_t
vfs_drt_free_map(struct vfs_drt_clustermap *cmap)
{
kmem_free(kernel_map, (vm_offset_t)cmap,
(cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION);
return(KERN_SUCCESS);
}
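/*
 * vfs_drt_search_index
 *
 * Linear-probe lookup of the entry holding the given (aligned) offset.
 * The probe ends at the first vacant entry or after one full pass.
 */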
static kern_return_t
vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *indexp)
{
int index, i;
offset = DRT_ALIGN_ADDRESS(offset);
index = DRT_HASH(cmap, offset);
for (i = 0; i < cmap->scm_modulus; i++) {
if (DRT_HASH_VACANT(cmap, index))
break;
if (DRT_HASH_GET_ADDRESS(cmap, index) == offset) {
*indexp = index;
return(KERN_SUCCESS);
}
index = DRT_HASH_NEXT(cmap, index);
}
return(KERN_FAILURE);
}
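/*
 * vfs_drt_get_index
 *
 * Find or create the entry for the given offset.  On a miss the first
 * vacant or empty slot on the probe path is claimed; if the table is
 * full and this is not already a recursive call, the map is grown and
 * the lookup retried once.
 */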
static kern_return_t
vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *indexp, int recursed)
{
struct vfs_drt_clustermap *cmap;
kern_return_t kret;
int index, i;
cmap = *cmapp;
kret = vfs_drt_search_index(cmap, offset, indexp);
if (kret == KERN_SUCCESS)
return(kret);
offset = DRT_ALIGN_ADDRESS(offset);
index = DRT_HASH(cmap, offset);
for (i = 0; i < cmap->scm_modulus; i++) {
if (DRT_HASH_VACANT(cmap, index) || DRT_HASH_GET_COUNT(cmap,index) == 0) {
cmap->scm_buckets++;
if (index < cmap->scm_lastclean)
cmap->scm_lastclean = index;
DRT_HASH_SET_ADDRESS(cmap, index, offset);
DRT_HASH_SET_COUNT(cmap, index, 0);
DRT_BITVECTOR_CLEAR(cmap, index);
*indexp = index;
vfs_drt_trace(cmap, DRT_DEBUG_INSERT, (int)offset, i, 0, 0);
return(KERN_SUCCESS);
}
cmap->scm_iskips += i;
index = DRT_HASH_NEXT(cmap, index);
}
if (recursed)
return(KERN_FAILURE);
kret = vfs_drt_alloc_map(cmapp);
if (kret == KERN_SUCCESS) {
kret = vfs_drt_get_index(cmapp, offset, indexp, 1);
}
return(kret);
}
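/*
 * vfs_drt_do_mark_pages
 *
 * Set (dirty != 0) or clear (dirty == 0) the bitmap bits covering the
 * byte range [offset, offset + length), allocating the map and hash
 * entries on demand and keeping each entry's set-bit count in step.
 * *setcountp returns how many bits actually changed state.
 */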
static kern_return_t
vfs_drt_do_mark_pages(
void **private,
u_int64_t offset,
u_int length,
int *setcountp,
int dirty)
{
struct vfs_drt_clustermap *cmap, **cmapp;
kern_return_t kret;
int i, index, pgoff, pgcount, setcount, ecount;
cmapp = (struct vfs_drt_clustermap **)private;
cmap = *cmapp;
vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_START, (int)offset, (int)length, dirty, 0);
if (setcountp != NULL)
*setcountp = 0;
if (cmap == NULL) {
if (!dirty) {
vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 1, 0, 0, 0);
return(KERN_SUCCESS);
}
kret = vfs_drt_alloc_map(cmapp);
if (kret != KERN_SUCCESS) {
vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 2, 0, 0, 0);
return(kret);
}
}
setcount = 0;
while (length > 0) {
kret = vfs_drt_get_index(cmapp, offset, &index, 0);
cmap = *cmapp;
if (kret != KERN_SUCCESS) {
if (setcountp != NULL)
*setcountp = setcount;
vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 3, (int)length, 0, 0);
return(kret);
}
pgoff = (offset - DRT_ALIGN_ADDRESS(offset)) / PAGE_SIZE;
pgcount = min((length / PAGE_SIZE), (DRT_BITVECTOR_PAGES - pgoff));
ecount = DRT_HASH_GET_COUNT(cmap, index);
for (i = 0; i < pgcount; i++) {
if (dirty) {
if (!DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
DRT_HASH_SET_BIT(cmap, index, pgoff + i);
ecount++;
setcount++;
}
} else {
if (DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) {
DRT_HASH_CLEAR_BIT(cmap, index, pgoff + i);
ecount--;
setcount++;
}
}
}
DRT_HASH_SET_COUNT(cmap, index, ecount);
offset += pgcount * PAGE_SIZE;
length -= pgcount * PAGE_SIZE;
}
if (setcountp != NULL)
*setcountp = setcount;
vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 0, setcount, 0, 0);
return(KERN_SUCCESS);
}
static kern_return_t
vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, int *setcountp)
{
return(vfs_drt_do_mark_pages(cmapp, offset, length, setcountp, 1));
}
static kern_return_t
vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length)
{
return(vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0));
}
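/*
 * vfs_drt_get_cluster
 *
 * Extract one run of contiguous dirty pages: the first set-bit run of
 * the first non-empty entry found.  The walk strides through bucket
 * indices via DRT_HASH on 1MB multiples, which visits every bucket once
 * since both moduli are coprime to the stride.  The run is cleared from
 * the map before being returned; when nothing is left the map is freed
 * and KERN_FAILURE returned.
 */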
static kern_return_t
vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp)
{
struct vfs_drt_clustermap *cmap;
u_int64_t offset;
u_int length;
int index, i, j, fs, ls;
if ((cmapp == NULL) || (*cmapp == NULL))
return(KERN_FAILURE);
cmap = *cmapp;
for (offset = 0, j = 0; j < cmap->scm_modulus; offset += (DRT_BITVECTOR_PAGES * PAGE_SIZE), j++) {
index = DRT_HASH(cmap, offset);
if (DRT_HASH_VACANT(cmap, index) || (DRT_HASH_GET_COUNT(cmap, index) == 0))
continue;
fs = -1;
for (i = 0; i < DRT_BITVECTOR_PAGES; i++) {
if (DRT_HASH_TEST_BIT(cmap, index, i)) {
fs = i;
break;
}
}
if (fs == -1) {
panic("vfs_drt: entry summary count > 0 but no bits set in map");
}
for (ls = 0; i < DRT_BITVECTOR_PAGES; i++, ls++) {
if (!DRT_HASH_TEST_BIT(cmap, index, i))
break;
}
offset = DRT_HASH_GET_ADDRESS(cmap, index) + (PAGE_SIZE * fs);
length = ls * PAGE_SIZE;
vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0);
cmap->scm_lastclean = index;
*offsetp = (off_t)offset;
*lengthp = length;
vfs_drt_trace(cmap, DRT_DEBUG_RETCLUSTER, (int)offset, (int)length, 0, 0);
return(KERN_SUCCESS);
}
vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
cmap->scm_modulus,
cmap->scm_buckets,
cmap->scm_lastclean,
cmap->scm_iskips);
vfs_drt_free_map(cmap);
*cmapp = NULL;
return(KERN_FAILURE);
}
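/*
 * vfs_drt_control
 *
 * op_type 0 -- log the map's statistics and destroy it.
 * op_type 1 -- reset scm_lastclean so a full push rescans from the start.
 */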
static kern_return_t
vfs_drt_control(void **cmapp, int op_type)
{
struct vfs_drt_clustermap *cmap;
if ((cmapp == NULL) || (*cmapp == NULL))
return(KERN_FAILURE);
cmap = *cmapp;
switch (op_type) {
case 0:
vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA,
cmap->scm_modulus,
cmap->scm_buckets,
cmap->scm_lastclean,
cmap->scm_iskips);
vfs_drt_free_map(cmap);
*cmapp = NULL;
break;
case 1:
cmap->scm_lastclean = 0;
break;
}
return(KERN_SUCCESS);
}
static void
vfs_drt_trace(struct vfs_drt_clustermap *cmap, int code, int arg1, int arg2, int arg3, int arg4)
{
KERNEL_DEBUG(code, arg1, arg2, arg3, arg4, 0);
}
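/*
 * vfs_drt_sanity
 *
 * Debug aid (not called in normal operation): verify that each entry's
 * stored count matches the number of bits set in its bitmap, panicking
 * on any mismatch.
 */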
static void
vfs_drt_sanity(struct vfs_drt_clustermap *cmap)
{
int index, i;
int bits_on;
for (index = 0; index < cmap->scm_modulus; index++) {
if (DRT_HASH_VACANT(cmap, index))
continue;
for (bits_on = 0, i = 0; i < DRT_BITVECTOR_PAGES; i++) {
if (DRT_HASH_TEST_BIT(cmap, index, i))
bits_on++;
}
if (bits_on != DRT_HASH_GET_COUNT(cmap, index))
panic("bits_on = %d, index = %d\n", bits_on, index);
}
}