#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <miscfs/specfs/specdev.h>
#include <sys/ubc.h>
#include <vm/vm_pageout.h>
#include <sys/kdebug.h>
#include "hfs.h"
#include "hfs_endian.h"
#include "hfs_quota.h"
#include "hfscommon/headers/FileMgrInternal.h"
#include "hfscommon/headers/BTreesInternal.h"
#include "hfs_cnode.h"
#include "hfs_dbg.h"
/* Defined in the catalog/extents code: non-zero if the fork has extents
 * beyond the eight resident descriptors (i.e. in the extents overflow file). */
extern int overflow_extents(struct filefork *fp);

/*
 * A block size can be "clustered" when it is an exact multiple of 4K and
 * no larger than half of MAXPHYSIO.  The argument is fully parenthesized
 * so that expression arguments (e.g. can_cluster(a + b)) parse correctly.
 */
#define can_cluster(size) ((((size) & (4096-1)) == 0) && ((size) <= (MAXPHYSIO/2)))

enum {
MAXHFSFILESIZE = 0x7FFFFFFF /* largest file size on plain (non-Plus) HFS: 2GB - 1 */
};

extern u_int32_t GetLogicalBlockSize(struct vnode *vp);
/*
 * hfs_read - vnode op VOP_READ: copy file data into the caller's uio.
 *
 * Fast path: when the vnode has valid UBC info the whole transfer is
 * delegated to cluster_read().  Slow path: walk the buffer cache one
 * PAGE_SIZE logical block at a time, issuing single-block read-ahead
 * (breadn) when the access pattern looks sequential.
 *
 * Returns 0 on success or an errno (EISDIR, EINVAL, EFBIG, I/O errors).
 */
int
hfs_read(ap)
struct vop_read_args *ap;
{
register struct uio *uio = ap->a_uio;
register struct vnode *vp = ap->a_vp;
struct cnode *cp;
struct filefork *fp;
struct buf *bp;
daddr_t logBlockNo;
u_long fragSize, moveSize, startOffset, ioxfersize;
int devBlockSize = 0;
off_t bytesRemaining;
int retval = 0;
off_t filesize;
off_t filebytes;
/* Only regular files and symlinks are readable through this path. */
if (vp->v_type != VREG && vp->v_type != VLNK)
return (EISDIR);
/* Zero-length request: nothing to do. */
if (uio->uio_resid == 0)
return (0);
if (uio->uio_offset < 0)
return (EINVAL);
cp = VTOC(vp);
fp = VTOF(vp);
filesize = fp->ff_size; /* logical EOF */
filebytes = (off_t)fp->ff_blocks * (off_t)VTOVCB(vp)->blockSize; /* physical (allocated) size */
if (uio->uio_offset > filesize) {
/* Starting past EOF: plain HFS caps offsets at MAXHFSFILESIZE (EFBIG);
 * otherwise report EOF by returning 0 bytes read. */
if ((!ISHFSPLUS(VTOVCB(vp))) && (uio->uio_offset > (off_t)MAXHFSFILESIZE))
return (EFBIG);
else
return (0);
}
VOP_DEVBLOCKSIZE(cp->c_devvp, &devBlockSize);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 12)) | DBG_FUNC_START,
(int)uio->uio_offset, uio->uio_resid, (int)filesize, (int)filebytes, 0);
if (UBCISVALID(vp)) {
/* UBC-backed vnode: the cluster layer handles the entire transfer. */
retval = cluster_read(vp, uio, filesize, devBlockSize, 0);
} else {
/* Buffer-cache path: one PAGE_SIZE logical block per iteration.
 * bp is reset to NULL each pass; a non-NULL bp after the loop means
 * an early break left a buffer to release. */
for (retval = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
if ((bytesRemaining = (filesize - uio->uio_offset)) <= 0)
break;
logBlockNo = (daddr_t)(uio->uio_offset / PAGE_SIZE_64);
startOffset = (u_long) (uio->uio_offset & PAGE_MASK_64);
fragSize = PAGE_SIZE;
if (((logBlockNo * PAGE_SIZE) + fragSize) < filesize)
ioxfersize = fragSize;
else {
/* Last block: read only the tail, rounded up to a device-block multiple. */
ioxfersize = filesize - (logBlockNo * PAGE_SIZE);
ioxfersize = (ioxfersize + (devBlockSize - 1)) & ~(devBlockSize - 1);
}
/* Clamp the copy size by the block remainder, EOF, and caller's resid. */
moveSize = ioxfersize;
moveSize -= startOffset;
if (bytesRemaining < moveSize)
moveSize = bytesRemaining;
if (uio->uio_resid < moveSize) {
moveSize = uio->uio_resid;
};
if (moveSize == 0) {
break;
};
if (( uio->uio_offset + fragSize) >= filesize) {
/* At/near EOF: plain read, no read-ahead. */
retval = bread(vp, logBlockNo, ioxfersize, NOCRED, &bp);
} else if (logBlockNo - 1 == vp->v_lastr && !(vp->v_flag & VRAOFF)) {
/* Sequential access detected: read this block and prefetch the next. */
daddr_t nextLogBlockNo = logBlockNo + 1;
int nextsize;
if (((nextLogBlockNo * PAGE_SIZE) +
(daddr_t)fragSize) < filesize)
nextsize = fragSize;
else {
nextsize = filesize - (nextLogBlockNo * PAGE_SIZE);
nextsize = (nextsize + (devBlockSize - 1)) & ~(devBlockSize - 1);
}
retval = breadn(vp, logBlockNo, ioxfersize, &nextLogBlockNo, &nextsize, 1, NOCRED, &bp);
} else {
retval = bread(vp, logBlockNo, ioxfersize, NOCRED, &bp);
};
if (retval != E_NONE) {
if (bp) {
brelse(bp);
bp = NULL;
}
break;
};
vp->v_lastr = logBlockNo; /* remember for the read-ahead heuristic */
/* b_resid counts bytes the device failed to transfer; shrink accordingly. */
ioxfersize -= bp->b_resid;
if (ioxfersize < moveSize) {
if (ioxfersize == 0)
break;
moveSize = ioxfersize;
}
if ((startOffset + moveSize) > bp->b_bcount)
panic("hfs_read: bad startOffset or moveSize\n");
if ((retval = uiomove((caddr_t)bp->b_data + startOffset, (int)moveSize, uio)))
break;
if (S_ISREG(cp->c_mode) &&
(((startOffset + moveSize) == fragSize) || (uio->uio_offset == filesize))) {
/* Block fully consumed (or EOF reached): age it for early reuse. */
bp->b_flags |= B_AGE;
};
brelse(bp);
}
/* Release any buffer still held after an early break above. */
if (bp != NULL) {
brelse(bp);
}
}
cp->c_flag |= C_ACCESS; /* note access time needs updating */
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 12)) | DBG_FUNC_END,
(int)uio->uio_offset, uio->uio_resid, (int)filesize, (int)filebytes, 0);
return (retval);
}
/*
 * hfs_write - vnode op VOP_WRITE: write data from the caller's uio into
 * a regular file or symlink.
 *
 * Outline:
 *   1. Validate arguments, honor IO_APPEND / the append-only flag, and
 *      refuse writes that would touch the journal's own extents.
 *   2. Enforce RLIMIT_FSIZE (SIGXFSZ + EFBIG on overrun).
 *   3. If the write extends past the allocated size, grow the fork with
 *      ExtendFileC under the extents B-tree lock, inside a journal
 *      transaction when the volume is journaled.
 *   4. Do the data transfer: cluster_write() for UBC vnodes (with
 *      head/tail zero-fill bookkeeping via the invalid-range list), or a
 *      buffer-cache loop otherwise.
 *   5. On error with IO_UNIT, roll the file back to its original size.
 */
int
hfs_write(ap)
struct vop_write_args *ap;
{
struct vnode *vp = ap->a_vp;
struct uio *uio = ap->a_uio;
struct cnode *cp;
struct filefork *fp;
struct buf *bp;
struct proc *p;
struct timeval tv;
ExtendedVCB *vcb;
int devBlockSize = 0;
daddr_t logBlockNo;
long fragSize;
off_t origFileSize, currOffset, writelimit, bytesToAdd;
off_t actualBytesAdded;
u_long blkoffset, resid, xfersize, clearSize;
int eflags, ioflag;
int retval;
off_t filebytes;
u_long fileblocks;
struct hfsmount *hfsmp;
int started_tr = 0, grabbed_lock = 0;
ioflag = ap->a_ioflag;
if (uio->uio_offset < 0)
return (EINVAL);
if (uio->uio_resid == 0)
return (E_NONE);
if (vp->v_type != VREG && vp->v_type != VLNK)
return (EISDIR);
cp = VTOC(vp);
fp = VTOF(vp);
vcb = VTOVCB(vp);
fileblocks = fp->ff_blocks;
filebytes = (off_t)fileblocks * (off_t)vcb->blockSize;
/* O_APPEND: every write starts at the current EOF. */
if (ioflag & IO_APPEND)
uio->uio_offset = fp->ff_size;
/* Append-only files may only be written at EOF. */
if ((cp->c_flags & APPEND) && uio->uio_offset != fp->ff_size)
return (EPERM);
/* Never allow a write that would scribble on the journal info block
 * or the journal file itself. */
if (VTOHFS(vp)->jnl && cp->c_datafork) {
struct HFSPlusExtentDescriptor *extd;
extd = &cp->c_datafork->ff_data.cf_extents[0];
if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {
return EPERM;
}
}
writelimit = uio->uio_offset + uio->uio_resid;
p = uio->uio_procp;
/* Enforce the process file-size resource limit. */
if (vp->v_type == VREG && p &&
writelimit > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
psignal(p, SIGXFSZ);
return (EFBIG);
}
p = current_proc();
VOP_DEVBLOCKSIZE(cp->c_devvp, &devBlockSize);
resid = uio->uio_resid; /* saved so we can detect progress / roll back */
origFileSize = fp->ff_size;
eflags = kEFDeferMask; /* defer zero-filling of newly allocated blocks */
filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize;
currOffset = MIN(uio->uio_offset, fp->ff_size);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_START,
(int)uio->uio_offset, uio->uio_resid, (int)fp->ff_size, (int)filebytes, 0);
retval = 0;
#if QUOTA
/* Charge the quota for any blocks this write will have to allocate. */
if(writelimit > filebytes) {
bytesToAdd = writelimit - filebytes;
retval = hfs_chkdq(cp, (int64_t)(roundup(bytesToAdd, vcb->blockSize)),
ap->a_cred, 0);
if (retval)
return (retval);
}
#endif
hfsmp = VTOHFS(vp);
/* Growing the fork: take the global shared lock and, on journaled
 * volumes, open a transaction around the allocation. */
if (writelimit > filebytes) {
hfs_global_shared_lock_acquire(hfsmp);
grabbed_lock = 1;
}
if (hfsmp->jnl && (writelimit > filebytes)) {
if (journal_start_transaction(hfsmp->jnl) != 0) {
hfs_global_shared_lock_release(hfsmp);
return EINVAL;
}
started_tr = 1;
}
/* Allocate until the fork covers the whole write (ExtendFileC may
 * grow the fork in several steps). */
while (writelimit > filebytes) {
bytesToAdd = writelimit - filebytes;
/* Non-superuser allocations must leave the reserve pool alone. */
if (suser(ap->a_cred, NULL) != 0)
eflags |= kEFReserveMask;
retval = hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_EXCLUSIVE, current_proc());
if (retval != E_NONE)
break;
retval = MacToVFSError(ExtendFileC (vcb, (FCB*)fp, bytesToAdd,
0, eflags, &actualBytesAdded));
(void) hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_RELEASE, p);
if ((actualBytesAdded == 0) && (retval == E_NONE))
retval = ENOSPC;
if (retval != E_NONE)
break;
filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_NONE,
(int)uio->uio_offset, uio->uio_resid, (int)fp->ff_size, (int)filebytes, 0);
}
/* Close the allocation transaction and drop the shared lock before
 * doing the actual data transfer. */
if (started_tr) {
hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0);
journal_end_transaction(hfsmp->jnl);
started_tr = 0;
}
if (grabbed_lock) {
hfs_global_shared_lock_release(hfsmp);
grabbed_lock = 0;
}
if (UBCISVALID(vp) && retval == E_NONE) {
off_t filesize;
off_t zero_off;
off_t tail_off;
off_t inval_start;
off_t inval_end;
off_t io_start, io_end;
int lflag;
struct rl_entry *invalid_range;
if (writelimit > fp->ff_size)
filesize = writelimit;
else
filesize = fp->ff_size;
lflag = (ioflag & IO_SYNC);
if (uio->uio_offset <= fp->ff_size) {
/* Write starts at or before EOF: if the head of the write's page
 * overlaps an invalid (never-zeroed) range, have cluster_write
 * zero-fill from the page boundary up to the write offset. */
zero_off = uio->uio_offset & ~PAGE_MASK_64;
if (rl_scan(&fp->ff_invalidranges, zero_off, uio->uio_offset - 1, &invalid_range) != RL_NOOVERLAP)
lflag |= IO_HEADZEROFILL;
} else {
/* Write starts beyond EOF: the gap [ff_size, uio_offset) must read
 * back as zeroes.  Whole pages in the gap are recorded as invalid
 * ranges (zeroed lazily); partial pages are zero-filled now. */
off_t eof_page_base = fp->ff_size & ~PAGE_MASK_64;
inval_start = (fp->ff_size + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
inval_end = uio->uio_offset & ~PAGE_MASK_64;
zero_off = fp->ff_size;
if ((fp->ff_size & PAGE_MASK_64) &&
(rl_scan(&fp->ff_invalidranges,
eof_page_base,
fp->ff_size - 1,
&invalid_range) != RL_NOOVERLAP)) {
/* The page containing the current EOF is itself partly invalid:
 * either fold it into the invalid span or start zeroing at its base. */
if (inval_end > eof_page_base) {
inval_start = eof_page_base;
} else {
zero_off = eof_page_base;
};
};
if (inval_start < inval_end) {
/* Zero any valid tail of the EOF page, then mark the whole-page
 * gap invalid and stamp the zero-fill deadline. */
if (zero_off < inval_start) {
retval = cluster_write(vp, (struct uio *) 0,
fp->ff_size, inval_start,
zero_off, (off_t)0, devBlockSize,
lflag | IO_HEADZEROFILL | IO_NOZERODIRTY);
if (retval) goto ioerr_exit;
};
rl_add(inval_start, inval_end - 1 , &fp->ff_invalidranges);
cp->c_zftimeout = time.tv_sec + ZFTIMELIMIT;
zero_off = fp->ff_size = inval_end;
};
if (uio->uio_offset > zero_off) lflag |= IO_HEADZEROFILL;
};
/* Tail: if the page tail past writelimit overlaps an invalid range,
 * ask cluster_write to zero it too. */
tail_off = (writelimit + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
if (tail_off > filesize) tail_off = filesize;
if (tail_off > writelimit) {
if (rl_scan(&fp->ff_invalidranges, writelimit, tail_off - 1, &invalid_range) != RL_NOOVERLAP) {
lflag |= IO_TAILZEROFILL;
};
};
/* The bytes this call makes valid are no longer "invalid". */
io_start = (lflag & IO_HEADZEROFILL) ? zero_off : uio->uio_offset;
io_end = (lflag & IO_TAILZEROFILL) ? tail_off : writelimit;
if (io_start < fp->ff_size) {
rl_remove(io_start, io_end - 1, &fp->ff_invalidranges);
};
retval = cluster_write(vp, uio, fp->ff_size, filesize, zero_off,
tail_off, devBlockSize, lflag | IO_NOZERODIRTY);
if (uio->uio_offset > fp->ff_size) {
fp->ff_size = uio->uio_offset;
ubc_setsize(vp, fp->ff_size); /* keep the VM object size in sync */
}
if (resid > uio->uio_resid)
cp->c_flag |= C_CHANGE | C_UPDATE;
} else {
/* Non-UBC path: write through the buffer cache, one PAGE_SIZE
 * logical block per iteration. */
while (retval == E_NONE && uio->uio_resid > 0) {
logBlockNo = currOffset / PAGE_SIZE;
blkoffset = currOffset & PAGE_MASK;
if ((filebytes - currOffset) < PAGE_SIZE_64)
fragSize = filebytes - ((off_t)logBlockNo * PAGE_SIZE_64);
else
fragSize = PAGE_SIZE;
xfersize = fragSize - blkoffset;
if (currOffset + (off_t)xfersize > writelimit)
xfersize = writelimit - currOffset;
if ((blkoffset == 0) && (xfersize >= fragSize)) {
/* Whole-block overwrite: no need to read the old contents. */
bp = getblk(vp, logBlockNo, fragSize, 0, 0, BLK_READ);
retval = 0;
if (bp->b_blkno == -1) {
brelse(bp);
retval = EIO;
break;
}
} else {
if (currOffset == fp->ff_size && blkoffset == 0) {
/* Fresh block at EOF: nothing on disk to read back. */
bp = getblk(vp, logBlockNo, fragSize, 0, 0, BLK_READ);
retval = 0;
if (bp->b_blkno == -1) {
brelse(bp);
retval = EIO;
break;
}
} else {
/* Partial-block update: read-modify-write. */
retval = bread(vp, logBlockNo, fragSize, ap->a_cred, &bp);
if (retval != E_NONE) {
if (bp)
brelse(bp);
break;
}
}
}
/* currOffset may lag uio_offset (write began past EOF): zero the gap. */
if (uio->uio_offset > currOffset) {
clearSize = MIN(uio->uio_offset - currOffset, xfersize);
bzero(bp->b_data + blkoffset, clearSize);
currOffset += clearSize;
blkoffset += clearSize;
xfersize -= clearSize;
}
if (xfersize > 0) {
retval = uiomove((caddr_t)bp->b_data + blkoffset, (int)xfersize, uio);
currOffset += xfersize;
}
/* Push the buffer: synchronously for IO_SYNC, asynchronously when
 * the block is complete, otherwise delayed. */
if (ioflag & IO_SYNC) {
(void)VOP_BWRITE(bp);
} else if ((xfersize + blkoffset) == fragSize) {
bp->b_flags |= B_AGE;
bawrite(bp);
} else {
bdwrite(bp);
}
if (currOffset > fp->ff_size) {
fp->ff_size = currOffset;
if (UBCISVALID(vp))
ubc_setsize(vp, fp->ff_size);
}
if (retval || (resid == 0))
break;
cp->c_flag |= C_CHANGE | C_UPDATE;
}
}
ioerr_exit:
/* A successful non-root write clears setuid/setgid (anti-tampering). */
if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
cp->c_mode &= ~(S_ISUID | S_ISGID);
if (retval) {
if (ioflag & IO_UNIT) {
/* IO_UNIT: all-or-nothing — undo the partial write. */
(void)VOP_TRUNCATE(vp, origFileSize,
ioflag & IO_SYNC, ap->a_cred, uio->uio_procp);
uio->uio_offset -= resid - uio->uio_resid;
uio->uio_resid = resid;
filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize;
}
} else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) {
/* Synchronous write: push the timestamp/metadata update now. */
tv = time;
retval = VOP_UPDATE(vp, &tv, &tv, 1);
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_END,
(int)uio->uio_offset, uio->uio_resid, (int)fp->ff_size, (int)filebytes, 0);
return (retval);
}
/*
 * hfs_ioctl - vnode op VOP_IOCTL.
 *
 * Supported commands:
 *   1            - read-advisory: prime the cache for a file range.
 *   2 / 3        - read (2) / write (3) the volume bootstrap area
 *                  (first 1024 bytes of the root volume's device).
 *   _IOC(...,4)  - return the volume's creation date in BSD time.
 * Anything else returns ENOTTY.
 */
int
hfs_ioctl(ap)
struct vop_ioctl_args *ap;
{
switch (ap->a_command) {
case 1: {
register struct cnode *cp;
register struct vnode *vp;
register struct radvisory *ra;
struct filefork *fp;
int devBlockSize = 0;
int error;
vp = ap->a_vp;
/* Read-advisory only makes sense on regular files. */
if (vp->v_type != VREG)
return EINVAL;
VOP_LEASE(vp, ap->a_p, ap->a_cred, LEASE_READ);
error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);
if (error)
return (error);
ra = (struct radvisory *)(ap->a_data);
cp = VTOC(vp);
fp = VTOF(vp);
/* Advisory range must start inside the file. */
if (ra->ra_offset >= fp->ff_size) {
VOP_UNLOCK(vp, 0, ap->a_p);
return (EFBIG);
}
VOP_DEVBLOCKSIZE(cp->c_devvp, &devBlockSize);
error = advisory_read(vp, fp->ff_size, ra->ra_offset, ra->ra_count, devBlockSize);
VOP_UNLOCK(vp, 0, ap->a_p);
return (error);
}
case 2:
case 3:
{
struct vnode *vp = ap->a_vp;
struct vnode *devvp = NULL;
struct fbootstraptransfer *btd = (struct fbootstraptransfer *)ap->a_data;
int devBlockSize;
int error;
struct iovec aiov;
struct uio auio;
u_long blockNumber;
u_long blockOffset;
u_long xfersize;
struct buf *bp;
/* Bootstrap access is restricted to the volume root vnode and to
 * the first 1024 bytes of the device. */
if ((vp->v_flag & VROOT) == 0) return EINVAL;
if (btd->fbt_offset + btd->fbt_length > 1024) return EINVAL;
devvp = VTOHFS(vp)->hfs_devvp;
/* Build a uio describing the caller's user-space buffer. */
aiov.iov_base = btd->fbt_buffer;
aiov.iov_len = btd->fbt_length;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_offset = btd->fbt_offset;
auio.uio_resid = btd->fbt_length;
auio.uio_segflg = UIO_USERSPACE;
auio.uio_rw = (ap->a_command == 3) ? UIO_WRITE : UIO_READ;
auio.uio_procp = ap->a_p;
VOP_DEVBLOCKSIZE(devvp, &devBlockSize);
/* Transfer one device block at a time through the buffer cache;
 * uiomove clamps each copy to the remaining resid. */
while (auio.uio_resid > 0) {
blockNumber = auio.uio_offset / devBlockSize;
error = bread(devvp, blockNumber, devBlockSize, ap->a_cred, &bp);
if (error) {
if (bp) brelse(bp);
return error;
};
blockOffset = auio.uio_offset % devBlockSize;
xfersize = devBlockSize - blockOffset;
error = uiomove((caddr_t)bp->b_data + blockOffset, (int)xfersize, &auio);
if (error) {
brelse(bp);
return error;
};
if (auio.uio_rw == UIO_WRITE) {
/* Write path: push the modified block synchronously. */
error = VOP_BWRITE(bp);
if (error) return error;
} else {
brelse(bp);
};
};
};
return 0;
case _IOC(IOC_OUT,'h', 4, 0):
{
/* Return the volume creation date, converted to BSD epoch time. */
*(time_t *)(ap->a_data) = to_bsd_time(VTOVCB(ap->a_vp)->localCreateDate);
return 0;
}
default:
return (ENOTTY);
}
/* NOTREACHED */
return 0;
}
/*
 * hfs_select - vnode op VOP_SELECT.
 *
 * Files on HFS are always ready for I/O, so the descriptor is
 * unconditionally reported as selectable.
 */
int
hfs_select(ap)
struct vop_select_args *ap;
{
	/* Always selectable. */
	return 1;
}
/*
 * hfs_bmap - vnode op VOP_BMAP: translate a logical block number into a
 * physical (device) block number.
 *
 * Uses MapFileBlockC under the extents B-tree lock (taken only when the
 * fork has overflow extents).  If the mapped range overlaps an invalid
 * (not-yet-zero-filled) range, the block is reported as unmapped
 * (bnp = -1) and/or the contiguous run is shortened so callers never
 * read stale data from disk.
 */
int
hfs_bmap(ap)
struct vop_bmap_args *ap;
{
struct vnode *vp = ap->a_vp;
struct cnode *cp = VTOC(vp);
struct filefork *fp = VTOF(vp);
struct hfsmount *hfsmp = VTOHFS(vp);
int retval = E_NONE;
daddr_t logBlockSize;
size_t bytesContAvail = 0;
off_t blockposition;
struct proc *p = NULL;
int lockExtBtree;
struct rl_entry *invalid_range;
enum rl_overlaptype overlaptype;
/* Conventionally, VOP_BMAP with a NULL bnp just returns the device vnode. */
if (ap->a_vpp != NULL)
*ap->a_vpp = cp->c_devvp;
if (ap->a_bnp == NULL)
return (0);
/* Any loaned (unallocated) blocks must have been resolved by VOP_CMAP
 * before bmap is called. */
DBG_ASSERT(fp->ff_unallocblocks == 0);
logBlockSize = GetLogicalBlockSize(vp);
blockposition = (off_t)ap->a_bn * (off_t)logBlockSize;
/* Only take the extents B-tree lock if this fork has overflow extents. */
lockExtBtree = overflow_extents(fp);
if (lockExtBtree) {
p = current_proc();
retval = hfs_metafilelocking(hfsmp, kHFSExtentsFileID,
LK_EXCLUSIVE | LK_CANRECURSE, p);
if (retval)
return (retval);
}
retval = MacToVFSError(
MapFileBlockC (HFSTOVCB(hfsmp),
(FCB*)fp,
MAXPHYSIO,
blockposition,
ap->a_bnp,
&bytesContAvail));
if (lockExtBtree) (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p);
if (retval == E_NONE) {
/* Adjust the answer for any overlap with invalid ranges. */
overlaptype = rl_scan(&fp->ff_invalidranges,
blockposition,
blockposition + MAXPHYSIO - 1,
&invalid_range);
if (overlaptype != RL_NOOVERLAP) {
switch(overlaptype) {
case RL_MATCHINGOVERLAP:
case RL_OVERLAPCONTAINSRANGE:
case RL_OVERLAPSTARTSBEFORE:
/* The start of the range is invalid: report it unmapped. */
*ap->a_bnp = (daddr_t)-1;
bytesContAvail = invalid_range->rl_end + 1 - blockposition;
break;
case RL_OVERLAPISCONTAINED:
case RL_OVERLAPENDSAFTER:
if (invalid_range->rl_start == blockposition) {
/* Invalid region begins exactly here: unmapped, possibly shortened. */
*ap->a_bnp = (daddr_t)-1;
if ((fp->ff_size > (invalid_range->rl_end + 1)) &&
(invalid_range->rl_end + 1 - blockposition < bytesContAvail)) {
bytesContAvail = invalid_range->rl_end + 1 - blockposition;
};
} else {
/* Valid up to where the invalid region starts. */
bytesContAvail = invalid_range->rl_start - blockposition;
};
break;
};
if (bytesContAvail > MAXPHYSIO) bytesContAvail = MAXPHYSIO;
};
/* Report the run length in logical blocks beyond this one. */
if (ap->a_runp != NULL) {
if (can_cluster(logBlockSize)) {
*ap->a_runp = (bytesContAvail < logBlockSize) ? 0 : (bytesContAvail / logBlockSize) - 1;
} else {
*ap->a_runp = 0;
};
};
};
return (retval);
}
/*
 * hfs_blktooff - vnode op VOP_BLKTOOFF: convert a logical block number
 * into a byte offset.  Logical blocks on HFS vnodes are PAGE_SIZE units.
 */
int
hfs_blktooff(ap)
struct vop_blktooff_args *ap;
{
	if (ap->a_vp != NULL) {
		*ap->a_offset = (off_t)ap->a_lblkno * PAGE_SIZE_64;
		return 0;
	}
	/* No vnode supplied. */
	return EINVAL;
}
/*
 * hfs_offtoblk - vnode op VOP_OFFTOBLK: convert a byte offset into a
 * logical block number.  Logical blocks on HFS vnodes are PAGE_SIZE units.
 */
int
hfs_offtoblk(ap)
struct vop_offtoblk_args *ap;
{
	if (ap->a_vp != NULL) {
		*ap->a_lblkno = ap->a_offset / PAGE_SIZE_64;
		return 0;
	}
	/* No vnode supplied. */
	return EINVAL;
}
/*
 * hfs_cmap - vnode op VOP_CMAP: map a file offset/size to a device block
 * and contiguous byte count, for the cluster I/O layer.
 *
 * If the fork is carrying "loaned" blocks (delayed allocations recorded
 * in ff_unallocblocks), they are converted into real allocations here
 * first — under the extents B-tree lock and, on journaled volumes,
 * inside a journal transaction (the `retry` label re-runs the setup when
 * the transaction has to be started after the fact).  The mapping result
 * is then adjusted against the invalid-range list exactly as in hfs_bmap.
 */
int
hfs_cmap(ap)
struct vop_cmap_args *ap;
{
struct hfsmount *hfsmp = VTOHFS(ap->a_vp);
struct filefork *fp = VTOF(ap->a_vp);
size_t bytesContAvail = 0;
int retval = E_NONE;
int lockExtBtree = 0;
struct proc *p = NULL;
struct rl_entry *invalid_range;
enum rl_overlaptype overlaptype;
int started_tr = 0, grabbed_lock = 0;
/* No block pointer requested: nothing to compute. */
if (ap->a_bpn == NULL)
return (0);
p = current_proc();
retry:
if (fp->ff_unallocblocks) {
/* Loaned blocks present: we will be allocating, so take the global
 * shared lock and open a transaction on journaled volumes. */
lockExtBtree = 1;
hfs_global_shared_lock_acquire(hfsmp);
grabbed_lock = 1;
if (hfsmp->jnl) {
if (journal_start_transaction(hfsmp->jnl) != 0) {
hfs_global_shared_lock_release(hfsmp);
return EINVAL;
} else {
started_tr = 1;
}
}
if (retval = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE | LK_CANRECURSE, p)) {
if (started_tr) {
journal_end_transaction(hfsmp->jnl);
}
if (grabbed_lock) {
hfs_global_shared_lock_release(hfsmp);
}
return (retval);
}
} else if (overflow_extents(fp)) {
/* No allocation needed, but the extents B-tree must still be locked
 * to walk overflow extents. */
lockExtBtree = 1;
if (retval = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE | LK_CANRECURSE, p)) {
return retval;
}
}
if (fp->ff_unallocblocks) {
SInt64 reqbytes, actbytes;
/* Loaned blocks appeared after the first check (raced with another
 * thread) and we have no transaction yet: drop the lock and redo
 * the setup from the top. */
if (hfsmp->jnl && started_tr == 0) {
if (lockExtBtree) {
(void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p);
lockExtBtree = 0;
}
goto retry;
}
reqbytes = (SInt64)fp->ff_unallocblocks *
(SInt64)HFSTOVCB(hfsmp)->blockSize;
/* Return the loaned blocks to the volume before really allocating;
 * on failure below they are re-loaned. */
HFSTOVCB(hfsmp)->loanedBlocks -= fp->ff_unallocblocks;
FTOC(fp)->c_blocks -= fp->ff_unallocblocks;
fp->ff_blocks -= fp->ff_unallocblocks;
fp->ff_unallocblocks = 0;
while (retval == 0 && reqbytes > 0) {
retval = MacToVFSError(ExtendFileC(HFSTOVCB(hfsmp),
(FCB*)fp, reqbytes, 0,
kEFAllMask | kEFNoClumpMask, &actbytes));
if (retval == 0 && actbytes == 0)
retval = ENOSPC;
if (retval) {
/* Allocation failed: put the remaining shortfall back on loan. */
fp->ff_unallocblocks =
reqbytes / HFSTOVCB(hfsmp)->blockSize;
HFSTOVCB(hfsmp)->loanedBlocks += fp->ff_unallocblocks;
FTOC(fp)->c_blocks += fp->ff_unallocblocks;
fp->ff_blocks += fp->ff_unallocblocks;
}
reqbytes -= actbytes;
}
if (retval) {
(void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p);
if (started_tr) {
hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0);
journal_end_transaction(hfsmp->jnl);
}
if (grabbed_lock) {
hfs_global_shared_lock_release(hfsmp);
}
return (retval);
}
VTOC(ap->a_vp)->c_flag |= C_MODIFIED;
}
retval = MacToVFSError(
MapFileBlockC (HFSTOVCB(hfsmp),
(FCB *)fp,
ap->a_size,
ap->a_foffset,
ap->a_bpn,
&bytesContAvail));
if (lockExtBtree)
(void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p);
if (started_tr) {
hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0);
journal_end_transaction(hfsmp->jnl);
started_tr = 0;
}
if (grabbed_lock) {
hfs_global_shared_lock_release(hfsmp);
grabbed_lock = 0;
}
if (retval == E_NONE) {
/* Adjust for overlap with invalid (not-yet-zeroed) ranges so the
 * cluster layer never reads stale data. */
overlaptype = rl_scan(&fp->ff_invalidranges,
ap->a_foffset,
ap->a_foffset + (off_t)bytesContAvail - 1,
&invalid_range);
if (overlaptype != RL_NOOVERLAP) {
switch(overlaptype) {
case RL_MATCHINGOVERLAP:
case RL_OVERLAPCONTAINSRANGE:
case RL_OVERLAPSTARTSBEFORE:
/* Invalid at the start: report unmapped, possibly shortened. */
*ap->a_bpn = (daddr_t)-1;
if ((fp->ff_size > (invalid_range->rl_end + 1)) &&
(invalid_range->rl_end + 1 - ap->a_foffset < bytesContAvail)) {
bytesContAvail = invalid_range->rl_end + 1 - ap->a_foffset;
};
break;
case RL_OVERLAPISCONTAINED:
case RL_OVERLAPENDSAFTER:
if (invalid_range->rl_start == ap->a_foffset) {
*ap->a_bpn = (daddr_t)-1;
if ((fp->ff_size > (invalid_range->rl_end + 1)) &&
(invalid_range->rl_end + 1 - ap->a_foffset < bytesContAvail)) {
bytesContAvail = invalid_range->rl_end + 1 - ap->a_foffset;
};
} else {
/* Valid only up to the start of the invalid region. */
bytesContAvail = invalid_range->rl_start - ap->a_foffset;
};
break;
};
if (bytesContAvail > ap->a_size) bytesContAvail = ap->a_size;
};
if (ap->a_run) *ap->a_run = bytesContAvail;
};
if (ap->a_poff)
*(int *)ap->a_poff = 0;
return (retval);
}
/*
 * hfs_strategy_fragmented - carry out a buffer's I/O when its file range
 * is not physically contiguous on disk.
 *
 * Breaks the transfer into device-block-size pieces: each piece is mapped
 * with VOP_CMAP, staged through a scratch buffer (`frag`) against the
 * device vnode, and waited on synchronously.  Unmapped pieces (lblkno ==
 * -1, i.e. holes/invalid ranges) read back as zeroes.  Completion and any
 * error are reported on the original buffer via biodone().
 */
static int
hfs_strategy_fragmented(struct buf *bp)
{
register struct vnode *vp = bp->b_vp;
register struct cnode *cp = VTOC(vp);
register struct vnode *devvp = cp->c_devvp;
caddr_t ioaddr;
struct buf *frag = NULL;
int retval = 0;
long remaining;
off_t offset;
u_long block_size;
bp->b_blkno = bp->b_lblkno;
offset = (off_t) bp->b_lblkno * (off_t) GetLogicalBlockSize(vp);
block_size = VTOHFS(vp)->hfs_phys_block_size;
/* One scratch buffer, reused for every fragment. */
frag = geteblk(block_size);
if (ISSET(bp->b_flags, B_READ))
SET(frag->b_flags, B_READ);
for (ioaddr = bp->b_data, remaining = bp->b_bcount; remaining != 0;
ioaddr += block_size, offset += block_size,
remaining -= block_size) {
frag->b_resid = frag->b_bcount;
CLR(frag->b_flags, B_DONE);
/* Map this fragment's file offset to a device block. */
retval = VOP_CMAP(vp, offset, block_size, &frag->b_lblkno,
NULL, NULL);
if (retval != 0)
break;
if ((long)frag->b_lblkno == -1) {
/* Hole or invalid range: reads are zero-filled, writes skipped. */
bzero(ioaddr, block_size);
continue;
}
/* Writes: copy the caller's data into the scratch buffer first. */
if (!ISSET(bp->b_flags, B_READ))
bcopy(ioaddr, frag->b_data, block_size);
frag->b_blkno = frag->b_lblkno;
frag->b_vp = devvp;
frag->b_dev = devvp->v_rdev;
retval = VOP_STRATEGY(frag);
frag->b_vp = NULL; /* detach from the device vnode after each I/O */
if (retval != 0)
break;
retval = biowait(frag);
if (retval != 0)
break;
/* Reads: copy the fragment back into the caller's buffer. */
if (ISSET(bp->b_flags, B_READ))
bcopy(frag->b_data, ioaddr, block_size);
}
frag->b_vp = NULL;
SET(frag->b_flags, B_INVAL); /* scratch contents must not be cached */
brelse(frag);
/* Propagate the final status onto the original buffer. */
if ((bp->b_error = retval) != 0)
SET(bp->b_flags, B_ERROR);
biodone(bp);
return retval;
}
/*
 * hfs_strategy - vnode op VOP_STRATEGY: start I/O on a buffer.
 *
 * Page-list buffers go through cluster_bp().  Other buffers have their
 * logical block mapped to a physical block via VOP_CMAP; if the range is
 * not fully contiguous the request is handed to hfs_strategy_fragmented().
 * Unmapped blocks (holes) are satisfied with zeroes.  Everything else is
 * forwarded to the device vnode's strategy routine.
 */
int
hfs_strategy(ap)
struct vop_strategy_args *ap;
{
register struct buf *bp = ap->a_bp;
register struct vnode *vp = bp->b_vp;
register struct cnode *cp = VTOC(vp);
int retval = 0;
off_t offset;
size_t bytes_contig;
if ( !(bp->b_flags & B_VECTORLIST)) {
/* Strategy must be called on a file vnode, never the device itself. */
if (vp->v_type == VBLK || vp->v_type == VCHR)
panic("hfs_strategy: device vnode passed!");
if (bp->b_flags & B_PAGELIST) {
/* Page-list I/O is handled entirely by the cluster layer. */
retval = cluster_bp(bp);
vp = cp->c_devvp;
bp->b_dev = vp->v_rdev;
return (retval);
}
/* b_blkno == b_lblkno means the buffer has not been mapped yet. */
if (bp->b_blkno == bp->b_lblkno) {
offset = (off_t) bp->b_lblkno *
(off_t) GetLogicalBlockSize(vp);
if ((retval = VOP_CMAP(vp, offset, bp->b_bcount,
&bp->b_blkno, &bytes_contig, NULL))) {
bp->b_error = retval;
bp->b_flags |= B_ERROR;
biodone(bp);
return (retval);
}
if (bytes_contig < bp->b_bcount)
{
/* Range spans multiple extents: do it piecewise. */
return hfs_strategy_fragmented(bp);
}
/* Unmapped (hole): supply zeroes instead of reading. */
if ((long)bp->b_blkno == -1)
clrbuf(bp);
}
if ((long)bp->b_blkno == -1) {
/* Nothing to transfer for a hole; complete immediately. */
biodone(bp);
return (0);
}
if (bp->b_validend == 0) {
/* No partial-validity info: treat the whole buffer as valid. */
bp->b_validend = bp->b_bcount;
}
}
/* Forward to the underlying device's strategy routine. */
vp = cp->c_devvp;
bp->b_dev = vp->v_rdev;
return VOCALL (vp->v_op, VOFFSET(vop_strategy), ap);
}
/*
 * hfs_truncate - vnode op VOP_TRUNCATE: change a file's length.
 *
 * Growing: allocate blocks with ExtendFileC (under the extents B-tree
 * lock, inside a journal transaction on journaled volumes), then arrange
 * zero-filling of the new range — either eagerly via cluster_write for
 * the partial page at the old EOF, or lazily by recording invalid ranges.
 *
 * Shrinking: trim the UBC size, invalidate buffers, return any loaned
 * (delayed-allocation) blocks, and release real blocks via TruncateFileC.
 */
int hfs_truncate(ap)
struct vop_truncate_args *ap;
{
register struct vnode *vp = ap->a_vp;
register struct cnode *cp = VTOC(vp);
struct filefork *fp = VTOF(vp);
off_t length;
long vflags;
struct timeval tv;
int retval;
off_t bytesToAdd;
off_t actualBytesAdded;
off_t filebytes;
u_long fileblocks;
int blksize;
struct hfsmount *hfsmp;
if (vp->v_type != VREG && vp->v_type != VLNK)
return (EISDIR);
length = ap->a_length;
blksize = VTOVCB(vp)->blockSize;
fileblocks = fp->ff_blocks;
filebytes = (off_t)fileblocks * (off_t)blksize; /* physical (allocated) size */
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_START,
(int)length, (int)fp->ff_size, (int)filebytes, 0, 0);
if (length < 0)
return (EINVAL);
/* Plain HFS caps files at 2GB - 1. */
if ((!ISHFSPLUS(VTOVCB(vp))) && (length > (off_t)MAXHFSFILESIZE))
return (EFBIG);
hfsmp = VTOHFS(vp);
tv = time;
retval = E_NONE;
#if QUOTA
if (retval = hfs_getinoquota(cp))
return(retval);
#endif
if (length > fp->ff_size) {
/* ---- Growing the file ---- */
#if QUOTA
/* Charge quota for the blocks that will be allocated. */
retval = hfs_chkdq(cp, (int64_t)(roundup(length - filebytes, blksize)),
ap->a_cred, 0);
if (retval)
goto Err_Exit;
#endif
if (length > filebytes) {
int eflags;
eflags = kEFAllMask | kEFNoClumpMask;
/* Non-superuser allocations must not dip into the reserve. */
if (suser(ap->a_cred, NULL) != 0)
eflags |= kEFReserveMask;
hfs_global_shared_lock_acquire(hfsmp);
if (hfsmp->jnl) {
if (journal_start_transaction(hfsmp->jnl) != 0) {
retval = EINVAL;
goto Err_Exit;
}
}
retval = hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
if (retval) {
if (hfsmp->jnl) {
journal_end_transaction(hfsmp->jnl);
}
hfs_global_shared_lock_release(hfsmp);
goto Err_Exit;
}
/* ExtendFileC may grow the fork in several steps; if it stalls
 * (no bytes added, no error) settle for what was allocated. */
while ((length > filebytes) && (retval == E_NONE)) {
bytesToAdd = length - filebytes;
retval = MacToVFSError(ExtendFileC(VTOVCB(vp),
(FCB*)fp,
bytesToAdd,
0,
eflags,
&actualBytesAdded));
filebytes = (off_t)fp->ff_blocks * (off_t)blksize;
if (actualBytesAdded == 0 && retval == E_NONE) {
if (length > filebytes)
length = filebytes;
break;
}
}
(void) hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_RELEASE, ap->a_p);
if (hfsmp->jnl) {
hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0);
journal_end_transaction(hfsmp->jnl);
}
hfs_global_shared_lock_release(hfsmp);
if (retval)
goto Err_Exit;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_NONE,
(int)length, (int)fp->ff_size, (int)filebytes, 0, 0);
}
if (!(ap->a_flags & IO_NOZEROFILL)) {
if (UBCINFOEXISTS(vp) && retval == E_NONE) {
struct rl_entry *invalid_range;
int devBlockSize;
off_t zero_limit;
/* Zero the remainder of the page holding the old EOF eagerly;
 * anything beyond that page is recorded lazily. */
zero_limit = (fp->ff_size + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
if (length < zero_limit) zero_limit = length;
if (length > fp->ff_size) {
if ((fp->ff_size & PAGE_MASK_64) &&
(rl_scan(&fp->ff_invalidranges, fp->ff_size & ~PAGE_MASK_64,
fp->ff_size - 1, &invalid_range) == RL_NOOVERLAP)) {
/* EOF page has valid data: zero-fill to the page boundary now. */
VOP_DEVBLOCKSIZE(cp->c_devvp, &devBlockSize);
retval = cluster_write(vp, (struct uio *) 0, fp->ff_size, zero_limit,
fp->ff_size, (off_t)0, devBlockSize,
(ap->a_flags & IO_SYNC) | IO_HEADZEROFILL | IO_NOZERODIRTY);
if (retval) goto Err_Exit;
if (length > zero_limit) {
rl_add(zero_limit, length - 1, &fp->ff_invalidranges);
cp->c_zftimeout = time.tv_sec + ZFTIMELIMIT;
}
} else {
/* EOF page already invalid: record the whole gap lazily. */
rl_add(fp->ff_size, length - 1, &fp->ff_invalidranges);
cp->c_zftimeout = time.tv_sec + ZFTIMELIMIT;
};
}
} else {
panic("hfs_truncate: invoked on non-UBC object?!");
};
}
cp->c_flag |= C_UPDATE;
fp->ff_size = length;
if (UBCISVALID(vp))
ubc_setsize(vp, fp->ff_size); /* keep VM object size in sync */
} else {
/* ---- Shrinking the file ---- */
if (fp->ff_size > length) {
if (UBCISVALID(vp))
ubc_setsize(vp, length);
/* Invalidate buffers past the new EOF; keep metadata buffers. */
vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA;
retval = vinvalbuf(vp, vflags, ap->a_cred, ap->a_p, 0, 0);
rl_remove(length, fp->ff_size - 1, &fp->ff_invalidranges);
}
if (fp->ff_unallocblocks > 0) {
u_int32_t finalblks;
/* Return loaned blocks to the volume, then re-loan only as many
 * as the new length still requires. */
retval = hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID,
LK_EXCLUSIVE, ap->a_p);
if (retval)
goto Err_Exit;
VTOVCB(vp)->loanedBlocks -= fp->ff_unallocblocks;
cp->c_blocks -= fp->ff_unallocblocks;
fp->ff_blocks -= fp->ff_unallocblocks;
fp->ff_unallocblocks = 0;
finalblks = (length + blksize - 1) / blksize;
if (finalblks > fp->ff_blocks) {
fp->ff_unallocblocks = finalblks - fp->ff_blocks;
VTOVCB(vp)->loanedBlocks += fp->ff_unallocblocks;
cp->c_blocks += fp->ff_unallocblocks;
fp->ff_blocks += fp->ff_unallocblocks;
}
(void) hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID,
LK_RELEASE, ap->a_p);
}
if ((ap->a_flags & IO_NDELAY) || (!ISSET(ap->a_p->p_flag, P_TBE))) {
#if QUOTA
off_t savedbytes = ((off_t)fp->ff_blocks * (off_t)blksize);
#endif
hfs_global_shared_lock_acquire(hfsmp);
if (hfsmp->jnl) {
if (journal_start_transaction(hfsmp->jnl) != 0) {
retval = EINVAL;
goto Err_Exit;
}
}
retval = hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
if (retval) {
if (hfsmp->jnl) {
journal_end_transaction(hfsmp->jnl);
}
hfs_global_shared_lock_release(hfsmp);
goto Err_Exit;
}
/* Release real blocks only when none are merely loaned. */
if (fp->ff_unallocblocks == 0)
retval = MacToVFSError(TruncateFileC(VTOVCB(vp),
(FCB*)fp, length, false));
(void) hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_RELEASE, ap->a_p);
if (hfsmp->jnl) {
hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0);
journal_end_transaction(hfsmp->jnl);
}
hfs_global_shared_lock_release(hfsmp);
filebytes = (off_t)fp->ff_blocks * (off_t)blksize;
if (retval)
goto Err_Exit;
#if QUOTA
/* Credit the quota for the blocks released. */
(void) hfs_chkdq(cp, (int64_t)-(savedbytes - filebytes), NOCRED, 0);
#endif
}
if (fp->ff_size != length)
cp->c_flag |= C_UPDATE;
fp->ff_size = length;
}
cp->c_flag |= C_CHANGE;
retval = VOP_UPDATE(vp, &tv, &tv, MNT_WAIT);
if (retval) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_NONE,
-1, -1, -1, retval, 0);
}
Err_Exit:
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_END,
(int)length, (int)fp->ff_size, (int)filebytes, retval, 0);
return (retval);
}
/*
 * hfs_allocate - vnode op VOP_ALLOCATE: preallocate or release physical
 * storage for a file without changing its logical size (except when the
 * physical size drops below it).
 *
 * length > filebytes: allocate with ExtendFileC, honoring the
 * ALLOCATECONTIG / ALLOCATEALL / ALLOCATEFROMPEOF / ALLOCATEFROMVOL
 * flags; reports bytes actually allocated through ap->a_bytesallocated.
 * length < filebytes: release storage with TruncateFileC.
 * All allocation changes run under the extents B-tree lock and, on
 * journaled volumes, inside a journal transaction.
 */
int hfs_allocate(ap)
struct vop_allocate_args *ap;
{
struct vnode *vp = ap->a_vp;
struct cnode *cp = VTOC(vp);
struct filefork *fp = VTOF(vp);
off_t length = ap->a_length;
off_t startingPEOF;
off_t moreBytesRequested;
off_t actualBytesAdded;
off_t filebytes;
u_long fileblocks;
long vflags;
struct timeval tv;
int retval, retval2;
UInt32 blockHint;
UInt32 extendFlags =0;
struct hfsmount *hfsmp;
hfsmp = VTOHFS(vp);
*(ap->a_bytesallocated) = 0;
fileblocks = fp->ff_blocks;
filebytes = (off_t)fileblocks * (off_t)VTOVCB(vp)->blockSize; /* physical EOF */
if (length < (off_t)0)
return (EINVAL);
if (vp->v_type != VREG && vp->v_type != VLNK)
return (EISDIR);
/* ALLOCATEFROMVOL requires a length beyond the current physical EOF. */
if ((ap->a_flags & ALLOCATEFROMVOL) && (length <= filebytes))
return (EINVAL);
/* Translate the caller's flags into ExtendFileC flags. */
if (ap->a_flags & ALLOCATECONTIG)
extendFlags |= kEFContigMask;
if (ap->a_flags & ALLOCATEALL)
extendFlags |= kEFAllMask;
/* Non-superuser allocations may not dip into the reserve pool. */
if (suser(ap->a_cred, NULL) != 0)
extendFlags |= kEFReserveMask;
tv = time;
retval = E_NONE;
blockHint = 0;
startingPEOF = filebytes;
if (ap->a_flags & ALLOCATEFROMPEOF)
length += filebytes;
else if (ap->a_flags & ALLOCATEFROMVOL)
blockHint = ap->a_offset / VTOVCB(vp)->blockSize;
/* Already exactly the requested size: just update timestamps. */
if (filebytes == length)
goto Std_Exit;
if (length > filebytes) {
/* ---- Allocating more storage ---- */
moreBytesRequested = length - filebytes;
#if QUOTA
retval = hfs_chkdq(cp,
(int64_t)(roundup(moreBytesRequested, VTOVCB(vp)->blockSize)),
ap->a_cred, 0);
if (retval)
return (retval);
#endif
hfs_global_shared_lock_acquire(hfsmp);
if (hfsmp->jnl) {
if (journal_start_transaction(hfsmp->jnl) != 0) {
retval = EINVAL;
goto Err_Exit;
}
}
retval = hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
if (retval) {
if (hfsmp->jnl) {
journal_end_transaction(hfsmp->jnl);
}
hfs_global_shared_lock_release(hfsmp);
goto Err_Exit;
}
retval = MacToVFSError(ExtendFileC(VTOVCB(vp),
(FCB*)fp,
moreBytesRequested,
blockHint,
extendFlags,
&actualBytesAdded));
*(ap->a_bytesallocated) = actualBytesAdded;
filebytes = (off_t)fp->ff_blocks * (off_t)VTOVCB(vp)->blockSize;
(void) hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_RELEASE, ap->a_p);
if (hfsmp->jnl) {
hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0);
journal_end_transaction(hfsmp->jnl);
}
hfs_global_shared_lock_release(hfsmp);
/* If nothing changed on failure, skip the timestamp update. */
if (retval && (startingPEOF == filebytes))
goto Err_Exit;
/* ExtendFileC can round up past the request; report the rounded size. */
if ((actualBytesAdded != 0) && (moreBytesRequested < actualBytesAdded))
*(ap->a_bytesallocated) =
roundup(moreBytesRequested, (off_t)VTOVCB(vp)->blockSize);
} else {
/* ---- Releasing storage ---- */
if (fp->ff_size > length) {
/* Drop buffers past the new size; keep metadata buffers. */
vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA;
(void) vinvalbuf(vp, vflags, ap->a_cred, ap->a_p, 0, 0);
}
hfs_global_shared_lock_acquire(hfsmp);
if (hfsmp->jnl) {
if (journal_start_transaction(hfsmp->jnl) != 0) {
retval = EINVAL;
goto Err_Exit;
}
}
retval = hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
if (retval) {
if (hfsmp->jnl) {
journal_end_transaction(hfsmp->jnl);
}
hfs_global_shared_lock_release(hfsmp);
goto Err_Exit;
}
retval = MacToVFSError(
TruncateFileC(
VTOVCB(vp),
(FCB*)fp,
length,
false));
(void) hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_RELEASE, ap->a_p);
filebytes = (off_t)fp->ff_blocks * (off_t)VTOVCB(vp)->blockSize;
if (hfsmp->jnl) {
hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0);
journal_end_transaction(hfsmp->jnl);
}
hfs_global_shared_lock_release(hfsmp);
/* If nothing changed on failure, skip the timestamp update. */
if (retval && (startingPEOF == filebytes)) goto Err_Exit;
#if QUOTA
/* Credit the quota for the released blocks. */
(void) hfs_chkdq(cp, (int64_t)-((startingPEOF - filebytes)), NOCRED,0);
#endif
/* Logical size can never exceed physical size. */
if (fp->ff_size > filebytes) {
fp->ff_size = filebytes;
if (UBCISVALID(vp))
ubc_setsize(vp, fp->ff_size);
}
}
Std_Exit:
cp->c_flag |= C_CHANGE | C_UPDATE;
retval2 = VOP_UPDATE(vp, &tv, &tv, MNT_WAIT);
if (retval == 0)
retval = retval2;
Err_Exit:
return (retval);
}
/*
 * hfs_pagein - vnode op VOP_PAGEIN: fill VM pages with file data by way
 * of the cluster layer.  Only VREG and VLNK vnodes are UBC-backed, so
 * anything else is a programming error.
 */
int
hfs_pagein(ap)
struct vop_pagein_args *ap;
{
	struct vnode *vp = ap->a_vp;
	int blksize = 0;

	if (vp->v_type != VREG && vp->v_type != VLNK)
		panic("hfs_pagein: vp not UBC type\n");

	/* The cluster layer needs the device block size and the logical EOF. */
	VOP_DEVBLOCKSIZE(VTOC(vp)->c_devvp, &blksize);

	return cluster_pagein(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset,
	    ap->a_size, (off_t)VTOF(vp)->ff_size, blksize, ap->a_flags);
}
/*
 * hfs_pageout - vnode op VOP_PAGEOUT: flush VM pages to disk.
 *
 * Invalid (not-yet-zero-filled) ranges covered by the outgoing pages are
 * removed first, since those pages now carry valid data.  A successful
 * pageout under a non-root credential strips the setuid/setgid bits,
 * matching ordinary write semantics.
 */
int
hfs_pageout(ap)
struct vop_pageout_args *ap;
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct filefork *fp = VTOF(vp);
	off_t eof, last;
	int blksize = 0;
	int err;

	if (UBCINVALID(vp))
		panic("hfs_pageout: Not a VREG: vp=%x", vp);

	VOP_DEVBLOCKSIZE(cp->c_devvp, &blksize);
	eof = fp->ff_size;

	/* Clamp the affected byte range to the file, then drop any invalid
	 * ranges it covers. */
	last = ap->a_f_offset + ap->a_size - 1;
	if (last >= eof)
		last = (off_t)(eof - 1);
	if (ap->a_f_offset < eof)
		rl_remove(ap->a_f_offset, last, &fp->ff_invalidranges);

	err = cluster_pageout(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset,
	    ap->a_size, eof, blksize, ap->a_flags);

	/* Successful non-root write: clear setuid/setgid. */
	if (err == 0 && ap->a_cred && ap->a_cred->cr_uid != 0)
		cp->c_mode &= ~(S_ISUID | S_ISGID);

	return err;
}
/*
 * hfs_bwrite - vnode op VOP_BWRITE: write a (possibly B-tree) buffer.
 *
 * On little-endian hosts, buffers belonging to the extents or catalog
 * B-trees are byte-swapped back to big-endian (on-disk) order before
 * being written.  The B_LOCKED bit should never be set here on journaled
 * volumes (the journal owns locked buffers); on non-journaled volumes it
 * is cleared with a warning.  The actual write is done by vn_bwrite().
 */
int
hfs_bwrite(ap)
struct vop_bwrite_args *ap;
{
int retval = 0;
register struct buf *bp = ap->a_bp;
register struct vnode *vp = bp->b_vp;
#if BYTE_ORDER == LITTLE_ENDIAN
BlockDescriptor block;
/* Only the extents and catalog B-tree files need on-disk byte order. */
if ((VTOC(vp)->c_fileid == kHFSExtentsFileID) ||
(VTOC(vp)->c_fileid == kHFSCatalogFileID)) {
/* 0x000e in the node's trailing UInt16 marks a host-order B-tree node
 * that must be swapped back to big-endian before hitting the disk. */
if (((UInt16 *)((char *)bp->b_data + bp->b_bcount - 2))[0] == 0x000e) {
block.blockHeader = bp;
block.buffer = bp->b_data;
block.blockReadFromDisk = (bp->b_flags & B_CACHE) == 0;
block.blockSize = bp->b_bcount;
/* Swap the node to big-endian (the `1' selects host -> disk). */
SWAP_BT_NODE (&block, ISHFSPLUS (VTOVCB(vp)), VTOC(vp)->c_fileid, 1);
}
}
#endif
if (ISSET(bp->b_flags, B_LOCKED)) {
/* Locked buffers belong to the journal; reaching here on a journaled
 * volume indicates a logic error. */
if (VTOHFS(vp)->jnl) {
panic("hfs: CLEARING the lock bit on bp 0x%x\n", bp);
}
CLR(bp->b_flags, B_LOCKED);
printf("hfs_bwrite: called with lock bit set\n");
}
retval = vn_bwrite (ap);
return (retval);
}