#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/uio.h>
#include <sys/vfs_context.h>
#include <sys/fsevents.h>
#include <kern/kalloc.h>
#include <sys/disk.h>
#include <sys/sysctl.h>
#include <sys/fsctl.h>
#include <miscfs/specfs/specdev.h>
#include <sys/ubc.h>
#include <sys/ubc_internal.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <sys/kdebug.h>
#include "hfs.h"
#include "hfs_attrlist.h"
#include "hfs_endian.h"
#include "hfs_fsctl.h"
#include "hfs_quota.h"
#include "hfscommon/headers/FileMgrInternal.h"
#include "hfscommon/headers/BTreesInternal.h"
#include "hfs_cnode.h"
#include "hfs_dbg.h"
/*
 * A transfer size can go through the cluster layer when it is an exact
 * multiple of 4KB and no larger than half the maximum physical I/O size.
 */
#define can_cluster(size) ((((size & (4096-1))) == 0) && (size <= (MAXPHYSIO/2)))

enum {
	/* Largest file offset representable on an HFS (standard) volume: 2GB - 1. */
	MAXHFSFILESIZE = 0x7FFFFFFF
};

/* VFS-level vget; used below to translate a cnid into a vnode. */
extern int hfs_vfs_vget (struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context);

/* Forward declarations for clone/truncate helpers defined later in this file. */
static int hfs_clonelink(struct vnode *, int, kauth_cred_t, struct proc *);
static int hfs_clonefile(struct vnode *, int, int, int);
static int hfs_clonesysfile(struct vnode *, int, int, int, kauth_cred_t, struct proc *);
static int hfs_minorupdate(struct vnode *vp);
static int do_hfs_truncate(struct vnode *vp, off_t length, int flags, int skip, vfs_context_t context);

/*
 * When non-zero, hfs_vnop_write() issues DKIOCSYNCHRONIZECACHE after writes
 * to uncached files.  Exposed as the kern.flush_cache_on_write sysctl.
 */
int flush_cache_on_write = 0;
SYSCTL_INT (_kern, OID_AUTO, flush_cache_on_write, CTLFLAG_RW | CTLFLAG_LOCKED, &flush_cache_on_write, 0, "always flush the drive cache on writes to uncached files");
/*
 * Read data from a file (vnop_read entry point).
 *
 * Validates the request, handles compressed/dataless files when
 * HFS_COMPRESSION is built in, takes the truncate lock shared so the file
 * cannot shrink underneath the transfer, and hands the I/O to
 * cluster_read().  When the hot-file recorder is active, bytes read are
 * accumulated in the file fork for later adoption decisions.
 *
 * Returns 0 or an errno (EISDIR for directories, EPERM for other
 * non-regular vnodes, EINVAL for negative offsets, EFBIG past the
 * standard-HFS size limit, or whatever cluster_read() reports).
 */
int
hfs_vnop_read(struct vnop_read_args *ap)
{
	uio_t uio = ap->a_uio;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp;
	struct hfsmount *hfsmp;
	off_t filesize;
	off_t filebytes;
	off_t start_resid = uio_resid(uio);
	off_t offset = uio_offset(uio);
	int retval = 0;
	int took_truncate_lock = 0;

	/* Preflight checks: only regular files are readable here. */
	if (!vnode_isreg(vp)) {
		if (vnode_isdir(vp))
			return (EISDIR);
		else
			return (EPERM);
	}
	if (start_resid == 0)
		return (0);		/* nothing to read */
	if (offset < 0)
		return (EINVAL);	/* cannot read from a negative offset */

#if HFS_COMPRESSION
	if (VNODE_IS_RSRC(vp)) {
		/* A hidden resource fork reads back as empty. */
		if (hfs_hides_rsrc(ap->a_context, VTOC(vp), 1)) {
			return 0;
		}
	} else {
		int compressed = hfs_file_is_compressed(VTOC(vp), 1);
		if (compressed) {
			retval = decmpfs_read_compressed(ap, &compressed, VTOCMP(vp));
			if (compressed) {
				/* decmpfs satisfied the read itself. */
				if (retval == 0) {
					VTOC(vp)->c_touch_acctime = TRUE;
					if (VTOHFS(vp)->hfc_stage == HFC_RECORDING) {
						VTOF(vp)->ff_bytesread = 0;
					}
				}
				return retval;
			}
			/* File was decompressed underneath us; fall through to a normal read. */
			retval = 0;
		} else if ((VTOC(vp)->c_flags & UF_COMPRESSED)) {
			int error;

			/* Dataless file: let the namespace handler materialize it first. */
			error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
			if (error) {
				return error;
			}
		}
	}
#endif /* HFS_COMPRESSION */

	cp = VTOC(vp);
	fp = VTOF(vp);
	hfsmp = VTOHFS(vp);

#if CONFIG_PROTECT
	if ((retval = cp_handle_vnop (cp, CP_READ_ACCESS)) != 0) {
		goto exit;
	}
#endif

	/* Hold the truncate lock shared so the file size is stable for the read. */
	hfs_lock_truncate(cp, HFS_SHARED_LOCK);
	took_truncate_lock = 1;

	filesize = fp->ff_size;
	filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize;
	if (offset > filesize) {
		/*
		 * Reading at or beyond EOF transfers nothing (retval stays 0),
		 * except on standard HFS where offsets past MAXHFSFILESIZE are EFBIG.
		 */
		if ((hfsmp->hfs_flags & HFS_STANDARD) &&
		    (offset > (off_t)MAXHFSFILESIZE)) {
			retval = EFBIG;
		}
		goto exit;
	}

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 12)) | DBG_FUNC_START,
		(int)uio_offset(uio), uio_resid(uio), (int)filesize, (int)filebytes, 0);

	retval = cluster_read(vp, uio, filesize, ap->a_ioflag);

	cp->c_touch_acctime = TRUE;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 12)) | DBG_FUNC_END,
		(int)uio_offset(uio), uio_resid(uio), (int)filesize, (int)filebytes, 0);

	/*
	 * Hot-file bookkeeping: track bytes read while recording is active.
	 */
	if (hfsmp->hfc_stage == HFC_RECORDING && retval == 0) {
		int took_cnode_lock = 0;
		off_t bytesread;

		bytesread = start_resid - uio_resid(uio);

		/* Take the cnode lock only when the 32-bit counter would overflow. */
		if ((fp->ff_bytesread + bytesread) > 0x00000000ffffffff) {
			hfs_lock(cp, HFS_FORCE_LOCK);
			took_cnode_lock = 1;
		}
		/*
		 * If the file hasn't been accessed since the current recording
		 * period began, restart the byte count; otherwise accumulate.
		 */
		if (cp->c_atime < hfsmp->hfc_timebase) {
			struct timeval tv;

			fp->ff_bytesread = bytesread;
			microtime(&tv);
			cp->c_atime = tv.tv_sec;
		} else {
			fp->ff_bytesread += bytesread;
		}
		if (took_cnode_lock)
			hfs_unlock(cp);
	}
exit:
	if (took_truncate_lock) {
		hfs_unlock_truncate(cp, 0);
	}

	return (retval);
}
/*
 * Write data to a file (vnop_write entry point).
 *
 * High-level flow:
 *   1. Reject writes to compressed files; route dataless files through the
 *      namespace handler (HFS_COMPRESSION builds only).
 *   2. Take the truncate lock (shared for overwrites, exclusive for appends
 *      or when the file must grow / has unallocated or invalid ranges —
 *      upgrading and retrying via the "again:" label as needed).
 *   3. Extend the fork inside a transaction until it covers the write range.
 *   4. Work out head/tail zero-fill and invalid-range adjustments, then hand
 *      the transfer to cluster_write().
 *   5. On error with IO_UNIT, roll the file back to its original size.
 *
 * Returns 0 or an errno; the uio reflects how much was transferred.
 */
int
hfs_vnop_write(struct vnop_write_args *ap)
{
	uio_t uio = ap->a_uio;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp;
	struct hfsmount *hfsmp;
	kauth_cred_t cred = NULL;
	off_t origFileSize;
	off_t writelimit;
	off_t bytesToAdd = 0;
	off_t actualBytesAdded;
	off_t filebytes;
	off_t offset;
	ssize_t resid;
	int eflags;
	int ioflag = ap->a_ioflag;
	int retval = 0;
	int lockflags;
	int cnode_locked = 0;
	int partialwrite = 0;
	int do_snapshot = 1;
	time_t orig_ctime=VTOC(vp)->c_ctime;
	int took_truncate_lock = 0;
	struct rl_entry *invalid_range;

#if HFS_COMPRESSION
	if ( hfs_file_is_compressed(VTOC(vp), 1) ) {
		int state = decmpfs_cnode_get_vnode_state(VTOCMP(vp));
		switch(state) {
			case FILE_IS_COMPRESSED:
				/* Fully compressed files are never writable in place. */
				return EACCES;
			case FILE_IS_CONVERTING:
				/* Mid-conversion: allow the write but skip snapshotting. */
				do_snapshot = 0;
				break;
			default:
				printf("invalid state %d for compressed file\n", state);
				/* fall through and attempt the write */
		}
	} else if ((VTOC(vp)->c_flags & UF_COMPRESSED)) {
		int error;

		/* Dataless file: let the namespace handler materialize it first. */
		error = check_for_dataless_file(vp, NAMESPACE_HANDLER_WRITE_OP);
		if (error != 0) {
			return error;
		}
	}

	if (do_snapshot) {
		check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_WRITE_OP, uio);
	}
#endif

	resid = uio_resid(uio);
	offset = uio_offset(uio);

	if (offset < 0)
		return (EINVAL);
	if (resid == 0)
		return (E_NONE);
	if (!vnode_isreg(vp))
		return (EPERM);		/* only regular files may be written */

	cp = VTOC(vp);
	fp = VTOF(vp);
	hfsmp = VTOHFS(vp);

#if CONFIG_PROTECT
	if ((retval = cp_handle_vnop (cp, CP_WRITE_ACCESS)) != 0) {
		goto exit;
	}
#endif

	eflags = kEFDeferMask;	/* defer file block allocations by default */
#if HFS_SPARSE_DEV
	/*
	 * On a sparse backing device that is running low on space
	 * (< 2048 free blocks), stop deferring allocations and force
	 * synchronous I/O so failures surface immediately.
	 */
	if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) &&
	    (hfs_freeblks(hfsmp, 0) < 2048)) {
		eflags &= ~kEFDeferMask;
		ioflag |= IO_SYNC;
	}
#endif /* HFS_SPARSE_DEV */

again:
	/* Appends — and any retry after a failed lock upgrade — need exclusive. */
	if (ioflag & IO_APPEND || took_truncate_lock) {
		hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
	}
	else {
		hfs_lock_truncate(cp, HFS_SHARED_LOCK);
	}
	took_truncate_lock = 1;

	/* For appends, position the uio at current EOF. */
	if (ioflag & IO_APPEND) {
		uio_setoffset(uio, fp->ff_size);
		offset = fp->ff_size;
	}
	/* Append-only files may only be written at EOF. */
	if ((cp->c_flags & APPEND) && offset != fp->ff_size) {
		retval = EPERM;
		goto exit;
	}

	origFileSize = fp->ff_size;
	writelimit = offset + resid;
	filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize;

	/*
	 * Holding only the shared truncate lock, we must upgrade to exclusive
	 * if the fork has unallocated (delayed-allocation) blocks or if this
	 * write extends the file.  If the upgrade fails the shared lock was
	 * dropped, so start over from "again:".
	 */
	if ((cp->c_truncatelockowner == HFS_SHARED_OWNER) &&
	    ((fp->ff_unallocblocks != 0) ||
	     (writelimit > origFileSize))) {
		if (lck_rw_lock_shared_to_exclusive(&cp->c_truncatelock) == FALSE) {
			goto again;
		}
		else {
			/* Upgrade succeeded: record exclusive ownership. */
			cp->c_truncatelockowner = current_thread();
		}
	}

	if ( (retval = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) {
		goto exit;
	}
	cnode_locked = 1;

	/*
	 * Invalid (not-yet-zeroed) ranges may only be manipulated with the
	 * exclusive truncate lock; if the write overlaps one, retry exclusive.
	 */
	if ((cp->c_truncatelockowner == HFS_SHARED_OWNER) &&
	    (rl_scan(&fp->ff_invalidranges, offset, writelimit-1, &invalid_range) != RL_NOOVERLAP)) {
		hfs_unlock(cp);
		cnode_locked = 0;
		hfs_unlock_truncate(cp, 0);
		goto again;
	}

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_START,
		(int)offset, uio_resid(uio), (int)fp->ff_size,
		(int)filebytes, 0);

	/* Already enough allocated space to cover this write? */
	if (writelimit <= filebytes) {
		goto sizeok;
	}

	cred = vfs_context_ucred(ap->a_context);
	bytesToAdd = writelimit - filebytes;

#if QUOTA
	retval = hfs_chkdq(cp, (int64_t)(roundup(bytesToAdd, hfsmp->blockSize)),
			cred, 0);
	if (retval)
		goto exit;
#endif /* QUOTA */

	if (hfs_start_transaction(hfsmp) != 0) {
		retval = EINVAL;
		goto exit;
	}

	/* Grow the fork until it covers writelimit (or allocation fails). */
	while (writelimit > filebytes) {
		bytesToAdd = writelimit - filebytes;
		/* Non-superuser writers may not dip into the reserve. */
		if (cred && suser(cred, NULL) != 0)
			eflags |= kEFReserveMask;

		/* Protect extents b-tree and allocation bitmap */
		lockflags = SFL_BITMAP;
		if (overflow_extents(fp))
			lockflags |= SFL_EXTENTS;
		lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

		/* Files being extended lose hot-file candidacy byte counts. */
		if (hfsmp->hfc_stage == HFC_RECORDING) {
			fp->ff_bytesread = 0;
		}
		retval = MacToVFSError(ExtendFileC (hfsmp, (FCB*)fp, bytesToAdd,
				0, eflags, &actualBytesAdded));

		hfs_systemfile_unlock(hfsmp, lockflags);

		if ((actualBytesAdded == 0) && (retval == E_NONE))
			retval = ENOSPC;
		if (retval != E_NONE)
			break;
		filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize;
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_NONE,
			(int)offset, uio_resid(uio), (int)fp->ff_size, (int)filebytes, 0);
	}
	(void) hfs_update(vp, TRUE);
	(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
	(void) hfs_end_transaction(hfsmp);

	/*
	 * Out of space but some blocks were obtained: do a partial write of
	 * whatever fits, restoring the trimmed resid after cluster_write.
	 */
	if ((retval == ENOSPC) && (filebytes > offset)) {
		retval = 0;
		partialwrite = 1;
		uio_setresid(uio, (uio_resid(uio) - bytesToAdd));
		resid -= bytesToAdd;
		writelimit = filebytes;
	}
sizeok:
	if (retval == E_NONE) {
		off_t filesize;
		off_t zero_off;
		off_t tail_off;
		off_t inval_start;
		off_t inval_end;
		off_t io_start;
		int lflag;

		if (writelimit > fp->ff_size)
			filesize = writelimit;
		else
			filesize = fp->ff_size;

		lflag = ioflag & ~(IO_TAILZEROFILL | IO_HEADZEROFILL | IO_NOZEROVALID | IO_NOZERODIRTY);

		if (offset <= fp->ff_size) {
			zero_off = offset & ~PAGE_MASK_64;

			/*
			 * If the write starts mid-page and any earlier part of that
			 * page is invalid, have cluster_write zero-fill the head.
			 */
			if (offset > zero_off) {
				if (rl_scan(&fp->ff_invalidranges, zero_off, offset - 1, &invalid_range) != RL_NOOVERLAP)
					lflag |= IO_HEADZEROFILL;
			}
		} else {
			/*
			 * Write begins beyond EOF: the gap between the old EOF and
			 * the write must end up zeroed.  Whole pages in the gap are
			 * recorded as invalid ranges (zeroed lazily); partial pages
			 * are zeroed now, either by an explicit zero-filling
			 * cluster_write below or via IO_HEADZEROFILL.
			 */
			off_t eof_page_base = fp->ff_size & ~PAGE_MASK_64;

			inval_start = (fp->ff_size + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
			inval_end = offset & ~PAGE_MASK_64;
			zero_off = fp->ff_size;

			if ((fp->ff_size & PAGE_MASK_64) &&
				(rl_scan(&fp->ff_invalidranges,
							eof_page_base,
							fp->ff_size - 1,
							&invalid_range) != RL_NOOVERLAP)) {
				/*
				 * The page containing the old EOF already has invalid
				 * data: extend the invalid region (or the zeroing) back
				 * to that page boundary.
				 */
				if (inval_end > eof_page_base) {
					inval_start = eof_page_base;
				} else {
					zero_off = eof_page_base;
				};
			};

			if (inval_start < inval_end) {
				struct timeval tv;
				/* Zero the partial page at old EOF before marking ranges. */
				if (zero_off < inval_start) {
					hfs_unlock(cp);
					cnode_locked = 0;
					retval = cluster_write(vp, (uio_t) 0,
							fp->ff_size, inval_start,
							zero_off, (off_t)0,
							lflag | IO_HEADZEROFILL | IO_NOZERODIRTY);
					hfs_lock(cp, HFS_FORCE_LOCK);
					cnode_locked = 1;
					if (retval) goto ioerr_exit;
					offset = uio_offset(uio);
				};

				/* Record the whole-page gap as invalid (lazily zeroed). */
				rl_add(inval_start, inval_end - 1 , &fp->ff_invalidranges);
				microuptime(&tv);
				cp->c_zftimeout = tv.tv_sec + ZFTIMELIMIT;
				zero_off = fp->ff_size = inval_end;
			};

			if (offset > zero_off) lflag |= IO_HEADZEROFILL;
		};

		/* Decide whether the tail of the last written page needs zeroing. */
		tail_off = (writelimit + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
		if (tail_off > filesize) tail_off = filesize;
		if (tail_off > writelimit) {
			if (rl_scan(&fp->ff_invalidranges, writelimit, tail_off - 1, &invalid_range) != RL_NOOVERLAP) {
				lflag |= IO_TAILZEROFILL;
			};
		};

		/*
		 * The region about to be written (including any zero-filled
		 * head/tail) becomes valid, so drop it from the invalid list.
		 */
		io_start = (lflag & IO_HEADZEROFILL) ? zero_off : offset;
		if (io_start < fp->ff_size) {
			off_t io_end;

			io_end = (lflag & IO_TAILZEROFILL) ? tail_off : writelimit;
			rl_remove(io_start, io_end - 1, &fp->ff_invalidranges);
		};

		hfs_unlock(cp);
		cnode_locked = 0;

		/*
		 * Publish the tentative new size to UBC before the transfer;
		 * ff_new_size lets pageouts see the in-flight extension.
		 */
		if (filesize > fp->ff_size) {
			fp->ff_new_size = filesize;
			ubc_setsize(vp, filesize);
		}
		retval = cluster_write(vp, uio, fp->ff_size, filesize, zero_off,
				tail_off, lflag | IO_NOZERODIRTY);
		if (retval) {
			fp->ff_new_size = 0;
			/* Roll UBC back if we had grown it. */
			if (filesize > origFileSize) {
				ubc_setsize(vp, origFileSize);
			}
			goto ioerr_exit;
		}

		if (filesize > origFileSize) {
			fp->ff_size = filesize;

			/* Files extended are no longer hot-file candidates. */
			if (hfsmp->hfc_stage == HFC_RECORDING) {
				fp->ff_bytesread = 0;
			}
		}
		fp->ff_new_size = 0;

		/* Any bytes transferred dirty the change/mod times. */
		if (resid > uio_resid(uio)) {
			cp->c_touch_chgtime = TRUE;
			cp->c_touch_modtime = TRUE;
		}
	}
	if (partialwrite) {
		/* Restore the resid trimmed for the partial write. */
		uio_setresid(uio, (uio_resid(uio) + bytesToAdd));
		resid += bytesToAdd;
	}

	{
		/* Optional full drive-cache flush for uncached writes (sysctl). */
		if (flush_cache_on_write && ((ioflag & IO_NOCACHE) || vnode_isnocache(vp))) {
			VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL);
		}
	}

ioerr_exit:
	/*
	 * Successful writes by non-superusers clear setuid/setgid bits
	 * to defeat privilege-escalation via writable setuid files.
	 */
	if (cp->c_mode & (S_ISUID | S_ISGID)) {
		cred = vfs_context_ucred(ap->a_context);
		if (resid > uio_resid(uio) && cred && suser(cred, NULL)) {
			if (!cnode_locked) {
				hfs_lock(cp, HFS_FORCE_LOCK);
				cnode_locked = 1;
			}
			cp->c_mode &= ~(S_ISUID | S_ISGID);
		}
	}
	if (retval) {
		if (ioflag & IO_UNIT) {
			/* All-or-nothing: undo the partial transfer. */
			if (!cnode_locked) {
				hfs_lock(cp, HFS_FORCE_LOCK);
				cnode_locked = 1;
			}
			(void)hfs_truncate(vp, origFileSize, ioflag & IO_SYNC,
					0, 0, ap->a_context);
			uio_setoffset(uio, (uio_offset(uio) - (resid - uio_resid(uio))));
			uio_setresid(uio, resid);
			filebytes = (off_t)fp->ff_blocks * (off_t)hfsmp->blockSize;
		}
	} else if ((ioflag & IO_SYNC) && (resid > uio_resid(uio))) {
		/* Synchronous write: push the catalog record out now. */
		if (!cnode_locked) {
			hfs_lock(cp, HFS_FORCE_LOCK);
			cnode_locked = 1;
		}
		retval = hfs_update(vp, TRUE);
	}
	/* Updating vcbWrCnt doesn't need its own transaction. */
	hfsmp->vcbWrCnt++;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_END,
		(int)uio_offset(uio), uio_resid(uio), (int)fp->ff_size, (int)filebytes, 0);
exit:
	if (cnode_locked)
		hfs_unlock(cp);

	if (took_truncate_lock) {
		hfs_unlock_truncate(cp, 0);
	}
	return (retval);
}
#define CACHE_LEVELS 16			/* max ancestry depth cached per access check */
#define NUM_CACHE_ENTRIES (64*16)	/* capacity of the per-call access cache */
#define PARENT_IDS_FLAG 0x100		/* caller supplied parent ids; leaf checks skipped */

/*
 * Per-call cache of access-check results, keyed by cnid.
 * acache is kept sorted; haveaccess is the parallel result array
 * (0 = access granted, otherwise an errno; ESRCH marks scope-only results).
 */
struct access_cache {
	int numcached;			/* live entries in acache/haveaccess */
	int cachehits;			/* statistics */
	int lookups;			/* catalog lookups performed */
	unsigned int *acache;		/* sorted cnid keys */
	unsigned char *haveaccess;	/* result per key */
};

/* Legacy in-kernel layout of the bulk-access argument; never instantiated. */
struct access_t {
	uid_t uid;		/* IN: effective user id */
	short flags;		/* IN: access requested (i.e. R_OK) */
	short num_groups;	/* IN: number of groups */
	int num_files;		/* IN: number of files to process */
	int *file_ids;		/* IN: array of file ids */
	gid_t *groups;		/* IN: array of groups */
	short *access;		/* OUT: access info for each file (0 for 'has access') */
} __attribute__((unavailable));	/* this structure is for reference purposes only */

/* 32-bit user-space layout of the basic bulk-access argument. */
struct user32_access_t {
	uid_t uid;		/* IN: effective user id */
	short flags;		/* IN: access requested (i.e. R_OK) */
	short num_groups;	/* IN: number of groups */
	int num_files;		/* IN: number of files to process */
	user32_addr_t file_ids;	/* IN: array of file ids */
	user32_addr_t groups;	/* IN: array of groups */
	user32_addr_t access;	/* OUT: access info for each file (0 for 'has access') */
};

/* 64-bit user-space layout of the basic bulk-access argument. */
struct user64_access_t {
	uid_t uid;		/* IN: effective user id */
	short flags;		/* IN: access requested (i.e. R_OK) */
	short num_groups;	/* IN: number of groups */
	int num_files;		/* IN: number of files to process */
	user64_addr_t file_ids;	/* IN: array of file ids */
	user64_addr_t groups;	/* IN: array of groups */
	user64_addr_t access;	/* OUT: access info for each file (0 for 'has access') */
};

/* Extended bulk-access argument (adds bitmap and parent-scope arrays); reference only. */
struct ext_access_t {
	uint32_t flags;		/* IN: access requested (i.e. R_OK) */
	uint32_t num_files;	/* IN: number of files to process */
	uint32_t map_size;	/* IN: size of the bit map */
	uint32_t *file_ids;	/* IN: Array of file ids */
	char *bitmap;		/* OUT: hash-bitmap of interesting directory ids */
	short *access;		/* OUT: access info for each file (0 for 'has access') */
	uint32_t num_parents;	/* future use */
	cnid_t *parents;	/* future use */
} __attribute__((unavailable));	/* this structure is for reference purposes only */

/* 32-bit user-space layout of the extended bulk-access argument. */
struct user32_ext_access_t {
	uint32_t flags;		/* IN: access requested (i.e. R_OK) */
	uint32_t num_files;	/* IN: number of files to process */
	uint32_t map_size;	/* IN: size of the bit map */
	user32_addr_t file_ids;	/* IN: Array of file ids */
	user32_addr_t bitmap;	/* OUT: hash-bitmap of interesting directory ids */
	user32_addr_t access;	/* OUT: access info for each file (0 for 'has access') */
	uint32_t num_parents;	/* future use */
	user32_addr_t parents;	/* future use */
};

/* 64-bit user-space layout of the extended bulk-access argument. */
struct user64_ext_access_t {
	uint32_t flags;		/* IN: access requested (i.e. R_OK) */
	uint32_t num_files;	/* IN: number of files to process */
	uint32_t map_size;	/* IN: size of the bit map */
	user64_addr_t file_ids;	/* IN: array of file ids */
	user64_addr_t bitmap;	/* OUT: hash-bitmap of interesting directory ids */
	user64_addr_t access;	/* OUT: access info for each file (0 for 'has access') */
	uint32_t num_parents;	/* future use */
	user64_addr_t parents;	/* future use */
};
/*
 * Binary-search a sorted cnid array for parent_id.
 *
 * hi is the index of the last element (NOT the count).  Returns the
 * matching index, or -1 on a miss.  In either case, when
 * no_match_indexp is non-NULL it receives the index where the search
 * converged, which callers use as an insertion point.
 */
static int cache_binSearch(cnid_t *array, unsigned int hi, cnid_t parent_id, int *no_match_indexp)
{
	unsigned int lo = 0;
	int index = -1;

	/* Narrow [lo, hi] until it collapses or an exact match is found. */
	do {
		unsigned int mid = lo + (hi - lo) / 2;
		cnid_t probe = array[mid];

		if (parent_id <= probe) {
			hi = mid;
			if (parent_id == probe) {
				break;
			}
		} else {
			lo = mid + 1;
		}
	} while (lo < hi);

	if (parent_id == array[hi]) {
		index = hi;
	}

	/* Report where the search stopped, even on a miss. */
	if (no_match_indexp) {
		*no_match_indexp = hi;
	}

	return index;
}
/*
 * Look up parent_id in the access cache.
 *
 * Returns 1 on a hit (with *indexp set to the entry's index) or 0 on a
 * miss (with *indexp set to the position where the id would be inserted).
 */
static int
lookup_bucket(struct access_cache *cache, int *indexp, cnid_t parent_id)
{
	int found_index, miss_index;

	/* An empty cache can never produce a hit. */
	if (cache->numcached == 0) {
		*indexp = 0;
		return 0;
	}

	/* Clamp an out-of-range count back to capacity before searching. */
	if (cache->numcached > NUM_CACHE_ENTRIES) {
		cache->numcached = NUM_CACHE_ENTRIES;
	}

	found_index = cache_binSearch(cache->acache, cache->numcached - 1,
	                              parent_id, &miss_index);
	if (found_index == -1) {
		*indexp = miss_index;
		return 0;
	}

	*indexp = found_index;
	return 1;
}
/*
 * Insert (or update) a (nodeID, access) result in the sorted access cache.
 *
 * index is the insertion position from a prior lookup_bucket() miss, or -1
 * to have this function perform the lookup itself.  access is 0 (granted),
 * an errno, or ESRCH (scope-only marker, replaceable by a real result).
 */
static void
add_node(struct access_cache *cache, int index, cnid_t nodeID, int access)
{
	int lookup_index = -1;

	/* need to do a lookup first if -1 is passed for the index */
	if (index == -1) {
		if (lookup_bucket(cache, &lookup_index, nodeID)) {
			/* Already cached: only an ESRCH marker may be overwritten. */
			if (cache->haveaccess[lookup_index] != access && cache->haveaccess[lookup_index] == ESRCH) {
				cache->haveaccess[lookup_index] = access;
			}

			/* mission accomplished */
			return;
		} else {
			index = lookup_index;
		}
	}

	/* if the cache is full, do a replace rather than an insert */
	if (cache->numcached >= NUM_CACHE_ENTRIES) {
		cache->numcached = NUM_CACHE_ENTRIES-1;

		if (index > cache->numcached) {
			index = cache->numcached;
		}
	}

	/* keep the array sorted: step past a smaller key at the insert slot */
	if (index < cache->numcached && index < NUM_CACHE_ENTRIES && nodeID > cache->acache[index]) {
		index++;
	}

	if (index >= 0 && index < cache->numcached) {
		/* only do bcopy if we're inserting */
		bcopy( cache->acache+index, cache->acache+(index+1), (cache->numcached - index)*sizeof(int) );
		bcopy( cache->haveaccess+index, cache->haveaccess+(index+1), (cache->numcached - index)*sizeof(unsigned char) );
	}

	cache->acache[index] = nodeID;
	cache->haveaccess[index] = access;
	cache->numcached++;
}
/* Fields snooped out of an in-core cnode for an access check. */
struct cinfo {
	uid_t uid;		/* owner */
	gid_t gid;		/* group */
	mode_t mode;		/* permissions */
	cnid_t parentcnid;	/* parent directory's cnid */
	u_int16_t recflags;	/* catalog record flags */
};
/*
 * hfs_chash_snoop() callback: copy the ownership, mode, record flags and
 * parent cnid of an in-core cnode into the caller's struct cinfo.
 * Always returns 0.
 */
static int
snoop_callback(const struct cat_desc *descp, const struct cat_attr *attrp, void * arg)
{
	struct cinfo *info = arg;

	info->uid        = attrp->ca_uid;
	info->gid        = attrp->ca_gid;
	info->mode       = attrp->ca_mode;
	info->recflags   = attrp->ca_recflags;
	info->parentcnid = descp->cd_parentcnid;

	return (0);
}
/*
 * Fetch uid/gid/mode/recflags and the parent cnid for a given cnid.
 *
 * Sources, in order of preference: the caller's own cnode (skip_cp),
 * the cnode hash (file may be in core), and finally the catalog B-tree.
 * Results are written to *cnattrp and keyp->hfsPlus.parentID.
 * Returns 0 or a catalog lookup error; cache->lookups counts only the
 * lookups that actually hit the catalog.
 */
static int
do_attr_lookup(struct hfsmount *hfsmp, struct access_cache *cache, cnid_t cnid,
	struct cnode *skip_cp, CatalogKey *keyp, struct cat_attr *cnattrp)
{
	int error = 0;

	/* if this id matches the one the fsctl was called with, skip the lookup */
	if (cnid == skip_cp->c_cnid) {
		cnattrp->ca_uid = skip_cp->c_uid;
		cnattrp->ca_gid = skip_cp->c_gid;
		cnattrp->ca_mode = skip_cp->c_mode;
		cnattrp->ca_recflags = skip_cp->c_attr.ca_recflags;
		keyp->hfsPlus.parentID = skip_cp->c_parentcnid;
	} else {
		struct cinfo c_info;

		/* otherwise, check the cnode hash in case the file/dir is in core */
		if (hfs_chash_snoop(hfsmp, cnid, 0, snoop_callback, &c_info) == 0) {
			cnattrp->ca_uid = c_info.uid;
			cnattrp->ca_gid = c_info.gid;
			cnattrp->ca_mode = c_info.mode;
			cnattrp->ca_recflags = c_info.recflags;
			keyp->hfsPlus.parentID = c_info.parentcnid;
		} else {
			int lockflags;

			/* not in memory: hit the catalog under a shared lock */
			lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
			error = cat_getkeyplusattr(hfsmp, cnid, keyp, cnattrp);
			hfs_systemfile_unlock(hfsmp, lockflags);

			cache->lookups++;
		}
	}

	return (error);
}
/*
 * Walk from nodeID up to the volume root, checking search/read access at
 * every ancestor directory.
 *
 * Returns 1 if the full chain is traversable, 0 otherwise; *err receives
 * the errno (0 on success).  When the caller supplies a parents array,
 * ESRCH is reported if none of the visited ancestors appears in it
 * (i.e. the node is outside the caller's scope).  Intermediate results
 * are cached; cnids touched are also recorded in the optional bitmap.
 */
static int
do_access_check(struct hfsmount *hfsmp, int *err, struct access_cache *cache, HFSCatalogNodeID nodeID,
	struct cnode *skip_cp, struct proc *theProcPtr, kauth_cred_t myp_ucred,
	struct vfs_context *my_context,
	char *bitmap,
	uint32_t map_size,
	cnid_t* parents,
	uint32_t num_parents)
{
	int myErr = 0;
	int myResult;
	HFSCatalogNodeID thisNodeID;
	unsigned int myPerms;
	struct cat_attr cnattr;
	int cache_index = -1, scope_index = -1, scope_idx_start = -1;
	CatalogKey catkey;

	int i = 0, ids_to_cache = 0;
	int parent_ids[CACHE_LEVELS];

	thisNodeID = nodeID;
	while (thisNodeID >= kRootDirID) {
		myResult = 0;	/* default to "no access" */

		/* A cached result short-circuits the rest of the walk. */
		if (lookup_bucket(cache, &cache_index, thisNodeID)) {
			cache->cachehits++;
			myErr = cache->haveaccess[cache_index];
			if (scope_index != -1) {
				/* Scope already established: an ESRCH marker means ok. */
				if (myErr == ESRCH) {
					myErr = 0;
				}
			} else {
				scope_index = 0;   // so we'll just use the cache result
				scope_idx_start = ids_to_cache;
			}
			myResult = (myErr == 0) ? 1 : 0;
			goto ExitThisRoutine;
		}

		if (parents) {
			int tmp;
			/* Is this ancestor one of the caller-supplied scope parents? */
			tmp = cache_binSearch(parents, num_parents-1, thisNodeID, NULL);
			if (scope_index == -1)
				scope_index = tmp;
			if (tmp != -1 && scope_idx_start == -1 && ids_to_cache < CACHE_LEVELS) {
				scope_idx_start = ids_to_cache;
			}
		}

		/* Remember this id so the final result can be cached for it too. */
		if (ids_to_cache < CACHE_LEVELS) {
			parent_ids[ids_to_cache] = thisNodeID;
			ids_to_cache++;
		}

		/* Record every directory id visited in the caller's hash bitmap. */
		if (bitmap && map_size) {
			bitmap[(thisNodeID/8)%(map_size)]|=(1<<(thisNodeID&7));
		}

		/* do the lookup (checks the cnode hash, then the catalog) */
		myErr = do_attr_lookup(hfsmp, cache, thisNodeID, skip_cp, &catkey, &cnattr);
		if (myErr) {
			goto ExitThisRoutine;	/* no access */
		}

		/* Root always wins; just keep climbing to establish scope. */
		if (suser(myp_ucred, NULL) == 0) {
			thisNodeID = catkey.hfsPlus.parentID;
			myResult = 1;
			continue;
		}

		if ((cnattr.ca_recflags & kHFSHasSecurityMask) != 0) {
			/*
			 * Directory carries an ACL: take the expensive path and
			 * consult the full authorization machinery on its vnode.
			 */
			struct vnode *vp;

			myErr = hfs_vget(hfsmp, thisNodeID, &vp, 0, 0);
			if ( myErr ) {
				myResult = 0;
				goto ExitThisRoutine;
			}
			thisNodeID = VTOC(vp)->c_parentcnid;

			hfs_unlock(VTOC(vp));

			if (vnode_vtype(vp) == VDIR) {
				myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), my_context);
			} else {
				myErr = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, my_context);
			}

			vnode_put(vp);
			if (myErr) {
				myResult = 0;
				goto ExitThisRoutine;
			}
		} else {
			unsigned int flags;
			int mode = cnattr.ca_mode & S_IFMT;

			/* Plain permissions: summarize and test R (+X for dirs). */
			myPerms = DerivePermissionSummary(cnattr.ca_uid, cnattr.ca_gid, cnattr.ca_mode, hfsmp->hfs_mp,myp_ucred, theProcPtr);

			if (mode == S_IFDIR) {
				flags = R_OK | X_OK;
			} else {
				flags = R_OK;
			}
			if ( (myPerms & flags) != flags) {
				myResult = 0;
				myErr = EACCES;
				goto ExitThisRoutine;	/* no access */
			}

			/* up the hierarchy we go */
			thisNodeID = catkey.hfsPlus.parentID;
		}
	}

	/* Reached the root without a denial. */
	myResult = 1;

	ExitThisRoutine:
	/* In-scope check requested but no ancestor matched: report ESRCH. */
	if (parents && myErr == 0 && scope_index == -1) {
		myErr = ESRCH;
	}

	if (myErr) {
		myResult = 0;
	}
	*err = myErr;

	/* cache the parent directory(ies) */
	for (i = 0; i < ids_to_cache; i++) {
		/* Ancestors outside the established scope get the ESRCH marker. */
		if (myErr == 0 && parents && (scope_idx_start == -1 || i > scope_idx_start)) {
			add_node(cache, -1, parent_ids[i], ESRCH);
		} else {
			add_node(cache, -1, parent_ids[i], myErr);
		}
	}

	return (myResult);
}
/*
 * Implement the HFS_BULKACCESS / HFS_EXT_BULKACCESS fsctls: answer an
 * access query for each file id supplied by user space.
 *
 * The user argument (basic or extended, 32- or 64-bit) is normalized into
 * a user64_ext_access_t.  For each file id the leaf is checked (unless
 * PARENT_IDS_FLAG suppresses that) and then do_access_check() walks the
 * ancestor chain, with results memoized in a per-call cache.  Per-file
 * results (0 or an errno) are copied out to user_access_structp->access,
 * and optionally a bitmap of visited directory ids is copied out too.
 *
 * Returns 0 on success or an errno for argument/copy/allocation failures.
 *
 * Fix vs. previous revision: the allocation-failure check now also covers
 * the 'parents' array.  Before, a failed kalloc() for parents went
 * unnoticed and the later copyin() would target a NULL kernel buffer.
 */
static int
do_bulk_access_check(struct hfsmount *hfsmp, struct vnode *vp,
	struct vnop_ioctl_args *ap, int arg_size, vfs_context_t context)
{
	boolean_t is64bit;

	/*
	 * NOTE: on entry, the vnode has an io_ref. In case this vnode
	 * happens to be in our list of file_ids, we'll note it
	 * avoid calling hfs_chashget_nowait() on that id as that
	 * will cause a "locking against myself" panic.
	 */
	Boolean check_leaf = true;

	struct user64_ext_access_t *user_access_structp;
	struct user64_ext_access_t tmp_user_access;
	struct access_cache cache;

	int error = 0, prev_parent_check_ok=1;
	unsigned int i;

	short flags;
	unsigned int num_files = 0;
	int map_size = 0;
	int num_parents = 0;
	int *file_ids=NULL;
	short *access=NULL;
	char *bitmap=NULL;
	cnid_t *parents=NULL;
	int leaf_index;

	cnid_t cnid;
	cnid_t prevParent_cnid = 0;
	unsigned int myPerms;
	short myaccess = 0;
	struct cat_attr cnattr;
	CatalogKey catkey;
	struct cnode *skip_cp = VTOC(vp);
	kauth_cred_t cred = vfs_context_ucred(context);
	proc_t p = vfs_context_proc(context);

	is64bit = proc_is64bit(p);

	/* initialize the local cache and buffers */
	cache.numcached = 0;
	cache.cachehits = 0;
	cache.lookups = 0;
	cache.acache = NULL;
	cache.haveaccess = NULL;

	/* struct copyin done during dispatch... need to copy file_id array separately */
	if (ap->a_data == NULL) {
		error = EINVAL;
		goto err_exit_bulk_access;
	}

	/* Normalize all four argument layouts into tmp_user_access. */
	if (is64bit) {
		if (arg_size != sizeof(struct user64_ext_access_t)) {
			error = EINVAL;
			goto err_exit_bulk_access;
		}

		user_access_structp = (struct user64_ext_access_t *)ap->a_data;

	} else if (arg_size == sizeof(struct user32_access_t)) {
		struct user32_access_t *accessp = (struct user32_access_t *)ap->a_data;

		/* Basic 32-bit form: no bitmap, no parent scope. */
		tmp_user_access.flags = accessp->flags;
		tmp_user_access.num_files = accessp->num_files;
		tmp_user_access.map_size = 0;
		tmp_user_access.file_ids = CAST_USER_ADDR_T(accessp->file_ids);
		tmp_user_access.bitmap = USER_ADDR_NULL;
		tmp_user_access.access = CAST_USER_ADDR_T(accessp->access);
		tmp_user_access.num_parents = 0;
		user_access_structp = &tmp_user_access;

	} else if (arg_size == sizeof(struct user32_ext_access_t)) {
		struct user32_ext_access_t *accessp = (struct user32_ext_access_t *)ap->a_data;

		tmp_user_access.flags = accessp->flags;
		tmp_user_access.num_files = accessp->num_files;
		tmp_user_access.map_size = accessp->map_size;
		tmp_user_access.num_parents = accessp->num_parents;
		tmp_user_access.file_ids = CAST_USER_ADDR_T(accessp->file_ids);
		tmp_user_access.bitmap = CAST_USER_ADDR_T(accessp->bitmap);
		tmp_user_access.access = CAST_USER_ADDR_T(accessp->access);
		tmp_user_access.parents = CAST_USER_ADDR_T(accessp->parents);
		user_access_structp = &tmp_user_access;
	} else {
		error = EINVAL;
		goto err_exit_bulk_access;
	}

	map_size = user_access_structp->map_size;

	num_files = user_access_structp->num_files;

	num_parents= user_access_structp->num_parents;

	if (num_files < 1) {
		goto err_exit_bulk_access;
	}
	/* Cap untrusted counts before sizing allocations. */
	if (num_files > 1024) {
		error = EINVAL;
		goto err_exit_bulk_access;
	}

	if (num_parents > 1024) {
		error = EINVAL;
		goto err_exit_bulk_access;
	}

	file_ids = (int *) kalloc(sizeof(int) * num_files);
	access = (short *) kalloc(sizeof(short) * num_files);
	if (map_size) {
		bitmap = (char *) kalloc(sizeof(char) * map_size);
	}

	if (num_parents) {
		parents = (cnid_t *) kalloc(sizeof(cnid_t) * num_parents);
	}

	cache.acache = (unsigned int *) kalloc(sizeof(int) * NUM_CACHE_ENTRIES);
	cache.haveaccess = (unsigned char *) kalloc(sizeof(unsigned char) * NUM_CACHE_ENTRIES);

	/*
	 * Bail on any allocation failure; include 'parents' here, otherwise
	 * a failed kalloc() would feed a NULL destination to copyin() below.
	 */
	if (file_ids == NULL || access == NULL ||
	    (map_size != 0 && bitmap == NULL) ||
	    (num_parents != 0 && parents == NULL) ||
	    cache.acache == NULL || cache.haveaccess == NULL) {
		if (file_ids) {
			kfree(file_ids, sizeof(int) * num_files);
		}
		if (bitmap) {
			kfree(bitmap, sizeof(char) * map_size);
		}
		if (access) {
			kfree(access, sizeof(short) * num_files);
		}
		if (cache.acache) {
			kfree(cache.acache, sizeof(int) * NUM_CACHE_ENTRIES);
		}
		if (cache.haveaccess) {
			kfree(cache.haveaccess, sizeof(unsigned char) * NUM_CACHE_ENTRIES);
		}
		if (parents) {
			kfree(parents, sizeof(cnid_t) * num_parents);
		}
		return ENOMEM;
	}

	/* Copy in the file ids, the optional parent-scope list, and clear the bitmap. */
	if (bitmap) {
		bzero(bitmap, (sizeof(char) * map_size));
	}

	if ((error = copyin(user_access_structp->file_ids, (caddr_t)file_ids,
				num_files * sizeof(int)))) {
		goto err_exit_bulk_access;
	}

	if (num_parents) {
		if ((error = copyin(user_access_structp->parents, (caddr_t)parents,
					num_parents * sizeof(cnid_t)))) {
			goto err_exit_bulk_access;
		}
	}

	flags = user_access_structp->flags;
	if ((flags & (F_OK | R_OK | W_OK | X_OK)) == 0) {
		flags = R_OK;
	}

	/* check if we've been passed leaf node ids or parent ids */
	if (flags & PARENT_IDS_FLAG) {
		check_leaf = false;
	}

	/* Check access to each file_id passed in */
	for (i = 0; i < num_files; i++) {
		leaf_index=-1;
		cnid = (cnid_t) file_ids[i];

		/* root always has access */
		if ((!parents) && (!suser(cred, NULL))) {
			access[i] = 0;
			continue;
		}

		if (check_leaf) {
			/* do the lookup (checks the cnode hash, then the catalog) */
			error = do_attr_lookup(hfsmp, &cache, cnid, skip_cp, &catkey, &cnattr);
			if (error) {
				access[i] = (short) error;
				continue;
			}

			if (parents) {
				/*
				 * Leaf in the parents list: reusing the previous
				 * parent's result would give a scope-only answer,
				 * so disable that shortcut for the next entry.
				 */
				leaf_index = cache_binSearch(parents, num_parents-1, cnid, NULL);
				if (leaf_index >= 0 && parents[leaf_index] == cnid)
					prev_parent_check_ok = 0;
				else if (leaf_index >= 0)
					prev_parent_check_ok = 1;
			}

			/* if the parent is an ACL'd directory, use the full authorization path */
			if ((cnattr.ca_recflags & kHFSHasSecurityMask) != 0) {
				struct vnode *cvp;
				int myErr = 0;
				/* get the vnode for this cnid */
				myErr = hfs_vget(hfsmp, cnid, &cvp, 0, 0);
				if ( myErr ) {
					access[i] = myErr;
					continue;
				}

				hfs_unlock(VTOC(cvp));

				if (vnode_vtype(cvp) == VDIR) {
					myErr = vnode_authorize(cvp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), context);
				} else {
					myErr = vnode_authorize(cvp, NULL, KAUTH_VNODE_READ_DATA, context);
				}

				vnode_put(cvp);
				if (myErr) {
					access[i] = myErr;
					continue;
				}
			} else {
				/* plain permissions check on the leaf itself */
				myPerms = DerivePermissionSummary(cnattr.ca_uid, cnattr.ca_gid,
							cnattr.ca_mode, hfsmp->hfs_mp, cred, p);

				if ((myPerms & flags) == 0) {
					access[i] = EACCES;
					continue;
				}
			}
		} else {
			/* we were passed an array of parent ids */
			catkey.hfsPlus.parentID = cnid;
		}

		/* if the last guy had the same parent and had access, we're done */
		if (i > 0 && catkey.hfsPlus.parentID == prevParent_cnid && access[i-1] == 0 && prev_parent_check_ok) {
			cache.cachehits++;
			access[i] = 0;
			continue;
		}

		myaccess = do_access_check(hfsmp, &error, &cache, catkey.hfsPlus.parentID,
					skip_cp, p, cred, context,bitmap, map_size, parents, num_parents);

		if (myaccess || (error == ESRCH && leaf_index != -1)) {
			access[i] = 0;	// have access.. no errors to report
		} else {
			access[i] = (error != 0 ? (short) error : EACCES);
		}

		prevParent_cnid = catkey.hfsPlus.parentID;
	}

	/* copyout the access array */
	if ((error = copyout((caddr_t)access, user_access_structp->access,
				num_files * sizeof (short)))) {
		goto err_exit_bulk_access;
	}
	if (map_size && bitmap) {
		if ((error = copyout((caddr_t)bitmap, user_access_structp->bitmap,
					map_size * sizeof (char)))) {
			goto err_exit_bulk_access;
		}
	}

	err_exit_bulk_access:

	if (file_ids)
		kfree(file_ids, sizeof(int) * num_files);
	if (parents)
		kfree(parents, sizeof(cnid_t) * num_parents);
	if (bitmap)
		kfree(bitmap, sizeof(char) * map_size);
	if (access)
		kfree(access, sizeof(short) * num_files);
	if (cache.acache)
		kfree(cache.acache, sizeof(int) * NUM_CACHE_ENTRIES);
	if (cache.haveaccess)
		kfree(cache.haveaccess, sizeof(unsigned char) * NUM_CACHE_ENTRIES);

	return (error);
}
/*
 * vnode_iterate() callback for F_FREEZE_FS: wait for all pending writes
 * on the vnode to drain before the volume is frozen.  Always returns 0
 * so the iteration continues.
 */
static int
hfs_freezewrite_callback(struct vnode *vp, __unused void *cargs)
{
	vnode_waitforwrites(vp, 0, 0, 0, "hfs freeze");

	return 0;
}
int
hfs_vnop_ioctl( struct vnop_ioctl_args *ap)
{
struct vnode * vp = ap->a_vp;
struct hfsmount *hfsmp = VTOHFS(vp);
vfs_context_t context = ap->a_context;
kauth_cred_t cred = vfs_context_ucred(context);
proc_t p = vfs_context_proc(context);
struct vfsstatfs *vfsp;
boolean_t is64bit;
off_t jnl_start, jnl_size;
struct hfs_journal_info *jip;
#if HFS_COMPRESSION
int compressed = 0;
off_t uncompressed_size = -1;
int decmpfs_error = 0;
if (ap->a_command == F_RDADVISE) {
compressed = hfs_file_is_compressed(VTOC(vp), 0);
if (compressed) {
if (VNODE_IS_RSRC(vp)) {
uncompressed_size = 0;
} else {
decmpfs_error = hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &uncompressed_size, 0);
if (decmpfs_error != 0) {
uncompressed_size = -1;
}
}
}
}
#endif
is64bit = proc_is64bit(p);
#if CONFIG_PROTECT
{
int error = 0;
if ((error = cp_handle_vnop(VTOC(vp), CP_WRITE_ACCESS)) != 0) {
return error;
}
}
#endif
switch (ap->a_command) {
case HFS_GETPATH:
{
struct vnode *file_vp;
cnid_t cnid;
int outlen;
char *bufptr;
int error;
vfsp = vfs_statfs(HFSTOVFS(hfsmp));
if (suser(cred, NULL) &&
kauth_cred_getuid(cred) != vfsp->f_owner) {
return (EACCES);
}
if (!vnode_isvroot(vp)) {
return (EINVAL);
}
bufptr = (char *)ap->a_data;
cnid = strtoul(bufptr, NULL, 10);
if ((error = hfs_vfs_vget(HFSTOVFS(hfsmp), cnid, &file_vp, context))) {
return (error);
}
error = build_path(file_vp, bufptr, sizeof(pathname_t), &outlen, 0, context);
vnode_put(file_vp);
return (error);
}
case HFS_PREV_LINK:
case HFS_NEXT_LINK:
{
cnid_t linkfileid;
cnid_t nextlinkid;
cnid_t prevlinkid;
int error;
vfsp = vfs_statfs(HFSTOVFS(hfsmp));
if (suser(cred, NULL) &&
kauth_cred_getuid(cred) != vfsp->f_owner) {
return (EACCES);
}
if (!vnode_isvroot(vp)) {
return (EINVAL);
}
linkfileid = *(cnid_t *)ap->a_data;
if (linkfileid < kHFSFirstUserCatalogNodeID) {
return (EINVAL);
}
if ((error = hfs_lookup_siblinglinks(hfsmp, linkfileid, &prevlinkid, &nextlinkid))) {
return (error);
}
if (ap->a_command == HFS_NEXT_LINK) {
*(cnid_t *)ap->a_data = nextlinkid;
} else {
*(cnid_t *)ap->a_data = prevlinkid;
}
return (0);
}
case HFS_RESIZE_PROGRESS: {
vfsp = vfs_statfs(HFSTOVFS(hfsmp));
if (suser(cred, NULL) &&
kauth_cred_getuid(cred) != vfsp->f_owner) {
return (EACCES);
}
if (!vnode_isvroot(vp)) {
return (EINVAL);
}
if (hfsmp->hfs_flags & HFS_READ_ONLY) {
return (EROFS);
}
return hfs_resize_progress(hfsmp, (u_int32_t *)ap->a_data);
}
case HFS_RESIZE_VOLUME: {
u_int64_t newsize;
u_int64_t cursize;
vfsp = vfs_statfs(HFSTOVFS(hfsmp));
if (suser(cred, NULL) &&
kauth_cred_getuid(cred) != vfsp->f_owner) {
return (EACCES);
}
if (!vnode_isvroot(vp)) {
return (EINVAL);
}
if (hfsmp->hfs_flags & HFS_READ_ONLY) {
return (EROFS);
}
newsize = *(u_int64_t *)ap->a_data;
cursize = (u_int64_t)hfsmp->totalBlocks * (u_int64_t)hfsmp->blockSize;
if (newsize > cursize) {
return hfs_extendfs(hfsmp, *(u_int64_t *)ap->a_data, context);
} else if (newsize < cursize) {
return hfs_truncatefs(hfsmp, *(u_int64_t *)ap->a_data, context);
} else {
return (0);
}
}
case HFS_CHANGE_NEXT_ALLOCATION: {
int error = 0;
u_int32_t location;
if (vnode_vfsisrdonly(vp)) {
return (EROFS);
}
vfsp = vfs_statfs(HFSTOVFS(hfsmp));
if (suser(cred, NULL) &&
kauth_cred_getuid(cred) != vfsp->f_owner) {
return (EACCES);
}
if (!vnode_isvroot(vp)) {
return (EINVAL);
}
HFS_MOUNT_LOCK(hfsmp, TRUE);
location = *(u_int32_t *)ap->a_data;
if ((location >= hfsmp->allocLimit) &&
(location != HFS_NO_UPDATE_NEXT_ALLOCATION)) {
error = EINVAL;
goto fail_change_next_allocation;
}
*(u_int32_t *)ap->a_data = hfsmp->nextAllocation;
if (location == HFS_NO_UPDATE_NEXT_ALLOCATION) {
if (hfsmp->hfs_metazone_end != 0) {
HFS_UPDATE_NEXT_ALLOCATION(hfsmp, hfsmp->hfs_metazone_end + 1);
}
hfsmp->hfs_flags |= HFS_SKIP_UPDATE_NEXT_ALLOCATION;
} else {
hfsmp->hfs_flags &= ~HFS_SKIP_UPDATE_NEXT_ALLOCATION;
HFS_UPDATE_NEXT_ALLOCATION(hfsmp, location);
}
MarkVCBDirty(hfsmp);
fail_change_next_allocation:
HFS_MOUNT_UNLOCK(hfsmp, TRUE);
return (error);
}
#if HFS_SPARSE_DEV
case HFS_SETBACKINGSTOREINFO: {
struct vnode * bsfs_rootvp;
struct vnode * di_vp;
struct hfs_backingstoreinfo *bsdata;
int error = 0;
if (hfsmp->hfs_flags & HFS_READ_ONLY) {
return (EROFS);
}
if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
return (EALREADY);
}
vfsp = vfs_statfs(HFSTOVFS(hfsmp));
if (suser(cred, NULL) &&
kauth_cred_getuid(cred) != vfsp->f_owner) {
return (EACCES);
}
bsdata = (struct hfs_backingstoreinfo *)ap->a_data;
if (bsdata == NULL) {
return (EINVAL);
}
if ((error = file_vnode(bsdata->backingfd, &di_vp))) {
return (error);
}
if ((error = vnode_getwithref(di_vp))) {
file_drop(bsdata->backingfd);
return(error);
}
if (vnode_mount(vp) == vnode_mount(di_vp)) {
(void)vnode_put(di_vp);
file_drop(bsdata->backingfd);
return (EINVAL);
}
error = VFS_ROOT(vnode_mount(di_vp), &bsfs_rootvp, NULL);
if (error) {
(void)vnode_put(di_vp);
file_drop(bsdata->backingfd);
return (error);
}
vnode_ref(bsfs_rootvp);
vnode_put(bsfs_rootvp);
hfsmp->hfs_backingfs_rootvp = bsfs_rootvp;
hfsmp->hfs_flags |= HFS_HAS_SPARSE_DEVICE;
ResetVCBFreeExtCache(hfsmp);
hfsmp->hfs_sparsebandblks = bsdata->bandsize / HFSTOVCB(hfsmp)->blockSize;
hfsmp->hfs_sparsebandblks *= 4;
vfs_markdependency(hfsmp->hfs_mp);
hfsmp->hfs_backingfs_maxblocks = 0;
if (vnode_vtype(di_vp) == VREG) {
int terr;
int hostbits;
terr = vn_pathconf(di_vp, _PC_FILESIZEBITS, &hostbits, context);
if (terr == 0 && hostbits != 0 && hostbits < 64) {
u_int64_t hostfilesizemax = ((u_int64_t)1) << hostbits;
hfsmp->hfs_backingfs_maxblocks = hostfilesizemax / hfsmp->blockSize;
}
}
(void)vnode_put(di_vp);
file_drop(bsdata->backingfd);
return (0);
}
case HFS_CLRBACKINGSTOREINFO: {
struct vnode * tmpvp;
vfsp = vfs_statfs(HFSTOVFS(hfsmp));
if (suser(cred, NULL) &&
kauth_cred_getuid(cred) != vfsp->f_owner) {
return (EACCES);
}
if (hfsmp->hfs_flags & HFS_READ_ONLY) {
return (EROFS);
}
if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) &&
hfsmp->hfs_backingfs_rootvp) {
hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE;
tmpvp = hfsmp->hfs_backingfs_rootvp;
hfsmp->hfs_backingfs_rootvp = NULLVP;
hfsmp->hfs_sparsebandblks = 0;
vnode_rele(tmpvp);
}
return (0);
}
#endif
case F_FREEZE_FS: {
struct mount *mp;
mp = vnode_mount(vp);
hfsmp = VFSTOHFS(mp);
if (!(hfsmp->jnl))
return (ENOTSUP);
vfsp = vfs_statfs(mp);
if (kauth_cred_getuid(cred) != vfsp->f_owner &&
!kauth_cred_issuser(cred))
return (EACCES);
lck_rw_lock_exclusive(&hfsmp->hfs_insync);
vnode_iterate(mp, 0, hfs_freezewrite_callback, NULL);
hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
journal_flush(hfsmp->jnl, TRUE);
if (HFSTOVCB(hfsmp)->extentsRefNum)
vnode_waitforwrites(HFSTOVCB(hfsmp)->extentsRefNum, 0, 0, 0, "hfs freeze");
if (HFSTOVCB(hfsmp)->catalogRefNum)
vnode_waitforwrites(HFSTOVCB(hfsmp)->catalogRefNum, 0, 0, 0, "hfs freeze");
if (HFSTOVCB(hfsmp)->allocationsRefNum)
vnode_waitforwrites(HFSTOVCB(hfsmp)->allocationsRefNum, 0, 0, 0, "hfs freeze");
if (hfsmp->hfs_attribute_vp)
vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs freeze");
vnode_waitforwrites(hfsmp->hfs_devvp, 0, 0, 0, "hfs freeze");
hfsmp->hfs_freezing_proc = current_proc();
return (0);
}
case F_THAW_FS: {
vfsp = vfs_statfs(vnode_mount(vp));
if (kauth_cred_getuid(cred) != vfsp->f_owner &&
!kauth_cred_issuser(cred))
return (EACCES);
if (hfsmp->hfs_freezing_proc != current_proc()) {
return EPERM;
}
hfsmp->hfs_freezing_proc = NULL;
hfs_unlock_global (hfsmp);
lck_rw_unlock_exclusive(&hfsmp->hfs_insync);
return (0);
}
case HFS_BULKACCESS_FSCTL: {
int size;
if (hfsmp->hfs_flags & HFS_STANDARD) {
return EINVAL;
}
if (is64bit) {
size = sizeof(struct user64_access_t);
} else {
size = sizeof(struct user32_access_t);
}
return do_bulk_access_check(hfsmp, vp, ap, size, context);
}
case HFS_EXT_BULKACCESS_FSCTL: {
int size;
if (hfsmp->hfs_flags & HFS_STANDARD) {
return EINVAL;
}
if (is64bit) {
size = sizeof(struct user64_ext_access_t);
} else {
size = sizeof(struct user32_ext_access_t);
}
return do_bulk_access_check(hfsmp, vp, ap, size, context);
}
case HFS_SET_XATTREXTENTS_STATE: {
int state;
if (ap->a_data == NULL) {
return (EINVAL);
}
state = *(int *)ap->a_data;
if (hfsmp->hfs_flags & HFS_READ_ONLY) {
return (EROFS);
}
if (!is_suser()) {
return (EPERM);
}
if (state == 0 || state == 1)
return hfs_set_volxattr(hfsmp, HFS_SET_XATTREXTENTS_STATE, state);
else
return (EINVAL);
}
case F_FULLFSYNC: {
int error;
if (hfsmp->hfs_flags & HFS_READ_ONLY) {
return (EROFS);
}
error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK);
if (error == 0) {
error = hfs_fsync(vp, MNT_WAIT, TRUE, p);
hfs_unlock(VTOC(vp));
}
return error;
}
case F_CHKCLEAN: {
register struct cnode *cp;
int error;
if (!vnode_isreg(vp))
return EINVAL;
error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK);
if (error == 0) {
cp = VTOC(vp);
error = is_file_clean(vp, VTOF(vp)->ff_size);
hfs_unlock(cp);
}
return (error);
}
case F_RDADVISE: {
register struct radvisory *ra;
struct filefork *fp;
int error;
if (!vnode_isreg(vp))
return EINVAL;
ra = (struct radvisory *)(ap->a_data);
fp = VTOF(vp);
hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK);
#if HFS_COMPRESSION
if (compressed && (uncompressed_size == -1)) {
error = decmpfs_error;
} else if ((compressed && (ra->ra_offset >= uncompressed_size)) ||
(!compressed && (ra->ra_offset >= fp->ff_size))) {
error = EFBIG;
}
#else
if (ra->ra_offset >= fp->ff_size) {
error = EFBIG;
}
#endif
else {
error = advisory_read(vp, fp->ff_size, ra->ra_offset, ra->ra_count);
}
hfs_unlock_truncate(VTOC(vp), 0);
return (error);
}
case F_READBOOTSTRAP:
case F_WRITEBOOTSTRAP:
return 0;
case _IOC(IOC_OUT,'h', 4, 0):
{
if (is64bit) {
*(user_time_t *)(ap->a_data) = (user_time_t) (to_bsd_time(VTOVCB(vp)->localCreateDate));
}
else {
*(user32_time_t *)(ap->a_data) = (user32_time_t) (to_bsd_time(VTOVCB(vp)->localCreateDate));
}
return 0;
}
case SPOTLIGHT_FSCTL_GET_MOUNT_TIME:
*(uint32_t *)ap->a_data = hfsmp->hfs_mount_time;
break;
case SPOTLIGHT_FSCTL_GET_LAST_MTIME:
*(uint32_t *)ap->a_data = hfsmp->hfs_last_mounted_mtime;
break;
case HFS_FSCTL_SET_VERY_LOW_DISK:
if (*(uint32_t *)ap->a_data >= hfsmp->hfs_freespace_notify_warninglimit) {
return EINVAL;
}
hfsmp->hfs_freespace_notify_dangerlimit = *(uint32_t *)ap->a_data;
break;
case HFS_FSCTL_SET_LOW_DISK:
if ( *(uint32_t *)ap->a_data >= hfsmp->hfs_freespace_notify_desiredlevel
|| *(uint32_t *)ap->a_data <= hfsmp->hfs_freespace_notify_dangerlimit) {
return EINVAL;
}
hfsmp->hfs_freespace_notify_warninglimit = *(uint32_t *)ap->a_data;
break;
case HFS_FSCTL_SET_DESIRED_DISK:
if (*(uint32_t *)ap->a_data <= hfsmp->hfs_freespace_notify_warninglimit) {
return EINVAL;
}
hfsmp->hfs_freespace_notify_desiredlevel = *(uint32_t *)ap->a_data;
break;
case HFS_VOLUME_STATUS:
*(uint32_t *)ap->a_data = hfsmp->hfs_notification_conditions;
break;
case HFS_SET_BOOT_INFO:
if (!vnode_isvroot(vp))
return(EINVAL);
if (!kauth_cred_issuser(cred) && (kauth_cred_getuid(cred) != vfs_statfs(HFSTOVFS(hfsmp))->f_owner))
return(EACCES);
if (hfsmp->hfs_flags & HFS_READ_ONLY) {
return (EROFS);
}
HFS_MOUNT_LOCK(hfsmp, TRUE);
bcopy(ap->a_data, &hfsmp->vcbFndrInfo, sizeof(hfsmp->vcbFndrInfo));
HFS_MOUNT_UNLOCK(hfsmp, TRUE);
(void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
break;
case HFS_GET_BOOT_INFO:
if (!vnode_isvroot(vp))
return(EINVAL);
HFS_MOUNT_LOCK(hfsmp, TRUE);
bcopy(&hfsmp->vcbFndrInfo, ap->a_data, sizeof(hfsmp->vcbFndrInfo));
HFS_MOUNT_UNLOCK(hfsmp, TRUE);
break;
case HFS_MARK_BOOT_CORRUPT:
if (!is_suser()) {
return EACCES;
}
if (!(vfs_flags(HFSTOVFS(hfsmp)) & MNT_ROOTFS) ||
!vnode_isvroot(vp)) {
return EINVAL;
}
if (hfsmp->hfs_flags & HFS_READ_ONLY) {
return (EROFS);
}
printf ("hfs_vnop_ioctl: Marking the boot volume corrupt.\n");
hfs_mark_volume_inconsistent(hfsmp);
break;
case HFS_FSCTL_GET_JOURNAL_INFO:
jip = (struct hfs_journal_info*)ap->a_data;
if (vp == NULLVP)
return EINVAL;
if (hfsmp->jnl == NULL) {
jnl_start = 0;
jnl_size = 0;
} else {
jnl_start = (off_t)(hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset;
jnl_size = (off_t)hfsmp->jnl_size;
}
jip->jstart = jnl_start;
jip->jsize = jnl_size;
break;
case HFS_SET_ALWAYS_ZEROFILL: {
struct cnode *cp = VTOC(vp);
if (*(int *)ap->a_data) {
cp->c_flag |= C_ALWAYS_ZEROFILL;
} else {
cp->c_flag &= ~C_ALWAYS_ZEROFILL;
}
break;
}
case HFS_DISABLE_METAZONE: {
if (!is_suser()) {
return EACCES;
}
if (hfsmp->hfs_flags & HFS_READ_ONLY) {
return (EROFS);
}
(void) hfs_metadatazone_init(hfsmp, true);
printf ("hfs: Disabling metadata zone on %s\n", hfsmp->vcbVN);
break;
}
default:
return (ENOTTY);
}
return 0;
}
/*
 * VNOP_SELECT entry point.
 *
 * HFS unconditionally reports the vnode as ready for I/O; the argument
 * block is ignored.
 */
int
hfs_vnop_select(__unused struct vnop_select_args *ap)
{
	/* Always selectable. */
	return 1;
}
/*
 * Map a logical file block (bn) to a physical block on the underlying
 * device.
 *
 * On return: *vpp (if supplied) is the device vnode, *bnp is the
 * physical block number, and *runp (if supplied) is the count of
 * additional contiguous logical blocks following it.  When *bnp is not
 * wanted (bnp == NULL) nothing else is computed.  Returns 0 or an errno.
 */
int
hfs_bmap(struct vnode *vp, daddr_t bn, struct vnode **vpp, daddr64_t *bnp, unsigned int *runp)
{
	struct filefork *fork = VTOF(vp);
	struct hfsmount *mount = VTOHFS(vp);
	u_int32_t log_blksize;
	size_t contig_bytes = 0;
	off_t file_offset;
	int need_extents_lock;
	int sys_lockflags = 0;
	int error = E_NONE;

	/* The physical blocks live on the underlying device vnode. */
	if (vpp != NULL)
		*vpp = mount->hfs_devvp;

	/* Caller only wanted the device vnode. */
	if (bnp == NULL)
		return (0);

	log_blksize = GetLogicalBlockSize(vp);
	file_offset = (off_t)bn * log_blksize;

	/* Forks with overflow extents require the extents B-tree lock. */
	need_extents_lock = overflow_extents(fork);
	if (need_extents_lock)
		sys_lockflags = hfs_systemfile_lock(mount, SFL_EXTENTS, HFS_EXCLUSIVE_LOCK);

	error = MacToVFSError(MapFileBlockC(HFSTOVCB(mount),
	                                    (FCB *)fork,
	                                    MAXPHYSIO,
	                                    file_offset,
	                                    bnp,
	                                    &contig_bytes));

	if (need_extents_lock)
		hfs_systemfile_unlock(mount, sys_lockflags);

	if (error == E_NONE && runp != NULL) {
		if (can_cluster(log_blksize)) {
			/* Additional contiguous blocks beyond the one mapped. */
			*runp = (contig_bytes < log_blksize) ? 0 : (contig_bytes / log_blksize) - 1;
		} else {
			*runp = 0;
		}
	}
	return (error);
}
/*
 * VNOP_BLKTOOFF: convert a logical block number into a byte offset
 * within the file, using the vnode's logical block size.
 */
int
hfs_vnop_blktooff(struct vnop_blktooff_args *ap)
{
	struct vnode *vp = ap->a_vp;

	if (vp == NULL)
		return EINVAL;

	*ap->a_offset = (off_t)ap->a_lblkno * (off_t)GetLogicalBlockSize(vp);
	return 0;
}
/*
 * VNOP_OFFTOBLK: convert a byte offset within the file into a logical
 * block number, using the vnode's logical block size.
 */
int
hfs_vnop_offtoblk(struct vnop_offtoblk_args *ap)
{
	struct vnode *vp = ap->a_vp;

	if (vp == NULL)
		return EINVAL;

	*ap->a_lblkno = (daddr64_t)(ap->a_offset / (off_t)GetLogicalBlockSize(vp));
	return 0;
}
/*
 * VNOP_BLOCKMAP: map a file offset (ap->a_foffset) onto a physical block
 * number (*ap->a_bpn) and report how many contiguous bytes are available
 * there (*ap->a_run).
 *
 * For VNODE_WRITE requests, any "loaned" (delayed-allocation) blocks are
 * first converted into real on-disk allocations inside a journal
 * transaction.  Regions covered by invalid (not-yet-zeroed) ranges are
 * reported with a block number of -1 so the caller zero-fills instead of
 * reading stale disk contents.  Returns 0 or an errno value.
 */
int
hfs_vnop_blockmap(struct vnop_blockmap_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp;
	struct hfsmount *hfsmp;
	size_t bytesContAvail = 0;
	int retval = E_NONE;
	int syslocks = 0;
	int lockflags = 0;
	struct rl_entry *invalid_range;
	enum rl_overlaptype overlaptype;
	int started_tr = 0;
	int tooklock = 0;

#if HFS_COMPRESSION
	/* Compressed data forks have no directly mappable blocks. */
	if (VNODE_IS_RSRC(vp)) {
		/* Resource forks are never compressed; map them normally. */
	} else {
		if ( hfs_file_is_compressed(VTOC(vp), 1) ) {
			int state = decmpfs_cnode_get_vnode_state(VTOCMP(vp));
			switch(state) {
			case FILE_IS_COMPRESSED:
				return ENOTSUP;
			case FILE_IS_CONVERTING:
				/* Being converted back to a plain file; use the
				 * normal mapping path below. */
				break;
			default:
				printf("invalid state %d for compressed file\n", state);
				/* fall through and attempt a normal mapping */
			}
		}
	}
#endif

	/* Directories cannot be block-mapped. */
	if (vnode_isdir(vp)) {
		return (ENOTSUP);
	}
	/* No output block pointer: nothing to do. */
	if (ap->a_bpn == NULL)
		return (0);

	/* Take the cnode lock unless this thread already owns it
	 * (system files, symlinks and the swap file skip the lock). */
	if ( !vnode_issystem(vp) && !vnode_islnk(vp) && !vnode_isswap(vp)) {
		if (VTOC(vp)->c_lockowner != current_thread()) {
			hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
			tooklock = 1;
		}
	}
	hfsmp = VTOHFS(vp);
	cp = VTOC(vp);
	fp = VTOF(vp);

retry:
	/* A write into a fork with loaned blocks must allocate them for
	 * real; that needs a transaction plus extents+bitmap locks. */
	if ((ap->a_flags & VNODE_WRITE) && (fp->ff_unallocblocks != 0)) {
		if (hfs_start_transaction(hfsmp) != 0) {
			retval = EINVAL;
			goto exit;
		} else {
			started_tr = 1;
		}
		syslocks = SFL_EXTENTS | SFL_BITMAP;
	} else if (overflow_extents(fp)) {
		syslocks = SFL_EXTENTS;
	}
	if (syslocks)
		lockflags = hfs_systemfile_lock(hfsmp, syslocks, HFS_EXCLUSIVE_LOCK);

	if ((ap->a_flags & VNODE_WRITE) && (fp->ff_unallocblocks != 0)) {
		int64_t actbytes;
		u_int32_t loanedBlocks;

		/* ff_unallocblocks became nonzero after the first test above
		 * (no transaction was started): drop locks and retry so the
		 * allocation happens inside a transaction. */
		if (started_tr == 0) {
			if (syslocks) {
				hfs_systemfile_unlock(hfsmp, lockflags);
				syslocks = 0;
			}
			goto retry;
		}
		/* Convert every loaned block into a real allocation. */
		loanedBlocks = fp->ff_unallocblocks;
		retval = ExtendFileC(hfsmp, (FCB*)fp, 0, 0,
		                     kEFAllMask | kEFNoClumpMask, &actbytes);
		if (retval) {
			/* Allocation failed: restore the loaned-block accounting
			 * on the fork, cnode and mount before bailing out. */
			fp->ff_unallocblocks = loanedBlocks;
			cp->c_blocks += loanedBlocks;
			fp->ff_blocks += loanedBlocks;
			HFS_MOUNT_LOCK(hfsmp, TRUE);
			hfsmp->loanedBlocks += loanedBlocks;
			HFS_MOUNT_UNLOCK(hfsmp, TRUE);
			hfs_systemfile_unlock(hfsmp, lockflags);
			cp->c_flag |= C_MODIFIED;
			if (started_tr) {
				(void) hfs_update(vp, TRUE);
				(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
				hfs_end_transaction(hfsmp);
				started_tr = 0;
			}
			goto exit;
		}
	}

	/* Perform the actual offset-to-block mapping. */
	retval = MapFileBlockC(hfsmp, (FCB *)fp, ap->a_size, ap->a_foffset,
	                       ap->a_bpn, &bytesContAvail);
	if (syslocks) {
		hfs_systemfile_unlock(hfsmp, lockflags);
		syslocks = 0;
	}
	if (started_tr) {
		(void) hfs_update(vp, TRUE);
		(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
		hfs_end_transaction(hfsmp);
		started_tr = 0;
	}
	if (retval) {
		/* Mapping failed.  Only an ERANGE on a read of a fork that
		 * still has loaned blocks can be satisfied from the invalid
		 * ranges (reported as block -1 => zero-fill); everything else
		 * is a hard error. */
		if ((MacToVFSError(retval) != ERANGE) ||
		    (ap->a_flags & VNODE_WRITE) ||
		    ((ap->a_flags & VNODE_READ) && (fp->ff_unallocblocks == 0))) {
			goto exit;
		}
		/* Offsets past EOF cannot be satisfied. */
		if (ap->a_foffset > fp->ff_size) {
			goto exit;
		}
		overlaptype = rl_scan(&fp->ff_invalidranges, ap->a_foffset,
		                      ap->a_foffset + (off_t)(ap->a_size - 1),
		                      &invalid_range);
		switch(overlaptype) {
		case RL_OVERLAPISCONTAINED:
			/* Only usable when the request starts exactly at the
			 * invalid range's start. */
			if (ap->a_foffset != invalid_range->rl_start) {
				break;
			}
			/* fall through */
		case RL_MATCHINGOVERLAP:
		case RL_OVERLAPCONTAINSRANGE:
		case RL_OVERLAPSTARTSBEFORE:
			/* Report a zero-fill region clipped to EOF and to the
			 * requested size. */
			if ((off_t)fp->ff_size > (invalid_range->rl_end + 1)) {
				bytesContAvail = (invalid_range->rl_end + 1) - ap->a_foffset;
			} else {
				bytesContAvail = fp->ff_size - ap->a_foffset;
			}
			if (bytesContAvail > ap->a_size) {
				bytesContAvail = ap->a_size;
			}
			*ap->a_bpn = (daddr64_t)-1;	/* -1 == zero-fill, no disk read */
			retval = 0;
			break;
		case RL_OVERLAPENDSAFTER:
		case RL_NOOVERLAP:
			break;
		}
		goto exit;
	}

	/* Mapping succeeded: clip the contiguous run against any invalid
	 * range so callers never read stale, not-yet-zeroed disk blocks. */
	overlaptype = rl_scan(&fp->ff_invalidranges, ap->a_foffset,
	                      ap->a_foffset + (off_t)bytesContAvail - 1,
	                      &invalid_range);
	if (overlaptype != RL_NOOVERLAP) {
		switch(overlaptype) {
		case RL_MATCHINGOVERLAP:
		case RL_OVERLAPCONTAINSRANGE:
		case RL_OVERLAPSTARTSBEFORE:
			/* The request starts inside an invalid range: zero-fill. */
			*ap->a_bpn = (daddr64_t)-1;
			if (((off_t)fp->ff_size > (invalid_range->rl_end + 1)) &&
			    ((size_t)(invalid_range->rl_end + 1 - ap->a_foffset) < bytesContAvail)) {
				bytesContAvail = invalid_range->rl_end + 1 - ap->a_foffset;
			}
			break;
		case RL_OVERLAPISCONTAINED:
		case RL_OVERLAPENDSAFTER:
			if (invalid_range->rl_start == ap->a_foffset) {
				/* Starts exactly at the invalid range: zero-fill. */
				*ap->a_bpn = (daddr64_t)-1;
				if (((off_t)fp->ff_size > (invalid_range->rl_end + 1)) &&
				    ((size_t)(invalid_range->rl_end + 1 - ap->a_foffset) < bytesContAvail)) {
					bytesContAvail = invalid_range->rl_end + 1 - ap->a_foffset;
				}
			} else {
				/* Valid data up to the start of the invalid range. */
				bytesContAvail = invalid_range->rl_start - ap->a_foffset;
			}
			break;
		case RL_NOOVERLAP:
			break;
		}
		if (bytesContAvail > ap->a_size)
			bytesContAvail = ap->a_size;
	}

exit:
	if (retval == 0) {
		if (ap->a_run)
			*ap->a_run = bytesContAvail;
		if (ap->a_poff)
			*(int *)ap->a_poff = 0;
	}
	if (tooklock)
		hfs_unlock(cp);
	return (MacToVFSError(retval));
}
/*
 * VNOP_STRATEGY: hand a buffer down to the underlying device vnode for
 * the actual I/O.  With content protection enabled, the cnode's
 * protection entry is attached to the buffer first (cnode held shared
 * across the transfer).
 */
int
hfs_vnop_strategy(struct vnop_strategy_args *ap)
{
	buf_t bp = ap->a_bp;
	vnode_t vp = buf_vnode(bp);
	int error;
#if CONFIG_PROTECT
	cnode_t *cp = cp_get_protected_cnode(vp);

	if (cp != NULL) {
		/* Attach the protection entry so the lower layers can
		 * encrypt/decrypt this transfer. */
		hfs_lock(cp, HFS_SHARED_LOCK);
		buf_setcpaddr(bp, cp->c_cpentry);
	}
#endif

	error = buf_strategy(VTOHFS(vp)->hfs_devvp, ap);

#if CONFIG_PROTECT
	if (cp != NULL) {
		hfs_unlock(cp);
	}
#endif
	return error;
}
/*
 * Discard pending catalog-update state on a cnode without writing
 * anything to disk: cancels all queued timestamp updates and clears the
 * modified flag.  Always returns 0.
 */
static int
hfs_minorupdate(struct vnode *vp) {
	struct cnode *cp = VTOC(vp);

	/* Drop every pending timestamp update... */
	cp->c_touch_modtime = 0;
	cp->c_touch_chgtime = 0;
	cp->c_touch_acctime = 0;
	/* ...and forget that the cnode was dirtied. */
	cp->c_flag &= ~C_MODIFIED;
	return 0;
}
/*
 * Truncate or extend the given fork to 'length' bytes.
 *
 * This is the single-step worker behind hfs_truncate(): it grows or
 * shrinks the allocation inside journal transactions, maintains the
 * invalid (zero-fill) range list, quota accounting and loaned-block
 * accounting, and finally updates (or, with 'skipupdate', deliberately
 * suppresses updating) the catalog record and timestamps.
 *
 * Fix vs. previous revision: removed a stray empty statement (';;')
 * after the vfs_context_proc() initializer.
 *
 * Caller must hold the cnode lock and the truncate lock exclusive.
 * Returns 0 or an errno value.
 */
int
do_hfs_truncate(struct vnode *vp, off_t length, int flags, int skipupdate, vfs_context_t context)
{
	register struct cnode *cp = VTOC(vp);
	struct filefork *fp = VTOF(vp);
	struct proc *p = vfs_context_proc(context);
	kauth_cred_t cred = vfs_context_ucred(context);
	int retval;
	off_t bytesToAdd;
	off_t actualBytesAdded;
	off_t filebytes;
	u_int32_t fileblocks;
	int blksize;
	struct hfsmount *hfsmp;
	int lockflags;

	blksize = VTOVCB(vp)->blockSize;
	fileblocks = fp->ff_blocks;
	filebytes = (off_t)fileblocks * (off_t)blksize;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_START,
	             (int)length, (int)fp->ff_size, (int)filebytes, 0, 0);

	if (length < 0)
		return (EINVAL);

	if ((off_t)fp->ff_size < 0)
		return (EINVAL);

	/* Plain HFS files are limited to 2^31-1 bytes. */
	if ((!ISHFSPLUS(VTOVCB(vp))) && (length > (off_t)MAXHFSFILESIZE))
		return (EFBIG);

	hfsmp = VTOHFS(vp);

	retval = E_NONE;

	/* Files that are changing size are exempt from hot-file tracking. */
	if (hfsmp->hfc_stage == HFC_RECORDING) {
		fp->ff_bytesread = 0;
	}

#if QUOTA
	if ((retval = hfs_getinoquota(cp)))
		return(retval);
#endif

	if (length > (off_t)fp->ff_size) {
		/*
		 * Growing the file.
		 */
#if QUOTA
		/* Charge the quota for the full new allocation up front. */
		retval = hfs_chkdq(cp, (int64_t)(roundup(length - filebytes, blksize)),
		                   cred, 0);
		if (retval)
			goto Err_Exit;
#endif
		if (length > filebytes) {
			/* Need to allocate additional disk space. */
			int eflags;
			u_int32_t blockHint = 0;

			eflags = kEFAllMask | kEFNoClumpMask;
			/* Non-superusers may not dip into the reserve. */
			if (cred && suser(cred, NULL) != 0)
				eflags |= kEFReserveMask;
			/* First allocation for a virtual metadata file goes into
			 * the metadata zone. */
			if (filebytes == 0 &&
			    hfsmp->hfs_flags & HFS_METADATA_ZONE &&
			    hfs_virtualmetafile(cp)) {
				eflags |= kEFMetadataMask;
				blockHint = hfsmp->hfs_metazone_start;
			}
			if (hfs_start_transaction(hfsmp) != 0) {
				retval = EINVAL;
				goto Err_Exit;
			}

			lockflags = SFL_BITMAP;
			if (overflow_extents(fp))
				lockflags |= SFL_EXTENTS;
			lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

			/* Extend in a loop; ExtendFileC may add less than asked. */
			while ((length > filebytes) && (retval == E_NONE)) {
				bytesToAdd = length - filebytes;
				retval = MacToVFSError(ExtendFileC(VTOVCB(vp),
				                                   (FCB*)fp,
				                                   bytesToAdd,
				                                   blockHint,
				                                   eflags,
				                                   &actualBytesAdded));
				filebytes = (off_t)fp->ff_blocks * (off_t)blksize;
				if (actualBytesAdded == 0 && retval == E_NONE) {
					/* No progress possible: clamp to what we got. */
					if (length > filebytes)
						length = filebytes;
					break;
				}
			}
			hfs_systemfile_unlock(hfsmp, lockflags);

			if (hfsmp->jnl) {
				if (skipupdate) {
					(void) hfs_minorupdate(vp);
				}
				else {
					(void) hfs_update(vp, TRUE);
					(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
				}
			}

			hfs_end_transaction(hfsmp);

			if (retval)
				goto Err_Exit;

			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_NONE,
			             (int)length, (int)fp->ff_size, (int)filebytes, 0, 0);
		}

		if (!(flags & IO_NOZEROFILL)) {
			/*
			 * Track the newly exposed region as "invalid" so reads
			 * see zeroes instead of stale disk contents; zero the
			 * partial page at the old EOF immediately if needed.
			 */
			if (UBCINFOEXISTS(vp) && (vnode_issystem(vp) == 0) && retval == E_NONE) {
				struct rl_entry *invalid_range;
				off_t zero_limit;

				zero_limit = (fp->ff_size + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
				if (length < zero_limit) zero_limit = length;

				if (length > (off_t)fp->ff_size) {
					struct timeval tv;

					/* Extending the file: a partially zeroed page at the
					 * old EOF must be zeroed out now via cluster_write. */
					if ((fp->ff_size & PAGE_MASK_64) &&
					    (rl_scan(&fp->ff_invalidranges, fp->ff_size & ~PAGE_MASK_64,
					             fp->ff_size - 1, &invalid_range) == RL_NOOVERLAP)) {
						/* The page containing the old EOF is valid; zero
						 * from the old EOF through zero_limit. */
						hfs_unlock(cp);
						retval = cluster_write(vp, (struct uio *) 0, fp->ff_size, zero_limit,
						                       fp->ff_size, (off_t)0,
						                       (flags & IO_SYNC) | IO_HEADZEROFILL | IO_NOZERODIRTY);
						hfs_lock(cp, HFS_FORCE_LOCK);
						if (retval) goto Err_Exit;

						/* Anything beyond zero_limit becomes invalid. */
						if (length > zero_limit) {
							microuptime(&tv);
							rl_add(zero_limit, length - 1, &fp->ff_invalidranges);
							cp->c_zftimeout = tv.tv_sec + ZFTIMELIMIT;
						}
					} else {
						/* The whole new region is invalid (zero-fill). */
						microuptime(&tv);
						rl_add(fp->ff_size, length - 1, &fp->ff_invalidranges);
						cp->c_zftimeout = tv.tv_sec + ZFTIMELIMIT;
					};
				}
			} else {
				panic("hfs_truncate: invoked on non-UBC object?!");
			};
		}
		cp->c_touch_modtime = TRUE;
		fp->ff_size = length;

	} else { /* Shorten the size of the file */

		if ((off_t)fp->ff_size > length) {
			/* Invalid ranges past the new EOF are irrelevant. */
			rl_remove(length, fp->ff_size - 1, &fp->ff_invalidranges);
		}

		/* Re-balance the loaned-block accounting against the new size. */
		if (fp->ff_unallocblocks > 0) {
			u_int32_t finalblks;
			u_int32_t loanedBlocks;

			HFS_MOUNT_LOCK(hfsmp, TRUE);

			loanedBlocks = fp->ff_unallocblocks;
			cp->c_blocks -= loanedBlocks;
			fp->ff_blocks -= loanedBlocks;
			fp->ff_unallocblocks = 0;

			hfsmp->loanedBlocks -= loanedBlocks;

			finalblks = (length + blksize - 1) / blksize;
			if (finalblks > fp->ff_blocks) {
				/* Calculate required unmapped blocks, shrinking size. */
				loanedBlocks = finalblks - fp->ff_blocks;
				hfsmp->loanedBlocks += loanedBlocks;

				fp->ff_unallocblocks = loanedBlocks;
				cp->c_blocks += loanedBlocks;
				fp->ff_blocks += loanedBlocks;
			}
			HFS_MOUNT_UNLOCK(hfsmp, TRUE);
		}

		/* For a TBE process, block deallocation is deferred until close
		 * (hfs_close truncates with IO_NDELAY set). */
		if ((flags & IO_NDELAY) || (proc_tbe(p) == 0)) {
#if QUOTA
			off_t savedbytes = ((off_t)fp->ff_blocks * (off_t)blksize);
#endif
			if (hfs_start_transaction(hfsmp) != 0) {
				retval = EINVAL;
				goto Err_Exit;
			}

			if (fp->ff_unallocblocks == 0) {
				lockflags = SFL_BITMAP;
				if (overflow_extents(fp))
					lockflags |= SFL_EXTENTS;
				lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

				retval = MacToVFSError(TruncateFileC(VTOVCB(vp), (FCB*)fp, length, 0,
				                                     FORK_IS_RSRC (fp), FTOC(fp)->c_fileid, false));

				hfs_systemfile_unlock(hfsmp, lockflags);
			}
			if (hfsmp->jnl) {
				if (retval == 0) {
					fp->ff_size = length;
				}
				if (skipupdate) {
					(void) hfs_minorupdate(vp);
				}
				else {
					(void) hfs_update(vp, TRUE);
					(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
				}
			}
			hfs_end_transaction(hfsmp);

			filebytes = (off_t)fp->ff_blocks * (off_t)blksize;
			if (retval)
				goto Err_Exit;
#if QUOTA
			/* Credit the quota for the freed space. */
			(void) hfs_chkdq(cp, (int64_t)-(savedbytes - filebytes), NOCRED, 0);
#endif
		}
		if ((off_t)fp->ff_size != length)
			cp->c_touch_modtime = TRUE;
		fp->ff_size = length;
	}
	/* A non-superuser size change clears setuid/setgid. */
	if (cp->c_mode & (S_ISUID | S_ISGID)) {
		if (!vfs_context_issuser(context)) {
			cp->c_mode &= ~(S_ISUID | S_ISGID);
			skipupdate = 0;
		}
	}
	if (skipupdate) {
		retval = hfs_minorupdate(vp);
	}
	else {
		cp->c_touch_chgtime = TRUE;
		cp->c_touch_modtime = TRUE;
		retval = hfs_update(vp, MNT_WAIT);
	}
	if (retval) {
		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_NONE,
		             -1, -1, -1, retval, 0);
	}

Err_Exit:

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_END,
	             (int)length, (int)fp->ff_size, (int)filebytes, retval, 0);

	return (retval);
}
/*
 * Prepare a vnode for having all of its storage released: shrink the
 * UBC size to zero, settle quota state, drop all invalid (zero-fill)
 * ranges, and return any "loaned" blocks to the volume's accounting.
 * Returns 0 or an errno value.
 */
int
hfs_prepare_release_storage (struct hfsmount *hfsmp, struct vnode *vp) {
	struct cnode *cp = VTOC(vp);
	struct filefork *fp = VTOF(vp);
	int retval = 0;

	/* Directories have no file storage to release. */
	if (vnode_isdir(vp))
		return EISDIR;

	/* Tell UBC the file is now empty. */
	ubc_setsize(vp, 0);

	if ((off_t)fp->ff_size < 0)
		return EINVAL;

#if QUOTA
	retval = hfs_getinoquota(cp);
	if (retval != 0)
		return retval;
#endif

	/* Every invalid range dies with the storage. */
	rl_remove(0, fp->ff_size - 1, &fp->ff_invalidranges);

	/* Hand back any blocks loaned against delayed allocations. */
	if (fp->ff_unallocblocks > 0) {
		u_int32_t loaned;

		HFS_MOUNT_LOCK(hfsmp, TRUE);
		loaned = fp->ff_unallocblocks;
		fp->ff_unallocblocks = 0;
		fp->ff_blocks -= loaned;
		cp->c_blocks -= loaned;
		hfsmp->loanedBlocks -= loaned;
		HFS_MOUNT_UNLOCK(hfsmp, TRUE);
	}

	return 0;
}
/*
 * Truncate one fork to zero length, shrinking in HFS_BIGFILE_SIZE steps
 * when the fork has overflow extents so that each journal transaction
 * stays bounded.  'rsrc' is 0 for the data fork, 1 for the resource
 * fork (passed straight through to TruncateFileC).  Returns 0 or errno.
 */
static int
hfs_release_fork_storage(struct hfsmount *hfsmp, struct filefork *fork,
                         u_int32_t fileid, int rsrc)
{
	off_t filebytes;
	u_int32_t fileblocks;
	int blksize = hfsmp->blockSize;
	int error = 0;
	int lockflags;

	fileblocks = fork->ff_blocks;
	filebytes = (off_t)fileblocks * (off_t)blksize;

	while (filebytes > 0) {
		/* Shrink by one chunk, or all the way to zero when no
		 * overflow extents are involved. */
		if (filebytes > HFS_BIGFILE_SIZE && overflow_extents(fork)) {
			filebytes -= HFS_BIGFILE_SIZE;
		} else {
			filebytes = 0;
		}

		/* Each step is its own journal transaction. */
		if (hfs_start_transaction(hfsmp) != 0) {
			error = EINVAL;
			break;
		}
		if (fork->ff_unallocblocks == 0) {
			lockflags = SFL_BITMAP;
			if (overflow_extents(fork))
				lockflags |= SFL_EXTENTS;
			lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

			error = MacToVFSError(TruncateFileC(HFSTOVCB(hfsmp), fork, filebytes, 1, rsrc, fileid, false));

			hfs_systemfile_unlock(hfsmp, lockflags);
		}
		if (error == 0) {
			fork->ff_size = filebytes;
		}
		(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);

		hfs_end_transaction(hfsmp);

		if (error) {
			break;
		}
	}
	return error;
}

/*
 * Release all on-disk storage held by a file's data fork and (when
 * present) its resource fork, e.g. for an open-unlinked file being
 * reclaimed.  The resource fork is only processed if the data fork was
 * released successfully.  Returns 0 or an errno value.
 */
int
hfs_release_storage (struct hfsmount *hfsmp, struct filefork *datafork,
                     struct filefork *rsrcfork, u_int32_t fileid) {
	int error = 0;

	/* Data fork first... */
	if (datafork->ff_blocks > 0) {
		error = hfs_release_fork_storage(hfsmp, datafork, fileid, 0);
	}
	/* ...then the resource fork, if any. */
	if (error == 0 && (rsrcfork != NULL) && rsrcfork->ff_blocks > 0) {
		error = hfs_release_fork_storage(hfsmp, rsrcfork, fileid, 1);
	}
	return error;
}
/*
 * Truncate (or extend) a file to 'length'.
 *
 * The size change is applied via do_hfs_truncate() in steps of at most
 * HFS_BIGFILE_SIZE whenever the fork has overflow extents, so each
 * underlying transaction stays a manageable size.  'skipsetsize'
 * suppresses the ubc_setsize() call (except when length is 0);
 * 'skipupdate' is forwarded to do_hfs_truncate().  Returns 0 or errno.
 */
int
hfs_truncate(struct vnode *vp, off_t length, int flags, int skipsetsize,
             int skipupdate, vfs_context_t context)
{
	struct cnode *cp = VTOC(vp);
	struct filefork *fp = VTOF(vp);
	off_t filebytes;
	int blksize;
	int error = 0;

	/* Cannot truncate an HFS directory. */
	if (vnode_isdir(vp))
		return EISDIR;

	/* The swap file is only ever truncated to zero. */
	if (vnode_isswap(vp) && length != 0)
		return EPERM;

	blksize = VTOVCB(vp)->blockSize;
	filebytes = (off_t)fp->ff_blocks * (off_t)blksize;

	/* Keep UBC's notion of the size in sync unless asked not to. */
	if (!skipsetsize || length == 0)
		ubc_setsize(vp, length);

	if (filebytes == length) {
		/* Allocation already matches; a single call handles the rest. */
		error = do_hfs_truncate(vp, length, flags, skipupdate, context);
	} else {
		/* Move the allocation toward 'length' one chunk at a time. */
		int shrinking = (length < filebytes);

		while (filebytes != length) {
			off_t remaining = shrinking ? (filebytes - length)
			                            : (length - filebytes);

			if (remaining > HFS_BIGFILE_SIZE && overflow_extents(fp)) {
				filebytes += shrinking ? -HFS_BIGFILE_SIZE : HFS_BIGFILE_SIZE;
			} else {
				filebytes = length;
			}
			cp->c_flag |= C_FORCEUPDATE;
			error = do_hfs_truncate(vp, filebytes, flags, skipupdate, context);
			if (error)
				break;
		}
	}

	/* Files that change size drop out of hot-file tracking. */
	if (VTOHFS(vp)->hfc_stage == HFC_RECORDING)
		fp->ff_bytesread = 0;

	return error;
}
/*
 * VNOP_ALLOCATE: preallocate storage for a file (or trim preallocation
 * back toward the logical size).
 *
 * Honors ALLOCATECONTIG/ALLOCATEALL/ALLOCATEFROMPEOF/ALLOCATEFROMVOL
 * flags, reports the number of bytes actually allocated through
 * ap->a_bytesallocated, and updates quota accounting.  Returns 0 or an
 * errno value.
 */
int
hfs_vnop_allocate(struct vnop_allocate_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp;
	ExtendedVCB *vcb;
	off_t length = ap->a_length;
	off_t startingPEOF;
	off_t moreBytesRequested;
	off_t actualBytesAdded;
	off_t filebytes;
	u_int32_t fileblocks;
	int retval, retval2;
	u_int32_t blockHint;
	u_int32_t extendFlags;
	struct hfsmount *hfsmp;
	kauth_cred_t cred = vfs_context_ucred(ap->a_context);
	int lockflags;
	time_t orig_ctime;

	*(ap->a_bytesallocated) = 0;

	if (!vnode_isreg(vp))
		return (EISDIR);
	if (length < (off_t)0)
		return (EINVAL);

	cp = VTOC(vp);

	orig_ctime = VTOC(vp)->c_ctime;

	/* Give any namespace handler a chance to act before the size of
	 * the file changes (length 0 also counts as a delete-like op). */
	check_for_tracked_file(vp, orig_ctime, ap->a_length == 0 ? NAMESPACE_HANDLER_TRUNCATE_OP|NAMESPACE_HANDLER_DELETE_OP : NAMESPACE_HANDLER_TRUNCATE_OP, NULL);

	hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);

	if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK))) {
		/* NOTE(review): this path reaches Err_Exit, which also calls
		 * hfs_unlock(cp) even though the lock was not obtained —
		 * matches the upstream code; confirm hfs_unlock tolerates it. */
		goto Err_Exit;
	}

	fp = VTOF(vp);
	hfsmp = VTOHFS(vp);
	vcb = VTOVCB(vp);

	fileblocks = fp->ff_blocks;
	filebytes = (off_t)fileblocks * (off_t)vcb->blockSize;

	/* ALLOCATEFROMVOL may not be combined with shrinking. */
	if ((ap->a_flags & ALLOCATEFROMVOL) && (length < filebytes)) {
		retval = EINVAL;
		goto Err_Exit;
	}

	/* Fill in the flags word for the call to Extend the file */
	extendFlags = kEFNoClumpMask;
	if (ap->a_flags & ALLOCATECONTIG)
		extendFlags |= kEFContigMask;
	if (ap->a_flags & ALLOCATEALL)
		extendFlags |= kEFAllMask;
	/* Non-superusers may not dip into the volume's reserve. */
	if (cred && suser(cred, NULL) != 0)
		extendFlags |= kEFReserveMask;
	if (hfs_virtualmetafile(cp))
		extendFlags |= kEFMetadataMask;

	retval = E_NONE;
	blockHint = 0;
	startingPEOF = filebytes;

	/* Interpret 'length' relative to the PEOF or as a volume offset. */
	if (ap->a_flags & ALLOCATEFROMPEOF)
		length += filebytes;
	else if (ap->a_flags & ALLOCATEFROMVOL)
		blockHint = ap->a_offset / VTOVCB(vp)->blockSize;

	/* Allocation already matches the request: nothing to do. */
	if (filebytes == length)
		goto Std_Exit;

	if (length > filebytes) {
		/*
		 * Growing the preallocation.
		 */
		off_t total_bytes_added = 0, orig_request_size;

		orig_request_size = moreBytesRequested = length - filebytes;

#if QUOTA
		retval = hfs_chkdq(cp,
		                   (int64_t)(roundup(moreBytesRequested, vcb->blockSize)),
		                   cred, 0);
		if (retval)
			goto Err_Exit;
#endif
		/* Steer user-file allocations away from the metadata zone;
		 * virtual metadata files are steered into it. */
		if (hfsmp->hfs_flags & HFS_METADATA_ZONE) {
			if (hfs_virtualmetafile(cp)) {
				blockHint = hfsmp->hfs_metazone_start;
			} else if ((blockHint >= hfsmp->hfs_metazone_start) &&
			           (blockHint <= hfsmp->hfs_metazone_end)) {
				blockHint = hfsmp->hfs_metazone_end + 1;
			}
		}

		/* Extend in chunks, one journal transaction per pass. */
		while ((length > filebytes) && (retval == E_NONE)) {
			off_t bytesRequested;

			if (hfs_start_transaction(hfsmp) != 0) {
				retval = EINVAL;
				goto Err_Exit;
			}

			/* Protect extents b-tree and allocation bitmap */
			lockflags = SFL_BITMAP;
			if (overflow_extents(fp))
				lockflags |= SFL_EXTENTS;
			lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

			if (moreBytesRequested >= HFS_BIGFILE_SIZE) {
				bytesRequested = HFS_BIGFILE_SIZE;
			} else {
				bytesRequested = moreBytesRequested;
			}

			/* Force a fresh contiguity scan for contiguous requests. */
			if (extendFlags & kEFContigMask) {
				hfsmp->hfs_flags &= ~HFS_DID_CONTIG_SCAN;
			}

			retval = MacToVFSError(ExtendFileC(vcb,
			                                   (FCB*)fp,
			                                   bytesRequested,
			                                   blockHint,
			                                   extendFlags,
			                                   &actualBytesAdded));

			if (retval == E_NONE) {
				*(ap->a_bytesallocated) += actualBytesAdded;
				total_bytes_added += actualBytesAdded;
				moreBytesRequested -= actualBytesAdded;
				if (blockHint != 0) {
					blockHint += actualBytesAdded / vcb->blockSize;
				}
			}
			filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize;

			hfs_systemfile_unlock(hfsmp, lockflags);

			if (hfsmp->jnl) {
				(void) hfs_update(vp, TRUE);
				(void) hfs_volupdate(hfsmp, VOL_UPDATE, 0);
			}

			hfs_end_transaction(hfsmp);
		}

		/* An error with no progress at all is a hard failure. */
		if (retval && (startingPEOF == filebytes))
			goto Err_Exit;

		/* Don't report more bytes than were actually requested
		 * (rounded up to a whole allocation block). */
		if (total_bytes_added != 0 && orig_request_size < total_bytes_added)
			*(ap->a_bytesallocated) =
				roundup(orig_request_size, (off_t)vcb->blockSize);

	} else {
		/*
		 * Shrinking the preallocation (never below the logical size).
		 */
		if (fp->ff_size > length) {
			/* Intentionally empty: shrinking below ff_size is handled
			 * by hfs_truncate below; nothing extra to do here. */
		}
		retval = hfs_truncate(vp, length, 0, 0, 0, ap->a_context);
		filebytes = (off_t)fp->ff_blocks * (off_t)vcb->blockSize;

		/* An error with no change at all is a hard failure. */
		if (retval && (startingPEOF == filebytes)) goto Err_Exit;
#if QUOTA
		/* Credit the quota for whatever was actually released. */
		(void) hfs_chkdq(cp, (int64_t)-((startingPEOF - filebytes)), NOCRED,0);
#endif

		if (fp->ff_size > filebytes) {
			fp->ff_size = filebytes;

			hfs_unlock(cp);
			ubc_setsize(vp, fp->ff_size);
			hfs_lock(cp, HFS_FORCE_LOCK);
		}
	}

Std_Exit:
	cp->c_touch_chgtime = TRUE;
	cp->c_touch_modtime = TRUE;
	retval2 = hfs_update(vp, MNT_WAIT);

	if (retval == 0)
		retval = retval2;
Err_Exit:
	hfs_unlock_truncate(cp, 0);
	hfs_unlock(cp);
	return (retval);
}
/*
 * VNOP_PAGEIN: page in a range of a file.
 *
 * If the caller supplied a UPL (ap->a_pl), it is paged in directly.
 * Otherwise a UPL is created here, trimmed to the present pages, and
 * paged in range-by-range, with decmpfs handling for compressed files
 * and hot-file-clustering read accounting.  Returns 0 or an errno.
 */
int
hfs_vnop_pagein(struct vnop_pagein_args *ap)
{
	vnode_t vp;
	struct cnode *cp;
	struct filefork *fp;
	int error = 0;
	upl_t upl;
	upl_page_info_t *pl;
	off_t f_offset;
	int offset;
	int isize;
	int pg_index;
	boolean_t truncate_lock_held = FALSE;
	boolean_t file_converted = FALSE;
	kern_return_t kret;

	vp = ap->a_vp;
	cp = VTOC(vp);
	fp = VTOF(vp);

#if CONFIG_PROTECT
	/* Content protection: make sure we can read/write this cnode. */
	if ((error = cp_handle_vnop(cp, CP_READ_ACCESS | CP_WRITE_ACCESS)) != 0) {
		return error;
	}
#endif

	if (ap->a_pl != NULL) {
		/* Caller-supplied UPL: page it in directly and return. */
		error = cluster_pagein(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset,
		                       ap->a_size, (off_t)fp->ff_size, ap->a_flags);
		goto pagein_done;
	}

retry_pagein:
	/* Take the truncate lock shared/recursive; on a force-unmounted
	 * volume only a deleted cnode is worth trying (non-blocking). */
	if (vfs_isforce(vp->v_mount)) {
		if (cp->c_flag & C_DELETED) {
			/* NOTE(review): failure here leaves the lock unheld and
			 * is tolerated — matches upstream behavior. */
			truncate_lock_held = hfs_try_trunclock(cp, HFS_RECURSE_TRUNCLOCK);
		}
	}
	else {
		hfs_lock_truncate(cp, HFS_RECURSE_TRUNCLOCK);
		truncate_lock_held = TRUE;
	}

	kret = ubc_create_upl(vp, ap->a_f_offset, ap->a_size, &upl, &pl, UPL_UBC_PAGEIN | UPL_RET_ONLY_ABSENT);

	if ((kret != KERN_SUCCESS) || (upl == (upl_t) NULL)) {
		error = EINVAL;
		goto pagein_done;
	}
	isize = ap->a_size;

	/*
	 * Scan from the back of the UPL to find the last present page;
	 * if no page is present at all, abort the whole UPL and return.
	 */
	for (pg_index = ((isize) / PAGE_SIZE); pg_index > 0;) {
		if (upl_page_present(pl, --pg_index))
			break;
		if (pg_index == 0) {
			ubc_upl_abort_range(upl, 0, isize, UPL_ABORT_FREE_ON_EMPTY);
			goto pagein_done;
		}
	}
	/* Trim the working size to the last present page. */
	isize = ((pg_index + 1) * PAGE_SIZE);

	pg_index = 0;
	offset = 0;
	f_offset = ap->a_f_offset;

	while (isize) {
		int  xsize;
		int  num_of_pages;

		/* Skip over absent pages. */
		if ( !upl_page_present(pl, pg_index)) {
			f_offset += PAGE_SIZE;
			offset += PAGE_SIZE;
			isize -= PAGE_SIZE;
			pg_index++;

			continue;
		}
		/* Measure the run of consecutive present pages. */
		num_of_pages = 1;
		xsize = isize - PAGE_SIZE;

		while (xsize) {
			if ( !upl_page_present(pl, pg_index + num_of_pages))
				break;
			num_of_pages++;
			xsize -= PAGE_SIZE;
		}
		xsize = num_of_pages * PAGE_SIZE;

#if HFS_COMPRESSION
		if (VNODE_IS_RSRC(vp)) {
			/* Resource forks are never compressed: normal path. */
		} else {
			int compressed = hfs_file_is_compressed(VTOC(vp), 1);

			if (compressed) {
				/* decmpfs will service this range; it must not be
				 * called while holding the truncate lock. */
				if (truncate_lock_held) {
					hfs_unlock_truncate(cp, 1);
					truncate_lock_held = FALSE;
				}
				ap->a_pl = upl;
				ap->a_pl_offset = offset;
				ap->a_f_offset = f_offset;
				ap->a_size = xsize;

				error = decmpfs_pagein_compressed(ap, &compressed, VTOCMP(vp));
				/*
				 * note that decpfs_pagein_compressed can change the
				 * state of 'compressed'... it will set it to 0 if the
				 * file is no longer compressed once the compression lock
				 * is successfully taken.
				 */
				if (compressed) {
					if (error == 0) {
						/* Successful compressed pagein counts as a read. */
						VTOC(vp)->c_touch_acctime = TRUE;
						if (VTOHFS(vp)->hfc_stage == HFC_RECORDING) {
							fp->ff_bytesread = 0;
						}
					} else if (error == EAGAIN) {
						/* Couldn't get the compression lock: let VM retry
						 * this range later. */
						ubc_upl_abort_range(upl, (upl_offset_t) offset, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_RESTART);
					}
					goto pagein_next_range;
				}
				else {
					/* File was decompressed underneath us. */
					file_converted = TRUE;
				}
			}
			if (file_converted == TRUE) {
				/* Restart from the top so the remainder of the UPL is
				 * handled via the uncompressed path. */
				ubc_upl_abort_range(upl, (upl_offset_t) offset, isize, UPL_ABORT_FREE_ON_EMPTY);
				ap->a_size = isize;
				ap->a_pl = NULL;
				ap->a_pl_offset = 0;

				file_converted = FALSE;
				goto retry_pagein;
			}
		}
#endif
		error = cluster_pagein(vp, upl, offset, f_offset, xsize, (off_t)fp->ff_size, ap->a_flags);

		/*
		 * Hot-file clustering: account bytes read while recording,
		 * and reset the read counter when the atime predates the
		 * current recording period.
		 */
		if ( !vnode_isswap(vp) && VTOHFS(vp)->hfc_stage == HFC_RECORDING && error == 0) {
			int bytesread;
			int took_cnode_lock = 0;

			if (ap->a_f_offset == 0 && fp->ff_size < PAGE_SIZE)
				bytesread = fp->ff_size;
			else
				bytesread = xsize;

			/* When ff_bytesread could overflow 32 bits, take the lock. */
			if ((fp->ff_bytesread + bytesread) > 0x00000000ffffffff && cp->c_lockowner != current_thread()) {
				hfs_lock(cp, HFS_FORCE_LOCK);
				took_cnode_lock = 1;
			}
			if (cp->c_atime < VTOHFS(vp)->hfc_timebase) {
				struct timeval tv;

				fp->ff_bytesread = bytesread;
				microtime(&tv);
				cp->c_atime = tv.tv_sec;
			} else {
				fp->ff_bytesread += bytesread;
			}
			cp->c_touch_acctime = TRUE;
			if (took_cnode_lock)
				hfs_unlock(cp);
		}
pagein_next_range:
		f_offset += xsize;
		offset += xsize;
		isize -= xsize;
		pg_index += num_of_pages;

		/* Note: error is cleared per range, so only an error from the
		 * final range is reported to the caller. */
		error = 0;
	}

pagein_done:
	if (truncate_lock_held == TRUE) {
		/* Note 1 is passed to hfs_unlock_truncate to indicate we may
		 * have a recursive (shared) lock. */
		hfs_unlock_truncate(cp, 1);
	}

	return (error);
}
/*
 * VNOP_PAGEOUT: page out a range of a file.
 *
 * Two modes: "v2" (caller did not supply a UPL — one is created here,
 * trimmed to dirty pages, and pushed out range-by-range under a shared
 * truncate lock) and the legacy path (caller-supplied UPL is pushed out
 * in one call).  Either way, invalid (zero-fill) ranges covered by the
 * pageout are removed, and a successful pageout by a non-superuser
 * clears setuid/setgid.  Returns 0 or an errno value.
 */
int
hfs_vnop_pageout(struct vnop_pageout_args *ap)
{
	vnode_t vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp;
	int retval = 0;
	off_t filesize;
	upl_t upl;
	upl_page_info_t* pl;
	vm_offset_t a_pl_offset;
	int a_flags;
	int is_pageoutv2 = 0;
	kern_return_t kret;

	cp = VTOC(vp);
	fp = VTOF(vp);

	/* Use the larger of the in-flight new size and the current size. */
	filesize = fp->ff_size;
	if (fp->ff_new_size > filesize)
		filesize = fp->ff_new_size;

	a_flags = ap->a_flags;
	a_pl_offset = ap->a_pl_offset;

	if ((upl = ap->a_pl) == NULL) {
		/*
		 * v2 path: no UPL supplied — create one ourselves, which means
		 * we commit/abort it ourselves (strip UPL_NOCOMMIT).
		 */
		int request_flags;

		is_pageoutv2 = 1;

		a_flags &= ~UPL_NOCOMMIT;
		a_pl_offset = 0;

		/* Shared truncate lock guards against the file shrinking
		 * underneath the pageout. */
		hfs_lock_truncate(cp, HFS_SHARED_LOCK);

		if (a_flags & UPL_MSYNC) {
			request_flags = UPL_UBC_MSYNC | UPL_RET_ONLY_DIRTY;
		}
		else {
			request_flags = UPL_UBC_PAGEOUT | UPL_RET_ONLY_DIRTY;
		}

		kret = ubc_create_upl(vp, ap->a_f_offset, ap->a_size, &upl, &pl, request_flags);

		if ((kret != KERN_SUCCESS) || (upl == (upl_t) NULL)) {
			retval = EINVAL;
			goto pageout_done;
		}
	}

	if (is_pageoutv2) {
		off_t f_offset;
		int offset;
		int isize;
		int pg_index;
		int error;
		int error_ret = 0;

		isize = ap->a_size;
		f_offset = ap->a_f_offset;

		/*
		 * Scan from the back of the UPL to find the last present page;
		 * abort the whole UPL if nothing is present.
		 */
		for (pg_index = ((isize) / PAGE_SIZE); pg_index > 0;) {
			if (upl_page_present(pl, --pg_index))
				break;
			if (pg_index == 0) {
				ubc_upl_abort_range(upl, 0, isize, UPL_ABORT_FREE_ON_EMPTY);
				goto pageout_done;
			}
		}
		/* Trim the working size to the last present page. */
		isize = ((pg_index + 1) * PAGE_SIZE);

		offset = 0;
		pg_index = 0;

		while (isize) {
			int xsize;
			int num_of_pages;

			/* Skip over absent pages. */
			if ( !upl_page_present(pl, pg_index)) {
				f_offset += PAGE_SIZE;
				offset += PAGE_SIZE;
				isize -= PAGE_SIZE;
				pg_index++;

				continue;
			}
			/* With UPL_RET_ONLY_DIRTY, present pages must be dirty. */
			if ( !upl_dirty_page(pl, pg_index)) {
				panic ("hfs_vnop_pageout: unforeseen clean page @ index %d for UPL %p\n", pg_index, upl);
			}

			/* Measure the run of consecutive dirty pages. */
			num_of_pages = 1;
			xsize = isize - PAGE_SIZE;

			while (xsize) {
				if ( !upl_dirty_page(pl, pg_index + num_of_pages))
					break;
				num_of_pages++;
				xsize -= PAGE_SIZE;
			}
			xsize = num_of_pages * PAGE_SIZE;

			if (!vnode_isswap(vp)) {
				off_t end_of_range;
				int tooklock;

				tooklock = 0;

				/* Need the cnode lock to adjust the invalid ranges. */
				if (cp->c_lockowner != current_thread()) {
					if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK))) {
						/* Abort what remains of the UPL and bail out. */
						ubc_upl_abort_range(upl,
						                    offset,
						                    ap->a_size - offset,
						                    UPL_ABORT_FREE_ON_EMPTY);
						goto pageout_done;
					}
					tooklock = 1;
				}
				end_of_range = f_offset + xsize - 1;

				if (end_of_range >= filesize) {
					end_of_range = (off_t)(filesize - 1);
				}
				/* Pages being written out no longer need zero-filling. */
				if (f_offset < filesize) {
					rl_remove(f_offset, end_of_range, &fp->ff_invalidranges);
					cp->c_flag |= C_MODIFIED;  /* leof is dirty */
				}
				if (tooklock) {
					hfs_unlock(cp);
				}
			}
			/* Remember only the first error; keep pushing ranges. */
			if ((error = cluster_pageout(vp, upl, offset, f_offset,
			                             xsize, filesize, a_flags))) {
				if (error_ret == 0)
					error_ret = error;
			}
			f_offset += xsize;
			offset += xsize;
			isize -= xsize;
			pg_index += num_of_pages;
		}
		if (error_ret != 0) {
			retval = error_ret;
		}
	}
	else {
		/*
		 * Legacy path: caller supplied the UPL; one pageout call.
		 */
		if (!vnode_isswap(vp)) {
			off_t end_of_range;
			int tooklock = 0;

			if (cp->c_lockowner != current_thread()) {
				if ((retval = hfs_lock(cp, HFS_EXCLUSIVE_LOCK))) {
					if (!(a_flags & UPL_NOCOMMIT)) {
						ubc_upl_abort_range(upl,
						                    a_pl_offset,
						                    ap->a_size,
						                    UPL_ABORT_FREE_ON_EMPTY);
					}
					goto pageout_done;
				}
				tooklock = 1;
			}
			end_of_range = ap->a_f_offset + ap->a_size - 1;

			if (end_of_range >= filesize) {
				end_of_range = (off_t)(filesize - 1);
			}
			/* Pages being written out no longer need zero-filling. */
			if (ap->a_f_offset < filesize) {
				rl_remove(ap->a_f_offset, end_of_range, &fp->ff_invalidranges);
				cp->c_flag |= C_MODIFIED;  /* leof is dirty */
			}

			if (tooklock) {
				hfs_unlock(cp);
			}
		}
		retval = cluster_pageout(vp, upl, a_pl_offset, ap->a_f_offset,
		                         ap->a_size, filesize, a_flags);
	}

	if (retval == 0) {
		cp->c_touch_modtime = TRUE;
		cp->c_touch_chgtime = TRUE;

		/* A successful write by a non-superuser clears setuid/setgid. */
		if ((cp->c_mode & (S_ISUID | S_ISGID)) &&
		    (vfs_context_suser(ap->a_context) != 0)) {
			hfs_lock(cp, HFS_FORCE_LOCK);
			cp->c_mode &= ~(S_ISUID | S_ISGID);
			hfs_unlock(cp);
		}
	}

pageout_done:
	if (is_pageoutv2) {
		/* Release the truncate lock taken on the v2 path. */
		hfs_unlock_truncate(cp, 0);
	}
	return (retval);
}
int
hfs_vnop_bwrite(struct vnop_bwrite_args *ap)
{
int retval = 0;
register struct buf *bp = ap->a_bp;
register struct vnode *vp = buf_vnode(bp);
BlockDescriptor block;
if ((VTOC(vp)->c_fileid == kHFSExtentsFileID) ||
(VTOC(vp)->c_fileid == kHFSCatalogFileID) ||
(VTOC(vp)->c_fileid == kHFSAttributesFileID) ||
(vp == VTOHFS(vp)->hfc_filevp)) {
if (((u_int16_t *)((char *)buf_dataptr(bp) + buf_count(bp) - 2))[0] == 0x000e) {
block.blockHeader = bp;
block.buffer = (char *)buf_dataptr(bp);
block.blockNum = buf_lblkno(bp);
block.blockReadFromDisk = (buf_fromcache(bp) == 0);
block.blockSize = buf_count(bp);
retval = hfs_swap_BTNode (&block, vp, kSwapBTNodeHostToBig, false);
if (retval)
panic("hfs_vnop_bwrite: about to write corrupt node!\n");
}
}
if ((buf_flags(bp) & B_LOCKED)) {
if (VTOHFS(vp)->jnl) {
panic("hfs: CLEARING the lock bit on bp %p\n", bp);
}
buf_clearflags(bp, B_LOCKED);
}
retval = vn_bwrite (ap);
return (retval);
}
/*
 * hfs_relocate - move a file's allocation to a new on-disk location.
 *
 * Used for defragmentation / metadata-zone management: allocate a fresh
 * contiguous run after the existing blocks (ExtendFileC with kEFContigMask),
 * copy the data into the new run (hfs_clonelink / hfs_clonesysfile /
 * hfs_clonefile), then drop the original blocks with HeadTruncateFile.
 * On any failure after the new space was allocated, "restore" truncates the
 * fork back to its original size.
 *
 * BUGFIX: the MapFileBlockC() calls had mojibake arguments ("§or_a" /
 * "§or_b"); restored to the intended "&sector_a" / "&sector_b".
 *
 * Entry: cnode must be locked exclusive; lock may be dropped and re-acquired.
 * Returns 0 on success or an errno value.
 */
int
hfs_relocate(struct vnode *vp, u_int32_t blockHint, kauth_cred_t cred,
    struct proc *p)
{
	struct cnode *cp;
	struct filefork *fp;
	struct hfsmount *hfsmp;
	u_int32_t headblks;
	u_int32_t datablks;
	u_int32_t blksize;
	u_int32_t growsize;
	u_int32_t nextallocsave;
	daddr64_t sector_a, sector_b;
	int eflags;
	off_t newbytes;
	int retval;
	int lockflags = 0;
	int took_trunc_lock = 0;
	int started_tr = 0;
	enum vtype vnodetype;

	vnodetype = vnode_vtype(vp);
	if (vnodetype != VREG && vnodetype != VLNK) {
		return (EPERM);
	}
	hfsmp = VTOHFS(vp);
	if (hfsmp->hfs_flags & HFS_FRAGMENTED_FREESPACE) {
		return (ENOSPC);
	}
	cp = VTOC(vp);
	fp = VTOF(vp);
	/* Cannot relocate a fork with outstanding delayed allocations. */
	if (fp->ff_unallocblocks)
		return (EINVAL);

#if CONFIG_PROTECT
	/* No relocation on content-protected volumes. */
	if (cp_fs_protected (hfsmp->hfs_mp)) {
		return EINVAL;
	}
#endif
	/* Relocation is pointless (and harmful) on SSDs. */
	if (hfsmp->hfs_flags & HFS_SSD) {
		return EINVAL;
	}

	blksize = hfsmp->blockSize;
	if (blockHint == 0)
		blockHint = hfsmp->nextAllocation;

	if ((fp->ff_size > 0x7fffffff) ||
	    ((fp->ff_size > blksize) && vnodetype == VLNK)) {
		return (EFBIG);
	}
	/*
	 * Regular user files need the truncate lock exclusive; drop and
	 * re-take the cnode lock around it to respect lock ordering.
	 */
	if (!vnode_issystem(vp) && (vnodetype != VLNK)) {
		hfs_unlock(cp);
		hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK);
		if ((retval = hfs_lock(cp, HFS_FORCE_LOCK))) {
			hfs_unlock_truncate(cp, 0);
			return (retval);
		}
		if (cp->c_flag & C_NOEXISTS) {
			hfs_unlock_truncate(cp, 0);
			return (ENOENT);
		}
		took_trunc_lock = 1;
	}
	headblks = fp->ff_blocks;
	datablks = howmany(fp->ff_size, blksize);
	growsize = datablks * blksize;
	eflags = kEFContigMask | kEFAllMask | kEFNoClumpMask;
	if (blockHint >= hfsmp->hfs_metazone_start &&
	    blockHint <= hfsmp->hfs_metazone_end)
		eflags |= kEFMetadataMask;

	if (hfs_start_transaction(hfsmp) != 0) {
		if (took_trunc_lock)
			hfs_unlock_truncate(cp, 0);
		return (EINVAL);
	}
	started_tr = 1;

	lockflags = SFL_BITMAP;
	if (overflow_extents(fp))
		lockflags |= SFL_EXTENTS;
	lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

	/* Remember where the current allocation ends (fixed: was "§or_a"). */
	retval = MapFileBlockC(hfsmp, (FCB *)fp, 1, growsize - 1, &sector_a, NULL);
	if (retval) {
		retval = MacToVFSError(retval);
		goto out;
	}
	/*
	 * Allocate a contiguous second copy of the file's blocks.  If we were
	 * steered into the metadata zone, restore the saved allocation pointer
	 * afterwards so normal files don't keep allocating there.
	 */
	nextallocsave = hfsmp->nextAllocation;
	retval = ExtendFileC(hfsmp, (FCB*)fp, growsize, blockHint, eflags, &newbytes);
	if (eflags & kEFMetadataMask) {
		HFS_MOUNT_LOCK(hfsmp, TRUE);
		HFS_UPDATE_NEXT_ALLOCATION(hfsmp, nextallocsave);
		MarkVCBDirty(hfsmp);
		HFS_MOUNT_UNLOCK(hfsmp, TRUE);
	}

	retval = MacToVFSError(retval);
	if (retval == 0) {
		cp->c_flag |= C_MODIFIED;
		if (newbytes < growsize) {
			retval = ENOSPC;
			goto restore;
		} else if (fp->ff_blocks < (headblks + datablks)) {
			printf("hfs_relocate: allocation failed");
			retval = ENOSPC;
			goto restore;
		}

		/* Where does the new allocation begin? (fixed: was "§or_b") */
		retval = MapFileBlockC(hfsmp, (FCB *)fp, 1, growsize, &sector_b, NULL);
		if (retval) {
			retval = MacToVFSError(retval);
		} else if ((sector_a + 1) == sector_b) {
			/* New blocks are adjacent to the old: nothing gained. */
			retval = ENOSPC;
			goto restore;
		} else if ((eflags & kEFMetadataMask) &&
		    ((((u_int64_t)sector_b * hfsmp->hfs_logical_block_size) / blksize) >
		    hfsmp->hfs_metazone_end)) {
#if 0
			const char * filestr;
			char emptystr = '\0';

			if (cp->c_desc.cd_nameptr != NULL) {
				filestr = (const char *)&cp->c_desc.cd_nameptr[0];
			} else if (vnode_name(vp) != NULL) {
				filestr = vnode_name(vp);
			} else {
				filestr = &emptystr;
			}
#endif
			retval = ENOSPC;
			goto restore;
		}
	}
	/* Drop system-file locks and close the transaction before copying. */
	hfs_systemfile_unlock(hfsmp, lockflags);
	lockflags = 0;
	hfs_end_transaction(hfsmp);
	started_tr = 0;

	if (retval) {
		/*
		 * Contiguous allocation failed despite ample free space:
		 * mark the volume fragmented so we stop retrying.
		 */
		if ((retval == ENOSPC) &&
		    (hfs_freeblks(hfsmp, 0) > (datablks * 2))) {
			hfsmp->hfs_flags |= HFS_FRAGMENTED_FREESPACE;
		}
		goto out;
	}
	/* Copy the data from the old blocks into the new run. */
	if (vnodetype == VLNK)
		retval = hfs_clonelink(vp, blksize, cred, p);
	else if (vnode_issystem(vp))
		retval = hfs_clonesysfile(vp, headblks, datablks, blksize, cred, p);
	else
		retval = hfs_clonefile(vp, headblks, datablks, blksize);

	if (hfs_start_transaction(hfsmp) != 0) {
		retval = EINVAL;
		goto out;
	}
	started_tr = 1;

	if (retval)
		goto restore;

	/* Release the original (head) blocks, leaving only the new copy. */
	lockflags = SFL_BITMAP;
	if (overflow_extents(fp))
		lockflags |= SFL_EXTENTS;
	lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);

	retval = HeadTruncateFile(hfsmp, (FCB*)fp, headblks);

	hfs_systemfile_unlock(hfsmp, lockflags);
	lockflags = 0;
	if (retval)
		goto restore;
out:
	if (took_trunc_lock)
		hfs_unlock_truncate(cp, 0);

	if (lockflags) {
		hfs_systemfile_unlock(hfsmp, lockflags);
		lockflags = 0;
	}

	if (retval == 0) {
		(void) hfs_update(vp, MNT_WAIT);
	}
	if (hfsmp->jnl) {
		if (cp->c_cnid < kHFSFirstUserCatalogNodeID)
			(void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
		else
			(void) hfs_flushvolumeheader(hfsmp, MNT_NOWAIT, 0);
	}
exit:
	if (started_tr)
		hfs_end_transaction(hfsmp);

	return (retval);

restore:
	/* Undo the second allocation: truncate back to the original size. */
	if (fp->ff_blocks == headblks) {
		if (took_trunc_lock)
			hfs_unlock_truncate(cp, 0);
		goto exit;
	}
	if (lockflags == 0) {
		lockflags = SFL_BITMAP;
		if (overflow_extents(fp))
			lockflags |= SFL_EXTENTS;
		lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_EXCLUSIVE_LOCK);
	}
	(void) TruncateFileC(hfsmp, (FCB*)fp, fp->ff_size, 0, FORK_IS_RSRC(fp),
	    FTOC(fp)->c_fileid, false);

	hfs_systemfile_unlock(hfsmp, lockflags);
	lockflags = 0;

	if (took_trunc_lock)
		hfs_unlock_truncate(cp, 0);
	goto exit;
}
/*
 * hfs_clonelink - duplicate a symlink's single block.
 *
 * Reads the original link block (logical block 0) and writes a copy into
 * the newly allocated block (logical block 1), then invalidates all cached
 * blocks on the vnode so subsequent reads go to the new location.
 * Returns 0 on success or an errno value.
 */
static int
hfs_clonelink(struct vnode *vp, int blksize, kauth_cred_t cred, __unused struct proc *p)
{
	struct buf *src_bp = NULL;
	int error;

	/* Bring in the existing (head) copy of the link data. */
	error = (int)buf_meta_bread(vp, (daddr64_t)0, blksize, cred, &src_bp);
	if (error == 0) {
		struct buf *dst_bp;

		/* Grab an empty buffer for the new (tail) copy. */
		dst_bp = buf_getblk(vp, (daddr64_t)1, blksize, 0, 0, BLK_META);
		if (dst_bp == NULL) {
			error = EIO;
		} else {
			bcopy((char *)buf_dataptr(src_bp), (char *)buf_dataptr(dst_bp), blksize);
			error = (int)buf_bwrite(dst_bp);
		}
	}
	if (src_bp) {
		buf_markinvalid(src_bp);
		buf_brelse(src_bp);
	}
	/* Flush any remaining cached blocks for this vnode. */
	(void) buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0);
	return (error);
}
static int
hfs_clonefile(struct vnode *vp, int blkstart, int blkcnt, int blksize)
{
caddr_t bufp;
size_t bufsize;
size_t copysize;
size_t iosize;
size_t offset;
off_t writebase;
uio_t auio;
int error = 0;
writebase = blkstart * blksize;
copysize = blkcnt * blksize;
iosize = bufsize = MIN(copysize, 128 * 1024);
offset = 0;
hfs_unlock(VTOC(vp));
#if CONFIG_PROTECT
if ((error = cp_handle_vnop(VTOC(vp), CP_WRITE_ACCESS)) != 0) {
hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
return (error);
}
#endif
if (kmem_alloc(kernel_map, (vm_offset_t *)&bufp, bufsize)) {
hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
return (ENOMEM);
}
auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
while (offset < copysize) {
iosize = MIN(copysize - offset, iosize);
uio_reset(auio, offset, UIO_SYSSPACE, UIO_READ);
uio_addiov(auio, (uintptr_t)bufp, iosize);
error = cluster_read(vp, auio, copysize, IO_NOCACHE);
if (error) {
printf("hfs_clonefile: cluster_read failed - %d\n", error);
break;
}
if (uio_resid(auio) != 0) {
printf("hfs_clonefile: cluster_read: uio_resid = %lld\n", uio_resid(auio));
error = EIO;
break;
}
uio_reset(auio, writebase + offset, UIO_SYSSPACE, UIO_WRITE);
uio_addiov(auio, (uintptr_t)bufp, iosize);
error = cluster_write(vp, auio, writebase + offset,
writebase + offset + iosize,
uio_offset(auio), 0, IO_NOCACHE | IO_SYNC);
if (error) {
printf("hfs_clonefile: cluster_write failed - %d\n", error);
break;
}
if (uio_resid(auio) != 0) {
printf("hfs_clonefile: cluster_write failed - uio_resid not zero\n");
error = EIO;
break;
}
offset += iosize;
}
uio_free(auio);
if ((blksize & PAGE_MASK)) {
ubc_msync(vp, writebase, writebase + offset, NULL, UBC_INVALIDATE | UBC_PUSHDIRTY);
} else {
}
kmem_free(kernel_map, (vm_offset_t)bufp, bufsize);
hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
return (error);
}
/*
 * hfs_clonesysfile - copy a system file's data into its new allocation.
 *
 * Copies blkcnt allocation blocks via the buffer cache: batches of up to
 * 1 MiB are read (buf_meta_bread) from the head of the fork, then written
 * (buf_getblk + buf_bwrite) at the corresponding offset past start_blk.
 * Finishes with a synchronous fsync.
 *
 * BUGFIXES:
 *  - the short-read path (buf_count(bp) != iosize) used to jump to "out"
 *    with error still 0, silently reporting success on a truncated buffer;
 *    it now returns EIO.
 *  - bufsize was computed with int*int multiplication (blkcnt * blksize),
 *    which can overflow; the operand is now widened first.
 *
 * Returns 0 on success or an errno value.
 */
static int
hfs_clonesysfile(struct vnode *vp, int blkstart, int blkcnt, int blksize,
    kauth_cred_t cred, struct proc *p)
{
	caddr_t bufp;
	char * offset;
	size_t bufsize;
	size_t iosize;
	struct buf *bp = NULL;
	daddr64_t blkno;
	daddr64_t blk;
	daddr64_t start_blk;
	daddr64_t last_blk;
	int breadcnt;
	int i;
	int error = 0;

	iosize = GetLogicalBlockSize(vp);
	/* Batch size: at most 1 MiB, rounded down to a logical-block multiple. */
	bufsize = MIN((size_t)blkcnt * blksize, 1024 * 1024) & ~(iosize - 1);
	breadcnt = bufsize / iosize;

	if (kmem_alloc(kernel_map, (vm_offset_t *)&bufp, bufsize)) {
		return (ENOMEM);
	}
	/* Convert allocation-block positions into logical-block numbers. */
	start_blk = ((daddr64_t)blkstart * blksize) / iosize;
	last_blk  = ((daddr64_t)blkcnt * blksize) / iosize;
	blkno = 0;

	while (blkno < last_blk) {
		/* Fill the bounce buffer from the original (head) blocks. */
		offset = bufp;
		for (i = 0, blk = blkno; (i < breadcnt) && (blk < last_blk); ++i, ++blk) {
			error = (int)buf_meta_bread(vp, blk, iosize, cred, &bp);
			if (error) {
				printf("hfs_clonesysfile: meta_bread error %d\n", error);
				goto out;
			}
			if (buf_count(bp) != iosize) {
				printf("hfs_clonesysfile: b_bcount is only %d\n", buf_count(bp));
				/* Short buffer: fail explicitly (was silently 0). */
				error = EIO;
				goto out;
			}
			bcopy((char *)buf_dataptr(bp), offset, iosize);

			buf_markinvalid(bp);
			buf_brelse(bp);
			bp = NULL;

			offset += iosize;
		}

		/* Write the batch out at the new (tail) location. */
		offset = bufp;
		for (i = 0; (i < breadcnt) && (blkno < last_blk); ++i, ++blkno) {
			bp = buf_getblk(vp, start_blk + blkno, iosize, 0, 0, BLK_META);
			if (bp == NULL) {
				printf("hfs_clonesysfile: getblk failed on blk %qd\n", start_blk + blkno);
				error = EIO;
				goto out;
			}
			bcopy(offset, (char *)buf_dataptr(bp), iosize);
			error = (int)buf_bwrite(bp);
			bp = NULL;
			if (error)
				goto out;
			offset += iosize;
		}
	}
out:
	if (bp) {
		buf_brelse(bp);
	}

	kmem_free(kernel_map, (vm_offset_t)bufp, bufsize);

	error = hfs_fsync(vp, MNT_WAIT, 0, p);

	return (error);
}