#include <rev_endian_fs.h>
#include <sys/param.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/trace.h>
#include <sys/quota.h>
#include <miscfs/specfs/specdev.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>
#if REV_ENDIAN_FS
#include <ufs/ufs/ufs_byte_order.h>
#include <architecture/byte_order.h>
#endif
int
ufs_bmap(ap)
struct vop_bmap_args *ap;
{
if (ap->a_vpp != NULL)
*ap->a_vpp = VTOI(ap->a_vp)->i_devvp;
if (ap->a_bnp == NULL)
return (0);
return (ufs_bmaparray(ap->a_vp, ap->a_bn, ap->a_bnp, NULL, NULL,
ap->a_runp));
}
int
ufs_bmaparray(vp, bn, bnp, ap, nump, runp)
struct vnode *vp;
ufs_daddr_t bn;
ufs_daddr_t *bnp;
struct indir *ap;
int *nump;
int *runp;
{
register struct inode *ip;
struct buf *bp;
struct ufsmount *ump;
struct mount *mp;
struct vnode *devvp;
struct indir a[NIADDR], *xap;
ufs_daddr_t daddr;
long metalbn;
int error, maxrun, num;
#if REV_ENDIAN_FS
int rev_endian=0;
#endif
ip = VTOI(vp);
mp = vp->v_mount;
ump = VFSTOUFS(mp);
#if REV_ENDIAN_FS
rev_endian=(mp->mnt_flag & MNT_REVEND);
#endif
#if DIAGNOSTIC
if (ap != NULL && nump == NULL || ap == NULL && nump != NULL)
panic("ufs_bmaparray: invalid arguments");
#endif
if (runp) {
*runp = 0;
maxrun = MAXPHYSIO / mp->mnt_stat.f_iosize - 1;
}
xap = ap == NULL ? a : ap;
if (!nump)
nump = #
if (error = ufs_getlbns(vp, bn, xap, nump))
return (error);
num = *nump;
if (num == 0) {
*bnp = blkptrtodb(ump, ip->i_db[bn]);
if (*bnp == 0)
*bnp = -1;
else if (runp)
for (++bn; bn < NDADDR && *runp < maxrun &&
is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
++bn, ++*runp);
return (0);
}
daddr = ip->i_ib[xap->in_off];
devvp = VFSTOUFS(vp->v_mount)->um_devvp;
for (bp = NULL, ++xap; --num; ++xap) {
metalbn = xap->in_lbn;
if (daddr == 0 && !incore(vp, metalbn) || metalbn == bn)
break;
if (bp)
brelse(bp);
xap->in_exists = 1;
bp = getblk(vp, metalbn, mp->mnt_stat.f_iosize, 0, 0, BLK_META);
if (bp->b_flags & (B_DONE | B_DELWRI)) {
trace(TR_BREADHIT, pack(vp, mp->mnt_stat.f_iosize), metalbn);
}
#if DIAGNOSTIC
else if (!daddr)
panic("ufs_bmaparry: indirect block not in cache");
#endif
else {
trace(TR_BREADMISS, pack(vp, mp->mnt_stat.f_iosize), metalbn);
bp->b_blkno = blkptrtodb(ump, daddr);
bp->b_flags |= B_READ;
VOP_STRATEGY(bp);
current_proc()->p_stats->p_ru.ru_inblock++;
if (error = biowait(bp)) {
brelse(bp);
return (error);
}
}
daddr = ((ufs_daddr_t *)bp->b_data)[xap->in_off];
#if REV_ENDIAN_FS
if (rev_endian)
daddr = NXSwapLong(daddr);
#endif
if (num == 1 && daddr && runp) {
#if REV_ENDIAN_FS
if (rev_endian) {
for (bn = xap->in_off + 1;
bn < MNINDIR(ump) && *runp < maxrun &&
is_sequential(ump,
NXSwapLong(((ufs_daddr_t *)bp->b_data)[bn - 1]),
NXSwapLong(((ufs_daddr_t *)bp->b_data)[bn]));
++bn, ++*runp);
} else {
#endif
for (bn = xap->in_off + 1;
bn < MNINDIR(ump) && *runp < maxrun &&
is_sequential(ump,
((ufs_daddr_t *)bp->b_data)[bn - 1],
((ufs_daddr_t *)bp->b_data)[bn]);
++bn, ++*runp);
#if REV_ENDIAN_FS
}
#endif
}
}
if (bp)
brelse(bp);
daddr = blkptrtodb(ump, daddr);
*bnp = daddr == 0 ? -1 : daddr;
return (0);
}
/*
 * ufs_getlbns: compute the path of indirect blocks that must be
 * traversed to reach logical block bn of the file behind vp.  The path
 * is stored in the array ap (at most NIADDR entries) and its length in
 * *nump; *nump == 0 means bn is a direct block.  A negative bn names a
 * metadata (indirect) block itself.  Returns 0, or EFBIG when bn lies
 * beyond the reach of NIADDR levels of indirection.
 */
int
ufs_getlbns(vp, bn, ap, nump)
	struct vnode *vp;
	ufs_daddr_t bn;
	struct indir *ap;
	int *nump;
{
	long metalbn, realbn;
	struct ufsmount *ump;
	int blockcnt, i, numlevels, off;

	ump = VFSTOUFS(vp->v_mount);
	if (nump)
		*nump = 0;
	numlevels = 0;
	realbn = bn;
	/* A negative bn names a metadata block; work with its magnitude. */
	if ((long)bn < 0)
		bn = -(long)bn;

	/* The first NDADDR blocks are direct: no indirection needed. */
	if (bn < NDADDR)
		return (0);

	/*
	 * Determine the number of levels of indirection.  After this loop
	 * i selects the level (NIADDR..1 counting down), bn is the offset
	 * within that level, and blockcnt is the number of data blocks one
	 * pointer at that level spans.
	 */
	for (blockcnt = 1, i = NIADDR, bn -= NDADDR;; i--, bn -= blockcnt) {
		if (i == 0)
			return (EFBIG);
		blockcnt *= MNINDIR(ump);
		if (bn < blockcnt)
			break;
	}

	/*
	 * Compute the (negative) logical block number of the top-level
	 * indirect block on this path.
	 */
	if (realbn >= 0)
		metalbn = -(realbn - bn + NIADDR - i);
	else
		metalbn = -(-realbn - bn + NIADDR - i);

	/*
	 * Record the top-level entry: in_off indexes the inode's i_ib[]
	 * array, in_lbn is the indirect block's own logical block number.
	 */
	ap->in_lbn = metalbn;
	ap->in_off = off = NIADDR - i;
	ap->in_exists = 0;
	ap++;

	for (++numlevels; i <= NIADDR; i++) {
		/* If searching for a metadata block, quit when found. */
		if (metalbn == realbn)
			break;

		/* Descend one level: each pointer now spans fewer blocks. */
		blockcnt /= MNINDIR(ump);
		off = (bn / blockcnt) % MNINDIR(ump);

		++numlevels;
		ap->in_lbn = metalbn;
		ap->in_off = off;
		ap->in_exists = 0;
		++ap;

		metalbn -= -1 + off * blockcnt;
	}
	if (nump)
		*nump = numlevels;
	return (0);
}
/*
 * ufs_cmap: VOP_CMAP entry point.  Map the block-aligned file offset
 * a_foffset to a device block number in *a_bpn (-1 for a hole) and
 * report in *a_run how many contiguous bytes, up to a_size, can be
 * transferred starting there.
 */
int
ufs_cmap(ap)
	struct vop_cmap_args *ap;
{
	struct vnode * vp = ap->a_vp;
	ufs_daddr_t *bnp = ap->a_bpn;
	size_t *runp = ap->a_run;
	int size = ap->a_size;
	daddr_t bn;
	int nblks;
	register struct inode *ip;
	ufs_daddr_t daddr = 0;
	int devBlockSize=0;
	struct fs *fs;
	int retsize=0;
	int error=0;

	ip = VTOI(vp);
	fs = ip->i_fs;

	/* Mappings must start on a filesystem block boundary. */
	if (blkoff(fs, ap->a_foffset)) {
		panic("ufs_cmap; allocation requested inside a block");
	}

	bn = (daddr_t)lblkno(fs, ap->a_foffset);

	VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);
	if (size % devBlockSize) {
		panic("ufs_cmap: size is not multiple of device block size\n");
	}

	/* nblks receives the count of contiguous blocks that follow bn. */
	if (error = VOP_BMAP(vp, bn, (struct vnode **) 0, &daddr, &nblks)) {
		return(error);
	}
	retsize = nblks * fs->fs_bsize;

	if (bnp)
		*bnp = daddr;
	if (ap->a_poff)
		*(int *)ap->a_poff = 0;

	if (daddr == -1) {
		/*
		 * Hole: report how much of the request falls within this
		 * unallocated block.  Guard runp here too -- the original
		 * stored through it unconditionally, unlike the allocated
		 * path below.
		 */
		if (runp) {
			if (size < fs->fs_bsize) {
				retsize = fragroundup(fs, size);
				if (size >= retsize)
					*runp = retsize;
				else
					*runp = size;
			} else {
				*runp = fs->fs_bsize;
			}
		}
		return(0);
	}

	if (runp) {
		if (size < fs->fs_bsize) {
			*runp = size;
			return(0);
		}
		if (retsize) {
			/* The first block plus the contiguous run behind it. */
			retsize += fs->fs_bsize;
			if (size >= retsize)
				*runp = retsize;
			else
				*runp = size;
		} else {
			/*
			 * No contiguous run.  size >= fs_bsize here (the
			 * smaller case returned above), so offer one block.
			 */
			*runp = fs->fs_bsize;
		}
	}
	return (0);
}
#if NOTTOBEUSED
/*
 * Earlier, disabled implementation of ufs_cmap that walks the indirect
 * block chain itself (mirroring ufs_bmaparray) instead of delegating to
 * VOP_BMAP.  Compiled out via NOTTOBEUSED; kept for reference only.
 *
 * NOTE(review): if this is ever re-enabled, `maxrun` is read in the
 * run-length loops below but is never assigned anywhere in this
 * function, so those loop bounds are garbage.
 */
int
ufs_cmap(ap)
	struct vop_cmap_args *ap;
{
	struct vnode * vp = ap->a_vp;
	ufs_daddr_t *bnp = ap->a_bpn;
	size_t *runp = ap->a_run;
	daddr_t bn;
	int nblks, blks;
	int *nump;
	register struct inode *ip;
	struct buf *bp;
	struct ufsmount *ump;
	struct mount *mp;
	struct vnode *devvp;
	struct indir a[NIADDR], *xap;
	ufs_daddr_t daddr;
	long metalbn;
	int error, maxrun, num;
	int devBlockSize=0;
	struct fs *fs;
	int size = ap->a_size;
	int block_offset=0;
	int retsize=0;
#if 1
	/* Debug cross-check state: the VOP_BMAP view of the same offset. */
	daddr_t orig_blkno;
	daddr_t orig_bblkno;
#endif
#if REV_ENDIAN_FS
	int rev_endian=0;
#endif

	ip = VTOI(vp);
	fs = ip->i_fs;
	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
	VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);
	bn = (daddr_t)lblkno(fs, ap->a_foffset);
	if (size % devBlockSize) {
		panic("ufs_cmap: size is not multiple of device block size\n");
	}
	/* The mapping must start on a filesystem block boundary. */
	block_offset = blkoff(fs, ap->a_foffset);
	if (block_offset) {
		panic("ufs_cmap; allocation requested inside a block");
	}
#if 1
	VOP_OFFTOBLK(vp, ap->a_foffset, & orig_blkno);
#endif
	if ( (size < fs->fs_bsize) && fragoff(fs, size) && block_offset ) {
		panic("ffs_cmap: size not a mult of fragment\n");
	}
#if 0
	if (size > fs->fs_bsize && fragoff(fs, size)) {
		panic("ffs_cmap: more than bsize & not a multiple of fragment\n");
	}
#endif
#if REV_ENDIAN_FS
	rev_endian=(mp->mnt_flag & MNT_REVEND);
#endif
	if(runp)
		*runp = 0;
	/* Cap the request at one maximal physical transfer. */
	if ( size > MAXPHYSIO)
		size = MAXPHYSIO;
	nblks = (blkroundup(fs, size))/fs->fs_bsize;
	xap = a;
	num = 0;
	if (error = ufs_getlbns(vp, bn, xap, &num))
		return (error);

	blks = 0;
	if (num == 0) {
		/* Direct block: the pointer comes straight from the inode. */
		daddr = blkptrtodb(ump, ip->i_db[bn]);
		*bnp = ((daddr == 0) ? -1 : daddr);
		if (daddr && runp) {
			for (++bn; bn < NDADDR && blks < nblks &&
			    ip->i_db[bn] &&
			    is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
			    ++bn, ++blks);
			if (blks) {
				retsize = lblktosize(fs, blks);
				if(size >= retsize)
					*runp = retsize;
				else
					*runp = size;
			} else {
				if (size < fs->fs_bsize) {
					retsize = fragroundup(fs, size);
					if(size >= retsize)
						*runp = retsize;
					else
						*runp = size;
				} else {
					*runp = fs->fs_bsize;
				}
			}
			if (ap->a_poff)
				*(int *)ap->a_poff = 0;
		}
#if 1
		/* Debug: the answer must agree with VOP_BMAP. */
		if (VOP_BMAP(vp, orig_blkno, NULL, &orig_bblkno, NULL)) {
			panic("vop_bmap failed\n");
		}
		if(daddr != orig_bblkno) {
			panic("vop_bmap and vop_cmap differ\n");
		}
#endif
		return (0);
	}

	/* Walk the chain of indirect blocks, as in ufs_bmaparray(). */
	daddr = ip->i_ib[xap->in_off];
	devvp = VFSTOUFS(vp->v_mount)->um_devvp;
	for (bp = NULL, ++xap; --num; ++xap) {
		metalbn = xap->in_lbn;
		if (daddr == 0 || metalbn == bn)
			break;

		if (bp)
			brelse(bp);

		xap->in_exists = 1;
		bp = getblk(vp, metalbn, mp->mnt_stat.f_iosize, 0, 0, BLK_META);
		if (bp->b_flags & (B_DONE | B_DELWRI)) {
			/* Already valid (or dirty) in the cache: no I/O. */
			trace(TR_BREADHIT, pack(vp, mp->mnt_stat.f_iosize), metalbn);
		}
		else {
			/* Read the indirect block from disk and wait for it. */
			trace(TR_BREADMISS, pack(vp, mp->mnt_stat.f_iosize), metalbn);
			bp->b_blkno = blkptrtodb(ump, daddr);
			bp->b_flags |= B_READ;
			VOP_STRATEGY(bp);
			current_proc()->p_stats->p_ru.ru_inblock++;
			if (error = biowait(bp)) {
				brelse(bp);
				return (error);
			}
		}

		daddr = ((ufs_daddr_t *)bp->b_data)[xap->in_off];
#if REV_ENDIAN_FS
		if (rev_endian)
			daddr = NXSwapLong(daddr);
#endif
		if (num == 1 && daddr && runp) {
			blks = 0;
			/* NOTE(review): maxrun below is uninitialized here. */
#if REV_ENDIAN_FS
			if (rev_endian) {
				for (bn = xap->in_off + 1;
				    bn < MNINDIR(ump) && blks < maxrun &&
				    is_sequential(ump,
				    NXSwapLong(((ufs_daddr_t *)bp->b_data)[bn - 1]),
				    NXSwapLong(((ufs_daddr_t *)bp->b_data)[bn]));
				    ++bn, ++blks);
			} else {
#endif
			for (bn = xap->in_off + 1;
			    bn < MNINDIR(ump) && blks < maxrun &&
			    is_sequential(ump,
			    ((ufs_daddr_t *)bp->b_data)[bn - 1],
			    ((ufs_daddr_t *)bp->b_data)[bn]);
			    ++bn, ++blks);
#if REV_ENDIAN_FS
			}
#endif
		}
	}
	if (bp)
		brelse(bp);

	daddr = blkptrtodb(ump, daddr);
	*bnp = ((daddr == 0) ? -1 : daddr);
	if (daddr && runp) {
		if (blks) {
			retsize = lblktosize(fs, blks);
			if(size >= retsize)
				*runp = retsize;
			else
				*runp = size;
		} else {
			if (size < fs->fs_bsize) {
				retsize = fragroundup(fs, size);
				if(size >= retsize)
					*runp = retsize;
				else
					*runp = size;
			} else {
				*runp = fs->fs_bsize;
			}
		}
	}
	if (daddr && ap->a_poff)
		*(int *)ap->a_poff = 0;
#if 1
	/* Debug: the answer must agree with VOP_BMAP. */
	if (VOP_BMAP(vp, orig_blkno, (struct vnode **) 0, &orig_bblkno, 0)) {
		panic("vop_bmap failed\n");
	}
	if(daddr != orig_bblkno) {
		panic("vop_bmap and vop_cmap differ\n");
	}
#endif
	return (0);
}
#endif /* NOTTOBEUSED */