/* VolumeAllocation.c */
#include <sys/types.h>
#include <sys/buf.h>
#if HFS_ALLOC_TEST
#define KERNEL_DEBUG_CONSTANT(x, a, b, c, d, e) do {} while (0)
#else // !HFS_ALLOC_TEST
#include "hfs_macos_defs.h"
#include <sys/systm.h>
#include <sys/ubc.h>
#include <libkern/libkern.h>
#include "hfs_journal.h"
#include "hfs.h"
#include "hfs_endian.h"
#include "FileMgrInternal.h"
#endif // !HFS_ALLOC_TEST
#include <sys/sysctl.h>
#include <sys/disk.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include "hfs_dbg.h"
#include "hfs_format.h"
#include "hfs_kdebug.h"
#include "rangelist.h"
#include "hfs_extents.h"
#include <sys/disk.h>
/* Runtime-tunable kdebug tracing mask for the allocator (vfs.generic.hfs.kdebug.allocation). */
static int hfs_kdebug_allocation = 0;
SYSCTL_DECL(_vfs_generic);
HFS_SYSCTL(NODE, _vfs_generic, OID_AUTO, hfs, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "HFS file system")
HFS_SYSCTL(NODE, _vfs_generic_hfs, OID_AUTO, kdebug, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "HFS kdebug")
HFS_SYSCTL(INT, _vfs_generic_hfs_kdebug, OID_AUTO, allocation, CTLFLAG_RW|CTLFLAG_LOCKED, &hfs_kdebug_allocation, 0, "Enable kdebug logging for HFS allocations")
/* Bits for hfs_kdebug_allocation: which allocator paths emit kdebug events. */
enum {
	HFSDBG_ALLOC_ENABLED = 1,       /* block allocate/deallocate entry points */
	HFSDBG_EXT_CACHE_ENABLED = 2,   /* presumably free-extent cache tracing — used outside this chunk */
	HFSDBG_UNMAP_ENABLED = 4,       /* TRIM/unmap activity */
	HFSDBG_BITMAP_ENABLED = 8       /* bitmap block read/release */
};
/* Geometry of the allocation bitmap viewed as an array of 32-bit words. */
enum {
	kBytesPerWord = 4,
	kBitsPerByte = 8,
	kBitsPerWord = 32,
	kBitsWithinWordMask = kBitsPerWord-1
};
#define kLowBitInWordMask 0x00000001ul
#define kHighBitInWordMask 0x80000000ul
#define kAllBitsSetInWord 0xFFFFFFFFul
/* NOTE(review): presumably the minimum vcbVBMIOSize for which the summary
 * table is maintained — confirm against hfs_init_summary. */
#define HFS_MIN_SUMMARY_BLOCKSIZE 4096
/* Compile-time switch for expensive allocator self-checks (panic on inconsistency). */
#define ALLOC_DEBUG 0
/* --- Bitmap I/O helpers --- */
static OSErr ReadBitmapBlock(
	ExtendedVCB *vcb,
	u_int32_t bit,
	u_int32_t **buffer,
	uintptr_t *blockRef,
	hfs_block_alloc_flags_t flags);
static OSErr ReleaseBitmapBlock(
	ExtendedVCB *vcb,
	uintptr_t blockRef,
	Boolean dirty);
/* --- Core allocation engine --- */
static OSErr hfs_block_alloc_int(hfsmount_t *hfsmp,
	HFSPlusExtentDescriptor *extent,
	hfs_block_alloc_flags_t flags,
	hfs_alloc_extra_args_t *ap);
/* Find any free extent of up to maxBlocks starting in [startingBlock, endingBlock). */
static OSErr BlockFindAny(
	ExtendedVCB *vcb,
	u_int32_t startingBlock,
	u_int32_t endingBlock,
	u_int32_t maxBlocks,
	hfs_block_alloc_flags_t flags,
	Boolean trustSummary,
	u_int32_t *actualStartBlock,
	u_int32_t *actualNumBlocks);
/* As BlockFindAny, but scans the raw bitmap without consulting the summary table. */
static OSErr BlockFindAnyBitmap(
	ExtendedVCB *vcb,
	u_int32_t startingBlock,
	u_int32_t endingBlock,
	u_int32_t maxBlocks,
	hfs_block_alloc_flags_t flags,
	u_int32_t *actualStartBlock,
	u_int32_t *actualNumBlocks);
/* Find a single contiguous run of at least minBlocks (up to maxBlocks). */
static OSErr BlockFindContig(
	ExtendedVCB *vcb,
	u_int32_t startingBlock,
	u_int32_t minBlocks,
	u_int32_t maxBlocks,
	hfs_block_alloc_flags_t flags,
	u_int32_t *actualStartBlock,
	u_int32_t *actualNumBlocks);
static OSErr BlockFindContiguous(
	ExtendedVCB *vcb,
	u_int32_t startingBlock,
	u_int32_t endingBlock,
	u_int32_t minBlocks,
	u_int32_t maxBlocks,
	Boolean useMetaZone,
	Boolean trustSummary,
	u_int32_t *actualStartBlock,
	u_int32_t *actualNumBlocks,
	hfs_block_alloc_flags_t flags);
/* Satisfy an allocation from the in-memory free-extent cache. */
static OSErr BlockFindKnown(
	ExtendedVCB *vcb,
	u_int32_t maxBlocks,
	u_int32_t *actualStartBlock,
	u_int32_t *actualNumBlocks);
static OSErr hfs_alloc_try_hard(hfsmount_t *hfsmp,
	HFSPlusExtentDescriptor *extent,
	uint32_t max_blocks,
	hfs_block_alloc_flags_t flags);
/* --- Bitmap mark/clear primitives --- */
static OSErr BlockMarkAllocatedInternal (
	ExtendedVCB *vcb,
	u_int32_t startingBlock,
	u_int32_t numBlocks,
	hfs_block_alloc_flags_t flags);
static OSErr BlockMarkFreeInternal(
	ExtendedVCB *vcb,
	u_int32_t startingBlock,
	u_int32_t numBlocks,
	Boolean do_validate);
/* --- Mount-time scan / TRIM helpers --- */
static OSErr ReadBitmapRange (struct hfsmount *hfsmp, uint32_t offset, uint32_t iosize,
	uint32_t **buffer, struct buf **blockRef);
static OSErr ReleaseScanBitmapRange( struct buf *bp );
static int hfs_track_unmap_blocks (struct hfsmount *hfsmp, u_int32_t offset,
	u_int32_t numBlocks, struct jnl_trim_list *list);
static int hfs_issue_unmap (struct hfsmount *hfsmp, struct jnl_trim_list *list);
static int hfs_alloc_scan_range(struct hfsmount *hfsmp,
	u_int32_t startbit,
	u_int32_t *bitToScan,
	struct jnl_trim_list *list);
static int hfs_scan_range_size (struct hfsmount* hfsmp, uint32_t start, uint32_t *iosize);
static uint32_t CheckUnmappedBytes (struct hfsmount *hfsmp, uint64_t blockno, uint64_t numblocks, int *recent, uint32_t *next);
/*
 * Return nonzero when the half-open block ranges [start1, start1+len1) and
 * [start2, start2+len2) share at least one block.  A zero-length range is
 * treated as overlapping anything that straddles its position (same
 * semantics as the original complemented-disjointness test).
 */
static inline int extents_overlap (uint32_t start1, uint32_t len1,
	uint32_t start2, uint32_t len2) {
	const uint32_t end1 = start1 + len1;
	const uint32_t end2 = start2 + len2;
	/* Overlap iff each range begins before the other one ends. */
	return (end1 > start2) && (end2 > start1);
}
int hfs_isallocated_scan (struct hfsmount *hfsmp,
	u_int32_t startingBlock,
	u_int32_t *bp_buf);
/* --- Summary-table maintenance --- */
static int hfs_set_summary (struct hfsmount *hfsmp, uint32_t summarybit, uint32_t inuse);
static int hfs_get_summary_index (struct hfsmount *hfsmp, uint32_t block, uint32_t *index);
static int hfs_find_summary_free (struct hfsmount *hfsmp, uint32_t block, uint32_t *newblock);
static int hfs_get_summary_allocblock (struct hfsmount *hfsmp, uint32_t summarybit, uint32_t *alloc);
static int hfs_release_summary (struct hfsmount *hfsmp, uint32_t start, uint32_t length);
static int hfs_check_summary (struct hfsmount *hfsmp, uint32_t start, uint32_t *freeblocks);
static int hfs_rebuild_summary (struct hfsmount *hfsmp);
#if 0
static int hfs_get_next_summary (struct hfsmount *hfsmp, uint32_t block, uint32_t *newblock);
#endif
int hfs_init_summary (struct hfsmount *hfsmp);
#if ALLOC_DEBUG
void hfs_validate_summary (struct hfsmount *hfsmp);
#endif
/* --- Free-extent cache --- */
static void remove_free_extent_cache(struct hfsmount *hfsmp, u_int32_t startBlock, u_int32_t blockCount);
static Boolean add_free_extent_cache(struct hfsmount *hfsmp, u_int32_t startBlock, u_int32_t blockCount);
static void sanity_check_free_ext(struct hfsmount *hfsmp, int check_allocated);
static void hfs_release_reserved(hfsmount_t *hfsmp, struct rl_entry *range, int list);
/* Cursor state carried across chunked bitmap walks (see bit-count helpers below). */
typedef struct bitmap_context {
	void *bitmap;                    /* current mapped window of the allocation bitmap */
	uint32_t run_offset;             /* absolute bit position of the walk */
	uint32_t chunk_current;          /* bits consumed within the current chunk */
	uint32_t chunk_end;              /* bits available in the current chunk */
	struct hfsmount *hfsmp;
	struct buf *bp;                  /* buffer backing the current window */
	uint32_t last_free_summary_bit;  /* last summary bit observed free */
	int lockflags;
	uint64_t lock_start;
} bitmap_context_t;
static errno_t get_more_bits(bitmap_context_t *bitmap_ctx);
static int bit_count_set(void *bitmap, int start, int end);
static int bit_count_clr(void *bitmap, int start, int end);
static errno_t hfs_bit_count(bitmap_context_t *bitmap_ctx, int (*fn)(void *, int ,int), uint32_t *bit_count);
static errno_t hfs_bit_count_set(bitmap_context_t *bitmap_ctx, uint32_t *count);
static errno_t hfs_bit_count_clr(bitmap_context_t *bitmap_ctx, uint32_t *count);
static errno_t update_summary_table(bitmap_context_t *bitmap_ctx, uint32_t start, uint32_t count, bool set);
static int clzll(uint64_t x);
#if ALLOC_DEBUG
/*
 * Debug-only consistency check: walk the active journal transaction's pending
 * TRIM list and verify that none of the extents queued for unmap are still
 * marked allocated in the bitmap.  Panics on the first violation; always
 * returns 0 otherwise.
 */
int trim_validate_bitmap (struct hfsmount *hfsmp);
int trim_validate_bitmap (struct hfsmount *hfsmp) {
	u_int64_t blockno_offset;
	u_int64_t numblocks;
	int i;
	int count;
	u_int32_t startblk;
	u_int32_t blks;
	int err = 0;
	uint32_t alloccount = 0;
	if (hfsmp->jnl) {
		struct journal *jnl = (struct journal*)hfsmp->jnl;
		if (jnl->active_tr) {
			struct jnl_trim_list *trim = &(jnl->active_tr->trim);
			count = trim->extent_count;
			for (i = 0; i < count; i++) {
				/* Convert the device-byte extent back into allocation blocks. */
				blockno_offset = trim->extents[i].offset;
				blockno_offset = blockno_offset - (uint64_t)hfsmp->hfsPlusIOPosOffset;
				blockno_offset = blockno_offset / hfsmp->blockSize;
				numblocks = trim->extents[i].length / hfsmp->blockSize;
				startblk = (u_int32_t)blockno_offset;
				blks = (u_int32_t) numblocks;
				err = hfs_count_allocated (hfsmp, startblk, blks, &alloccount);
				if (err == 0 && alloccount != 0) {
					panic ("trim_validate_bitmap: %d blocks @ ABN %d are allocated!", alloccount, startblk);
				}
			}
		}
	}
	return 0;
}
#endif
/*
 * Queue a newly-freed run of allocation blocks with the journal's TRIM list
 * so the device can be told the space is unused once the transaction commits.
 * No-op on non-journaled volumes.  Errors are logged but not propagated —
 * TRIM is strictly best-effort.
 */
static void hfs_unmap_free_extent(struct hfsmount *hfsmp, u_int32_t startingBlock, u_int32_t numBlocks)
{
	u_int64_t offset;
	u_int64_t length;
	u_int64_t device_sz;
	int err = 0;
	if (hfs_kdebug_allocation & HFSDBG_UNMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_UNMAP_FREE | DBG_FUNC_START, startingBlock, numBlocks, 0, 0, 0);
	if (ALLOC_DEBUG) {
		/* Trimming blocks still marked in-use would corrupt live data. */
		if (hfs_isallocated(hfsmp, startingBlock, numBlocks)) {
			panic("hfs: %p: (%u,%u) unmapping allocated blocks", hfsmp, startingBlock, numBlocks);
		}
	}
	if (hfsmp->jnl != NULL) {
		device_sz = hfsmp->hfs_logical_bytes;
		/* Translate allocation blocks to absolute device-byte offsets. */
		offset = (u_int64_t) startingBlock * hfsmp->blockSize + (u_int64_t) hfsmp->hfsPlusIOPosOffset;
		length = (u_int64_t) numBlocks * hfsmp->blockSize;
		/* Refuse extents that fall outside the device; log and drop. */
		if ((offset >= device_sz) || ((offset + length) > device_sz)) {
			printf("hfs_unmap_free_ext: ignoring trim vol=%s @ off %lld len %lld \n", hfsmp->vcbVN, offset, length);
			err = EINVAL;
		}
		if (err == 0) {
			err = journal_trim_add_extent(hfsmp->jnl, offset, length);
			if (err) {
				printf("hfs_unmap_free_extent: error %d from journal_trim_add_extent for vol=%s", err, hfsmp->vcbVN);
			}
		}
	}
	if (hfs_kdebug_allocation & HFSDBG_UNMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_UNMAP_FREE | DBG_FUNC_END, err, 0, 0, 0, 0);
}
/*
 * Append a free run to the caller's batched TRIM list (used by the mount-time
 * bitmap scan).  When the list fills to its allocated capacity it is flushed
 * to the device via hfs_issue_unmap().  Only acts on journaled volumes with
 * HFS_UNMAP enabled; returns 0 or the error from the flush.
 */
static int hfs_track_unmap_blocks (struct hfsmount *hfsmp, u_int32_t start,
	u_int32_t numBlocks, struct jnl_trim_list *list) {
	u_int64_t offset;
	u_int64_t length;
	int error = 0;
	if ((hfsmp->hfs_flags & HFS_UNMAP) && (hfsmp->jnl != NULL) && list->allocated_count && list->extents != NULL) {
		int extent_no = list->extent_count;
		/* Convert allocation blocks to absolute device-byte coordinates. */
		offset = (u_int64_t) start * hfsmp->blockSize + (u_int64_t) hfsmp->hfsPlusIOPosOffset;
		length = (u_int64_t) numBlocks * hfsmp->blockSize;
		list->extents[extent_no].offset = offset;
		list->extents[extent_no].length = length;
		list->extent_count++;
		/* Flush once the batch is full. */
		if (list->extent_count == list->allocated_count) {
			error = hfs_issue_unmap (hfsmp, list);
		}
	}
	return error;
}
/*
 * Send the accumulated TRIM list to the device with a single DKIOCUNMAP
 * ioctl, then reset the list for reuse.  Returns the ioctl's result (0 on
 * success, or if the list was empty).
 */
static int hfs_issue_unmap (struct hfsmount *hfsmp, struct jnl_trim_list *list)
{
	dk_unmap_t unmap;
	int error = 0;
	if (hfs_kdebug_allocation & HFSDBG_UNMAP_ENABLED) {
		KERNEL_DEBUG_CONSTANT(HFSDBG_UNMAP_SCAN_TRIM | DBG_FUNC_START, hfsmp->hfs_raw_dev, 0, 0, 0, 0);
	}
	if (list->extent_count > 0 && list->extents != NULL) {
		bzero(&unmap, sizeof(unmap));
		unmap.extents = list->extents;
		unmap.extentsCount = list->extent_count;
		if (hfs_kdebug_allocation & HFSDBG_UNMAP_ENABLED) {
			KERNEL_DEBUG_CONSTANT(HFSDBG_UNMAP_SCAN_TRIM | DBG_FUNC_NONE, hfsmp->hfs_raw_dev, unmap.extentsCount, 0, 0, 0);
		}
#if CONFIG_PROTECT
		/* Until the first full bitmap scan completes, ask the device to
		 * treat the unmap as an initialization pass. */
		if ((hfsmp->scan_var & HFS_ALLOCATOR_SCAN_COMPLETED) == 0) {
			unmap.options |= _DK_UNMAP_INITIALIZE;
		}
#endif
		error = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCUNMAP, (caddr_t)&unmap, 0, vfs_context_kernel());
		/* Scrub the batch so stale extents can never be re-issued. */
		bzero (list->extents, (list->allocated_count * sizeof(dk_extent_t)));
		bzero (&unmap, sizeof(unmap));
		list->extent_count = 0;
	}
	if (hfs_kdebug_allocation & HFSDBG_UNMAP_ENABLED) {
		KERNEL_DEBUG_CONSTANT(HFSDBG_UNMAP_SCAN_TRIM | DBG_FUNC_END, error, hfsmp->hfs_raw_dev, 0, 0, 0);
	}
	return error;
}
/*
 * Remove a just-allocated run from the journal's pending TRIM list so a
 * committed transaction cannot trim blocks that are now live again.  No-op
 * on non-journaled volumes; errors are logged, not propagated.
 */
static void hfs_unmap_alloc_extent(struct hfsmount *hfsmp, u_int32_t startingBlock, u_int32_t numBlocks)
{
	u_int64_t offset;
	u_int64_t length;
	int err = 0;
	if (hfs_kdebug_allocation & HFSDBG_UNMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_UNMAP_ALLOC | DBG_FUNC_START, startingBlock, numBlocks, 0, 0, 0);
	if (hfsmp->jnl != NULL) {
		offset = (u_int64_t) startingBlock * hfsmp->blockSize + (u_int64_t) hfsmp->hfsPlusIOPosOffset;
		length = (u_int64_t) numBlocks * hfsmp->blockSize;
		err = journal_trim_remove_extent(hfsmp->jnl, offset, length);
		if (err) {
			printf("hfs_unmap_alloc_extent: error %d from journal_trim_remove_extent for vol=%s", err, hfsmp->vcbVN);
		}
	}
	if (hfs_kdebug_allocation & HFSDBG_UNMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_UNMAP_ALLOC | DBG_FUNC_END, err, 0, 0, 0, 0);
}
/*
 * Journal callback invoked after TRIM extents have been issued to the device:
 * the blocks are now definitively free, so feed them back into the in-memory
 * free-extent cache for fast reallocation.  arg is the hfsmount.
 */
void
hfs_trim_callback(void *arg, uint32_t extent_count, const dk_extent_t *extents)
{
	uint32_t i;
	uint32_t startBlock, numBlocks;
	struct hfsmount *hfsmp = arg;
	if (hfs_kdebug_allocation & HFSDBG_UNMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_UNMAP_CALLBACK | DBG_FUNC_START, 0, extent_count, 0, 0, 0);
	for (i=0; i<extent_count; ++i) {
		/* Device-byte extents back to allocation-block coordinates. */
		startBlock = (extents[i].offset - hfsmp->hfsPlusIOPosOffset) / hfsmp->blockSize;
		numBlocks = extents[i].length / hfsmp->blockSize;
		(void) add_free_extent_cache(hfsmp, startBlock, numBlocks);
	}
	if (hfs_kdebug_allocation & HFSDBG_UNMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_UNMAP_CALLBACK | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
/*
 * Ask the journal whether the given block run overlaps an extent that was
 * recently freed (and is awaiting TRIM).  On overlap, *recently_freed is set
 * to 1 and *overlap_end receives the first allocation block past the
 * overlapping journal extent; otherwise *recently_freed is 0.  Returns 0 on
 * success or EINVAL if the run lies outside the device.  Always reports "not
 * freed" on non-journaled volumes.
 */
u_int32_t
CheckUnmappedBytes (struct hfsmount *hfsmp, uint64_t blockno, uint64_t numblocks, int *recently_freed, uint32_t *overlap_end) {
	uint64_t device_offset;
	uint64_t numbytes;
	uint32_t err = 0;
	uint64_t lba_overlap_end;
	if (hfsmp->jnl != NULL) {
		uint64_t device_sz = hfsmp->hfs_logical_bytes;
		/* Convert the block run to absolute device-byte coordinates. */
		device_offset = blockno * ((uint64_t)hfsmp->blockSize);
		device_offset += hfsmp->hfsPlusIOPosOffset;
		numbytes = (((uint64_t)hfsmp->blockSize) * numblocks);
		if ((device_offset >= device_sz) || (numbytes > (device_sz - device_offset))) {
			return EINVAL;
		}
		if (journal_trim_extent_overlap (hfsmp->jnl, device_offset, numbytes, &lba_overlap_end)) {
			*recently_freed = 1;
			/* Convert the byte LBA the journal returned back to a block number. */
			uint64_t end_offset = lba_overlap_end - hfsmp->hfsPlusIOPosOffset;
			end_offset = end_offset / ((uint64_t) hfsmp->blockSize);
			*overlap_end = (uint32_t) end_offset;
		}
		else {
			*recently_freed = 0;
		}
		err = 0;
	}
	else {
		*recently_freed = 0;
	}
	return err;
}
/*
 * Full-volume bitmap scan, typically run once after mount: walks every
 * allocation block, rebuilding in-memory allocator state, and (on writable
 * HFS_UNMAP volumes) batches every free run into a TRIM list that is issued
 * to the device.
 *
 * NOTE(review): declared to return u_int32_t but actually returns the int
 * errno from the scan — callers presumably treat it as an error code; confirm.
 */
u_int32_t ScanUnmapBlocks (struct hfsmount *hfsmp)
{
	u_int32_t blocks_scanned = 0;
	int error = 0;
	struct jnl_trim_list trimlist;
	if (hfs_kdebug_allocation & HFSDBG_UNMAP_ENABLED) {
		KERNEL_DEBUG_CONSTANT(HFSDBG_UNMAP_SCAN | DBG_FUNC_START, hfsmp->hfs_raw_dev, 0, 0, 0, 0);
	}
	bzero (&trimlist, sizeof(trimlist));
	/* Only bother building a TRIM batch when we can actually issue it. */
	if ((hfsmp->hfs_flags & HFS_UNMAP) &&
		((hfsmp->hfs_flags & HFS_READ_ONLY) == 0)) {
		int alloc_count = PAGE_SIZE / sizeof(dk_extent_t);
		void *extents = hfs_malloc(alloc_count * sizeof(dk_extent_t));
		trimlist.extents = (dk_extent_t*)extents;
		trimlist.allocated_count = alloc_count;
		trimlist.extent_count = 0;
	}
	/* hfs_alloc_scan_range advances blocks_scanned through the bitmap. */
	while ((blocks_scanned < hfsmp->totalBlocks) && (error == 0)){
		error = hfs_alloc_scan_range (hfsmp, blocks_scanned, &blocks_scanned, &trimlist);
		if (error) {
			printf("HFS: bitmap scan range error: %d on vol=%s\n", error, hfsmp->vcbVN);
			break;
		}
	}
	if ((hfsmp->hfs_flags & HFS_UNMAP) &&
		((hfsmp->hfs_flags & HFS_READ_ONLY) == 0)) {
		/* Flush whatever is left in the batch, then free it. */
		if (error == 0) {
			hfs_issue_unmap(hfsmp, &trimlist);
		}
		if (trimlist.extents) {
			hfs_free(trimlist.extents, trimlist.allocated_count * sizeof(dk_extent_t));
		}
	}
#if ALLOC_DEBUG
	sanity_check_free_ext(hfsmp, 1);
	if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
		hfs_validate_summary(hfsmp);
		printf("HFS: Summary validation complete on %s\n", hfsmp->vcbVN);
	}
#endif
	if (hfs_kdebug_allocation & HFSDBG_UNMAP_ENABLED) {
		KERNEL_DEBUG_CONSTANT(HFSDBG_UNMAP_SCAN | DBG_FUNC_END, error, hfsmp->hfs_raw_dev, 0, 0, 0);
	}
	return error;
}
/*
 * Record a newly reserved block range [start, start+count-1] on the mount's
 * reserved-range list ('list' selects tentative vs. locked) and hand the new
 * entry back through *reservation.  The tentative list is capped at four
 * entries: anything beyond the first three already present is released
 * before the new range is inserted at the head.
 */
static void add_to_reserved_list(hfsmount_t *hfsmp, uint32_t start,
	uint32_t count, int list,
	struct rl_entry **reservation)
{
	struct rl_entry *new_entry;

	if (list == HFS_TENTATIVE_BLOCKS) {
		struct rl_entry *cur, *tmp;
		int seen = 0;
		/* SAFE variant: hfs_release_reserved unlinks entries as we walk. */
		TAILQ_FOREACH_SAFE(cur, &hfsmp->hfs_reserved_ranges[HFS_TENTATIVE_BLOCKS],
			rl_link, tmp) {
			if (++seen > 3)
				hfs_release_reserved(hfsmp, cur, HFS_TENTATIVE_BLOCKS);
		}
	}

	new_entry = hfs_malloc(sizeof(*new_entry));
	new_entry->rl_start = start;
	new_entry->rl_end = start + count - 1;
	TAILQ_INSERT_HEAD(&hfsmp->hfs_reserved_ranges[list], new_entry, rl_link);
	*reservation = new_entry;
}
/*
 * Unlink a reserved range from the given list and undo its accounting.
 * Tentative ranges simply reduce tentativeBlocks; locked ranges are also
 * unmapped on the device (DKIOCUNMAP) and reduce lockedBlocks.  Released
 * blocks are returned to the summary table and free-extent cache.  The entry
 * itself is not freed here — it is emptied (rl_start/rl_end set to the -1/-2
 * sentinel) so a later call is a no-op.
 */
static void hfs_release_reserved(hfsmount_t *hfsmp,
	struct rl_entry *range,
	int list)
{
	/* rl_start == -1 marks an already-released entry. */
	if (range->rl_start == -1)
		return;
	TAILQ_REMOVE(&hfsmp->hfs_reserved_ranges[list], range, rl_link);
	if (rl_len(range) > 0) {
		if (list == HFS_TENTATIVE_BLOCKS)
			hfsmp->tentativeBlocks -= rl_len(range);
		else {
			/* Locked blocks were handed to the device; tell it they are
			 * no longer in use before adjusting the counters. */
			dk_extent_t extent = {
				.offset = (hfs_blk_to_bytes(range->rl_start, hfsmp->blockSize)
						   + hfsmp->hfsPlusIOPosOffset),
				.length = hfs_blk_to_bytes(rl_len(range), hfsmp->blockSize)
			};
			dk_unmap_t unmap = {
				.extents = &extent,
				.extentsCount = 1,
			};
			VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCUNMAP, (caddr_t)&unmap,
					   0, vfs_context_kernel());
			hfs_assert(hfsmp->lockedBlocks >= rl_len(range));
			hfsmp->lockedBlocks -= rl_len(range);
		}
		hfs_release_summary(hfsmp, range->rl_start, rl_len(range));
		add_free_extent_cache(hfsmp, range->rl_start, rl_len(range));
	}
	/* Empty sentinel: start > end, and start == -1 short-circuits reentry. */
	range->rl_start = -1;
	range->rl_end = -2;
}
/*
 * Release a reservation (tentative or locked, per 'list') and free its
 * rl_entry, clearing the caller's pointer.  Safe to call when *reservation
 * is already NULL.
 */
static void hfs_free_locked_internal(hfsmount_t *hfsmp,
	struct rl_entry **reservation,
	int list)
{
	struct rl_entry *entry = *reservation;

	if (entry == NULL)
		return;

	hfs_release_reserved(hfsmp, entry, list);
	hfs_free(entry, sizeof(*entry));
	*reservation = NULL;
}
/* Release a tentative-block reservation and NULL out the caller's handle. */
void hfs_free_tentative(hfsmount_t *hfsmp, struct rl_entry **reservation)
{
	hfs_free_locked_internal(hfsmp, reservation,
		HFS_TENTATIVE_BLOCKS);
}
/* Release a locked-block reservation and NULL out the caller's handle. */
void hfs_free_locked(hfsmount_t *hfsmp, struct rl_entry **reservation)
{
	hfs_free_locked_internal(hfsmp, reservation,
		HFS_LOCKED_BLOCKS);
}
/*
 * Legacy entry point for block allocation.  Packs the caller's parameters
 * into the extent/extra-args form used by the core allocator, then unpacks
 * the resulting extent into the two output parameters.  Returns the OSErr
 * from hfs_block_alloc_int unchanged.
 */
OSErr BlockAllocate (
	hfsmount_t *hfsmp,
	u_int32_t startingBlock,
	u_int32_t minBlocks,
	u_int32_t maxBlocks,
	hfs_block_alloc_flags_t flags,
	u_int32_t *actualStartBlock,
	u_int32_t *actualNumBlocks)
{
	OSErr result;
	HFSPlusExtentDescriptor request;
	hfs_alloc_extra_args_t args = { .max_blocks = maxBlocks };

	request.startBlock = startingBlock;
	request.blockCount = minBlocks;

	result = hfs_block_alloc_int(hfsmp, &request, flags, &args);

	*actualStartBlock = request.startBlock;
	*actualNumBlocks = request.blockCount;
	return result;
}
/*
 * Public allocator wrapper: run the core allocator and translate its Carbon
 * OSErr result into a VFS errno for modern callers.
 */
errno_t hfs_block_alloc(hfsmount_t *hfsmp,
	HFSPlusExtentDescriptor *extent,
	hfs_block_alloc_flags_t flags,
	hfs_alloc_extra_args_t *ap)
{
	OSErr mac_err = hfs_block_alloc_int(hfsmp, extent, flags, ap);
	return MacToVFSError(mac_err);
}
/*
 * Core block allocator.  On entry, extent->startBlock is the preferred
 * starting block (0 = use the volume's allocation pointer) and
 * extent->blockCount the minimum acceptable run; ap->max_blocks (if set)
 * caps the run length.  On success the actual run is returned in *extent.
 * The flags drive several distinct modes: committing or rolling back a prior
 * reservation, consuming a tentative reservation, forced-contiguous search,
 * try-hard search, and tentative/locked reservation of the result.
 */
OSErr hfs_block_alloc_int(hfsmount_t *hfsmp,
	HFSPlusExtentDescriptor *extent,
	hfs_block_alloc_flags_t flags,
	hfs_alloc_extra_args_t *ap)
{
	u_int32_t freeBlocks;
	OSErr err = 0;
	Boolean updateAllocPtr = false; Boolean forceContiguous = false;
	Boolean forceFlush;
	uint32_t startingBlock = extent->startBlock;
	uint32_t minBlocks = extent->blockCount;
	uint32_t maxBlocks = (ap && ap->max_blocks) ? ap->max_blocks : minBlocks;
	if (hfs_kdebug_allocation & HFSDBG_ALLOC_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_BLOCK_ALLOCATE | DBG_FUNC_START, startingBlock, minBlocks, maxBlocks, flags, 0);
	/* COMMIT: the blocks were previously reserved (locked); just mark them. */
	if (ISSET(flags, HFS_ALLOC_COMMIT)) {
		extent->startBlock = (*ap->reservation_in)->rl_start;
		extent->blockCount = rl_len(*ap->reservation_in);
		goto mark_allocated;
	}
	/* ROLL_BACK: re-mark blocks the caller already knows; skip searching. */
	if (ISSET(flags, HFS_ALLOC_ROLL_BACK))
		goto mark_allocated;
	freeBlocks = hfs_freeblks(hfsmp, 0);
	/* Try to satisfy the request from an existing tentative reservation. */
	if (ISSET(flags, HFS_ALLOC_USE_TENTATIVE)) {
		struct rl_entry *range = *ap->reservation_in;
		if (range && range->rl_start != -1) {
			uint32_t count = min(min(maxBlocks, rl_len(range)), freeBlocks);
			if (count >= minBlocks) {
				extent->startBlock = range->rl_start;
				extent->blockCount = count;
				/* Unless the caller wants the blocks kept locked, consume
				 * the reservation as if committing it. */
				if (!ISSET(flags, HFS_ALLOC_LOCKED))
					SET(flags, HFS_ALLOC_COMMIT);
				goto mark_allocated;
			}
		}
		/* Reservation unusable — drop it and fall through to a search. */
		hfs_free_tentative(hfsmp, ap->reservation_in);
		CLR(flags, HFS_ALLOC_USE_TENTATIVE);
	}
	if (ISSET(flags, HFS_ALLOC_FORCECONTIG | HFS_ALLOC_TRY_HARD))
		forceContiguous = true;
	if (flags & HFS_ALLOC_FLUSHTXN) {
		forceFlush = true;
	}
	else {
		forceFlush = false;
	}
	hfs_assert(hfsmp->freeBlocks >= hfsmp->tentativeBlocks);
	/* Not enough truly-free space: allow stealing from tentative ranges. */
	if (freeBlocks < hfsmp->tentativeBlocks + minBlocks)
		SET(flags, HFS_ALLOC_IGNORE_TENTATIVE);
	if ((flags & HFS_ALLOC_SKIPFREEBLKS) == 0) {
		if (freeBlocks == 0) {
			err = dskFulErr;
			goto exit;
		}
		if (forceContiguous && freeBlocks < minBlocks) {
			err = dskFulErr;
			goto exit;
		}
		/* Clamp the request to what is actually available. */
		if (minBlocks > freeBlocks) {
			minBlocks = freeBlocks;
		}
		if (maxBlocks > freeBlocks) {
			maxBlocks = freeBlocks;
		}
	}
	if (ISSET(flags, HFS_ALLOC_TRY_HARD)) {
		err = hfs_alloc_try_hard(hfsmp, extent, maxBlocks, flags);
		if (err)
			goto exit;
		goto mark_allocated;
	}
	/* No preferred start: pick up from the volume's allocation pointer. */
	if (startingBlock == 0) {
		hfs_lock_mount (hfsmp);
		if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
			startingBlock = hfsmp->sparseAllocation;
		}
		else {
			startingBlock = hfsmp->nextAllocation;
		}
		hfs_unlock_mount(hfsmp);
		updateAllocPtr = true;
	}
	if (startingBlock >= hfsmp->allocLimit) {
		startingBlock = 0;
	}
	if (forceContiguous) {
		err = BlockFindContig(hfsmp, startingBlock, minBlocks, maxBlocks,
			flags, &extent->startBlock, &extent->blockCount);
		/* Advance the allocation pointer only for forward, non-metazone hits. */
		if ((err == noErr) &&
			(extent->startBlock > startingBlock) &&
			((extent->startBlock < hfsmp->hfs_metazone_start) ||
			 (extent->startBlock > hfsmp->hfs_metazone_end))) {
			updateAllocPtr = true;
		}
	} else {
		/* Hold the transaction flush in reserve for the last-ditch pass. */
		if (forceFlush) {
			flags &= ~HFS_ALLOC_FLUSHTXN;
		}
		/* Pass 1: free-extent cache. */
		err = BlockFindKnown(hfsmp, maxBlocks, &extent->startBlock,
			&extent->blockCount);
		/* Pass 2: scan forward from the preferred start. */
		if (err == dskFulErr) {
			err = BlockFindAny(hfsmp, startingBlock, hfsmp->allocLimit,
				maxBlocks, flags, true,
				&extent->startBlock, &extent->blockCount);
		}
		/* Pass 3: wrap around to the beginning of the volume. */
		if (err == dskFulErr) {
			if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
				err = BlockFindAny(hfsmp, 1, hfsmp->allocLimit, maxBlocks,
					flags, false,
					&extent->startBlock, &extent->blockCount);
			}
			else {
				err = BlockFindAny(hfsmp, 1, startingBlock, maxBlocks,
					flags, false,
					&extent->startBlock, &extent->blockCount);
			}
			/* Pass 4: retry with FLUSHTXN so recently-freed blocks become usable. */
			if (err == dskFulErr && forceFlush) {
				flags |= HFS_ALLOC_FLUSHTXN;
				err = BlockFindAny(hfsmp, 1, hfsmp->allocLimit, maxBlocks,
					flags, false,
					&extent->startBlock, &extent->blockCount);
			}
		}
	}
	if (err)
		goto exit;
mark_allocated:
	/* Trim the run down to satisfy the caller's alignment, if it still
	 * leaves at least minBlocks. */
	if (ap && ap->alignment && extent->blockCount < ap->max_blocks) {
		uint32_t rounding = ((extent->blockCount + ap->alignment_offset)
			% ap->alignment);
		if (extent->blockCount >= minBlocks + rounding)
			extent->blockCount -= rounding;
	}
	err = BlockMarkAllocatedInternal(hfsmp, extent->startBlock,
		extent->blockCount, flags);
	if (err)
		goto exit;
	/* CoreStorage-backed volumes: pin (fast device) or map the new extent. */
	if (ISSET(hfsmp->hfs_flags, HFS_CS) && extent->blockCount != 0
		&& !ISSET(flags, HFS_ALLOC_TENTATIVE)) {
		if (ISSET(flags, HFS_ALLOC_FAST_DEV)) {
#if !HFS_ALLOC_TEST
			hfs_pin_block_range(hfsmp, HFS_PIN_IT,
				extent->startBlock, extent->blockCount);
#endif
		} else {
			_dk_cs_map_t cm = {
				.cm_extent = {
					(hfs_blk_to_bytes(extent->startBlock, hfsmp->blockSize)
					 + hfsmp->hfsPlusIOPosOffset),
					hfs_blk_to_bytes(extent->blockCount, hfsmp->blockSize)
				}
			};
			errno_t err2 = VNOP_IOCTL(hfsmp->hfs_devvp, _DKIOCCSMAP,
				(caddr_t)&cm, 0, vfs_context_current());
			if (err2 || cm.cm_bytes_mapped < cm.cm_extent.length) {
				printf("hfs: _DKIOCCSMAP error: %d, bytes_mapped: %llu\n",
					err2, cm.cm_bytes_mapped);
			}
		}
	}
	if (extent->blockCount != 0) {
		hfs_lock_mount (hfsmp);
		/* Fresh allocation (not consuming a reservation): update the
		 * sparse hint, freed-block count, allocation pointer and
		 * free-extent cache. */
		if (!ISSET(flags, HFS_ALLOC_USE_TENTATIVE | HFS_ALLOC_COMMIT)) {
			lck_spin_lock(&hfsmp->vcbFreeExtLock);
			if (hfsmp->vcbFreeExtCnt == 0 && hfsmp->hfs_freed_block_count == 0) {
				hfsmp->sparseAllocation = extent->startBlock;
			}
			lck_spin_unlock(&hfsmp->vcbFreeExtLock);
			if (extent->blockCount < hfsmp->hfs_freed_block_count) {
				hfsmp->hfs_freed_block_count -= extent->blockCount;
			} else {
				hfsmp->hfs_freed_block_count = 0;
			}
			if (updateAllocPtr &&
				((extent->startBlock < hfsmp->hfs_metazone_start) ||
				 (extent->startBlock > hfsmp->hfs_metazone_end))) {
				HFS_UPDATE_NEXT_ALLOCATION(hfsmp, extent->startBlock);
			}
			(void) remove_free_extent_cache(hfsmp, extent->startBlock, extent->blockCount);
		}
		/* Consume the front of the reservation we allocated from. */
		if (ISSET(flags, HFS_ALLOC_USE_TENTATIVE)) {
			(*ap->reservation_in)->rl_start += extent->blockCount;
			hfsmp->tentativeBlocks -= extent->blockCount;
			if (rl_len(*ap->reservation_in) <= 0)
				hfs_free_tentative(hfsmp, ap->reservation_in);
		} else if (ISSET(flags, HFS_ALLOC_COMMIT)) {
			hfs_assert(hfsmp->lockedBlocks >= extent->blockCount);
			(*ap->reservation_in)->rl_start += extent->blockCount;
			hfsmp->lockedBlocks -= extent->blockCount;
			hfs_free_locked(hfsmp, ap->reservation_in);
		}
		/* Account the new blocks against the right counter. */
		if (ISSET(flags, HFS_ALLOC_TENTATIVE)) {
			hfsmp->tentativeBlocks += extent->blockCount;
		} else if (ISSET(flags, HFS_ALLOC_LOCKED)) {
			hfsmp->lockedBlocks += extent->blockCount;
		} else if ((flags & HFS_ALLOC_SKIPFREEBLKS) == 0) {
			hfsmp->freeBlocks -= extent->blockCount;
		}
		MarkVCBDirty(hfsmp);
		hfs_unlock_mount(hfsmp);
		hfs_generate_volume_notifications(hfsmp);
		/* NOTE(review): list arguments 0/1 presumably correspond to
		 * HFS_TENTATIVE_BLOCKS/HFS_LOCKED_BLOCKS — confirm the enum values. */
		if (ISSET(flags, HFS_ALLOC_TENTATIVE)) {
			hfs_assert(ap);
			add_to_reserved_list(hfsmp, extent->startBlock, extent->blockCount,
				0, ap->reservation_out);
		} else if (ISSET(flags, HFS_ALLOC_LOCKED)) {
			hfs_assert(ap);
			add_to_reserved_list(hfsmp, extent->startBlock, extent->blockCount,
				1, ap->reservation_out);
		}
		/* We may have allocated over someone else's tentative reservation:
		 * carve the overlap out of any affected range. */
		if (ISSET(flags, HFS_ALLOC_IGNORE_TENTATIVE)) {
			struct rl_entry *range, *next_range;
			struct rl_head *ranges = &hfsmp->hfs_reserved_ranges[HFS_TENTATIVE_BLOCKS];
			const uint32_t start = extent->startBlock;
			const uint32_t end = start + extent->blockCount - 1;
			TAILQ_FOREACH_SAFE(range, ranges, rl_link, next_range) {
				switch (rl_overlap(range, start, end)) {
				case RL_OVERLAPCONTAINSRANGE:
					/* Allocation splits the reservation; keep the larger
					 * surviving side and return the other to the cache. */
					if (start - range->rl_start > range->rl_end - end) {
						hfsmp->tentativeBlocks -= range->rl_end + 1 - start;
						hfs_release_summary(hfsmp, end + 1, range->rl_end - end);
						const uint32_t old_end = range->rl_end;
						range->rl_end = start - 1;
						add_free_extent_cache(hfsmp, end + 1, old_end - end);
					} else {
						hfsmp->tentativeBlocks -= end + 1 - range->rl_start;
						hfs_release_summary(hfsmp, range->rl_start,
							start - range->rl_start);
						const uint32_t old_start = range->rl_start;
						range->rl_start = end + 1;
						add_free_extent_cache(hfsmp, old_start,
							start - old_start);
					}
					hfs_assert(range->rl_end >= range->rl_start);
					break;
				case RL_MATCHINGOVERLAP:
				case RL_OVERLAPISCONTAINED:
					/* Reservation fully consumed — empty it and release. */
					hfsmp->tentativeBlocks -= rl_len(range);
					range->rl_end = range->rl_start - 1;
					hfs_release_reserved(hfsmp, range, HFS_TENTATIVE_BLOCKS);
					break;
				case RL_OVERLAPSTARTSBEFORE:
					hfsmp->tentativeBlocks -= range->rl_end + 1 - start;
					range->rl_end = start - 1;
					hfs_assert(range->rl_end >= range->rl_start);
					break;
				case RL_OVERLAPENDSAFTER:
					hfsmp->tentativeBlocks -= end + 1 - range->rl_start;
					range->rl_start = end + 1;
					hfs_assert(range->rl_end >= range->rl_start);
					break;
				case RL_NOOVERLAP:
					break;
				}
			}
		}
	}
exit:
	if (ALLOC_DEBUG) {
		if (err == noErr) {
			if (extent->startBlock >= hfsmp->totalBlocks) {
				panic ("BlockAllocate: vending invalid blocks!");
			}
			if (extent->startBlock >= hfsmp->allocLimit) {
				panic ("BlockAllocate: vending block past allocLimit!");
			}
			/* NOTE(review): these end-of-run checks use >=, which would also
			 * panic on a run ending exactly at totalBlocks/allocLimit —
			 * looks off-by-one; confirm intended semantics before changing. */
			if ((extent->startBlock + extent->blockCount) >= hfsmp->totalBlocks) {
				panic ("BlockAllocate: vending too many invalid blocks!");
			}
			if ((extent->startBlock + extent->blockCount) >= hfsmp->allocLimit) {
				panic ("BlockAllocate: vending too many invalid blocks past allocLimit!");
			}
		}
	}
	if (err) {
		extent->startBlock = 0;
		extent->blockCount = 0;
	}
	if (hfs_kdebug_allocation & HFSDBG_ALLOC_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_BLOCK_ALLOCATE | DBG_FUNC_END, err, extent->startBlock, extent->blockCount, 0, 0);
	return err;
}
/*
 * Free a run of allocation blocks: clear them in the bitmap, release the
 * corresponding summary bits, and update the volume's free-block accounting
 * and allocation hints.  Tentative/locked reservations are not really
 * allocated, so deallocating them is a no-op here (their release path is
 * hfs_release_reserved).
 */
OSErr BlockDeallocate (
	ExtendedVCB *vcb, u_int32_t firstBlock, u_int32_t numBlocks, hfs_block_alloc_flags_t flags)
{
	if (ISSET(flags, HFS_ALLOC_TENTATIVE | HFS_ALLOC_LOCKED))
		return 0;
	OSErr err;
	struct hfsmount *hfsmp;
	hfsmp = VCBTOHFS(vcb);
	if (hfs_kdebug_allocation & HFSDBG_ALLOC_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_BLOCK_DEALLOCATE | DBG_FUNC_START, firstBlock, numBlocks, flags, 0, 0);
	if (numBlocks == 0) {
		err = noErr;
		goto Exit;
	}
	if (ALLOC_DEBUG) {
		if (firstBlock >= hfsmp->totalBlocks) {
			panic ("BlockDeallocate: freeing invalid blocks!");
		}
		if ((firstBlock + numBlocks) >= hfsmp->totalBlocks) {
			panic ("BlockDeallocate: freeing too many invalid blocks!");
		}
	}
	/* Drop the summary bits first so searchers re-examine this region. */
	(void) hfs_release_summary (hfsmp, firstBlock, numBlocks);
	err = BlockMarkFreeInternal(vcb, firstBlock, numBlocks, true);
	if (err) {
		goto Exit;
	}
	hfs_lock_mount(hfsmp);
	if ((flags & HFS_ALLOC_SKIPFREEBLKS) == 0) {
		vcb->freeBlocks += numBlocks;
	}
	vcb->hfs_freed_block_count += numBlocks;
	/* If we just freed the blocks immediately behind the allocation
	 * pointer, pull the pointer back so they are found first. */
	if (vcb->nextAllocation == (firstBlock + numBlocks)) {
		HFS_UPDATE_NEXT_ALLOCATION(vcb, (vcb->nextAllocation - numBlocks));
	}
	/* Journaled volumes feed the free-extent cache from the TRIM callback
	 * instead, once the free is actually durable. */
	if (hfsmp->jnl == NULL) {
		(void) add_free_extent_cache(vcb, firstBlock, numBlocks);
		if (firstBlock < vcb->sparseAllocation) {
			vcb->sparseAllocation = firstBlock;
		}
	}
	MarkVCBDirty(vcb);
	hfs_unlock_mount(hfsmp);
	hfs_generate_volume_notifications(VCBTOHFS(vcb));
Exit:
	if (hfs_kdebug_allocation & HFSDBG_ALLOC_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_BLOCK_DEALLOCATE | DBG_FUNC_END, err, 0, 0, 0, 0);
	return err;
}
/*
 * Number of zero (free) bits in each possible 4-bit nibble value, indexed by
 * the nibble; lets MetaZoneFreeBlocks count free blocks four bits at a time.
 */
u_int8_t freebitcount[16] = {
	4, 3, 3, 2, 3, 2, 2, 1,
	3, 2, 2, 1, 2, 1, 1, 0,
};
/*
 * Count the free allocation blocks inside the volume's metadata zone by
 * walking the bitmap byte-by-byte and summing free bits via the
 * freebitcount nibble table.  Returns 0 if any bitmap block fails to read.
 */
u_int32_t
MetaZoneFreeBlocks(ExtendedVCB *vcb)
{
	u_int32_t freeblocks;
	u_int32_t *currCache;
	uintptr_t blockRef;
	u_int32_t bit;
	u_int32_t lastbit;
	int bytesleft;
	int bytesperblock;
	u_int8_t byte;
	u_int8_t *buffer;
	blockRef = 0;
	bytesleft = freeblocks = 0;
	buffer = NULL;
	bit = VCBTOHFS(vcb)->hfs_metazone_start;
	/* Zone starting at block 1: count from the very first bitmap bit. */
	if (bit == 1)
		bit = 0;
	lastbit = VCBTOHFS(vcb)->hfs_metazone_end;
	bytesperblock = vcb->vcbVBMIOSize;
	while (bit < lastbit) {
		/* Page in the next bitmap block when the current one is exhausted. */
		if (bytesleft == 0) {
			if (blockRef) {
				(void) ReleaseBitmapBlock(vcb, blockRef, false);
				blockRef = 0;
			}
			if (ReadBitmapBlock(vcb, bit, &currCache, &blockRef,
				HFS_ALLOC_IGNORE_TENTATIVE) != 0) {
				return (0);
			}
			buffer = (u_int8_t *)currCache;
			bytesleft = bytesperblock;
		}
		/* Each byte covers 8 blocks; sum free bits one nibble at a time. */
		byte = *buffer++;
		freeblocks += freebitcount[byte & 0x0F];
		freeblocks += freebitcount[(byte >> 4) & 0x0F];
		bit += kBitsPerByte;
		--bytesleft;
	}
	if (blockRef)
		(void) ReleaseBitmapBlock(vcb, blockRef, false);
	return (freeblocks);
}
/*
 * Given a candidate bitmap bit (allocation block number), return the next
 * bit an ordinary allocation should consider: bits inside the metadata zone
 * are skipped to the first block past the zone.  Volumes without a metadata
 * zone return the input unchanged.
 */
static u_int32_t NextBitmapBlock(
	ExtendedVCB *vcb,
	u_int32_t bit)
{
	struct hfsmount *hfsmp = VCBTOHFS(vcb);

	if (!(hfsmp->hfs_flags & HFS_METADATA_ZONE))
		return bit;

	if (bit >= hfsmp->hfs_metazone_start && bit <= hfsmp->hfs_metazone_end)
		return hfsmp->hfs_metazone_end + 1;

	return bit;
}
/*
 * Set bits [start, end) in a big-endian bitmap, operating 64 bits at a time.
 * The masks are byte-swapped so the bit order matches the on-disk (big-
 * endian) allocation file layout regardless of host endianness.
 *
 * NOTE(review): LEFT_MASK(0) would shift by 64 (UB); the else-branch guards
 * it with `if (end_bit)`, and the same-word branch can only hit end_bit==0
 * when start==end — presumably callers never pass an empty range; confirm.
 */
static void bits_set(void *bitmap, int start, int end)
{
	const int start_bit = start & 63;
	const int end_bit = end & 63;
#define LEFT_MASK(bit) OSSwapHostToBigInt64(0xffffffffffffffffull << (64 - bit))
#define RIGHT_MASK(bit) OSSwapHostToBigInt64(0xffffffffffffffffull >> bit)
	uint64_t *p = (uint64_t *)bitmap + start / 64;
	if ((start & ~63) == (end & ~63)) {
		/* Start and end land in the same 64-bit word. */
		*p |= RIGHT_MASK(start_bit) & LEFT_MASK(end_bit);
	} else {
		/* Partial first word, then whole words, then partial last word. */
		*p++ |= RIGHT_MASK(start_bit);
		int nquads = (end - end_bit - start - 1) / 64;
		while (nquads--)
			*p++ = 0xffffffffffffffffull;
		if (end_bit)
			*p |= LEFT_MASK(end_bit);
	}
}
/*
 * Overlay the in-memory reserved ranges (tentative and locked) onto a bitmap
 * buffer so searchers see reserved blocks as allocated.  'offset' is the
 * absolute bit number of the buffer's first bit.  If any overlay is needed
 * and the buffer must not be modified in place (always_copy, or it is
 * B_LOCKED), the buffer is replaced by a shadow copy — the (possibly new)
 * buf is returned and the caller must use it.  With
 * HFS_ALLOC_IGNORE_TENTATIVE only the locked list is overlaid.
 */
static buf_t process_reservations(hfsmount_t *hfsmp, buf_t bp, off_t offset,
	hfs_block_alloc_flags_t flags,
	bool always_copy)
{
	bool taken_copy = false;
	void *buffer = (void *)buf_dataptr(bp);
	const uint32_t nbytes = buf_count(bp);
	const off_t end = offset + nbytes * 8 - 1;
	/* NOTE(review): loop bound 2 presumably assumes HFS_TENTATIVE_BLOCKS==0
	 * and HFS_LOCKED_BLOCKS==1 — confirm the enum values. */
	for (int i = (ISSET(flags, HFS_ALLOC_IGNORE_TENTATIVE)
		? HFS_LOCKED_BLOCKS : HFS_TENTATIVE_BLOCKS); i < 2; ++i) {
		struct rl_entry *entry;
		TAILQ_FOREACH(entry, &hfsmp->hfs_reserved_ranges[i], rl_link) {
			uint32_t a, b;
			enum rl_overlaptype overlap_type = rl_overlap(entry, offset, end);
			if (overlap_type == RL_NOOVERLAP)
				continue;
			/* First overlap: take a private shadow copy if we cannot
			 * scribble on the cached buffer. */
			if (!taken_copy && (always_copy || ISSET(buf_flags(bp), B_LOCKED))) {
				buf_t new_bp = buf_create_shadow(bp, true, 0, NULL, NULL);
				buf_brelse(bp);
				bp = new_bp;
				buf_setflags(bp, B_NOCACHE);
				buffer = (void *)buf_dataptr(bp);
				taken_copy = true;
			}
			switch (overlap_type) {
			case RL_OVERLAPCONTAINSRANGE:
			case RL_MATCHINGOVERLAP:
				/* Reservation covers the whole buffer: everything is "used". */
				memset(buffer, 0xff, nbytes);
				return bp;
			case RL_OVERLAPISCONTAINED:
				a = entry->rl_start;
				b = entry->rl_end;
				break;
			case RL_OVERLAPSTARTSBEFORE:
				a = offset;
				b = entry->rl_end;
				break;
			case RL_OVERLAPENDSAFTER:
				a = entry->rl_start;
				b = end;
				break;
			case RL_NOOVERLAP:
				__builtin_unreachable();
			}
			/* Translate to buffer-relative bit positions and mark them. */
			a -= offset;
			b -= offset;
			hfs_assert(a < buf_count(bp) * 8);
			hfs_assert(b < buf_count(bp) * 8);
			hfs_assert(b >= a);
			bits_set(buffer, a, b + 1);
		}
	}
	return bp;
}
/*
 * Read the bitmap block containing allocation bit 'bit'.  On success,
 * *buffer points at the block's data and *blockRef is an opaque handle
 * (really the buf_t) to pass to ReleaseBitmapBlock.  Unless
 * HFS_ALLOC_IGNORE_RESERVED is set, reserved ranges are overlaid so the
 * caller sees them as allocated — in that case the returned buffer is a
 * read-only shadow and must not be dirtied.
 */
static OSErr ReadBitmapBlock(ExtendedVCB *vcb,
	u_int32_t bit,
	u_int32_t **buffer,
	uintptr_t *blockRef,
	hfs_block_alloc_flags_t flags)
{
	OSErr err;
	struct buf *bp = NULL;
	struct vnode *vp = NULL;
	daddr64_t block;
	u_int32_t blockSize;
	if (hfs_kdebug_allocation & HFSDBG_BITMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_READ_BITMAP_BLOCK | DBG_FUNC_START, bit, 0, 0, 0, 0);
	REQUIRE_FILE_LOCK(vcb->hfs_allocation_vp, false);
	blockSize = (u_int32_t)vcb->vcbVBMIOSize;
	/* Which vcbVBMIOSize-sized block of the allocation file holds this bit. */
	block = (daddr64_t)(bit / (blockSize * kBitsPerByte));
	/* HFS Plus keeps the bitmap in the allocation file; plain HFS reads it
	 * straight from the device at vcbVBMSt.
	 * NOTE(review): if vcbSigWord == kHFSSigWord and CONFIG_HFS_STD is off,
	 * vp stays NULL — presumably unreachable in that configuration; confirm. */
	if (vcb->vcbSigWord != kHFSSigWord) {
		vp = vcb->hfs_allocation_vp;
	}
#if CONFIG_HFS_STD
	else {
		vp = VCBTOHFS(vcb)->hfs_devvp;
		block += vcb->vcbVBMSt;
	}
#endif
	err = (int)buf_meta_bread(vp, block, blockSize, NOCRED, &bp);
	if (bp) {
		if (err) {
			buf_brelse(bp);
			*blockRef = 0;
			*buffer = NULL;
		} else {
			if (!ISSET(flags, HFS_ALLOC_IGNORE_RESERVED)) {
				bp = process_reservations(vcb, bp, block * blockSize * 8,
					flags, true);
			}
			/* Remember the flags so ReleaseBitmapBlock can detect illegal
			 * writes to a reservation-overlaid (read-only) buffer. */
			buf_setfsprivate(bp, (void *)(uintptr_t)flags);
			*blockRef = (uintptr_t)bp;
			*buffer = (u_int32_t *)buf_dataptr(bp);
		}
	} else
		hfs_assert(err);
	if (hfs_kdebug_allocation & HFSDBG_BITMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_READ_BITMAP_BLOCK | DBG_FUNC_END, err, 0, 0, 0, 0);
	return err;
}
/*
 * Read 'iosize' bytes of the allocation bitmap starting at byte 'offset'
 * (used by the mount-time scanner, which reads larger chunks than
 * ReadBitmapBlock).  Reserved ranges are always overlaid.  HFS Plus only;
 * returns EINVAL for other volume signatures.  Release with
 * ReleaseScanBitmapRange.
 */
static OSErr ReadBitmapRange(struct hfsmount *hfsmp, uint32_t offset,
	uint32_t iosize, uint32_t **buffer, struct buf **blockRef)
{
	OSErr err;
	struct buf *bp = NULL;
	struct vnode *vp = NULL;
	daddr64_t block;
	if (hfsmp->vcbSigWord != kHFSPlusSigWord) {
		return EINVAL;
	}
	if (hfs_kdebug_allocation & HFSDBG_BITMAP_ENABLED) {
		KERNEL_DEBUG_CONSTANT(HFSDBG_READ_BITMAP_RANGE | DBG_FUNC_START, offset, iosize, 0, 0, 0);
	}
	REQUIRE_FILE_LOCK(hfsmp->hfs_allocation_vp, false);
	vp = hfsmp->hfs_allocation_vp;
	/* Byte offset to allocation-file block number. */
	block = (daddr64_t)(offset / hfsmp->vcbVBMIOSize);
	err = (int) buf_meta_bread(vp, block, iosize, NOCRED, &bp);
	if (bp) {
		if (err) {
			buf_brelse(bp);
			*blockRef = 0;
			*buffer = NULL;
		} else {
			/* offset*8 = absolute bit number of the buffer's first bit. */
			bp = process_reservations(hfsmp, bp, (offset * 8), 0,
				false);
			*blockRef = bp;
			*buffer = (u_int32_t *)buf_dataptr(bp);
		}
	}
	if (hfs_kdebug_allocation & HFSDBG_BITMAP_ENABLED) {
		KERNEL_DEBUG_CONSTANT(HFSDBG_READ_BITMAP_RANGE | DBG_FUNC_END, err, 0, 0, 0, 0);
	}
	return err;
}
/*
 * ReleaseBitmapBlock
 *
 * Release a bitmap block obtained from ReadBitmapBlock.  If `dirty` is set
 * the buffer is written back — through the journal transaction when one is
 * active, otherwise via a delayed write.  Clean buffers are simply released
 * back to the buffer cache.
 */
static OSErr ReleaseBitmapBlock(
	ExtendedVCB *vcb,
	uintptr_t blockRef,
	Boolean dirty)
{
	struct buf *bp = (struct buf *)blockRef;

	if (hfs_kdebug_allocation & HFSDBG_BITMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_RELEASE_BITMAP_BLOCK | DBG_FUNC_START, dirty, 0, 0, 0, 0);

	if (blockRef == 0) {
		/* Dirtying a block we never obtained is a caller bug. */
		if (dirty)
			panic("hfs: ReleaseBitmapBlock: missing bp");
		return (0);
	}

	if (bp) {
		if (dirty) {
			/* ReadBitmapBlock stored its flags in fsprivate.  A buffer
			 * that had reservations overlaid (i.e. was NOT read with
			 * HFS_ALLOC_IGNORE_RESERVED) is a read-only shadow; writing
			 * it back would corrupt the on-disk bitmap. */
			hfs_block_alloc_flags_t flags = (uintptr_t)buf_fsprivate(bp);
			if (!ISSET(flags, HFS_ALLOC_IGNORE_RESERVED))
				panic("Modified read-only bitmap buffer!");
			struct hfsmount *hfsmp = VCBTOHFS(vcb);
			if (hfsmp->jnl) {
				journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
			} else {
				buf_bdwrite(bp);
			}
		} else {
			buf_brelse(bp);
		}
	}

	if (hfs_kdebug_allocation & HFSDBG_BITMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_RELEASE_BITMAP_BLOCK | DBG_FUNC_END, 0, 0, 0, 0, 0);

	return (0);
}
/*
 * ReleaseScanBitmapRange
 *
 * Release a bitmap buffer obtained via ReadBitmapRange on the scanning
 * path.  Buffers the journal has not locked down are marked invalid so
 * they do not linger in the buffer cache after the scan completes.
 *
 * Fix: the DBG_FUNC_START event previously logged
 * HFSDBG_RELEASE_BITMAP_BLOCK while the DBG_FUNC_END event logged
 * HFSDBG_RELEASE_SCAN_BITMAP, so the kdebug begin/end pair never matched.
 * Both events now use HFSDBG_RELEASE_SCAN_BITMAP.
 */
static OSErr ReleaseScanBitmapRange(struct buf *bp ) {
	if (hfs_kdebug_allocation & HFSDBG_BITMAP_ENABLED) {
		KERNEL_DEBUG_CONSTANT(HFSDBG_RELEASE_SCAN_BITMAP | DBG_FUNC_START, 0, 0, 0, 0, 0);
	}

	if (bp) {
		/* If the journal has B_LOCKED set it still owns the buffer;
		 * otherwise drop it from the cache entirely. */
		if ((buf_flags(bp) & B_LOCKED) == 0) {
			buf_markinvalid(bp);
		}
		buf_brelse(bp);
	}

	if (hfs_kdebug_allocation & HFSDBG_BITMAP_ENABLED) {
		KERNEL_DEBUG_CONSTANT(HFSDBG_RELEASE_SCAN_BITMAP | DBG_FUNC_END, 0, 0, 0, 0, 0);
	}

	return (0);
}
/*
 * hfs_alloc_try_hard
 *
 * Best-effort search for the largest contiguous extent up to `max_blocks`,
 * falling back through three strategies:
 *   1. a contiguous search forward from the caller's suggested startBlock,
 *   2. the in-memory free-extent cache (BlockFindKnown),
 *   3. a full contiguous scan of the volume accepting as few as the
 *      caller's original blockCount (min_blocks).
 *
 * On entry, extent->startBlock is a hint and extent->blockCount is the
 * minimum acceptable size; on success both are overwritten with the result.
 */
static OSErr hfs_alloc_try_hard(hfsmount_t *hfsmp,
	HFSPlusExtentDescriptor *extent,
	uint32_t max_blocks,
	hfs_block_alloc_flags_t flags)
{
	OSErr err = dskFulErr;
	const uint32_t min_blocks = extent->blockCount;

	/* Strategy 1: only try near the hint if a full max_blocks window fits
	 * below allocLimit. */
	if (extent->startBlock > 0 && extent->startBlock < hfsmp->allocLimit
		&& hfsmp->allocLimit - extent->startBlock > max_blocks) {
		err = BlockFindContiguous(hfsmp, extent->startBlock, extent->startBlock + max_blocks,
			max_blocks, max_blocks, true, true,
			&extent->startBlock, &extent->blockCount, flags);
		/* Anything other than "disk full" (success or hard error) is final. */
		if (err != dskFulErr)
			return err;
	}

	/* Strategy 2: consult the free-extent cache. */
	err = BlockFindKnown(hfsmp, max_blocks, &extent->startBlock,
		&extent->blockCount);
	if (!err) {
		if (extent->blockCount >= max_blocks)
			return 0;
		/* Cache hit smaller than max_blocks: keep it as a candidate but
		 * fall through to the exhaustive scan below. */
	} else if (err != dskFulErr)
		return err;

	/* Strategy 3: exhaustive contiguous scan, accepting min_blocks. */
	return BlockFindContiguous(hfsmp, 1, hfsmp->allocLimit,
		min_blocks, max_blocks,
		true,
		true,
		&extent->startBlock, &extent->blockCount, flags);
}
/*
 * BlockFindContig
 *
 * Find a contiguous run of free blocks, retrying around extents that were
 * recently deleted but whose TRIM/unmap has not yet been issued (tracked by
 * the journal).  If HFS_ALLOC_FLUSHTXN is set, a colliding extent may be
 * reused anyway (the caller will flush the transaction); otherwise a second
 * collision with the same extent is treated as disk-full.
 *
 * Outputs actualStartBlock/actualNumBlocks only on success (noErr).
 */
static OSErr BlockFindContig(
	ExtendedVCB *vcb,
	u_int32_t startingBlock,
	u_int32_t minBlocks,
	u_int32_t maxBlocks,
	hfs_block_alloc_flags_t flags,
	u_int32_t *actualStartBlock,
	u_int32_t *actualNumBlocks)
{
	OSErr retval = noErr;
	uint32_t currentStart = startingBlock;
	uint32_t foundStart = 0;        /* candidate run start */
	uint32_t foundCount = 0;        /* candidate run length */
	uint32_t collision_start = 0;   /* first extent that collided with unmapped bytes */
	uint32_t collision_count = 0;
	int err;
	int allowReuse = (flags & HFS_ALLOC_FLUSHTXN);
	Boolean useMetaZone = (flags & HFS_ALLOC_METAZONE);
	int recently_deleted = 0;
	struct hfsmount *hfsmp = VCBTOHFS(vcb);

	if (hfs_kdebug_allocation & HFSDBG_ALLOC_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_FIND_CONTIG_BITMAP | DBG_FUNC_START, startingBlock, minBlocks, maxBlocks, useMetaZone, 0);

	/* Loop until we either find an acceptable run or give up. */
	while ((retval == noErr) && (foundStart == 0) && (foundCount == 0)) {

		retval = BlockFindContiguous(vcb, currentStart, vcb->allocLimit, minBlocks,
			maxBlocks, useMetaZone, true, &foundStart, &foundCount, flags);

		/* Nothing above currentStart: wrap around to the beginning.  With
		 * a summary table the full range is cheap to re-scan; without one
		 * only re-scan the portion below currentStart. */
		if (retval == dskFulErr && currentStart != 0) {
			if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
				retval = BlockFindContiguous(vcb, 1, vcb->allocLimit, minBlocks,
					maxBlocks, useMetaZone, false, &foundStart, &foundCount, flags);
			}
			else {
				retval = BlockFindContiguous(vcb, 1, currentStart, minBlocks,
					maxBlocks, useMetaZone, false, &foundStart, &foundCount, flags);
			}
		}

		if (retval != noErr) {
			goto bailout;
		}

		/* If we came back around to the extent that previously collided
		 * with pending unmaps, either reuse it (caller will flush) or
		 * declare the disk full. */
		if (collision_start) {
			if (extents_overlap (foundStart, foundCount, collision_start, collision_count)) {
				if(allowReuse) {
					foundStart = collision_start;
					foundCount = collision_count;
					goto bailout;
				}
				else {
					retval = dskFulErr;
					goto bailout;
				}
			}
		}

		/* With a journal, check whether the candidate overlaps bytes whose
		 * unmap is still pending; if so remember it and continue searching
		 * past the journal's suggested next start. */
		if (hfsmp->jnl) {
			recently_deleted = 0;
			uint32_t nextStart;
			err = CheckUnmappedBytes (hfsmp, (uint64_t)foundStart,
				(uint64_t) foundCount, &recently_deleted, &nextStart);
			if (err == 0) {
				if(recently_deleted != 0) {
					if (collision_start == 0) {
						collision_start = foundStart;
						collision_count = foundCount;
					}
					recently_deleted = 0;
					currentStart = nextStart;
					foundStart = 0;
					foundCount = 0;
					/* loop again from nextStart */
				}
			}
		}
	}

bailout:
	if (retval == noErr) {
		*actualStartBlock = foundStart;
		*actualNumBlocks = foundCount;
	}

	if (hfs_kdebug_allocation & HFSDBG_ALLOC_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_FIND_CONTIG_BITMAP | DBG_FUNC_END, foundStart, foundCount, retval, 0, 0);

	return retval;
}
/*
 * BlockFindAny
 *
 * Find any run of up to maxBlocks free blocks in [startingBlock,
 * endingBlock).  When the volume keeps a summary table, consult it first
 * for a cheaper starting hint; if the summary reports no free space and
 * the caller trusts it, fail fast with dskFulErr instead of scanning the
 * bitmap.  Otherwise fall through to the bitmap search.
 */
static OSErr BlockFindAny(
	ExtendedVCB *vcb,
	u_int32_t startingBlock,
	register u_int32_t endingBlock,
	u_int32_t maxBlocks,
	hfs_block_alloc_flags_t flags,
	Boolean trustSummary,
	u_int32_t *actualStartBlock,
	u_int32_t *actualNumBlocks)
{
	struct hfsmount *hfsmp = (struct hfsmount *)vcb;
	uint32_t search_start = startingBlock;

	if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
		uint32_t hint;
		OSErr summary_err = hfs_find_summary_free (hfsmp, startingBlock, &hint);
		if (summary_err == 0) {
			/* Summary table suggested a better place to begin. */
			search_start = hint;
		} else if ((summary_err == ENOSPC) && trustSummary) {
			/* Summary says the volume is full; believe it. */
			return dskFulErr;
		}
		/* On any other summary error, just search from the original start. */
	}

	return BlockFindAnyBitmap(vcb, search_start, endingBlock, maxBlocks,
		flags, actualStartBlock, actualNumBlocks);
}
/*
 * BlockFindAnyBitmap
 *
 * Scan the allocation bitmap for the first free block at or after
 * startingBlock, then extend the run forward (up to maxBlocks) while bits
 * remain clear.  Honors the metadata zone (skips it unless useMetaZone),
 * updates the summary table for fully-scanned bitmap blocks, and — when a
 * journal is active and HFS_ALLOC_FLUSHTXN is not set — restarts past any
 * block whose unmap is still pending.
 */
static OSErr BlockFindAnyBitmap(
	ExtendedVCB *vcb,
	u_int32_t startingBlock,
	register u_int32_t endingBlock,
	u_int32_t maxBlocks,
	hfs_block_alloc_flags_t flags,
	u_int32_t *actualStartBlock,
	u_int32_t *actualNumBlocks)
{
	OSErr err;
	register u_int32_t block = 0;       /* current bit (allocation block) under inspection */
	register u_int32_t currentWord;     /* current 32-bit word, native-endian */
	register u_int32_t bitMask;         /* mask selecting the current bit within currentWord */
	register u_int32_t wordsLeft;       /* words remaining in the current bitmap block */
	u_int32_t *buffer = NULL;
	u_int32_t *currCache = NULL;        /* base of the currently-mapped bitmap block */
	uintptr_t blockRef = 0;
	u_int32_t bitsPerBlock;
	u_int32_t wordsPerBlock;
	struct hfsmount *hfsmp = VCBTOHFS(vcb);
	Boolean useMetaZone = (flags & HFS_ALLOC_METAZONE);
	Boolean forceFlush = (flags & HFS_ALLOC_FLUSHTXN);

	if (hfs_kdebug_allocation & HFSDBG_ALLOC_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_ALLOC_ANY_BITMAP | DBG_FUNC_START, startingBlock, endingBlock, maxBlocks, useMetaZone, 0);

restartSearchAny:
	/* Skip over the metadata zone unless explicitly allocating from it. */
	if (!useMetaZone && (vcb->hfs_flags & HFS_METADATA_ZONE)) {
		if (startingBlock <= vcb->hfs_metazone_end) {
			if (endingBlock > (vcb->hfs_metazone_end + 2))
				startingBlock = vcb->hfs_metazone_end + 1;
			else {
				err = dskFulErr;
				goto Exit;
			}
		}
	}

	/* Never ask for more blocks than remain in the search window. */
	if (maxBlocks > (endingBlock - startingBlock)) {
		maxBlocks = endingBlock - startingBlock;
	}

	err = ReadBitmapBlock(vcb, startingBlock, &currCache, &blockRef, flags);
	if (err != noErr) goto Exit;
	buffer = currCache;

	/* Position buffer/bitMask at the word and bit for startingBlock. */
	{
		u_int32_t wordIndexInBlock;

		bitsPerBlock = vcb->vcbVBMIOSize * kBitsPerByte;
		wordsPerBlock = vcb->vcbVBMIOSize / kBytesPerWord;

		wordIndexInBlock = (startingBlock & (bitsPerBlock-1)) / kBitsPerWord;
		buffer += wordIndexInBlock;
		wordsLeft = wordsPerBlock - wordIndexInBlock;
		currentWord = SWAP_BE32 (*buffer);
		bitMask = kHighBitInWordMask >> (startingBlock & kBitsWithinWordMask);
	}

	/*
	 * Phase 1: walk forward until a clear (free) bit is found.
	 */
	uint32_t summary_block_scan = 0;
	block=startingBlock;
	while (block < endingBlock) {
		if ((currentWord & bitMask) == 0)
			break;
		++block;
		bitMask >>= 1;
		if (bitMask == 0) {
			/* Exhausted this word; advance to the next. */
			bitMask = kHighBitInWordMask;
			++buffer;
			if (--wordsLeft == 0) {
				/* Exhausted this bitmap block. */
				buffer = currCache = NULL;
				/* A bitmap block scanned end-to-end with no free bit can be
				 * marked "full" in the summary table. */
				if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
					if (summary_block_scan != 0) {
						uint32_t summary_bit;
						(void) hfs_get_summary_index (hfsmp, summary_block_scan, &summary_bit);
						hfs_set_summary (hfsmp, summary_bit, 1);
					}
				}
				err = ReleaseBitmapBlock(vcb, blockRef, false);
				if (err != noErr) goto Exit;
				if (!useMetaZone) {
					block = NextBitmapBlock(vcb, block);
				}
				if (block >= endingBlock) {
					err = dskFulErr;
					goto Exit;
				}
				err = ReadBitmapBlock(vcb, block, &currCache, &blockRef, flags);
				if (err != noErr) goto Exit;
				buffer = currCache;
				summary_block_scan = block;
				wordsLeft = wordsPerBlock;
			}
			currentWord = SWAP_BE32 (*buffer);
		}
	}

	if (block >= endingBlock) {
		err = dskFulErr;
		goto Exit;
	}

	/* If the free block overlaps a pending unmap, restart past it rather
	 * than reuse blocks the device has not finished trimming. */
	if (hfsmp->jnl != NULL && (forceFlush == false)) {
		int recently_deleted = 0;
		uint32_t nextblk;
		err = CheckUnmappedBytes (hfsmp, (uint64_t) block, 1, &recently_deleted, &nextblk);
		if ((err == 0) && (recently_deleted)) {
			err = ReleaseBitmapBlock(vcb, blockRef, false);
			currCache = NULL;
			if (err != noErr) {
				goto Exit;
			}
			startingBlock = nextblk;
			goto restartSearchAny;
		}
	}

	/*
	 * Phase 2: extend the run while bits stay clear, capped at maxBlocks.
	 */
	*actualStartBlock = block;
	if (block < (endingBlock-maxBlocks)) {
		endingBlock = block + maxBlocks; /* don't look more than maxBlocks ahead */
	}
	while ((currentWord & bitMask) == 0) {
		++block;
		if (block == endingBlock) {
			break;
		}
		bitMask >>= 1;
		if (bitMask == 0) {
			bitMask = kHighBitInWordMask;
			++buffer;
			if (--wordsLeft == 0) {
				buffer = currCache = NULL;
				err = ReleaseBitmapBlock(vcb, blockRef, false);
				if (err != noErr) {
					goto Exit;
				}
				if (!useMetaZone) {
					u_int32_t nextBlock;
					/* If the metazone intervenes, the run cannot continue
					 * contiguously — stop with what we have. */
					nextBlock = NextBitmapBlock(vcb, block);
					if (nextBlock != block) {
						goto Exit;
					}
				}
				if (block >= endingBlock) {
					goto Exit;
				}
				err = ReadBitmapBlock(vcb, block, &currCache, &blockRef, flags);
				if (err != noErr) {
					goto Exit;
				}
				buffer = currCache;
				wordsLeft = wordsPerBlock;
			}
			currentWord = SWAP_BE32 (*buffer);
		}
	}

Exit:
	if (currCache) {
		(void) ReleaseBitmapBlock(vcb, blockRef, false);
		currCache = NULL;
	}

	if (err == noErr) {
		*actualNumBlocks = block - *actualStartBlock;

		/* Sanity: the run must lie entirely below the allocation limit. */
		if ((*actualStartBlock + *actualNumBlocks) > vcb->allocLimit) {
			panic("hfs: BlockFindAnyBitmap: allocation overflow on \"%s\"", vcb->vcbVN);
		}
	}
	else {
		*actualStartBlock = 0;
		*actualNumBlocks = 0;
	}

	if (hfs_kdebug_allocation & HFSDBG_ALLOC_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_ALLOC_ANY_BITMAP | DBG_FUNC_END, err, *actualStartBlock, *actualNumBlocks, 0, 0);

	return err;
}
/*
 * BlockFindKnown
 *
 * Return the first entry of the in-memory free-extent cache, clamped to
 * maxBlocks.  Returns dskFulErr when the cache is empty, EIO (and marks
 * the volume inconsistent) if the cached extent lies beyond allocLimit.
 *
 * Fix: the emptiness check and the read of vcbFreeExt[0] were previously
 * performed under two separate holds of vcbFreeExtLock, leaving a window
 * in which the cache could be drained between check and use.  Both are now
 * done under a single hold of the lock.
 */
static OSErr BlockFindKnown(
	ExtendedVCB *vcb,
	u_int32_t maxBlocks,
	u_int32_t *actualStartBlock,
	u_int32_t *actualNumBlocks)
{
	OSErr err;
	u_int32_t foundBlocks;
	struct hfsmount *hfsmp = VCBTOHFS(vcb);

	if (hfs_kdebug_allocation & HFSDBG_ALLOC_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_ALLOC_FIND_KNOWN | DBG_FUNC_START, 0, 0, maxBlocks, 0, 0);

	hfs_lock_mount (hfsmp);
	lck_spin_lock(&vcb->vcbFreeExtLock);
	if ( vcb->vcbFreeExtCnt == 0 ||
		vcb->vcbFreeExt[0].blockCount == 0) {
		lck_spin_unlock(&vcb->vcbFreeExtLock);
		hfs_unlock_mount(hfsmp);
		if (hfs_kdebug_allocation & HFSDBG_ALLOC_ENABLED)
			KERNEL_DEBUG_CONSTANT(HFSDBG_ALLOC_FIND_KNOWN | DBG_FUNC_END, dskFulErr, *actualStartBlock, *actualNumBlocks, 0, 0);
		return dskFulErr;
	}

	/* Still under the spin lock: snapshot the first cached extent. */
	*actualStartBlock = vcb->vcbFreeExt[0].startBlock;
	foundBlocks = vcb->vcbFreeExt[0].blockCount;
	lck_spin_unlock(&vcb->vcbFreeExtLock);
	hfs_unlock_mount(hfsmp);

	if (foundBlocks > maxBlocks)
		foundBlocks = maxBlocks;
	*actualNumBlocks = foundBlocks;

	if ((*actualStartBlock + *actualNumBlocks) > vcb->allocLimit)
	{
		/* Cached extent beyond the allocation limit: the cache (or the
		 * volume) is corrupt.  Message fixed to name this function and
		 * end with a newline. */
		printf ("hfs: BlockFindKnown() found allocation overflow on \"%s\"\n", vcb->vcbVN);
		hfs_mark_inconsistent(vcb, HFS_INCONSISTENCY_DETECTED);
		err = EIO;
	} else
		err = 0;

	if (hfs_kdebug_allocation & HFSDBG_ALLOC_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_ALLOC_FIND_KNOWN | DBG_FUNC_END, err, *actualStartBlock, *actualNumBlocks, 0, 0);

	return err;
}
/*
 * BlockMarkAllocated
 *
 * Public wrapper: mark a run of blocks allocated in the bitmap with no
 * special allocation flags.
 */
OSErr BlockMarkAllocated(
	ExtendedVCB *vcb,
	u_int32_t startingBlock,
	register u_int32_t numBlocks)
{
	return BlockMarkAllocatedInternal(vcb, startingBlock, numBlocks, 0);
}
/*
 * BlockMarkAllocatedInternal
 *
 * Set the bitmap bits for [startingBlock, startingBlock+numBlocks) to 1
 * (allocated).  Handles the unaligned leading bits, whole words, and the
 * unaligned trailing bits separately, crossing bitmap-block boundaries as
 * needed.  All bitmap writes go through the journal when one is active.
 * For LOCKED/TENTATIVE reservations only the unmap bookkeeping is done —
 * the on-disk bitmap is not touched.
 */
static
OSErr BlockMarkAllocatedInternal (
	ExtendedVCB *vcb,
	u_int32_t startingBlock,
	u_int32_t numBlocks,
	hfs_block_alloc_flags_t flags)
{
	OSErr err;
	register u_int32_t *currentWord;   /* word being modified */
	register u_int32_t wordsLeft;      /* words remaining in this bitmap block */
	register u_int32_t bitMask;        /* mask of bits to set in the current word */
	u_int32_t firstBit;                /* bit offset of startingBlock within its word */
	u_int32_t numBits;                 /* bits handled in the current step */
	u_int32_t *buffer = NULL;
	uintptr_t blockRef = 0;
	u_int32_t bitsPerBlock;
	u_int32_t wordsPerBlock;
	struct hfsmount *hfsmp = VCBTOHFS(vcb);

	if (hfs_kdebug_allocation & HFSDBG_BITMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_MARK_ALLOC_BITMAP | DBG_FUNC_START, startingBlock, numBlocks, flags, 0, 0);

#if DEBUG
	/* Unless committing a reservation, the range must not overlap any
	 * locked reservation. */
	if (!ISSET(flags, HFS_ALLOC_COMMIT)
		|| ISSET(flags, HFS_ALLOC_USE_TENTATIVE)) {
		struct rl_entry *range;
		TAILQ_FOREACH(range, &hfsmp->hfs_reserved_ranges[HFS_LOCKED_BLOCKS], rl_link) {
			hfs_assert(rl_overlap(range, startingBlock,
				startingBlock + numBlocks - 1) == RL_NOOVERLAP);
		}
	}
#endif

	/* If any of these blocks have pending unmaps, force the journal to
	 * flush so the device-level trim completes before reuse. */
	int force_flush = 0;
	if (hfsmp->jnl) {
		uint32_t ignore;
		err = CheckUnmappedBytes (hfsmp, (uint64_t) startingBlock, (uint64_t)numBlocks, &force_flush, &ignore);
		if ((err == 0) && (force_flush)) {
			journal_request_immediate_flush (hfsmp->jnl);
		}
	}

	hfs_unmap_alloc_extent(vcb, startingBlock, numBlocks);

	/* Reservations (locked or tentative) don't touch the on-disk bitmap. */
	if (ISSET(flags, HFS_ALLOC_LOCKED | HFS_ALLOC_TENTATIVE)) {
		err = 0;
		goto Exit;
	}

	err = ReadBitmapBlock(vcb, startingBlock, &buffer, &blockRef,
		HFS_ALLOC_IGNORE_RESERVED);
	if (err != noErr) goto Exit;

	/* Locate the first word to modify within this bitmap block. */
	{
		u_int32_t wordIndexInBlock;

		bitsPerBlock = vcb->vcbVBMIOSize * kBitsPerByte;
		wordsPerBlock = vcb->vcbVBMIOSize / kBytesPerWord;

		wordIndexInBlock = (startingBlock & (bitsPerBlock-1)) / kBitsPerWord;
		currentWord = buffer + wordIndexInBlock;
		wordsLeft = wordsPerBlock - wordIndexInBlock;
	}

	if (hfsmp->jnl) {
		journal_modify_block_start(hfsmp->jnl, (struct buf *)blockRef);
	}

	/* Step 1: partial word at the start of the range. */
	firstBit = startingBlock % kBitsPerWord;
	if (firstBit != 0) {
		bitMask = kAllBitsSetInWord >> firstBit;   /* bits from firstBit down */
		numBits = kBitsPerWord - firstBit;
		if (numBits > numBlocks) {
			numBits = numBlocks;                   /* range ends inside this word */
			bitMask &= ~(kAllBitsSetInWord >> (firstBit + numBits));
		}
#if DEBUG
		if ((*currentWord & SWAP_BE32 (bitMask)) != 0) {
			panic("hfs: BlockMarkAllocatedInternal: blocks already allocated!");
		}
#endif
		*currentWord |= SWAP_BE32 (bitMask);
		numBlocks -= numBits;
		++currentWord;
		--wordsLeft;
	}

	/* Step 2: whole words. */
	bitMask = kAllBitsSetInWord;
	while (numBlocks >= kBitsPerWord) {
		if (wordsLeft == 0) {
			/* Crossed into the next bitmap block; write back and read on. */
			startingBlock += bitsPerBlock;
			buffer = NULL;
			err = ReleaseBitmapBlock(vcb, blockRef, true);
			if (err != noErr) goto Exit;
			err = ReadBitmapBlock(vcb, startingBlock, &buffer, &blockRef,
				HFS_ALLOC_IGNORE_RESERVED);
			if (err != noErr) goto Exit;
			if (hfsmp->jnl) {
				journal_modify_block_start(hfsmp->jnl, (struct buf *)blockRef);
			}
			currentWord = buffer;
			wordsLeft = wordsPerBlock;
		}
#if DEBUG
		if (*currentWord != 0) {
			panic("hfs: BlockMarkAllocatedInternal: blocks already allocated!");
		}
#endif
		*currentWord = SWAP_BE32 (bitMask);
		numBlocks -= kBitsPerWord;
		++currentWord;
		--wordsLeft;
	}

	/* Step 3: partial word at the end of the range. */
	if (numBlocks != 0) {
		bitMask = ~(kAllBitsSetInWord >> numBlocks);  /* top numBlocks bits */
		if (wordsLeft == 0) {
			startingBlock += bitsPerBlock;
			buffer = NULL;
			err = ReleaseBitmapBlock(vcb, blockRef, true);
			if (err != noErr) goto Exit;
			err = ReadBitmapBlock(vcb, startingBlock, &buffer, &blockRef,
				HFS_ALLOC_IGNORE_RESERVED);
			if (err != noErr) goto Exit;
			if (hfsmp->jnl) {
				journal_modify_block_start(hfsmp->jnl, (struct buf *)blockRef);
			}
			currentWord = buffer;
		}
#if DEBUG
		if ((*currentWord & SWAP_BE32 (bitMask)) != 0) {
			panic("hfs: BlockMarkAllocatedInternal: blocks already allocated!");
		}
#endif
		*currentWord |= SWAP_BE32 (bitMask);
	}

Exit:
	if (buffer)
		(void)ReleaseBitmapBlock(vcb, blockRef, true);

	if (hfs_kdebug_allocation & HFSDBG_BITMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_MARK_ALLOC_BITMAP | DBG_FUNC_END, err, 0, 0, 0, 0);

	return err;
}
/*
 * BlockMarkFree
 *
 * Public wrapper: mark a run of blocks free in the bitmap, validating that
 * the bits were previously set (do_validate == true).
 */
OSErr BlockMarkFree(
	ExtendedVCB *vcb,
	u_int32_t startingBlock,
	register u_int32_t numBlocks)
{
	return BlockMarkFreeInternal(vcb, startingBlock, numBlocks, true);
}
/*
 * BlockMarkFreeUnused
 *
 * Clear bitmap bits for a range that is *expected* to be unused, without
 * validation.  Works one bitmap block (vcbVBMIOSize worth of bits) at a
 * time; after the first (possibly unaligned) chunk, each subsequent chunk
 * is cleared only if hfs_isallocated reports at least one set bit in it —
 * avoiding needless writes of already-clear bitmap blocks.
 */
OSErr BlockMarkFreeUnused(ExtendedVCB *vcb, u_int32_t startingBlock, register u_int32_t numBlocks)
{
	int error = 0;
	struct hfsmount *hfsmp = VCBTOHFS(vcb);
	u_int32_t curNumBlocks;
	u_int32_t bitsPerBlock;
	u_int32_t lastBit;

	/* First chunk: from startingBlock up to the next bitmap-block
	 * boundary (or the whole range if it is smaller). */
	bitsPerBlock = hfsmp->vcbVBMIOSize * kBitsPerByte;
	lastBit = ((startingBlock + (bitsPerBlock - 1))/bitsPerBlock) * bitsPerBlock;
	curNumBlocks = lastBit - startingBlock;
	if (curNumBlocks > numBlocks) {
		curNumBlocks = numBlocks;
	}
	/* The first chunk is cleared unconditionally (no validation). */
	error = BlockMarkFreeInternal(vcb, startingBlock, curNumBlocks, false);
	if (error) {
		return error;
	}
	startingBlock += curNumBlocks;
	numBlocks -= curNumBlocks;

	/* Remaining chunks are now bitmap-block aligned. */
	while (numBlocks) {
		if (numBlocks >= bitsPerBlock) {
			curNumBlocks = bitsPerBlock;
		} else {
			curNumBlocks = numBlocks;
		}
		/* Only write chunks that actually have bits set. */
		if (hfs_isallocated(hfsmp, startingBlock, curNumBlocks) == true) {
			error = BlockMarkFreeInternal(vcb, startingBlock, curNumBlocks, false);
			if (error) {
				return error;
			}
		}
		startingBlock += curNumBlocks;
		numBlocks -= curNumBlocks;
	}

	return error;
}
/*
 * BlockMarkFreeInternal
 *
 * Clear the bitmap bits for [startingBlock_in, startingBlock_in +
 * numBlocks_in).  When do_validate is true, every bit cleared must have
 * been set; otherwise the volume is marked inconsistent (or we panic on
 * DEBUG builds).  Additionally grows the range that will be passed to
 * hfs_unmap_free_extent by scanning outward (within the current bitmap
 * block) over adjacent already-free bits, bounded so the unmap never
 * covers a reserved (tentative/locked) range.
 */
static
OSErr BlockMarkFreeInternal(
	ExtendedVCB *vcb,
	u_int32_t startingBlock_in,
	register u_int32_t numBlocks_in,
	Boolean do_validate)
{
	OSErr err;
	u_int32_t startingBlock = startingBlock_in;
	u_int32_t numBlocks = numBlocks_in;
	uint32_t unmapStart = startingBlock_in;   /* start of the (grown) unmap range */
	uint32_t unmapCount = numBlocks_in;       /* length of the (grown) unmap range */
	uint32_t wordIndexInBlock;
	u_int32_t *currentWord;
	u_int32_t wordsLeft;
	u_int32_t bitMask;
	u_int32_t currentBit;
	u_int32_t numBits;
	u_int32_t *buffer = NULL;
	uintptr_t blockRef = 0;
	u_int32_t bitsPerBlock;
	u_int32_t wordsPerBlock;
	struct hfsmount *hfsmp = VCBTOHFS(vcb);

	if (hfs_kdebug_allocation & HFSDBG_BITMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_MARK_FREE_BITMAP | DBG_FUNC_START, startingBlock_in, numBlocks_in, do_validate, 0, 0);

	/* Freeing blocks past totalBlocks is a corruption indicator. */
	if ((do_validate == true) &&
		(startingBlock + numBlocks > vcb->totalBlocks)) {
#if ALLOC_DEBUG || DEBUG
		panic ("BlockMarkFreeInternal() free non-existent blocks at %u (numBlock=%u) on vol %s\n", startingBlock, numBlocks, vcb->vcbVN);
		__builtin_unreachable();
#else
		printf ("hfs: BlockMarkFreeInternal() trying to free non-existent blocks starting at %u (numBlock=%u) on volume %s\n", startingBlock, numBlocks, vcb->vcbVN);
		hfs_mark_inconsistent(vcb, HFS_INCONSISTENCY_DETECTED);
		err = EIO;
		goto Exit;
#endif
	}

	err = ReadBitmapBlock(vcb, startingBlock, &buffer, &blockRef,
		HFS_ALLOC_IGNORE_RESERVED);
	if (err != noErr) goto Exit;

	if (hfsmp->jnl) {
		journal_modify_block_start(hfsmp->jnl, (struct buf *)blockRef);
	}

	/* Bound the outward unmap growth so it never crosses into a reserved
	 * (tentative or locked) range on either side. */
	uint32_t min_unmap = 0, max_unmap = UINT32_MAX;
	struct rl_entry *range;
	for (int i = 0; i < 2; ++i) {
		TAILQ_FOREACH(range, &hfsmp->hfs_reserved_ranges[i], rl_link) {
			if (range->rl_start < startingBlock
				&& range->rl_end >= min_unmap) {
				min_unmap = range->rl_end + 1;
			}
			if (range->rl_end >= startingBlock + numBlocks
				&& range->rl_start < max_unmap) {
				max_unmap = range->rl_start;
			}
		}
	}

	bitsPerBlock = vcb->vcbVBMIOSize * kBitsPerByte;
	wordsPerBlock = vcb->vcbVBMIOSize / kBytesPerWord;
	wordIndexInBlock = (startingBlock & (bitsPerBlock-1)) / kBitsPerWord;

	/*
	 * Grow the unmap range backwards over already-free bits, staying
	 * within this bitmap block (we stop if we back off the start of the
	 * buffer) and above min_unmap.
	 */
	currentWord = buffer + wordIndexInBlock;
	currentBit = startingBlock % kBitsPerWord;
	bitMask = kHighBitInWordMask >> currentBit;
	while (unmapStart > min_unmap) {
		/* Move left one bit; on word underflow step to the prior word. */
		bitMask <<= 1;
		if (bitMask == 0) {
			if (--currentWord < buffer)
				break;
			bitMask = kLowBitInWordMask;
		}
		if (*currentWord & SWAP_BE32(bitMask))
			break;	/* hit an allocated bit */
		--unmapStart;
		++unmapCount;
	}

	/*
	 * Now clear the requested bits: leading partial word, whole words,
	 * trailing partial word — crossing bitmap blocks as needed.
	 */
	currentWord = buffer + wordIndexInBlock;
	wordsLeft = wordsPerBlock - wordIndexInBlock;
	currentBit = startingBlock % kBitsPerWord;
	if (currentBit != 0) {
		bitMask = kAllBitsSetInWord >> currentBit;
		numBits = kBitsPerWord - currentBit;
		if (numBits > numBlocks) {
			numBits = numBlocks;   /* range ends inside this word */
			bitMask &= ~(kAllBitsSetInWord >> (currentBit + numBits));
		}
		if ((do_validate == true) &&
			(*currentWord & SWAP_BE32 (bitMask)) != SWAP_BE32 (bitMask)) {
			goto Corruption;
		}
		*currentWord &= SWAP_BE32 (~bitMask);
		numBlocks -= numBits;
		++currentWord;
		--wordsLeft;
	}

	while (numBlocks >= kBitsPerWord) {
		if (wordsLeft == 0) {
			/* Next bitmap block: flush this one, read the next. */
			startingBlock += bitsPerBlock;
			buffer = NULL;
			err = ReleaseBitmapBlock(vcb, blockRef, true);
			if (err != noErr) goto Exit;
			err = ReadBitmapBlock(vcb, startingBlock, &buffer, &blockRef,
				HFS_ALLOC_IGNORE_RESERVED);
			if (err != noErr) goto Exit;
			if (hfsmp->jnl) {
				journal_modify_block_start(hfsmp->jnl, (struct buf *)blockRef);
			}
			currentWord = buffer;
			wordsLeft = wordsPerBlock;
		}
		if ((do_validate == true) &&
			(*currentWord != SWAP_BE32 (kAllBitsSetInWord))) {
			goto Corruption;
		}
		*currentWord = 0;
		numBlocks -= kBitsPerWord;
		++currentWord;
		--wordsLeft;
	}

	if (numBlocks != 0) {
		bitMask = ~(kAllBitsSetInWord >> numBlocks);   /* top numBlocks bits */
		if (wordsLeft == 0) {
			startingBlock += bitsPerBlock;
			buffer = NULL;
			err = ReleaseBitmapBlock(vcb, blockRef, true);
			if (err != noErr) goto Exit;
			err = ReadBitmapBlock(vcb, startingBlock, &buffer, &blockRef,
				HFS_ALLOC_IGNORE_RESERVED);
			if (err != noErr) goto Exit;
			if (hfsmp->jnl) {
				journal_modify_block_start(hfsmp->jnl, (struct buf *)blockRef);
			}
			currentWord = buffer;
		}
		if ((do_validate == true) &&
			(*currentWord & SWAP_BE32 (bitMask)) != SWAP_BE32 (bitMask)) {
			goto Corruption;
		}
		*currentWord &= SWAP_BE32 (~bitMask);
	}

	/*
	 * Grow the unmap range forwards over already-free bits, staying within
	 * the current bitmap block and below max_unmap.
	 */
	wordIndexInBlock = ((startingBlock_in + numBlocks_in - 1) & (bitsPerBlock-1)) / kBitsPerWord;
	wordsLeft = wordsPerBlock - wordIndexInBlock;
	currentWord = buffer + wordIndexInBlock;
	currentBit = (startingBlock_in + numBlocks_in - 1) % kBitsPerWord;
	bitMask = kHighBitInWordMask >> currentBit;
	while (unmapStart + unmapCount < max_unmap) {
		bitMask >>= 1;
		if (bitMask == 0) {
			if (--wordsLeft == 0)
				break;
			++currentWord;
			bitMask = kHighBitInWordMask;
		}
		if (*currentWord & SWAP_BE32(bitMask))
			break;	/* hit an allocated bit */
		++unmapCount;
	}

Exit:
	if (buffer)
		(void)ReleaseBitmapBlock(vcb, blockRef, true);
	if (err == noErr) {
		hfs_unmap_free_extent(vcb, unmapStart, unmapCount);
	}

	if (hfs_kdebug_allocation & HFSDBG_BITMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_MARK_FREE_BITMAP | DBG_FUNC_END, err, 0, 0, 0, 0);

	return err;

Corruption:
	/* A bit we expected to be set was clear: double-free or bitmap
	 * corruption. */
#if DEBUG
	panic("hfs: BlockMarkFreeInternal: blocks not allocated!");
	__builtin_unreachable();
#else
	printf ("hfs: BlockMarkFreeInternal() trying to free unallocated blocks on volume %s <%u, %u>\n",
		vcb->vcbVN, startingBlock_in, numBlocks_in);
	hfs_mark_inconsistent(vcb, HFS_INCONSISTENCY_DETECTED);
	err = EIO;
	goto Exit;
#endif
}
/*
 * BlockFindContiguous
 *
 * Scan [startingBlock, endingBlock) for a contiguous run of free blocks of
 * at least minBlocks, preferring up to maxBlocks.  The scan alternates two
 * phases per iteration: (1) skip allocated bits to find the first free
 * bit, (2) extend over free bits to measure the run.  Runs that are too
 * short feed the free-extent cache (unless the journal reports pending
 * unmaps over them).  With HFS_ALLOC_TRY_HARD, keeps scanning and returns
 * the best (largest) run seen.  Summary-table hints are used when
 * trustSummary is set, and fully-allocated bitmap blocks are recorded in
 * the summary table as a side effect.
 */
static OSErr BlockFindContiguous(
	ExtendedVCB *vcb,
	u_int32_t startingBlock,
	u_int32_t endingBlock,
	u_int32_t minBlocks,
	u_int32_t maxBlocks,
	Boolean useMetaZone,
	Boolean trustSummary,
	u_int32_t *actualStartBlock,
	u_int32_t *actualNumBlocks,
	hfs_block_alloc_flags_t flags)
{
	OSErr err;
	register u_int32_t currentBlock;   /* bit (allocation block) under inspection */
	u_int32_t firstBlock;              /* start of the current free run */
	u_int32_t stopBlock;               /* last viable place a minBlocks run could begin */
	u_int32_t foundBlocks;             /* length of the current free run */
	u_int32_t *buffer = NULL;
	register u_int32_t *currentWord;
	register u_int32_t bitMask;
	register u_int32_t wordsLeft;
	register u_int32_t tempWord;
	uintptr_t blockRef = 0;
	u_int32_t wordsPerBlock;
	u_int32_t updated_free_extent = 0;
	struct hfsmount *hfsmp = (struct hfsmount*) vcb;
	HFSPlusExtentDescriptor best = { 0, 0 };   /* best run for TRY_HARD */

	if (hfs_kdebug_allocation & HFSDBG_ALLOC_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_BLOCK_FIND_CONTIG | DBG_FUNC_START, startingBlock, endingBlock, minBlocks, maxBlocks, 0);

	/* Skip the metadata zone unless allocating from it. */
	if (!useMetaZone && (vcb->hfs_flags & HFS_METADATA_ZONE)) {
		if (startingBlock <= vcb->hfs_metazone_end) {
			if (endingBlock > (vcb->hfs_metazone_end + 2))
				startingBlock = vcb->hfs_metazone_end + 1;
			else
				goto DiskFull;
		}
	}

	if ((endingBlock - startingBlock) < minBlocks)
	{
		goto DiskFull;
	}

	stopBlock = endingBlock - minBlocks + 1;
	currentBlock = startingBlock;
	firstBlock = 0;

	if (!useMetaZone)
		currentBlock = NextBitmapBlock(vcb, currentBlock);

	/* Ask the summary table for the first bitmap block with free space. */
	if ((trustSummary) && (hfsmp->hfs_flags & HFS_SUMMARY_TABLE)) {
		uint32_t suggestion;
		err = hfs_find_summary_free (hfsmp, currentBlock, &suggestion);
		if (err && err != ENOSPC)
			goto ErrorExit;
		if (err == ENOSPC || suggestion >= stopBlock)
			goto DiskFull;
		currentBlock = suggestion;
	}

	err = ReadBitmapBlock(vcb, currentBlock, &buffer, &blockRef, flags);
	if ( err != noErr ) goto ErrorExit;

	/* Position currentWord at currentBlock's word within the buffer. */
	wordsPerBlock = vcb->vcbVBMIOSize / kBytesPerWord;
	wordsLeft = (currentBlock / kBitsPerWord) & (wordsPerBlock-1);
	currentWord = buffer + wordsLeft;
	wordsLeft = wordsPerBlock - wordsLeft;

	/* Upper bound on free blocks actually available (TRY_HARD early-out).
	 * Tentative reservations count as used unless IGNORE_TENTATIVE. */
	uint32_t remaining = (hfsmp->freeBlocks - hfsmp->lockedBlocks
		- (ISSET(flags, HFS_ALLOC_IGNORE_TENTATIVE)
		? 0 : hfsmp->tentativeBlocks));

	do
	{
		foundBlocks = 0;
		/* Bitmap block fully scanned as allocated; candidate for marking
		 * "full" in the summary table. */
		uint32_t summary_block_scan = 0;

		/*
		 * Phase 1: skip over set (allocated) bits until a clear bit.
		 * First finish any partial word...
		 */
		bitMask = currentBlock & kBitsWithinWordMask;
		if (bitMask)
		{
			tempWord = SWAP_BE32(*currentWord);
			bitMask = kHighBitInWordMask >> bitMask;
			while (tempWord & bitMask)
			{
				bitMask >>= 1;
				++currentBlock;
			}
			/* bitMask nonzero means we found a clear bit inside this word */
			if (bitMask)
				goto FoundUnused;
			++currentWord;
			--wordsLeft;
		}

		/* ...then whole words at a time. */
		while (currentBlock < stopBlock)
		{
			if (wordsLeft == 0)
			{
				/* Advance to the next bitmap block. */
				buffer = NULL;
				if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
					if (summary_block_scan != 0) {
						uint32_t summary_bit;
						(void) hfs_get_summary_index (hfsmp, summary_block_scan, &summary_bit);
						hfs_set_summary (hfsmp, summary_bit, 1);
					}
				}
				err = ReleaseBitmapBlock(vcb, blockRef, false);
				if (err != noErr) goto ErrorExit;
				if (!useMetaZone) {
					currentBlock = NextBitmapBlock(vcb, currentBlock);
					if (currentBlock >= stopBlock) {
						goto LoopExit;
					}
				}
				/* Let the summary table skip fully-allocated regions. */
				if ((trustSummary) && (hfsmp->hfs_flags & HFS_SUMMARY_TABLE)) {
					uint32_t suggestion;
					err = hfs_find_summary_free (hfsmp, currentBlock, &suggestion);
					if (err && err != ENOSPC)
						goto ErrorExit;
					if (err == ENOSPC || suggestion >= stopBlock)
						goto LoopExit;
					currentBlock = suggestion;
				}
				err = ReadBitmapBlock(vcb, currentBlock, &buffer, &blockRef, flags);
				if ( err != noErr ) goto ErrorExit;
				summary_block_scan = currentBlock;
				currentWord = buffer;
				wordsLeft = wordsPerBlock;
			}

			/* Word != 0xFFFFFFFF means it contains at least one clear bit. */
			if ((tempWord = SWAP_BE32(*currentWord)) + 1) {
				bitMask = kHighBitInWordMask;
				while (tempWord & bitMask)
				{
					bitMask >>= 1;
					++currentBlock;
				}
				break;	/* clear bit located */
			}
			currentBlock += kBitsPerWord;
			++currentWord;
			--wordsLeft;
		}

FoundUnused:
		/* No room left for a minBlocks run starting here. */
		if (currentBlock >= stopBlock)
		{
			break;
		}

		/*
		 * Phase 2: measure the free run starting at currentBlock.
		 * First finish the partial word...
		 */
		firstBlock = currentBlock;
		bitMask = currentBlock & kBitsWithinWordMask;
		if (bitMask)
		{
			tempWord = SWAP_BE32(*currentWord);
			bitMask = kHighBitInWordMask >> bitMask;
			while (bitMask && !(tempWord & bitMask))
			{
				bitMask >>= 1;
				++currentBlock;
			}
			/* bitMask nonzero means we hit a set bit inside this word */
			if (bitMask)
				goto FoundUsed;
			++currentWord;
			--wordsLeft;
		}

		/* ...then whole words. */
		while (currentBlock < endingBlock)
		{
			if (wordsLeft == 0)
			{
				buffer = NULL;
				err = ReleaseBitmapBlock(vcb, blockRef, false);
				if (err != noErr) goto ErrorExit;
				if (!useMetaZone) {
					u_int32_t nextBlock;
					/* The metazone interrupts the run; stop with what we have. */
					nextBlock = NextBitmapBlock(vcb, currentBlock);
					if (nextBlock != currentBlock) {
						goto LoopExit;
					}
				}
				err = ReadBitmapBlock(vcb, currentBlock, &buffer, &blockRef, flags);
				if ( err != noErr ) goto ErrorExit;
				currentWord = buffer;
				wordsLeft = wordsPerBlock;
			}

			/* Nonzero word terminates the free run. */
			if ((tempWord = SWAP_BE32(*currentWord)) != 0)
			{
				bitMask = kHighBitInWordMask;
				while (!(tempWord & bitMask))
				{
					bitMask >>= 1;
					++currentBlock;
				}
				break;	/* set bit located */
			}
			currentBlock += kBitsPerWord;
			++currentWord;
			--wordsLeft;

			/* Already long enough; no need to keep measuring. */
			if ((currentBlock - firstBlock) >= maxBlocks)
				break;
		}

FoundUsed:
		/* Clamp run to the search window and maxBlocks. */
		if (currentBlock > endingBlock)
			currentBlock = endingBlock;
		foundBlocks = currentBlock - firstBlock;
		if (foundBlocks > maxBlocks)
			foundBlocks = maxBlocks;

		/* Track how much genuinely free space could remain; finding more
		 * than `remaining` implies stale counters, so stop trusting it. */
		if (remaining) {
			if (foundBlocks > remaining) {
				hfs_debug("hfs: found more blocks than are indicated free!\n");
				remaining = UINT32_MAX;
			} else
				remaining -= foundBlocks;
		}

		if (ISSET(flags, HFS_ALLOC_TRY_HARD)) {
			/* Remember the best run; stop early if nothing larger can exist. */
			if (foundBlocks > best.blockCount) {
				best.startBlock = firstBlock;
				best.blockCount = foundBlocks;
			}
			if (foundBlocks >= maxBlocks || best.blockCount >= remaining)
				break;
		} else if (foundBlocks >= minBlocks)
			break;	/* good enough */

		/* Run too short.  Before caching it as a free extent, carve out any
		 * overlap with tentative reservations (when they were ignored by
		 * the bitmap overlay). */
		if (ISSET(flags, HFS_ALLOC_IGNORE_TENTATIVE)) {
			struct rl_entry free_extent = rl_make(firstBlock, firstBlock + foundBlocks - 1);
			struct rl_entry *range;;
			TAILQ_FOREACH(range, &hfsmp->hfs_reserved_ranges[HFS_TENTATIVE_BLOCKS], rl_link) {
				rl_subtract(&free_extent, range);
				if (rl_len(range) == 0)
					break;
			}
			firstBlock = free_extent.rl_start;
			foundBlocks = rl_len(&free_extent);
		}

		if (foundBlocks) {
			if (hfsmp->jnl == NULL) {
				updated_free_extent = add_free_extent_cache(vcb, firstBlock, foundBlocks);
			}
			else {
				/* Don't cache extents with pending unmaps. */
				int recently_deleted = 0;
				uint32_t nextblock;
				err = CheckUnmappedBytes(hfsmp, (uint64_t)firstBlock,
					(uint64_t)foundBlocks, &recently_deleted, &nextblock);
				if ((err) || (recently_deleted == 0)) {
					updated_free_extent = add_free_extent_cache(vcb, firstBlock, foundBlocks);
				}
			}
		}
	} while (currentBlock < stopBlock);
LoopExit:

	if (ISSET(flags, HFS_ALLOC_TRY_HARD)) {
		firstBlock = best.startBlock;
		foundBlocks = best.blockCount;
	}

	if (foundBlocks < minBlocks)
	{
DiskFull:
		err = dskFulErr;
ErrorExit:
		*actualStartBlock = 0;
		*actualNumBlocks = 0;
	}
	else
	{
		err = noErr;
		*actualStartBlock = firstBlock;
		*actualNumBlocks = foundBlocks;
		/* Sanity: result must lie below the allocation limit. */
		if ((firstBlock + foundBlocks) > vcb->allocLimit) {
			panic("hfs: blk allocation overflow on \"%s\" sb:0x%08x eb:0x%08x cb:0x%08x fb:0x%08x stop:0x%08x min:0x%08x found:0x%08x",
				vcb->vcbVN, startingBlock, endingBlock, currentBlock,
				firstBlock, stopBlock, minBlocks, foundBlocks);
		}
	}

	/* On sparse devices, pull nextAllocation/sparseAllocation back to the
	 * lowest cached free extent so future allocations stay dense. */
	if (updated_free_extent && (vcb->hfs_flags & HFS_HAS_SPARSE_DEVICE)) {
		int i;
		u_int32_t min_start = vcb->totalBlocks;

		lck_spin_lock(&vcb->vcbFreeExtLock);
		for(i=0; i < (int)vcb->vcbFreeExtCnt; i++) {
			if (vcb->vcbFreeExt[i].startBlock < min_start) {
				min_start = vcb->vcbFreeExt[i].startBlock;
			}
		}
		lck_spin_unlock(&vcb->vcbFreeExtLock);
		if (min_start != vcb->totalBlocks) {
			if (min_start < vcb->nextAllocation) {
				vcb->nextAllocation = min_start;
			}
			if (min_start < vcb->sparseAllocation) {
				vcb->sparseAllocation = min_start;
			}
		}
	}

	if (buffer)
		(void) ReleaseBitmapBlock(vcb, blockRef, false);

	if (hfs_kdebug_allocation & HFSDBG_ALLOC_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_BLOCK_FIND_CONTIG | DBG_FUNC_END, err, *actualStartBlock, *actualNumBlocks, 0, 0);

	return err;
}
/*
 * num_bits_set
 *
 * Count the set bits in a 32-bit word using Kernighan's method: each
 * iteration clears the lowest set bit, so the loop runs once per set bit.
 */
static int num_bits_set(u_int32_t num)
{
	int count = 0;

	while (num != 0) {
		num &= num - 1;
		++count;
	}

	return count;
}
/*
 * hfs_isallocated_internal
 *
 * Examine the bitmap bits for [startingBlock, startingBlock+numBlocks).
 * With stop_on_first, *allocCount is set to 1 as soon as any allocated bit
 * is seen (an "is any allocated?" query); otherwise the total number of
 * set bits in the range is counted.  Reads the bitmap with tentative
 * reservations ignored, so only locked reservations and on-disk
 * allocations are visible.
 *
 * Returns 0 on success, or the error from reading the bitmap.
 */
static int
hfs_isallocated_internal(struct hfsmount *hfsmp, u_int32_t startingBlock,
	u_int32_t numBlocks, Boolean stop_on_first, u_int32_t *allocCount)
{
	u_int32_t *currentWord;   /* word under inspection */
	u_int32_t wordsLeft;      /* words remaining in this bitmap block */
	u_int32_t bitMask;        /* bits of interest within currentWord */
	u_int32_t firstBit;       /* bit offset of startingBlock in its word */
	u_int32_t numBits;        /* bits handled in the current step */
	u_int32_t *buffer = NULL;
	uintptr_t blockRef;
	u_int32_t bitsPerBlock;
	u_int32_t wordsPerBlock;
	u_int32_t blockCount = 0;
	int error;

	if (hfs_kdebug_allocation & HFSDBG_BITMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_IS_ALLOCATED | DBG_FUNC_START, startingBlock, numBlocks, stop_on_first, 0, 0);

	error = ReadBitmapBlock(hfsmp, startingBlock, &buffer, &blockRef,
		HFS_ALLOC_IGNORE_TENTATIVE);
	if (error)
		goto JustReturn;

	/* Locate the first word within the bitmap block. */
	{
		u_int32_t wordIndexInBlock;

		bitsPerBlock = hfsmp->vcbVBMIOSize * kBitsPerByte;
		wordsPerBlock = hfsmp->vcbVBMIOSize / kBytesPerWord;

		wordIndexInBlock = (startingBlock & (bitsPerBlock-1)) / kBitsPerWord;
		currentWord = buffer + wordIndexInBlock;
		wordsLeft = wordsPerBlock - wordIndexInBlock;
	}

	/* Step 1: partial word at the start of the range. */
	firstBit = startingBlock % kBitsPerWord;
	if (firstBit != 0) {
		bitMask = kAllBitsSetInWord >> firstBit;
		numBits = kBitsPerWord - firstBit;
		if (numBits > numBlocks) {
			numBits = numBlocks;   /* range ends inside this word */
			bitMask &= ~(kAllBitsSetInWord >> (firstBit + numBits));
		}
		if ((*currentWord & SWAP_BE32 (bitMask)) != 0) {
			if (stop_on_first) {
				blockCount = 1;
				goto Exit;
			}
			blockCount += num_bits_set(*currentWord & SWAP_BE32 (bitMask));
		}
		numBlocks -= numBits;
		++currentWord;
		--wordsLeft;
	}

	/* Step 2: whole words. */
	while (numBlocks >= kBitsPerWord) {
		if (wordsLeft == 0) {
			/* Next bitmap block. */
			startingBlock += bitsPerBlock;
			buffer = NULL;
			error = ReleaseBitmapBlock(hfsmp, blockRef, false);
			if (error) goto Exit;
			error = ReadBitmapBlock(hfsmp, startingBlock, &buffer, &blockRef,
				HFS_ALLOC_IGNORE_TENTATIVE);
			if (error) goto Exit;
			currentWord = buffer;
			wordsLeft = wordsPerBlock;
		}
		if (*currentWord != 0) {
			if (stop_on_first) {
				blockCount = 1;
				goto Exit;
			}
			blockCount += num_bits_set(*currentWord);
		}
		numBlocks -= kBitsPerWord;
		++currentWord;
		--wordsLeft;
	}

	/* Step 3: partial word at the end of the range. */
	if (numBlocks != 0) {
		bitMask = ~(kAllBitsSetInWord >> numBlocks);   /* top numBlocks bits */
		if (wordsLeft == 0) {
			startingBlock += bitsPerBlock;
			buffer = NULL;
			error = ReleaseBitmapBlock(hfsmp, blockRef, false);
			if (error) goto Exit;
			error = ReadBitmapBlock(hfsmp, startingBlock, &buffer, &blockRef,
				HFS_ALLOC_IGNORE_TENTATIVE);
			if (error) goto Exit;
			currentWord = buffer;
		}
		if ((*currentWord & SWAP_BE32 (bitMask)) != 0) {
			if (stop_on_first) {
				blockCount = 1;
				goto Exit;
			}
			blockCount += num_bits_set(*currentWord & SWAP_BE32 (bitMask));
		}
	}
Exit:
	if (buffer) {
		(void)ReleaseBitmapBlock(hfsmp, blockRef, false);
	}
	if (allocCount) {
		*allocCount = blockCount;
	}

JustReturn:
	if (hfs_kdebug_allocation & HFSDBG_BITMAP_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_IS_ALLOCATED | DBG_FUNC_END, error, 0, blockCount, 0, 0);

	return (error);
}
/*
 * hfs_count_allocated
 *
 * Count how many blocks in [startBlock, startBlock+numBlocks) are marked
 * allocated; result in *allocCount.  Returns 0 or a bitmap-read error.
 */
int
hfs_count_allocated(struct hfsmount *hfsmp, u_int32_t startBlock,
	u_int32_t numBlocks, u_int32_t *allocCount)
{
	return hfs_isallocated_internal(hfsmp, startBlock, numBlocks, false, allocCount);
}
/*
 * hfs_isallocated
 *
 * Report whether any block in [startingBlock, startingBlock+numBlocks) is
 * marked allocated.  Returns nonzero (1) if the query itself fails, on the
 * conservative assumption that the blocks may be in use; otherwise returns
 * the internal stop-on-first result (1 if any bit set, 0 if all free).
 */
int
hfs_isallocated(struct hfsmount *hfsmp, u_int32_t startingBlock, u_int32_t numBlocks)
{
	u_int32_t anySet = 0;

	if (hfs_isallocated_internal(hfsmp, startingBlock, numBlocks, true, &anySet) != 0) {
		/* Treat a read failure as "allocated" to stay safe. */
		return 1;
	}
	return anySet;
}
/*
 * The red-black tree allocator is no longer part of this code base;
 * always report it as inactive.
 */
int
hfs_isrbtree_active(struct hfsmount *hfsmp)
{
	(void)hfsmp; /* unused */
	return 0;
}
/*
 * Query the in-memory summary table for the summary region containing
 * "allocblock".  *freeblocks is set to 1 if that region may contain
 * free blocks, 0 if the summary marks it fully allocated.  Returns 0
 * on success, EINVAL if vcbVBMIOSize is unset or the block is out of
 * range.  *freeblocks is untouched when no summary table exists.
 */
static int hfs_check_summary (struct hfsmount *hfsmp, uint32_t allocblock, uint32_t *freeblocks) {
	if (hfsmp->vcbVBMIOSize == 0) {
		return EINVAL;
	}

	if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
		uint32_t sumbit;

		if (hfs_get_summary_index (hfsmp, allocblock, &sumbit)) {
			*freeblocks = 0;
			return EINVAL;
		}

		/* A set summary bit means "no free blocks in this region". */
		uint8_t sumbyte = hfsmp->hfs_summary_table[sumbit / kBitsPerByte];
		*freeblocks = (sumbyte & (1 << (sumbit % kBitsPerByte))) ? 0 : 1;
	}
	return 0;
}
#if 0
/*
 * Compute the first allocation block covered by the summary region
 * following the one containing "block".  Currently compiled out.
 *
 * NOTE(review): "next_offset = start_offset++" stores the *pre*-increment
 * value, so next_offset addresses the same summary region as "block";
 * "start_offset + 1" was presumably intended — confirm before ever
 * enabling this code.
 */
static int
hfs_get_next_summary (struct hfsmount *hfsmp, uint32_t block, uint32_t *newblock) {
u_int32_t bits_per_iosize = hfsmp->vcbVBMIOSize * kBitsPerByte;
u_int32_t start_offset;
u_int32_t next_offset;
int err = EINVAL;
if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
if ((err = hfs_get_summary_index(hfsmp, block, &start_offset))) {
return err;
}
/* See NOTE(review) above: post-increment keeps the old value. */
next_offset = start_offset++;
if ((start_offset >= hfsmp->hfs_summary_size) || (next_offset >= hfsmp->hfs_summary_size)) {
/* Already at the last summary region. */
return EINVAL;
}
/* Translate the summary bit back to its first allocation block. */
*newblock = next_offset * bits_per_iosize;
if (*newblock >= hfsmp->totalBlocks) {
return EINVAL;
}
err = 0;
}
return err;
}
#endif
/*
 * Clear ("may contain free blocks") every summary bit covering the
 * block run [start_blk, start_blk + length - 1].  Returns 0 on
 * success, an errno if a summary index cannot be computed, or EINVAL
 * unchanged when the volume has no summary table.
 */
static int hfs_release_summary(struct hfsmount *hfsmp, uint32_t start_blk, uint32_t length) {
	int err = EINVAL;
	uint32_t end_blk = (start_blk + length) - 1;

	if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
		uint32_t first_bit;
		uint32_t last_bit;

		err = hfs_get_summary_index (hfsmp, start_blk, &first_bit);
		if (err) {
			goto release_err;
		}
		err = hfs_get_summary_index (hfsmp, end_blk, &last_bit);
		if (err) {
			goto release_err;
		}

		if (ALLOC_DEBUG && (first_bit > last_bit)) {
			panic ("HFS: start > end!, %d %d ", first_bit, last_bit);
		}

		/* Clear each covered summary bit; 0 == possibly-free region. */
		for (uint32_t cur = first_bit; cur <= last_bit; cur++) {
			err = hfs_set_summary (hfsmp, cur, 0);
		}
	}

release_err:
	return err;
}
/*
 * Given a starting allocation block, find the first block at or after
 * it whose summary region may contain free blocks.  On success the
 * candidate block is stored in *newblock and 0 is returned; ENOSPC is
 * returned when no candidate region exists below the allocation
 * limit.  Requires an initialized summary table.
 */
int hfs_find_summary_free (struct hfsmount *hfsmp, uint32_t block, uint32_t *newblock) {
int err = ENOSPC;
uint32_t bit_index = 0;
/* Doubles as the success flag: forces err = 0 at summary_exit. */
uint32_t maybe_has_blocks = 0;
if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
uint32_t byte_index;
uint8_t curbyte;
uint8_t bit_in_byte;
uint32_t summary_cap;
/* Never consider summary regions past the allocation limit. */
err = hfs_get_summary_index (hfsmp, hfsmp->allocLimit - 1, &summary_cap);
if (err) {
goto summary_exit;
}
/* Fast path: the region containing "block" itself may have space. */
err = hfs_check_summary (hfsmp, block, &maybe_has_blocks);
if (err) {
goto summary_exit;
}
if (maybe_has_blocks) {
*newblock = block;
goto summary_exit;
}
maybe_has_blocks = 0;
/* Otherwise walk the summary bits forward looking for a clear one. */
err = hfs_get_summary_index (hfsmp, block, &bit_index);
if (err) {
goto summary_exit;
}
while (bit_index <= summary_cap) {
byte_index = bit_index / kBitsPerByte;
curbyte = hfsmp->hfs_summary_table[byte_index];
bit_in_byte = bit_index % kBitsPerByte;
if (curbyte & (1 << bit_in_byte)) {
/* Region fully allocated; keep scanning. */
bit_index++;
}
else {
/* Clear bit: translate it back to an allocation block. */
err = hfs_get_summary_allocblock (hfsmp, bit_index, newblock);
if (err) {
goto summary_exit;
}
maybe_has_blocks = 1;
break;
}
}
if (maybe_has_blocks == 0) {
err = ENOSPC;
}
}
summary_exit:
/* A found candidate always reports success. */
if (maybe_has_blocks) {
err = 0;
}
return err;
}
/*
 * Convert a summary table bit number into the first allocation block
 * it covers, stored in *alloc.  Returns EINVAL when that block would
 * lie beyond the end of the volume.
 */
int hfs_get_summary_allocblock (struct hfsmount *hfsmp, uint32_t
		summarybit, uint32_t *alloc) {
	/* Each summary bit covers one bitmap I/O's worth of block bits. */
	uint32_t blocks_per_summary = hfsmp->vcbVBMIOSize * kBitsPerByte;
	uint32_t first_block = summarybit * blocks_per_summary;

	if (first_block >= hfsmp->totalBlocks) {
		return EINVAL;
	}

	*alloc = first_block;
	return 0;
}
/*
 * Set or clear summary bit "summarybit": inuse != 0 marks the region
 * fully allocated, 0 marks it as possibly containing free blocks.
 * Returns 0 on success, EINVAL when vcbVBMIOSize is unset; a silent
 * no-op (returning 0) when there is no summary table.
 */
static int hfs_set_summary (struct hfsmount *hfsmp, uint32_t summarybit, uint32_t inuse) {
	if (hfsmp->vcbVBMIOSize == 0) {
		return EINVAL;
	}

	if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
		if (ALLOC_DEBUG && (hfsmp->hfs_summary_table == NULL)) {
			panic ("hfs_set_summary: no table for %p ", hfsmp);
		}

		uint32_t idx = summarybit / kBitsPerByte;
		uint8_t mask = (uint8_t)(1 << (summarybit % kBitsPerByte));

		if (inuse) {
			hfsmp->hfs_summary_table[idx] |= mask;
		} else {
			hfsmp->hfs_summary_table[idx] &= ~mask;
		}
	}
	return 0;
}
/*
 * Map allocation block "block" to its summary table bit, stored in
 * *index.  Returns EINVAL when there is no summary table, the block
 * is out of range, or vcbVBMIOSize is unset.
 */
static int hfs_get_summary_index (struct hfsmount *hfsmp, uint32_t block, uint32_t* index) {
	if ((hfsmp->hfs_flags & HFS_SUMMARY_TABLE) == 0) {
		return EINVAL;
	}
	if (block >= hfsmp->totalBlocks || hfsmp->vcbVBMIOSize == 0) {
		return EINVAL;
	}

	/* One summary bit per vcbVBMIOSize bytes of bitmap (8 bits each). */
	*index = block / (hfsmp->vcbVBMIOSize * kBitsPerByte);
	return 0;
}
int
hfs_init_summary (struct hfsmount *hfsmp) {
uint32_t summary_size;
uint32_t summary_size_bytes;
uint8_t *summary_table;
if (hfsmp->hfs_allocation_cp == NULL) {
if (ALLOC_DEBUG) {
printf("hfs: summary table cannot progress without a bitmap cnode! \n");
}
return EINVAL;
}
if (hfsmp->blockSize < HFS_MIN_SUMMARY_BLOCKSIZE) {
printf("hfs: summary table not allowed on FS with block size of %d\n", hfsmp->blockSize);
return EINVAL;
}
summary_size = hfsmp->hfs_allocation_cp->c_blocks;
if (ALLOC_DEBUG) {
printf("HFS Summary Table Initialization: Bitmap %u blocks\n",
hfsmp->hfs_allocation_cp->c_blocks);
}
if (hfsmp->blockSize != hfsmp->vcbVBMIOSize) {
uint64_t lrg_size = (uint64_t) hfsmp->hfs_allocation_cp->c_blocks * (uint64_t) hfsmp->blockSize;
lrg_size = lrg_size / (uint64_t)hfsmp->vcbVBMIOSize;
summary_size = (uint32_t) lrg_size;
}
summary_size_bytes = summary_size / kBitsPerByte;
summary_size_bytes++;
if (ALLOC_DEBUG) {
printf("HFS Summary Table: vcbVBMIOSize %d summary bits %d \n", hfsmp->vcbVBMIOSize, summary_size);
printf("HFS Summary Table Size (in bytes) %d \n", summary_size_bytes);
}
hfsmp->hfs_summary_size = summary_size;
hfsmp->hfs_summary_bytes = summary_size_bytes;
summary_table = hfs_mallocz(summary_size_bytes);
hfsmp->hfs_flags |= HFS_SUMMARY_TABLE;
hfsmp->hfs_summary_table = summary_table;
if (ALLOC_DEBUG) {
if (hfsmp->hfs_summary_table == NULL) {
panic ("HFS Summary Init: no table for %p\n", hfsmp);
}
}
return 0;
}
static int hfs_rebuild_summary (struct hfsmount *hfsmp) {
uint32_t new_summary_size;
new_summary_size = hfsmp->hfs_allocation_cp->c_blocks;
if (ALLOC_DEBUG) {
printf("HFS Summary Table Re-init: bitmap %u blocks\n", new_summary_size);
}
if (hfsmp->blockSize != hfsmp->vcbVBMIOSize) {
uint64_t lrg_size = (uint64_t) hfsmp->hfs_allocation_cp->c_blocks * (uint64_t) hfsmp->blockSize;
lrg_size = lrg_size / (uint64_t)hfsmp->vcbVBMIOSize;
new_summary_size = (uint32_t) lrg_size;
}
if (new_summary_size != hfsmp->hfs_summary_size) {
uint32_t summarybytes = new_summary_size / kBitsPerByte;
uint32_t copysize;
uint8_t *newtable;
summarybytes++;
if (ALLOC_DEBUG) {
printf("HFS Summary Table: vcbVBMIOSize %d summary bits %d \n", hfsmp->vcbVBMIOSize, new_summary_size);
printf("HFS Summary Table Size (in bytes) %d \n", summarybytes);
}
newtable = hfs_mallocz(summarybytes);
copysize = hfsmp->hfs_summary_bytes;
if (summarybytes < hfsmp->hfs_summary_bytes) {
copysize = summarybytes;
}
memcpy (newtable, hfsmp->hfs_summary_table, copysize);
hfs_free(hfsmp->hfs_summary_table, hfsmp->hfs_summary_bytes);
hfsmp->hfs_summary_table = newtable;
hfsmp->hfs_summary_size = new_summary_size;
hfsmp->hfs_summary_bytes = summarybytes;
}
return 0;
}
#if ALLOC_DEBUG
void hfs_validate_summary (struct hfsmount *hfsmp) {
uint32_t i;
int err;
if (hfsmp->hfs_summary_table == NULL) {
panic ("HFS Summary: No HFS summary table!");
}
if (hfsmp->hfs_summary_size == 0 || hfsmp->hfs_summary_size > 131080) {
panic("HFS Summary: Size is bad! %d", hfsmp->hfs_summary_size);
}
if (hfsmp->vcbVBMIOSize == 0) {
panic("HFS Summary: no VCB VBM IO Size !");
}
printf("hfs: summary validation beginning on %s\n", hfsmp->vcbVN);
printf("hfs: summary validation %d summary bits, %d summary blocks\n", hfsmp->hfs_summary_size, hfsmp->totalBlocks);
for (i = 0; i < hfsmp->hfs_summary_size ; i++) {
uint32_t bits_per_iosize = hfsmp->vcbVBMIOSize * kBitsPerByte;
uint32_t byte_offset = hfsmp->vcbVBMIOSize * i;
uint32_t alloc_block = i * bits_per_iosize;
uint32_t *block_data;
struct buf *bp;
int counter;
int counter_max;
int saw_free_bits = 0;
if ((err = ReadBitmapRange (hfsmp, byte_offset, hfsmp->vcbVBMIOSize, &block_data, &bp))) {
panic ("HFS Summary: error (%d) in ReadBitmapRange!", err);
}
uint32_t maybe_has_free_blocks;
err = hfs_check_summary (hfsmp, alloc_block, &maybe_has_free_blocks);
if (err) {
panic ("HFS Summary: hfs_check_summary returned error (%d) ", err);
}
counter_max = hfsmp->vcbVBMIOSize / kBytesPerWord;
for (counter = 0; counter < counter_max; counter++) {
uint32_t word = block_data[counter];
if (word != kAllBitsSetInWord) {
if (maybe_has_free_blocks) {
saw_free_bits = 1;
break;
}
else {
panic ("HFS Summary: hfs_check_summary saw free bits!");
}
}
}
if (maybe_has_free_blocks && (saw_free_bits == 0)) {
panic ("HFS Summary: did not see free bits !");
}
if ((err = ReleaseScanBitmapRange (bp))) {
panic ("HFS Summary: Error (%d) in ReleaseScanBitmapRange", err);
}
}
printf("hfs: summary validation completed successfully on %s\n", hfsmp->vcbVN);
return;
}
#endif
/*
 * Scan one chunk of the allocation bitmap starting at bit "startbit":
 *  - issue TRIM/unmap requests for runs of free blocks (r/w volumes),
 *  - seed the free extent cache with those runs,
 *  - rebuild the summary table bits covering the scanned range.
 * On success, *bitToScan is advanced to the first unscanned bit.
 *
 * Fix: the original returned from three error paths after
 * ReadBitmapRange had succeeded without releasing the bitmap buffer,
 * leaking the buf; each such path now calls ReleaseScanBitmapRange
 * before returning.
 */
static int hfs_alloc_scan_range(struct hfsmount *hfsmp, u_int32_t startbit,
		u_int32_t *bitToScan, struct jnl_trim_list *list) {
	int error;
	int readwrite = 1;
	u_int32_t curAllocBlock;
	struct buf *blockRef = NULL;
	u_int32_t *buffer = NULL;
	u_int32_t free_offset = 0;    /* first block of the current free run */
	u_int32_t size = 0;           /* length of the current free run */
	u_int32_t iosize = 0;
	u_int32_t byte_off;
	u_int32_t completed_size;
	u_int32_t last_bitmap_block;
	u_int32_t current_word;
	u_int32_t word_index = 0;
	uint32_t summary_bit = 0;
	uint32_t saw_free_blocks = 0; /* free bit seen in current summary region */
	uint32_t last_marked = 0;     /* last summary bit we updated */

	/* Never issue unmaps against a read-only volume. */
	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		readwrite = 0;
	}

	error = hfs_scan_range_size (hfsmp, startbit, &iosize);
	if (error) {
		if (ALLOC_DEBUG) {
			panic ("hfs_alloc_scan_range: hfs_scan_range_size error %d\n", error);
		}
		return error;
	}

	if (iosize < hfsmp->vcbVBMIOSize) {
		if (ALLOC_DEBUG) {
			panic ("hfs_alloc_scan_range: iosize too small! (iosize %d)\n", iosize);
		}
		return EINVAL;
	}

	byte_off = startbit / kBitsPerByte;
	error = ReadBitmapRange(hfsmp, byte_off, iosize, &buffer, &blockRef);
	if (error) {
		if (ALLOC_DEBUG) {
			panic ("hfs_alloc_scan_range: start %d iosize %d ReadBitmapRange error %d\n", startbit, iosize, error);
		}
		return error;
	}

	/* Clamp the scan to the actual amount of bitmap read and to the volume. */
	completed_size = buf_count(blockRef);
	last_bitmap_block = completed_size * kBitsPerByte;
	last_bitmap_block = last_bitmap_block + startbit;
	if (last_bitmap_block > hfsmp->totalBlocks) {
		last_bitmap_block = hfsmp->totalBlocks;
	}

	curAllocBlock = startbit;
	word_index = 0;
	size = 0;

	if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
		if (hfs_get_summary_index (hfsmp, startbit, &summary_bit)) {
			error = EINVAL;
			if (ALLOC_DEBUG) {
				panic ("hfs_alloc_scan_range: Could not acquire summary index for %u", startbit);
			}
			/* Fix: do not leak the bitmap buffer on this error path. */
			ReleaseScanBitmapRange(blockRef);
			return error;
		}
	}

	saw_free_blocks = 0;

	while (curAllocBlock < last_bitmap_block) {
		u_int32_t bit;

		/* Keep the summary table in sync as we cross region boundaries. */
		if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
			if (ALLOC_DEBUG) {
				if (hfsmp->hfs_summary_table == NULL) {
					panic ("hfs_alloc_scan_range: no summary table!");
				}
			}
			uint32_t temp_summary;
			error = hfs_get_summary_index (hfsmp, curAllocBlock, &temp_summary);
			if (error) {
				if (ALLOC_DEBUG) {
					panic ("hfs_alloc_scan_range: could not get summary index for %u", curAllocBlock);
				}
				/* Fix: release the bitmap buffer before bailing out. */
				ReleaseScanBitmapRange(blockRef);
				return EINVAL;
			}
			if (ALLOC_DEBUG) {
				if (temp_summary < summary_bit) {
					panic ("hfs_alloc_scan_range: backwards summary bit?\n");
				}
			}
			/*
			 * Crossed into a new summary region: record whether the
			 * one we just finished contained any free blocks.
			 */
			if (temp_summary > summary_bit) {
				if (saw_free_blocks == 0) {
					hfs_set_summary (hfsmp, summary_bit, 1);
				}
				else {
					hfs_set_summary (hfsmp, summary_bit, 0);
				}
				last_marked = summary_bit;
				saw_free_blocks = 0;
				summary_bit = temp_summary;
			}
		}

		current_word = SWAP_BE32(buffer[word_index]);
		/* Walk the 32 bits of this word, high bit first. */
		for (bit = 0 ; bit < kBitsPerWord ; bit++, curAllocBlock++) {
			if (curAllocBlock >= last_bitmap_block) {
				break;
			}
			u_int32_t allocated = (current_word & (kHighBitInWordMask >> bit));
			if (allocated) {
				/* End of a free run: unmap it and cache it. */
				if (size != 0) {
					if (readwrite) {
						hfs_track_unmap_blocks (hfsmp, free_offset, size, list);
					}
					add_free_extent_cache (hfsmp, free_offset, size);
					size = 0;
					free_offset = 0;
				}
			}
			else {
				/* Free block: extend (or start) the current free run. */
				size++;
				if (free_offset == 0) {
					free_offset = curAllocBlock;
				}
				if (saw_free_blocks == 0) {
					saw_free_blocks = 1;
				}
			}
		}
		if (curAllocBlock < last_bitmap_block) {
			word_index++;
		}
	}

	/* Mark the summary bit covering the tail of the scanned range. */
	if ((curAllocBlock >= last_bitmap_block) &&
			(hfsmp->hfs_flags & HFS_SUMMARY_TABLE)) {
		uint32_t temp_summary;
		uint32_t temp_block = last_bitmap_block - 1;
		error = hfs_get_summary_index (hfsmp, temp_block, &temp_summary);
		if (error) {
			if (ALLOC_DEBUG) {
				panic ("hfs_alloc_scan_range: end bit curAllocBlock %u, last_bitmap_block %u", curAllocBlock, last_bitmap_block);
			}
			/* Fix: release the bitmap buffer before bailing out. */
			ReleaseScanBitmapRange(blockRef);
			return EINVAL;
		}
		if (temp_summary > last_marked) {
			if (saw_free_blocks == 0) {
				hfs_set_summary (hfsmp, temp_summary, 1);
			}
			else {
				hfs_set_summary (hfsmp, temp_summary, 0);
			}
		}
	}

	/* Flush a free run that extends to the end of the chunk. */
	if (size != 0) {
		if (readwrite) {
			hfs_track_unmap_blocks (hfsmp, free_offset, size, list);
		}
		add_free_extent_cache (hfsmp, free_offset, size);
	}

	*bitToScan = curAllocBlock;
	ReleaseScanBitmapRange(blockRef);
	return 0;
}
/*
 * Compute how many bytes of the allocation bitmap to read, starting
 * at bitmap bit "bitmap_st" (which must be word-aligned and land on a
 * vcbVBMIOSize boundary), storing the result in *iosize.  The size is
 * capped at MAXBSIZE and at the end of the bitmap file.  Returns 0 on
 * success or EINVAL on alignment/range violations.
 */
static int hfs_scan_range_size (struct hfsmount *hfsmp, uint32_t bitmap_st, uint32_t *iosize) {
	uint32_t bitmap_off;
	uint32_t bitmap_len;
	uint32_t remaining;

	/* The caller must hand us a word-aligned starting bit. */
	if (bitmap_st % kBitsPerWord) {
		if (ALLOC_DEBUG) {
			panic ("hfs_scan_range_size unaligned start bit! bitmap_st %d \n", bitmap_st);
		}
		return EINVAL;
	}

	bitmap_off = bitmap_st / kBitsPerByte;

	/* Reject starts past the volume or implausibly deep in the bitmap. */
	if ((hfsmp->totalBlocks <= bitmap_st) || (bitmap_off > (512 * 1024 * 1024))) {
		if (ALLOC_DEBUG) {
			panic ("hfs_scan_range_size: invalid start! bitmap_st %d, bitmap_off %d\n", bitmap_st, bitmap_off);
		}
		return EINVAL;
	}

	/* The byte offset must sit on a bitmap-IO-size boundary too. */
	if (bitmap_off & (hfsmp->vcbVBMIOSize - 1)) {
		if (ALLOC_DEBUG) {
			panic ("hfs_scan_range_size: unaligned start! bitmap_off %d\n", bitmap_off);
		}
		return EINVAL;
	}

	/* Bitmap length in bytes, rounded up to whole allocation blocks. */
	bitmap_len = roundup(hfsmp->totalBlocks, hfsmp->blockSize * 8) / 8;
	remaining = bitmap_len - bitmap_off;
	*iosize = MIN((MAXBSIZE), remaining);
	return 0;
}
/*
 * Test whether the single allocation block "startingBlock" is marked
 * in-use.  If bp_buf is non-NULL it is treated as the already-read
 * bitmap block; otherwise the bitmap block is read here and released
 * before returning.  Returns non-zero if allocated, 0 if free, or the
 * error from ReadBitmapBlock.
 */
int hfs_isallocated_scan(struct hfsmount *hfsmp, u_int32_t startingBlock, u_int32_t *bp_buf) {
	u_int32_t *currentWord;
	u_int32_t bitMask;
	u_int32_t firstBit;
	u_int32_t numBits;
	u_int32_t bitsPerBlock;
	uintptr_t blockRef = 0;
	u_int32_t numBlocks = 1;
	u_int32_t *buffer = bp_buf;
	int inuse = 0;
	int error;

	if (buffer == NULL) {
		/* No caller-supplied buffer: read the bitmap block ourselves. */
		error = ReadBitmapBlock(hfsmp, startingBlock, &buffer, &blockRef,
								HFS_ALLOC_IGNORE_TENTATIVE);
		if (error)
			return (error);
	}

	/* Locate the word holding this block's bit within the bitmap block. */
	bitsPerBlock = hfsmp->vcbVBMIOSize * kBitsPerByte;
	currentWord = buffer + ((startingBlock & (bitsPerBlock-1)) / kBitsPerWord);
	firstBit = startingBlock % kBitsPerWord;

	/* Build a mask covering just the single bit of interest. */
	bitMask = kAllBitsSetInWord >> firstBit;
	numBits = kBitsPerWord - firstBit;
	if (numBits > numBlocks) {
		numBits = numBlocks;
		bitMask &= ~(kAllBitsSetInWord >> (firstBit + numBits));
	}

	if ((*currentWord & SWAP_BE32 (bitMask)) != 0) {
		inuse = 1;
	}

	/* Only release the bitmap block if we read it ourselves. */
	if (bp_buf == NULL && buffer != NULL) {
		(void)ReleaseBitmapBlock(hfsmp, blockRef, false);
	}
	return (inuse);
}
/*
 * Empty the volume's free extent cache under the spinlock, zeroing
 * the backing array so no stale entries can be observed.
 */
void ResetVCBFreeExtCache(struct hfsmount *hfsmp)
{
	if (hfs_kdebug_allocation & HFSDBG_EXT_CACHE_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_RESET_EXTENT_CACHE | DBG_FUNC_START, 0, 0, 0, 0, 0);

	lck_spin_lock(&hfsmp->vcbFreeExtLock);

	hfsmp->vcbFreeExtCnt = 0;
	bzero ((void*)(hfsmp->vcbFreeExt), kMaxFreeExtents * sizeof(HFSPlusExtentDescriptor));

	lck_spin_unlock(&hfsmp->vcbFreeExtLock);

	if (hfs_kdebug_allocation & HFSDBG_EXT_CACHE_ENABLED)
		KERNEL_DEBUG_CONSTANT(HFSDBG_RESET_EXTENT_CACHE | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
/*
 * Install a new allocation limit (used while resizing a volume), then
 * discard cached state that may reference blocks beyond it: the free
 * extent cache, the summary table (rebuilt), and any tentative
 * reservations overlapping the now-forbidden region.  Returns 0.
 */
u_int32_t UpdateAllocLimit (struct hfsmount *hfsmp, u_int32_t new_end_block) {
	struct rl_entry *rsv, *tmp;

	hfsmp->allocLimit = new_end_block;
	ResetVCBFreeExtCache(hfsmp);
	(void) hfs_rebuild_summary (hfsmp);

	/* Drop tentative reservations that reach past the new limit. */
	TAILQ_FOREACH_SAFE(rsv, &hfsmp->hfs_reserved_ranges[HFS_TENTATIVE_BLOCKS],
			rl_link, tmp) {
		if (rl_overlap(rsv, new_end_block, RL_INFINITY) != RL_NOOVERLAP)
			hfs_release_reserved(hfsmp, rsv, HFS_TENTATIVE_BLOCKS);
	}

	return 0;
}
/*
 * Delete entry "index" from the free extent array, sliding any later
 * entries down one slot.  Out-of-range indices are logged (or panic
 * under ALLOC_DEBUG) and ignored.  Caller holds vcbFreeExtLock.
 */
static void remove_free_extent_list(struct hfsmount *hfsmp, int index)
{
	if (index < 0 || (uint32_t)index >= hfsmp->vcbFreeExtCnt) {
		if (ALLOC_DEBUG)
			panic("hfs: remove_free_extent_list: %p: index (%d) out of range (0, %u)", hfsmp, index, hfsmp->vcbFreeExtCnt);
		else
			printf("hfs: remove_free_extent_list: %p: index (%d) out of range (0, %u)", hfsmp, index, hfsmp->vcbFreeExtCnt);
		return;
	}

	int tail = hfsmp->vcbFreeExtCnt - index - 1;
	if (tail > 0) {
		/* Close the gap left by the removed entry. */
		memmove(&hfsmp->vcbFreeExt[index], &hfsmp->vcbFreeExt[index+1], tail * sizeof(hfsmp->vcbFreeExt[0]));
	}
	hfsmp->vcbFreeExtCnt--;
}
/*
 * Insert the extent (startBlock, blockCount) into the free extent
 * array, keeping it sorted by start block on sparse-device volumes
 * and by descending block count otherwise.  If the array is full and
 * the extent sorts past the end, it is dropped.  Returns the slot it
 * was inserted at (kMaxFreeExtents when dropped).  Caller holds
 * vcbFreeExtLock.
 */
static int add_free_extent_list(struct hfsmount *hfsmp, u_int32_t startBlock, u_int32_t blockCount)
{
	uint32_t slot;

	if (ALLOC_DEBUG) {
		/* Callers must never hand us an extent overlapping a cached one. */
		uint32_t endBlock = startBlock + blockCount;
		for (slot = 0; slot < hfsmp->vcbFreeExtCnt; ++slot) {
			if (endBlock < hfsmp->vcbFreeExt[slot].startBlock ||
					startBlock > (hfsmp->vcbFreeExt[slot].startBlock + hfsmp->vcbFreeExt[slot].blockCount)) {
				continue;
			}
			panic("hfs: add_free_extent_list: %p: extent(%u %u) overlaps existing extent (%u %u) at index %d",
					hfsmp, startBlock, blockCount, hfsmp->vcbFreeExt[slot].startBlock, hfsmp->vcbFreeExt[slot].blockCount, slot);
		}
	}

	/* Find the sorted insertion point for this extent. */
	for (slot = 0; slot < hfsmp->vcbFreeExtCnt; ++slot) {
		if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
			if (startBlock < hfsmp->vcbFreeExt[slot].startBlock) {
				break;
			}
		} else {
			if (blockCount > hfsmp->vcbFreeExt[slot].blockCount) {
				break;
			}
		}
	}

	/* Sorted to the end of a full array: nowhere to put it. */
	if (slot == kMaxFreeExtents) {
		return slot;
	}

	if (hfsmp->vcbFreeExtCnt < kMaxFreeExtents)
		hfsmp->vcbFreeExtCnt++;

	/* Shift the tail up one slot (the last entry may fall off). */
	int tail = hfsmp->vcbFreeExtCnt - slot - 1;
	if (tail > 0) {
		memmove(&hfsmp->vcbFreeExt[slot+1], &hfsmp->vcbFreeExt[slot], tail * sizeof(hfsmp->vcbFreeExt[0]));
	}
	hfsmp->vcbFreeExt[slot].startBlock = startBlock;
	hfsmp->vcbFreeExt[slot].blockCount = blockCount;
	return slot;
}
/*
 * Remove (or trim) any cached free extents overlapping the range
 * [startBlock, startBlock + blockCount).  Extents wholly inside the
 * range are deleted; partially overlapping extents are trimmed; an
 * extent that strictly contains the range is split in two.  Takes and
 * releases vcbFreeExtLock.
 */
static void remove_free_extent_cache(struct hfsmount *hfsmp, u_int32_t startBlock, u_int32_t blockCount)
{
u_int32_t i, insertedIndex;
u_int32_t currentStart, currentEnd, endBlock;
int extentsRemoved = 0;
if (hfs_kdebug_allocation & HFSDBG_EXT_CACHE_ENABLED)
KERNEL_DEBUG_CONSTANT(HFSDBG_REMOVE_EXTENT_CACHE | DBG_FUNC_START, startBlock, blockCount, 0, 0, 0);
endBlock = startBlock + blockCount;
lck_spin_lock(&hfsmp->vcbFreeExtLock);
for (i = 0; i < hfsmp->vcbFreeExtCnt; ++i) {
currentStart = hfsmp->vcbFreeExt[i].startBlock;
currentEnd = currentStart + hfsmp->vcbFreeExt[i].blockCount;
/* No overlap with this cached extent; leave it alone. */
if (currentEnd <= startBlock || currentStart >= endBlock) {
continue;
}
/* Removed range covers the whole extent: delete it outright. */
if (startBlock <= currentStart && endBlock >= currentEnd) {
remove_free_extent_list(hfsmp, i);
--i;
++extentsRemoved;
continue;
}
/* Removed range is strictly inside: split into two remainders. */
if (startBlock > currentStart && endBlock < currentEnd) {
remove_free_extent_list(hfsmp, i);
add_free_extent_list(hfsmp, currentStart, startBlock - currentStart);
add_free_extent_list(hfsmp, endBlock, currentEnd - endBlock);
break;
}
/* Partial overlap: re-insert only the surviving piece. */
remove_free_extent_list(hfsmp, i);
if (startBlock > currentStart) {
insertedIndex = add_free_extent_list(hfsmp, currentStart, startBlock - currentStart);
} else {
insertedIndex = add_free_extent_list(hfsmp, endBlock, currentEnd - endBlock);
}
/* If the remainder re-sorted after slot i, re-examine slot i. */
if (insertedIndex > i) {
--i;
}
}
lck_spin_unlock(&hfsmp->vcbFreeExtLock);
sanity_check_free_ext(hfsmp, 0);
if (hfs_kdebug_allocation & HFSDBG_EXT_CACHE_ENABLED)
KERNEL_DEBUG_CONSTANT(HFSDBG_REMOVE_EXTENT_CACHE | DBG_FUNC_END, 0, 0, 0, extentsRemoved, 0);
return;
}
/*
 * Add the extent (startBlock, blockCount) to the free extent cache,
 * clipping it to allocLimit and coalescing it with any overlapping or
 * adjacent cached extents.  Takes and releases vcbFreeExtLock.
 *
 * NOTE(review): retval is never set to true, so this always returns
 * false even when the extent is inserted — confirm whether any caller
 * depends on the return value before relying on it.
 */
static Boolean add_free_extent_cache(struct hfsmount *hfsmp, u_int32_t startBlock, u_int32_t blockCount)
{
Boolean retval = false;
uint32_t endBlock;
uint32_t currentEnd;
uint32_t i;
if (hfs_kdebug_allocation & HFSDBG_EXT_CACHE_ENABLED)
KERNEL_DEBUG_CONSTANT(HFSDBG_ADD_EXTENT_CACHE | DBG_FUNC_START, startBlock, blockCount, 0, 0, 0);
#if DEBUG
/* Debug builds verify the extent is not under any reservation. */
for (i = 0; i < 2; ++i) {
struct rl_entry *range;
TAILQ_FOREACH(range, &hfsmp->hfs_reserved_ranges[i], rl_link) {
hfs_assert(rl_overlap(range, startBlock,
startBlock + blockCount - 1) == RL_NOOVERLAP);
}
}
#endif
/* Ignore, or clip, extents that reach past the allocation limit. */
if (startBlock >= hfsmp->allocLimit) {
goto out_not_locked;
}
if ((startBlock + blockCount) > hfsmp->allocLimit) {
blockCount = hfsmp->allocLimit - startBlock;
}
lck_spin_lock(&hfsmp->vcbFreeExtLock);
endBlock = startBlock + blockCount;
/* Absorb every cached extent that overlaps or abuts the new one. */
for (i=0; i < hfsmp->vcbFreeExtCnt; ++i) {
currentEnd = hfsmp->vcbFreeExt[i].startBlock + hfsmp->vcbFreeExt[i].blockCount;
if (hfsmp->vcbFreeExt[i].startBlock > endBlock || currentEnd < startBlock) {
continue;
} else {
/* Grow the new extent over the absorbed one, drop the old. */
if (hfsmp->vcbFreeExt[i].startBlock < startBlock)
startBlock = hfsmp->vcbFreeExt[i].startBlock;
if (currentEnd > endBlock)
endBlock = currentEnd;
remove_free_extent_list(hfsmp, i);
--i;
}
}
add_free_extent_list(hfsmp, startBlock, endBlock - startBlock);
lck_spin_unlock(&hfsmp->vcbFreeExtLock);
out_not_locked:
sanity_check_free_ext(hfsmp, 0);
if (hfs_kdebug_allocation & HFSDBG_EXT_CACHE_ENABLED)
KERNEL_DEBUG_CONSTANT(HFSDBG_ADD_EXTENT_CACHE | DBG_FUNC_END, 0, 0, 0, retval, 0);
return retval;
}
/*
 * Debug-only (ALLOC_DEBUG) validation of the free extent cache:
 * checks the entry count bound, that entries lie below allocLimit,
 * that no two entries share a start block, and that the array stays
 * sorted (by start block on sparse devices, by descending size
 * otherwise).  With check_allocated != 0, also verifies each cached
 * extent is actually free in the bitmap (briefly dropping the
 * spinlock to do so).  Panics on any violation.
 */
static void sanity_check_free_ext(struct hfsmount *hfsmp, int check_allocated)
{
u_int32_t i, j;
/* Compiled to a no-op unless ALLOC_DEBUG is enabled. */
if (ALLOC_DEBUG == 0) {
return;
}
lck_spin_lock(&hfsmp->vcbFreeExtLock);
if (hfsmp->vcbFreeExtCnt > kMaxFreeExtents)
panic("hfs: %p: free extent count (%u) is too large", hfsmp, hfsmp->vcbFreeExtCnt);
for(i=0; i < hfsmp->vcbFreeExtCnt; i++) {
u_int32_t start, nblocks;
start = hfsmp->vcbFreeExt[i].startBlock;
nblocks = hfsmp->vcbFreeExt[i].blockCount;
if (check_allocated) {
/* hfs_isallocated may sleep; drop the spinlock around it. */
lck_spin_unlock(&hfsmp->vcbFreeExtLock);
if (hfs_isallocated(hfsmp, start, nblocks)) {
panic("hfs: %p: slot %d:(%u,%u) in the free extent array is allocated\n",
hfsmp, i, start, nblocks);
}
lck_spin_lock(&hfsmp->vcbFreeExtLock);
}
if ((start > hfsmp->allocLimit) || ((start + nblocks) > hfsmp->allocLimit)) {
panic ("hfs: %p: slot %d:(%u,%u) in the free extent array is beyond allocLimit=%u\n",
hfsmp, i, start, nblocks, hfsmp->allocLimit);
}
/* No two cached extents may begin at the same block. */
for(j=i+1; j < hfsmp->vcbFreeExtCnt; j++) {
if (start == hfsmp->vcbFreeExt[j].startBlock) {
panic("hfs: %p: slot %d:(%u,%u) and %d:(%u,%u) are duplicate\n",
hfsmp, i, start, nblocks, j, hfsmp->vcbFreeExt[j].startBlock,
hfsmp->vcbFreeExt[j].blockCount);
}
}
/* Adjacent entries must respect the array's sort order. */
if ((i+1) != hfsmp->vcbFreeExtCnt) {
if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
if (hfsmp->vcbFreeExt[i].startBlock > hfsmp->vcbFreeExt[i+1].startBlock) {
panic ("hfs: %p: SPARSE %d:(%u,%u) and %d:(%u,%u) are out of order\n",
hfsmp, i, start, nblocks, i+1, hfsmp->vcbFreeExt[i+1].startBlock,
hfsmp->vcbFreeExt[i+1].blockCount);
}
} else {
if (hfsmp->vcbFreeExt[i].blockCount < hfsmp->vcbFreeExt[i+1].blockCount) {
panic ("hfs: %p: %d:(%u,%u) and %d:(%u,%u) are out of order\n",
hfsmp, i, start, nblocks, i+1, hfsmp->vcbFreeExt[i+1].startBlock,
hfsmp->vcbFreeExt[i+1].blockCount);
}
}
}
}
lck_spin_unlock(&hfsmp->vcbFreeExtLock);
}
#define BIT_RIGHT_MASK(bit) (0xffffffffffffffffull >> (bit))

/*
 * Count leading zero bits of a 64-bit value, defined as 64 for zero
 * (where __builtin_clzll is undefined).
 */
static int clzll(uint64_t x)
{
	return (x == 0) ? 64 : __builtin_clzll(x);
}
#if !HFS_ALLOC_TEST
/*
 * Release the current bitmap chunk (if any) and read the next chunk
 * at bitmap_ctx->run_offset.  If the bitmap lock has been held longer
 * than HFS_FSINFO_MAX_LOCKHELD_TIME, temporarily drop it, flush the
 * journal, and re-acquire it before reading, so other threads can
 * make progress.  At the end of the bitmap, chunk_end is set to 0 and
 * no buffer is held.
 *
 * NOTE(review): the msleep() below passes a NULL wait channel and a
 * NULL mutex — this looks unusual for msleep usage; confirm it is
 * intentional rather than leftover/erroneous code.
 */
static errno_t get_more_bits(bitmap_context_t *bitmap_ctx)
{
uint32_t start_bit;
uint32_t iosize = 0;
uint32_t byte_offset;
uint32_t last_bitmap_block;
int error;
struct hfsmount *hfsmp = bitmap_ctx->hfsmp;
#if !HFS_ALLOC_TEST
uint64_t lock_elapsed;
#endif
/* Drop the previous chunk's buffer before fetching the next. */
if (bitmap_ctx->bp)
ReleaseScanBitmapRange(bitmap_ctx->bp);
/* See NOTE(review) above regarding the NULL channel/mutex. */
if (msleep(NULL, NULL, PINOD | PCATCH,
"hfs_fsinfo", NULL) == EINTR) {
return EINTR;
}
#if !HFS_ALLOC_TEST
/* Periodically drop the bitmap lock so we do not starve others. */
absolutetime_to_nanoseconds(mach_absolute_time() - bitmap_ctx->lock_start, &lock_elapsed);
if (lock_elapsed >= HFS_FSINFO_MAX_LOCKHELD_TIME) {
hfs_systemfile_unlock(hfsmp, bitmap_ctx->lockflags);
/* Yield briefly, then flush the journal before re-locking. */
tsleep((caddr_t)get_more_bits, PRIBIO, "hfs_fsinfo", 1);
hfs_journal_lock(hfsmp);
error = hfs_flush(hfsmp, HFS_FLUSH_JOURNAL_META);
if (error) {
hfs_journal_unlock(hfsmp);
return error;
}
bitmap_ctx->lockflags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
bitmap_ctx->lock_start = mach_absolute_time();
hfs_journal_unlock(hfsmp);
/* Invalidate cached bitmap buffers so we re-read from disk. */
error = buf_invalidateblks(hfsmp->hfs_allocation_vp, 0, 0, 0);
if (error) {
return error;
}
}
#endif
start_bit = bitmap_ctx->run_offset;
/* Past the end of the bitmap: signal completion via chunk_end = 0. */
if (start_bit >= bitmap_ctx->hfsmp->totalBlocks) {
bitmap_ctx->chunk_end = 0;
bitmap_ctx->bp = NULL;
bitmap_ctx->bitmap = NULL;
return 0;
}
hfs_assert(start_bit % 8 == 0);
error = hfs_scan_range_size (bitmap_ctx->hfsmp, start_bit, &iosize);
if (error)
return error;
hfs_assert(iosize != 0);
byte_offset = start_bit / kBitsPerByte;
error = ReadBitmapRange(bitmap_ctx->hfsmp, byte_offset, iosize, (uint32_t **)&bitmap_ctx->bitmap, &bitmap_ctx->bp);
if (error)
return error;
/* Clamp the chunk to the amount actually read and to the volume. */
last_bitmap_block = start_bit + buf_count(bitmap_ctx->bp) * kBitsPerByte;
if (last_bitmap_block > bitmap_ctx->hfsmp->totalBlocks)
last_bitmap_block = bitmap_ctx->hfsmp->totalBlocks;
bitmap_ctx->chunk_current = 0; bitmap_ctx->chunk_end = last_bitmap_block - start_bit;
return 0;
}
#endif // !HFS_ALLOC_TEST
/*
 * Return the length of the run of consecutive SET bits that begins at
 * bit "start" of the bitmap, stopping no later than bit "end"
 * (exclusive).  Bits are numbered from the most significant bit of
 * each 64-bit big-endian word.
 */
static int bit_count_set(void *bitmap, int start, int end)
{
if (start == end)
return 0;
hfs_assert(end > start);
const int start_bit = start & 63;
const int end_bit = end & 63;
uint64_t *p = (uint64_t *)bitmap + start / 64;
/* Invert so clzll finds the first CLEAR bit (the run terminator). */
uint64_t x = ~OSSwapBigToHostInt64(*p);
if ((start & ~63) == (end & ~63)) {
/* Run confined to one word: mask off bits outside [start, end). */
x = (x & BIT_RIGHT_MASK(start_bit)) | BIT_RIGHT_MASK(end_bit);
return clzll(x) - start_bit;
}
x &= BIT_RIGHT_MASK(start_bit);
if (x)
return clzll(x) - start_bit;
++p;
int count = 64 - start_bit;
/* Number of whole words between the first word and the final one. */
int nquads = (end - end_bit - start - 1) / 64;
while (nquads--) {
if (*p != 0xffffffffffffffffull) {
x = ~OSSwapBigToHostInt64(*p);
return count + clzll(x);
}
++p;
count += 64;
}
if (end_bit) {
/* Partial final word: treat bits at/after "end" as clear. */
x = ~OSSwapBigToHostInt64(*p) | BIT_RIGHT_MASK(end_bit);
count += clzll(x);
}
return count;
}
/*
 * Return the length of the run of consecutive CLEAR bits that begins
 * at bit "start" of the bitmap, stopping no later than bit "end"
 * (exclusive).  Mirror image of bit_count_set: here the words are NOT
 * inverted, so clzll finds the first SET bit terminating the run.
 */
static int bit_count_clr(void *bitmap, int start, int end)
{
if (start == end)
return 0;
hfs_assert(end > start);
const int start_bit = start & 63;
const int end_bit = end & 63;
uint64_t *p = (uint64_t *)bitmap + start / 64;
uint64_t x = OSSwapBigToHostInt64(*p);
if ((start & ~63) == (end & ~63)) {
/* Run confined to one word: mask off bits outside [start, end). */
x = (x & BIT_RIGHT_MASK(start_bit)) | BIT_RIGHT_MASK(end_bit);
return clzll(x) - start_bit;
}
x &= BIT_RIGHT_MASK(start_bit);
if (x)
return clzll(x) - start_bit;
++p;
int count = 64 - start_bit;
/* Number of whole words between the first word and the final one. */
int nquads = (end - end_bit - start - 1) / 64;
while (nquads--) {
if (*p) {
x = OSSwapBigToHostInt64(*p);
return count + clzll(x);
}
++p;
count += 64;
}
if (end_bit) {
/* Partial final word: treat bits at/after "end" as set. */
x = OSSwapBigToHostInt64(*p) | BIT_RIGHT_MASK(end_bit);
count += clzll(x);
}
return count;
}
#if !HFS_ALLOC_TEST
/*
 * Update the summary table bits covering [start, start + count - 1].
 * "set" == true marks the regions fully allocated; false marks them
 * as containing free blocks.  When setting, a summary region shared
 * with the end of the previous free run (last_free_summary_bit) is
 * left marked free.  Returns 0 or EINVAL on index failures.
 */
static errno_t update_summary_table(bitmap_context_t *bitmap_ctx, uint32_t start, uint32_t count, bool set)
{
	uint32_t first_sumbit, last_sumbit;

	if (count == 0)
		return 0;
	if (!ISSET(bitmap_ctx->hfsmp->hfs_flags, HFS_SUMMARY_TABLE))
		return 0;

	if (hfs_get_summary_index (bitmap_ctx->hfsmp, start, &first_sumbit))
		return EINVAL;
	if (hfs_get_summary_index (bitmap_ctx->hfsmp, start + count - 1, &last_sumbit))
		return EINVAL;

	/*
	 * The first summary region may straddle the tail of the previous
	 * free run; never flip it back to "allocated".
	 */
	if (set && (first_sumbit == bitmap_ctx->last_free_summary_bit))
		first_sumbit++;

	for (uint32_t sumbit = first_sumbit; sumbit <= last_sumbit; sumbit++)
		hfs_set_summary (bitmap_ctx->hfsmp, sumbit, set);

	/* Remember where this free run ended for the next "set" pass. */
	if (!set)
		bitmap_ctx->last_free_summary_bit = last_sumbit;

	return 0;
}
#endif //!HFS_ALLOC_TEST
/*
 * Repeatedly apply "fn" (a run-length counter: bit_count_set or
 * bit_count_clr) across bitmap chunks until the current run ends
 * inside a chunk, accumulating the total run length in *bit_count.
 * bitmap_ctx->run_offset tracks the absolute bit position across
 * calls; chunk_end == 0 from get_more_bits means end of bitmap.
 */
static errno_t hfs_bit_count(bitmap_context_t *bitmap_ctx, int (*fn)(void *, int ,int), uint32_t *bit_count)
{
int count;
errno_t error = 0;
*bit_count = 0;
do {
/* Refill when starting out or when the chunk is consumed. */
if (bitmap_ctx->run_offset == 0 || bitmap_ctx->chunk_current == bitmap_ctx->chunk_end) {
if ((error = get_more_bits(bitmap_ctx)) != 0)
goto out;
}
/* chunk_end of 0 signals there is no more bitmap to scan. */
if (bitmap_ctx->chunk_end == 0)
break;
count = fn(bitmap_ctx->bitmap, bitmap_ctx->chunk_current, bitmap_ctx->chunk_end);
bitmap_ctx->run_offset += count;
bitmap_ctx->chunk_current += count;
*bit_count += count;
/* Loop only while the run reached the end of the chunk. */
} while (bitmap_ctx->chunk_current >= bitmap_ctx->chunk_end && count);
out:
return error;
}
/*
 * Count the length of the run of CLEAR (free) bits at the current
 * bitmap position; see hfs_bit_count.
 */
static errno_t hfs_bit_count_clr(bitmap_context_t *bitmap_ctx, uint32_t *count)
{
return hfs_bit_count(bitmap_ctx, bit_count_clr, count);
}
/*
 * Count the length of the run of SET (allocated) bits at the current
 * bitmap position; see hfs_bit_count.
 */
static errno_t hfs_bit_count_set(bitmap_context_t *bitmap_ctx, uint32_t *count)
{
return hfs_bit_count(bitmap_ctx, bit_count_set, count);
}
/* Return the absolute bitmap bit position of the scan cursor. */
static uint32_t hfs_bit_offset(bitmap_context_t *bitmap_ctx)
{
return bitmap_ctx->run_offset;
}
/*
 * Walk the entire allocation bitmap, invoking "callback" once per run
 * of free blocks with that run's size in bytes.  Rebuilds the summary
 * table (initializing it first if absent) as a side effect.  The
 * journal is flushed and the bitmap lock held exclusively for the
 * duration; get_more_bits may periodically drop and retake the lock.
 */
errno_t hfs_find_free_extents(struct hfsmount *hfsmp,
void (*callback)(void *data, off_t free_extent_size), void *callback_arg)
{
struct bitmap_context bitmap_ctx;
uint32_t count;
errno_t error = 0;
if ((hfsmp->hfs_flags & HFS_SUMMARY_TABLE) == 0) {
error = hfs_init_summary(hfsmp);
if (error)
return error;
}
bzero(&bitmap_ctx, sizeof(struct bitmap_context));
/*
 * Flush the journal under the journal lock so the on-disk bitmap is
 * current before we take the bitmap lock and begin reading it.
 */
hfs_journal_lock(hfsmp);
error = hfs_flush(hfsmp, HFS_FLUSH_JOURNAL_META);
if (error) {
hfs_journal_unlock(hfsmp);
return error;
}
bitmap_ctx.lockflags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
#if !HFS_ALLOC_TEST
bitmap_ctx.lock_start = mach_absolute_time();
#endif
hfs_journal_unlock(hfsmp);
/* Drop cached bitmap buffers so we read fresh data from disk. */
error = buf_invalidateblks(hfsmp->hfs_allocation_vp, 0, 0, 0);
if (error)
goto out;
bitmap_ctx.hfsmp = hfsmp;
bitmap_ctx.run_offset = 0;
while (bitmap_ctx.run_offset < hfsmp->totalBlocks) {
uint32_t start = hfs_bit_offset(&bitmap_ctx);
/* Measure the next run of free (clear) bits and report it. */
if ((error = hfs_bit_count_clr(&bitmap_ctx, &count)) != 0)
goto out;
if (count)
callback(callback_arg, hfs_blk_to_bytes(count, hfsmp->blockSize));
if ((error = update_summary_table(&bitmap_ctx, start, count, false)) != 0)
goto out;
/* Skip over the following run of allocated (set) bits. */
start = hfs_bit_offset(&bitmap_ctx);
if ((error = hfs_bit_count_set(&bitmap_ctx, &count)) != 0)
goto out;
if ((error = update_summary_table(&bitmap_ctx, start, count, true)) != 0)
goto out;
}
out:
if (bitmap_ctx.lockflags) {
hfs_systemfile_unlock(hfsmp, bitmap_ctx.lockflags);
}
return error;
}