// IOMultiMemoryDescriptor.cpp
#include <IOKit/IOLib.h>
#include <IOKit/IOMultiMemoryDescriptor.h>
#define super IOMemoryDescriptor
OSDefineMetaClassAndStructors(IOMultiMemoryDescriptor, IOMemoryDescriptor)
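
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// withDescriptors
//
// Create an IOMultiMemoryDescriptor that presents an ordered array of
// existing memory descriptors as one logical buffer.  With asReference set,
// the caller's array is used in place (and must outlive this object);
// otherwise a private copy of the array is made.  Each sub-descriptor is
// retained in both cases.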
IOMultiMemoryDescriptor * IOMultiMemoryDescriptor::withDescriptors(
                                  IOMemoryDescriptor ** descriptors,
                                  UInt32                withCount,
                                  IODirection           withDirection,
                                  bool                  asReference )
{
    IOMultiMemoryDescriptor * me = new IOMultiMemoryDescriptor;

    if ( me && me->initWithDescriptors(
                                  descriptors,
                                  withCount,
                                  withDirection,
                                  asReference ) == false )
    {
        me->release();
        me = 0;
    }

    return me;
}
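
// Illustrative usage sketch, not part of this file: a hypothetical caller
// owning two same-direction descriptors, 'mdA' and 'mdB', could chain them
// into a single logical buffer like so:
//
//     IOMemoryDescriptor *      parts[2] = { mdA, mdB };
//     IOMultiMemoryDescriptor * multi =
//         IOMultiMemoryDescriptor::withDescriptors(parts, 2,
//                                                  kIODirectionOutIn,
//                                                  false /* copy the array */);
//
// With asReference == false the array itself is copied, so 'parts' may live
// on the stack; either way the sub-descriptors are retained until free().

// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// initWithDescriptors
//
// Designated initializer; see withDescriptors above.  Re-initializing a live
// object releases any previously held sub-descriptors first.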
bool IOMultiMemoryDescriptor::initWithDescriptors(
                                  IOMemoryDescriptor ** descriptors,
                                  UInt32                withCount,
                                  IODirection           withDirection,
                                  bool                  asReference )
{
    unsigned     index;
    IOOptionBits copyFlags;

    assert(descriptors);

    // Release any old descriptors left over from a previous initialization.

    if ( _descriptors )
    {
        for ( unsigned index = 0; index < _descriptorsCount; index++ )
            _descriptors[index]->release();

        if ( _descriptorsIsAllocated )
            IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
    }
    else
    {
        if ( super::init() == false ) return false;
    }

    // Initialize our minimal state.

    _descriptors            = 0;
    _descriptorsCount       = withCount;
    _descriptorsIsAllocated = !asReference;
    _flags                  = withDirection;
#ifndef __LP64__
    _direction              = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif
    _length                 = 0;
    _mappings               = 0;
    _tag                    = 0;

    if ( asReference )
    {
        _descriptors = descriptors;
    }
    else
    {
        _descriptors = IONew(IOMemoryDescriptor *, withCount);
        if ( _descriptors == 0 ) return false;

        bcopy( descriptors,
               _descriptors,
               withCount * sizeof(IOMemoryDescriptor *) );
    }

    // Retain each sub-descriptor and accumulate the aggregate length.

    for ( index = 0; index < withCount; index++ )
    {
        descriptors[index]->retain();
        _length += descriptors[index]->getLength();
        if ( _tag == 0 ) _tag = descriptors[index]->getTag();
        assert(descriptors[index]->getDirection() ==
               (withDirection & kIOMemoryDirectionMask));
    }

    // All sub-descriptors must agree on pageability.

    enum { kCopyFlags = kIOMemoryBufferPageable };
    copyFlags = 0;
    for ( index = 0; index < withCount; index++ )
    {
        if (!index) copyFlags = (kCopyFlags & descriptors[index]->_flags);
        else if (copyFlags != (kCopyFlags & descriptors[index]->_flags)) break;
    }
    if (index < withCount) return (false);
    _flags |= copyFlags;

    return true;
}
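
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// free
//
// Release every retained sub-descriptor and, if the array was copied at
// init time, the array itself.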
void IOMultiMemoryDescriptor::free()
{
    if ( _descriptors )
    {
        for ( unsigned index = 0; index < _descriptorsCount; index++ )
            _descriptors[index]->release();

        if ( _descriptorsIsAllocated )
            IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
    }

    super::free();
}
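
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// prepare
//
// Prepare (wire) each constituent descriptor in order.  If any prepare
// fails, the descriptors already prepared are completed again so the object
// is left fully unprepared.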
IOReturn IOMultiMemoryDescriptor::prepare(IODirection forDirection)
{
    unsigned index;
    IOReturn status = kIOReturnInternalError;
    IOReturn statusUndo;

    if ( forDirection == kIODirectionNone )
    {
        forDirection = getDirection();
    }

    // Prepare the descriptors in order; stop at the first failure.

    for ( index = 0; index < _descriptorsCount; index++ )
    {
        status = _descriptors[index]->prepare(forDirection);
        if ( status != kIOReturnSuccess ) break;
    }

    // On failure, complete whatever was already prepared.

    if ( status != kIOReturnSuccess )
    {
        for ( unsigned indexUndo = 0; indexUndo < index; indexUndo++ )
        {
            statusUndo = _descriptors[indexUndo]->complete(forDirection);
            assert(statusUndo == kIOReturnSuccess);
        }
    }

    return status;
}
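
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// complete
//
// Complete every constituent descriptor; must balance a successful prepare.
// An illustrative pairing, using the hypothetical 'multi' from the sketch
// above:
//
//     if (multi->prepare() == kIOReturnSuccess)
//     {
//         // ... perform the I/O through the descriptor ...
//         multi->complete();
//     }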
IOReturn IOMultiMemoryDescriptor::complete(IODirection forDirection)
{
    IOReturn status;
    IOReturn statusFinal = kIOReturnSuccess;

    if ( forDirection == kIODirectionNone )
    {
        forDirection = getDirection();
    }

    // Complete every descriptor, remembering the last failure (if any).

    for ( unsigned index = 0; index < _descriptorsCount; index++ )
    {
        status = _descriptors[index]->complete(forDirection);
        if ( status != kIOReturnSuccess ) statusFinal = status;
        assert(status == kIOReturnSuccess);
    }

    return statusFinal;
}
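
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// getPhysicalSegment
//
// Translate an offset within the aggregate buffer into a physical segment by
// walking to the sub-descriptor that contains the offset and delegating to
// it.  The returned segment never spans two sub-descriptors.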
addr64_t IOMultiMemoryDescriptor::getPhysicalSegment(IOByteCount   offset,
                                                     IOByteCount * length,
                                                     IOOptionBits  options)
{
    assert(offset <= _length);

    // Find the sub-descriptor containing 'offset' and delegate to it.

    for ( unsigned index = 0; index < _descriptorsCount; index++ )
    {
        if ( offset < _descriptors[index]->getLength() )
        {
            return _descriptors[index]->getPhysicalSegment(offset, length, options);
        }
        offset -= _descriptors[index]->getLength();
    }

    if ( length ) *length = 0;

    return 0;
}
#include "IOKitKernelInternal.h"
IOReturn IOMultiMemoryDescriptor::doMap(vm_map_t           __addressMap,
                                        IOVirtualAddress * __address,
                                        IOOptionBits       options,
                                        IOByteCount        __offset,
                                        IOByteCount        __length)
{
    IOMemoryMap *     mapping = (IOMemoryMap *) *__address;
    vm_map_t          map     = mapping->fAddressMap;
    mach_vm_size_t    offset  = mapping->fOffset;
    mach_vm_size_t    length  = mapping->fLength;
    mach_vm_address_t address = mapping->fAddress;

    kern_return_t     err;
    IOOptionBits      subOptions;
    mach_vm_size_t    mapOffset;
    mach_vm_size_t    bytesRemaining, chunk;
    mach_vm_address_t nextAddress;
    IOMemoryDescriptorMapAllocRef ref;
    vm_prot_t         prot;

    do
    {
        prot = VM_PROT_READ;
        if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;

        if (kIOMapOverwrite & options)
        {
            // The caller supplied the target range; no allocation is needed.
            if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
            {
                map = IOPageableMapForAddress(address);
            }
            err = KERN_SUCCESS;
        }
        else
        {
            // Reserve one contiguous virtual range for the whole mapping.
            ref.map     = map;
            ref.tag     = IOMemoryTag(map);
            ref.options = options;
            ref.size    = length;
            ref.prot    = prot;
            if (options & kIOMapAnywhere)
                // vm_map looks for a place to allocate
                ref.mapped = 0;
            else
                ref.mapped = mapping->fAddress;

            if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
                err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
            else
                err = IOMemoryDescriptorMapAlloc(ref.map, &ref);

            if (KERN_SUCCESS != err) break;

            address = ref.mapped;
            mapping->fAddress = address;
        }

        // Overwrite-map each sub-descriptor's chunk into the reserved range.

        mapOffset      = offset;
        bytesRemaining = length;
        nextAddress    = address;
        assert(mapOffset <= _length);
        subOptions = (options & ~kIOMapAnywhere) | kIOMapOverwrite;

        for (unsigned index = 0; bytesRemaining && (index < _descriptorsCount); index++)
        {
            chunk = _descriptors[index]->getLength();
            if (mapOffset >= chunk)
            {
                // This descriptor lies entirely before the mapped window.
                mapOffset -= chunk;
                continue;
            }
            chunk -= mapOffset;
            if (chunk > bytesRemaining) chunk = bytesRemaining;

            IOMemoryMap * subMap;
            subMap = _descriptors[index]->createMappingInTask(mapping->fAddressTask,
                                                              nextAddress, subOptions,
                                                              mapOffset, chunk);
            if (!subMap) break;
            subMap->release();      // drop our reference; the submapping stays in place

            bytesRemaining -= chunk;
            nextAddress    += chunk;
            mapOffset       = 0;    // later descriptors map from their start
        }

        if (bytesRemaining) err = kIOReturnUnderrun;
    }
    while (false);

    if (kIOReturnSuccess == err)
    {
#if IOTRACKING
        IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
#endif
    }

    return (err);
}
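
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// setPurgeable
//
// Apply the new purgeable state to every sub-descriptor and report a single
// aggregate prior state: empty if any was empty, else volatile if any was
// volatile, else non-volatile.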
IOReturn IOMultiMemoryDescriptor::setPurgeable( IOOptionBits   newState,
                                                IOOptionBits * oldState )
{
    IOReturn     err;
    IOOptionBits totalState, state;

    totalState = kIOMemoryPurgeableNonVolatile;
    err = kIOReturnSuccess;
    for (unsigned index = 0; index < _descriptorsCount; index++)
    {
        err = _descriptors[index]->setPurgeable(newState, &state);
        if (kIOReturnSuccess != err) break;

        // Fold per-descriptor states: empty > volatile > non-volatile.
        if (kIOMemoryPurgeableEmpty == state)              totalState = kIOMemoryPurgeableEmpty;
        else if (kIOMemoryPurgeableEmpty == totalState)    continue;
        else if (kIOMemoryPurgeableVolatile == totalState) continue;
        else if (kIOMemoryPurgeableVolatile == state)      totalState = kIOMemoryPurgeableVolatile;
        else totalState = kIOMemoryPurgeableNonVolatile;
    }

    if (oldState) *oldState = totalState;

    return (err);
}
IOReturn IOMultiMemoryDescriptor::getPageCounts(IOByteCount * pResidentPageCount,
                                                IOByteCount * pDirtyPageCount)
{
    IOReturn    err;
    IOByteCount totalResidentPageCount, totalDirtyPageCount;
    IOByteCount residentPageCount, dirtyPageCount;

    err = kIOReturnSuccess;
    totalResidentPageCount = totalDirtyPageCount = 0;
    for (unsigned index = 0; index < _descriptorsCount; index++)
    {
        err = _descriptors[index]->getPageCounts(&residentPageCount, &dirtyPageCount);
        if (kIOReturnSuccess != err) break;
        totalResidentPageCount += residentPageCount;
        totalDirtyPageCount    += dirtyPageCount;
    }

    if (pResidentPageCount) *pResidentPageCount = totalResidentPageCount;
    if (pDirtyPageCount)    *pDirtyPageCount    = totalDirtyPageCount;

    return (err);
}