using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using LibHac.Common;
using LibHac.Diag;
using LibHac.Fs;
using LibHac.Util;

namespace LibHac.FsSystem;

/// <summary>
/// Marker interface for the empty structs whose declared size encodes an alignment value,
/// read back at runtime via <see cref="Unsafe.SizeOf{T}"/>.
/// </summary>
public interface IAlignmentMatchingStorageSize { }

[StructLayout(LayoutKind.Sequential, Size = 1)]
public struct AlignmentMatchingStorageSize1 : IAlignmentMatchingStorageSize { }

[StructLayout(LayoutKind.Sequential, Size = 16)]
public struct AlignmentMatchingStorageSize16 : IAlignmentMatchingStorageSize { }

[StructLayout(LayoutKind.Sequential, Size = 512)]
public struct AlignmentMatchingStorageSize512 : IAlignmentMatchingStorageSize { }

/// <summary>
/// Handles accessing a base <see cref="IStorage"/> that must always be accessed via an aligned offset and size.
/// </summary>
/// <typeparam name="TDataAlignment">The alignment of all accesses made to the base storage.
/// Must be a power of 2 that is less than or equal to 0x200.</typeparam>
/// <typeparam name="TBufferAlignment">The alignment of the destination buffer for the core read.
/// Must be a power of 2.</typeparam>
/// <remarks><para>This class uses a work buffer on the stack to avoid allocations. Because of this the data alignment
/// must be kept small; no larger than 0x200. The <see cref="AlignmentMatchingStoragePooledBuffer{TBufferAlignment}"/>
/// class should be used for data alignment sizes larger than this.</para>
/// <para>Based on FS 13.1.0 (nnSdk 13.4.0)</para></remarks>
[SkipLocalsInit]
public class AlignmentMatchingStorage<TDataAlignment, TBufferAlignment> : IStorage
    where TDataAlignment : struct, IAlignmentMatchingStorageSize
    where TBufferAlignment : struct, IAlignmentMatchingStorageSize
{
    // The alignments are encoded in the sizes of the type parameters.
    public static uint DataAlign => (uint)Unsafe.SizeOf<TDataAlignment>();
    public static uint BufferAlign => (uint)Unsafe.SizeOf<TBufferAlignment>();

    // Upper bound on DataAlign because the work buffer is stack-allocated.
    public static uint DataAlignMax => 0x200;

    // Aborts if the type parameters encode an unsupported alignment combination.
    private static void VerifyTypeParameters()
    {
        Abort.DoAbortUnless(DataAlign <= DataAlignMax);
        Abort.DoAbortUnless(BitUtil.IsPowerOfTwo(DataAlign));
        Abort.DoAbortUnless(BitUtil.IsPowerOfTwo(BufferAlign));
    }

    private IStorage _baseStorage;
    private long _baseStorageSize;
    private bool _isBaseStorageSizeDirty;
    private SharedRef<IStorage> _sharedBaseStorage;

    public AlignmentMatchingStorage(ref SharedRef<IStorage> baseStorage)
    {
        VerifyTypeParameters();

        _baseStorage = baseStorage.Get;
        _isBaseStorageSizeDirty = true;
        _sharedBaseStorage = SharedRef<IStorage>.CreateMove(ref baseStorage);
    }

    public AlignmentMatchingStorage(IStorage baseStorage)
    {
        VerifyTypeParameters();

        _baseStorage = baseStorage;
        _isBaseStorageSizeDirty = true;
    }

    public override void Dispose()
    {
        _sharedBaseStorage.Destroy();

        base.Dispose();
    }

    public override Result Read(long offset, Span<byte> destination)
    {
        // Scratch space for handling partial blocks at the edges of the requested range.
        Span<byte> workBuffer = stackalloc byte[(int)DataAlign];

        if (destination.Length == 0)
            return Result.Success;

        Result rc = GetSize(out long totalSize);
        if (rc.IsFailure()) return rc.Miss();

        if (!CheckAccessRange(offset, destination.Length, totalSize))
            return ResultFs.OutOfRange.Log();

        return AlignmentMatchingStorageImpl.Read(_baseStorage, workBuffer, DataAlign, BufferAlign, offset, destination);
    }

    public override Result Write(long offset, ReadOnlySpan<byte> source)
    {
        // Scratch space for read-modify-write of partial blocks at the edges of the range.
        Span<byte> workBuffer = stackalloc byte[(int)DataAlign];

        if (source.Length == 0)
            return Result.Success;

        Result rc = GetSize(out long totalSize);
        if (rc.IsFailure()) return rc.Miss();

        if (!CheckAccessRange(offset, source.Length, totalSize))
            return ResultFs.OutOfRange.Log();

        return AlignmentMatchingStorageImpl.Write(_baseStorage, workBuffer, DataAlign, BufferAlign, offset, source);
    }

    public override Result Flush()
    {
        return _baseStorage.Flush();
    }

    public override Result SetSize(long size)
    {
        // The base storage's size must stay aligned, so round the requested size up.
        Result rc = _baseStorage.SetSize(Alignment.AlignUpPow2(size, DataAlign));
        _isBaseStorageSizeDirty = true;

        return rc;
    }

    public override Result GetSize(out long size)
    {
        UnsafeHelpers.SkipParamInit(out size);

        // Cache the base storage size; invalidated by SetSize.
        if (_isBaseStorageSizeDirty)
        {
            Result rc = _baseStorage.GetSize(out long baseStorageSize);
            if (rc.IsFailure()) return rc.Miss();

            _baseStorageSize = baseStorageSize;
            _isBaseStorageSizeDirty = false;
        }

        size = _baseStorageSize;
        return Result.Success;
    }

    public override Result OperateRange(Span<byte> outBuffer, OperationId operationId, long offset, long size,
        ReadOnlySpan<byte> inBuffer)
    {
        if (operationId == OperationId.InvalidateCache)
        {
            return _baseStorage.OperateRange(OperationId.InvalidateCache, offset, size);
        }

        if (size == 0)
            return Result.Success;

        Result rc = GetSize(out long baseStorageSize);
        if (rc.IsFailure()) return rc.Miss();

        if (!CheckOffsetAndSize(offset, size))
            return ResultFs.OutOfRange.Log();

        // Expand the operated range outward to aligned boundaries, clamped to the storage size.
        long validSize = Math.Min(size, baseStorageSize - offset);
        long alignedOffset = Alignment.AlignDownPow2(offset, DataAlign);
        long alignedOffsetEnd = Alignment.AlignUpPow2(offset + validSize, DataAlign);
        long alignedSize = alignedOffsetEnd - alignedOffset;

        return _baseStorage.OperateRange(outBuffer, operationId, alignedOffset, alignedSize, inBuffer);
    }
}
/// <summary>
/// Handles accessing a base <see cref="IStorage"/> that must always be accessed via an aligned offset and size.
/// </summary>
/// <typeparam name="TBufferAlignment">The alignment of the destination buffer for the core read.
/// Must be a power of 2.</typeparam>
/// <remarks><para>On every access this class allocates a work buffer that is used for handling any partial blocks at
/// the beginning or end of the requested range. For data alignment sizes of 0x200 or smaller
/// <see cref="AlignmentMatchingStorage{TDataAlignment,TBufferAlignment}"/> should be used instead
/// to avoid these allocations.</para>
/// <para>Based on FS 13.1.0 (nnSdk 13.4.0)</para></remarks>
public class AlignmentMatchingStoragePooledBuffer<TBufferAlignment> : IStorage
    where TBufferAlignment : struct, IAlignmentMatchingStorageSize
{
    public static uint BufferAlign => (uint)Unsafe.SizeOf<TBufferAlignment>();

    private IStorage _baseStorage;
    private long _baseStorageSize;
    private uint _dataAlignment;
    private bool _isBaseStorageSizeDirty;

    // LibHac addition: This field goes unused if initialized with a plain IStorage.
    // The original class uses a template for both the shared and non-shared IStorage which avoids needing this field.
    private SharedRef<IStorage> _sharedBaseStorage;

    public AlignmentMatchingStoragePooledBuffer(IStorage baseStorage, int dataAlign)
    {
        Abort.DoAbortUnless(BitUtil.IsPowerOfTwo(BufferAlign));

        _baseStorage = baseStorage;
        _dataAlignment = (uint)dataAlign;
        _isBaseStorageSizeDirty = true;

        Assert.SdkRequires(BitUtil.IsPowerOfTwo(dataAlign), "DataAlign must be a power of 2.");
    }

    public AlignmentMatchingStoragePooledBuffer(in SharedRef<IStorage> baseStorage, int dataAlign)
    {
        Abort.DoAbortUnless(BitUtil.IsPowerOfTwo(BufferAlign));

        _baseStorage = baseStorage.Get;
        _dataAlignment = (uint)dataAlign;
        _isBaseStorageSizeDirty = true;

        Assert.SdkRequires(BitUtil.IsPowerOfTwo(dataAlign), "DataAlign must be a power of 2.");

        _sharedBaseStorage = SharedRef<IStorage>.CreateCopy(in baseStorage);
    }

    public override void Dispose()
    {
        _sharedBaseStorage.Destroy();

        base.Dispose();
    }

    public override Result Read(long offset, Span<byte> destination)
    {
        if (destination.Length == 0)
            return Result.Success;

        Result rc = GetSize(out long baseStorageSize);
        if (rc.IsFailure()) return rc.Miss();

        if (!CheckAccessRange(offset, destination.Length, baseStorageSize))
            return ResultFs.OutOfRange.Log();

        // Rent a pooled work buffer for handling partial blocks at the range edges.
        using var pooledBuffer = new PooledBuffer();
        pooledBuffer.AllocateParticularlyLarge((int)_dataAlignment, (int)_dataAlignment);

        return AlignmentMatchingStorageImpl.Read(_baseStorage, pooledBuffer.GetBuffer(), _dataAlignment, BufferAlign,
            offset, destination);
    }

    public override Result Write(long offset, ReadOnlySpan<byte> source)
    {
        if (source.Length == 0)
            return Result.Success;

        Result rc = GetSize(out long baseStorageSize);
        if (rc.IsFailure()) return rc.Miss();

        if (!CheckAccessRange(offset, source.Length, baseStorageSize))
            return ResultFs.OutOfRange.Log();

        // Rent a pooled work buffer for read-modify-write of partial blocks.
        using var pooledBuffer = new PooledBuffer();
        pooledBuffer.AllocateParticularlyLarge((int)_dataAlignment, (int)_dataAlignment);

        return AlignmentMatchingStorageImpl.Write(_baseStorage, pooledBuffer.GetBuffer(), _dataAlignment, BufferAlign,
            offset, source);
    }

    public override Result Flush()
    {
        return _baseStorage.Flush();
    }

    public override Result SetSize(long size)
    {
        // The base storage's size must stay aligned, so round the requested size up.
        Result rc = _baseStorage.SetSize(Alignment.AlignUpPow2(size, _dataAlignment));
        _isBaseStorageSizeDirty = true;

        return rc;
    }

    public override Result GetSize(out long size)
    {
        UnsafeHelpers.SkipParamInit(out size);

        // Cache the base storage size; invalidated by SetSize.
        if (_isBaseStorageSizeDirty)
        {
            Result rc = _baseStorage.GetSize(out long baseStorageSize);
            if (rc.IsFailure()) return rc.Miss();

            _isBaseStorageSizeDirty = false;
            _baseStorageSize = baseStorageSize;
        }

        size = _baseStorageSize;
        return Result.Success;
    }

    public override Result OperateRange(Span<byte> outBuffer, OperationId operationId, long offset, long size,
        ReadOnlySpan<byte> inBuffer)
    {
        if (operationId == OperationId.InvalidateCache)
        {
            return _baseStorage.OperateRange(OperationId.InvalidateCache, offset, size);
        }

        if (size == 0)
            return Result.Success;

        Result rc = GetSize(out long baseStorageSize);
        if (rc.IsFailure()) return rc.Miss();

        if (!CheckOffsetAndSize(offset, size))
            return ResultFs.OutOfRange.Log();

        // Expand the operated range outward to aligned boundaries, clamped to the storage size.
        long validSize = Math.Min(size, baseStorageSize - offset);
        long alignedOffset = Alignment.AlignDownPow2(offset, _dataAlignment);
        long alignedOffsetEnd = Alignment.AlignUpPow2(offset + validSize, _dataAlignment);
        long alignedSize = alignedOffsetEnd - alignedOffset;

        return _baseStorage.OperateRange(outBuffer, operationId, alignedOffset, alignedSize, inBuffer);
    }
}
/// <summary>
/// Handles accessing a base <see cref="IStorage"/> that must always be accessed via an aligned offset and size.
/// </summary>
/// <typeparam name="TBufferAlignment">The alignment of the destination buffer for the core read.
/// Must be a power of 2.</typeparam>
/// <remarks><para>This class is basically the same as
/// <see cref="AlignmentMatchingStoragePooledBuffer{TBufferAlignment}"/> except it doesn't allocate a work buffer
/// for reads that are already aligned, and it ignores the buffer alignment for reads.</para>
/// <para>Based on FS 13.1.0 (nnSdk 13.4.0)</para></remarks>
public class AlignmentMatchingStorageInBulkRead<TBufferAlignment> : IStorage
    where TBufferAlignment : struct, IAlignmentMatchingStorageSize
{
    public static uint BufferAlign => (uint)Unsafe.SizeOf<TBufferAlignment>();

    private IStorage _baseStorage;
    private SharedRef<IStorage> _sharedBaseStorage;
    // Cached size of the base storage; -1 means not yet queried (or invalidated by SetSize).
    private long _baseStorageSize;
    private uint _dataAlignment;

    public AlignmentMatchingStorageInBulkRead(IStorage baseStorage, int dataAlignment)
    {
        Abort.DoAbortUnless(BitUtil.IsPowerOfTwo(BufferAlign));

        _baseStorage = baseStorage;
        _baseStorageSize = -1;
        _dataAlignment = (uint)dataAlignment;

        Assert.SdkRequires(BitUtil.IsPowerOfTwo(dataAlignment));
    }

    public AlignmentMatchingStorageInBulkRead(in SharedRef<IStorage> baseStorage, int dataAlignment)
    {
        Abort.DoAbortUnless(BitUtil.IsPowerOfTwo(BufferAlign));

        _baseStorage = baseStorage.Get;
        _baseStorageSize = -1;
        _dataAlignment = (uint)dataAlignment;

        Assert.SdkRequires(BitUtil.IsPowerOfTwo(dataAlignment));

        _sharedBaseStorage = SharedRef<IStorage>.CreateCopy(in baseStorage);
    }

    public override void Dispose()
    {
        _sharedBaseStorage.Destroy();

        base.Dispose();
    }

    // The original template doesn't define this function, requiring a specialized function
    // for each TBufferAlignment used.
    // The only buffer alignment used by that template is 1, so we use that specialization for our Read method.
    public override Result Read(long offset, Span<byte> destination)
    {
        if (destination.Length == 0)
            return Result.Success;

        Result rc = GetSize(out long baseStorageSize);
        if (rc.IsFailure()) return rc.Miss();

        if (!CheckAccessRange(offset, destination.Length, baseStorageSize))
            return ResultFs.OutOfRange.Log();

        // Calculate the aligned offsets of the requested region.
        long offsetEnd = offset + destination.Length;
        long alignedOffset = Alignment.AlignDownPow2(offset, _dataAlignment);
        long alignedOffsetEnd = Alignment.AlignUpPow2(offsetEnd, _dataAlignment);
        long alignedSize = alignedOffsetEnd - alignedOffset;

        using var pooledBuffer = new PooledBuffer();

        // If we aren't aligned we need to allocate a buffer.
        if (alignedOffset != offset || alignedSize != destination.Length)
        {
            if (alignedSize <= PooledBuffer.GetAllocatableSizeMax())
            {
                // Try to allocate a buffer that will fit the entire aligned read.
                pooledBuffer.Allocate((int)alignedSize, (int)_dataAlignment);

                // If we were able to get a buffer that fits the entire aligned read then read it
                // into the buffer and copy the unaligned portion to the destination buffer.
                if (alignedSize <= pooledBuffer.GetSize())
                {
                    rc = _baseStorage.Read(alignedOffset, pooledBuffer.GetBuffer().Slice(0, (int)alignedSize));
                    if (rc.IsFailure()) return rc.Miss();

                    pooledBuffer.GetBuffer().Slice((int)(offset - alignedOffset), destination.Length)
                        .CopyTo(destination);

                    return Result.Success;
                }

                // We couldn't get as large a buffer as we wanted.
                // Shrink the buffer since we only need a single block.
                pooledBuffer.Shrink((int)_dataAlignment);
            }
            else
            {
                // The requested read is larger than we can allocate, so only allocate a single block.
                pooledBuffer.Allocate((int)_dataAlignment, (int)_dataAlignment);
            }
        }

        // Determine read extents for the aligned portion.
        long coreOffset = Alignment.AlignUpPow2(offset, _dataAlignment);
        long coreOffsetEnd = Alignment.AlignDownPow2(offsetEnd, _dataAlignment);

        // Handle any data before the aligned portion.
        if (offset < coreOffset)
        {
            int headSize = (int)(coreOffset - offset);
            Assert.SdkLess(headSize, destination.Length);

            rc = _baseStorage.Read(alignedOffset, pooledBuffer.GetBuffer().Slice(0, (int)_dataAlignment));
            if (rc.IsFailure()) return rc.Miss();

            pooledBuffer.GetBuffer().Slice((int)(offset - alignedOffset), headSize).CopyTo(destination);
        }

        // Handle the aligned portion, reading directly into the destination buffer.
        if (coreOffset < coreOffsetEnd)
        {
            int coreSize = (int)(coreOffsetEnd - coreOffset);
            Span<byte> coreBuffer = destination.Slice((int)(coreOffset - offset), coreSize);

            rc = _baseStorage.Read(coreOffset, coreBuffer);
            if (rc.IsFailure()) return rc.Miss();
        }

        // Handle any data after the aligned portion.
        if (coreOffsetEnd < offsetEnd)
        {
            int tailSize = (int)(offsetEnd - coreOffsetEnd);

            rc = _baseStorage.Read(coreOffsetEnd, pooledBuffer.GetBuffer().Slice(0, (int)_dataAlignment));
            if (rc.IsFailure()) return rc.Miss();

            pooledBuffer.GetBuffer().Slice(0, tailSize).CopyTo(destination.Slice((int)(coreOffsetEnd - offset)));
        }

        return Result.Success;
    }

    public override Result Write(long offset, ReadOnlySpan<byte> source)
    {
        if (source.Length == 0)
            return Result.Success;

        Result rc = GetSize(out long baseStorageSize);
        if (rc.IsFailure()) return rc.Miss();

        if (!CheckAccessRange(offset, source.Length, baseStorageSize))
            return ResultFs.OutOfRange.Log();

        // Writes always use a single-block work buffer for the partial edges.
        using var pooledBuffer = new PooledBuffer((int)_dataAlignment, (int)_dataAlignment);

        return AlignmentMatchingStorageImpl.Write(_baseStorage, pooledBuffer.GetBuffer(), _dataAlignment, BufferAlign,
            offset, source);
    }

    public override Result Flush()
    {
        return _baseStorage.Flush();
    }

    public override Result SetSize(long size)
    {
        // The base storage's size must stay aligned; invalidate the cached size.
        Result rc = _baseStorage.SetSize(Alignment.AlignUpPow2(size, _dataAlignment));
        _baseStorageSize = -1;

        return rc;
    }

    public override Result GetSize(out long size)
    {
        UnsafeHelpers.SkipParamInit(out size);

        // A negative cached size means we need to query the base storage.
        if (_baseStorageSize < 0)
        {
            Result rc = _baseStorage.GetSize(out long baseStorageSize);
            if (rc.IsFailure()) return rc.Miss();

            _baseStorageSize = baseStorageSize;
        }

        size = _baseStorageSize;
        return Result.Success;
    }

    public override Result OperateRange(Span<byte> outBuffer, OperationId operationId, long offset, long size,
        ReadOnlySpan<byte> inBuffer)
    {
        if (operationId == OperationId.InvalidateCache)
        {
            return _baseStorage.OperateRange(OperationId.InvalidateCache, offset, size);
        }

        if (size == 0)
            return Result.Success;

        Result rc = GetSize(out long baseStorageSize);
        if (rc.IsFailure()) return rc.Miss();

        if (!CheckOffsetAndSize(offset, size))
            return ResultFs.OutOfRange.Log();

        // Expand the operated range outward to aligned boundaries, clamped to the storage size.
        long validSize = Math.Min(size, baseStorageSize - offset);
        long alignedOffset = Alignment.AlignDownPow2(offset, _dataAlignment);
        long alignedOffsetEnd = Alignment.AlignUpPow2(offset + validSize, _dataAlignment);
        long alignedSize = alignedOffsetEnd - alignedOffset;

        return _baseStorage.OperateRange(outBuffer, operationId, alignedOffset, alignedSize, inBuffer);
    }
}
using System;
using LibHac.Common;
using LibHac.Diag;
using LibHac.Fs;
using LibHac.Util;

namespace LibHac.FsSystem;

/// <summary>
/// Contains the functions used by classes like
/// <see cref="AlignmentMatchingStorage{TDataAlignment,TBufferAlignment}"/> for
/// accessing an aligned <see cref="IStorage"/>.
/// </summary>
/// <remarks>Based on FS 13.1.0 (nnSdk 13.4.0)</remarks>
public static class AlignmentMatchingStorageImpl
{
    // Distance from value down to the previous aligned boundary.
    public static uint GetRoundDownDifference(int value, uint alignment)
    {
        return (uint)(value - Alignment.AlignDownPow2(value, alignment));
    }

    public static uint GetRoundDownDifference(long value, uint alignment)
    {
        return (uint)(value - Alignment.AlignDownPow2(value, alignment));
    }

    // Distance from value up to the next aligned boundary.
    public static uint GetRoundUpDifference(int value, uint alignment)
    {
        return (uint)(Alignment.AlignUpPow2(value, alignment) - value);
    }

    private static uint GetRoundUpDifference(long value, uint alignment)
    {
        return (uint)(Alignment.AlignUpPow2(value, alignment) - value);
    }

    public static Result Read(in SharedRef<IStorage> storage, Span<byte> workBuffer, uint dataAlignment,
        uint bufferAlignment, long offset, Span<byte> destination)
    {
        return Read(storage.Get, workBuffer, dataAlignment, bufferAlignment, offset, destination);
    }

    public static Result Write(in SharedRef<IStorage> storage, Span<byte> subBuffer, uint dataAlignment,
        uint bufferAlignment, long offset, ReadOnlySpan<byte> source)
    {
        return Write(storage.Get, subBuffer, dataAlignment, bufferAlignment, offset, source);
    }

    public static Result Read(IStorage storage, Span<byte> workBuffer, uint dataAlignment, uint bufferAlignment,
        long offset, Span<byte> destination)
    {
        // We don't support buffer alignment because Nintendo never uses any alignment other than 1, and because
        // we'd have to mess with pinning the buffer.
        Abort.DoAbortUnless(bufferAlignment == 1);

        Assert.SdkRequiresGreaterEqual((uint)workBuffer.Length, dataAlignment);

        if (destination.Length == 0)
            return Result.Success;

        // Calculate the range that contains only full data blocks.
        uint offsetRoundUpDifference = GetRoundUpDifference(offset, dataAlignment);

        long coreOffset = Alignment.AlignUpPow2(offset, dataAlignment);
        long coreSize = destination.Length < offsetRoundUpDifference
            ? 0
            : Alignment.AlignDownPow2(destination.Length - offsetRoundUpDifference, dataAlignment);

        long coveredOffset = coreSize > 0 ? coreOffset : offset;

        // Read the core portion that doesn't contain any partial blocks.
        if (coreSize > 0)
        {
            Result rc = storage.Read(coreOffset, destination.Slice((int)offsetRoundUpDifference, (int)coreSize));
            if (rc.IsFailure()) return rc.Miss();
        }

        // Read any partial block at the head of the requested range
        if (offset < coveredOffset)
        {
            long headOffset = Alignment.AlignDownPow2(offset, dataAlignment);
            int headSize = (int)(coveredOffset - offset);

            Assert.SdkAssert(GetRoundDownDifference(offset, dataAlignment) + headSize <= workBuffer.Length);

            Result rc = storage.Read(headOffset, workBuffer.Slice(0, (int)dataAlignment));
            if (rc.IsFailure()) return rc.Miss();

            workBuffer.Slice((int)GetRoundDownDifference(offset, dataAlignment), headSize).CopyTo(destination);
        }

        long tailOffset = coveredOffset + coreSize;
        long remainingTailSize = offset + destination.Length - tailOffset;

        // Read any partial block at the tail of the requested range
        while (remainingTailSize > 0)
        {
            long alignedTailOffset = Alignment.AlignDownPow2(tailOffset, dataAlignment);
            long copySize = Math.Min(alignedTailOffset + dataAlignment - tailOffset, remainingTailSize);

            Result rc = storage.Read(alignedTailOffset, workBuffer.Slice(0, (int)dataAlignment));
            if (rc.IsFailure()) return rc.Miss();

            Assert.SdkAssert(tailOffset - offset + copySize <= destination.Length);
            Assert.SdkAssert(tailOffset - alignedTailOffset + copySize <= dataAlignment);
            workBuffer.Slice((int)(tailOffset - alignedTailOffset), (int)copySize)
                .CopyTo(destination.Slice((int)(tailOffset - offset)));

            remainingTailSize -= copySize;
            tailOffset += copySize;
        }

        return Result.Success;
    }

    public static Result Write(IStorage storage, Span<byte> workBuffer, uint dataAlignment, uint bufferAlignment,
        long offset, ReadOnlySpan<byte> source)
    {
        // We don't support buffer alignment because Nintendo never uses any alignment other than 1, and because
        // we'd have to mess with pinning the buffer.
        Abort.DoAbortUnless(bufferAlignment == 1);

        Assert.SdkRequiresGreaterEqual((uint)workBuffer.Length, dataAlignment);

        if (source.Length == 0)
            return Result.Success;

        // Calculate the range that contains only full data blocks.
        uint offsetRoundUpDifference = GetRoundUpDifference(offset, dataAlignment);

        long coreOffset = Alignment.AlignUpPow2(offset, dataAlignment);
        long coreSize = source.Length < offsetRoundUpDifference
            ? 0
            : Alignment.AlignDownPow2(source.Length - offsetRoundUpDifference, dataAlignment);

        long coveredOffset = coreSize > 0 ? coreOffset : offset;

        // Write the core portion that doesn't contain any partial blocks.
        if (coreSize > 0)
        {
            Result rc = storage.Write(coreOffset, source.Slice((int)offsetRoundUpDifference, (int)coreSize));
            if (rc.IsFailure()) return rc.Miss();
        }

        // Write any partial block at the head of the specified range
        if (offset < coveredOffset)
        {
            long headOffset = Alignment.AlignDownPow2(offset, dataAlignment);
            int headSize = (int)(coveredOffset - offset);

            Assert.SdkAssert((offset - headOffset) + headSize <= workBuffer.Length);

            // Read the existing block, copy the partial block to the appropriate portion,
            // and write the modified block back to the base storage.
            Result rc = storage.Read(headOffset, workBuffer.Slice(0, (int)dataAlignment));
            if (rc.IsFailure()) return rc.Miss();

            source.Slice(0, headSize).CopyTo(workBuffer.Slice((int)(offset - headOffset)));

            rc = storage.Write(headOffset, workBuffer.Slice(0, (int)dataAlignment));
            if (rc.IsFailure()) return rc.Miss();
        }

        long tailOffset = coveredOffset + coreSize;
        long remainingTailSize = offset + source.Length - tailOffset;

        // Write any partial block at the tail of the specified range
        while (remainingTailSize > 0)
        {
            Assert.SdkAssert(tailOffset - offset < source.Length);

            long alignedTailOffset = Alignment.AlignDownPow2(tailOffset, dataAlignment);
            long copySize = Math.Min(alignedTailOffset + dataAlignment - tailOffset, remainingTailSize);

            // Read the existing block, copy the partial block to the appropriate portion,
            // and write the modified block back to the base storage.
            Result rc = storage.Read(alignedTailOffset, workBuffer.Slice(0, (int)dataAlignment));
            if (rc.IsFailure()) return rc.Miss();

            source.Slice((int)(tailOffset - offset), (int)copySize)
                .CopyTo(workBuffer.Slice((int)GetRoundDownDifference(tailOffset, dataAlignment)));

            rc = storage.Write(alignedTailOffset, workBuffer.Slice(0, (int)dataAlignment));
            if (rc.IsFailure()) return rc.Miss();

            remainingTailSize -= copySize;
            tailOffset += copySize;
        }

        return Result.Success;
    }
}