Mirror of https://github.com/Thealexbarney/LibHac.git
synced 2024-11-14 10:49:41 +01:00
Merge pull request #147 from Thealexbarney/bucket-tree
Add more accurate BucketTree and IndirectStorage classes to replace the old ones. Add a bucket tree builder.
This commit is contained in commit d3c95d14d3.
11 changed files with 1506 additions and 203 deletions
@@ -50,7 +50,24 @@ Module,DescriptionStart,DescriptionEnd,Name,Summary

2,4000,4999,DataCorrupted,
2,4001,4299,RomCorrupted,
2,4023,,InvalidIndirectStorageSource,

2,4021,4029,IndirectStorageCorrupted,
2,4022,,InvalidIndirectEntryOffset,
2,4023,,InvalidIndirectEntryStorageIndex,
2,4024,,InvalidIndirectStorageSize,
2,4025,,InvalidIndirectVirtualOffset,
2,4026,,InvalidIndirectPhysicalOffset,
2,4027,,InvalidIndirectStorageIndex,

2,4031,4039,BucketTreeCorrupted,
2,4032,,InvalidBucketTreeSignature,
2,4033,,InvalidBucketTreeEntryCount,
2,4034,,InvalidBucketTreeNodeEntryCount,
2,4035,,InvalidBucketTreeNodeOffset,
2,4036,,InvalidBucketTreeEntryOffset,
2,4037,,InvalidBucketTreeEntrySetOffset,
2,4038,,InvalidBucketTreeNodeIndex,
2,4039,,InvalidBucketTreeVirtualOffset,

2,4241,4259,RomHostFileSystemCorrupted,
2,4242,,RomHostEntryCorrupted,

@@ -220,6 +237,7 @@ Module,DescriptionStart,DescriptionEnd,Name,Summary
2,6606,,TargetProgramIndexNotFound,Specified program index is not found

2,6700,6799,OutOfResource,
2,6705,,BufferAllocationFailed,
2,6706,,MappingTableFull,
2,6707,,AllocationTableInsufficientFreeBlocks,
2,6709,,OpenCountLimit,
@@ -57,5 +57,34 @@ namespace LibHac.Common
        {
            return CreateReadOnlySpan(ref Unsafe.As<T, byte>(ref reference), Unsafe.SizeOf<T>());
        }

        // All AsStruct methods do bounds checks on the input
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static ref T AsStruct<T>(Span<byte> span) where T : unmanaged
        {
            return ref MemoryMarshal.Cast<byte, T>(span)[0];
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static ref readonly T AsReadOnlyStruct<T>(ReadOnlySpan<byte> span) where T : unmanaged
        {
            return ref MemoryMarshal.Cast<byte, T>(span)[0];
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static ref TTo AsStruct<TFrom, TTo>(Span<TFrom> span)
            where TFrom : unmanaged
            where TTo : unmanaged
        {
            return ref MemoryMarshal.Cast<TFrom, TTo>(span)[0];
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static ref readonly TTo AsStruct<TFrom, TTo>(ReadOnlySpan<TFrom> span)
            where TFrom : unmanaged
            where TTo : unmanaged
        {
            return ref MemoryMarshal.Cast<TFrom, TTo>(span)[0];
        }
    }
}
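The AsStruct helpers added here reinterpret the leading bytes of a span as a reference to an unmanaged struct; the bounds check comes from MemoryMarshal.Cast, which yields an empty span (and therefore a failed index) when the input is shorter than the target type. A minimal usage sketch; the ExampleHeader type and its fields are illustrative and not part of this change:

using System;
using System.Runtime.InteropServices;
using LibHac.Common;

[StructLayout(LayoutKind.Sequential)]
public struct ExampleHeader
{
    public uint Magic;
    public int EntryCount;
}

public static class SpanHelpersExample
{
    // Reads a field in place instead of copying the whole header out of the buffer.
    public static int ReadEntryCount(Span<byte> headerBytes)
    {
        ref ExampleHeader header = ref SpanHelpers.AsStruct<ExampleHeader>(headerBytes);
        return header.EntryCount;
    }
}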
@@ -18,5 +18,29 @@ namespace LibHac.Diag

            throw new LibHacException($"Assertion failed: {message}");
        }

        [Conditional("DEBUG")]
        public static void NotNull<T>([NotNull] T item) where T : class
        {
            if (item is null)
            {
                throw new LibHacException("Not-null assertion failed.");
            }
        }

        [Conditional("DEBUG")]
        public static void InRange(int value, int lowerInclusive, int upperExclusive)
        {
            InRange((long)value, lowerInclusive, upperExclusive);
        }

        [Conditional("DEBUG")]
        public static void InRange(long value, long lowerInclusive, long upperExclusive)
        {
            if (value < lowerInclusive || value >= upperExclusive)
            {
                throw new LibHacException($"Value {value} is not in the range {lowerInclusive} to {upperExclusive}");
            }
        }
    }
}
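Because of [Conditional("DEBUG")], these assertions compile away entirely in release builds, so they are meant for internal invariants rather than argument validation. A short usage sketch with an illustrative helper method:

using LibHac.Diag;

public static class AssertExample
{
    public static long PickDivisor(long value, int index, int[] divisors)
    {
        // Both checks vanish from release builds.
        Assert.NotNull(divisors);
        Assert.InRange(index, 0, divisors.Length);

        return value / divisors[index];
    }
}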
@@ -132,8 +132,8 @@ namespace LibHac.Fs
        protected abstract Result DoRead(long offset, Span<byte> destination);
        protected abstract Result DoWrite(long offset, ReadOnlySpan<byte> source);
        protected abstract Result DoFlush();
        protected abstract Result DoGetSize(out long size);
        protected abstract Result DoSetSize(long size);
        protected abstract Result DoGetSize(out long size);

        protected virtual Result DoOperateRange(Span<byte> outBuffer, OperationId operationId, long offset, long size,
            ReadOnlySpan<byte> inBuffer)
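This hunk only swaps the order of the size accessors on the abstract storage base, but it is a convenient place to show what a concrete storage implements. A simplified sketch of a fixed-size in-memory storage, assuming IStorage is the abstract base seen elsewhere in this diff; the real class has more members than the hunk shows:

using System;
using LibHac;
using LibHac.Fs;

// Illustrative only: a storage over a fixed-size byte array.
public class ExampleMemoryStorage : IStorage
{
    private readonly byte[] _data;

    public ExampleMemoryStorage(byte[] data) => _data = data;

    protected override Result DoRead(long offset, Span<byte> destination)
    {
        _data.AsSpan((int)offset, destination.Length).CopyTo(destination);
        return Result.Success;
    }

    protected override Result DoWrite(long offset, ReadOnlySpan<byte> source)
    {
        source.CopyTo(_data.AsSpan((int)offset, source.Length));
        return Result.Success;
    }

    protected override Result DoFlush() => Result.Success;

    // The backing array is fixed-size; a real implementation would return an
    // appropriate ResultFs error instead of ignoring the request.
    protected override Result DoSetSize(long size) => Result.Success;

    protected override Result DoGetSize(out long size)
    {
        size = _data.Length;
        return Result.Success;
    }
}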
@@ -114,8 +114,39 @@ namespace LibHac.Fs
        public static Result.Base DataCorrupted { [MethodImpl(MethodImplOptions.AggressiveInlining)] get => new Result.Base(ModuleFs, 4000, 4999); }
        /// <summary>Error code: 2002-4001; Range: 4001-4299; Inner value: 0x1f4202</summary>
        public static Result.Base RomCorrupted { [MethodImpl(MethodImplOptions.AggressiveInlining)] get => new Result.Base(ModuleFs, 4001, 4299); }
        /// <summary>Error code: 2002-4023; Inner value: 0x1f6e02</summary>
        public static Result.Base InvalidIndirectStorageSource => new Result.Base(ModuleFs, 4023);
        /// <summary>Error code: 2002-4021; Range: 4021-4029; Inner value: 0x1f6a02</summary>
        public static Result.Base IndirectStorageCorrupted { [MethodImpl(MethodImplOptions.AggressiveInlining)] get => new Result.Base(ModuleFs, 4021, 4029); }
        /// <summary>Error code: 2002-4022; Inner value: 0x1f6c02</summary>
        public static Result.Base InvalidIndirectEntryOffset => new Result.Base(ModuleFs, 4022);
        /// <summary>Error code: 2002-4023; Inner value: 0x1f6e02</summary>
        public static Result.Base InvalidIndirectEntryStorageIndex => new Result.Base(ModuleFs, 4023);
        /// <summary>Error code: 2002-4024; Inner value: 0x1f7002</summary>
        public static Result.Base InvalidIndirectStorageSize => new Result.Base(ModuleFs, 4024);
        /// <summary>Error code: 2002-4025; Inner value: 0x1f7202</summary>
        public static Result.Base InvalidIndirectVirtualOffset => new Result.Base(ModuleFs, 4025);
        /// <summary>Error code: 2002-4026; Inner value: 0x1f7402</summary>
        public static Result.Base InvalidIndirectPhysicalOffset => new Result.Base(ModuleFs, 4026);
        /// <summary>Error code: 2002-4027; Inner value: 0x1f7602</summary>
        public static Result.Base InvalidIndirectStorageIndex => new Result.Base(ModuleFs, 4027);

        /// <summary>Error code: 2002-4031; Range: 4031-4039; Inner value: 0x1f7e02</summary>
        public static Result.Base BucketTreeCorrupted { [MethodImpl(MethodImplOptions.AggressiveInlining)] get => new Result.Base(ModuleFs, 4031, 4039); }
        /// <summary>Error code: 2002-4032; Inner value: 0x1f8002</summary>
        public static Result.Base InvalidBucketTreeSignature => new Result.Base(ModuleFs, 4032);
        /// <summary>Error code: 2002-4033; Inner value: 0x1f8202</summary>
        public static Result.Base InvalidBucketTreeEntryCount => new Result.Base(ModuleFs, 4033);
        /// <summary>Error code: 2002-4034; Inner value: 0x1f8402</summary>
        public static Result.Base InvalidBucketTreeNodeEntryCount => new Result.Base(ModuleFs, 4034);
        /// <summary>Error code: 2002-4035; Inner value: 0x1f8602</summary>
        public static Result.Base InvalidBucketTreeNodeOffset => new Result.Base(ModuleFs, 4035);
        /// <summary>Error code: 2002-4036; Inner value: 0x1f8802</summary>
        public static Result.Base InvalidBucketTreeEntryOffset => new Result.Base(ModuleFs, 4036);
        /// <summary>Error code: 2002-4037; Inner value: 0x1f8a02</summary>
        public static Result.Base InvalidBucketTreeEntrySetOffset => new Result.Base(ModuleFs, 4037);
        /// <summary>Error code: 2002-4038; Inner value: 0x1f8c02</summary>
        public static Result.Base InvalidBucketTreeNodeIndex => new Result.Base(ModuleFs, 4038);
        /// <summary>Error code: 2002-4039; Inner value: 0x1f8e02</summary>
        public static Result.Base InvalidBucketTreeVirtualOffset => new Result.Base(ModuleFs, 4039);

        /// <summary>Error code: 2002-4241; Range: 4241-4259; Inner value: 0x212202</summary>
        public static Result.Base RomHostFileSystemCorrupted { [MethodImpl(MethodImplOptions.AggressiveInlining)] get => new Result.Base(ModuleFs, 4241, 4259); }

@@ -426,6 +457,8 @@ namespace LibHac.Fs

        /// <summary>Error code: 2002-6700; Range: 6700-6799; Inner value: 0x345802</summary>
        public static Result.Base OutOfResource { [MethodImpl(MethodImplOptions.AggressiveInlining)] get => new Result.Base(ModuleFs, 6700, 6799); }
        /// <summary>Error code: 2002-6705; Inner value: 0x346202</summary>
        public static Result.Base BufferAllocationFailed => new Result.Base(ModuleFs, 6705);
        /// <summary>Error code: 2002-6706; Inner value: 0x346402</summary>
        public static Result.Base MappingTableFull => new Result.Base(ModuleFs, 6706);
        /// <summary>Error code: 2002-6707; Inner value: 0x346602</summary>
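The doc comments encode each result twice: the display code is 2000 plus the module number (2 for FS) followed by the description value, and the inner value packs the description above a 9-bit module field. A quick check of that encoding against the values listed above:

public static class ResultCodeCheck
{
    public static void Main()
    {
        const int ModuleFs = 2;
        const int Description = 4023; // InvalidIndirectEntryStorageIndex

        // Inner value: description in the bits above the 9-bit module field.
        int innerValue = (Description << 9) | ModuleFs;        // 0x1F6E02, matching the comment above
        string errorCode = $"{2000 + ModuleFs}-{Description}"; // "2002-4023"

        System.Console.WriteLine($"{errorCode} -> 0x{innerValue:X6}");
    }
}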
@@ -1,65 +1,86 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using LibHac.Fs;

namespace LibHac.FsSystem
{
    public class Aes128CtrExStorage : Aes128CtrStorage
    {
        private List<AesSubsectionEntry> SubsectionEntries { get; }
        private List<long> SubsectionOffsets { get; }
        private BucketTree<AesSubsectionEntry> BucketTree { get; }
        public static readonly int NodeSize = 1024 * 16;

        private BucketTree Table { get; } = new BucketTree();

        private readonly object _locker = new object();

        public Aes128CtrExStorage(IStorage baseStorage, IStorage bucketTreeData, byte[] key, long counterOffset, byte[] ctrHi, bool leaveOpen)
            : base(baseStorage, key, counterOffset, ctrHi, leaveOpen)
        [StructLayout(LayoutKind.Sequential, Size = 0x10)]
        public struct Entry
        {
            BucketTree = new BucketTree<AesSubsectionEntry>(bucketTreeData);

            SubsectionEntries = BucketTree.GetEntryList();
            SubsectionOffsets = SubsectionEntries.Select(x => x.Offset).ToList();
            public long Offset;
            public int Reserved;
            public int Generation;
        }

        public Aes128CtrExStorage(IStorage baseStorage, IStorage bucketTreeData, byte[] key, byte[] counter, bool leaveOpen)
        public Aes128CtrExStorage(IStorage baseStorage, SubStorage2 nodeStorage, SubStorage2 entryStorage,
            int entryCount, byte[] key, byte[] counter, bool leaveOpen)
            : base(baseStorage, key, counter, leaveOpen)
        {
            BucketTree = new BucketTree<AesSubsectionEntry>(bucketTreeData);

            SubsectionEntries = BucketTree.GetEntryList();
            SubsectionOffsets = SubsectionEntries.Select(x => x.Offset).ToList();
            Result rc = Table.Initialize(nodeStorage, entryStorage, NodeSize, Unsafe.SizeOf<Entry>(), entryCount);
            rc.ThrowIfFailure();
        }

        protected override Result DoRead(long offset, Span<byte> destination)
        {
            AesSubsectionEntry entry = GetSubsectionEntry(offset);
            if (destination.Length == 0)
                return Result.Success;

            long inPos = offset;
            int outPos = 0;
            int remaining = destination.Length;
            var visitor = new BucketTree.Visitor();

            while (remaining > 0)
            try
            {
                int bytesToRead = (int)Math.Min(entry.OffsetEnd - inPos, remaining);
                Result rc = Table.Find(ref visitor, offset);
                if (rc.IsFailure()) return rc;

                lock (_locker)
                long inPos = offset;
                int outPos = 0;
                int remaining = destination.Length;

                while (remaining > 0)
                {
                    UpdateCounterSubsection(entry.Counter);
                    var currentEntry = visitor.Get<Entry>();

                    Result rc = base.DoRead(inPos, destination.Slice(outPos, bytesToRead));
                    if (rc.IsFailure()) return rc;
                }
                    // Get and validate the next entry offset
                    long nextEntryOffset;
                    if (visitor.CanMoveNext())
                    {
                        rc = visitor.MoveNext();
                        if (rc.IsFailure()) return rc;

                outPos += bytesToRead;
                inPos += bytesToRead;
                remaining -= bytesToRead;
                        nextEntryOffset = visitor.Get<Entry>().Offset;
                        if (!Table.Includes(nextEntryOffset))
                            return ResultFs.InvalidIndirectEntryOffset.Log();
                    }
                    else
                    {
                        nextEntryOffset = Table.GetEnd();
                    }

                if (remaining != 0 && inPos >= entry.OffsetEnd)
                {
                    entry = entry.Next;
                    int bytesToRead = (int)Math.Min(nextEntryOffset - inPos, remaining);

                    lock (_locker)
                    {
                        UpdateCounterSubsection((uint)currentEntry.Generation);

                        rc = base.DoRead(inPos, destination.Slice(outPos, bytesToRead));
                        if (rc.IsFailure()) return rc;
                    }

                    outPos += bytesToRead;
                    inPos += bytesToRead;
                    remaining -= bytesToRead;
                }
            }
            finally { visitor.Dispose(); }

            return Result.Success;
        }

@@ -74,13 +95,6 @@ namespace LibHac.FsSystem
            return Result.Success;
        }

        private AesSubsectionEntry GetSubsectionEntry(long offset)
        {
            int index = SubsectionOffsets.BinarySearch(offset);
            if (index < 0) index = ~index - 1;
            return SubsectionEntries[index];
        }

        private void UpdateCounterSubsection(uint value)
        {
            Counter[7] = (byte)value;
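The rewritten storage no longer materializes the whole subsection list; DoRead walks the bucket tree with a Visitor, patching the AES counter with each entry's Generation before decrypting that slice of the base storage. A wiring sketch for the new constructor; the storages, key, and counter are placeholders the caller must obtain from the container being read:

using System;
using LibHac;
using LibHac.Fs;
using LibHac.FsSystem;

public static class Aes128CtrExStorageExample
{
    // Decrypts the first bytes of an AES-CTR-EX region. nodeStorage/entryStorage hold the
    // subsection bucket tree; key and counter come from the container's headers.
    public static Result ReadStart(IStorage encryptedData, SubStorage2 nodeStorage,
        SubStorage2 entryStorage, int entryCount, byte[] key, byte[] counter, Span<byte> destination)
    {
        var storage = new Aes128CtrExStorage(encryptedData, nodeStorage, entryStorage,
            entryCount, key, counter, leaveOpen: true);

        // Each read finds the containing subsection entry and patches the counter
        // with that entry's Generation before decrypting.
        return storage.Read(0, destination);
    }
}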
@ -1,109 +1,743 @@
|
|||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Linq;
|
||||
using System;
|
||||
using System.Buffers;
|
||||
using System.Buffers.Binary;
|
||||
using System.Runtime.CompilerServices;
|
||||
using System.Runtime.InteropServices;
|
||||
using LibHac.Common;
|
||||
using LibHac.Diag;
|
||||
using LibHac.Fs;
|
||||
|
||||
namespace LibHac.FsSystem
|
||||
{
|
||||
public class BucketTree<T> where T : BucketTreeEntry<T>, new()
|
||||
public partial class BucketTree
|
||||
{
|
||||
private const int BucketAlignment = 0x4000;
|
||||
public BucketTreeBucket<OffsetEntry> BucketOffsets { get; }
|
||||
public BucketTreeBucket<T>[] Buckets { get; }
|
||||
private const uint ExpectedMagic = 0x52544B42; // BKTR
|
||||
private const int MaxVersion = 1;
|
||||
|
||||
public BucketTree(IStorage data)
|
||||
private const int NodeSizeMin = 1024;
|
||||
private const int NodeSizeMax = 1024 * 512;
|
||||
|
||||
private static int NodeHeaderSize => Unsafe.SizeOf<NodeHeader>();
|
||||
|
||||
private SubStorage2 NodeStorage { get; set; }
|
||||
private SubStorage2 EntryStorage { get; set; }
|
||||
|
||||
private NodeBuffer _nodeL1 = new NodeBuffer();
|
||||
|
||||
private long NodeSize { get; set; }
|
||||
private long EntrySize { get; set; }
|
||||
private int OffsetCount { get; set; }
|
||||
private int EntrySetCount { get; set; }
|
||||
private long StartOffset { get; set; }
|
||||
private long EndOffset { get; set; }
|
||||
|
||||
public Result Initialize(SubStorage2 nodeStorage, SubStorage2 entryStorage, int nodeSize, int entrySize,
|
||||
int entryCount)
|
||||
{
|
||||
var reader = new BinaryReader(data.AsStream());
|
||||
Assert.AssertTrue(entrySize >= sizeof(long));
|
||||
Assert.AssertTrue(nodeSize >= entrySize + Unsafe.SizeOf<NodeHeader>());
|
||||
Assert.AssertTrue(NodeSizeMin <= nodeSize && nodeSize <= NodeSizeMax);
|
||||
Assert.AssertTrue(Util.IsPowerOfTwo(nodeSize));
|
||||
Assert.AssertTrue(!IsInitialized());
|
||||
|
||||
BucketOffsets = new BucketTreeBucket<OffsetEntry>(reader);
|
||||
// Ensure valid entry count.
|
||||
if (entryCount <= 0)
|
||||
return ResultFs.InvalidArgument.Log();
|
||||
|
||||
Buckets = new BucketTreeBucket<T>[BucketOffsets.EntryCount];
|
||||
// Allocate node.
|
||||
if (!_nodeL1.Allocate(nodeSize))
|
||||
return ResultFs.BufferAllocationFailed.Log();
|
||||
|
||||
for (int i = 0; i < BucketOffsets.EntryCount; i++)
|
||||
bool needFree = true;
|
||||
try
|
||||
{
|
||||
reader.BaseStream.Position = (i + 1) * BucketAlignment;
|
||||
Buckets[i] = new BucketTreeBucket<T>(reader);
|
||||
// Read node.
|
||||
Result rc = nodeStorage.Read(0, _nodeL1.GetBuffer());
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
// Verify node.
|
||||
rc = _nodeL1.GetHeader().Verify(0, nodeSize, sizeof(long));
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
// Validate offsets.
|
||||
int offsetCount = GetOffsetCount(nodeSize);
|
||||
int entrySetCount = GetEntrySetCount(nodeSize, entrySize, entryCount);
|
||||
BucketTreeNode<long> node = _nodeL1.GetNode<long>();
|
||||
|
||||
long startOffset;
|
||||
if (offsetCount < entrySetCount && node.GetCount() < offsetCount)
|
||||
{
|
||||
startOffset = node.GetL2BeginOffset();
|
||||
}
|
||||
else
|
||||
{
|
||||
startOffset = node.GetBeginOffset();
|
||||
}
|
||||
|
||||
long endOffset = node.GetEndOffset();
|
||||
|
||||
if (startOffset < 0 || startOffset > node.GetBeginOffset() || startOffset >= endOffset)
|
||||
return ResultFs.InvalidBucketTreeEntryOffset.Log();
|
||||
|
||||
NodeStorage = nodeStorage;
|
||||
EntryStorage = entryStorage;
|
||||
NodeSize = nodeSize;
|
||||
EntrySize = entrySize;
|
||||
OffsetCount = offsetCount;
|
||||
EntrySetCount = entrySetCount;
|
||||
StartOffset = startOffset;
|
||||
EndOffset = endOffset;
|
||||
|
||||
needFree = false;
|
||||
|
||||
return Result.Success;
|
||||
}
|
||||
finally
|
||||
{
|
||||
if (needFree)
|
||||
_nodeL1.Free();
|
||||
}
|
||||
}
|
||||
|
||||
public List<T> GetEntryList()
|
||||
{
|
||||
List<T> list = Buckets.SelectMany(x => x.Entries).ToList();
|
||||
public bool IsInitialized() => NodeSize > 0;
|
||||
public bool IsEmpty() => EntrySize == 0;
|
||||
|
||||
for (int i = 0; i < list.Count - 1; i++)
|
||||
public long GetStart() => StartOffset;
|
||||
public long GetEnd() => EndOffset;
|
||||
public long GetSize() => EndOffset - StartOffset;
|
||||
|
||||
public bool Includes(long offset)
|
||||
{
|
||||
return StartOffset <= offset && offset < EndOffset;
|
||||
}
|
||||
|
||||
public bool Includes(long offset, long size)
|
||||
{
|
||||
return size > 0 && StartOffset <= offset && size <= EndOffset - offset;
|
||||
}
|
||||
|
||||
public Result Find(ref Visitor visitor, long virtualAddress)
|
||||
{
|
||||
Assert.AssertTrue(IsInitialized());
|
||||
|
||||
if (virtualAddress < 0)
|
||||
return ResultFs.InvalidOffset.Log();
|
||||
|
||||
if (IsEmpty())
|
||||
return ResultFs.OutOfRange.Log();
|
||||
|
||||
Result rc = visitor.Initialize(this);
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
return visitor.Find(virtualAddress);
|
||||
}
|
||||
|
||||
public static int QueryHeaderStorageSize() => Unsafe.SizeOf<Header>();
|
||||
|
||||
public static long QueryNodeStorageSize(long nodeSize, long entrySize, int entryCount)
|
||||
{
|
||||
Assert.AssertTrue(entrySize >= sizeof(long));
|
||||
Assert.AssertTrue(nodeSize >= entrySize + Unsafe.SizeOf<NodeHeader>());
|
||||
Assert.AssertTrue(NodeSizeMin <= nodeSize && nodeSize <= NodeSizeMax);
|
||||
Assert.AssertTrue(Util.IsPowerOfTwo(nodeSize));
|
||||
Assert.AssertTrue(entryCount >= 0);
|
||||
|
||||
if (entryCount <= 0)
|
||||
return 0;
|
||||
|
||||
return (1 + GetNodeL2Count(nodeSize, entrySize, entryCount)) * nodeSize;
|
||||
}
|
||||
|
||||
public static long QueryEntryStorageSize(long nodeSize, long entrySize, int entryCount)
|
||||
{
|
||||
Assert.AssertTrue(entrySize >= sizeof(long));
|
||||
Assert.AssertTrue(nodeSize >= entrySize + Unsafe.SizeOf<NodeHeader>());
|
||||
Assert.AssertTrue(NodeSizeMin <= nodeSize && nodeSize <= NodeSizeMax);
|
||||
Assert.AssertTrue(Util.IsPowerOfTwo(nodeSize));
|
||||
Assert.AssertTrue(entryCount >= 0);
|
||||
|
||||
if (entryCount <= 0)
|
||||
return 0;
|
||||
|
||||
return GetEntrySetCount(nodeSize, entrySize, entryCount) * nodeSize;
|
||||
}
|
||||
|
||||
private static int GetEntryCount(long nodeSize, long entrySize)
|
||||
{
|
||||
return (int)((nodeSize - Unsafe.SizeOf<NodeHeader>()) / entrySize);
|
||||
}
|
||||
|
||||
private static int GetOffsetCount(long nodeSize)
|
||||
{
|
||||
return (int)((nodeSize - Unsafe.SizeOf<NodeHeader>()) / sizeof(long));
|
||||
}
|
||||
|
||||
private static int GetEntrySetCount(long nodeSize, long entrySize, int entryCount)
|
||||
{
|
||||
int entryCountPerNode = GetEntryCount(nodeSize, entrySize);
|
||||
return Util.DivideByRoundUp(entryCount, entryCountPerNode);
|
||||
}
|
||||
|
||||
public static int GetNodeL2Count(long nodeSize, long entrySize, int entryCount)
|
||||
{
|
||||
int offsetCountPerNode = GetOffsetCount(nodeSize);
|
||||
int entrySetCount = GetEntrySetCount(nodeSize, entrySize, entryCount);
|
||||
|
||||
if (entrySetCount <= offsetCountPerNode)
|
||||
return 0;
|
||||
|
||||
int nodeL2Count = Util.DivideByRoundUp(entrySetCount, offsetCountPerNode);
|
||||
Abort.DoAbortUnless(nodeL2Count <= offsetCountPerNode);
|
||||
|
||||
return Util.DivideByRoundUp(entrySetCount - (offsetCountPerNode - (nodeL2Count - 1)), offsetCountPerNode);
|
||||
}
|
||||
|
||||
private static long GetBucketTreeEntryOffset(long entrySetOffset, long entrySize, int entryIndex)
|
||||
{
|
||||
return entrySetOffset + Unsafe.SizeOf<NodeHeader>() + entryIndex * entrySize;
|
||||
}
|
||||
|
||||
private static long GetBucketTreeEntryOffset(int entrySetIndex, long nodeSize, long entrySize, int entryIndex)
|
||||
{
|
||||
return GetBucketTreeEntryOffset(entrySetIndex * nodeSize, entrySize, entryIndex);
|
||||
}
|
||||
|
||||
private bool IsExistL2() => OffsetCount < EntrySetCount;
|
||||
private bool IsExistOffsetL2OnL1() => IsExistL2() && _nodeL1.GetHeader().Count < OffsetCount;
|
||||
|
||||
private long GetEntrySetIndex(int nodeIndex, int offsetIndex)
|
||||
{
|
||||
return (OffsetCount - _nodeL1.GetHeader().Count) + (OffsetCount * nodeIndex) + offsetIndex;
|
||||
}
|
||||
|
||||
public struct Header
|
||||
{
|
||||
public uint Magic;
|
||||
public uint Version;
|
||||
public int EntryCount;
|
||||
#pragma warning disable 414
|
||||
private int _reserved;
|
||||
#pragma warning restore 414
|
||||
|
||||
public void Format(int entryCount)
|
||||
{
|
||||
list[i].Next = list[i + 1];
|
||||
list[i].OffsetEnd = list[i + 1].Offset;
|
||||
Magic = ExpectedMagic;
|
||||
Version = MaxVersion;
|
||||
EntryCount = entryCount;
|
||||
_reserved = 0;
|
||||
}
|
||||
|
||||
list[list.Count - 1].OffsetEnd = BucketOffsets.OffsetEnd;
|
||||
|
||||
return list;
|
||||
}
|
||||
}
|
||||
|
||||
public class BucketTreeBucket<T> where T : BucketTreeEntry<T>, new()
|
||||
{
|
||||
public int Index;
|
||||
public int EntryCount;
|
||||
public long OffsetEnd;
|
||||
public T[] Entries;
|
||||
|
||||
public BucketTreeBucket(BinaryReader reader)
|
||||
{
|
||||
Index = reader.ReadInt32();
|
||||
EntryCount = reader.ReadInt32();
|
||||
OffsetEnd = reader.ReadInt64();
|
||||
Entries = new T[EntryCount];
|
||||
|
||||
for (int i = 0; i < EntryCount; i++)
|
||||
public Result Verify()
|
||||
{
|
||||
Entries[i] = new T().Read(reader);
|
||||
if (Magic != ExpectedMagic)
|
||||
return ResultFs.InvalidBucketTreeSignature.Log();
|
||||
|
||||
if (EntryCount < 0)
|
||||
return ResultFs.InvalidBucketTreeEntryCount.Log();
|
||||
|
||||
if (Version > MaxVersion)
|
||||
return ResultFs.UnsupportedVersion.Log();
|
||||
|
||||
return Result.Success;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public abstract class BucketTreeEntry<T> where T : BucketTreeEntry<T>
|
||||
{
|
||||
public long Offset { get; set; }
|
||||
public long OffsetEnd { get; set; }
|
||||
public T Next { get; set; }
|
||||
|
||||
protected abstract void ReadSpecific(BinaryReader reader);
|
||||
internal T Read(BinaryReader reader)
|
||||
public struct NodeHeader
|
||||
{
|
||||
Offset = reader.ReadInt64();
|
||||
ReadSpecific(reader);
|
||||
return (T)this;
|
||||
public int Index;
|
||||
public int Count;
|
||||
public long Offset;
|
||||
|
||||
public Result Verify(int nodeIndex, long nodeSize, long entrySize)
|
||||
{
|
||||
if (Index != nodeIndex)
|
||||
return ResultFs.InvalidBucketTreeNodeIndex.Log();
|
||||
|
||||
if (entrySize == 0 || nodeSize < entrySize + NodeHeaderSize)
|
||||
return ResultFs.InvalidSize.Log();
|
||||
|
||||
long maxEntryCount = (nodeSize - NodeHeaderSize) / entrySize;
|
||||
|
||||
if (Count <= 0 || maxEntryCount < Count)
|
||||
return ResultFs.InvalidBucketTreeNodeEntryCount.Log();
|
||||
|
||||
if (Offset < 0)
|
||||
return ResultFs.InvalidBucketTreeNodeOffset.Log();
|
||||
|
||||
return Result.Success;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public class OffsetEntry : BucketTreeEntry<OffsetEntry>
|
||||
{
|
||||
protected override void ReadSpecific(BinaryReader reader) { }
|
||||
}
|
||||
|
||||
public class AesSubsectionEntry : BucketTreeEntry<AesSubsectionEntry>
|
||||
{
|
||||
public uint Field8 { get; set; }
|
||||
public uint Counter { get; set; }
|
||||
|
||||
protected override void ReadSpecific(BinaryReader reader)
|
||||
private struct NodeBuffer
|
||||
{
|
||||
Field8 = reader.ReadUInt32();
|
||||
Counter = reader.ReadUInt32();
|
||||
// Use long to ensure alignment
|
||||
private long[] _header;
|
||||
|
||||
public bool Allocate(int nodeSize)
|
||||
{
|
||||
Assert.AssertTrue(_header == null);
|
||||
|
||||
_header = new long[nodeSize / sizeof(long)];
|
||||
|
||||
return _header != null;
|
||||
}
|
||||
|
||||
public void Free()
|
||||
{
|
||||
_header = null;
|
||||
}
|
||||
|
||||
public void FillZero()
|
||||
{
|
||||
if (_header != null)
|
||||
{
|
||||
Array.Fill(_header, 0);
|
||||
}
|
||||
}
|
||||
|
||||
public ref NodeHeader GetHeader()
|
||||
{
|
||||
Assert.AssertTrue(_header.Length / sizeof(long) >= Unsafe.SizeOf<NodeHeader>());
|
||||
|
||||
return ref Unsafe.As<long, NodeHeader>(ref _header[0]);
|
||||
}
|
||||
|
||||
public Span<byte> GetBuffer()
|
||||
{
|
||||
return MemoryMarshal.AsBytes(_header.AsSpan());
|
||||
}
|
||||
|
||||
public BucketTreeNode<TEntry> GetNode<TEntry>() where TEntry : unmanaged
|
||||
{
|
||||
return new BucketTreeNode<TEntry>(GetBuffer());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public class RelocationEntry : BucketTreeEntry<RelocationEntry>
|
||||
{
|
||||
public long SourceOffset { get; set; }
|
||||
public int SourceIndex { get; set; }
|
||||
|
||||
protected override void ReadSpecific(BinaryReader reader)
|
||||
public readonly ref struct BucketTreeNode<TEntry> where TEntry : unmanaged
|
||||
{
|
||||
SourceOffset = reader.ReadInt64();
|
||||
SourceIndex = reader.ReadInt32();
|
||||
private readonly Span<byte> _buffer;
|
||||
|
||||
public BucketTreeNode(Span<byte> buffer)
|
||||
{
|
||||
_buffer = buffer;
|
||||
|
||||
Assert.AssertTrue(_buffer.Length >= Unsafe.SizeOf<NodeHeader>());
|
||||
Assert.AssertTrue(_buffer.Length >= Unsafe.SizeOf<NodeHeader>() + GetHeader().Count * Unsafe.SizeOf<TEntry>());
|
||||
}
|
||||
|
||||
public int GetCount() => GetHeader().Count;
|
||||
|
||||
public ReadOnlySpan<TEntry> GetArray() => GetWritableArray();
|
||||
internal Span<TEntry> GetWritableArray() => GetWritableArray<TEntry>();
|
||||
|
||||
public long GetBeginOffset() => GetArray<long>()[0];
|
||||
public long GetEndOffset() => GetHeader().Offset;
|
||||
public long GetL2BeginOffset() => GetArray<long>()[GetCount()];
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
public ReadOnlySpan<TElement> GetArray<TElement>() where TElement : unmanaged
|
||||
{
|
||||
return GetWritableArray<TElement>();
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
private Span<TElement> GetWritableArray<TElement>() where TElement : unmanaged
|
||||
{
|
||||
return MemoryMarshal.Cast<byte, TElement>(_buffer.Slice(Unsafe.SizeOf<NodeHeader>()));
|
||||
}
|
||||
|
||||
[MethodImpl(MethodImplOptions.AggressiveInlining)]
|
||||
internal ref NodeHeader GetHeader()
|
||||
{
|
||||
return ref Unsafe.As<byte, NodeHeader>(ref MemoryMarshal.GetReference(_buffer));
|
||||
}
|
||||
}
|
||||
|
||||
public ref struct Visitor
|
||||
{
|
||||
private BucketTree Tree { get; set; }
|
||||
private byte[] Entry { get; set; }
|
||||
private int EntryIndex { get; set; }
|
||||
private int EntrySetCount { get; set; }
|
||||
private EntrySetHeader _entrySet;
|
||||
|
||||
[StructLayout(LayoutKind.Explicit)]
|
||||
private struct EntrySetHeader
|
||||
{
|
||||
// ReSharper disable once MemberHidesStaticFromOuterClass
|
||||
[FieldOffset(0)] public NodeHeader Header;
|
||||
[FieldOffset(0)] public EntrySetInfo Info;
|
||||
|
||||
[StructLayout(LayoutKind.Sequential)]
|
||||
public struct EntrySetInfo
|
||||
{
|
||||
public int Index;
|
||||
public int Count;
|
||||
public long End;
|
||||
public long Start;
|
||||
}
|
||||
}
|
||||
|
||||
public Result Initialize(BucketTree tree)
|
||||
{
|
||||
Assert.AssertTrue(tree != null);
|
||||
Assert.AssertTrue(Tree == null || tree == Tree);
|
||||
|
||||
if (Entry == null)
|
||||
{
|
||||
Entry = ArrayPool<byte>.Shared.Rent((int)tree.EntrySize);
|
||||
Tree = tree;
|
||||
EntryIndex = -1;
|
||||
}
|
||||
|
||||
return Result.Success;
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
if (Entry != null)
|
||||
{
|
||||
ArrayPool<byte>.Shared.Return(Entry);
|
||||
Entry = null;
|
||||
}
|
||||
}
|
||||
|
||||
public bool IsValid() => EntryIndex >= 0;
|
||||
|
||||
public bool CanMoveNext()
|
||||
{
|
||||
return IsValid() && (EntryIndex + 1 < _entrySet.Info.Count || _entrySet.Info.Index + 1 < EntrySetCount);
|
||||
}
|
||||
|
||||
public bool CanMovePrevious()
|
||||
{
|
||||
return IsValid() && (EntryIndex > 0 || _entrySet.Info.Index > 0);
|
||||
}
|
||||
|
||||
public ref T Get<T>() where T : unmanaged
|
||||
{
|
||||
return ref MemoryMarshal.Cast<byte, T>(Entry)[0];
|
||||
}
|
||||
|
||||
public Result MoveNext()
|
||||
{
|
||||
Result rc;
|
||||
|
||||
if (!IsValid())
|
||||
return ResultFs.OutOfRange.Log();
|
||||
|
||||
int entryIndex = EntryIndex + 1;
|
||||
|
||||
// Invalidate our index, and read the header for the next index.
|
||||
if (entryIndex == _entrySet.Info.Count)
|
||||
{
|
||||
int entrySetIndex = _entrySet.Info.Index + 1;
|
||||
if (entrySetIndex >= EntrySetCount)
|
||||
return ResultFs.OutOfRange.Log();
|
||||
|
||||
EntryIndex = -1;
|
||||
|
||||
long end = _entrySet.Info.End;
|
||||
|
||||
long entrySetSize = Tree.NodeSize;
|
||||
long entrySetOffset = entrySetIndex * entrySetSize;
|
||||
|
||||
rc = Tree.EntryStorage.Read(entrySetOffset, SpanHelpers.AsByteSpan(ref _entrySet));
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
rc = _entrySet.Header.Verify(entrySetIndex, entrySetSize, Tree.EntrySize);
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
if (_entrySet.Info.Start != end || _entrySet.Info.Start >= _entrySet.Info.End)
|
||||
return ResultFs.InvalidBucketTreeEntrySetOffset.Log();
|
||||
|
||||
entryIndex = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
EntryIndex = -1;
|
||||
}
|
||||
|
||||
// Read the new entry
|
||||
long entrySize = Tree.EntrySize;
|
||||
long entryOffset = GetBucketTreeEntryOffset(_entrySet.Info.Index, Tree.NodeSize, entrySize, entryIndex);
|
||||
|
||||
rc = Tree.EntryStorage.Read(entryOffset, Entry);
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
// Note that we changed index.
|
||||
EntryIndex = entryIndex;
|
||||
return Result.Success;
|
||||
}
|
||||
|
||||
public Result MovePrevious()
|
||||
{
|
||||
Result rc;
|
||||
|
||||
if (!IsValid())
|
||||
return ResultFs.OutOfRange.Log();
|
||||
|
||||
int entryIndex = EntryIndex;
|
||||
|
||||
if (entryIndex == 0)
|
||||
{
|
||||
if (_entrySet.Info.Index <= 0)
|
||||
return ResultFs.OutOfRange.Log();
|
||||
|
||||
EntryIndex = -1;
|
||||
|
||||
long start = _entrySet.Info.Start;
|
||||
|
||||
long entrySetSize = Tree.NodeSize;
|
||||
int entrySetIndex = _entrySet.Info.Index - 1;
|
||||
long entrySetOffset = entrySetIndex * entrySetSize;
|
||||
|
||||
rc = Tree.EntryStorage.Read(entrySetOffset, SpanHelpers.AsByteSpan(ref _entrySet));
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
rc = _entrySet.Header.Verify(entrySetIndex, entrySetSize, Tree.EntrySize);
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
if (_entrySet.Info.End != start || _entrySet.Info.Start >= _entrySet.Info.End)
|
||||
return ResultFs.InvalidBucketTreeEntrySetOffset.Log();
|
||||
|
||||
entryIndex = _entrySet.Info.Count;
|
||||
}
|
||||
else
|
||||
{
|
||||
EntryIndex = -1;
|
||||
}
|
||||
|
||||
// Read the new entry
|
||||
long entrySize = Tree.EntrySize;
|
||||
long entryOffset = GetBucketTreeEntryOffset(_entrySet.Info.Index, Tree.NodeSize, entrySize, entryIndex);
|
||||
|
||||
rc = Tree.EntryStorage.Read(entryOffset, Entry);
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
// Note that we changed index.
|
||||
EntryIndex = entryIndex;
|
||||
return Result.Success;
|
||||
}
|
||||
|
||||
public Result Find(long virtualAddress)
|
||||
{
|
||||
Result rc;
|
||||
|
||||
// Get the node.
|
||||
BucketTreeNode<long> node = Tree._nodeL1.GetNode<long>();
|
||||
|
||||
if (virtualAddress >= node.GetEndOffset())
|
||||
return ResultFs.OutOfRange.Log();
|
||||
|
||||
int entrySetIndex;
|
||||
|
||||
if (Tree.IsExistOffsetL2OnL1() && virtualAddress < node.GetBeginOffset())
|
||||
{
|
||||
// The portion of the L2 offsets containing our target offset is stored in the L1 node
|
||||
ReadOnlySpan<long> offsets = node.GetArray<long>().Slice(node.GetCount());
|
||||
int index = offsets.BinarySearch(virtualAddress);
|
||||
if (index < 0) index = (~index) - 1;
|
||||
|
||||
if (index < 0)
|
||||
return ResultFs.OutOfRange.Log();
|
||||
|
||||
entrySetIndex = index;
|
||||
}
|
||||
else
|
||||
{
|
||||
ReadOnlySpan<long> offsets = node.GetArray<long>().Slice(0, node.GetCount());
|
||||
int index = offsets.BinarySearch(virtualAddress);
|
||||
if (index < 0) index = (~index) - 1;
|
||||
|
||||
if (index < 0)
|
||||
return ResultFs.OutOfRange.Log();
|
||||
|
||||
if (Tree.IsExistL2())
|
||||
{
|
||||
if (index >= Tree.OffsetCount)
|
||||
return ResultFs.InvalidBucketTreeNodeOffset.Log();
|
||||
|
||||
rc = FindEntrySet(out entrySetIndex, virtualAddress, index);
|
||||
if (rc.IsFailure()) return rc;
|
||||
}
|
||||
else
|
||||
{
|
||||
entrySetIndex = index;
|
||||
}
|
||||
}
|
||||
|
||||
// Validate the entry set index.
|
||||
if (entrySetIndex < 0 || entrySetIndex >= Tree.EntrySetCount)
|
||||
return ResultFs.InvalidBucketTreeNodeOffset.Log();
|
||||
|
||||
// Find the entry.
|
||||
rc = FindEntry(virtualAddress, entrySetIndex);
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
// Set count.
|
||||
EntrySetCount = Tree.EntrySetCount;
|
||||
return Result.Success;
|
||||
}
|
||||
|
||||
private Result FindEntrySet(out int entrySetIndex, long virtualAddress, int nodeIndex)
|
||||
{
|
||||
long nodeSize = Tree.NodeSize;
|
||||
|
||||
using (var rented = new RentedArray<byte>((int)nodeSize))
|
||||
{
|
||||
return FindEntrySetWithBuffer(out entrySetIndex, virtualAddress, nodeIndex, rented.Span);
|
||||
}
|
||||
}
|
||||
|
||||
private Result FindEntrySetWithBuffer(out int outIndex, long virtualAddress, int nodeIndex,
|
||||
Span<byte> buffer)
|
||||
{
|
||||
outIndex = default;
|
||||
|
||||
// Calculate node extents.
|
||||
long nodeSize = Tree.NodeSize;
|
||||
long nodeOffset = (nodeIndex + 1) * nodeSize;
|
||||
SubStorage2 storage = Tree.NodeStorage;
|
||||
|
||||
// Read the node.
|
||||
Result rc = storage.Read(nodeOffset, buffer.Slice(0, (int)nodeSize));
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
// Validate the header.
|
||||
NodeHeader header = MemoryMarshal.Cast<byte, NodeHeader>(buffer)[0];
|
||||
rc = header.Verify(nodeIndex, nodeSize, sizeof(long));
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
// Create the node and find.
|
||||
var node = new StorageNode(sizeof(long), header.Count);
|
||||
node.Find(buffer, virtualAddress);
|
||||
|
||||
if (node.GetIndex() < 0)
|
||||
return ResultFs.InvalidBucketTreeVirtualOffset.Log();
|
||||
|
||||
// Return the index.
|
||||
outIndex = (int)Tree.GetEntrySetIndex(header.Index, node.GetIndex());
|
||||
return Result.Success;
|
||||
}
|
||||
|
||||
private Result FindEntry(long virtualAddress, int entrySetIndex)
|
||||
{
|
||||
long entrySetSize = Tree.NodeSize;
|
||||
|
||||
using (var rented = new RentedArray<byte>((int)entrySetSize))
|
||||
{
|
||||
return FindEntryWithBuffer(virtualAddress, entrySetIndex, rented.Span);
|
||||
}
|
||||
}
|
||||
|
||||
private Result FindEntryWithBuffer(long virtualAddress, int entrySetIndex, Span<byte> buffer)
|
||||
{
|
||||
// Calculate entry set extents.
|
||||
long entrySize = Tree.EntrySize;
|
||||
long entrySetSize = Tree.NodeSize;
|
||||
long entrySetOffset = entrySetIndex * entrySetSize;
|
||||
SubStorage2 storage = Tree.EntryStorage;
|
||||
|
||||
// Read the entry set.
|
||||
Result rc = storage.Read(entrySetOffset, buffer.Slice(0, (int)entrySetSize));
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
// Validate the entry set.
|
||||
EntrySetHeader entrySet = MemoryMarshal.Cast<byte, EntrySetHeader>(buffer)[0];
|
||||
rc = entrySet.Header.Verify(entrySetIndex, entrySetSize, entrySize);
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
// Create the node, and find.
|
||||
var node = new StorageNode(entrySize, entrySet.Info.Count);
|
||||
node.Find(buffer, virtualAddress);
|
||||
|
||||
if (node.GetIndex() < 0)
|
||||
return ResultFs.InvalidBucketTreeVirtualOffset.Log();
|
||||
|
||||
// Copy the data into entry.
|
||||
int entryIndex = node.GetIndex();
|
||||
long entryOffset = GetBucketTreeEntryOffset(0, entrySize, entryIndex);
|
||||
buffer.Slice((int)entryOffset, (int)entrySize).CopyTo(Entry);
|
||||
|
||||
// Set our entry set/index.
|
||||
_entrySet = entrySet;
|
||||
EntryIndex = entryIndex;
|
||||
|
||||
return Result.Success;
|
||||
}
|
||||
|
||||
private struct StorageNode
|
||||
{
|
||||
private Offset _start;
|
||||
private int _count;
|
||||
private int _index;
|
||||
|
||||
public StorageNode(long size, int count)
|
||||
{
|
||||
_start = new Offset(NodeHeaderSize, (int)size);
|
||||
_count = count;
|
||||
_index = -1;
|
||||
}
|
||||
|
||||
public int GetIndex() => _index;
|
||||
|
||||
public void Find(ReadOnlySpan<byte> buffer, long virtualAddress)
|
||||
{
|
||||
int end = _count;
|
||||
Offset pos = _start;
|
||||
|
||||
while (end > 0)
|
||||
{
|
||||
int half = end / 2;
|
||||
Offset mid = pos + half;
|
||||
|
||||
long offset = BinaryPrimitives.ReadInt64LittleEndian(buffer.Slice((int)mid.Get()));
|
||||
|
||||
if (offset <= virtualAddress)
|
||||
{
|
||||
pos = mid + 1;
|
||||
end -= half + 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
end = half;
|
||||
}
|
||||
}
|
||||
|
||||
_index = (int)(pos - _start) - 1;
|
||||
}
|
||||
|
||||
private readonly struct Offset
|
||||
{
|
||||
private readonly long _offset;
|
||||
private readonly int _stride;
|
||||
|
||||
public Offset(long offset, int stride)
|
||||
{
|
||||
_offset = offset;
|
||||
_stride = stride;
|
||||
}
|
||||
|
||||
public long Get() => _offset;
|
||||
|
||||
public static Offset operator ++(Offset left) => left + 1;
|
||||
public static Offset operator --(Offset left) => left - 1;
|
||||
|
||||
public static Offset operator +(Offset left, long right) => new Offset(left._offset + right * left._stride, left._stride);
|
||||
public static Offset operator -(Offset left, long right) => new Offset(left._offset - right * left._stride, left._stride);
|
||||
|
||||
public static long operator -(Offset left, Offset right) =>
|
||||
(left._offset - right._offset) / left._stride;
|
||||
|
||||
public static bool operator ==(Offset left, Offset right) => left._offset == right._offset;
|
||||
public static bool operator !=(Offset left, Offset right) => left._offset != right._offset;
|
||||
|
||||
public bool Equals(Offset other) => _offset == other._offset;
|
||||
public override bool Equals(object obj) => obj is Offset other && Equals(other);
|
||||
public override int GetHashCode() => _offset.GetHashCode();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
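Find positions a Visitor on the entry whose range contains the requested virtual offset; the caller reads it with Get<T>() and can walk forward with CanMoveNext/MoveNext, and must dispose the visitor because its entry buffer is rented from ArrayPool. A lookup sketch, assuming the tree stores the IndirectStorage.Entry records defined later in this diff:

using LibHac;
using LibHac.FsSystem;

public static class BucketTreeLookupExample
{
    public static Result LookUp(BucketTree tree, long virtualOffset,
        out long physicalOffset, out int storageIndex)
    {
        physicalOffset = 0;
        storageIndex = 0;

        var visitor = new BucketTree.Visitor();
        try
        {
            // Position the visitor on the entry whose range contains virtualOffset.
            Result rc = tree.Find(ref visitor, virtualOffset);
            if (rc.IsFailure()) return rc;

            ref IndirectStorage.Entry entry = ref visitor.Get<IndirectStorage.Entry>();

            // Translate into the entry's backing storage.
            physicalOffset = entry.GetPhysicalOffset() + (virtualOffset - entry.GetVirtualOffset());
            storageIndex = entry.StorageIndex;

            return Result.Success;
        }
        finally { visitor.Dispose(); }
    }
}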
|
||||
|
|
src/LibHac/FsSystem/BucketTreeBuilder.cs (new file, 309 lines)
@ -0,0 +1,309 @@
|
|||
using System;
|
||||
using System.Buffers.Binary;
|
||||
using System.Runtime.CompilerServices;
|
||||
using LibHac.Common;
|
||||
using LibHac.Diag;
|
||||
using LibHac.Fs;
|
||||
|
||||
namespace LibHac.FsSystem
|
||||
{
|
||||
public partial class BucketTree
|
||||
{
|
||||
public class Builder
|
||||
{
|
||||
private SubStorage2 NodeStorage { get; set; }
|
||||
private SubStorage2 EntryStorage { get; set; }
|
||||
|
||||
private NodeBuffer _l1Node = new NodeBuffer();
|
||||
private NodeBuffer _l2Node = new NodeBuffer();
|
||||
private NodeBuffer _entrySet = new NodeBuffer();
|
||||
|
||||
private int NodeSize { get; set; }
|
||||
private int EntrySize { get; set; }
|
||||
private int EntryCount { get; set; }
|
||||
private int EntriesPerEntrySet { get; set; }
|
||||
private int OffsetsPerNode { get; set; }
|
||||
|
||||
private int CurrentL2OffsetIndex { get; set; }
|
||||
private int CurrentEntryIndex { get; set; }
|
||||
private long CurrentOffset { get; set; } = -1;
|
||||
|
||||
/// <summary>
|
||||
/// Initializes the bucket tree builder.
|
||||
/// </summary>
|
||||
/// <param name="headerStorage">The <see cref="SubStorage2"/> the tree's header will be written to.Must be at least the size in bytes returned by <see cref="QueryHeaderStorageSize"/>.</param>
|
||||
/// <param name="nodeStorage">The <see cref="SubStorage2"/> the tree's nodes will be written to. Must be at least the size in bytes returned by <see cref="QueryNodeStorageSize"/>.</param>
|
||||
/// <param name="entryStorage">The <see cref="SubStorage2"/> the tree's entries will be written to. Must be at least the size in bytes returned by <see cref="QueryEntryStorageSize"/>.</param>
|
||||
/// <param name="nodeSize">The size of each node in the bucket tree. Must be a power of 2.</param>
|
||||
/// <param name="entrySize">The size of each entry that will be stored in the bucket tree.</param>
|
||||
/// <param name="entryCount">The exact number of entries that will be added to the bucket tree.</param>
|
||||
/// <returns>The <see cref="Result"/> of the operation.</returns>
|
||||
public Result Initialize(SubStorage2 headerStorage, SubStorage2 nodeStorage, SubStorage2 entryStorage,
|
||||
int nodeSize, int entrySize, int entryCount)
|
||||
{
|
||||
Assert.AssertTrue(entrySize >= sizeof(long));
|
||||
Assert.AssertTrue(nodeSize >= entrySize + Unsafe.SizeOf<NodeHeader>());
|
||||
Assert.AssertTrue(NodeSizeMin <= nodeSize && nodeSize <= NodeSizeMax);
|
||||
Assert.AssertTrue(Util.IsPowerOfTwo(nodeSize));
|
||||
|
||||
if (headerStorage is null || nodeStorage is null || entryStorage is null)
|
||||
return ResultFs.NullptrArgument.Log();
|
||||
|
||||
// Set the builder parameters
|
||||
NodeSize = nodeSize;
|
||||
EntrySize = entrySize;
|
||||
EntryCount = entryCount;
|
||||
|
||||
EntriesPerEntrySet = GetEntryCount(nodeSize, entrySize);
|
||||
OffsetsPerNode = GetOffsetCount(nodeSize);
|
||||
CurrentL2OffsetIndex = GetNodeL2Count(nodeSize, entrySize, entryCount);
|
||||
|
||||
// Create and write the header
|
||||
var header = new Header();
|
||||
header.Format(entryCount);
|
||||
Result rc = headerStorage.Write(0, SpanHelpers.AsByteSpan(ref header));
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
// Allocate buffers for the L1 node and entry sets
|
||||
_l1Node.Allocate(nodeSize);
|
||||
_entrySet.Allocate(nodeSize);
|
||||
|
||||
int entrySetCount = GetEntrySetCount(nodeSize, entrySize, entryCount);
|
||||
|
||||
// Allocate an L2 node buffer if there are more entry sets than will fit in the L1 node
|
||||
if (OffsetsPerNode < entrySetCount)
|
||||
{
|
||||
_l2Node.Allocate(nodeSize);
|
||||
}
|
||||
|
||||
_l1Node.FillZero();
|
||||
_l2Node.FillZero();
|
||||
_entrySet.FillZero();
|
||||
|
||||
NodeStorage = nodeStorage;
|
||||
EntryStorage = entryStorage;
|
||||
|
||||
// Set the initial position
|
||||
CurrentEntryIndex = 0;
|
||||
CurrentOffset = -1;
|
||||
|
||||
return Result.Success;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Adds a new entry to the bucket tree.
|
||||
/// </summary>
|
||||
/// <typeparam name="T">The type of the entry to add. Added entries should all be the same type.</typeparam>
|
||||
/// <param name="entry">The entry to add.</param>
|
||||
/// <returns>The <see cref="Result"/> of the operation.</returns>
|
||||
public Result Add<T>(ref T entry) where T : unmanaged
|
||||
{
|
||||
Assert.AssertTrue(Unsafe.SizeOf<T>() == EntrySize);
|
||||
|
||||
if (CurrentEntryIndex >= EntryCount)
|
||||
return ResultFs.OutOfRange.Log();
|
||||
|
||||
// The entry offset must always be the first 8 bytes of the struct
|
||||
long entryOffset = BinaryPrimitives.ReadInt64LittleEndian(SpanHelpers.AsByteSpan(ref entry));
|
||||
|
||||
if (entryOffset <= CurrentOffset)
|
||||
return ResultFs.InvalidOffset.Log();
|
||||
|
||||
Result rc = FinalizePreviousEntrySet(entryOffset);
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
AddEntryOffset(entryOffset);
|
||||
|
||||
// Write the new entry
|
||||
int indexInEntrySet = CurrentEntryIndex % EntriesPerEntrySet;
|
||||
_entrySet.GetNode<T>().GetWritableArray()[indexInEntrySet] = entry;
|
||||
|
||||
CurrentOffset = entryOffset;
|
||||
CurrentEntryIndex++;
|
||||
|
||||
return Result.Success;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Checks if a new entry set is being started. If so, sets the end offset of the previous
|
||||
/// entry set and writes it to the output storage.
|
||||
/// </summary>
|
||||
/// <param name="endOffset">The end offset of the previous entry.</param>
|
||||
/// <returns>The <see cref="Result"/> of the operation.</returns>
|
||||
private Result FinalizePreviousEntrySet(long endOffset)
|
||||
{
|
||||
int prevEntrySetIndex = CurrentEntryIndex / EntriesPerEntrySet - 1;
|
||||
int indexInEntrySet = CurrentEntryIndex % EntriesPerEntrySet;
|
||||
|
||||
// If the previous Add finished an entry set
|
||||
if (CurrentEntryIndex > 0 && indexInEntrySet == 0)
|
||||
{
|
||||
// Set the end offset of that entry set
|
||||
ref NodeHeader entrySetHeader = ref _entrySet.GetHeader();
|
||||
|
||||
entrySetHeader.Index = prevEntrySetIndex;
|
||||
entrySetHeader.Count = EntriesPerEntrySet;
|
||||
entrySetHeader.Offset = endOffset;
|
||||
|
||||
// Write the entry set to the entry storage
|
||||
long storageOffset = (long)NodeSize * prevEntrySetIndex;
|
||||
Result rc = EntryStorage.Write(storageOffset, _entrySet.GetBuffer());
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
// Clear the entry set buffer to begin the new entry set
|
||||
_entrySet.FillZero();
|
||||
|
||||
// Check if we're writing in L2 nodes
|
||||
if (CurrentL2OffsetIndex > OffsetsPerNode)
|
||||
{
|
||||
int prevL2NodeIndex = CurrentL2OffsetIndex / OffsetsPerNode - 2;
|
||||
int indexInL2Node = CurrentL2OffsetIndex % OffsetsPerNode;
|
||||
|
||||
// If the previous Add finished an L2 node
|
||||
if (indexInL2Node == 0)
|
||||
{
|
||||
// Set the end offset of that node
|
||||
ref NodeHeader l2NodeHeader = ref _l2Node.GetHeader();
|
||||
|
||||
l2NodeHeader.Index = prevL2NodeIndex;
|
||||
l2NodeHeader.Count = OffsetsPerNode;
|
||||
l2NodeHeader.Offset = endOffset;
|
||||
|
||||
// Write the L2 node to the node storage
|
||||
long nodeOffset = (long)NodeSize * (prevL2NodeIndex + 1);
|
||||
rc = NodeStorage.Write(nodeOffset, _l2Node.GetBuffer());
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
// Clear the L2 node buffer to begin the new node
|
||||
_l2Node.FillZero();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return Result.Success;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// If needed, adds a new entry set's start offset to the L1 or L2 nodes.
|
||||
/// </summary>
|
||||
/// <param name="entryOffset">The start offset of the entry being added.</param>
|
||||
private void AddEntryOffset(long entryOffset)
|
||||
{
|
||||
int entrySetIndex = CurrentEntryIndex / EntriesPerEntrySet;
|
||||
int indexInEntrySet = CurrentEntryIndex % EntriesPerEntrySet;
|
||||
|
||||
// If we're starting a new entry set we need to add its start offset to the L1/L2 nodes
|
||||
if (indexInEntrySet == 0)
|
||||
{
|
||||
Span<long> l1Data = _l1Node.GetNode<long>().GetWritableArray();
|
||||
|
||||
if (CurrentL2OffsetIndex == 0)
|
||||
{
|
||||
// There are no L2 nodes. Write the entry set end offset directly to L1
|
||||
l1Data[entrySetIndex] = entryOffset;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (CurrentL2OffsetIndex < OffsetsPerNode)
|
||||
{
|
||||
// The current L2 offset is stored in the L1 node
|
||||
l1Data[CurrentL2OffsetIndex] = entryOffset;
|
||||
}
|
||||
else
|
||||
{
|
||||
// Write the entry set offset to the current L2 node
|
||||
int l2NodeIndex = CurrentL2OffsetIndex / OffsetsPerNode;
|
||||
int indexInL2Node = CurrentL2OffsetIndex % OffsetsPerNode;
|
||||
|
||||
Span<long> l2Data = _l2Node.GetNode<long>().GetWritableArray();
|
||||
l2Data[indexInL2Node] = entryOffset;
|
||||
|
||||
// If we're starting a new L2 node we need to add its start offset to the L1 node
|
||||
if (indexInL2Node == 0)
|
||||
{
|
||||
l1Data[l2NodeIndex - 1] = entryOffset;
|
||||
}
|
||||
}
|
||||
|
||||
CurrentL2OffsetIndex++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Finalizes the bucket tree. Must be called after all entries are added.
|
||||
/// </summary>
|
||||
/// <param name="endOffset">The end offset of the bucket tree.</param>
|
||||
/// <returns>The <see cref="Result"/> of the operation.</returns>
|
||||
public Result Finalize(long endOffset)
|
||||
{
|
||||
// Finalize must only be called after all entries are added
|
||||
if (EntryCount != CurrentEntryIndex)
|
||||
return ResultFs.OutOfRange.Log();
|
||||
|
||||
if (endOffset <= CurrentOffset)
|
||||
return ResultFs.InvalidOffset.Log();
|
||||
|
||||
if (CurrentOffset == -1)
|
||||
return Result.Success;
|
||||
|
||||
Result rc = FinalizePreviousEntrySet(endOffset);
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
int entrySetIndex = CurrentEntryIndex / EntriesPerEntrySet;
|
||||
int indexInEntrySet = CurrentEntryIndex % EntriesPerEntrySet;
|
||||
|
||||
// Finalize the current entry set if needed
|
||||
if (indexInEntrySet != 0)
|
||||
{
|
||||
ref NodeHeader entrySetHeader = ref _entrySet.GetHeader();
|
||||
|
||||
entrySetHeader.Index = entrySetIndex;
|
||||
entrySetHeader.Count = indexInEntrySet;
|
||||
entrySetHeader.Offset = endOffset;
|
||||
|
||||
long entryStorageOffset = (long)NodeSize * entrySetIndex;
|
||||
rc = EntryStorage.Write(entryStorageOffset, _entrySet.GetBuffer());
|
||||
if (rc.IsFailure()) return rc;
|
||||
}
|
||||
|
||||
int l2NodeIndex = Util.DivideByRoundUp(CurrentL2OffsetIndex, OffsetsPerNode) - 2;
|
||||
int indexInL2Node = CurrentL2OffsetIndex % OffsetsPerNode;
|
||||
|
||||
// Finalize the current L2 node if needed
|
||||
if (CurrentL2OffsetIndex > OffsetsPerNode && (indexInEntrySet != 0 || indexInL2Node != 0))
|
||||
{
|
||||
ref NodeHeader l2NodeHeader = ref _l2Node.GetHeader();
|
||||
l2NodeHeader.Index = l2NodeIndex;
|
||||
l2NodeHeader.Count = indexInL2Node != 0 ? indexInL2Node : OffsetsPerNode;
|
||||
l2NodeHeader.Offset = endOffset;
|
||||
|
||||
long l2NodeStorageOffset = NodeSize * (l2NodeIndex + 1);
|
||||
rc = NodeStorage.Write(l2NodeStorageOffset, _l2Node.GetBuffer());
|
||||
if (rc.IsFailure()) return rc;
|
||||
}
|
||||
|
||||
// Finalize the L1 node
|
||||
ref NodeHeader l1NodeHeader = ref _l1Node.GetHeader();
|
||||
l1NodeHeader.Index = 0;
|
||||
l1NodeHeader.Offset = endOffset;
|
||||
|
||||
// L1 count depends on the existence or absence of L2 nodes
|
||||
if (CurrentL2OffsetIndex == 0)
|
||||
{
|
||||
l1NodeHeader.Count = Util.DivideByRoundUp(CurrentEntryIndex, EntriesPerEntrySet);
|
||||
}
|
||||
else
|
||||
{
|
||||
l1NodeHeader.Count = l2NodeIndex + 1;
|
||||
}
|
||||
|
||||
rc = NodeStorage.Write(0, _l1Node.GetBuffer());
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
CurrentOffset = long.MaxValue;
|
||||
return Result.Success;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
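The builder is used in three phases: Initialize with storages sized by the Query* methods, Add every entry in ascending offset order, then Finalize with the end offset of the tree. A usage sketch for an IndirectStorage table; the namespaces and the caller-supplied storages are assumptions, not part of this file:

using System;
using System.Runtime.CompilerServices;
using LibHac;
using LibHac.Fs;
using LibHac.FsSystem;

public static class BucketTreeBuildExample
{
    public static Result BuildIndirectTable(SubStorage2 headerStorage, SubStorage2 nodeStorage,
        SubStorage2 entryStorage, ReadOnlySpan<IndirectStorage.Entry> entries, long endOffset)
    {
        var builder = new BucketTree.Builder();

        Result rc = builder.Initialize(headerStorage, nodeStorage, entryStorage,
            IndirectStorage.NodeSize, Unsafe.SizeOf<IndirectStorage.Entry>(), entries.Length);
        if (rc.IsFailure()) return rc;

        // Entries must be added in increasing virtual-offset order.
        for (int i = 0; i < entries.Length; i++)
        {
            IndirectStorage.Entry entry = entries[i];

            rc = builder.Add(ref entry);
            if (rc.IsFailure()) return rc;
        }

        // endOffset must be greater than the offset of the last entry added.
        return builder.Finalize(endOffset);
    }
}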
|
|
@ -1,72 +1,188 @@
|
|||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.Linq;
|
||||
using System.Runtime.CompilerServices;
|
||||
using System.Runtime.InteropServices;
|
||||
using LibHac.Common;
|
||||
using LibHac.Diag;
|
||||
using LibHac.Fs;
|
||||
|
||||
namespace LibHac.FsSystem
|
||||
{
|
||||
public class IndirectStorage : IStorage
|
||||
{
|
||||
private List<RelocationEntry> RelocationEntries { get; }
|
||||
private List<long> RelocationOffsets { get; }
|
||||
public static readonly int StorageCount = 2;
|
||||
public static readonly int NodeSize = 1024 * 16;
|
||||
|
||||
private List<IStorage> Sources { get; } = new List<IStorage>();
|
||||
private BucketTree<RelocationEntry> BucketTree { get; }
|
||||
private long Length { get; }
|
||||
private bool LeaveOpen { get; }
|
||||
private BucketTree Table { get; } = new BucketTree();
|
||||
private SubStorage2[] DataStorage { get; } = new SubStorage2[StorageCount];
|
||||
|
||||
public IndirectStorage(IStorage bucketTreeData, bool leaveOpen, params IStorage[] sources)
|
||||
[StructLayout(LayoutKind.Sequential, Size = 0x14, Pack = 4)]
|
||||
public struct Entry
|
||||
{
|
||||
Sources.AddRange(sources);
|
||||
private long VirtualOffset;
|
||||
private long PhysicalOffset;
|
||||
public int StorageIndex;
|
||||
|
||||
LeaveOpen = leaveOpen;
|
||||
public void SetVirtualOffset(long offset) => VirtualOffset = offset;
|
||||
public long GetVirtualOffset() => VirtualOffset;
|
||||
|
||||
BucketTree = new BucketTree<RelocationEntry>(bucketTreeData);
|
||||
|
||||
RelocationEntries = BucketTree.GetEntryList();
|
||||
RelocationOffsets = RelocationEntries.Select(x => x.Offset).ToList();
|
||||
|
||||
Length = BucketTree.BucketOffsets.OffsetEnd;
|
||||
public void SetPhysicalOffset(long offset) => PhysicalOffset = offset;
|
||||
public long GetPhysicalOffset() => PhysicalOffset;
|
||||
}
|
||||
|
||||
protected override Result DoRead(long offset, Span<byte> destination)
|
||||
public static long QueryHeaderStorageSize() => BucketTree.QueryHeaderStorageSize();
|
||||
|
||||
public static long QueryNodeStorageSize(int entryCount) =>
|
||||
BucketTree.QueryNodeStorageSize(NodeSize, Unsafe.SizeOf<Entry>(), entryCount);
|
||||
|
||||
public static long QueryEntryStorageSize(int entryCount) =>
|
||||
BucketTree.QueryEntryStorageSize(NodeSize, Unsafe.SizeOf<Entry>(), entryCount);
|
||||
|
||||
public bool IsInitialized() => Table.IsInitialized();
|
||||
|
||||
public Result Initialize(SubStorage2 tableStorage)
|
||||
{
|
||||
RelocationEntry entry = GetRelocationEntry(offset);
|
||||
// Read and verify the bucket tree header.
|
||||
// note: skip init
|
||||
var header = new BucketTree.Header();
|
||||
|
||||
if (entry.SourceIndex > Sources.Count)
|
||||
Result rc = tableStorage.Read(0, SpanHelpers.AsByteSpan(ref header));
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
rc = header.Verify();
|
||||
if (rc.IsFailure()) return rc;
|
||||
|
||||
// Determine extents.
|
||||
long nodeStorageSize = QueryNodeStorageSize(header.EntryCount);
|
||||
long entryStorageSize = QueryEntryStorageSize(header.EntryCount);
|
||||
long nodeStorageOffset = QueryHeaderStorageSize();
|
||||
long entryStorageOffset = nodeStorageOffset + nodeStorageSize;
|
||||
|
||||
// Initialize.
|
||||
var nodeStorage = new SubStorage2(tableStorage, nodeStorageOffset, nodeStorageSize);
|
||||
var entryStorage = new SubStorage2(tableStorage, entryStorageOffset, entryStorageSize);
|
||||
|
||||
return Initialize(nodeStorage, entryStorage, header.EntryCount);
|
||||
}
|
||||
|
||||
public Result Initialize(SubStorage2 nodeStorage, SubStorage2 entryStorage, int entryCount)
|
||||
{
|
||||
return Table.Initialize(nodeStorage, entryStorage, NodeSize, Unsafe.SizeOf<Entry>(), entryCount);
|
||||
}
|
||||
|
||||
public void SetStorage(int index, SubStorage2 storage)
|
||||
{
|
||||
Assert.InRange(index, 0, StorageCount);
|
||||
DataStorage[index] = storage;
|
||||
}
|
||||
|
||||
public void SetStorage(int index, IStorage storage, long offset, long size)
|
||||
{
|
||||
Assert.InRange(index, 0, StorageCount);
|
||||
DataStorage[index] = new SubStorage2(storage, offset, size);
|
||||
}
|
||||
|
||||
public Result GetEntryList(Span<Entry> entryBuffer, out int outputEntryCount, long offset, long size)
{
// Validate pre-conditions
Assert.AssertTrue(offset >= 0);
Assert.AssertTrue(size >= 0);
Assert.AssertTrue(IsInitialized());

// Clear the out count
outputEntryCount = 0;

// Succeed if there's no range
if (size == 0)
return Result.Success;

// Check that our range is valid
if (!Table.Includes(offset, size))
return ResultFs.OutOfRange.Log();

// Find the offset in our tree
var visitor = new BucketTree.Visitor();

try
{
return ResultFs.InvalidIndirectStorageSource.Log();
}

long inPos = offset;
int outPos = 0;
int remaining = destination.Length;

while (remaining > 0)
{
long entryPos = inPos - entry.Offset;

int bytesToRead = (int)Math.Min(entry.OffsetEnd - inPos, remaining);

Result rc = Sources[entry.SourceIndex].Read(entry.SourceOffset + entryPos, destination.Slice(outPos, bytesToRead));
Result rc = Table.Find(ref visitor, offset);
if (rc.IsFailure()) return rc;

outPos += bytesToRead;
inPos += bytesToRead;
remaining -= bytesToRead;
long entryOffset = visitor.Get<Entry>().GetVirtualOffset();
if (entryOffset < 0 || !Table.Includes(entryOffset))
return ResultFs.InvalidIndirectEntryOffset.Log();

if (inPos >= entry.OffsetEnd)
// Prepare to loop over entries
long endOffset = offset + size;
int count = 0;

ref Entry currentEntry = ref visitor.Get<Entry>();
while (currentEntry.GetVirtualOffset() < endOffset)
{
entry = entry.Next;
}
}
// Try to write the entry to the out list
if (entryBuffer.Length != 0)
{
if (count >= entryBuffer.Length)
break;

return Result.Success;
entryBuffer[count] = currentEntry;
}

count++;

// Advance
if (visitor.CanMoveNext())
{
rc = visitor.MoveNext();
if (rc.IsFailure()) return rc;

currentEntry = ref visitor.Get<Entry>();
}
else
{
break;
}
}

// Write the entry count
outputEntryCount = count;
return Result.Success;
}
finally { visitor.Dispose(); }
}
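A short usage sketch for GetEntryList (hypothetical caller code): passing an empty buffer only counts the entries that intersect the range, which allows a count-then-fill pattern.

// Count the entries covering the whole virtual range, then fetch them.
indirectStorage.GetSize(out long totalSize).ThrowIfFailure();

indirectStorage.GetEntryList(Span<IndirectStorage.Entry>.Empty, out int entryCount, 0, totalSize).ThrowIfFailure();

var entries = new IndirectStorage.Entry[entryCount];
indirectStorage.GetEntryList(entries, out int written, 0, totalSize).ThrowIfFailure();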
protected override unsafe Result DoRead(long offset, Span<byte> destination)
{
// Validate pre-conditions
Assert.AssertTrue(offset >= 0);
Assert.AssertTrue(IsInitialized());

// Succeed if there's nothing to read
if (destination.Length == 0)
return Result.Success;

// Pin and recreate the span because C# can't use byref-like types in a closure
int bufferSize = destination.Length;
fixed (byte* pBuffer = destination)
{
// Copy the pointer to work around CS1764.
// OperatePerEntry won't store the delegate anywhere, so it should be safe
byte* pBuffer2 = pBuffer;

Result Operate(IStorage storage, long dataOffset, long currentOffset, long currentSize)
{
var buffer = new Span<byte>(pBuffer2, bufferSize);

return storage.Read(dataOffset,
buffer.Slice((int)(currentOffset - offset), (int)currentSize));
}

return OperatePerEntry(offset, destination.Length, Operate);
}
}
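The pointer copy above works around CS1764, which forbids using a fixed local inside a lambda or local function. A standalone sketch of the same pattern, assuming a project with unsafe blocks enabled (not LibHac code):

static unsafe int SumBytes(ReadOnlySpan<byte> data)
{
    int length = data.Length;

    fixed (byte* pData = data)
    {
        // Copying the fixed pointer into an ordinary local lets the local function capture it.
        byte* pData2 = pData;

        int Accumulate()
        {
            var span = new ReadOnlySpan<byte>(pData2, length);
            int sum = 0;
            foreach (byte b in span) sum += b;
            return sum;
        }

        return Accumulate();
    }
}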
protected override Result DoWrite(long offset, ReadOnlySpan<byte> source)
{
return ResultFs.UnsupportedOperationInIndirectStorageSetSize.Log();
return ResultFs.UnsupportedOperationInIndirectStorageWrite.Log();
}

protected override Result DoFlush()

@ -74,36 +190,121 @@ namespace LibHac.FsSystem
return Result.Success;
}

protected override Result DoGetSize(out long size)
{
size = Length;
return Result.Success;
}

protected override Result DoSetSize(long size)
{
return ResultFs.UnsupportedOperationInIndirectStorageSetSize.Log();
}

protected override void Dispose(bool disposing)
protected override Result DoGetSize(out long size)
{
if (disposing)
{
if (!LeaveOpen && Sources != null)
{
foreach (IStorage storage in Sources)
{
storage?.Dispose();
}
}
}
size = Table.GetEnd();
return Result.Success;
}

private RelocationEntry GetRelocationEntry(long offset)
private delegate Result OperateFunc(IStorage storage, long dataOffset, long currentOffset, long currentSize);

private Result OperatePerEntry(long offset, long size, OperateFunc func)
{
int index = RelocationOffsets.BinarySearch(offset);
if (index < 0) index = ~index - 1;
return RelocationEntries[index];
// Validate preconditions
Assert.AssertTrue(offset >= 0);
Assert.AssertTrue(size >= 0);
Assert.AssertTrue(IsInitialized());

// Succeed if there's nothing to operate on
if (size == 0)
return Result.Success;

// Validate arguments
if (!Table.Includes(offset, size))
return ResultFs.OutOfRange.Log();

// Find the offset in our tree
var visitor = new BucketTree.Visitor();

try
{
Result rc = Table.Find(ref visitor, offset);
if (rc.IsFailure()) return rc;

long entryOffset = visitor.Get<Entry>().GetVirtualOffset();
if (entryOffset < 0 || !Table.Includes(entryOffset))
return ResultFs.InvalidIndirectEntryStorageIndex.Log();

// Prepare to operate in chunks
long currentOffset = offset;
long endOffset = offset + size;

while (currentOffset < endOffset)
{
// Get the current entry
var currentEntry = visitor.Get<Entry>();

// Get and validate the entry's offset
long currentEntryOffset = currentEntry.GetVirtualOffset();
if (currentEntryOffset > currentOffset)
return ResultFs.InvalidIndirectEntryOffset.Log();

// Validate the storage index
if (currentEntry.StorageIndex < 0 || currentEntry.StorageIndex >= StorageCount)
return ResultFs.InvalidIndirectEntryStorageIndex.Log();

// todo: Implement continuous reading

// Get and validate the next entry offset
long nextEntryOffset;
if (visitor.CanMoveNext())
{
rc = visitor.MoveNext();
if (rc.IsFailure()) return rc;

nextEntryOffset = visitor.Get<Entry>().GetVirtualOffset();
if (!Table.Includes(nextEntryOffset))
return ResultFs.InvalidIndirectEntryOffset.Log();
}
else
{
nextEntryOffset = Table.GetEnd();
}

if (currentOffset >= nextEntryOffset)
return ResultFs.InvalidIndirectEntryOffset.Log();

// Get the offset of the entry in the data we read
long dataOffset = currentOffset - currentEntryOffset;
long dataSize = nextEntryOffset - currentEntryOffset - dataOffset;
Assert.AssertTrue(dataSize > 0);

// Determine how much is left
long remainingSize = endOffset - currentOffset;
long currentSize = Math.Min(remainingSize, dataSize);
Assert.AssertTrue(currentSize <= size);

{
SubStorage2 currentStorage = DataStorage[currentEntry.StorageIndex];

// Get the current data storage's size.
rc = currentStorage.GetSize(out long currentDataStorageSize);
if (rc.IsFailure()) return rc;

// Ensure that we remain within range.
long currentEntryPhysicalOffset = currentEntry.GetPhysicalOffset();

if (currentEntryPhysicalOffset < 0 || currentEntryPhysicalOffset > currentDataStorageSize)
return ResultFs.IndirectStorageCorrupted.Log();

if (currentDataStorageSize < currentEntryPhysicalOffset + dataOffset + currentSize)
return ResultFs.IndirectStorageCorrupted.Log();

rc = func(currentStorage, currentEntryPhysicalOffset + dataOffset, currentOffset, currentSize);
if (rc.IsFailure()) return rc;
}

currentOffset += currentSize;
}
}
finally { visitor.Dispose(); }

return Result.Success;
}
}
}
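To make the chunking in OperatePerEntry concrete, a small self-contained sketch with invented entry values (not part of the commit) that mirrors how one read request is split across two entries:

using System;

static class IndirectReadSplitSketch
{
    static void Main()
    {
        // Hypothetical table: entry 0 maps virtual 0x0000 to storage 0 @ 0x0000,
        //                     entry 1 maps virtual 0x1000 to storage 1 @ 0x8000.
        long requestOffset = 0xF00;
        long requestSize = 0x200;
        long entry1Virtual = 0x1000;

        // Chunk 1 stays in entry 0 and ends at the next entry's virtual offset.
        long chunk0Size = Math.Min(requestSize, entry1Virtual - requestOffset); // 0x100
        long chunk0Physical = 0x0000 + (requestOffset - 0x0000);                // storage 0 @ 0xF00

        // Chunk 2 is the remainder, served from entry 1 at its physical offset.
        long chunk1Size = requestSize - chunk0Size;                             // 0x100
        long chunk1Physical = 0x8000;                                           // storage 1 @ 0x8000

        Console.WriteLine($"storage 0: offset 0x{chunk0Physical:X}, size 0x{chunk0Size:X}");
        Console.WriteLine($"storage 1: offset 0x{chunk1Physical:X}, size 0x{chunk1Size:X}");
    }
}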
@ -2,6 +2,7 @@
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using LibHac.Common;
using LibHac.Fs;
using LibHac.Fs.Fsa;
using LibHac.FsSystem.RomFs;

@ -193,10 +194,21 @@ namespace LibHac.FsSystem.NcaUtils
byte[] counterEx = Aes128CtrStorage.CreateCounter(fsHeader.Counter, sectionOffset);

IStorage bucketTreeData = new CachedStorage(new Aes128CtrStorage(baseStorage.Slice(bktrOffset, bktrSize), key, counter, true), 4, true);
var encryptionBucketTreeData = new SubStorage2(bucketTreeData,
info.EncryptionTreeOffset - bktrOffset, sectionSize - info.EncryptionTreeOffset);

IStorage encryptionBucketTreeData = bucketTreeData.Slice(info.EncryptionTreeOffset - bktrOffset);
IStorage decStorage = new Aes128CtrExStorage(baseStorage.Slice(0, dataSize), encryptionBucketTreeData, key, counterEx, true);
decStorage = new CachedStorage(decStorage, 0x4000, 4, true);
var cachedBucketTreeData = new CachedStorage(encryptionBucketTreeData, IndirectStorage.NodeSize, 6, true);

var treeHeader = new BucketTree.Header();
info.EncryptionTreeHeader.CopyTo(SpanHelpers.AsByteSpan(ref treeHeader));
long nodeStorageSize = IndirectStorage.QueryNodeStorageSize(treeHeader.EntryCount);
long entryStorageSize = IndirectStorage.QueryEntryStorageSize(treeHeader.EntryCount);

var tableNodeStorage = new SubStorage2(cachedBucketTreeData, 0, nodeStorageSize);
var tableEntryStorage = new SubStorage2(cachedBucketTreeData, nodeStorageSize, entryStorageSize);

IStorage decStorage = new Aes128CtrExStorage(baseStorage.Slice(0, dataSize), tableNodeStorage,
tableEntryStorage, treeHeader.EntryCount, key, counterEx, true);

return new ConcatenationStorage(new[] { decStorage, bucketTreeData }, true);
}

@ -214,6 +226,9 @@ namespace LibHac.FsSystem.NcaUtils
IStorage patchStorage = patchNca.OpenRawStorage(index);
IStorage baseStorage = SectionExists(index) ? OpenRawStorage(index) : new NullStorage();

patchStorage.GetSize(out long patchSize).ThrowIfFailure();
baseStorage.GetSize(out long baseSize).ThrowIfFailure();

NcaFsHeader header = patchNca.Header.GetFsHeader(index);
NcaFsPatchInfo patchInfo = header.GetPatchInfo();

@ -222,9 +237,24 @@ namespace LibHac.FsSystem.NcaUtils
return patchStorage;
}

IStorage relocationTableStorage = patchStorage.Slice(patchInfo.RelocationTreeOffset, patchInfo.RelocationTreeSize);
var treeHeader = new BucketTree.Header();
patchInfo.RelocationTreeHeader.CopyTo(SpanHelpers.AsByteSpan(ref treeHeader));
long nodeStorageSize = IndirectStorage.QueryNodeStorageSize(treeHeader.EntryCount);
long entryStorageSize = IndirectStorage.QueryEntryStorageSize(treeHeader.EntryCount);

return new IndirectStorage(relocationTableStorage, true, baseStorage, patchStorage);
var relocationTableStorage = new SubStorage2(patchStorage, patchInfo.RelocationTreeOffset, patchInfo.RelocationTreeSize);
var cachedTableStorage = new CachedStorage(relocationTableStorage, IndirectStorage.NodeSize, 4, true);

var tableNodeStorage = new SubStorage2(cachedTableStorage, 0, nodeStorageSize);
var tableEntryStorage = new SubStorage2(cachedTableStorage, nodeStorageSize, entryStorageSize);

var storage = new IndirectStorage();
storage.Initialize(tableNodeStorage, tableEntryStorage, treeHeader.EntryCount).ThrowIfFailure();

storage.SetStorage(0, baseStorage, 0, baseSize);
storage.SetStorage(1, patchStorage, 0, patchSize);

return storage;
}

public IStorage OpenStorage(int index, IntegrityCheckLevel integrityCheckLevel)

@ -48,20 +48,9 @@ namespace LibHac
return true;
}

public static bool SpansEqual<T>(Span<T> a1, Span<T> a2)
public static bool SpansEqual<T>(Span<T> a1, Span<T> a2) where T : IEquatable<T>
{
if (a1 == a2) return true;
if (a1.Length != a2.Length) return false;

for (int i = 0; i < a1.Length; i++)
{
if (!a1[i].Equals(a2[i]))
{
return false;
}
}

return true;
return a1.SequenceEqual(a2);
}

public static ReadOnlySpan<byte> GetUtf8Bytes(string value)
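The rewrite relies on MemoryExtensions.SequenceEqual, which is why the IEquatable<T> constraint was added. A quick sketch of the equivalent call (framework API, shown for illustration; assumes a method body or top-level statements):

Span<byte> a = stackalloc byte[] { 1, 2, 3 };
Span<byte> b = stackalloc byte[] { 1, 2, 3 };

// Element-wise comparison using IEquatable<byte>.Equals
bool equal = a.SequenceEqual(b); // true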

@ -491,5 +480,27 @@ namespace LibHac

return keyGeneration - 1;
}

public static bool IsPowerOfTwo(int value)
{
return value > 0 && ResetLeastSignificantOneBit(value) == 0;
}

public static bool IsPowerOfTwo(long value)
{
return value > 0 && ResetLeastSignificantOneBit(value) == 0;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static int ResetLeastSignificantOneBit(int value)
{
return value & (value - 1);
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static long ResetLeastSignificantOneBit(long value)
{
return value & (value - 1);
}
}
}
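These helpers use the classic clear-lowest-set-bit trick: value & (value - 1) removes the least significant 1 bit, so the result is zero exactly when at most one bit was set, and the value > 0 guard rejects zero and negatives. A quick standalone check (hypothetical snippet, not LibHac code):

using System;

static class PowerOfTwoSketch
{
    static bool IsPow2(long value) => value > 0 && (value & (value - 1)) == 0;

    static void Main()
    {
        Console.WriteLine(IsPow2(0x4000));  // True  (exactly one bit set)
        Console.WriteLine(IsPow2(0x6000));  // False (0x6000 & 0x5FFF == 0x4000)
        Console.WriteLine(IsPow2(0));       // False (rejected by the value > 0 guard)
        Console.WriteLine(IsPow2(-8));      // False (negatives rejected as well)
    }
}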