Support reading compressed NCAs

Commit b27bc7e665 (parent 1597f05b27)
Repository: https://github.com/Thealexbarney/LibHac.git
8 changed files with 310 additions and 19 deletions

@@ -78,6 +78,16 @@ public struct ValueSubStorage : IDisposable
         _sharedBaseStorage.Destroy();
     }
 
+    public readonly SubStorage GetSubStorage()
+    {
+        if (_sharedBaseStorage.HasValue)
+        {
+            return new SubStorage(in _sharedBaseStorage, _offset, _size);
+        }
+
+        return new SubStorage(_baseStorage, _offset, _size);
+    }
+
     public void Set(in ValueSubStorage other)
     {
         if (!Unsafe.AreSame(ref Unsafe.AsRef(in this), ref Unsafe.AsRef(in other)))

@@ -176,4 +186,4 @@ public struct ValueSubStorage : IDisposable
 
         return _baseStorage.OperateRange(outBuffer, operationId, _offset + offset, size, inBuffer);
     }
 }

@@ -262,6 +262,7 @@ public class CompressedStorage : IStorage, IAsynchronousAccessSplitter
         public long VirtualOffset;
         public long PhysicalOffset;
         public CompressionType CompressionType;
+        public sbyte CompressionLevel;
         public uint PhysicalSize;
 
         public readonly long GetPhysicalSize() => PhysicalSize;

@@ -360,18 +361,18 @@ public class CompressedStorage : IStorage, IAsynchronousAccessSplitter
         switch (operationId)
         {
             case OperationId.InvalidateCache:
             {
                 _cacheManager.Invalidate();
                 Result rc = _core.Invalidate();
                 if (rc.IsFailure()) return rc.Miss();
                 break;
             }
             case OperationId.QueryRange:
             {
                 Result rc = _core.QueryRange(outBuffer, offset, size);
                 if (rc.IsFailure()) return rc.Miss();
                 break;
             }
             default:
                 return ResultFs.UnsupportedOperateRangeForCompressedStorage.Log();
         }

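Note on the layout: with the new sbyte CompressionLevel field and the byte-sized CompressionType enum (changed in the hunk that follows), the sequentially laid out Entry packs to 24 bytes (8 + 8 + 1 + 1 + 2 bytes of alignment padding + 4). A standalone sketch of that packing check, not part of the commit, using a stand-in Entry definition:

using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;

[StructLayout(LayoutKind.Sequential)]
struct Entry
{
    public long VirtualOffset;     // 0x00
    public long PhysicalOffset;    // 0x08
    public byte CompressionType;   // 0x10 (stands in for the byte-sized enum)
    public sbyte CompressionLevel; // 0x11
    public uint PhysicalSize;      // 0x14, after 2 bytes of padding
}

static class EntryLayoutCheck
{
    static void Main()
    {
        // Sequential layout with default packing gives a 24-byte entry.
        Console.WriteLine(Unsafe.SizeOf<Entry>()); // 24
    }
}
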
@@ -1,6 +1,6 @@
 namespace LibHac.FsSystem;
 
-public enum CompressionType
+public enum CompressionType : byte
 {
     None = 0,
     Zeroed = 1,

@@ -24,6 +24,13 @@ public struct NcaSparseInfo
     }
 }
 
+public struct NcaCompressionInfo
+{
+    public long MetaOffset;
+    public long MetaSize;
+    public Array16<byte> MetaHeader;
+}
+
 [StructLayout(LayoutKind.Explicit)]
 public struct NcaAesCtrUpperIv
 {

src/LibHac/Tools/FsSystem/CompressedStorage.cs (new file, +210 lines):

using System;
using System.Buffers;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using LibHac.Common;
using LibHac.Diag;
using LibHac.Fs;
using LibHac.FsSystem;
using LibHac.Util;

namespace LibHac.Tools.FsSystem;

internal class CompressedStorage : IStorage
{
    [StructLayout(LayoutKind.Sequential)]
    public struct Entry
    {
        public long VirtualOffset;
        public long PhysicalOffset;
        public CompressionType CompressionType;
        public sbyte CompressionLevel;
        public uint PhysicalSize;
    }

    public static readonly int NodeSize = 0x4000;

    public static long QueryEntryStorageSize(int entryCount)
    {
        return BucketTree.QueryEntryStorageSize(NodeSize, Unsafe.SizeOf<Entry>(), entryCount);
    }

    public static long QueryNodeStorageSize(int entryCount)
    {
        return BucketTree.QueryNodeStorageSize(NodeSize, Unsafe.SizeOf<Entry>(), entryCount);
    }

    private readonly BucketTree _bucketTree;
    private ValueSubStorage _dataStorage;

    public CompressedStorage()
    {
        _bucketTree = new BucketTree();
        _dataStorage = new ValueSubStorage();
    }

    public Result Initialize(MemoryResource allocatorForBucketTree, in ValueSubStorage dataStorage,
        in ValueSubStorage nodeStorage, in ValueSubStorage entryStorage, int bucketTreeEntryCount)
    {
        nodeStorage.GetSubStorage().WriteAllBytes("nodeStorage");
        entryStorage.GetSubStorage().WriteAllBytes("entryStorage");

        Result rc = _bucketTree.Initialize(allocatorForBucketTree, in nodeStorage, in entryStorage, NodeSize,
            Unsafe.SizeOf<Entry>(), bucketTreeEntryCount);
        if (rc.IsFailure()) return rc.Miss();

        _dataStorage.Set(in dataStorage);

        return Result.Success;
    }

    public override Result Read(long offset, Span<byte> destination)
    {
        // Validate arguments
        Result rc = _bucketTree.GetOffsets(out BucketTree.Offsets offsets);
        if (rc.IsFailure()) return rc.Miss();

        if (!offsets.IsInclude(offset, destination.Length))
            return ResultFs.OutOfRange.Log();

        // Find the offset in our tree
        using var visitor = new BucketTree.Visitor();

        rc = _bucketTree.Find(ref visitor.Ref, offset);
        if (rc.IsFailure()) return rc;

        long entryOffset = visitor.Get<Entry>().VirtualOffset;
        if (entryOffset < 0 || !offsets.IsInclude(entryOffset))
            return ResultFs.UnexpectedInCompressedStorageA.Log();

        // Prepare to operate in chunks
        long currentOffset = offset;
        long endOffset = offset + destination.Length;

        byte[] workBufferEnc = null;
        byte[] workBufferDec = null;

        while (currentOffset < endOffset)
        {
            // Get the current entry
            var currentEntry = visitor.Get<Entry>();

            // Get and validate the entry's offset
            long currentEntryOffset = currentEntry.VirtualOffset;
            if (currentEntryOffset > currentOffset)
                return ResultFs.UnexpectedInCompressedStorageA.Log();

            // Get and validate the next entry offset
            long nextEntryOffset;
            if (visitor.CanMoveNext())
            {
                rc = visitor.MoveNext();
                if (rc.IsFailure()) return rc;

                nextEntryOffset = visitor.Get<Entry>().VirtualOffset;
                if (!offsets.IsInclude(nextEntryOffset))
                    return ResultFs.UnexpectedInCompressedStorageA.Log();
            }
            else
            {
                nextEntryOffset = offsets.EndOffset;
            }

            if (currentOffset >= nextEntryOffset)
                return ResultFs.UnexpectedInCompressedStorageA.Log();

            // Get the offset of the data we need in the entry
            long dataOffsetInEntry = currentOffset - currentEntryOffset;
            long currentEntrySize = nextEntryOffset - currentEntryOffset;

            // Determine how much is left
            long remainingSize = endOffset - currentOffset;
            long toWriteSize = Math.Min(remainingSize, currentEntrySize - dataOffsetInEntry);
            Assert.SdkLessEqual(toWriteSize, destination.Length);

            Span<byte> entryDestination = destination.Slice((int)(currentOffset - offset), (int)toWriteSize);

            if (currentEntry.CompressionType == CompressionType.Lz4)
            {
                EnsureBufferSize(ref workBufferEnc, (int)currentEntry.PhysicalSize);
                EnsureBufferSize(ref workBufferDec, (int)currentEntrySize);

                Span<byte> encBuffer = workBufferEnc.AsSpan(0, (int)currentEntry.PhysicalSize);
                Span<byte> decBuffer = workBufferDec.AsSpan(0, (int)currentEntrySize);

                rc = _dataStorage.Read(currentEntry.PhysicalOffset, encBuffer);
                if (rc.IsFailure()) return rc.Miss();

                Lz4.Decompress(encBuffer, decBuffer);

                decBuffer.Slice((int)dataOffsetInEntry, (int)toWriteSize).CopyTo(entryDestination);
            }
            else if (currentEntry.CompressionType == CompressionType.None)
            {
                rc = _dataStorage.Read(currentEntry.PhysicalOffset + dataOffsetInEntry, entryDestination);
                if (rc.IsFailure()) return rc.Miss();
            }
            else if (currentEntry.CompressionType == CompressionType.Zeroed)
            {
                entryDestination.Clear();
            }

            currentOffset += toWriteSize;
        }

        if (workBufferDec is not null)
            ArrayPool<byte>.Shared.Return(workBufferDec);

        if (workBufferEnc is not null)
            ArrayPool<byte>.Shared.Return(workBufferEnc);

        return Result.Success;

        static void EnsureBufferSize(ref byte[] buffer, int requiredSize)
        {
            if (buffer is null || buffer.Length < requiredSize)
            {
                if (buffer is not null)
                {
                    ArrayPool<byte>.Shared.Return(buffer);
                }

                buffer = ArrayPool<byte>.Shared.Rent(requiredSize);
            }

            Assert.SdkGreaterEqual(buffer.Length, requiredSize);
        }
    }

    public override Result Write(long offset, ReadOnlySpan<byte> source)
    {
        return ResultFs.UnsupportedWriteForCompressedStorage.Log();
    }

    public override Result Flush()
    {
        return Result.Success;
    }

    public override Result SetSize(long size)
    {
        return ResultFs.UnsupportedSetSizeForIndirectStorage.Log();
    }

    public override Result GetSize(out long size)
    {
        UnsafeHelpers.SkipParamInit(out size);

        Result rc = _bucketTree.GetOffsets(out BucketTree.Offsets offsets);
        if (rc.IsFailure()) return rc.Miss();

        size = offsets.EndOffset;
        return Result.Success;
    }

    public override Result OperateRange(Span<byte> outBuffer, OperationId operationId, long offset, long size,
        ReadOnlySpan<byte> inBuffer)
    {
        throw new NotImplementedException();
    }
}

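The Read path above resolves a virtual offset to a bucket-tree entry, reads the corresponding physical chunk from the data storage, and then either LZ4-decompresses it, copies it through unchanged, or zero-fills the destination, depending on the entry's CompressionType. A hedged usage sketch from inside the library (the class is internal; usings omitted), assuming the data, node, and entry ValueSubStorage windows and the entry count have already been derived from an NCA's compression info, as Nca.OpenCompressedStorage does further below:

// Sketch only; the storage windows and entry count are assumed inputs.
static byte[] ReadAllDecompressed(in ValueSubStorage dataStorage, in ValueSubStorage nodeStorage,
    in ValueSubStorage entryStorage, int entryCount)
{
    var storage = new CompressedStorage();
    storage.Initialize(new ArrayPoolMemoryResource(), in dataStorage, in nodeStorage, in entryStorage,
        entryCount).ThrowIfFailure();

    storage.GetSize(out long size).ThrowIfFailure();

    byte[] decompressed = new byte[size];
    storage.Read(0, decompressed).ThrowIfFailure(); // handles Lz4, None, and Zeroed chunks transparently
    return decompressed;
}
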
@@ -202,8 +202,6 @@ public class Nca
         using var nodeStorage = new ValueSubStorage(metaStorage, nodeOffset, nodeSize);
         using var entryStorage = new ValueSubStorage(metaStorage, entryOffset, entrySize);
 
-        new SubStorage(metaStorage, nodeOffset, nodeSize).WriteAllBytes("nodeStorage");
-
         sparseStorage.Initialize(new ArrayPoolMemoryResource(), in nodeStorage, in entryStorage, header.EntryCount).ThrowIfFailure();
 
         using var dataStorage = new ValueSubStorage(baseStorage, 0, sparseInfo.GetPhysicalSize());

@@ -368,6 +366,11 @@ public class Nca
     }
 
     public IStorage OpenStorage(int index, IntegrityCheckLevel integrityCheckLevel)
+    {
+        return OpenStorage(index, integrityCheckLevel, false);
+    }
+
+    public IStorage OpenStorage(int index, IntegrityCheckLevel integrityCheckLevel, bool leaveCompressed)
     {
         IStorage rawStorage = OpenRawStorage(index);
         NcaFsHeader header = GetFsHeader(index);
@@ -377,15 +380,62 @@ public class Nca
             return rawStorage.Slice(0, header.GetPatchInfo().RelocationTreeOffset);
         }
 
-        return CreateVerificationStorage(integrityCheckLevel, header, rawStorage);
+        IStorage returnStorage = CreateVerificationStorage(integrityCheckLevel, header, rawStorage);
+
+        if (!leaveCompressed && header.ExistsCompressionLayer())
+        {
+            returnStorage = OpenCompressedStorage(header, returnStorage);
+        }
+
+        return returnStorage;
     }
 
     public IStorage OpenStorageWithPatch(Nca patchNca, int index, IntegrityCheckLevel integrityCheckLevel)
+    {
+        return OpenStorageWithPatch(patchNca, index, integrityCheckLevel, false);
+    }
+
+    public IStorage OpenStorageWithPatch(Nca patchNca, int index, IntegrityCheckLevel integrityCheckLevel,
+        bool leaveCompressed)
     {
         IStorage rawStorage = OpenRawStorageWithPatch(patchNca, index);
         NcaFsHeader header = patchNca.GetFsHeader(index);
 
-        return CreateVerificationStorage(integrityCheckLevel, header, rawStorage);
+        IStorage returnStorage = CreateVerificationStorage(integrityCheckLevel, header, rawStorage);
+
+        if (!leaveCompressed && header.ExistsCompressionLayer())
+        {
+            returnStorage = OpenCompressedStorage(header, returnStorage);
+        }
+
+        return returnStorage;
+    }
+
+    private static IStorage OpenCompressedStorage(NcaFsHeader header, IStorage baseStorage)
+    {
+        ref NcaCompressionInfo compressionInfo = ref header.GetCompressionInfo();
+
+        Unsafe.SkipInit(out BucketTree.Header bucketTreeHeader);
+        compressionInfo.MetaHeader.ItemsRo.CopyTo(SpanHelpers.AsByteSpan(ref bucketTreeHeader));
+        bucketTreeHeader.Verify().ThrowIfFailure();
+
+        long nodeStorageSize = CompressedStorage.QueryNodeStorageSize(bucketTreeHeader.EntryCount);
+        long entryStorageSize = CompressedStorage.QueryEntryStorageSize(bucketTreeHeader.EntryCount);
+        long tableOffset = compressionInfo.MetaOffset;
+        long tableSize = compressionInfo.MetaSize;
+
+        if (entryStorageSize + nodeStorageSize > tableSize)
+            throw new HorizonResultException(ResultFs.NcaInvalidCompressionInfo.Value);
+
+        using var dataStorage = new ValueSubStorage(baseStorage, 0, tableOffset);
+        using var nodeStorage = new ValueSubStorage(baseStorage, tableOffset, nodeStorageSize);
+        using var entryStorage = new ValueSubStorage(baseStorage, tableOffset + nodeStorageSize, entryStorageSize);
+
+        var compressedStorage = new CompressedStorage();
+        compressedStorage.Initialize(new ArrayPoolMemoryResource(), in dataStorage, in nodeStorage, in entryStorage,
+            bucketTreeHeader.EntryCount).ThrowIfFailure();
+
+        return new CachedStorage(compressedStorage, 0x4000, 32, true);
     }
 
     private IStorage CreateVerificationStorage(IntegrityCheckLevel integrityCheckLevel, NcaFsHeader header,

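The public surface stays backward compatible: the existing two-argument overloads now forward with leaveCompressed: false, so callers get decompressed data by default, while code that needs the section exactly as stored (such as the hash checks in NcaExtensions below) passes true. A hedged example of the call patterns, assuming an already constructed Nca and a valid section index:

// Default: any compression layer is removed transparently.
IStorage sectionData = nca.OpenStorage(index, IntegrityCheckLevel.ErrorOnInvalid);

// Opt out of decompression, e.g. to hash the verification layer as it is stored on disk.
IStorage compressedLayer = nca.OpenStorage(index, IntegrityCheckLevel.IgnoreOnInvalid, leaveCompressed: true);
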
@@ -157,7 +157,7 @@ public static class NcaExtensions
         NcaHashType hashType = sect.HashType;
         if (hashType != NcaHashType.Sha256 && hashType != NcaHashType.Ivfc) return Validity.Unchecked;
 
-        var stream = nca.OpenStorage(index, IntegrityCheckLevel.IgnoreOnInvalid)
+        var stream = nca.OpenStorage(index, IntegrityCheckLevel.IgnoreOnInvalid, true)
             as HierarchicalIntegrityVerificationStorage;
         if (stream == null) return Validity.Unchecked;
 

@@ -188,7 +188,7 @@ public static class NcaExtensions
         NcaHashType hashType = sect.HashType;
         if (hashType != NcaHashType.Sha256 && hashType != NcaHashType.Ivfc) return Validity.Unchecked;
 
-        var stream = nca.OpenStorageWithPatch(patchNca, index, IntegrityCheckLevel.IgnoreOnInvalid)
+        var stream = nca.OpenStorageWithPatch(patchNca, index, IntegrityCheckLevel.IgnoreOnInvalid, true)
             as HierarchicalIntegrityVerificationStorage;
         if (stream == null) return Validity.Unchecked;
 

@@ -73,6 +73,17 @@ public struct NcaFsHeader
         return GetSparseInfo().Generation != 0;
     }
 
+    public ref NcaCompressionInfo GetCompressionInfo()
+    {
+        return ref MemoryMarshal.Cast<byte, NcaCompressionInfo>(_header.Span.Slice(FsHeaderStruct.CompressionInfoOffset,
+            FsHeaderStruct.CompressionInfoSize))[0];
+    }
+
+    public bool ExistsCompressionLayer()
+    {
+        return GetCompressionInfo().MetaOffset != 0 && GetCompressionInfo().MetaSize != 0;
+    }
+
     public ulong Counter
     {
         get => Header.UpperCounter;

@@ -100,6 +111,8 @@ public struct NcaFsHeader
         public const int PatchInfoSize = 0x40;
         public const int SparseInfoOffset = 0x148;
         public const int SparseInfoSize = 0x30;
+        public const int CompressionInfoOffset = 0x178;
+        public const int CompressionInfoSize = 0x20;
 
         [FieldOffset(0)] public short Version;
         [FieldOffset(2)] public byte FormatType;
