Mirror of https://github.com/jakcron/nstool.git (synced 2024-12-22 02:35:28 +00:00)

Commit 54bef2fdc4
.gitignore (vendored), 1 line changed

@@ -4,6 +4,7 @@ data/*
 *.a
 *.so.*
 .DS_Store
+.vscode/*
 
 ## Ignore Visual Studio temporary files, build results, and
 ## files generated by popular Visual Studio add-ons.
.gitmodules (vendored), 6 lines changed

@@ -1,9 +1,6 @@
 [submodule "deps/liblz4"]
 	path = deps/liblz4
 	url = https://github.com/jakcron/liblz4.git
-[submodule "deps/libpolarssl"]
-	path = deps/libpolarssl
-	url = https://github.com/jakcron/libpolarssl.git
 [submodule "deps/libfnd"]
 	path = deps/libfnd
 	url = https://github.com/jakcron/libfnd.git
@@ -19,3 +16,6 @@
 [submodule "deps/libnintendo-hac-hb"]
 	path = deps/libnintendo-hac-hb
 	url = https://github.com/jakcron/libnintendo-hac-hb.git
+[submodule "deps/libmbedtls"]
+	path = deps/libmbedtls
+	url = https://github.com/jakcron/libmbedtls
README.md

@@ -4,8 +4,8 @@ General purpose reading/extraction tool for Nintendo Switch file formats.
 ## Supported File Formats
 * Meta (.npdm)
 * PartitionFS (and HashedPartitionFS) (includes raw .nsp)
-* RomFS
-* GameCard Image (.xci)
+* RomFS (and CompressedRomFS)
+* NX GameCard Image (.xci)
 * Nintendo Content Archive (.nca)
 * Content Metadata (.cnmt)
 * Nintendo Software Object (.nso)
nstool.sln (Visual Studio solution)

@@ -36,7 +36,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libnintendo-pki", "..\..\de
 		{4E578016-34BA-4A1E-B8EC-37A48780B6CA} = {4E578016-34BA-4A1E-B8EC-37A48780B6CA}
 	EndProjectSection
 EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libpolarssl", "..\..\deps\libpolarssl\build\visualstudio\libpolarssl\libpolarssl.vcxproj", "{7A7C66F3-2B5B-4E23-85D8-2A74FEDAD92C}"
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libmbedtls", "..\..\deps\libmbedtls\build\visualstudio\libmbedtls\libmbedtls.vcxproj", "{7A7C66F3-2B5B-4E23-85D8-2A74FEDAD92C}"
 EndProject
 Global
 	GlobalSection(SolutionConfigurationPlatforms) = preSolution
nstool.vcxproj

@@ -137,13 +137,14 @@
     <ProjectReference Include="..\..\..\deps\libnintendo-pki\build\visualstudio\libnintendo-pki\libnintendo-pki.vcxproj">
       <Project>{0bef63a0-2801-4563-ab65-1e2fd881c3af}</Project>
     </ProjectReference>
-    <ProjectReference Include="..\..\..\deps\libpolarssl\build\visualstudio\libpolarssl\libpolarssl.vcxproj">
+    <ProjectReference Include="..\..\..\deps\libmbedtls\build\visualstudio\libmbedtls\libmbedtls.vcxproj">
       <Project>{7a7c66f3-2b5b-4e23-85d8-2a74fedad92c}</Project>
     </ProjectReference>
   </ItemGroup>
   <ItemGroup>
     <ClCompile Include="..\..\..\src\AssetProcess.cpp" />
     <ClCompile Include="..\..\..\src\CnmtProcess.cpp" />
+    <ClCompile Include="..\..\..\src\CompressedArchiveIFile.cpp" />
     <ClCompile Include="..\..\..\src\ElfSymbolParser.cpp" />
     <ClCompile Include="..\..\..\src\EsTikProcess.cpp" />
     <ClCompile Include="..\..\..\src\GameCardProcess.cpp" />
@@ -168,6 +169,7 @@
     <ClInclude Include="..\..\..\src\AssetProcess.h" />
     <ClInclude Include="..\..\..\src\CnmtProcess.h" />
     <ClInclude Include="..\..\..\src\common.h" />
+    <ClInclude Include="..\..\..\src\CompressedArchiveIFile.h" />
     <ClInclude Include="..\..\..\src\ElfSymbolParser.h" />
     <ClInclude Include="..\..\..\src\EsTikProcess.h" />
     <ClInclude Include="..\..\..\src\GameCardProcess.h" />
nstool.vcxproj.filters

@@ -21,6 +21,9 @@
     <ClCompile Include="..\..\..\src\CnmtProcess.cpp">
       <Filter>Source Files</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\..\src\CompressedArchiveIFile.cpp">
+      <Filter>Source Files</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\..\src\ElfSymbolParser.cpp">
       <Filter>Source Files</Filter>
     </ClCompile>
@@ -89,6 +92,9 @@
     <ClInclude Include="..\..\..\src\common.h">
       <Filter>Header Files</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\..\src\CompressedArchiveIFile.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
     <ClInclude Include="..\..\..\src\ElfSymbolParser.h">
       <Filter>Header Files</Filter>
     </ClInclude>
deps/libfnd (vendored submodule), 2 lines changed

@@ -1 +1 @@
-Subproject commit 234c81d864e53cd208bb93bce69a1f5ff7e44161
+Subproject commit 19c1683060a8b39b737da8505b5e23660ed86282

deps/libmbedtls (vendored submodule, new), 1 line added

@@ -0,0 +1 @@
+Subproject commit bc43e5e079529455749d81d1a3b77a9574d5ab01

deps/libnintendo-es (vendored submodule), 2 lines changed

@@ -1 +1 @@
-Subproject commit ba3036e08dc0d58662c3fc38a3dbee46371e399a
+Subproject commit 9e3f1ea763be033f60b3b2db0b2d6e2aac462a37

deps/libnintendo-hac (vendored submodule), 2 lines changed

@@ -1 +1 @@
-Subproject commit 9372cb34d8ffdf4c84cc7a25ec9e675fdaff32c6
+Subproject commit 89c258322f1fd48dbda9c0498be52399a3c04a8c

deps/libnintendo-hac-hb (vendored submodule), 2 lines changed

@@ -1 +1 @@
-Subproject commit e7f773bf883c6f5db700df32a286aa110849d863
+Subproject commit 95fb4d7762eb5b395fa5023fd3a9b6f34151505a

deps/libnintendo-pki (vendored submodule), 2 lines changed

@@ -1 +1 @@
-Subproject commit ca228576350a7ea3922ddc87ca43d68376d16fb2
+Subproject commit 5097871222f6e2cd07b7b8e8b58551e913eb1c15

deps/libpolarssl (vendored submodule, removed), 1 line removed

@@ -1 +0,0 @@
-Subproject commit 7cf5ebe575b5330b946160bad05c6be917340ae9
makefile, 2 lines changed

@@ -31,7 +31,7 @@ PROJECT_SONAME = $(PROJECT_NAME).so.$(PROJECT_SO_VER_MAJOR)
 PROJECT_SO_FILENAME = $(PROJECT_SONAME).$(PROJECT_SO_VER_MINOR).$(PROJECT_SO_VER_PATCH)
 
 # Project Dependencies
-PROJECT_DEPEND_LOCAL = nintendo-hac-hb nintendo-hac nintendo-es nintendo-pki fnd polarssl lz4
+PROJECT_DEPEND_LOCAL = nintendo-hac-hb nintendo-hac nintendo-es nintendo-pki fnd mbedtls lz4
 PROJECT_DEPEND_EXTERNAL =
 
 # Generate compiler flags for including project include path
src/CompressedArchiveIFile.cpp (new file), 195 lines added

@@ -0,0 +1,195 @@
#include "CompressedArchiveIFile.h"
#include <fnd/lz4.h>

#include <iostream>

CompressedArchiveIFile::CompressedArchiveIFile(const fnd::SharedPtr<fnd::IFile>& base_file, size_t compression_meta_offset) :
	mFile(base_file),
	mCompEntries(),
	mLogicalFileSize(0),
	mCacheCapacity(nn::hac::compression::kRomfsBlockSize),
	mCurrentCacheDataSize(0),
	mCache(std::shared_ptr<byte_t>(new byte_t[mCacheCapacity])),
	mScratch(std::shared_ptr<byte_t>(new byte_t[mCacheCapacity]))
{
	// determine and check the compression metadata size
	size_t compression_meta_size = (*mFile)->size() - compression_meta_offset;
	if (compression_meta_size % sizeof(nn::hac::sCompressionEntry))
	{
		fnd::Exception(kModuleName, "Invalid compression meta size");
	}

	// import raw metadata
	std::shared_ptr<byte_t> entries_raw = std::shared_ptr<byte_t>(new byte_t[compression_meta_size]);
	(*mFile)->read(entries_raw.get(), compression_meta_offset, compression_meta_size);

	// process metadata entries
	nn::hac::sCompressionEntry* entries = (nn::hac::sCompressionEntry*)entries_raw.get();
	for (size_t idx = 0, num = compression_meta_size / sizeof(nn::hac::sCompressionEntry); idx < num; idx++)
	{
		if (idx == 0)
		{
			if (entries[idx].physical_offset.get() != 0x0)
				throw fnd::Exception(kModuleName, "Entry 0 had a non-zero physical offset");
			if (entries[idx].virtual_offset.get() != 0x0)
				throw fnd::Exception(kModuleName, "Entry 0 had a non-zero virtual offset");
		}
		else
		{
			if (entries[idx].physical_offset.get() != align(entries[idx - 1].physical_offset.get() + entries[idx - 1].physical_size.get(), nn::hac::compression::kRomfsBlockAlign))
				throw fnd::Exception(kModuleName, "Entry was not physically aligned with previous entry");
			if (entries[idx].virtual_offset.get() <= entries[idx - 1].virtual_offset.get())
				throw fnd::Exception(kModuleName, "Entry was not virtually aligned with previous entry");

			// set previous entry virtual_size = this->virtual_offset - prev->virtual_offset;
			mCompEntries[mCompEntries.size() - 1].virtual_size = uint32_t(entries[idx].virtual_offset.get() - mCompEntries[mCompEntries.size() - 1].virtual_offset);
		}

		if (entries[idx].physical_size.get() > nn::hac::compression::kRomfsBlockSize)
			throw fnd::Exception(kModuleName, "Entry physical size was too large");

		switch ((nn::hac::compression::CompressionType)entries[idx].compression_type)
		{
			case (nn::hac::compression::CompressionType::None):
			case (nn::hac::compression::CompressionType::Lz4):
				break;
			default:
				throw fnd::Exception(kModuleName, "Unsupported CompressionType");
		}

		mCompEntries.push_back({(nn::hac::compression::CompressionType)entries[idx].compression_type, entries[idx].virtual_offset.get(), 0, entries[idx].physical_offset.get(), entries[idx].physical_size.get()});
	}

	// determine logical file size and final entry size
	importEntryDataToCache(mCompEntries.size() - 1);
	mCompEntries[mCurrentEntryIndex].virtual_size = mCurrentCacheDataSize;
	mLogicalFileSize = mCompEntries[mCurrentEntryIndex].virtual_offset + mCompEntries[mCurrentEntryIndex].virtual_size;

	/*
	for (auto itr = mCompEntries.begin(); itr != mCompEntries.end(); itr++)
	{
		std::cout << "entry " << std::endl;
		std::cout << " type: " << (uint32_t)itr->compression_type << std::endl;
		std::cout << " phys_addr: 0x" << std::hex << itr->physical_offset << std::endl;
		std::cout << " phys_size: 0x" << std::hex << itr->physical_size << std::endl;
		std::cout << " virt_addr: 0x" << std::hex << itr->virtual_offset << std::endl;
		std::cout << " virt_size: 0x" << std::hex << itr->virtual_size << std::endl;
	}

	std::cout << "logical size: 0x" << std::hex << mLogicalFileSize << std::endl;
	*/
}

size_t CompressedArchiveIFile::size()
{
	return mLogicalFileSize;
}

void CompressedArchiveIFile::seek(size_t offset)
{
	mLogicalOffset = std::min<size_t>(offset, mLogicalFileSize);
}

void CompressedArchiveIFile::read(byte_t* out, size_t len)
{
	// limit len to the end of the logical file
	len = std::min<size_t>(len, mLogicalFileSize - mLogicalOffset);

	for (size_t pos = 0, entry_index = getEntryIndexForLogicalOffset(mLogicalOffset); pos < len; entry_index++)
	{
		// importing entry into cache (this does nothing if the entry is already imported)
		importEntryDataToCache(entry_index);

		// write padding if required
		if (mCompEntries[entry_index].virtual_size > mCurrentCacheDataSize)
		{
			memset(mCache.get() + mCurrentCacheDataSize, 0, mCompEntries[entry_index].virtual_size - mCurrentCacheDataSize);
		}

		// determine subset of cache to copy out
		size_t read_offset = mLogicalOffset - (size_t)mCompEntries[entry_index].virtual_offset;
		size_t read_size = std::min<size_t>(len, (size_t)mCompEntries[entry_index].virtual_size - read_offset);

		memcpy(out + pos, mCache.get() + read_offset, read_size);

		// update position/logical offset
		pos += read_size;
		mLogicalOffset += read_size;
	}
}

void CompressedArchiveIFile::read(byte_t* out, size_t offset, size_t len)
{
	seek(offset);
	read(out, len);
}

void CompressedArchiveIFile::write(const byte_t* out, size_t len)
{
	throw fnd::Exception(kModuleName, "write() not supported");
}

void CompressedArchiveIFile::write(const byte_t* out, size_t offset, size_t len)
{
	throw fnd::Exception(kModuleName, "write() not supported");
}

void CompressedArchiveIFile::importEntryDataToCache(size_t entry_index)
{
	// return if entry already imported
	if (mCurrentEntryIndex == entry_index && mCurrentCacheDataSize != 0)
		return;

	// save index
	mCurrentEntryIndex = entry_index;

	// reference entry
	CompressionEntry& entry = mCompEntries[mCurrentEntryIndex];

	if (entry.compression_type == nn::hac::compression::CompressionType::None)
	{
		(*mFile)->read(mCache.get(), entry.physical_offset, entry.physical_size);
		mCurrentCacheDataSize = entry.physical_size;
	}
	else if (entry.compression_type == nn::hac::compression::CompressionType::Lz4)
	{
		(*mFile)->read(mScratch.get(), entry.physical_offset, entry.physical_size);

		mCurrentCacheDataSize = 0;
		fnd::lz4::decompressData(mScratch.get(), entry.physical_size, mCache.get(), uint32_t(mCacheCapacity), mCurrentCacheDataSize);

		if (mCurrentCacheDataSize == 0)
		{
			throw fnd::Exception(kModuleName, "Decompression of final block failed");
		}
	}
}

size_t CompressedArchiveIFile::getEntryIndexForLogicalOffset(size_t logical_offset)
{
	// rule out bad offset
	if (logical_offset > mLogicalFileSize)
		throw fnd::Exception(kModuleName, "illegal logical offset");

	size_t entry_index = 0;

	// try the current comp entry
	if (mCompEntries[mCurrentEntryIndex].virtual_offset <= logical_offset && \
		mCompEntries[mCurrentEntryIndex].virtual_offset + mCompEntries[mCurrentEntryIndex].virtual_size >= logical_offset)
	{
		entry_index = mCurrentEntryIndex;
	}
	else
	{
		for (size_t index = 0; index < mCompEntries.size(); index++)
		{
			if (mCompEntries[index].virtual_offset <= logical_offset && \
				mCompEntries[index].virtual_offset + mCompEntries[index].virtual_size >= logical_offset)
			{
				entry_index = index;
			}
		}
	}

	return entry_index;
}
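The new class exposes a compressed RomFS as one flat logical file: each sCompressionEntry maps a run of decompressed ("virtual") bytes to a stored ("physical") block, and a read walks that table, decompressing one block at a time into a single cached buffer. The standalone C++ sketch below illustrates only the table-lookup part of that idea; Entry and findEntry are illustrative names, not part of nstool or libfnd.

#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <vector>

// Illustrative only: a trimmed-down analogue of the CompressionEntry record.
struct Entry
{
	uint64_t virtual_offset;  // offset within the decompressed (logical) image
	uint32_t virtual_size;    // decompressed size of the block
	uint64_t physical_offset; // offset of the stored block in the file
	uint32_t physical_size;   // stored (possibly LZ4-compressed) size
};

// Linear scan over entries sorted by virtual_offset, mirroring the idea behind
// getEntryIndexForLogicalOffset(); a binary search would also work here.
size_t findEntry(const std::vector<Entry>& entries, uint64_t logical_offset)
{
	for (size_t i = 0; i < entries.size(); i++)
	{
		if (logical_offset >= entries[i].virtual_offset &&
		    logical_offset < entries[i].virtual_offset + entries[i].virtual_size)
			return i;
	}
	throw std::out_of_range("logical offset beyond the logical file size");
}

A read at an arbitrary logical offset then becomes: find the entry that covers it, decompress (or copy) that entry's physical block into the cache, and copy the requested slice out of the cache, moving on to the next entry until the requested length is satisfied.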
src/CompressedArchiveIFile.h (new file), 51 lines added

@@ -0,0 +1,51 @@
#pragma once
#include <sstream>
#include <fnd/IFile.h>
#include <fnd/SharedPtr.h>
#include <memory>
#include <vector>
#include <nn/hac/define/compression.h>

class CompressedArchiveIFile : public fnd::IFile
{
public:
	CompressedArchiveIFile(const fnd::SharedPtr<fnd::IFile>& file, size_t compression_meta_offset);

	size_t size();
	void seek(size_t offset);
	void read(byte_t* out, size_t len);
	void read(byte_t* out, size_t offset, size_t len);
	void write(const byte_t* out, size_t len);
	void write(const byte_t* out, size_t offset, size_t len);
private:
	const std::string kModuleName = "CompressedArchiveIFile";
	std::stringstream mErrorSs;

	struct CompressionEntry
	{
		nn::hac::compression::CompressionType compression_type;
		uint64_t virtual_offset;
		uint32_t virtual_size;
		uint64_t physical_offset;
		uint32_t physical_size;
	};

	// raw data
	fnd::SharedPtr<fnd::IFile> mFile;

	// compression metadata
	std::vector<CompressionEntry> mCompEntries;
	size_t mLogicalFileSize;
	size_t mLogicalOffset;

	// cached decompressed entry
	size_t mCacheCapacity; // capacity
	size_t mCurrentEntryIndex; // index of entry currently associated with the cache
	uint32_t mCurrentCacheDataSize; // size of data currently in cache
	std::shared_ptr<byte_t> mCache; // where decompressed data resides
	std::shared_ptr<byte_t> mScratch; // same size as cache, but is used for storing data pre-compression
	
	// this will import entry to cache
	void importEntryDataToCache(size_t entry_index);
	size_t getEntryIndexForLogicalOffset(size_t logical_offset);
};
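Per-block decompression in the implementation goes through fnd::lz4::decompressData() from the libfnd submodule. As a rough standalone equivalent, assuming the reference LZ4 C library rather than libfnd, decompressing one stored block into the cache could look like this sketch (decompressBlock is an illustrative name, not the tool's API):

#include <lz4.h>      // reference LZ4 library; an assumption, nstool itself uses fnd::lz4
#include <cstddef>
#include <cstdint>
#include <stdexcept>

// Decompress one stored block into a caller-provided cache buffer and return
// the number of decompressed bytes.
size_t decompressBlock(const uint8_t* src, size_t src_size, uint8_t* dst, size_t dst_capacity)
{
	int written = LZ4_decompress_safe(reinterpret_cast<const char*>(src),
	                                  reinterpret_cast<char*>(dst),
	                                  static_cast<int>(src_size),
	                                  static_cast<int>(dst_capacity));
	if (written < 0)
		throw std::runtime_error("LZ4 block decompression failed");
	return static_cast<size_t>(written);
}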
src/NacpProcess.cpp

@@ -489,4 +489,37 @@ void NacpProcess::displayNacp()
 		std::cout << " IsEnabled: " << std::boolalpha << mNacp.getJitConfiguration().is_enabled << std::endl;
 		std::cout << " MemorySize: 0x" << std::hex << std::setw(16) << std::setfill('0') << mNacp.getJitConfiguration().memory_size << std::endl;
 	}
+
+	// PlayReportPermission
+	if (mNacp.getPlayReportPermission() != nn::hac::nacp::PlayReportPermission::None || _HAS_BIT(mCliOutputMode, OUTPUT_EXTENDED))
+	{
+		std::cout << " PlayReportPermission: " << nn::hac::ApplicationControlPropertyUtil::getPlayReportPermissionAsString(mNacp.getPlayReportPermission()) << std::endl;
+	}
+
+	// CrashScreenshotForProd
+	if (mNacp.getCrashScreenshotForProd() != nn::hac::nacp::CrashScreenshotForProd::Deny || _HAS_BIT(mCliOutputMode, OUTPUT_EXTENDED))
+	{
+		std::cout << " CrashScreenshotForProd: " << nn::hac::ApplicationControlPropertyUtil::getCrashScreenshotForProdAsString(mNacp.getCrashScreenshotForProd()) << std::endl;
+	}
+
+	// CrashScreenshotForDev
+	if (mNacp.getCrashScreenshotForDev() != nn::hac::nacp::CrashScreenshotForDev::Deny || _HAS_BIT(mCliOutputMode, OUTPUT_EXTENDED))
+	{
+		std::cout << " CrashScreenshotForDev: " << nn::hac::ApplicationControlPropertyUtil::getCrashScreenshotForDevAsString(mNacp.getCrashScreenshotForDev()) << std::endl;
+	}
+
+	// AccessibleLaunchRequiredVersion
+	if (mNacp.getAccessibleLaunchRequiredVersionApplicationId().size() > 0)
+	{
+		std::cout << " AccessibleLaunchRequiredVersion:" << std::endl;
+		std::cout << " ApplicationId:" << std::endl;
+		for (auto itr = mNacp.getAccessibleLaunchRequiredVersionApplicationId().begin(); itr != mNacp.getAccessibleLaunchRequiredVersionApplicationId().end(); itr++)
+		{
+			std::cout << " 0x" << std::hex << std::setw(16) << std::setfill('0') << *itr << std::endl;
+		}
+	}
+	else if (_HAS_BIT(mCliOutputMode, OUTPUT_EXTENDED))
+	{
+		std::cout << " AccessibleLaunchRequiredVersion: None" << std::endl;
+	}
 }
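The added fields follow the same display policy as the rest of displayNacp(): a value is printed only when it differs from its default (None or Deny), or when extended CLI output is enabled. A minimal self-contained sketch of that pattern, with illustrative names rather than nstool's own:

#include <iostream>

enum class PlayReportPermission { None, TargetMarketing }; // illustrative values

// Print a NACP field only when it carries a non-default value, or when the
// user asked for extended output (the role _HAS_BIT(mCliOutputMode,
// OUTPUT_EXTENDED) plays above).
void printPlayReportPermission(PlayReportPermission value, bool extended_output)
{
	if (value == PlayReportPermission::None && !extended_output)
		return;
	std::cout << "  PlayReportPermission: "
	          << (value == PlayReportPermission::None ? "None" : "TargetMarketing")
	          << std::endl;
}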
src/RomfsProcess.cpp

@@ -3,6 +3,7 @@
 #include <fnd/SimpleTextOutput.h>
 #include <fnd/SimpleFile.h>
 #include <fnd/io.h>
+#include "CompressedArchiveIFile.h"
 #include "RomfsProcess.h"
 
 RomfsProcess::RomfsProcess() :
@@ -262,6 +263,56 @@ void RomfsProcess::resolveRomfs()
 		throw fnd::Exception(kModuleName, "Invalid ROMFS Header");
 	}
 
+	// check for romfs compression
+	size_t physical_size = (*mFile)->size();
+	size_t logical_size = mHdr.sections[nn::hac::romfs::FILE_NODE_TABLE].offset.get() + mHdr.sections[nn::hac::romfs::FILE_NODE_TABLE].size.get();
+
+	// if logical size is greater than the physical size, check for compression meta footer
+	if (logical_size > physical_size)
+	{
+		// initial and final entries
+		nn::hac::sCompressionEntry entry[2];
+
+		// read final compression entry
+		(*mFile)->read((byte_t*)&entry[1], physical_size - sizeof(nn::hac::sCompressionEntry), sizeof(nn::hac::sCompressionEntry));
+
+		// the final compression entry should be for the (final part, in the case of metadata > 0x10000) romfs footer, for which the logical offset is detailed in the romfs header
+		// the compression is always enabled for non-header compression entries
+		uint64_t romfs_metadata_begin_offset = mHdr.sections[nn::hac::romfs::DIR_HASHMAP_TABLE].offset.get();
+		uint64_t romfs_metadata_end_offset = mHdr.sections[nn::hac::romfs::FILE_NODE_TABLE].offset.get() + mHdr.sections[nn::hac::romfs::FILE_NODE_TABLE].size.get();
+
+		if ((entry[1].virtual_offset.get() >= romfs_metadata_begin_offset && entry[1].virtual_offset.get() < romfs_metadata_end_offset) == false || \
+			entry[1].compression_type != (byte_t)nn::hac::compression::CompressionType::Lz4)
+		{
+			throw fnd::Exception(kModuleName, "RomFs appears corrupted (bad final compression entry virtual offset/compression type)");
+		}
+
+		// the first compression entry follows the physical placement of the final data chunk (specified in the final compression entry)
+		size_t first_entry_offset = align(entry[1].physical_offset.get() + entry[1].physical_size.get(), nn::hac::compression::kRomfsBlockAlign);
+
+		// quick check to make sure the offset at least before the last entry offset
+		if (first_entry_offset >= (physical_size - sizeof(nn::hac::sCompressionEntry)))
+		{
+			throw fnd::Exception(kModuleName, "RomFs appears corrupted (bad final compression entry physical offset/size)");
+		}
+
+		// read first compression entry
+		(*mFile)->read((byte_t*)&entry[0], first_entry_offset, sizeof(nn::hac::sCompressionEntry));
+
+		// validate first compression entry
+		// this should be the same for all compressed romfs
+		if (entry[0].virtual_offset.get() != 0x0 || \
+			entry[0].physical_offset.get() != 0x0 || \
+			entry[0].physical_size.get() != 0x200 || \
+			entry[0].compression_type != (byte_t)nn::hac::compression::CompressionType::None)
+		{
+			throw fnd::Exception(kModuleName, "RomFs appears corrupted (bad first compression entry)");
+		}
+
+		// wrap mFile in a class to transparantly decompress the image.
+		mFile = new CompressedArchiveIFile(mFile, first_entry_offset);
+	}
+
 	// read directory nodes
 	mDirNodes.alloc(mHdr.sections[nn::hac::romfs::DIR_NODE_TABLE].size.get());
 	(*mFile)->read(mDirNodes.data(), mHdr.sections[nn::hac::romfs::DIR_NODE_TABLE].offset.get(), mDirNodes.size());
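The detection hinges on one observation: the RomFS header reports the logical (decompressed) extent of its metadata tables, so if that extent is larger than the file actually is, the image must be compressed and a table of sCompressionEntry records sits at the end of the file. A standalone sketch of the arithmetic follows; align_up matches the usual power-of-two round-up idiom, and the 0x10 alignment value is an assumption for illustration, not a constant taken from libnintendo-hac.

#include <cstdint>

// Round value up to the next multiple of a power-of-two alignment.
static uint64_t align_up(uint64_t value, uint64_t alignment)
{
	return (value + alignment - 1) & ~(alignment - 1);
}

// A RomFS is treated as compressed when its header-reported logical size
// exceeds the physical file size.
static bool looksCompressed(uint64_t logical_size, uint64_t physical_size)
{
	return logical_size > physical_size;
}

// The compression-entry table starts just past the final data block, rounded
// up to the block alignment (0x10 here is assumed for illustration).
static uint64_t firstEntryOffset(uint64_t final_block_offset, uint64_t final_block_size)
{
	const uint64_t kAssumedBlockAlign = 0x10;
	return align_up(final_block_offset + final_block_size, kAssumedBlockAlign);
}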
src/UserSettings.cpp

@@ -924,12 +924,12 @@ void UserSettings::dumpKeyConfig() const
 	std::cout << " NCA Keys:" << std::endl;
 	for (size_t i = 0; i < kMasterKeyNum; i++)
 	{
-		if (mKeyCfg.getContentArchiveHeader0SignKey(rsa2048_key, i) == true)
+		if (mKeyCfg.getContentArchiveHeader0SignKey(rsa2048_key, byte_t(i)) == true)
 			dumpRsa2048Key(rsa2048_key, "Header0-SignatureKey-" + kKeyIndex[i], 2);
 	}
 	for (size_t i = 0; i < kMasterKeyNum; i++)
 	{
-		if (mKeyCfg.getAcidSignKey(rsa2048_key, i) == true)
+		if (mKeyCfg.getAcidSignKey(rsa2048_key, byte_t(i)) == true)
 			dumpRsa2048Key(rsa2048_key, "Acid-SignatureKey-" + kKeyIndex[i], 2);
 	}
 
@@ -938,28 +938,28 @@ void UserSettings::dumpKeyConfig() const
 
 	for (size_t i = 0; i < kMasterKeyNum; i++)
 	{
-		if (mKeyCfg.getNcaKeyAreaEncryptionKey(i,0, aes_key) == true)
+		if (mKeyCfg.getNcaKeyAreaEncryptionKey(byte_t(i), 0, aes_key) == true)
 			dumpAesKey(aes_key, "KeyAreaEncryptionKey-Application-" + kKeyIndex[i], 2);
-		if (mKeyCfg.getNcaKeyAreaEncryptionKey(i,1, aes_key) == true)
+		if (mKeyCfg.getNcaKeyAreaEncryptionKey(byte_t(i), 1, aes_key) == true)
 			dumpAesKey(aes_key, "KeyAreaEncryptionKey-Ocean-" + kKeyIndex[i], 2);
-		if (mKeyCfg.getNcaKeyAreaEncryptionKey(i,2, aes_key) == true)
+		if (mKeyCfg.getNcaKeyAreaEncryptionKey(byte_t(i), 2, aes_key) == true)
 			dumpAesKey(aes_key, "KeyAreaEncryptionKey-System-" + kKeyIndex[i], 2);
 	}
 
 	for (size_t i = 0; i < kMasterKeyNum; i++)
 	{
-		if (mKeyCfg.getNcaKeyAreaEncryptionKeyHw(i,0, aes_key) == true)
+		if (mKeyCfg.getNcaKeyAreaEncryptionKeyHw(byte_t(i), 0, aes_key) == true)
 			dumpAesKey(aes_key, "KeyAreaEncryptionKeyHw-Application-" + kKeyIndex[i], 2);
-		if (mKeyCfg.getNcaKeyAreaEncryptionKeyHw(i,1, aes_key) == true)
+		if (mKeyCfg.getNcaKeyAreaEncryptionKeyHw(byte_t(i), 1, aes_key) == true)
 			dumpAesKey(aes_key, "KeyAreaEncryptionKeyHw-Ocean-" + kKeyIndex[i], 2);
-		if (mKeyCfg.getNcaKeyAreaEncryptionKeyHw(i,2, aes_key) == true)
+		if (mKeyCfg.getNcaKeyAreaEncryptionKeyHw(byte_t(i), 2, aes_key) == true)
 			dumpAesKey(aes_key, "KeyAreaEncryptionKeyHw-System-" + kKeyIndex[i], 2);
 	}
 
 	std::cout << " NRR Keys:" << std::endl;
 	for (size_t i = 0; i < kMasterKeyNum; i++)
 	{
-		if (mKeyCfg.getNrrCertificateSignKey(rsa2048_key, i) == true)
+		if (mKeyCfg.getNrrCertificateSignKey(rsa2048_key, byte_t(i)) == true)
 			dumpRsa2048Key(rsa2048_key, "Certificate-SignatureKey-" + kKeyIndex[i], 2);
 	}
 
@@ -975,7 +975,7 @@ void UserSettings::dumpKeyConfig() const
 	std::cout << " Package1 Keys:" << std::endl;
 	for (size_t i = 0; i < kMasterKeyNum; i++)
 	{
-		if (mKeyCfg.getPkg1Key(i, aes_key) == true)
+		if (mKeyCfg.getPkg1Key(byte_t(i), aes_key) == true)
 			dumpAesKey(aes_key, "EncryptionKey-" + kKeyIndex[i], 2);
 	}
 
@@ -984,14 +984,14 @@ void UserSettings::dumpKeyConfig() const
 	dumpRsa2048Key(rsa2048_key, "Signature Key", 2);
 	for (size_t i = 0; i < kMasterKeyNum; i++)
 	{
-		if (mKeyCfg.getPkg2Key(i, aes_key) == true)
+		if (mKeyCfg.getPkg2Key(byte_t(i), aes_key) == true)
 			dumpAesKey(aes_key, "EncryptionKey-" + kKeyIndex[i], 2);
 	}
 
 	std::cout << " ETicket Keys:" << std::endl;
 	for (size_t i = 0; i < kMasterKeyNum; i++)
 	{
-		if (mKeyCfg.getETicketCommonKey(i, aes_key) == true)
+		if (mKeyCfg.getETicketCommonKey(byte_t(i), aes_key) == true)
 			dumpAesKey(aes_key, "CommonKey-" + kKeyIndex[i], 2);
 	}
 
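The only change in this file is the explicit byte_t(i) casts: the loop index is a size_t, while the key-configuration getters take the key generation as a byte_t, so the narrowing is now spelled out instead of happening implicitly, which also quiets conversion warnings on stricter compilers. A minimal sketch of the idea, with illustrative names rather than the tool's API:

#include <cstddef>
#include <cstdint>

typedef uint8_t byte_t;

// Stand-in for a key getter that indexes by an 8-bit key generation.
static bool hasKeyForGeneration(byte_t key_generation)
{
	return key_generation < 0x20; // placeholder condition
}

static void dumpKeys(size_t master_key_num)
{
	for (size_t i = 0; i < master_key_num; i++)
	{
		// explicit, intentional narrowing of the size_t loop index
		if (hasKeyForGeneration(byte_t(i)))
		{
			// ... dump the key for generation i ...
		}
	}
}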
src/version.h

@@ -2,6 +2,6 @@
 #define APP_NAME "NSTool"
 #define BIN_NAME "nstool"
 #define VER_MAJOR 1
-#define VER_MINOR 3
+#define VER_MINOR 4
 #define VER_PATCH 0
 #define AUTHORS "jakcron"