Mirror of https://github.com/jakcron/nstool.git (synced 2024-12-22 18:55:29 +00:00)
[nstool] Fix critical bug in HashTreeWrappedIFile (bumped to v1.0.5)
commit 3716bf0b82 (parent 8bb759292d)
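In short: the build script now quotes the argument to basename, HashTreeWrappedIFile::read() is rewritten around explicit start/end block positions in place of the old per-iteration export-size correction, readData() drops the seek at the top of its body, and VER_PATCH moves from 4 to 5.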
@@ -34,7 +34,7 @@ else
 endif
 endif
 
 BIN_DIR = bin
-OUTPUT = $(BIN_DIR)/$(shell basename $(CURDIR))
+OUTPUT = $(BIN_DIR)/$(shell basename "$(CURDIR)")
 
 all: build
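Quoting "$(CURDIR)" here presumably guards against checkout paths that contain spaces, which the shell would otherwise word-split before the path reaches basename.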
@@ -23,45 +23,49 @@ void HashTreeWrappedIFile::seek(size_t offset)
 
 void HashTreeWrappedIFile::read(byte_t* out, size_t len)
 {
-	size_t offset_in_start_block = getOffsetInBlock(mDataOffset);
-	size_t offset_in_end_block = getOffsetInBlock(offset_in_start_block + len);
-
-	size_t start_block = getOffsetBlock(mDataOffset);
-	size_t block_num = align(offset_in_start_block + len, mDataBlockSize) / mDataBlockSize;
-
-	size_t partial_last_block_num = block_num % mCacheBlockNum;
-	bool has_partial_block_num = partial_last_block_num > 0;
-	size_t read_iterations = (block_num / mCacheBlockNum) + has_partial_block_num;
-
-	size_t block_read_len;
-	size_t block_export_offset;
-	size_t block_export_size;
-	size_t block_export_pos = 0;
-	for (size_t i = 0; i < read_iterations; i++)
-	{
-		// how many blocks to read from source file
-		block_read_len = (i+1 == read_iterations && has_partial_block_num) ? partial_last_block_num : mCacheBlockNum;
-
-		// offset in this current read to copy from
-		block_export_offset = (i == 0) ? offset_in_start_block : 0;
-
-		// size of current read to copy
-		block_export_size = (block_read_len * mDataBlockSize) - block_export_offset;
-
-		// if last read, reduce the export size by one block less offset_in_end_block
-		if (i+1 == read_iterations)
-		{
-			block_export_size -= (mDataBlockSize - offset_in_end_block);
-		}
-
-		// read the blocks
-		readData(start_block + (i * mCacheBlockNum), block_read_len);
-
-		// export the section of data that is relevant
-		memcpy(out + block_export_pos, mCache.data() + block_export_offset, block_export_size);
-
-		// update export position
-		block_export_pos += block_export_size;
+	struct sBlockPosition
+	{
+		size_t index;
+		size_t pos;
+	} start_blk, end_blk;
+
+	start_blk.index = getOffsetBlock(mDataOffset);
+	start_blk.pos = getOffsetInBlock(mDataOffset);
+
+	end_blk.index = getOffsetBlock(mDataOffset + len);
+	end_blk.pos = getOffsetInBlock(mDataOffset + len);
+	if (end_blk.pos == 0 && len != 0)
+	{
+		end_blk.index -= 1;
+		end_blk.pos = mDataBlockSize;
+	}
+
+	size_t total_blk_num = (end_blk.index - start_blk.index) + 1;
+
+	size_t read_blk_num = 0;
+	size_t cache_export_start_pos, cache_export_end_pos, cache_export_size;
+	size_t export_pos = 0;
+	for (size_t i = 0; i < total_blk_num; i += read_blk_num, export_pos += cache_export_size)
+	{
+		read_blk_num = _MIN(mCacheBlockNum, (total_blk_num - i));
+		readData(start_blk.index + i, read_blk_num);
+
+		// if this is the first read, adjust offset
+		if (i == 0)
+			cache_export_start_pos = start_blk.pos;
+		else
+			cache_export_start_pos = 0;
+
+		// if this is the last block, adjust end offset
+		if ((i + read_blk_num) == total_blk_num)
+			cache_export_end_pos = ((read_blk_num - 1) * mDataBlockSize) + end_blk.pos;
+		else
+			cache_export_end_pos = read_blk_num * mDataBlockSize;
+
+		// determine cache export size
+		cache_export_size = cache_export_end_pos - cache_export_start_pos;
+
+		memcpy(out + export_pos, mCache.data() + cache_export_start_pos, cache_export_size);
 	}
 
 	// update offset
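To make the new arithmetic concrete, below is a minimal standalone C++ sketch (not nstool code) of the start/end block computation the rewritten read() performs. It assumes getOffsetBlock() and getOffsetInBlock() are plain division and modulo by the block size, which matches how their results are used above but is not shown in this diff. The second case in main() is a read that ends exactly on a block boundary: with the old last-iteration correction, block_export_size -= (mDataBlockSize - offset_in_end_block), the subtraction removes a whole block and the final memcpy shrinks to zero bytes, which appears to be the critical case named in the commit title; the boundary adjustment below keeps the full block.

// Standalone sketch of the block-range arithmetic used by the rewritten read().
// Assumption: getOffsetBlock(x) == x / blockSize and getOffsetInBlock(x) == x % blockSize.
#include <cstdio>
#include <cstddef>

struct BlockRange
{
	size_t start_index; // first block touched by the read
	size_t start_pos;   // byte offset into that first block
	size_t end_index;   // last block touched by the read
	size_t end_pos;     // exclusive byte offset into that last block
};

static BlockRange calcBlockRange(size_t data_offset, size_t len, size_t block_size)
{
	BlockRange r;
	r.start_index = data_offset / block_size;
	r.start_pos = data_offset % block_size;
	r.end_index = (data_offset + len) / block_size;
	r.end_pos = (data_offset + len) % block_size;
	// Same adjustment as the new code: if the read ends exactly on a block
	// boundary, the "end" block is the previous one, read to its full size.
	if (r.end_pos == 0 && len != 0)
	{
		r.end_index -= 1;
		r.end_pos = block_size;
	}
	return r;
}

int main()
{
	const size_t block_size = 0x200;

	// Read that ends mid-block: 0x1F0..0x210 spans blocks 0 and 1, ending 0x10 into block 1.
	BlockRange a = calcBlockRange(0x1F0, 0x20, block_size);
	printf("a: blocks %zu..%zu, end pos 0x%zx\n", a.start_index, a.end_index, a.end_pos);

	// Read that ends exactly on a block boundary: 0x0..0x200.
	// The old code's last-iteration correction subtracted a full block here
	// (block_size - 0), reducing the copy to zero bytes; the adjustment above
	// instead reports block 0 read to its full 0x200 bytes.
	BlockRange b = calcBlockRange(0x0, 0x200, block_size);
	printf("b: blocks %zu..%zu, end pos 0x%zx\n", b.start_index, b.end_index, b.end_pos);

	return 0;
}

The rewritten loop then copies the window [cache_export_start_pos, cache_export_end_pos) out of each batch of up to mCacheBlockNum cached blocks, advancing export_pos by cache_export_size on every iteration.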
@@ -150,7 +154,6 @@ void HashTreeWrappedIFile::initialiseDataLayer(const HashTreeMeta& hdr)
 
 void HashTreeWrappedIFile::readData(size_t block_offset, size_t block_num)
 {
-	(*mData)->seek(block_offset * mDataBlockSize);
 	fnd::sha::sSha256Hash hash;
 
 	// determine read size
@@ -3,5 +3,5 @@
 #define BIN_NAME "nstool"
 #define VER_MAJOR 1
 #define VER_MINOR 0
-#define VER_PATCH 4
+#define VER_PATCH 5
 #define AUTHORS "jakcron"