@@ -8,6 +8,7 @@
 #include "SubDirectory.h"
 #include "SubFile.h"
 #include "SubfileData.h"
+#include "DatOperationResult.h"
 
 #include <EasyLogging++/easylogging++.h>
 
@@ -31,10 +32,8 @@ namespace LOTRO_DAT {
 
 
 
-    DatFile::DatFile() {
+    DatFile::DatFile() : io(*this) {
         dat_state_ = CLOSED;
-        root_directory_ = nullptr;
-        file_handler_ = nullptr;
         free_buffered_size_ = 0;
 
         orig_dict_.clear();
@@ -78,43 +77,18 @@ namespace LOTRO_DAT {
         dat_id_ = dat_id;
         dat_state_ = CLOSED;
         current_locale_ = ORIGINAL;
-        root_directory_ = nullptr;
-        file_handler_ = nullptr;
         free_buffered_size_ = 0;
         filename_ = "none";
 
         DAT_RESULT result;
         DAT_RESULT return_value = SUCCESS;
 
-        LOG(INFO) << "Opening .dat file " << filename;
-        result = OpenDatFile(filename.c_str());
-        if (result != SUCCESS) {
-            LOG(ERROR) << "Unable to perform opening file. Aborting.";
+        auto res = io.Init(filename);
+        if (res.result == DatOperationResult::ERROR) {
+            LOG(ERROR) << "ERROR! Unable to initialize input-output!";
             CloseDatFile();
-            return result;
-        }
-        return_value = std::max(return_value, result);
-
-        LOG(INFO) << "Starting ReadSuperBlock";
-
-        result = ReadSuperBlock();
-        if (result <= 0) {
-            LOG(ERROR) << "Unable to read super block. Aborting.";
-            CloseDatFile();
-            return result;
-        }
-        return_value = std::max(return_value, result);
-
-        LOG(INFO) << "Starting MakeDirectories";
-
-        result = MakeDirectories();
-        if (result <= 0) {
-            LOG(ERROR) << "Unable to make directories. Aborting.";
-            CloseDatFile();
-            return result;
+            return NO_FILE_ERROR;
         }
-        return_value = std::max(return_value, result);
-
         LOG(INFO) << "Starting MakeDictionary";
 
         result = MakeDictionary();
@@ -163,100 +137,6 @@ namespace LOTRO_DAT {
         return return_value;
     }
 
-    DAT_RESULT DatFile::OpenDatFile(const char *dat_name) {
-        LOG(DEBUG) << "Started opening DatFile";
-        if (dat_state_ != CLOSED) {
-            CloseDatFile();
-        }
-
-        file_handler_ = fopen(dat_name, "r+b");
-
-        if (file_handler_ == nullptr) {
-            LOG(ERROR) << "Unable to open file " << dat_name;
-            return NO_FILE_ERROR;
-        }
-
-        fseek(file_handler_, 0, SEEK_END);
-        actual_dat_size_ = ftell(file_handler_);
-        fseek(file_handler_, 0, SEEK_SET);
-
-        dat_state_ = SUCCESS_OPENED;
-        LOG(DEBUG) << "Successfully opened DatFile";
-        return SUCCESS;
-    }
-
-    DAT_RESULT DatFile::ReadSuperBlock() {
-        LOG(DEBUG) << "Started reading superblock";
-        if (dat_state_ != SUCCESS_OPENED) {
-            LOG(ERROR) << "Dat state isn't SUCCESS_OPENED. Cannot perform extraction.";
-            return INCORRECT_STATE_ERROR;
-        }
-
-        BinaryData data(1024);
-        ReadData(data, 1024);
-
-        constant1_ = data.ToNumber<4>(0x100);
-        constant2_ = data.ToNumber<4>(0x140);
-        version1_ = data.ToNumber<4>(0x14C);
-        file_size_ = data.ToNumber<4>(0x148);
-        version2_ = data.ToNumber<4>(0x150);
-        fragmentation_journal_offset_ = data.ToNumber<4>(0x154);
-        fragmentation_journal_end_ = data.ToNumber<4>(0x158);
-        fragmentation_journal_size_ = data.ToNumber<4>(0x15C);
-        root_directory_offset_ = data.ToNumber<4>(0x160);
-        free_dat_size_ = data.ToNumber<4>(0x19C);
-
-        if (constant1_ != 0x4C5000) {
-            LOG(ERROR) << "variable at position 0x100 is not equal to .dat file constant!";
-            return INCORRECT_SUPERBLOCK_ERROR;
-        }
-        if (constant2_ != 0x5442) {
-            LOG(ERROR) << "variable at position 0x140 is not equal to .dat file constant!";
-            return INCORRECT_SUPERBLOCK_ERROR;
-        }
-
-        if (file_size_ != actual_dat_size_) {
-            LOG(ERROR) << "variable at 0x148 position is not equal to .dat file size!";
-
-        }
-
-        dat_state_ = SUCCESS_SUPERBLOCK;
-        LOG(DEBUG) << "Superblock read successfully";
-        return SUCCESS;
-    }
-
-    DAT_RESULT DatFile::MakeDirectories() {
-        LOG(DEBUG) << "Started making directories";
-        if (dat_state_ != SUCCESS_SUPERBLOCK) {
-            LOG(ERROR) << "Dat state isn't SUCCESS_SUPERBLOCK. Cannot make directories.";
-            return INCORRECT_STATE_ERROR;
-        }
-
-        root_directory_ = std::make_shared<SubDirectory>((unsigned) root_directory_offset_, *this);
-        SubDirectory::subdir_init_queue_.insert(root_directory_);
-
-        while (!SubDirectory::subdir_init_queue_.empty()) {
-            std::shared_ptr<SubDirectory> dir = *SubDirectory::subdir_init_queue_.begin();
-            SubDirectory::subdir_init_queue_.erase(SubDirectory::subdir_init_queue_.begin());
-            if (dir->MakeSubDirectories())
-                SubDirectory::subfile_init_queue_.insert(dir);
-            else
-                dir->clear();
-        }
-
-        while (!SubDirectory::subfile_init_queue_.empty()) {
-            std::shared_ptr<SubDirectory> dir = *SubDirectory::subfile_init_queue_.begin();
-            SubDirectory::subfile_init_queue_.erase(SubDirectory::subfile_init_queue_.begin());
-            if (!dir->MakeSubFiles())
-                dir->clear();
-        }
-
-        dat_state_ = SUCCESS_DIRECTORIES;
-
-        LOG(DEBUG) << "Directories made successfully";
-        return SUCCESS;
-    }
-
     DAT_RESULT DatFile::MakeDictionary() {
         LOG(DEBUG) << "Started making dictionary";
         if (dat_state_ != SUCCESS_DIRECTORIES) {
@@ -264,12 +144,12 @@ namespace LOTRO_DAT {
             return INCORRECT_STATE_ERROR;
         }
 
-        if (root_directory_ == nullptr) {
+        if (io.GetRootDirectory() == nullptr) {
             LOG(ERROR) << "root_directory is nullptr!!";
             return INIT_ERROR;
         }
 
-        root_directory_->MakeDictionary(dictionary_);
+        io.GetRootDirectory()->MakeDictionary(dictionary_);
         dat_state_ = SUCCESS_DICTIONARY;
         LOG(DEBUG) << "Dictionary made successfull";
         return SUCCESS;
@@ -279,43 +159,44 @@ namespace LOTRO_DAT {
         LOG(INFO) << "Initialising locales...";
         BinaryData dicts_data(4);
 
-        ReadData(dicts_data, 4, 300);
+        io.ReadData(dicts_data, 4, 300);
         long long dict_offset = dicts_data.ToNumber<4>(0);
 
-        if (dict_offset == 0 || dict_offset + 8 >= actual_dat_size_) {
+        if (dict_offset == 0 || dict_offset + 8 >= io.GetActualDatSize()) {
             LOG(INFO) << "Dictionary offset is empty or incorrect. Passing.";
             return SUCCESS;
         }
 
-        ReadData(dicts_data, 4, dict_offset);
+        io.ReadData(dicts_data, 4, dict_offset);
         long long dict_size = dicts_data.ToNumber<4>(0);
 
-        ReadData(dicts_data, 4, dict_offset + 4);
+        io.ReadData(dicts_data, 4, dict_offset + 4);
         long long dict_version = dicts_data.ToNumber<4>(0);
 
-        ReadData(dicts_data, 4, dict_offset + 8);
-        file_size_ = dicts_data.ToNumber<4>(0);
+        io.ReadData(dicts_data, 4, dict_offset + 8);
+        io.file_size = dicts_data.ToNumber<4>(0);
 
-        LOG(INFO) << "Dictionary size is " << dict_size << ". Version is " << dict_version << ". Localed .dat size = " << file_size_;
+        LOG(INFO) << "Dictionary size is " << dict_size << ". Version is " << dict_version << ". Localed .dat size = "
+                  << io.file_size;
 
         if (dict_version != 101) {
             LOG(WARNING) << "DICTIONARY IS OLD!!!";
             orig_dict_.clear();
             patch_dict_.clear();
-            WriteData(BinaryData::FromNumber<4>(0), 4, 300);
+            io.WriteData(BinaryData::FromNumber<4>(0), 4, 300);
             dat_state_ = UPDATED;
             dat_without_patches_ = true;
             return SUCCESS;
         }
 
         dicts_data = BinaryData((unsigned)dict_size);
-        ReadData(dicts_data, dict_size, dict_offset + 12);
+        io.ReadData(dicts_data, dict_size, dict_offset + 12);
 
         if (dicts_data.size() < 15) {
             LOG(ERROR) << "Incorrect dictionary. Passing without it.";
             orig_dict_.clear();
             patch_dict_.clear();
-            WriteData(BinaryData::FromNumber<4>(0), 4, 300);
+            io.WriteData(BinaryData::FromNumber<4>(0), 4, 300);
             dat_state_ = UPDATED;
             dat_without_patches_ = true;
             return SUCCESS;
@@ -434,24 +315,20 @@ namespace LOTRO_DAT {
     }
 
 
-
+        io.DeInit();
         if (!pending_dictionary_.empty() || dat_state_ == UPDATED) {
             CommitLocales();
             CommitDirectories();
 
-
-
-
+
+
+
         }
 
         current_locale_ = ORIGINAL;
 
-        if (file_handler_ != nullptr) {
-            fclose(file_handler_);
-        }
         SubDirectory::visited_subdirectories_.clear();
-
 
         free_buffered_size_ = 0;
 
@@ -462,23 +339,9 @@ namespace LOTRO_DAT {
         pending_patch_.clear();
         inactive_categories.clear();
 
-        file_handler_ = nullptr;
-        root_directory_ = nullptr;
-
-
         pending_dictionary_.clear();
         dictionary_.clear();
 
-        constant1_ = 0;
-        constant2_ = 0;
-        file_size_ = 0;
-        version1_ = 0;
-        version2_ = 0;
-        fragmentation_journal_size_ = 0;
-        fragmentation_journal_end_ = 0;
-        root_directory_offset_ = 0;
-        fragmentation_journal_offset_ = 0;
-
         dat_state_ = CLOSED;
 
         dat_id_ = -1;
@@ -540,31 +403,33 @@ namespace LOTRO_DAT {
 
 
         BinaryData dicts_data(4);
-        ReadData(dicts_data, 4, 300);
+        io.ReadData(dicts_data, 4, 300);
         long long dict_offset = dicts_data.ToNumber<4>(0);
-        ReadData(dicts_data, 4, dict_offset);
+        io.ReadData(dicts_data, 4, dict_offset);
         long long dict_size = dicts_data.ToNumber<4>(0);
 
         if (binary_data.size() > dict_size || dict_offset == 0) {
-            WriteData(BinaryData::FromNumber<4>(file_size_), 4, 300);
+            io.WriteData(BinaryData::FromNumber<4>(io.file_size), 4, 300);
 
-            WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size() + 4, 20u * 1024u * 1024u)), 4, file_size_);
-            WriteData(BinaryData::FromNumber<4>(101), 4, file_size_ + 4);
-            WriteData(BinaryData::FromNumber<4>(file_size_ + binary_data.size() + 12 + 20 * 1024 * 1024), 4, file_size_ + 8);
+            io.WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size() + 4, 20u * 1024u * 1024u)), 4,
+                         io.file_size);
+            io.WriteData(BinaryData::FromNumber<4>(101), 4, io.file_size + 4);
+            io.WriteData(BinaryData::FromNumber<4>(io.file_size + binary_data.size() + 12 + 20 * 1024 * 1024), 4,
+                         io.file_size + 8);
 
-            WriteData(binary_data, binary_data.size(), file_size_ + 12);
-            file_size_ += binary_data.size() + 12;
+            io.WriteData(binary_data, binary_data.size(), io.file_size + 12);
+            io.file_size += binary_data.size() + 12;
 
 
             BinaryData nulls(unsigned(20 * 1024 * 1024));
-            WriteData(nulls, nulls.size(), file_size_);
-            file_size_ += nulls.size();
+            io.WriteData(nulls, nulls.size(), io.file_size);
+            io.file_size += nulls.size();
 
         } else {
-            WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size(), 20u * 1024u * 1024u)), 4, dict_offset);
-            WriteData(BinaryData::FromNumber<4>(101), 4, dict_offset + 4);
-            WriteData(BinaryData::FromNumber<4>(file_size_), 4, dict_offset + 8);
-            WriteData(binary_data, binary_data.size(), dict_offset + 12);
+            io.WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size(), 20u * 1024u * 1024u)), 4, dict_offset);
+            io.WriteData(BinaryData::FromNumber<4>(101), 4, dict_offset + 4);
+            io.WriteData(BinaryData::FromNumber<4>(io.file_size), 4, dict_offset + 8);
+            io.WriteData(binary_data, binary_data.size(), dict_offset + 12);
         }
         LOG(INFO) << "Locales commited successfully";
         return SUCCESS;
@@ -574,53 +439,38 @@ namespace LOTRO_DAT {
         for (auto file_id : pending_dictionary_) {
             if (dictionary_[file_id] == nullptr || !CorrectSubfile(dictionary_[file_id]))
                 continue;
-            WriteData(dictionary_[file_id]->MakeHeaderData(), 32, dictionary_[file_id]->dictionary_offset());
+            io.WriteData(dictionary_[file_id]->MakeHeaderData(), 32, dictionary_[file_id]->dictionary_offset());
         }
         pending_dictionary_.clear();
         return SUCCESS;
     }
 
     DAT_RESULT DatFile::ModifyFragmentationJournal() {
-        if (fragmentation_journal_size_ == 0)
+        if (io.fragmentation_journal_size == 0)
             return SUCCESS;
         LOG(DEBUG) << "Modifying fragmentation journal";
         BinaryData data(4);
-        ReadData(data, 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_);
+        io.ReadData(data, 4, io.fragmentation_journal_offset + 8 * io.fragmentation_journal_size);
         LOG(INFO) << "FREE_SIZE BLOCK = " << data.ToNumber<4>(0);
 
         long long free_size = data.ToNumber<4>(0);
-        long long free_offset = file_size_;
+        long long free_offset = io.file_size;
 
         BinaryData nulldata = BinaryData(unsigned(free_size));
-        WriteData(nulldata, nulldata.size(), file_size_);
-        file_size_ += nulldata.size();
+        io.WriteData(nulldata, nulldata.size(), io.file_size);
+        io.file_size += nulldata.size();
 
-        WriteData(BinaryData::FromNumber<4>(free_size), 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_);
-        WriteData(BinaryData::FromNumber<4>(free_offset), 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_ + 4);
+        io.WriteData(BinaryData::FromNumber<4>(free_size), 4,
+                     io.fragmentation_journal_offset + 8 * io.fragmentation_journal_size);
+        io.WriteData(BinaryData::FromNumber<4>(free_offset), 4,
+                     io.fragmentation_journal_offset + 8 * io.fragmentation_journal_size + 4);
 
 
-
+
         LOG(DEBUG) << "Finished modifying fragmentation journal";
         return SUCCESS;
     }
 
-    DAT_RESULT DatFile::UpdateHeader() {
-        LOG(DEBUG) << "Updating header";
-        WriteData(BinaryData::FromNumber<4>(constant1_), 4, 0x100);
-        WriteData(BinaryData::FromNumber<4>(constant2_), 4, 0x140);
-
-        WriteData(BinaryData::FromNumber<4>(file_size_), 4, 0x148);
-        WriteData(BinaryData::FromNumber<4>(version1_ ), 4, 0x14C);
-        WriteData(BinaryData::FromNumber<4>(version2_ ), 4, 0x150);
-        WriteData(BinaryData::FromNumber<4>(fragmentation_journal_offset_), 4, 0x154);
-        WriteData(BinaryData::FromNumber<4>(fragmentation_journal_end_), 4, 0x158);
-        WriteData(BinaryData::FromNumber<4>(fragmentation_journal_size_), 4, 0x15C);
-        WriteData(BinaryData::FromNumber<4>(root_directory_offset_), 4, 0x160);
-        WriteData(BinaryData::FromNumber<4>(free_dat_size_), 4, 0x19C);
-        LOG(DEBUG) << "Finished updating header";
-        return SUCCESS;
-    }
-
     DAT_RESULT DatFile::RepairDatFile() {
         for (const auto& file : dictionary_) {
             auto subfile = file.second;
@@ -761,10 +611,9 @@ namespace LOTRO_DAT {
 
     DAT_RESULT DatFile::PatchFile(const SubfileData &data) {
         LOG(DEBUG) << "Patching file with id = " << data.options["fid"].as<long long>() << ".";
-        actual_dat_size_ = std::max(file_size_, actual_dat_size_);
 
         if (!dat_without_patches_) {
-            file_size_ = actual_dat_size_;
+            io.file_size = io.GetActualDatSize();
         }
 
         if (dat_state_ < READY) {
@@ -864,8 +713,8 @@ namespace LOTRO_DAT {
 
 
 
-
-
+
+
 
 
         if (data.Empty()) {
@@ -890,13 +739,13 @@ namespace LOTRO_DAT {
         if ((patch_dict_.count(file_id) == 0 && file_id != 2013266257) || data.size() > file->block_size()
             || file->file_size() + 8 > file->block_size()) {
 
-            file->file_offset_ = file_size_;
+            file->file_offset_ = io.file_size;
             file->block_size_ = std::max((long long)data.size(), file->block_size_);
 
             free_buffered_size_ = std::max(0ll, free_buffered_size_ - file->block_size_ - 8);
             AddBufferedSize();
 
-            this->file_size_ += file->block_size_ + 8;
+            io.file_size += file->block_size_ + 8;
         }
 
         file->file_size_ = data.size() - 8;
@@ -910,7 +759,7 @@ namespace LOTRO_DAT {
 
 
 
-        WriteData(data, data.size(), file->file_offset());
+        io.WriteData(data, data.size(), file->file_offset());
 
 
         patch_dict_.erase(file_id);
@@ -948,7 +797,7 @@ namespace LOTRO_DAT {
         LOG(DEBUG) << "Getting file " << file->file_id() << " data";
 
         BinaryData mfile_id(20);
-        ReadData(mfile_id, 20, file->file_offset() + 8);
+        io.ReadData(mfile_id, 20, file->file_offset() + 8);
         if (mfile_id.Empty()) {
             LOG(ERROR) << "Error while reading file " << file->file_id() << " header (offset = "
                        << file->file_offset() << "); Aborting.";
@@ -964,82 +813,35 @@ namespace LOTRO_DAT {
 
         BinaryData data((unsigned)(file->file_size() + (8 - offset)));
         if (file->block_size() >= file->file_size() + 8) {
-            ReadData(data, file->file_size() + (8 - offset), file->file_offset() + offset);
+            io.ReadData(data, file->file_size() + (8 - offset), file->file_offset() + offset);
             return data;
         }
 
         BinaryData fragments_count(4);
-        ReadData(fragments_count, 4, file->file_offset());
+        io.ReadData(fragments_count, 4, file->file_offset());
 
         long long fragments_number = fragments_count.ToNumber<4>(0);
 
         long long current_block_size = file->block_size() - offset - 8 * fragments_number;
 
-        ReadData(data, current_block_size, file->file_offset() + offset);
+        io.ReadData(data, current_block_size, file->file_offset() + offset);
 
         BinaryData FragmentsDictionary(8 * unsigned(fragments_number));
-        ReadData(FragmentsDictionary, 8 * unsigned(fragments_number),
+        io.ReadData(FragmentsDictionary, 8 * unsigned(fragments_number),
                  file->file_offset() + file->block_size() - 8 * fragments_number);
 
 
         for (long long i = 0; i < fragments_number; i++) {
             long long fragment_size = FragmentsDictionary.ToNumber<4>(8 * i);
             long long fragment_offset = FragmentsDictionary.ToNumber<4>(8 * i + 4);
-            ReadData(data, std::min(fragment_size, file->file_size() - current_block_size), fragment_offset,
-                     current_block_size);
+            io.ReadData(data, std::min(fragment_size, file->file_size() - current_block_size), fragment_offset,
+                        current_block_size);
             current_block_size += fragment_size;
         }
         LOG(DEBUG) << "Successfully got file " << file->file_id() << " data";
         return data;
     }
 
-    DAT_RESULT DatFile::ReadData(BinaryData &data, long long size, long long offset, long long data_offset) {
-        if (dat_state_ == CLOSED) {
-            LOG(ERROR) << "Dat state is CLOSED. Cannot read data.";
-            data = BinaryData(0);
-            return INIT_ERROR;
-        }
-
-        if (data_offset + size > data.size()) {
-            LOG(ERROR) << "Trying to read more than BinaryData size: Reading " << size << " bytes from " << offset
-                       << " position.";
-            data = BinaryData(0);
-            return DAT_READ_ERROR;
-        }
-
-
-
-        if (offset + size > actual_dat_size_) {
-            LOG(ERROR) << "Trying to read more than DatFile size elapsed: Reading " << size << " bytes from " << offset
-                       << " position.";
-            data = BinaryData(0);
-            return DAT_READ_ERROR;
-        }
-
-        if (offset != ftell(file_handler_))
-            fseek(file_handler_, offset, SEEK_SET);
-        fread(data.data() + data_offset, unsigned(size), 1, file_handler_);
-        return SUCCESS;
-    }
-
-    DAT_RESULT DatFile::WriteData(const BinaryData &data, long long size, long long offset, long long data_offset) {
-        if (dat_state_ < SUCCESS_DICTIONARY) {
-            LOG(ERROR) << "Dat state isn't READY. Cannot write data.";
-            return INCORRECT_STATE_ERROR;
-        }
-
-        if (offset != ftell(file_handler_))
-            fseek(file_handler_, offset, SEEK_SET);
-
-        if (data_offset + size > data.size()) {
-            LOG(ERROR) << "Trying to write more than BinaryData size";
-            return DAT_WRITE_ERROR;
-        }
-
-        fwrite(data.data() + data_offset, unsigned(size), 1, file_handler_);
-        actual_dat_size_ = std::max(file_size_, actual_dat_size_);
-        return SUCCESS;
-    }
 
@@ -1117,7 +919,7 @@ namespace LOTRO_DAT {
 
     bool DatFile::CorrectSubfile(std::shared_ptr<SubFile> file) {
         BinaryData mfile_id(20);
-        ReadData(mfile_id, 20, file->file_offset() + 8);
+        io.ReadData(mfile_id, 20, file->file_offset() + 8);
         if (mfile_id.Empty())
            return false;
 
@@ -1142,7 +944,7 @@ namespace LOTRO_DAT {
 
 
 
-
+
 
 
@@ -1215,7 +1017,7 @@ namespace LOTRO_DAT {
         if (free_buffered_size_ >= MIN_BUFFERED_SIZE)
             return;
         BinaryData nulls(MAX_BUFFERED_SIZE);
-        WriteData(nulls, MAX_BUFFERED_SIZE, file_size_);
+        io.WriteData(nulls, MAX_BUFFERED_SIZE, io.file_size);
         free_buffered_size_ = MAX_BUFFERED_SIZE;
     }
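
Note: every hunk above routes raw .dat access through the new `io` member instead of `DatFile`'s own `file_handler_`. For orientation only, the interface implied by those call sites looks roughly like the sketch below. This is a reconstruction from this diff alone, not the real header: the class name `DatIO`, the exact signatures, the return types, and the contents of DatOperationResult.h are assumptions and live elsewhere in the patch.

```cpp
// Hypothetical sketch of the I/O facade used by DatFile in this patch.
// Reconstructed from the call sites in the diff; names and signatures are assumed.
#include <memory>
#include <string>

namespace LOTRO_DAT {
    class DatFile;
    class SubDirectory;
    class BinaryData;

    struct DatOperationResult {
        enum RESULT { SUCCESS, ERROR };   // the diff only shows DatOperationResult::ERROR
        RESULT result = SUCCESS;
        std::string msg;                  // assumed diagnostic field
    };

    class DatIO {                         // assumed name; the diff only shows the member `io`
    public:
        explicit DatIO(DatFile &dat);     // constructed as io(*this) in DatFile::DatFile()

        DatOperationResult Init(const std::string &filename); // open .dat, read superblock
        void DeInit();                                        // called from CloseDatFile()

        // Raw byte access; offsets are absolute positions inside the .dat file.
        DatOperationResult ReadData(BinaryData &data, long long size,
                                    long long offset = 0, long long data_offset = 0);
        DatOperationResult WriteData(const BinaryData &data, long long size,
                                     long long offset = 0, long long data_offset = 0);

        std::shared_ptr<SubDirectory> GetRootDirectory();     // replaces DatFile::root_directory_
        long long GetActualDatSize();                         // replaces DatFile::actual_dat_size_

        // Superblock-derived fields that used to live on DatFile (file_size_, etc.).
        long long file_size = 0;
        long long fragmentation_journal_offset = 0;
        long long fragmentation_journal_size = 0;
    };
}
```

Under these assumptions, `DatFile::Initialise` no longer opens the file or reads the superblock itself; the open/superblock/directory work presumably moves behind `io.Init()`, and the class keeps only the dictionary and locale handling, which is what the removal of `OpenDatFile`, `ReadSuperBlock`, `MakeDirectories`, `ReadData`, `WriteData`, and `UpdateHeader` in the hunks above reflects.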