#include "DatFile.h"

#include "BinaryData.h"
#include "SubDirectory.h"
#include "Subfile.h"
#include "SubfileData.h"

// Easylogging++ feature macros must be defined before the header is included
// in order to take effect.
#define ELPP_FEATURE_CRASH_LOG
#include <EasyLogging++/easylogging++.h>

#include <unistd.h>
#include <algorithm>
#include <iterator>
#include <locale>

INITIALIZE_EASYLOGGINGPP

#ifdef WIN32
#define fseek _fseeki64
#define ftell _ftelli64
#endif

extern "C++"
{
namespace LOTRO_DAT {
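
    // Lifecycle of dat_state_, as used throughout this file:
    //   CLOSED -> SUCCESS_OPENED      (OpenDatFile)
    //          -> SUCCESS_SUPERBLOCK  (ReadSuperBlock)
    //          -> SUCCESS_DIRECTORIES (MakeDirectories)
    //          -> SUCCESS_DICTIONARY  (MakeDictionary)
    //          -> READY               (InitDatFile)
    // Any modification switches the state to UPDATED, which forces
    // CommitLocales()/CommitDirectories() when the file is closed.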

    DatFile::DatFile() {
        dat_state_ = CLOSED;
        root_directory_ = nullptr;
        file_handler_ = nullptr;
        free_buffered_size_ = 0;

        orig_dict_.clear();
        patch_dict_.clear();
        dictionary_.clear();

        el::Configurations defaultConf;
        defaultConf.setToDefault();
        defaultConf.setGlobally(el::ConfigurationType::Format,
                                "%datetime %level %fbase (line %line) : %msg (function: %func)");
        defaultConf.setGlobally(el::ConfigurationType::ToFile, "true");
        defaultConf.setGlobally(el::ConfigurationType::Filename, "dat_library.log");
        defaultConf.setGlobally(el::ConfigurationType::ToStandardOutput, "false");
        defaultConf.setGlobally(el::ConfigurationType::PerformanceTracking, "true");
        defaultConf.setGlobally(el::ConfigurationType::MaxLogFileSize, "5242880");
        defaultConf.setGlobally(el::ConfigurationType::LogFlushThreshold, "1");

        defaultConf.set(el::Level::Debug, el::ConfigurationType::Enabled, "false");
        defaultConf.set(el::Level::Debug, el::ConfigurationType::Filename, "dat_library_debug.log");

        el::Loggers::reconfigureAllLoggers(defaultConf);
        LOG(INFO) << "==================================================================";
        LOG(INFO) << "Starting new DatFile class instance";
    }
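
    // Minimal usage sketch (illustrative only; error handling is omitted and
    // the file name below is a placeholder):
    //
    //     DatFile dat;
    //     if (dat.InitDatFile("client_local_English.dat", 0) == SUCCESS) {
    //         dat.ExtractFile(some_file_id, "out/some_file");
    //         dat.CloseDatFile();
    //     }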

    DAT_RESULT DatFile::InitDatFile(const std::string &filename, int dat_id) {
        LOG(DEBUG) << "Started initialisation of DatFile " << filename;
        if (dat_state_ != CLOSED && filename == filename_) {
            LOG(DEBUG) << "Trying to reopen the same file: " << filename << ". Doing nothing.";
            return SUCCESS;
        }

        if (dat_state_ != CLOSED && filename != filename_) {
            LOG(DEBUG) << "DatFile wasn't closed yet. Closing in order to reopen.";
            if (CloseDatFile() != SUCCESS) {
                LOG(ERROR) << "Unable to perform CloseDatFile()! Aborting initialization!";
                return FAILED;
            }
        }

        dat_id_ = dat_id;
        dat_state_ = CLOSED;
        current_locale_ = ORIGINAL;
        root_directory_ = nullptr;
        file_handler_ = nullptr;
        free_buffered_size_ = 0;
        filename_ = "none";

        DAT_RESULT result;
        DAT_RESULT return_value = SUCCESS;

        result = OpenDatFile(filename.c_str());
        if (result != SUCCESS) {
            LOG(ERROR) << "Unable to open file. Aborting.";
            CloseDatFile();
            return result;
        }
        return_value = std::max(return_value, result);

        result = ReadSuperBlock();
        if (result <= 0) {
            LOG(ERROR) << "Unable to read super block. Aborting.";
            CloseDatFile();
            return result;
        }
        return_value = std::max(return_value, result);

        result = MakeDirectories();
        if (result <= 0) {
            LOG(ERROR) << "Unable to make directories. Aborting.";
            CloseDatFile();
            return result;
        }
        return_value = std::max(return_value, result);

        result = MakeDictionary();
        if (result <= 0) {
            LOG(ERROR) << "Unable to make dictionary. Aborting.";
            CloseDatFile();
            return result;
        }
        return_value = std::max(return_value, result);

        result = InitLocales();
        if (result <= 0) {
            LOG(ERROR) << "Unable to initialize locales. Aborting.";
            CloseDatFile();
            return result;
        }
        return_value = std::max(return_value, result);

        LOG(INFO) << "File " << filename << " opened successfully!";
        filename_ = filename;
        dat_state_ = READY;

        LOG(INFO) << "Making last preparations...";

        PerformDictionaryCheck();

        if (return_value >= 2) {
            LOG(WARNING) << "Dat file could be corrupted. Trying to delete corrupted dictionary rows";
            if (RepairDatFile() != SUCCESS)
                return CRITICAL_DAT_ERROR;
        }

        if (CheckIfUpdatedByGame()) {
            LOG(INFO) << ".dat file was updated by game! Need to reinitialize files and directories!";
            CloseDatFile();
            InitDatFile(filename, dat_id);
        }

        dat_without_patches_ = CheckIfNotPatched();
        LOG(INFO) << "Preparations made successfully! Init return value = " << return_value;
        return return_value;
    }

    DAT_RESULT DatFile::OpenDatFile(const char *dat_name) {
        LOG(DEBUG) << "Started opening DatFile";
        if (dat_state_ != CLOSED) {
            CloseDatFile();
        }

        file_handler_ = fopen(dat_name, "r+b");

        if (file_handler_ == nullptr) {
            LOG(ERROR) << "Unable to open file " << dat_name;
            return NO_FILE_ERROR;
        }

        fseek(file_handler_, 0, SEEK_END);
        actual_dat_size_ = ftell(file_handler_);
        fseek(file_handler_, 0, SEEK_SET);

        dat_state_ = SUCCESS_OPENED;
        LOG(DEBUG) << "Successfully opened DatFile";
        return SUCCESS;
    }
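
    // Superblock layout (offsets within the first 1024 bytes, as read below):
    //   0x100  constant1 (expected 0x4C5000)
    //   0x140  constant2 (expected 0x5442)
    //   0x148  file size
    //   0x14C  version1
    //   0x150  version2
    //   0x154  fragmentation journal offset
    //   0x158  fragmentation journal end
    //   0x15C  fragmentation journal size
    //   0x160  root directory offset
    //   0x19C  free dat size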

    DAT_RESULT DatFile::ReadSuperBlock() {
        LOG(DEBUG) << "Started reading superblock";
        if (dat_state_ != SUCCESS_OPENED) {
            LOG(ERROR) << "Dat state isn't SUCCESS_OPENED. Cannot read superblock.";
            return INCORRECT_STATE_ERROR;
        }

        BinaryData data(1024);
        ReadData(data, 1024);

        constant1_ = data.ToNumber<4>(0x100);
        constant2_ = data.ToNumber<4>(0x140);
        version1_ = data.ToNumber<4>(0x14C);
        file_size_ = data.ToNumber<4>(0x148);
        version2_ = data.ToNumber<4>(0x150);
        fragmentation_journal_offset_ = data.ToNumber<4>(0x154);
        fragmentation_journal_end_ = data.ToNumber<4>(0x158);
        fragmentation_journal_size_ = data.ToNumber<4>(0x15C);
        root_directory_offset_ = data.ToNumber<4>(0x160);
        free_dat_size_ = data.ToNumber<4>(0x19C);

        if (constant1_ != 0x4C5000) {
            LOG(ERROR) << "Variable at position 0x100 is not equal to .dat file constant!";
            return INCORRECT_SUPERBLOCK_ERROR;
        }
        if (constant2_ != 0x5442) {
            LOG(ERROR) << "Variable at position 0x140 is not equal to .dat file constant!";
            return INCORRECT_SUPERBLOCK_ERROR;
        }

        if (file_size_ != actual_dat_size_) {
            // The mismatch is logged but deliberately not treated as fatal.
            LOG(ERROR) << "Variable at position 0x148 is not equal to .dat file size!";
        }

        dat_state_ = SUCCESS_SUPERBLOCK;
        LOG(DEBUG) << "Superblock read successfully";
        return SUCCESS;
    }

    DAT_RESULT DatFile::MakeDirectories() {
        LOG(DEBUG) << "Started making directories";
        if (dat_state_ != SUCCESS_SUPERBLOCK) {
            LOG(ERROR) << "Dat state isn't SUCCESS_SUPERBLOCK. Cannot make directories.";
            return INCORRECT_STATE_ERROR;
        }

        root_directory_ = new SubDirectory((unsigned) root_directory_offset_, this);
        dat_state_ = SUCCESS_DIRECTORIES;

        LOG(DEBUG) << "Directories made successfully";
        return SUCCESS;
    }

    DAT_RESULT DatFile::MakeDictionary() {
        LOG(DEBUG) << "Started making dictionary";
        if (dat_state_ != SUCCESS_DIRECTORIES) {
            LOG(ERROR) << "Dat state isn't SUCCESS_DIRECTORIES. Cannot make dictionary.";
            return INCORRECT_STATE_ERROR;
        }

        if (root_directory_ == nullptr) {
            LOG(ERROR) << "root_directory_ is nullptr!";
            return INIT_ERROR;
        }

        root_directory_->MakeDictionary(dictionary_);
        dat_state_ = SUCCESS_DICTIONARY;
        LOG(DEBUG) << "Dictionary made successfully";
        return SUCCESS;
    }
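
    // On-disk layout of the locale dictionary blob, as parsed by InitLocales()
    // and produced by CommitLocales(). Its offset is stored as a 4-byte number
    // at position 300 of the .dat file; the blob starts with a 4-byte size and
    // a 4-byte version (expected to be 100), followed by:
    //   15 bytes  magic string "Hi from Gi1dor!"
    //    4 bytes  current locale tag: "ORIG" or "PATC"
    //    4 bytes  original dictionary entry count, then 36 bytes per entry
    //             (32-byte subfile header + 4-byte category)
    //    4 bytes  patch dictionary entry count, then 36 bytes per entry
    //    4 bytes  inactive category count, then 4 bytes per category id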

    DAT_RESULT DatFile::InitLocales() {
        LOG(INFO) << "Initialising locales...";
        BinaryData dicts_data(4);

        ReadData(dicts_data, 4, 300);
        long long dict_offset = dicts_data.ToNumber<4>(0);

        if (dict_offset == 0 || dict_offset + 8 >= actual_dat_size_) {
            LOG(INFO) << "Dictionary offset is empty or incorrect. Passing.";
            return SUCCESS;
        }

        ReadData(dicts_data, 4, dict_offset);
        long long dict_size = dicts_data.ToNumber<4>(0);

        ReadData(dicts_data, 4, dict_offset + 4);
        long long dict_version = dicts_data.ToNumber<4>(0);
        LOG(INFO) << "Dictionary size is " << dict_size << ". Version is " << dict_version;

        if (dict_version != 100) {
            LOG(WARNING) << "Dictionary version is outdated!";
            return SUCCESS;
        }

        dicts_data = BinaryData((unsigned)dict_size);
        ReadData(dicts_data, dict_size, dict_offset + 8);

        if (dicts_data.size() < 15) {
            LOG(ERROR) << "Incorrect dictionary. Passing without it.";
            orig_dict_.clear();
            patch_dict_.clear();
            WriteData(BinaryData::FromNumber<4>(0), 4, 300);
            dat_state_ = UPDATED;
            dat_without_patches_ = true;
            return SUCCESS;
        }

        BinaryData hi_data = dicts_data.CutData(0, 15) + BinaryData("\0", 1);
        std::string hi = std::string((char *) (hi_data.data()));
        LOG(DEBUG) << "hi info is " << hi;

        if (hi != "Hi from Gi1dor!") {
            LOG(WARNING) << "Didn't receive 'hi' from Gi1dor... Initialising locale dicts as empty";
            LOG(INFO) << "Couldn't init locales' file... Continuing without them";
            return SUCCESS;
        }

        int offset = 15;
        BinaryData current_locale_data = dicts_data.CutData(offset, offset + 4) + BinaryData("\0", 1);
        std::string locale((char *) (current_locale_data.data()));
        offset += 4;
        LOG(DEBUG) << "current locale: " << locale;

        if (locale != "PATC" && locale != "ORIG") {
            LOG(WARNING) << "Incorrect locale... Initialising locale dicts as empty";
            LOG(INFO) << "Couldn't recognize locale... Continuing without locales";
            return SUCCESS;
        }
        current_locale_ = (locale == "PATC" ? PATCHED : ORIGINAL);

        size_t orig_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
        offset += 4;
        for (size_t i = 0; i < orig_dict_size; i++) {
            auto file = new Subfile(this, dicts_data.CutData(offset, offset + 32));
            orig_dict_[file->file_id()] = file;
            offset += 32;
            orig_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
            offset += 4;

            if (orig_dict_[file->file_id()]->category == 0)
                LOG(DEBUG) << "file category is undefined (0)!";
        }

        size_t patch_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
        offset += 4;
        for (size_t i = 0; i < patch_dict_size; i++) {
            auto file = new Subfile(this, dicts_data.CutData(offset, offset + 32));
            patch_dict_[file->file_id()] = file;
            offset += 32;
            patch_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
            offset += 4;
            if (patch_dict_[file->file_id()]->category == 0)
                LOG(DEBUG) << "file category is undefined (0)!";
        }

        size_t active_patches_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
        offset += 4;
        for (size_t i = 0; i < active_patches_dict_size; i++) {
            inactive_categories.insert(dicts_data.ToNumber<4>(offset));
            offset += 4;
        }

        LOG(INFO) << "There are " << patch_dict_.size() << " files in patch locale dictionary";
        LOG(INFO) << "There are " << orig_dict_.size() << " files in original locale dictionary";
        std::string inactive_cat_s;
        for (auto i : inactive_categories) {
            inactive_cat_s += std::to_string(i) + " ";
        }
        LOG(INFO) << "Inactive patches now: " << inactive_cat_s;
        LOG(INFO) << "Finished initialising locales";
        return SUCCESS;
    }

    DAT_RESULT DatFile::PerformDictionaryCheck() {
        // Erasure during iteration invalidates std::map iterators, so the loop
        // advances explicitly and uses the iterator returned by erase().
        for (auto it = dictionary_.begin(); it != dictionary_.end(); ) {
            auto file = it->second;
            auto file_id = it->first;
            if (CorrectSubfile(file)) {
                ++it;
                continue;
            }

            if (current_locale_ == PATCHED && orig_dict_.count(file_id) > 0) {
                LOG(WARNING) << "Potentially incorrect patched version of file " << file_id
                             << ". Switching to original.";
                it->second = orig_dict_[file_id];
                file = it->second;
            }

            if (!CorrectSubfile(file)) {
                LOG(ERROR) << "Incorrect file " << file_id << ". Its stored offset is " << file->file_offset()
                           << ". Erasing it from dictionary.";
                it = dictionary_.erase(it);
            } else {
                ++it;
            }
        }
        return SUCCESS;
    }
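
    // dictionary_ is the live view of the .dat contents; orig_dict_ and
    // patch_dict_ keep the original and patched headers of every file that has
    // ever been patched, so the active locale can be switched without touching
    // file data (see SetLocale()).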

    DatFile::~DatFile() {
        CloseDatFile();
    }

    DAT_RESULT DatFile::CloseDatFile() {
        LOG(INFO) << "Closing DatFile";
        if (dat_state_ == CLOSED) {
            LOG(INFO) << "DatFile is already closed. Nothing to do";
            return SUCCESS;
        }

        if (!pending_dictionary_.empty() || dat_state_ == UPDATED) {
            CommitLocales();
            CommitDirectories();
        }

        current_locale_ = ORIGINAL;

        if (file_handler_ != nullptr) {
            fclose(file_handler_);
        }
        SubDirectory::visited_subdirectories_.clear();
        delete root_directory_;

        free_buffered_size_ = 0;

        filename_ = "none";

        orig_dict_.clear();
        patch_dict_.clear();
        pending_patch_.clear();
        inactive_categories.clear();

        file_handler_ = nullptr;
        root_directory_ = nullptr;

        pending_dictionary_.clear();
        dictionary_.clear();

        constant1_ = 0;
        constant2_ = 0;
        file_size_ = 0;
        version1_ = 0;
        version2_ = 0;
        fragmentation_journal_size_ = 0;
        fragmentation_journal_end_ = 0;
        root_directory_offset_ = 0;
        fragmentation_journal_offset_ = 0;

        dat_state_ = CLOSED;

        dat_id_ = -1;

        LOG(INFO) << "File closed successfully.";
        return SUCCESS;
    }
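
    // CommitLocales() serializes orig_dict_/patch_dict_/inactive_categories in
    // the exact layout InitLocales() expects (see the comment above that
    // function). If the existing blob is too small, a new one is written at
    // the end of the file and its offset is stored at position 300.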

    DAT_RESULT DatFile::CommitLocales() {
        LOG(INFO) << "Committing locales...";

        BinaryData binary_data = BinaryData(14 + 15 + 4
                                            + 4 + (32 + 4) * orig_dict_.size()
                                            + 4 + (32 + 4) * patch_dict_.size()
                                            + 4 + 4 * inactive_categories.size());

        size_t current_size = 0;
        binary_data.Append(BinaryData("Hi from Gi1dor!", 15), current_size);
        current_size += 15;

        binary_data.Append(BinaryData((current_locale_ == ORIGINAL ? "ORIG" : "PATC"), 4), current_size);
        current_size += 4;

        binary_data.Append(BinaryData::FromNumber<4>(orig_dict_.size()), current_size);
        current_size += 4;

        for (auto file : orig_dict_) {
            binary_data.Append(file.second->MakeHeaderData(), current_size);
            current_size += 32;
            binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
            current_size += 4;
        }

        binary_data.Append(BinaryData::FromNumber<4>(patch_dict_.size()), current_size);
        current_size += 4;

        for (auto file : patch_dict_) {
            binary_data.Append(file.second->MakeHeaderData(), current_size);
            current_size += 32;
            binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
            current_size += 4;
        }

        binary_data.Append(BinaryData::FromNumber<4>(inactive_categories.size()), current_size);
        current_size += 4;
        for (auto patch_id : inactive_categories) {
            binary_data.Append(BinaryData::FromNumber<4>(patch_id), current_size);
            current_size += 4;
        }

        BinaryData dicts_data(4);
        ReadData(dicts_data, 4, 300);
        long long dict_offset = dicts_data.ToNumber<4>(0);
        ReadData(dicts_data, 4, dict_offset);
        long long dict_size = dicts_data.ToNumber<4>(0);

        if (binary_data.size() > dict_size || dict_offset == 0) {
            WriteData(BinaryData::FromNumber<4>(file_size_), 4, 300);

            WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size() + 4, 20u * 1024u * 1024u)), 4, file_size_);
            WriteData(BinaryData::FromNumber<4>(100), 4, file_size_ + 4);
            WriteData(binary_data, binary_data.size(), file_size_ + 8);
            file_size_ += binary_data.size() + 8;

            BinaryData nulls(unsigned(20 * 1024 * 1024));
            WriteData(nulls, nulls.size(), file_size_);
            file_size_ += nulls.size();

        } else {
            WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size(), 20u * 1024u * 1024u)), 4, dict_offset);
            WriteData(BinaryData::FromNumber<4>(100), 4, dict_offset + 4);
            WriteData(binary_data, binary_data.size(), dict_offset + 8);
        }
        LOG(INFO) << "Locales committed successfully";
        return SUCCESS;
    }

    DAT_RESULT DatFile::CommitDirectories() {
        for (auto file_id : pending_dictionary_) {
            if (dictionary_[file_id] == nullptr || !CorrectSubfile(dictionary_[file_id]))
                continue;
            WriteData(dictionary_[file_id]->MakeHeaderData(), 32, dictionary_[file_id]->dictionary_offset());
        }
        pending_dictionary_.clear();
        return SUCCESS;
    }

    DAT_RESULT DatFile::ModifyFragmentationJournal() {
        if (fragmentation_journal_size_ == 0)
            return SUCCESS;
        LOG(DEBUG) << "Modifying fragmentation journal";
        BinaryData data(4);
        ReadData(data, 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_);
        LOG(INFO) << "FREE_SIZE BLOCK = " << data.ToNumber<4>(0);

        long long free_size = data.ToNumber<4>(0);
        long long free_offset = file_size_;

        BinaryData nulldata = BinaryData(unsigned(free_size));
        WriteData(nulldata, nulldata.size(), file_size_);
        file_size_ += nulldata.size();

        WriteData(BinaryData::FromNumber<4>(free_size), 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_);
        WriteData(BinaryData::FromNumber<4>(free_offset), 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_ + 4);

        LOG(DEBUG) << "Finished modifying fragmentation journal";
        return SUCCESS;
    }

    DAT_RESULT DatFile::UpdateHeader() {
        LOG(DEBUG) << "Updating header";
        WriteData(BinaryData::FromNumber<4>(constant1_), 4, 0x100);
        WriteData(BinaryData::FromNumber<4>(constant2_), 4, 0x140);

        WriteData(BinaryData::FromNumber<4>(file_size_), 4, 0x148);
        WriteData(BinaryData::FromNumber<4>(version1_), 4, 0x14C);
        WriteData(BinaryData::FromNumber<4>(version2_), 4, 0x150);
        WriteData(BinaryData::FromNumber<4>(fragmentation_journal_offset_), 4, 0x154);
        WriteData(BinaryData::FromNumber<4>(fragmentation_journal_end_), 4, 0x158);
        WriteData(BinaryData::FromNumber<4>(fragmentation_journal_size_), 4, 0x15C);
        WriteData(BinaryData::FromNumber<4>(root_directory_offset_), 4, 0x160);
        WriteData(BinaryData::FromNumber<4>(free_dat_size_), 4, 0x19C);
        LOG(DEBUG) << "Finished updating header";
        return SUCCESS;
    }

    DAT_RESULT DatFile::RepairDatFile() {
        for (auto file : dictionary_) {
            auto subfile = file.second;
            auto file_id = file.first;

            if (CorrectSubfile(subfile))
                continue;

            // As soon as one incorrect subfile is found, the locale
            // dictionaries are dropped entirely and repair stops here.
            // NOTE: the per-file recovery below is unreachable because of this
            // early return; it is kept as a reference for a finer-grained repair.
            orig_dict_.clear();
            patch_dict_.clear();
            return SUCCESS;

            if (orig_dict_.count(file_id) == 0 || subfile->file_offset() == orig_dict_[file_id]->file_offset())
                return CRITICAL_DAT_ERROR;

            *dictionary_[file_id] = *orig_dict_[file_id];
            patch_dict_.erase(file_id);
            orig_dict_.erase(file_id);
        }
        return SUCCESS;
    }

    DAT_STATE DatFile::DatFileState() const {
        return dat_state_;
    }

    long long DatFile::files_number() const {
        return dictionary_.size();
    }

    DAT_RESULT DatFile::ExtractFile(long long file_id, const std::string &path) {
        LOG(DEBUG) << "Extracting file " << file_id << " to path " << path;
        if (dat_state_ < READY) {
            LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
            return INCORRECT_STATE_ERROR;
        }
        BinaryData file_data = GetFileData(dictionary_[file_id], 8);

        if (file_data.size() == 0) {
            LOG(ERROR) << "File data is empty. Aborting extraction.";
            return NO_FILE_ERROR;
        }

        SubfileData export_data = dictionary_[file_id]->PrepareForExport(file_data);

        if (export_data.Empty()) {
            LOG(ERROR) << "Export data is empty. Aborting extraction.";
            return NO_FILE_ERROR;
        }

        if (export_data.binary_data.WriteToFile(path + export_data.options["ext"].as<std::string>()) != SUCCESS) {
            LOG(ERROR) << "Cannot write to file.";
            return WRITE_TO_FILE_ERROR;
        }
        LOG(DEBUG) << "File " << file_id << " extracted successfully";
        return SUCCESS;
    }

    DAT_RESULT DatFile::ExtractFile(long long file_id, Database *db) {
        LOG(DEBUG) << "Extracting file " << file_id << " to database.";

        if (dat_state_ < READY) {
            LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
            return INCORRECT_STATE_ERROR;
        }

        BinaryData file_data = GetFileData(dictionary_[file_id], 8);

        if (file_data.Empty()) {
            LOG(WARNING) << "File with id " << dictionary_[file_id]->file_id() << " is empty. Passing it.";
            return SUCCESS;
        }

        SubfileData export_data;
        export_data = dictionary_[file_id]->PrepareForExport(file_data);
        export_data.options["did"] = dat_id_;

        if (export_data == SubfileData()) {
            LOG(WARNING) << "File with id " << dictionary_[file_id]->file_id() << " is empty or incorrect.";
            return SUCCESS;
        }

        try {
            db->PushFile(export_data);
        } catch (std::exception &e) {
            LOG(ERROR) << "Caught " << e.what() << " exception.";
            return FAILED;
        }
        LOG(DEBUG) << "File " << file_id << " extracted successfully";
        return SUCCESS;
    }
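
    // Note on the two ExtractAllFilesByType() overloads below: they return the
    // number of successfully extracted files, not a DAT_RESULT. When the state
    // check fails they return INCORRECT_STATE_ERROR as that count, so callers
    // should check DatFileState() first rather than relying on the returned
    // value alone.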

    int DatFile::ExtractAllFilesByType(FILE_TYPE type, std::string path) {
        LOG(INFO) << "Extracting all files to path " << path;
        if (dat_state_ < READY) {
            LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
            return INCORRECT_STATE_ERROR;
        }

        int success = 0;
        for (auto i : dictionary_) {
            FILE_TYPE file_type = i.second->FileType();
            if (file_type == type) {
                success += (ExtractFile(i.second->file_id(), (path + std::to_string(i.second->file_id()))) == SUCCESS
                            ? 1 : 0);
            }
        }
        LOG(INFO) << "Successfully extracted " << success << " files";
        return success;
    }

    int DatFile::ExtractAllFilesByType(FILE_TYPE type, Database *db) {
        LOG(INFO) << "Extracting all files to database...";

        if (dat_state_ < READY) {
            LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
            return INCORRECT_STATE_ERROR;
        }

        int success = 0;
        for (auto i : dictionary_) {
            FILE_TYPE file_type = i.second->FileType();
            if (file_type == type) {
                success += (ExtractFile(i.second->file_id(), db) == SUCCESS ? 1 : 0);
            }
        }
        LOG(INFO) << "Extracted " << success << " files";
        return success;
    }
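
    // Patching pipeline: PatchFile() resolves the target Subfile, assigns it a
    // category, builds the patched payload via MakeForImport() and hands it to
    // ApplyFilePatch(), which writes the data (relocating the file to the end
    // of the .dat when the new payload doesn't fit into the old block) and
    // records the original and patched headers in orig_dict_/patch_dict_.
    // File id 2013266257 is special-cased throughout: it is patched in place
    // and never tracked in the locale dictionaries.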

    DAT_RESULT DatFile::PatchFile(const SubfileData &data) {
        LOG(DEBUG) << "Patching file with id = " << data.options["fid"].as<long long>() << ".";
        actual_dat_size_ = std::max(file_size_, actual_dat_size_);

        if (!dat_without_patches_) {
            file_size_ = actual_dat_size_;
        }

        if (dat_state_ < READY) {
            LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
            return INCORRECT_STATE_ERROR;
        }

        auto file_id = data.options["fid"].as<long long>();

        if (dictionary_.count(file_id) == 0) {
            LOG(ERROR) << "Cannot patch file - there is no file in dictionary with file_id = " << file_id;
            return NO_FILE_ERROR;
        }
        Subfile *file = dictionary_[file_id];

        if (inactive_categories.count(file->category) != 0 && patch_dict_.count(file_id) != 0 && file_id != 2013266257) {
            *dictionary_[file_id] = *patch_dict_[file_id];
        }

        if (data.options["cat"].IsDefined()) {
            file->category = data.options["cat"].as<long long>();
        } else {
            file->category = 1;
        }

        BinaryData old_data = GetFileData(orig_dict_.count(file->file_id()) == 0 ? file : orig_dict_[file->file_id_]);
        if (old_data.Empty()) {
            LOG(ERROR) << "GetFileData returned empty data. Aborting.";
            return DAT_PATCH_FILE_ERROR;
        }

        BinaryData patch_data = file->MakeForImport(old_data, data);
        DAT_RESULT result = ApplyFilePatch(file, patch_data);
        if (result != SUCCESS)
            return result;

        LOG(DEBUG) << "Successfully patched file " << data.options["fid"].as<long long>() << ".";
        return SUCCESS;
    }

    DAT_RESULT DatFile::PatchAllDatabase(Database *db) {
        LOG(INFO) << "Patching all database";
        if (dat_state_ < READY) {
            LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
            return INCORRECT_STATE_ERROR;
        }

        SubfileData data;
        data = db->GetNextFile();

        while (!data.Empty()) {
            DAT_RESULT result = PatchFile(data);
            if (result != SUCCESS)
                LOG(ERROR) << "Cannot patch file " << data.options["fid"].as<long long>() << "; continuing";
            data = db->GetNextFile();
        }
        LOG(INFO) << "Successfully patched whole database";
        return SUCCESS;
    }

    DAT_RESULT DatFile::WriteUnorderedDictionary(std::string path) const {
        LOG(INFO) << "Writing unordered dictionary to " << path << "dict.txt";
        FILE *f = nullptr;
        fopen_s(&f, (path + "dict.txt").c_str(), "w");

        if (f == nullptr) {
            LOG(ERROR) << "Cannot open file " << path + "dict.txt";
            return WRITE_TO_FILE_ERROR;
        }

        fprintf(f, "unk1 file_id offset size1 timestamp version size2 unknown2 type\n");
        for (auto i : dictionary_) {
            fprintf(f, "%lld %lld %lld %lld %lld %lld %lld %lld %s\n", i.second->unknown1(), i.second->file_id(),
                    i.second->file_offset(), i.second->file_size(), i.second->timestamp(), i.second->version(),
                    i.second->block_size(), i.second->unknown2(), i.second->Extension().c_str());
        }
        fclose(f);
        LOG(INFO) << "Unordered dictionary was written successfully to " << path << "dict.txt";
        return SUCCESS;
    }

    DAT_RESULT DatFile::ApplyFilePatch(Subfile *file, BinaryData &data) {
        LOG(DEBUG) << "Applying " << file->file_id() << " patch.";

        if (data.Empty()) {
            LOG(ERROR) << "Failed to make file data for import. Cannot patch file " << file->file_id();
            return FAILED;
        }

        auto file_id = file->file_id();

        if (current_locale() != PATCHED && file_id != 2013266257) {
            LOG(INFO) << "Changing locale to PATCHED(RU) in order to patch file";
            SetLocale(PATCHED);
        }

        dat_state_ = UPDATED;

        if (orig_dict_.count(file_id) == 0 && file_id != 2013266257) {
            orig_dict_[file_id] = new Subfile(this, file->MakeHeaderData());
        }

        if ((patch_dict_.count(file_id) == 0 && file_id != 2013266257) || data.size() > file->block_size()
            || file->file_size() + 8 > file->block_size()) {
            // The new payload doesn't fit into the existing block, so the file
            // is relocated to the end of the .dat file.
            file->file_offset_ = file_size_;
            file->block_size_ = std::max((long long)data.size(), file->block_size_);

            free_buffered_size_ = std::max(0ll, free_buffered_size_ - file->block_size_ - 8);
            AddBufferedSize();

            this->file_size_ += file->block_size_ + 8;
        }

        file->file_size_ = data.size() - 8;

        data.Append(BinaryData::FromNumber<4>(0), 0);

        if (file_id != data.ToNumber<4>(8)) {
            LOG(ERROR) << "Created data's file_id doesn't match the original! Patch wasn't written to .dat file";
            return INCORRECT_PATCH_FILE;
        }

        WriteData(data, data.size(), file->file_offset());

        patch_dict_.erase(file_id);
        if (file_id != 2013266257) {
            patch_dict_[file_id] = new Subfile(this, file->MakeHeaderData());
        }

        if (inactive_categories.count(file->category) != 0) {
            // The file's category is currently disabled, so the visible entry
            // keeps pointing at the original data.
            file->file_offset_ = orig_dict_[file_id]->file_offset_;
            file->file_size_ = orig_dict_[file_id]->file_size_;
            file->block_size_ = orig_dict_[file_id]->block_size_;
            file->timestamp_ = orig_dict_[file_id]->timestamp_;
            file->version_ = orig_dict_[file_id]->version_;
        }

        if (orig_dict_.count(file_id) != 0 && file_id != 2013266257)
            orig_dict_[file_id]->category = file->category;
        if (patch_dict_.count(file_id) != 0 && file_id != 2013266257)
            patch_dict_[file_id]->category = file->category;

        pending_dictionary_.insert(file_id);

        LOG(DEBUG) << "Successfully applied file " << file->file_id() << " patch.";
        return SUCCESS;
    }
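
    // On-disk layout of a subfile block, as read by GetFileData():
    //   +0   4-byte fragment count
    //   +8   file header, starting with the 4-byte file_id
    //   ...  payload
    // For fragmented files (block too small for the whole payload) the block
    // ends with a fragment dictionary of 8 bytes per fragment (4-byte size
    // followed by 4-byte offset), and the remaining payload is gathered from
    // those fragments.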

    BinaryData DatFile::GetFileData(const Subfile *file, long long int offset) {
        LOG(DEBUG) << "Getting file " << file->file_id() << " data";

        BinaryData mfile_id(20);
        ReadData(mfile_id, 20, file->file_offset() + 8);
        if (mfile_id.Empty()) {
            LOG(ERROR) << "Error while reading file " << file->file_id() << " header (offset = "
                       << file->file_offset() << "); Aborting.";
            return BinaryData(0);
        }

        if (!mfile_id.CheckCompression() && file->file_id() != mfile_id.ToNumber<4>(0)) {
            LOG(ERROR) << "Bad DatFile::GetFileData() - file_id in Subfile ("
                       << file->file_id()
                       << ") doesn't match file_id (" << mfile_id.ToNumber<4>(0) << ") in DatFile.";
            return BinaryData(0);
        }

        BinaryData data((unsigned)(file->file_size() + (8 - offset)));
        if (file->block_size() >= file->file_size() + 8) {
            ReadData(data, file->file_size() + (8 - offset), file->file_offset() + offset);
            return data;
        }

        BinaryData fragments_count(4);
        ReadData(fragments_count, 4, file->file_offset());

        long long fragments_number = fragments_count.ToNumber<4>(0);

        long long current_block_size = file->block_size() - offset - 8 * fragments_number;

        ReadData(data, current_block_size, file->file_offset() + offset);

        BinaryData FragmentsDictionary(8 * unsigned(fragments_number));
        ReadData(FragmentsDictionary, 8 * unsigned(fragments_number),
                 file->file_offset() + file->block_size() - 8 * fragments_number);

        for (long long i = 0; i < fragments_number; i++) {
            long long fragment_size = FragmentsDictionary.ToNumber<4>(8 * i);
            long long fragment_offset = FragmentsDictionary.ToNumber<4>(8 * i + 4);
            ReadData(data, std::min(fragment_size, file->file_size() - current_block_size), fragment_offset,
                     current_block_size);
            current_block_size += fragment_size;
        }
        LOG(DEBUG) << "Successfully got file " << file->file_id() << " data";
        return data;
    }
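
    // ReadData()/WriteData() are the only low-level accessors to the .dat
    // file. `offset` addresses the .dat file, `data_offset` addresses the
    // BinaryData buffer; both calls bounds-check the buffer, and ReadData()
    // additionally refuses to read past actual_dat_size_.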

    DAT_RESULT DatFile::ReadData(BinaryData &data, long long size, long long offset, long long data_offset) {
        if (dat_state_ == CLOSED) {
            LOG(ERROR) << "Dat state is CLOSED. Cannot read data.";
            data = BinaryData(0);
            return INIT_ERROR;
        }

        if (data_offset + size > data.size()) {
            LOG(ERROR) << "Trying to read more than the BinaryData buffer holds: reading " << size
                       << " bytes from " << offset << " position.";
            data = BinaryData(0);
            return DAT_READ_ERROR;
        }

        if (offset + size > actual_dat_size_) {
            LOG(ERROR) << "Trying to read past the end of the DatFile: reading " << size
                       << " bytes from " << offset << " position.";
            data = BinaryData(0);
            return DAT_READ_ERROR;
        }

        fseek(file_handler_, offset, SEEK_SET);
        fread(data.data() + data_offset, unsigned(size), 1, file_handler_);
        return SUCCESS;
    }

    DAT_RESULT DatFile::WriteData(const BinaryData &data, long long size, long long offset, long long data_offset) {
        if (dat_state_ < SUCCESS_DICTIONARY) {
            LOG(ERROR) << "Dat state isn't at least SUCCESS_DICTIONARY. Cannot write data.";
            return INCORRECT_STATE_ERROR;
        }

        fseek(file_handler_, offset, SEEK_SET);
        if (data_offset + size > data.size()) {
            LOG(ERROR) << "Trying to write more than the BinaryData buffer holds";
            return DAT_WRITE_ERROR;
        }

        fwrite(data.data() + data_offset, unsigned(size), 1, file_handler_);
        actual_dat_size_ = std::max(file_size_, actual_dat_size_);
        return SUCCESS;
    }
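
    // SetLocale() swaps the headers of every file that differs between the two
    // locale dictionaries into dictionary_, without moving any file data; the
    // changed headers are queued in pending_dictionary_ and written out by
    // CommitDirectories() on close.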

    DAT_RESULT DatFile::SetLocale(LOCALE locale) {
        LOG(INFO) << "Setting locale to " << (locale == PATCHED ? " PATCHED" : " ORIGINAL");
        if (dat_state_ < READY) {
            LOG(ERROR) << "Dat state isn't READY. Cannot set locale.";
            return INCORRECT_STATE_ERROR;
        }

        if (current_locale_ == locale) {
            return SUCCESS;
        }

        dat_state_ = UPDATED;
        auto dict = GetLocaleDictReference(locale);
        // Erasure during iteration invalidates std::map iterators, so the loop
        // advances explicitly and uses the iterator returned by erase().
        for (auto it = dict->begin(); it != dict->end(); ) {
            long long file_id = it->first;
            Subfile *new_file = it->second;
            if (new_file == nullptr) {
                ++it;
                continue;
            }

            if (dictionary_.count(file_id) == 0) {
                LOG(WARNING) << "In locale dictionary there is file with file_id = " << file_id
                             << " which is not in .dat file! Passing it and removing from locale dictionary";
                it = dict->erase(it);
                continue;
            }
            if (dictionary_[file_id]->MakeHeaderData().CutData(8, 16) ==
                new_file->MakeHeaderData().CutData(8, 16) ||
                inactive_categories.count(orig_dict_[file_id]->category) != 0) {
                ++it;
                continue;
            }

            *dictionary_[file_id] = *new_file;

            pending_dictionary_.insert(file_id);
            dat_state_ = UPDATED;
            ++it;
        }

        current_locale_ = locale;
        LOG(INFO) << "Locale set successfully";
        return SUCCESS;
    }

    LOCALE DatFile::current_locale() {
        if (dat_state_ < READY) {
            LOG(ERROR) << "dat_file is in incorrect state!";
            return ORIGINAL;
        }
        if (current_locale_ != PATCHED && current_locale_ != ORIGINAL) {
            LOG(ERROR) << "locale has incorrect value. Setting it to original";
            current_locale_ = ORIGINAL;
        }
        return current_locale_;
    }

    std::map<long long, Subfile *> *DatFile::GetLocaleDictReference(LOCALE locale) {
        switch (locale) {
            case PATCHED:
                return &patch_dict_;
            case ORIGINAL:
                return &orig_dict_;
            default:
                LOG(ERROR) << "Unknown locale! Returning original";
                return &orig_dict_;
        }
    }

    bool DatFile::CorrectSubfile(Subfile *file) {
        BinaryData mfile_id(20);
        ReadData(mfile_id, 20, file->file_offset() + 8);
        if (mfile_id.Empty())
            return false;

        return (mfile_id.CheckCompression() || file->file_id() == mfile_id.ToNumber<4>(0))
               && file->file_size() < 50ll * 1024ll * 1024ll;
    }

    bool DatFile::CheckIfUpdatedByGame() {
        LOG(INFO) << "Checking if DatFile was updated by LotRO";
        if (!pending_patch_.empty())
            return true;

        bool updated = false;

        for (auto i : dictionary_) {
            long long file_id = i.first;
            Subfile *subfile = i.second;
            if (patch_dict_.count(file_id) == 0)
                continue;

            if (*subfile != *patch_dict_[file_id] && *subfile != *orig_dict_[file_id]) {
                orig_dict_.clear();
                patch_dict_.clear();
                LOG(INFO) << "Dat file was updated by the game! Clearing patch data";
                pending_patch_.insert(file_id);
                WriteData(BinaryData::FromNumber<4>(0), 4, 300);
                return true;
            }
        }
        return updated;
    }

    bool DatFile::CheckIfNotPatched() {
        LOG(INFO) << "DatFile " << (patch_dict_.empty() ? "HASN'T" : "HAS already")
                  << " been patched by LEGACY launcher!";
        return patch_dict_.empty();
    }

    bool DatFile::CheckIfPatchedByOldLauncher() {
        LOG(INFO) << "DatFile " << (dictionary_.count(620750000) == 0 ? "HASN'T" : "HAS already")
                  << " been patched by OLD LAUNCHER!";
        return dictionary_.count(620750000) > 0;
    }

    DAT_RESULT DatFile::EnableCategory(int category) {
        LOG(INFO) << "Enabling category " << category;
        if (inactive_categories.count(category) == 0)
            return SUCCESS;

        inactive_categories.erase(category);
        dat_state_ = UPDATED;

        for (auto file : dictionary_) {
            auto file_id = file.first;
            if (patch_dict_.count(file_id) > 0 && patch_dict_[file_id]->category == category) {
                *file.second = *patch_dict_[file_id];
                pending_dictionary_.insert(file_id);
            }
        }
        LOG(INFO) << "Category " << category << " enabled successfully";
        return SUCCESS;
    }

    DAT_RESULT DatFile::DisableCategory(int category) {
        LOG(INFO) << "Disabling category " << category;
        if (inactive_categories.count(category) != 0)
            return SUCCESS;
        inactive_categories.insert(category);
        dat_state_ = UPDATED;

        for (auto file : dictionary_) {
            auto file_id = file.first;
            if (orig_dict_.count(file_id) && orig_dict_[file_id]->category == category) {
                *file.second = *orig_dict_[file_id];
                pending_dictionary_.insert(file_id);
            }
        }
        LOG(INFO) << "Category " << category << " disabled successfully";
        return SUCCESS;
    }

    const std::set<long long> &DatFile::GetInactiveCategoriesList() {
        return inactive_categories;
    }

    const std::string &DatFile::filename() const {
        return filename_;
    }

    void DatFile::AddBufferedSize() {
        if (free_buffered_size_ >= MIN_BUFFERED_SIZE)
            return;
        BinaryData nulls(MAX_BUFFERED_SIZE);
        WriteData(nulls, MAX_BUFFERED_SIZE, file_size_);
        free_buffered_size_ = MAX_BUFFERED_SIZE;
    }
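
    // Backups live in the DAT_LIBRARY_BACKUP directory next to the working
    // directory. A minimal usage sketch (the backup name is a placeholder):
    //
    //     dat.CreateBackup("client_local_English.dat.bak");
    //     // ... patch, then roll everything back:
    //     dat.RestoreFromBackup("client_local_English.dat.bak");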

    bool DatFile::CheckIfBackupExists(const std::string &backup_datname) {
        std::ifstream dst("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
        return !dst.fail();
    }

    DAT_RESULT DatFile::RemoveBackup(const std::string &backup_datname) {
        if (!CheckIfBackupExists(backup_datname))
            return SUCCESS;
        if (remove(("DAT_LIBRARY_BACKUP/" + backup_datname).c_str()) == 0)
            return SUCCESS;
        return REMOVE_FILE_ERROR;
    }

    DAT_RESULT DatFile::CreateBackup(const std::string &backup_datname) {
        auto filename = filename_;
        auto dat_id = dat_id_;
        LOG(INFO) << "Creating backup " << backup_datname << " of .dat file " << filename;
        LOG(INFO) << "  Closing DatFile...";
        CloseDatFile();
        LOG(INFO) << "  Copying " << filename << " to " << backup_datname;
        mkdir("DAT_LIBRARY_BACKUP");
        std::ifstream src(filename, std::ios::binary);
        std::ofstream dst("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);

        std::istreambuf_iterator<char> begin_source(src);
        std::istreambuf_iterator<char> end_source;
        std::ostreambuf_iterator<char> begin_dest(dst);
        std::copy(begin_source, end_source, begin_dest);

        src.close();
        dst.close();

        LOG(INFO) << "  Done copying. Reinitializing " << filename << " DatFile...";
        InitDatFile(filename, dat_id);
        LOG(INFO) << "Backup created successfully!";
        return SUCCESS;
    }

    DAT_RESULT DatFile::RestoreFromBackup(const std::string &backup_datname) {
        auto filename = filename_;
        auto dat_id = dat_id_;
        LOG(INFO) << "Restoring .dat file " << filename << " from backup " << backup_datname;
        LOG(INFO) << "  Closing DatFile...";
        CloseDatFile();
        LOG(INFO) << "  Copying " << backup_datname << " to " << filename;
        mkdir("DAT_LIBRARY_BACKUP");
        std::ifstream src("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
        if (src.fail()) {
            // Checked before the .dat file is opened for writing, so a missing
            // backup doesn't truncate the file we are trying to restore.
            LOG(ERROR) << "CANNOT RESTORE FILE FROM BACKUP - no backup exists with name " << backup_datname;
            return NO_BACKUP_ERROR;
        }
        std::ofstream dst(filename, std::ios::binary);

        std::istreambuf_iterator<char> begin_source(src);
        std::istreambuf_iterator<char> end_source;
        std::ostreambuf_iterator<char> begin_dest(dst);
        std::copy(begin_source, end_source, begin_dest);

        src.close();
        dst.close();

        LOG(INFO) << "  Done copying. Initializing restored " << filename << " DatFile...";
        InitDatFile(filename, dat_id);
        LOG(INFO) << "Restoring .dat file success!";
        return SUCCESS;
    }

} // namespace LOTRO_DAT
} // extern "C++"