- //
- // Created by Иван_Архипов on 31.10.2017.
- //
- #include "DatFile.h"
- #include "BinaryData.h"
- #include "SubDirectory.h"
- #include "Subfile.h"
- #include "SubfileData.h"
- #include <EasyLogging++/easylogging++.h>
- #include <unistd.h>
- #include <algorithm>
- #include <iterator>
- #include <locale>
- #include <fstream>
- #define ELPP_FEATURE_CRASH_LOG
- INITIALIZE_EASYLOGGINGPP
- #ifdef WIN32
- #define fseek _fseeki64
- #define ftell _ftelli64
- #endif
- extern "C++"
- {
- namespace LOTRO_DAT {
- //------------------------------------------------//
- // INIT SECTION
- //------------------------------------------------//
- DatFile::DatFile() {
- dat_state_ = CLOSED;
- root_directory_ = nullptr;
- file_handler_ = nullptr;
- free_buffered_size_ = 0;
- orig_dict_.clear();
- patch_dict_.clear();
- dictionary_.clear();
- el::Configurations defaultConf;
- defaultConf.setToDefault();
- defaultConf.setGlobally(el::ConfigurationType::Format,
- "%datetime %level %fbase (line %line) : %msg (function: %func)");
- defaultConf.setGlobally(el::ConfigurationType::ToFile, "true");
- defaultConf.setGlobally(el::ConfigurationType::Filename, "dat_library.log");
- defaultConf.setGlobally(el::ConfigurationType::ToStandardOutput, "false");
- defaultConf.setGlobally(el::ConfigurationType::PerformanceTracking, "true");
- defaultConf.setGlobally(el::ConfigurationType::MaxLogFileSize, "5242880"); // 5MB
- defaultConf.setGlobally(el::ConfigurationType::LogFlushThreshold, "1"); // Flush after every one log
- defaultConf.set(el::Level::Debug, el::ConfigurationType::Enabled, "false");
- defaultConf.set(el::Level::Debug, el::ConfigurationType::Filename, "dat_library_debug.log");
- el::Loggers::reconfigureAllLoggers(defaultConf);
- LOG(INFO) << "==================================================================";
- LOG(INFO) << "Starting new DatFile class instance";
- }
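- // Illustrative usage sketch (not part of the library). The .dat path and dat_id are
- // hypothetical caller-supplied values, and it is assumed DAT_RESULT::SUCCESS is visible
- // as LOTRO_DAT::SUCCESS:
- //
- //     LOTRO_DAT::DatFile dat;
- //     if (dat.InitDatFile("client_local_English.dat", 0) == LOTRO_DAT::SUCCESS) {
- //         dat.WriteUnorderedDictionary("./");
- //         dat.CloseDatFile();
- //     }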
- DAT_RESULT DatFile::InitDatFile(const std::string &filename, int dat_id) {
- LOG(DEBUG) << "Started initialisation of DatFile " << filename;
- if (dat_state_ != CLOSED && filename == filename_) {
- LOG(DEBUG) << "Trying to reopen the same file: " << filename << ". Doing nothing.";
- return SUCCESS;
- }
- if (dat_state_ != CLOSED && filename != filename_) {
- LOG(DEBUG) << "DatFile wasn't closed yet. Closing in order to reopen.";
- if (CloseDatFile() != SUCCESS) {
- LOG(ERROR) << "Unable to perform CloseDatFile()! Aborting initialization!";
- return FAILED;
- }
- }
- dat_id_ = dat_id;
- dat_state_ = CLOSED;
- current_locale_ = ORIGINAL;
- root_directory_ = nullptr;
- file_handler_ = nullptr;
- free_buffered_size_ = 0;
- filename_ = "none";
- DAT_RESULT result;
- DAT_RESULT return_value = SUCCESS;
- result = OpenDatFile(filename.c_str());
- if (result != SUCCESS) {
- LOG(ERROR) << "Unable to perform opening file. Aborting.";
- CloseDatFile();
- return result;
- }
- return_value = std::max(return_value, result);
- result = ReadSuperBlock();
- if (result <= 0) {
- LOG(ERROR) << "Unable to read super block. Aborting.";
- CloseDatFile();
- return result;
- }
- return_value = std::max(return_value, result);
- result = MakeDirectories();
- if (result <= 0) {
- LOG(ERROR) << "Unable to make directories. Aborting.";
- CloseDatFile();
- return result;
- }
- return_value = std::max(return_value, result);
- result = MakeDictionary();
- if (result <= 0) {
- LOG(ERROR) << "Unable to make dictionary. Aborting.";
- CloseDatFile();
- return result;
- }
- return_value = std::max(return_value, result);
- result = InitLocales();
- if (result <= 0) {
- LOG(ERROR) << "Unable to initialize locales. Aborting.";
- CloseDatFile();
- return result;
- }
- return_value = std::max(return_value, result);
- LOG(INFO) << "File " << filename << " opened successfully!";
- filename_ = filename;
- dat_state_ = READY;
- LOG(INFO) << "Making last preparations...";
- return_value = std::max(return_value, result);
- if (return_value >= 2) {
- LOG(WARNING) << "Dat file could be corrupted. Trying to delete corrupted dictionary rows";
- if (RepairDatFile() != SUCCESS)
- return CRITICAL_DAT_ERROR;
- }
- if (CheckIfUpdatedByGame()) {
- LOG(INFO) << ".dat file was updated by game! Need to reinitialize files and directories!";
- CloseDatFile();
- InitDatFile(filename, dat_id);
- }
- dat_without_patches_ = CheckIfNotPatched();
- LOG(INFO) << "Preparations made successfully! Init return value = " << return_value;
- return return_value;
- }
- DAT_RESULT DatFile::OpenDatFile(const char *dat_name) {
- LOG(DEBUG) << "Started opening DatFile";
- if (dat_state_ != CLOSED) {
- CloseDatFile();
- }
- file_handler_ = fopen(dat_name, "r+b");
- if (file_handler_ == nullptr) {
- LOG(ERROR) << "Unable to open file " << dat_name;
- return NO_FILE_ERROR;
- }
- fseek(file_handler_, 0, SEEK_END);
- actual_dat_size_ = ftell(file_handler_);
- fseek(file_handler_, 0, SEEK_SET);
- dat_state_ = SUCCESS_OPENED;
- LOG(DEBUG) << "Successfully opened DatFile";
- return SUCCESS;
- }
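- // Superblock layout as read by ReadSuperBlock below (4-byte values at fixed offsets; the
- // offsets and expected constants come straight from the code, the field meanings from the
- // member names):
- //   0x100 constant1 (expected 0x4C5000)   0x140 constant2 (expected 0x5442)
- //   0x148 file_size                       0x14C version1
- //   0x150 version2                        0x154 fragmentation_journal_offset
- //   0x158 fragmentation_journal_end       0x15C fragmentation_journal_size
- //   0x160 root_directory_offset           0x19C free_dat_size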
- DAT_RESULT DatFile::ReadSuperBlock() {
- LOG(DEBUG) << "Started reading superblock";
- if (dat_state_ != SUCCESS_OPENED) {
- LOG(ERROR) << "Dat state isn't SUCCESS_OPENED. Cannot perform extraction.";
- return INCORRECT_STATE_ERROR;
- }
- BinaryData data(1024);
- ReadData(data, 1024);
- constant1_ = data.ToNumber<4>(0x100);
- constant2_ = data.ToNumber<4>(0x140);
- version1_ = data.ToNumber<4>(0x14C);
- file_size_ = data.ToNumber<4>(0x148);
- version2_ = data.ToNumber<4>(0x150);
- fragmentation_journal_offset_ = data.ToNumber<4>(0x154);
- fragmentation_journal_end_ = data.ToNumber<4>(0x158);
- fragmentation_journal_size_ = data.ToNumber<4>(0x15C);
- root_directory_offset_ = data.ToNumber<4>(0x160);
- free_dat_size_ = data.ToNumber<4>(0x19C);
- if (constant1_ != 0x4C5000) {
- LOG(ERROR) << "variable at position 0x100 is not equal to .dat file constant!";
- return INCORRECT_SUPERBLOCK_ERROR;
- }
- if (constant2_ != 0x5442) {
- LOG(ERROR) << "variable at position 0x140 is not equal to .dat file constant!";
- return INCORRECT_SUPERBLOCK_ERROR;
- }
- if (file_size_ != actual_dat_size_) {
- LOG(ERROR) << "variable at 0x148 position is not equal to .dat file size!";
- //return CORRUPTED_FILE_WARNING;
- }
- dat_state_ = SUCCESS_SUPERBLOCK;
- LOG(DEBUG) << "Superblock read successfully";
- return SUCCESS;
- }
- DAT_RESULT DatFile::MakeDirectories() {
- LOG(DEBUG) << "Started making directories";
- if (dat_state_ != SUCCESS_SUPERBLOCK) {
- LOG(ERROR) << "Dat state isn't SUCCESS_SUPERBLOCK. Cannot make directories.";
- return INCORRECT_STATE_ERROR;
- }
- root_directory_ = new SubDirectory((unsigned) root_directory_offset_, this);
- dat_state_ = SUCCESS_DIRECTORIES;
- LOG(DEBUG) << "Directories made successfully";
- return SUCCESS;
- }
- DAT_RESULT DatFile::MakeDictionary() {
- LOG(DEBUG) << "Started making dictionary";
- if (dat_state_ != SUCCESS_DIRECTORIES) {
- LOG(ERROR) << "Dat state isn't SUCCESS_DIRECTORIES. Cannot make directories.";
- return INCORRECT_STATE_ERROR;
- }
- if (root_directory_ == nullptr) {
- LOG(ERROR) << "root_directory is nullptr!!";
- return INIT_ERROR;
- }
- root_directory_->MakeDictionary(dictionary_);
- dat_state_ = SUCCESS_DICTIONARY;
- LOG(DEBUG) << "Dictionary made successfull";
- return SUCCESS;
- }
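- // Locale-dictionary location, as read by InitLocales below: the 4 bytes at file offset 300
- // hold the dictionary offset; at that offset lie [4-byte size][4-byte version, expected 100],
- // followed by the payload whose byte layout is documented inside InitLocales/CommitLocales.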
- DAT_RESULT DatFile::InitLocales() {
- LOG(INFO) << "Initialising locales...";
- BinaryData dicts_data(4);
- ReadData(dicts_data, 4, 300);
- long long dict_offset = dicts_data.ToNumber<4>(0);
- if (dict_offset == 0 || dict_offset + 8 >= actual_dat_size_) {
- LOG(INFO) << "Dictionary offset is empty or incorrect. Passing.";
- return SUCCESS;
- }
- ReadData(dicts_data, 4, dict_offset);
- long long dict_size = dicts_data.ToNumber<4>(0);
- ReadData(dicts_data, 4, dict_offset + 4);
- long long dict_version = dicts_data.ToNumber<4>(0);
- LOG(INFO) << "Dictionary size is " << dict_size << ". Version is " << dict_version;
- if (dict_version != 100) {
- LOG(WARNING) << "DICTIONARY IS OLD!!!";
- return SUCCESS;
- }
- dicts_data = BinaryData((unsigned)dict_size);
- ReadData(dicts_data, dict_size, dict_offset + 8);
- if (dicts_data.size() < 15) {
- LOG(ERROR) << "Incorrect dictionary. Passing.";
- return FAILED;
- }
- BinaryData hi_data = dicts_data.CutData(0, 15) + BinaryData("\0", 1);
- std::string hi = std::string((char *) (hi_data.data()));
- LOG(DEBUG) << "hi info is " << hi;
- if (hi != "Hi from Gi1dor!") {
- LOG(WARNING) << "Didn't receive 'hi' from Gi1dor... Initialising locale dicts as empty";
- LOG(INFO) << "Could't init locales' file... Continuing without them";
- return SUCCESS;
- }
- int offset = 15;
- BinaryData current_locale_data = dicts_data.CutData(offset, offset + 4) + BinaryData("\0", 1);
- std::string locale((char *) (current_locale_data.data()));
- offset += 4;
- LOG(DEBUG) << "current locale:" << locale;
- if (locale != "PATC" && locale != "ORIG") {
- LOG(WARNING) << "Incorrect locale... Initialising locale dicts as empty";
- LOG(INFO) << "Could't recognize locale... Continuing without locales";
- return SUCCESS;
- }
- current_locale_ = (locale == "PATC" ? PATCHED : ORIGINAL);
- // 15 bytes for "Hi from Gi1dor"
- // 4 bytes for LOCALE
- // 4 bytes for orig_dict.size()
- // (32 + 4) * orig_dict.size() bytes for orig_dict data
- // 4 bytes for patch_dict.size()
- // (32 + 4) * patch_dict.size() bytes for patch_dict data
- // 4 bytes for inactive_categories dict
- // 4 * inactive_categories.size() bytes for inactive_categories data
- size_t orig_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
- offset += 4;
- for (size_t i = 0; i < orig_dict_size; i++) {
- auto file = new Subfile(this, dicts_data.CutData(offset, offset + 32));
- orig_dict_[file->file_id()] = file;
- offset += 32;
- orig_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
- offset += 4;
- if (orig_dict_[file->file_id()]->category == 0)
- LOG(DEBUG) << "file category is undefined (0)!";
- }
- size_t patch_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
- offset += 4;
- for (size_t i = 0; i < patch_dict_size; i++) {
- auto file = new Subfile(this, dicts_data.CutData(offset, offset + 32));
- patch_dict_[file->file_id()] = file;
- offset += 32;
- patch_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
- offset += 4;
- if (patch_dict_[file->file_id()]->category == 0)
- LOG(DEBUG) << "file category is undefined (0)!";
- }
- size_t inactive_categories_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
- offset += 4;
- for (size_t i = 0; i < inactive_categories_size; i++) {
- inactive_categories.insert(dicts_data.ToNumber<4>(offset));
- offset += 4;
- }
- LOG(INFO) << "There are " << patch_dict_.size() << " files in patch locale dictionary";
- LOG(INFO) << "There are " << orig_dict_.size() << " files in original locale dictionary";
- std::string inactive_cat_s;
- for (auto i : inactive_categories) {
- inactive_cat_s += std::to_string(i) + " ";
- }
- LOG(INFO) << "Unactive patches now: " << inactive_cat_s;
- LOG(INFO) << "Finished initialising locales";
- return SUCCESS;
- }
- //------------------------------------------------//
- // CLOSE SECTION
- //------------------------------------------------//
- DatFile::~DatFile() {
- CloseDatFile();
- }
- DAT_RESULT DatFile::CloseDatFile() {
- LOG(INFO) << "Closing DatFile";
- if (dat_state_ == CLOSED) {
- LOG(INFO) << "DatFile is already closed. Nothing to do";
- return SUCCESS;
- }
- // Committing changes and updating/writing locales and header info
- if (!pending_dictionary_.empty() || dat_state_ == UPDATED) {
- CommitLocales();
- CommitDirectories();
- //ModifyFragmentationJournal();
- //free_dat_size_ = 128248;
- //fragmentation_journal_end_ = 0;
- //fragmentation_journal_size_ = 1;
- //UpdateHeader();
- }
- current_locale_ = ORIGINAL;
- if (file_handler_ != nullptr) {
- fclose(file_handler_);
- }
- SubDirectory::visited_subdirectories_.clear();
- delete root_directory_;
- //truncate64(filename_.c_str(), file_size_);
- free_buffered_size_ = 0;
- filename_ = "none";
- orig_dict_.clear();
- patch_dict_.clear();
- pending_patch_.clear();
- inactive_categories.clear();
- file_handler_ = nullptr;
- root_directory_ = nullptr;
- pending_dictionary_.clear();
- dictionary_.clear();
- constant1_ = 0;
- constant2_ = 0;
- file_size_ = 0;
- version1_ = 0;
- version2_ = 0;
- fragmentation_journal_size_ = 0;
- fragmentation_journal_end_ = 0;
- root_directory_offset_ = 0;
- fragmentation_journal_offset_ = 0;
- dat_state_ = CLOSED;
- dat_id_ = -1;
- LOG(INFO) << "File closed successfully.";
- return SUCCESS;
- }
- DAT_RESULT DatFile::CommitLocales() {
- LOG(INFO) << "Committing locales...";
- // 15 bytes for "Hi from Gi1dor"
- // 4 bytes for LOCALE
- // 4 bytes for orig_dict.size()
- // (32 + 4) * orig_dict.size() bytes for orig_dict data
- // 4 bytes for patch_dict.size()
- // (32 + 4) * patch_dict.size() bytes for patch_dict data
- // 4 bytes for inactive_categories list
- // 4 * inactive_categories.size() bytes for inactive_categories data
- BinaryData binary_data = BinaryData(14 + 15 + 4
- + 4 + (32 + 4) * orig_dict_.size()
- + 4 + (32 + 4) * patch_dict_.size()
- + 4 + 4 * inactive_categories.size());
- size_t current_size = 0;
- binary_data.Append(BinaryData("Hi from Gi1dor!", 15), current_size);
- current_size += 15;
- binary_data.Append(BinaryData((current_locale_ == ORIGINAL ? "ORIG" : "PATC"), 4), current_size);
- current_size += 4;
- binary_data.Append(BinaryData::FromNumber<4>(orig_dict_.size()), current_size);
- current_size += 4;
- for (auto file : orig_dict_) {
- binary_data.Append(file.second->MakeHeaderData(), current_size);
- current_size += 32;
- binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
- current_size += 4;
- }
- binary_data.Append(BinaryData::FromNumber<4>(patch_dict_.size()), current_size);
- current_size += 4;
- for (auto file : patch_dict_) {
- binary_data.Append(file.second->MakeHeaderData(), current_size);
- current_size += 32;
- binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
- current_size += 4;
- }
- binary_data.Append(BinaryData::FromNumber<4>(inactive_categories.size()), current_size);
- current_size += 4;
- for (auto patch_id : inactive_categories) {
- binary_data.Append(BinaryData::FromNumber<4>(patch_id), current_size);
- current_size += 4;
- }
- BinaryData dicts_data(4);
- ReadData(dicts_data, 4, 300);
- long long dict_offset = dicts_data.ToNumber<4>(0);
- ReadData(dicts_data, 4, dict_offset);
- long long dict_size = dicts_data.ToNumber<4>(0);
- if (binary_data.size() > dict_size) {
- WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size(), 20u * 1024u * 1024u)), 4, file_size_);
- WriteData(BinaryData::FromNumber<4>(100), 4, file_size_ + 4);
- WriteData(binary_data, binary_data.size(), file_size_ + 8);
- WriteData(BinaryData::FromNumber<4>(file_size_), 4, 300);
- file_size_ += binary_data.size();
- // Reserving extra space (20 MB of zero bytes) after the locale data for future growth.
- BinaryData nulls(unsigned(20 * 1024 * 1024));
- WriteData(nulls, nulls.size(), file_size_);
- file_size_ += nulls.size();
- } else {
- WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size(), 20u * 1024u * 1024u)), 4, dict_offset);
- WriteData(BinaryData::FromNumber<4>(100), 4, dict_offset + 4);
- WriteData(binary_data, binary_data.size(), dict_offset + 8);
- }
- LOG(INFO) << "Locales commited successfully";
- return SUCCESS;
- }
- DAT_RESULT DatFile::CommitDirectories() {
- for (auto file_id : pending_dictionary_) {
- if (dictionary_[file_id] == nullptr || !CorrectSubfile(dictionary_[file_id]))
- continue;
- WriteData(dictionary_[file_id]->MakeHeaderData(), 32, dictionary_[file_id]->dictionary_offset());
- }
- pending_dictionary_.clear();
- return SUCCESS;
- }
- DAT_RESULT DatFile::ModifyFragmentationJournal() {
- if (fragmentation_journal_size_ == 0)
- return SUCCESS;
- LOG(DEBUG) << "Modifying fragmentation journal";
- BinaryData data(4);
- ReadData(data, 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_);
- LOG(INFO) << "FREE_SIZE BLOCK = " << data.ToNumber<4>(0);
- long long free_size = data.ToNumber<4>(0);
- long long free_offset = file_size_;
- BinaryData nulldata = BinaryData(unsigned(free_size));
- WriteData(nulldata, nulldata.size(), file_size_);
- file_size_ += nulldata.size();
- WriteData(BinaryData::FromNumber<4>(free_size), 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_);
- WriteData(BinaryData::FromNumber<4>(free_offset), 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_ + 4);
- //nulldata = BinaryData(8);
- //WriteData(nulldata, nulldata.size(), fragmentation_journal_offset_ + 16);
- LOG(DEBUG) << "Finished modifying fragmentation journal";
- return SUCCESS;
- }
- DAT_RESULT DatFile::UpdateHeader() {
- LOG(DEBUG) << "Updating header";
- WriteData(BinaryData::FromNumber<4>(constant1_), 4, 0x100);
- WriteData(BinaryData::FromNumber<4>(constant2_), 4, 0x140);
- //WriteData(BinaryData::FromNumber<4>( 0 ), 4, 0x144);
- WriteData(BinaryData::FromNumber<4>(file_size_), 4, 0x148);
- WriteData(BinaryData::FromNumber<4>(version1_ ), 4, 0x14C);
- WriteData(BinaryData::FromNumber<4>(version2_ ), 4, 0x150);
- WriteData(BinaryData::FromNumber<4>(fragmentation_journal_offset_), 4, 0x154);
- WriteData(BinaryData::FromNumber<4>(fragmentation_journal_end_), 4, 0x158);
- WriteData(BinaryData::FromNumber<4>(fragmentation_journal_size_), 4, 0x15C);
- WriteData(BinaryData::FromNumber<4>(root_directory_offset_), 4, 0x160);
- WriteData(BinaryData::FromNumber<4>(free_dat_size_), 4, 0x19C);
- LOG(DEBUG) << "Finished updating header";
- return SUCCESS;
- }
- DAT_RESULT DatFile::RepairDatFile() {
- for (auto file : dictionary_) {
- auto subfile = file.second;
- auto file_id = file.first;
- if (CorrectSubfile(subfile))
- continue;
- orig_dict_.clear();
- patch_dict_.clear();
- // NOTE: this early return makes the per-file repair below unreachable; on the first
- // incorrect subfile both locale dictionaries are simply dropped.
- return SUCCESS;
- if (orig_dict_.count(file_id) == 0 || subfile->file_offset() == orig_dict_[file_id]->file_offset())
- return CRITICAL_DAT_ERROR;
- *dictionary_[file_id] = *orig_dict_[file_id];
- patch_dict_.erase(file_id);
- orig_dict_.erase(file_id);
- }
- return SUCCESS;
- }
- //------------------------------------------------//
- // DAT INFO SECTION
- //------------------------------------------------//
- DAT_STATE DatFile::DatFileState() const {
- return dat_state_;
- }
- long long DatFile::files_number() const {
- return dictionary_.size();
- }
- //------------------------------------------------//
- // EXTRACT SECTION
- //------------------------------------------------//
- DAT_RESULT DatFile::ExtractFile(long long file_id, const std::string &path) {
- LOG(DEBUG) << "Extracting file " << file_id << " to path " << path;
- if (dat_state_ < READY) {
- LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
- return INCORRECT_STATE_ERROR;
- }
- if (dictionary_.count(file_id) == 0) {
- LOG(ERROR) << "No file with file_id = " << file_id << " in dictionary. Aborting extraction.";
- return NO_FILE_ERROR;
- }
- BinaryData file_data = GetFileData(dictionary_[file_id], 8);
- if (file_data.size() == 0) {
- LOG(ERROR) << "File data is empty. Aborting extraction.";
- return NO_FILE_ERROR;
- }
- SubfileData export_data = dictionary_[file_id]->PrepareForExport(file_data);
- if (export_data.Empty()) {
- LOG(ERROR) << "Export data is empty. Aborting extraction.";
- return NO_FILE_ERROR;
- }
- if (export_data.binary_data.WriteToFile(path + export_data.options["ext"].as<std::string>()) != SUCCESS) {
- LOG(ERROR) << "Cannot write to file.";
- return WRITE_TO_FILE_ERROR;
- }
- LOG(DEBUG) << "File " << file_id << " extracted successfully";
- return SUCCESS;
- }
- DAT_RESULT DatFile::ExtractFile(long long file_id, Database *db) {
- LOG(DEBUG) << "Extracting file " << file_id << " to database.";
- if (dat_state_ < READY) {
- LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
- return INCORRECT_STATE_ERROR;
- }
- if (dictionary_.count(file_id) == 0) {
- LOG(ERROR) << "No file with file_id = " << file_id << " in dictionary. Aborting extraction.";
- return NO_FILE_ERROR;
- }
- BinaryData file_data = GetFileData(dictionary_[file_id], 8);
- if (file_data.Empty()) {
- LOG(WARNING) << "File with id " << dictionary_[file_id]->file_id() << " is empty. Passing it.";
- return SUCCESS;
- }
- SubfileData export_data;
- export_data = dictionary_[file_id]->PrepareForExport(file_data);
- export_data.options["did"] = dat_id_;
- if (export_data == SubfileData()) {
- LOG(WARNING) << "File with id " << dictionary_[file_id]->file_id() << " is empty or incorrect.";
- return SUCCESS;
- }
- try {
- db->PushFile(export_data);
- } catch (std::exception &e) {
- LOG(ERROR) << "Caught " << e.what() << " exception.";
- return FAILED;
- }
- LOG(DEBUG) << "File " << file_id << " extracted successfully";
- return SUCCESS;
- }
- int DatFile::ExtractAllFilesByType(FILE_TYPE type, std::string path) {
- LOG(INFO) << "Extracting all files to path " << path;
- if (dat_state_ < READY) {
- LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
- return INCORRECT_STATE_ERROR;
- }
- int success = 0;
- for (auto i : dictionary_) {
- FILE_TYPE file_type = i.second->FileType();
- if (file_type == type) {
- success += (ExtractFile(i.second->file_id(), (path + std::to_string(i.second->file_id()))) == SUCCESS
- ? 1 : 0);
- }
- }
- LOG(INFO) << "Successfully extracted " << success << " files";
- return success;
- }
- int DatFile::ExtractAllFilesByType(FILE_TYPE type, Database *db) {
- LOG(INFO) << "Extracting all files to database...";
- if (dat_state_ < READY) {
- LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
- return INCORRECT_STATE_ERROR;
- }
- int success = 0;
- for (auto i : dictionary_) {
- FILE_TYPE file_type = i.second->FileType();
- if (file_type == type) {
- success += (ExtractFile(i.second->file_id(), db) == SUCCESS ? 1 : 0);
- }
- }
- LOG(INFO) << "Extracted " << success << " files";
- return success;
- }
- //------------------------------------------------//
- // PATCH SECTION
- //------------------------------------------------//
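- // Options used by the patch/extract paths (only those visible in this file): PatchFile
- // reads "fid" (file id, required) and "cat" (category, optional, defaults to 1); the
- // extract path reads "ext" (presumably filled by PrepareForExport) and sets "did" (dat id)
- // before pushing a file to the database.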
- DAT_RESULT DatFile::PatchFile(const SubfileData &data) {
- LOG(DEBUG) << "Patching file with id = " << data.options["fid"].as<long long>() << ".";
- actual_dat_size_ = std::max(file_size_, actual_dat_size_);
- if (!dat_without_patches_) {
- file_size_ = actual_dat_size_;
- }
- if (dat_state_ < READY) {
- LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
- return INCORRECT_STATE_ERROR;
- }
- auto file_id = data.options["fid"].as<long long>();
- if (dictionary_.count(file_id) == 0) {
- LOG(ERROR) << "Cannot patch file - there is no file in dictionary with file_id = " << file_id;
- return NO_FILE_ERROR;
- }
- Subfile *file = dictionary_[file_id];
- if (!CorrectSubfile(file)) {
- LOG(ERROR) << "Incorrect subfile with id " << file->file_id() << " (headers do not match). Cannot patch it";
- return FAILED;
- }
- // If the file belongs to an inactive category, switch its dictionary entry to the patched
- // state so the patch can be committed; ApplyFilePatch() will restore the original header
- // if the new category is still inactive.
- if (inactive_categories.count(file->category) != 0 && patch_dict_.count(file_id) != 0 && file_id != 2013266257) {
- *dictionary_[file_id] = *patch_dict_[file_id];
- }
- if (data.options["cat"].IsDefined()) {
- file->category = data.options["cat"].as<long long>();
- } else {
- file->category = 1;
- }
- BinaryData old_data = GetFileData(file);
- if (old_data.Empty()) {
- LOG(ERROR) << "GetFileData returned empty data. Aborting.";
- return DAT_PATCH_FILE_ERROR;
- }
- BinaryData patch_data = file->MakeForImport(old_data, data);
- DAT_RESULT result = ApplyFilePatch(file, patch_data);
- if (result != SUCCESS)
- return result;
- LOG(DEBUG) << "Patched successfully file " << data.options["fid"].as<long long>() << ".";
- return SUCCESS;
- }
- DAT_RESULT DatFile::PatchAllDatabase(Database *db) {
- LOG(INFO) << "Patching all database";
- if (dat_state_ < READY) {
- LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
- return INCORRECT_STATE_ERROR;
- }
- SubfileData data;
- data = db->GetNextFile();
- while (!data.Empty()) {
- DAT_RESULT result = PatchFile(data);
- if (result != SUCCESS)
- LOG(ERROR) << "Cannot patch file" << data.options["fid"].as<long long>() << " continuing";
- data = db->GetNextFile();
- }
- LOG(INFO) << "Successfully patched whole database";
- return SUCCESS;
- }
- DAT_RESULT DatFile::WriteUnorderedDictionary(std::string path) const {
- LOG(INFO) << "Writing unordered dictionary to " << path << "dict.txt";
- FILE *f = fopen((path + "dict.txt").c_str(), "w");
- if (f == nullptr) {
- LOG(ERROR) << "Cannot open file " << path + "dict.txt";
- return WRITE_TO_FILE_ERROR;
- }
- fprintf(f, "unk1 file_id offset size1 timestamp version size2 unknown2 type\n");
- for (auto i : dictionary_) {
- fprintf(f, "%lld %lld %lld %lld %lld %lld %lld %lld %s\n", i.second->unknown1(), i.second->file_id(),
- i.second->file_offset(), i.second->file_size(), i.second->timestamp(), i.second->version(),
- i.second->block_size(), i.second->unknown2(), i.second->Extension().c_str());
- }
- fclose(f);
- LOG(INFO) << "Unordered dictionary was written successfully to " << path << "dict.txt";
- return SUCCESS;
- }
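- // ApplyFilePatch (below) in outline: ensure the locale is PATCHED, remember the original
- // header in orig_dict_, relocate the subfile to the end of the .dat when it has no patched
- // copy yet or no longer fits its block, write the new data, refresh the patch_dict_ entry,
- // roll the header back to the orig_dict_ values if the file's category is inactive, and
- // queue the file_id in pending_dictionary_ so CommitDirectories() rewrites its header
- // (file_id 2013266257 is special-cased throughout).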
- DAT_RESULT DatFile::ApplyFilePatch(Subfile *file, BinaryData &data) {
- LOG(DEBUG) << "Applying " << file->file_id() << " patch.";
- // if (patch_dict_.size() == 0 && pending_dictionary_.size() == 0) {
- // BinaryData nulls(50 * 1024 * 1024);
- // WriteData(nulls, nulls.size(), file_size_);
- // file_size_ += 50 * 1024 * 1024;
- // }
- if (data.Empty()) {
- LOG(ERROR) << "Error caused during making file for import. Cannot patch file " << file->file_id();
- return FAILED;
- }
- auto file_id = file->file_id();
- if (current_locale() != PATCHED && file_id != 2013266257) {
- LOG(INFO) << "Changing locale to PATCHED(RU) in order to patch file";
- SetLocale(PATCHED);
- }
- dat_state_ = UPDATED;
- if (orig_dict_.count(file_id) == 0 && file_id != 2013266257) {
- orig_dict_[file_id] = new Subfile(this, file->MakeHeaderData());
- }
- if ((patch_dict_.count(file_id) == 0 && file_id != 2013266257) || data.size() > file->block_size()
- || file->file_size() + 8 > file->block_size()) {
- file->file_offset_ = file_size_;
- file->block_size_ = std::max((long long)data.size(), file->block_size_);
- free_buffered_size_ = std::max(0ll, free_buffered_size_ - file->block_size_ - 8);
- AddBufferedSize();
- this->file_size_ += file->block_size_ + 8;
- }
- file->file_size_ = data.size() - 8;
- data.Append(BinaryData::FromNumber<4>(0), 0); // set additional fragments count to zero
- if (file_id != data.ToNumber<4>(8)) {
- LOG(ERROR) << "Created data's file_id doesn't match to original! Patch wasn't written to .dat file";
- return INCORRECT_PATCH_FILE;
- }
- //data.ProtectData();
- //BinaryData data1(data.size());
- WriteData(data, data.size(), file->file_offset());
- //data.DeprotectData();
- patch_dict_.erase(file_id); // Remove the old entry from the patched (Russian) dictionary
- if (file_id != 2013266257) {
- patch_dict_[file_id] = new Subfile(this, file->MakeHeaderData()); // Create the new entry
- }
- // If category is forbidden, then return file header data to original state
- if (inactive_categories.count(file->category) != 0) {
- file->file_offset_ = orig_dict_[file_id]->file_offset_;
- file->file_size_ = orig_dict_[file_id]->file_size_;
- file->block_size_ = orig_dict_[file_id]->block_size_;
- file->timestamp_ = orig_dict_[file_id]->timestamp_;
- file->version_ = orig_dict_[file_id]->version_;
- }
- if (orig_dict_.count(file_id) != 0 && file_id != 2013266257)
- orig_dict_[file_id]->category = file->category;
- if (patch_dict_.count(file_id) != 0 && file_id != 2013266257)
- patch_dict_[file_id]->category = file->category;
- // Applying file info in directory
- pending_dictionary_.insert(file_id);
- LOG(DEBUG) << "Successfully applied file " << file->file_id() << " patch.";
- return SUCCESS;
- }
- //------------------------------------------------//
- // INPUT-OUTPUT SECTION
- //------------------------------------------------//
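- // Subfile block layout as interpreted by GetFileData below (reconstructed from its reads):
- //   [+0]                      4-byte count of additional fragments
- //   [+8]                      file_id, checked against the dictionary entry unless the
- //                             data reports itself as compressed
- //   [+offset ...]             first data block (offset is 8 by default)
- //   [+block_size - 8*n ...]   fragment table: n entries of {4-byte size, 4-byte offset}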
- BinaryData DatFile::GetFileData(const Subfile *file, long long int offset) {
- LOG(DEBUG) << "Getting file " << file->file_id() << " data";
- BinaryData mfile_id(20);
- ReadData(mfile_id, 20, file->file_offset() + 8);
- if (mfile_id.Empty()) {
- LOG(ERROR) << "Error while reading file " << file->file_id() << " header (offset = "
- << file->file_offset() << "); Aborting.";
- return BinaryData(0);
- }
- if (!mfile_id.CheckCompression() && file->file_id() != mfile_id.ToNumber<4>(0)) {
- LOG(ERROR) << "Bad DatFile::GetFileData() - file_id in Subfile ("
- << file->file_id()
- << ") doesn't match to file_id (" << mfile_id.ToNumber<4>(0) << ")in DatFile.";
- return BinaryData(0);
- }
- BinaryData data((unsigned)(file->file_size() + (8 - offset)));
- if (file->block_size() >= file->file_size() + 8) {
- ReadData(data, file->file_size() + (8 - offset), file->file_offset() + offset);
- return data;
- }
- BinaryData fragments_count(4);
- ReadData(fragments_count, 4, file->file_offset());
- long long fragments_number = fragments_count.ToNumber<4>(0);
- long long current_block_size = file->block_size() - offset - 8 * fragments_number;
- ReadData(data, current_block_size, file->file_offset() + offset);
- BinaryData FragmentsDictionary(8 * unsigned(fragments_number));
- ReadData(FragmentsDictionary, 8 * unsigned(fragments_number),
- file->file_offset() + file->block_size() - 8 * fragments_number);
- for (long long i = 0; i < fragments_number; i++) {
- long long fragment_size = FragmentsDictionary.ToNumber<4>(8 * i);
- long long fragment_offset = FragmentsDictionary.ToNumber<4>(8 * i + 4);
- ReadData(data, std::min(fragment_size, file->file_size() - current_block_size), fragment_offset,
- current_block_size);
- current_block_size += fragment_size;
- }
- LOG(DEBUG) << "Successfully got file " << file->file_id() << " data";
- return data;
- }
- DAT_RESULT DatFile::ReadData(BinaryData &data, long long size, long long offset, long long data_offset) {
- if (dat_state_ == CLOSED) {
- LOG(ERROR) << "Dat state is CLOSED. Cannot read data.";
- data = BinaryData(0);
- return INIT_ERROR;
- }
- if (data_offset + size > data.size()) {
- LOG(ERROR) << "Trying to read more than BinaryData size: Reading " << size << " bytes from " << offset
- << " position.";
- data = BinaryData(0);
- return DAT_READ_ERROR;
- }
- if (offset + size > actual_dat_size_) {
- LOG(ERROR) << "Trying to read more than DatFile size elapsed: Reading " << size << " bytes from " << offset
- << " position.";
- data = BinaryData(0);
- return DAT_READ_ERROR;
- }
- fseek(file_handler_, offset, SEEK_SET);
- fread(data.data() + data_offset, unsigned(size), 1, file_handler_);
- return SUCCESS;
- }
- DAT_RESULT DatFile::WriteData(const BinaryData &data, long long size, long long offset, long long data_offset) {
- if (dat_state_ < READY) {
- LOG(ERROR) << "Dat state isn't READY. Cannot write data.";
- return INCORRECT_STATE_ERROR;
- }
- fseek(file_handler_, offset, SEEK_SET);
- if (data_offset + size > data.size()) {
- LOG(ERROR) << "Trying to write more than BinaryData size";
- return DAT_WRITE_ERROR;
- }
- fwrite(data.data() + data_offset, unsigned(size), 1, file_handler_);
- actual_dat_size_ = std::max(file_size_, actual_dat_size_);
- return SUCCESS;
- }
- //------------------------------------------------//
- // LOCALE SECTION
- //------------------------------------------------//
- DAT_RESULT DatFile::SetLocale(LOCALE locale) {
- LOG(INFO) << "Setting locale to " << (locale == PATCHED ? " PATCHED" : " ORIGINAL");
- if (dat_state_ < READY) {
- LOG(ERROR) << "Dat state isn't READY. Cannot set locale.";
- return INCORRECT_STATE_ERROR;
- }
- if (current_locale_ == locale) {
- return SUCCESS;
- }
- dat_state_ = UPDATED;
- auto dict = GetLocaleDictReference(locale);
- // Iterate with an explicit iterator: erasing from a std::map invalidates the erased
- // iterator, so a range-based for combined with dict->erase() is not safe here.
- for (auto it = dict->begin(); it != dict->end();) {
- if (it->second == nullptr) {
- ++it;
- continue;
- }
- if (dictionary_.count(it->first) == 0) {
- LOG(WARNING) << "In locale dictionary there is a file with file_id = " << it->first
- << " which is not in the .dat file! Skipping it and removing from locale dictionary";
- it = dict->erase(it);
- continue;
- }
- if (dictionary_[it->first]->MakeHeaderData().CutData(8, 16) ==
- it->second->MakeHeaderData().CutData(8, 16) ||
- inactive_categories.count(orig_dict_[it->first]->category) != 0) {
- ++it;
- continue;
- }
- long long file_id = it->first;
- Subfile *new_file = it->second;
- *dictionary_[file_id] = *new_file;
- pending_dictionary_.insert(file_id);
- dat_state_ = UPDATED;
- ++it;
- }
- current_locale_ = locale;
- LOG(INFO) << "Locale set successfull";
- return SUCCESS;
- }
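- // Illustrative sketch of switching locales (assuming the LOCALE values are visible as
- // LOTRO_DAT::PATCHED / LOTRO_DAT::ORIGINAL): SetLocale copies the chosen locale's headers
- // into dictionary_ and queues the touched file ids for CommitDirectories().
- //
- //     dat.SetLocale(LOTRO_DAT::PATCHED);
- //     dat.SetLocale(LOTRO_DAT::ORIGINAL);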
- LOCALE DatFile::current_locale() {
- if (dat_state_ < READY) {
- LOG(ERROR) << "dat_file is in incorrect state!";
- return ORIGINAL;
- }
- if (current_locale_ != PATCHED && current_locale_ != ORIGINAL) {
- LOG(ERROR) << "locale has incorrect value. Setting it to original";
- current_locale_ = ORIGINAL;
- }
- return current_locale_;
- }
- std::map<long long, Subfile *> *DatFile::GetLocaleDictReference(LOCALE locale) {
- switch (locale) {
- case PATCHED:
- return &patch_dict_;
- case ORIGINAL:
- return &orig_dict_;
- default:
- LOG(ERROR) << "Unknown locale! Returning original";
- return &orig_dict_;
- }
- }
- //------------------------------------------------//
- // CHECKERS SECTION
- //------------------------------------------------//
- bool DatFile::CorrectSubfile(Subfile *file) {
- BinaryData mfile_id(20);
- ReadData(mfile_id, 20, file->file_offset() + 8);
- if (mfile_id.Empty())
- return false;
- return (mfile_id.CheckCompression() || file->file_id() == mfile_id.ToNumber<4>(0)) && file->file_size() < 50ll * 1024ll * 1024ll;
- }
- bool DatFile::CheckIfUpdatedByGame() {
- LOG(INFO) << "Checking if DatFile was updated by LotRO";
- if (!pending_patch_.empty())
- return true;
- bool updated = false;
- for (auto i : dictionary_) {
- long long file_id = i.first;
- Subfile *subfile = i.second;
- if (patch_dict_.count(file_id) == 0)
- continue;
- if (*subfile != *patch_dict_[file_id] && *subfile != *orig_dict_[file_id]) {
- orig_dict_.clear();
- patch_dict_.clear();
- LOG(INFO) << "DAT FILE WAS UPDATED!!!! CLEARING PATCH DATA";
- pending_patch_.insert(file_id);
- truncate64(filename_.c_str(), file_size_);
- // NOTE: returning here leaves the per-file cleanup below unreachable.
- return true;
- orig_dict_.erase(file_id);
- patch_dict_.erase(file_id);
- updated = true;
- dat_state_ = UPDATED;
- }
- }
- return updated;
- }
- bool DatFile::CheckIfNotPatched() {
- LOG(INFO) << "DatFile " << (patch_dict_.empty() ? "HASN'T " : "HAS already")
- << " been patched by LEGACY launcher!";
- return patch_dict_.empty();
- }
- bool DatFile::CheckIfPatchedByOldLauncher() {
- LOG(INFO) << "DatFile " << (dictionary_.count(620750000) == 0 ? "HASN'T " : "HAS already")
- << " been patched by OLD LAUNCHER!";
- return dictionary_.count(620750000) > 0;
- }
- //------------------------------------------------//
- // CATEGORY SECTION
- //------------------------------------------------//
- DAT_RESULT DatFile::EnableCategory(int category) {
- LOG(INFO) << "Enabling category " << category;
- if (inactive_categories.count(category) == 0)
- return SUCCESS;
- inactive_categories.erase(category);
- dat_state_ = UPDATED;
- for (auto file : dictionary_) {
- auto file_id = file.first;
- if (patch_dict_.count(file_id) > 0 && patch_dict_[file_id]->category == category) {
- *file.second = *patch_dict_[file_id];
- pending_dictionary_.insert(file_id);
- }
- }
- LOG(INFO) << "Category " << category << " enabled successfully";
- return SUCCESS;
- }
- DAT_RESULT DatFile::DisableCategory(int category) {
- LOG(INFO) << "Disabling category " << category;
- if (inactive_categories.count(category) != 0)
- return SUCCESS;
- inactive_categories.insert(category);
- dat_state_ = UPDATED;
- for (auto file : dictionary_) {
- auto file_id = file.first;
- if (orig_dict_.count(file_id) && orig_dict_[file_id]->category == category) {
- *file.second = *orig_dict_[file_id];
- pending_dictionary_.insert(file_id);
- }
- }
- LOG(INFO) << "Category " << category << " disabled successfully";
- return SUCCESS;
- }
- const std::set<long long> &DatFile::GetInactiveCategoriesList() {
- return inactive_categories;
- }
- const std::string &DatFile::filename() const {
- return filename_;
- }
- void DatFile::AddBufferedSize() {
- if (free_buffered_size_ >= MIN_BUFFERED_SIZE)
- return;
- BinaryData nulls(MAX_BUFFERED_SIZE);
- WriteData(nulls, MAX_BUFFERED_SIZE, file_size_);
- free_buffered_size_ = MAX_BUFFERED_SIZE;
- }
- //------------------------------------------------//
- // BACKUP SECTION
- //------------------------------------------------//
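- // Backups are plain byte-for-byte copies kept in the relative directory "DAT_LIBRARY_BACKUP/".
- // Illustrative sketch (the backup name is hypothetical):
- //
- //     dat.CreateBackup("client_local_English.dat.bak");
- //     if (dat.CheckIfBackupExists("client_local_English.dat.bak"))
- //         dat.RestoreFromBackup("client_local_English.dat.bak");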
- bool DatFile::CheckIfBackupExists(const std::string &backup_datname) {
- std::ifstream dst("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
- return !dst.fail();
- }
- DAT_RESULT DatFile::RemoveBackup(const std::string &backup_datname) {
- if (!CheckIfBackupExists(backup_datname))
- return SUCCESS;
- if (remove(("DAT_LIBRARY_BACKUP/" + backup_datname).c_str()) == 0)
- return SUCCESS;
- return REMOVE_FILE_ERROR;
- }
- DAT_RESULT DatFile::CreateBackup(const std::string &backup_datname) {
- auto filename = filename_;
- auto dat_id = dat_id_;
- LOG(INFO) << "Restoring .dat file " << filename << " from backup " << backup_datname;
- LOG(INFO) << " Closing DatFile...";
- CloseDatFile();
- LOG(INFO) << " Copying " << filename << " to " << backup_datname;
- mkdir("DAT_LIBRARY_BACKUP");
- std::ifstream src(filename, std::ios::binary);
- std::ofstream dst("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
- std::istreambuf_iterator<char> begin_source(src);
- std::istreambuf_iterator<char> end_source;
- std::ostreambuf_iterator<char> begin_dest(dst);
- std::copy(begin_source, end_source, begin_dest);
- src.close();
- dst.close();
- LOG(INFO) << " Done copying. Initializing restored" << filename << " DatFile...";
- InitDatFile(filename, dat_id);
- LOG(INFO) << "Restoring .dat file success!";
- return SUCCESS;
- }
- DAT_RESULT DatFile::RestoreFromBackup(const std::string &backup_datname) {
- auto filename = filename_;
- auto dat_id = dat_id_;
- LOG(INFO) << "Restoring .dat file " << filename << " from backup " << backup_datname;
- LOG(INFO) << " Closing DatFile...";
- CloseDatFile();
- LOG(INFO) << " Copying " << filename << " to " << backup_datname;
- mkdir("DAT_LIBRARY_BACKUP");
- std::ifstream src("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
- std::ofstream dst(filename, std::ios::binary);
- if (src.fail()) {
- LOG(ERROR) << "CANNOT RESTORE FILE FROM BACKUP - no backup specified with name " << backup_datname;
- return NO_BACKUP_ERROR;
- }
- std::istreambuf_iterator<char> begin_source(src);
- std::istreambuf_iterator<char> end_source;
- std::ostreambuf_iterator<char> begin_dest(dst);
- std::copy(begin_source, end_source, begin_dest);
- src.close();
- dst.close();
- LOG(INFO) << " Done copying. Initializing restored" << filename << " DatFile...";
- InitDatFile(filename, dat_id);
- LOG(INFO) << "Restoring .dat file success!";
- return SUCCESS;
- }
- }
- }