// DatFile.cpp
  1. //
  2. // Created by Иван_Архипов on 31.10.2017.
  3. //
  4. #include "DatFile.h"
  5. #include "BinaryData.h"
  6. #include "DatException.h"
  7. #include "SubDirectory.h"
  8. #include "Subfile.h"
  9. #include "SubfileData.h"
  10. #include <EasyLogging++/easylogging++.h>
  11. #include <unistd.h>
  12. #define ELPP_FEATURE_CRASH_LOG
  13. INITIALIZE_EASYLOGGINGPP
  14. #include <locale>
  15. #ifdef WIN32
  16. #define fseek _fseeki64
  17. #define ftell _ftelli64
  18. #endif
  19. extern "C++"
  20. {
  21. namespace LOTRO_DAT {
  22. DatFile::DatFile() {
  23. dat_state_ = CLOSED;
  24. root_directory_ = nullptr;
  25. file_handler_ = nullptr;
  26. el::Configurations defaultConf;
  27. defaultConf.setToDefault();
  28. defaultConf.setGlobally(el::ConfigurationType::Format,
  29. "%datetime %level %fbase (line %line) : %msg (function: %func)");
  30. defaultConf.setGlobally(el::ConfigurationType::ToFile, "true");
  31. defaultConf.setGlobally(el::ConfigurationType::Filename, "dat_library.log");
  32. defaultConf.setGlobally(el::ConfigurationType::ToStandardOutput, "false");
  33. defaultConf.setGlobally(el::ConfigurationType::PerformanceTracking, "true");
  34. defaultConf.setGlobally(el::ConfigurationType::MaxLogFileSize, "5242880"); // 5MB
  35. defaultConf.setGlobally(el::ConfigurationType::LogFlushThreshold, "1"); // Flush after every one log
  36. defaultConf.set(el::Level::Debug, el::ConfigurationType::Enabled, "false");
  37. defaultConf.set(el::Level::Debug, el::ConfigurationType::Filename, "dat_library_debug.log");
  38. el::Loggers::reconfigureAllLoggers(defaultConf);
  39. LOG(INFO) << "==================================================================";
  40. LOG(INFO) << "Starting new DatFile class instance";
  41. }
  42. DAT_RESULT DatFile::InitDatFile(const std::string &filename, int dat_id) {
  43. LOG(DEBUG) << "Started initialisation of DatFile " << filename;
  44. if (dat_state_ != CLOSED && filename == filename_) {
  45. LOG(DEBUG) << "Trying to reopen the same file: " << filename << ". Doing nothing.";
  46. return SUCCESS;
  47. }
  48. if (dat_state_ != CLOSED && filename != filename_) {
  49. LOG(DEBUG) << "DatFile wasn't closed yet. Closing in order to reopen.";
  50. if (CloseDatFile() != SUCCESS) {
  51. LOG(ERROR) << "Unable to perform CloseDatFile()! Aborting initialization!";
  52. return FAILED;
  53. }
  54. }
  55. dat_id_ = dat_id;
  56. dat_state_ = CLOSED;
  57. current_locale_ = ORIGINAL;
  58. root_directory_ = nullptr;
  59. file_handler_ = nullptr;
  60. filename_ = "none";
  61. DAT_RESULT result;
  62. DAT_RESULT return_value = SUCCESS;
  63. result = OpenDatFile(filename.c_str());
  64. if (result != SUCCESS) {
  65. LOG(ERROR) << "Unable to perform opening file. Aborting.";
  66. CloseDatFile();
  67. return result;
  68. }
  69. return_value = std::max(return_value, result);
  70. result = ReadSuperBlock();
  71. if (result <= 0) {
  72. LOG(ERROR) << "Unable to read super block. Aborting.";
  73. CloseDatFile();
  74. return result;
  75. }
  76. return_value = std::max(return_value, result);
  77. result = MakeDirectories();
  78. if (result <= 0) {
  79. LOG(ERROR) << "Unable to make directories. Aborting.";
  80. CloseDatFile();
  81. return result;
  82. }
  83. return_value = std::max(return_value, result);
  84. result = MakeDictionary();
  85. if (result <= 0) {
  86. LOG(ERROR) << "Unable to make dictionary. Aborting.";
  87. CloseDatFile();
  88. return result;
  89. }
  90. return_value = std::max(return_value, result);
  91. result = InitLocales();
  92. if (result <= 0) {
  93. LOG(ERROR) << "Unable to initialize locales. Aborting.";
  94. CloseDatFile();
  95. return result;
  96. }
  97. return_value = std::max(return_value, result);
  98. if (return_value >= 2) {
  99. LOG(WARNING) << "Dat file is corrupted. Trying to delete corrupted dictionary rows";
  100. if (RepairDatFile() != SUCCESS)
  101. return CRITICAL_DAT_ERROR;
  102. }
  103. LOG(INFO) << "File " << filename << " opened successfully!";
  104. filename_ = filename;
  105. dat_state_ = READY;
  106. return return_value;
  107. }
  108. DAT_STATE DatFile::DatFileState() const {
  109. return dat_state_;
  110. }
  111. DatFile::~DatFile() {
  112. CloseDatFile();
  113. }
  114. /// Extracts file with file_id.
  115. /// If path is undefined then it will be recognised as current working directory
  116. /// Output file path consists of "path + file_id + file_extension";
  117. /// NOTICE: The directory, mentioned in "std::string path" variable SHOULD BE ALREADY CREATED;
  118. /// Otherwise DatException() will be thrown.
  119. /// Returns true, if file was successfully extracted;
  120. /// Throws DatException() if undefined behaviour happened
  121. DAT_RESULT DatFile::ExtractFile(long long file_id, const std::string &path) {
  122. LOG(DEBUG) << "Extracting file " << file_id << " to path " << path;
  123. if (dat_state_ < READY) {
  124. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  125. return INCORRECT_STATE_ERROR;
  126. }
  127. BinaryData file_data = GetFileData(dictionary_[file_id], 8);
  128. if (file_data.size() == 0) {
  129. LOG(ERROR) << "File data is empty. Aborting extraction.";
  130. return NO_FILE_ERROR;
  131. }
  132. SubfileData export_data = dictionary_[file_id]->PrepareForExport(file_data);
  133. if (export_data.Empty()) {
  134. LOG(ERROR) << "Export data is empty. Aborting extraction.";
  135. return NO_FILE_ERROR;
  136. }
  137. if (export_data.binary_data.WriteToFile(path + export_data.options["ext"].as<std::string>()) != SUCCESS) {
  138. LOG(ERROR) << "Cannot write to file.";
  139. return WRITE_TO_FILE_ERROR;
  140. }
  141. LOG(DEBUG) << "File " << file_id << " extracted successfully";
  142. return SUCCESS;
  143. }
  144. /// Extracts file with file_id to database "db".
  145. /// DATABASE SHOULD BE ALREADY CREATED; Otherwise DatException will be called.
  146. /// NOTICE: The directory, mentioned in "std::string path" variable SHOULD BE ALREADY CREATED;
  147. /// Otherwise DatException() will be thrown.
  148. /// Returns true, if file was successfully extracted;
  149. /// Throws DatException() if undefined behaviour happened
  150. DAT_RESULT DatFile::ExtractFile(long long file_id, Database *db) {
  151. LOG(DEBUG) << "Extracting file " << file_id << " to database.";
  152. if (dat_state_ < READY) {
  153. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  154. return INCORRECT_STATE_ERROR;
  155. }
  156. BinaryData file_data;
  157. try {
  158. file_data = GetFileData(dictionary_[file_id], 8);
  159. } catch (std::exception &e) {
  160. LOG(ERROR) << "Caught" << e.what() << " exception.";
  161. return FAILED;
  162. }
  163. SubfileData export_data;
  164. try {
  165. export_data = dictionary_[file_id]->PrepareForExport(file_data);
  166. export_data.options["did"] = dat_id_;
  167. } catch (std::exception &e) {
  168. LOG(ERROR) << "Caught" << e.what() << " exception.";
  169. return FAILED;
  170. }
  171. if (export_data == SubfileData()) {
  172. LOG(WARNING) << "file with id " << dictionary_[file_id]->file_id() << " is empty. Passing it.";
  173. return SUCCESS;
  174. }
  175. try {
  176. db->PushFile(export_data);
  177. } catch (std::exception &e) {
  178. LOG(ERROR) << "Caught " << e.what() << " exception.";
  179. return FAILED;
  180. }
  181. LOG(DEBUG) << "File " << file_id << " extracted successfully";
  182. return SUCCESS;
  183. }
  184. /// Extracts all files with specific type to "path + type + file_id + file_part + extension" files;
  185. /// If path is undefined then it will be recognised as current working directory
  186. /// NOTICE: The directory, mentioned in "std::string path" variable SHOULD BE ALREADY CREATED;
  187. /// Otherwise DatException() will be thrown.
  188. /// Returns number of successfully extracted files
  189. /// Throws DatException() if undefined behaviour happened
  190. int DatFile::ExtractAllFilesByType(FILE_TYPE type, std::string path) {
  191. LOG(INFO) << "Extracting all files to path " << path;
  192. if (dat_state_ < READY) {
  193. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  194. return INCORRECT_STATE_ERROR;
  195. }
  196. int success = 0;
  197. for (auto i : dictionary_) {
  198. FILE_TYPE file_type = i.second->FileType();
  199. if (file_type == type) {
  200. success += (ExtractFile(i.second->file_id(), (path + std::to_string(i.second->file_id()))) == SUCCESS
  201. ? 1 : 0);
  202. }
  203. }
  204. LOG(INFO) << "Successfully extracted " << success << " files";
  205. return success;
  206. }
  207. /// Extracts all files with specific type to database "db";
  208. /// DATABASE SHOULD BE ALREADY CREATED; Otherwise DatException will be called.
  209. /// Returns number of successfully extracted files
  210. /// Throws DatException() if undefined behaviour happened
  211. int DatFile::ExtractAllFilesByType(FILE_TYPE type, Database *db) {
  212. LOG(INFO) << "Extracting all files to database...";
  213. if (dat_state_ < READY) {
  214. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  215. return INCORRECT_STATE_ERROR;
  216. }
  217. int success = 0;
  218. for (auto i : dictionary_) {
  219. FILE_TYPE file_type = i.second->FileType();
  220. if (file_type == type) {
  221. success += (ExtractFile(i.second->file_id(), db) == SUCCESS ? 1 : 0);
  222. }
  223. }
  224. LOG(INFO) << "Extracted " << success << " files";
  225. return success;
  226. }
  227. // TODO: Write description and make asserts
  228. DAT_RESULT DatFile::PatchFile(const char *filename, YAML::Node options) {
  229. LOG(DEBUG) << "Patching file with filename" << filename << " and id = " << options["fid"].as<long long>();
  230. if (dat_state_ < READY) {
  231. LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
  232. return INCORRECT_STATE_ERROR;
  233. }
  234. if (options["did"].IsDefined() && options["did"].as<int>() != dat_id_)
  235. return INCORRECT_DAT_ID;
  236. BinaryData data;
  237. data.ReadFromFile(filename);
  238. auto file_id = options["fid"].as<long long>();
  239. if (dictionary_[file_id] == nullptr) {
  240. LOG(ERROR) << "Cannot patch file - there is no file in dictionary with file_id = " << file_id;
  241. return NO_FILE_ERROR;
  242. }
  243. BinaryData old_data = GetFileData(dictionary_[file_id]);
  244. if (old_data.Empty()) {
  245. LOG(ERROR) << "GetFileData returned empty data. Aborting.";
  246. return DAT_PATCH_FILE_ERROR;
  247. }
  248. data = dictionary_[file_id]->MakeForImport(old_data, SubfileData(data, u"", options));
  249. try {
  250. DAT_RESULT result = ApplyFilePatch(dictionary_[file_id], data);
  251. if (result != SUCCESS)
  252. return result;
  253. } catch (std::exception &e) {
  254. LOG(ERROR) << "Caught " << e.what() << " exception.";
  255. return FAILED;
  256. }
  257. LOG(DEBUG) << "Successfully patched file with filename = " << filename << " and id = "
  258. << options["fid"].as<long long>();
  259. return SUCCESS;
  260. }
  261. // TODO: Write description and make asserts
  262. DAT_RESULT DatFile::PatchFile(const SubfileData &data, bool rewrite_original) {
  263. LOG(DEBUG) << "Patching file with id = " << data.options["fid"].as<long long>()
  264. << (rewrite_original ? " REWRITING ORIGINAL FILE." : ".");
  265. if (dat_state_ < READY) {
  266. LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
  267. return INCORRECT_STATE_ERROR;
  268. }
  269. auto file_id = data.options["fid"].as<long long>();
  270. Subfile *file = dictionary_[file_id];
  271. if (file == nullptr) {
  272. LOG(ERROR) << "Cannot patch file - there is no file in dictionary with file_id = " << file_id;
  273. return NO_FILE_ERROR;
  274. }
  275. // If file has inactive category, then we should set it to patched state in order to commit patch and
  276. // then in ApplyFilePatch(), if new category is still inactive, return dictionary to its original state;
  277. if (inactive_categories.count(file->category) != 0 && patch_dict_.count(file_id) != 0) {
  278. dictionary_[file_id]->file_offset_ = patch_dict_[file_id]->file_offset_;
  279. dictionary_[file_id]->file_size_ = patch_dict_[file_id]->file_size_;
  280. dictionary_[file_id]->block_size_ = patch_dict_[file_id]->block_size_;
  281. dictionary_[file_id]->timestamp_ = patch_dict_[file_id]->timestamp_;
  282. dictionary_[file_id]->version_ = patch_dict_[file_id]->version_;
  283. }
  284. if (data.options["cat"].IsDefined()) {
  285. file->category = data.options["cat"].as<long long>();
  286. } else {
  287. file->category = 1;
  288. }
  289. BinaryData old_data = GetFileData(file);
  290. if (old_data.Empty()) {
  291. LOG(ERROR) << "GetFileData returned empty data. Aborting.";
  292. return DAT_PATCH_FILE_ERROR;
  293. }
  294. BinaryData patch_data = file->MakeForImport(old_data, data);
  295. try {
  296. DAT_RESULT result = ApplyFilePatch(file, patch_data, rewrite_original);
  297. if (result != SUCCESS)
  298. return result;
  299. } catch (std::exception &e) {
  300. LOG(ERROR) << "Caught " << e.what() << " exception";
  301. return FAILED;
  302. }
  303. LOG(DEBUG) << "Patched successfully file " << data.options["fid"].as<long long>()
  304. << (rewrite_original ? " REWRITING ORIGINAL FILE." : ".");
  305. return SUCCESS;
  306. }
  307. // TODO: Write description
  308. DAT_RESULT DatFile::PatchAllDatabase(Database *db) {
  309. LOG(INFO) << "Patching all database";
  310. if (dat_state_ < READY) {
  311. LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
  312. return INCORRECT_STATE_ERROR;
  313. }
  314. SubfileData data;
  315. data = db->GetNextFile();
  316. while (!data.Empty()) {
  317. DAT_RESULT result = PatchFile(data);
  318. if (result != SUCCESS)
  319. LOG(ERROR) << "Cannot patch file" << data.options["fid"].as<long long>() << " continuing";
  320. data = db->GetNextFile();
  321. }
  322. DAT_RESULT result = CommitChanges();
  323. if (result != SUCCESS)
  324. return result;
  325. LOG(INFO) << "Successfully patched whole database";
  326. return SUCCESS;
  327. }
  328. /// DatFile::WriteUnorderedDictionary(...);
  329. /// Prints list of all found files with some information about them to file.
  330. /// Gets std::string path - path to directory, where the file will be written with name "dict.txt"
  331. DAT_RESULT DatFile::WriteUnorderedDictionary(std::string path) const {
  332. LOG(INFO) << "Writing unordered dictionary to " << path << "dict.txt";
  333. FILE *f = nullptr;
  334. fopen_s(&f, (path + "dict.txt").c_str(), "w");
  335. if (f == nullptr) {
  336. LOG(ERROR) << "Cannot open file " << path + "dict.txt";
  337. return WRITE_TO_FILE_ERROR;
  338. }
  339. fprintf(f, "file_id offset size size2 extension\n");
  340. for (auto i : dictionary_) {
  341. fprintf(f, "%lld %lld %lld %lld %s\n", i.second->file_id(), i.second->file_offset(), i.second->file_size(),
  342. i.second->block_size(), i.second->Extension().c_str());
  343. }
  344. fclose(f);
  345. LOG(INFO) << "Unordered dictionary was written successfully to " << path << "dict.txt";
  346. return SUCCESS;
  347. }
  348. /// DatFile::files_number();
  349. /// Returns amount of files, found in dictionaries of DatFile. Some if them may be empty or erased.
  350. long long DatFile::files_number() const {
  351. return dictionary_.size();
  352. }
  353. /// DatFile::GetFileData()
  354. /// Returns BinaryData, which contains of subfile data, made from parts of file in DatFile
  355. // TODO: ASSERTS
  356. BinaryData DatFile::GetFileData(const Subfile *file, long long int offset) {
  357. LOG(DEBUG) << "Getting file " << file->file_id() << " data";
  358. try {
  359. BinaryData mfile_id(20);
  360. ReadData(mfile_id, 20, file->file_offset() + 8);
  361. if (mfile_id.Empty()) {
  362. LOG(ERROR) << "Error while reading file " << file->file_id() << " header (offset = "
  363. << file->file_offset() << "); Aborting.";
  364. return BinaryData(0);
  365. }
  366. if (!mfile_id.CheckCompression() && file->file_id() != mfile_id.ToNumber<4>(0)) {
  367. LOG(ERROR) << "Bad DatFile::GetFileData() - file_id in Subfile ("
  368. << file->file_id()
  369. << ") doesn't match to file_id (" << mfile_id.ToNumber<4>(0) << ")in DatFile.";
  370. return BinaryData(0);
  371. }
  372. BinaryData data((unsigned) (file->file_size() + (8 - offset)));
  373. if (file->block_size() >= file->file_size() + 8) {
  374. ReadData(data, file->file_size() + (8 - offset), file->file_offset() + offset);
  375. return data;
  376. }
  377. BinaryData fragments_count(4);
  378. ReadData(fragments_count, 4, file->file_offset());
  379. long long fragments_number = fragments_count.ToNumber<4>(0);
  380. long long current_block_size = file->block_size() - offset - 8 * fragments_number;
  381. ReadData(data, current_block_size, file->file_offset() + offset);
  382. BinaryData FragmentsDictionary(8 * unsigned(fragments_number));
  383. ReadData(FragmentsDictionary, 8 * unsigned(fragments_number),
  384. file->file_offset() + file->block_size() - 8 * fragments_number);
  385. for (long long i = 0; i < fragments_number; i++) {
  386. long long fragment_size = FragmentsDictionary.ToNumber<4>(8 * i);
  387. long long fragment_offset = FragmentsDictionary.ToNumber<4>(8 * i + 4);
  388. ReadData(data, std::min(fragment_size, file->file_size() - current_block_size), fragment_offset,
  389. current_block_size);
  390. current_block_size += fragment_size;
  391. }
  392. LOG(DEBUG) << "Successfully got file " << file->file_id() << " data";
  393. return data;
  394. } catch (std::exception &e) {
  395. LOG(ERROR) << "Caught " << e.what() << " exception";
  396. }
  397. return BinaryData(0);
  398. }
  399. /// DatFile special functions for opening and reading/writing raw data.
  400. /// Shouldn't be used by any external classes except Subfile and Subdirectory.
  401. DAT_RESULT DatFile::OpenDatFile(const char *dat_name) {
  402. LOG(DEBUG) << "Started opening DatFile";
  403. if (dat_state_ != CLOSED) {
  404. CloseDatFile();
  405. }
  406. file_handler_ = fopen(dat_name, "r+b");
  407. if (file_handler_ == nullptr) {
  408. LOG(ERROR) << "Unable to open file " << dat_name;
  409. return NO_FILE_ERROR;
  410. }
  411. fseek(file_handler_, 0, SEEK_END);
  412. file_size_ = ftell(file_handler_);
  413. fseek(file_handler_, 0, SEEK_SET);
  414. dat_state_ = SUCCESS_OPENED;
  415. LOG(DEBUG) << "Successfully opened DatFile";
  416. return SUCCESS;
  417. }
  418. DAT_RESULT DatFile::ReadSuperBlock() {
  419. LOG(DEBUG) << "Started reading superblock";
  420. if (dat_state_ != SUCCESS_OPENED) {
  421. LOG(ERROR) << "Dat state isn't SUCCESS_OPENED. Cannot perform extraction.";
  422. return INCORRECT_STATE_ERROR;
  423. }
  424. BinaryData data(1024);
  425. ReadData(data, 1024);
  426. constant1_ = data.ToNumber<4>(0x100);
  427. constant2_ = data.ToNumber<4>(0x140);
  428. version1_ = data.ToNumber<4>(0x14C);
  429. version2_ = data.ToNumber<4>(0x150);
  430. fragmentation_journal_offset_ = data.ToNumber<4>(0x154);
  431. root_directory_offset_ = data.ToNumber<4>(0x160);
  432. auto size1 = data.ToNumber<4>(0x148);
  433. if (constant1_ != 0x4C5000) {
  434. LOG(ERROR) << "variable at position 0x100 is not equal to .dat file constant!";
  435. return INCORRECT_SUPERBLOCK_ERROR;
  436. }
  437. if (constant2_ != 0x5442) {
  438. LOG(ERROR) << "variable at position 0x140 is not equal to .dat file constant!";
  439. return INCORRECT_SUPERBLOCK_ERROR;
  440. }
  441. if (file_size_ != size1) {
  442. LOG(ERROR) << "variable at 0x148 position is not equal to .dat file size!";
  443. file_size_ = size1;
  444. dat_state_ = SUCCESS_SUPERBLOCK;
  445. return CORRUPTED_FILE_WARNING;
  446. }
  447. dat_state_ = SUCCESS_SUPERBLOCK;
  448. LOG(DEBUG) << "Superblock read successfully";
  449. return SUCCESS;
  450. }
  451. DAT_RESULT DatFile::MakeDirectories() {
  452. LOG(DEBUG) << "Started making directories";
  453. if (dat_state_ != SUCCESS_SUPERBLOCK) {
  454. LOG(ERROR) << "Dat state isn't SUCCESS_SUPERBLOCK. Cannot make directories.";
  455. return INCORRECT_STATE_ERROR;
  456. }
  457. root_directory_ = new SubDirectory((unsigned) root_directory_offset_, this);
  458. dat_state_ = SUCCESS_DIRECTORIES;
  459. LOG(DEBUG) << "Directories made successfully";
  460. return SUCCESS;
  461. }
  462. DAT_RESULT DatFile::MakeDictionary() {
  463. LOG(DEBUG) << "Started making dictionary";
  464. if (dat_state_ != SUCCESS_DIRECTORIES) {
  465. LOG(ERROR) << "Dat state isn't SUCCESS_DIRECTORIES. Cannot make directories.";
  466. return INCORRECT_STATE_ERROR;
  467. }
  468. if (root_directory_ == nullptr) {
  469. LOG(ERROR) << "root_directory is nullptr!!";
  470. return INIT_ERROR;
  471. }
  472. root_directory_->MakeDictionary(dictionary_);
  473. dat_state_ = SUCCESS_DICTIONARY;
  474. LOG(DEBUG) << "Dictionary made successfull";
  475. return SUCCESS;
  476. }
  477. DAT_RESULT DatFile::ReadData(BinaryData &data, long long size, long long offset, long long data_offset) {
  478. if (dat_state_ == CLOSED) {
  479. LOG(ERROR) << "Dat state is CLOSED. Cannot read data.";
  480. data = BinaryData(0);
  481. return INIT_ERROR;
  482. }
  483. if (data_offset + size > data.size()) {
  484. LOG(ERROR) << "Trying to read more than BinaryData size: Reading " << size << " bytes from " << offset
  485. << " position.";
  486. data = BinaryData(0);
  487. return DAT_READ_ERROR;
  488. }
  489. if (offset + size > file_size_) {
  490. LOG(ERROR) << "Trying to read more than DatFile size elapsed: Reading " << size << " bytes from " << offset
  491. << " position.";
  492. data = BinaryData(0);
  493. return DAT_READ_ERROR;
  494. }
  495. fseek(file_handler_, offset, SEEK_SET);
  496. fread(data.data() + data_offset, unsigned(size), 1, file_handler_);
  497. return SUCCESS;
  498. }
  499. DAT_RESULT DatFile::WriteData(const BinaryData &data, long long size, long long offset, long long data_offset) {
  500. if (dat_state_ < READY) {
  501. LOG(ERROR) << "Dat state isn't READY. Cannot write data.";
  502. return INCORRECT_STATE_ERROR;
  503. }
  504. fseek(file_handler_, offset, SEEK_SET);
  505. if (data_offset + size > data.size()) {
  506. LOG(ERROR) << "Trying to write more than BinaryData size";
  507. return DAT_WRITE_ERROR;
  508. }
  509. fwrite(data.data() + data_offset, unsigned(size), 1, file_handler_);
  510. return SUCCESS;
  511. }
  512. /// Special functions used by patch process.
  513. /// Shouldn't be used by any external class.
  514. DAT_RESULT DatFile::ApplyFilePatch(Subfile *file, const BinaryData &data, bool rewrite_original) {
  515. LOG(DEBUG) << "Applying " << file->file_id() << " patch.";
  516. auto file_id = file->file_id();
  517. if (patched_list.count(file_id) != 0) {
  518. LOG(WARNING) << "Warning: DatFile::ApplyFilePatch - found 2 files in patch with the same file_id = "
  519. << file->file_id() << " Passing last...";
  520. return DUBLICATE_PATCH_FILES_ERROR;
  521. }
  522. if (current_locale() != PATCHED && !rewrite_original) {
  523. LOG(INFO) << "Changing locale to PATCHED(RU) in order to patch file";
  524. SetLocale(PATCHED);
  525. }
  526. if (current_locale() == PATCHED && rewrite_original && patch_dict_.count(file_id) != 0) {
  527. LOG(INFO) << "Changing locale to ORIGINAL in order to patch original version of file, which has both "
  528. << "original and patched versions.";
  529. SetLocale(ORIGINAL);
  530. }
  531. dat_state_ = UPDATED;
  532. if (orig_dict_.count(file_id) == 0 && !rewrite_original) {
  533. orig_dict_[file_id] = new Subfile(this, file->MakeHeaderData());
  534. }
  535. auto journal = GetFragmentationJournal();
  536. file->file_size_ = data.size() - 8;
  537. if ((patch_dict_.count(file_id) == 0 && !rewrite_original) || data.size() > file->block_size()) {
  538. if (journal[0].second != file_size_) {
  539. journal[0].second = file_size_;
  540. }
  541. file->file_offset_ = journal[0].second;
  542. file->block_size_ = std::max(data.size(), 256u);
  543. journal[0].second += data.size();
  544. BinaryData nulls(data.size());
  545. WriteData(nulls, nulls.size(), file_size_);
  546. this->file_size_ += data.size();
  547. }
  548. BinaryData fragments_count(4);
  549. fragments_count = BinaryData::FromNumber<4>(0);
  550. BinaryData file_data = fragments_count + data.CutData(4);
  551. if (file_id != file_data.ToNumber<4>(8)) {
  552. LOG(ERROR) << "Created data's file_id doesn't match to original! Patch wasn't written to .dat file";
  553. return INCORRECT_PATCH_FILE;
  554. }
  555. WriteData(file_data, file_data.size(), file->file_offset());
  556. patched_list.insert(file_id);
  557. if (!rewrite_original) {
  558. patch_dict_.erase(file_id); // Удалили старое значение в русском словаре
  559. patch_dict_[file_id] = new Subfile(this, file->MakeHeaderData()); // Создали новое значение
  560. }
  561. // If category is forbidden, then return file header data to original state
  562. if (inactive_categories.count(file->category) != 0) {
  563. dictionary_[file_id]->file_offset_ = orig_dict_[file_id]->file_offset_;
  564. dictionary_[file_id]->file_size_ = orig_dict_[file_id]->file_size_;
  565. dictionary_[file_id]->block_size_ = orig_dict_[file_id]->block_size_;
  566. dictionary_[file_id]->timestamp_ = orig_dict_[file_id]->timestamp_;
  567. dictionary_[file_id]->version_ = orig_dict_[file_id]->version_;
  568. }
  569. if (orig_dict_.count(file_id) != 0)
  570. orig_dict_[file_id]->category = file->category;
  571. if (patch_dict_.count(file_id) != 0)
  572. patch_dict_[file_id]->category = file->category;
  573. UpdateFragmentationJournal(journal);
  574. LOG(DEBUG) << "Successfully applied file " << file->file_id() << " patch.";
  575. return SUCCESS;
  576. }
  577. DAT_RESULT DatFile::UpdateSubdirectories() {
  578. // TODO: asserts
  579. LOG(DEBUG) << "Started updating subdirectories";
  580. root_directory_->UpdateDirectories(patched_list, dictionary_);
  581. LOG(DEBUG) << "Finished updating subdirectories";
  582. return SUCCESS;
  583. }
  584. std::vector<std::pair<long long, long long> > DatFile::GetFragmentationJournal() {
  585. LOG(DEBUG) << "Getting fragmentation journal";
  586. BinaryData data(8);
  587. DAT_RESULT res = ReadData(data, 8, fragmentation_journal_offset_ + 8);
  588. std::vector<std::pair<long long, long long> > result;
  589. if (res != SUCCESS) {
  590. LOG(ERROR) << "Error " << res << " while reading data";
  591. return result;
  592. }
  593. result.emplace_back(std::make_pair(data.ToNumber<4>(0), data.ToNumber<4>(4)));
  594. LOG(DEBUG) << "Finished getting fragmentation journal";
  595. return result;
  596. }
  597. DAT_RESULT DatFile::UpdateHeader() {
  598. LOG(DEBUG) << "Updating header";
  599. WriteData(BinaryData::FromNumber<4>(constant1_), 4, 0x100);
  600. WriteData(BinaryData::FromNumber<4>(constant2_), 4, 0x140);
  601. WriteData(BinaryData::FromNumber<4>(file_size_), 4, 0x148);
  602. WriteData(BinaryData::FromNumber<4>(version1_), 4, 0x14C);
  603. WriteData(BinaryData::FromNumber<4>(version2_), 4, 0x150);
  604. WriteData(BinaryData::FromNumber<4>(fragmentation_journal_offset_), 4, 0x154);
  605. WriteData(BinaryData::FromNumber<4>(root_directory_offset_), 4, 0x160);
  606. LOG(DEBUG) << "Finished updating header";
  607. return SUCCESS;
  608. }
  609. DAT_RESULT DatFile::UpdateFragmentationJournal(const std::vector<std::pair<long long, long long> > &journal) {
  610. LOG(DEBUG) << "Updating fragmentation journal";
  611. for (unsigned i = 0; i < journal.size(); i++) {
  612. long long size = journal[i].first;
  613. long long offset = journal[i].second;
  614. WriteData(BinaryData::FromNumber<4>(size), 4, fragmentation_journal_offset_ + 8 * (i + 1));
  615. WriteData(BinaryData::FromNumber<4>(offset), 4, fragmentation_journal_offset_ + 8 * (i + 1) + 4);
  616. }
  617. LOG(DEBUG) << "Finished updating fragmentation journal";
  618. return SUCCESS;
  619. }
  620. DAT_RESULT DatFile::CommitChanges() {
  621. LOG(INFO) << "Started commiting changes";
  622. if (dat_state_ != UPDATED) {
  623. LOG(DEBUG) << "Commiting changes to file with state != UPDATED. Nothing to do";
  624. return SUCCESS;
  625. }
  626. LOG(INFO) << "There are some updated files. Rewriting dictionary...";
  627. CommitLocales();
  628. auto journal = GetFragmentationJournal();
  629. UpdateFragmentationJournal(journal);
  630. UpdateHeader();
  631. UpdateSubdirectories();
  632. LOG(INFO) << "Changed " << patched_list.size() << " files...";
  633. patched_list.clear();
  634. dat_state_ = READY;
  635. LOG(INFO) << "Done Commiting changes!";
  636. return SUCCESS;
  637. }
  638. DAT_RESULT DatFile::CloseDatFile() {
  639. LOG(INFO) << "Closing DatFile";
  640. if (dat_state_ == CLOSED) {
  641. LOG(INFO) << "DatFile is already closed. Nothing to do";
  642. return SUCCESS;
  643. }
  644. CommitChanges();
  645. orig_dict_.clear();
  646. patched_list.clear();
  647. pending_patch_.clear();
  648. current_locale_ = ORIGINAL;
  649. if (file_handler_ != nullptr) {
  650. fclose(file_handler_);
  651. }
  652. delete root_directory_;
  653. dictionary_.clear();
  654. patched_list.clear();
  655. truncate64(filename_.c_str(), file_size_);
  656. filename_ = "none";
  657. dat_state_ = CLOSED;
  658. LOG(INFO) << "File closed successfully.";
  659. return SUCCESS;
  660. }
  661. // LOCALE MANAGING SECTION
DAT_RESULT DatFile::InitLocales() {
    LOG(INFO) << "Initialising locales...";
    BinaryData dicts_data;
    // File id 2013266257 is the dedicated subfile where CommitLocales()
    // stores the serialized locale dictionaries.
    if (dictionary_.count(2013266257) != 0)
        dicts_data = GetFileData(dictionary_[2013266257]);
    // 29 = 14 bytes of pre-existing data + the 15-byte magic string below;
    // anything shorter cannot contain valid locale data.
    if (dicts_data.size() < 29) {
        LOG(WARNING) << "Locales file is empty.. Initialising locale dicts as empty";
        LOG(INFO) << "Could't find locales file or it's corrupted/empty... Continuing without locales";
        return SUCCESS;
    }
    // Magic marker written by CommitLocales(); a trailing '\0' is appended so
    // the raw bytes can be read back as a C string.
    BinaryData hi_data = dicts_data.CutData(14, 29) + BinaryData("\0", 1);
    std::string hi = std::string((char *) (hi_data.data()));
    LOG(DEBUG) << "hi info is " << hi;
    if (hi != "Hi from Gi1dor!") {
        LOG(WARNING) << "Didn't receive 'hi' from Gi1dor... Initialising locale dicts as empty";
        LOG(INFO) << "Could't init locales' file... Continuing without them";
        return SUCCESS;
    }
    int offset = 29;
    // 4-byte tag recording which locale was active when the dicts were saved.
    BinaryData current_locale_data = dicts_data.CutData(offset, offset + 4) + BinaryData("\0", 1);
    std::string locale((char *) (current_locale_data.data()));
    offset += 4;
    LOG(DEBUG) << "current locale:" << locale;
    if (locale != "PATC" && locale != "ORIG") {
        LOG(WARNING) << "Incorrect locale... Initialising locale dicts as empty";
        LOG(INFO) << "Could't recognize locale... Continuing without locales";
        return SUCCESS;
    }
    current_locale_ = (locale == "PATC" ? PATCHED : ORIGINAL);
    // Serialized layout (must stay in sync with CommitLocales()):
    // 14 bytes for old data
    // 15 bytes for "Hi from Gi1dor"
    // 4 bytes for LOCALE
    // 4 bytes for orig_dict.size()
    // (32 + 4) * orig_dict.size() bytes for orig_dict data
    // 4 bytes for patch_dict.size()
    // (32 + 4) * patch_dict.size() bytes for patch_dict data
    // 4 bytes for inactive_categories dict
    // 4 * inactive_categories.size() bytes for inactive_categories data
    size_t orig_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    // Each entry: 32-byte subfile header followed by a 4-byte category id.
    for (size_t i = 0; i < orig_dict_size; i++) {
        auto file = new Subfile(this, dicts_data.CutData(offset, offset + 32));
        orig_dict_[file->file_id()] = file;
        offset += 32;
        orig_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
        offset += 4;
        if (orig_dict_[file->file_id()]->category == 0)
            LOG(DEBUG) << "file category is undefined (0)!";
    }
    size_t patch_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    // Same 32+4 byte layout as the original-locale entries above.
    for (size_t i = 0; i < patch_dict_size; i++) {
        auto file = new Subfile(this, dicts_data.CutData(offset, offset + 32));
        patch_dict_[file->file_id()] = file;
        offset += 32;
        patch_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
        offset += 4;
        if (patch_dict_[file->file_id()]->category == 0)
            LOG(DEBUG) << "file category is undefined (0)!";
    }
    // Despite the variable name, these are the categories currently disabled.
    size_t active_patches_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    for (size_t i = 0; i < active_patches_dict_size; i++) {
        inactive_categories.insert(dicts_data.ToNumber<4>(offset));
        offset += 4;
    }
    LOG(INFO) << "There are " << patch_dict_.size() << " files in patch locale dictionary";
    LOG(INFO) << "There are " << orig_dict_.size() << " files in original locale dictionary";
    std::string inactive_cat_s;
    for (auto i : inactive_categories) {
        inactive_cat_s += std::to_string(i) + " ";
    }
    LOG(INFO) << "Unactive patches now: " << inactive_cat_s;
    LOG(INFO) << "Finished initialising locales";
    return SUCCESS;
}
  738. std::map<long long, Subfile *> *DatFile::GetLocaleDictReference(LOCALE locale) {
  739. switch (locale) {
  740. case PATCHED:
  741. return &patch_dict_;
  742. case ORIGINAL:
  743. return &orig_dict_;
  744. default:
  745. LOG(ERROR) << "Unknown locale! Returning original";
  746. return &orig_dict_;
  747. }
  748. }
  749. bool DatFile::CorrectSubfile(Subfile *file) {
  750. BinaryData mfile_id(20);
  751. ReadData(mfile_id, 20, file->file_offset() + 8);
  752. if (mfile_id.Empty())
  753. return false;
  754. return mfile_id.CheckCompression() || file->file_id() == mfile_id.ToNumber<4>(0);
  755. }
  756. DAT_RESULT DatFile::RepairDatFile() {
  757. for (auto file : dictionary_) {
  758. auto subfile = file.second;
  759. auto file_id = file.first;
  760. if (CorrectSubfile(subfile))
  761. continue;
  762. if (orig_dict_.count(file_id) == 0 || subfile->file_offset() == orig_dict_[file_id]->file_offset())
  763. return CRITICAL_DAT_ERROR;
  764. dictionary_[file_id]->file_offset_ = orig_dict_[file_id]->file_offset_;
  765. dictionary_[file_id]->file_size_ = orig_dict_[file_id]->file_size_;
  766. dictionary_[file_id]->block_size_ = orig_dict_[file_id]->block_size_;
  767. dictionary_[file_id]->timestamp_ = orig_dict_[file_id]->timestamp_;
  768. dictionary_[file_id]->version_ = orig_dict_[file_id]->version_;
  769. patch_dict_.erase(file_id);
  770. orig_dict_.erase(file_id);
  771. }
  772. return SUCCESS;
  773. }
  774. DAT_RESULT DatFile::SetLocale(LOCALE locale) {
  775. LOG(INFO) << "Setting locale to " << (locale == PATCHED ? " PATCHED" : " ORIGINAL");
  776. if (dat_state_ < READY) {
  777. LOG(ERROR) << "Dat state isn't READY. Cannot set locale.";
  778. return INCORRECT_STATE_ERROR;
  779. }
  780. if (current_locale_ == locale) {
  781. return SUCCESS;
  782. }
  783. dat_state_ = UPDATED;
  784. auto dict = GetLocaleDictReference(locale);
  785. for (auto file : *dict) {
  786. if (dictionary_[file.first] == nullptr) {
  787. LOG(WARNING) << "In locale dictionary there is file with file_id = " << file.first
  788. << "which is not in .dat file! Passing it and removing from locale dictionary";
  789. dict->erase(file.first);
  790. continue;
  791. }
  792. if (dictionary_[file.first]->MakeHeaderData().CutData(8, 16) ==
  793. file.second->MakeHeaderData().CutData(8, 16) ||
  794. inactive_categories.count(orig_dict_[file.first]->category) != 0)
  795. continue;
  796. long long file_id = file.first;
  797. Subfile *new_file = file.second;
  798. dictionary_[file_id]->file_offset_ = new_file->file_offset_;
  799. dictionary_[file_id]->file_size_ = new_file->file_size_;
  800. dictionary_[file_id]->block_size_ = new_file->block_size_;
  801. dictionary_[file_id]->timestamp_ = new_file->timestamp_;
  802. dictionary_[file_id]->version_ = new_file->version_;
  803. patched_list.insert(file.first);
  804. dat_state_ = UPDATED;
  805. }
  806. current_locale_ = locale;
  807. CommitChanges();
  808. LOG(INFO) << "Locale set successfull";
  809. return SUCCESS;
  810. }
  811. bool DatFile::CheckIfUpdatedByGame() {
  812. LOG(INFO) << "Checking if DatFile was updated by LotRO";
  813. if (!pending_patch_.empty())
  814. return true;
  815. if (current_locale_ == ORIGINAL)
  816. return false;
  817. bool updated = false;
  818. for (auto i : dictionary_) {
  819. long long file_id = i.first;
  820. Subfile *subfile = i.second;
  821. if (inactive_categories.count(subfile->category) > 0)
  822. continue;
  823. if (patch_dict_.count(file_id) > 0
  824. && (subfile->file_size() != patch_dict_[file_id]->file_size()
  825. || subfile->file_offset() != patch_dict_[file_id]->file_offset()
  826. || subfile->block_size() != patch_dict_[file_id]->block_size())) {
  827. orig_dict_.erase(file_id);
  828. patch_dict_.erase(file_id);
  829. pending_patch_.insert(file_id);
  830. updated = true;
  831. dat_state_ = UPDATED;
  832. }
  833. }
  834. CommitChanges();
  835. LOG(INFO) << "Dat file " << (updated ? "WAS " : "WASN'T ") << "updated by game.";
  836. return updated;
  837. }
  838. DAT_RESULT DatFile::RepairPatches(Database *db) {
  839. // TODO: Find errors
  840. LOG(INFO) << "Repairing patches";
  841. SubfileData data;
  842. data = db->GetNextFile();
  843. while (!data.Empty()) {
  844. if (pending_patch_.count(data.options["fid"].as<long long>()) > 0) {
  845. PatchFile(data);
  846. }
  847. data = db->GetNextFile();
  848. }
  849. CommitChanges();
  850. LOG(INFO) << "Successfully repaired with database";
  851. return SUCCESS;
  852. }
// Clears the set of files awaiting re-patching once a repair pass is done.
// Always returns SUCCESS.
DAT_RESULT DatFile::FinishRepairingPatches() {
    LOG(INFO) << "Finishing repairing patches";
    pending_patch_.clear();
    return SUCCESS;
}
  858. LOCALE DatFile::current_locale() {
  859. if (dat_state_ < READY) {
  860. LOG(ERROR) << "dat_file is in incorrect state!";
  861. return ORIGINAL;
  862. }
  863. if (current_locale_ != PATCHED && current_locale_ != ORIGINAL) {
  864. LOG(ERROR) << "locale has incorrect value. Setting it to original";
  865. current_locale_ = ORIGINAL;
  866. }
  867. return current_locale_;
  868. }
  869. DAT_RESULT DatFile::CommitLocales() {
  870. LOG(INFO) << "Committing locales...";
  871. SubfileData data = dictionary_[2013266257]->PrepareForExport(GetFileData(dictionary_[2013266257]));
  872. data.options["fid"] = "2013266257";
  873. data.options["ext"] = ".unknown";
  874. BinaryData old_data = BinaryData(GetFileData(dictionary_[2013266257u]));
  875. // 14 bytes for old data
  876. // 15 bytes for "Hi from Gi1dor"
  877. // 4 bytes for LOCALE
  878. // 4 bytes for orig_dict.size()
  879. // (32 + 4) * orig_dict.size() bytes for orig_dict data
  880. // 4 bytes for patch_dict.size()
  881. // (32 + 4) * patch_dict.size() bytes for patch_dict data
  882. // 4 bytes for inactive_categories list
  883. // 4 * inactive_categories.size() bytes for inactive_categories data
  884. data.binary_data = BinaryData(14 + 15 + 4
  885. + 4 + (32 + 4) * orig_dict_.size()
  886. + 4 + (32 + 4) * patch_dict_.size()
  887. + 4 + 4 * inactive_categories.size());
  888. size_t current_size = 0;
  889. data.binary_data.Append(GetFileData(dictionary_[2013266257u]).CutData(0, 14), current_size);
  890. current_size += 14;
  891. data.binary_data.Append(BinaryData("Hi from Gi1dor!", 15), current_size);
  892. current_size += 15;
  893. data.binary_data.Append(BinaryData((current_locale_ == ORIGINAL ? "ORIG" : "PATC"), 4), current_size);
  894. current_size += 4;
  895. data.binary_data.Append(BinaryData::FromNumber<4>(orig_dict_.size()), current_size);
  896. current_size += 4;
  897. for (auto file : orig_dict_) {
  898. data.binary_data.Append(file.second->MakeHeaderData(), current_size);
  899. current_size += 32;
  900. data.binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
  901. current_size += 4;
  902. }
  903. data.binary_data.Append(BinaryData::FromNumber<4>(patch_dict_.size()), current_size);
  904. current_size += 4;
  905. for (auto file : patch_dict_) {
  906. data.binary_data.Append(file.second->MakeHeaderData(), current_size);
  907. current_size += 32;
  908. data.binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
  909. current_size += 4;
  910. }
  911. data.binary_data.Append(BinaryData::FromNumber<4>(inactive_categories.size()), current_size);
  912. current_size += 4;
  913. for (auto patch_id : inactive_categories) {
  914. data.binary_data.Append(BinaryData::FromNumber<4>(patch_id), current_size);
  915. current_size += 4;
  916. }
  917. DAT_RESULT result = PatchFile(data, true);
  918. if (result != SUCCESS)
  919. return result;
  920. LOG(INFO) << "Locales commited successfully";
  921. return SUCCESS;
  922. }
  923. DAT_RESULT DatFile::EnableCategory(int category) {
  924. LOG(INFO) << "Enabling category " << category;
  925. if (inactive_categories.count(category) == 0)
  926. return SUCCESS;
  927. inactive_categories.erase(category);
  928. dat_state_ = UPDATED;
  929. for (auto file : dictionary_) {
  930. auto file_id = file.first;
  931. if (patch_dict_.count(file_id) > 0 && patch_dict_[file_id]->category == category) {
  932. file.second->file_offset_ = patch_dict_[file_id]->file_offset_;
  933. file.second->file_size_ = patch_dict_[file_id]->file_size_;
  934. file.second->block_size_ = patch_dict_[file_id]->block_size_;
  935. file.second->timestamp_ = patch_dict_[file_id]->timestamp_;
  936. file.second->version_ = patch_dict_[file_id]->version_;
  937. patched_list.insert(file_id);
  938. }
  939. }
  940. LOG(INFO) << "Category " << category << " enabled successfully";
  941. return SUCCESS;
  942. }
  943. DAT_RESULT DatFile::DisableCategory(int category) {
  944. LOG(INFO) << "Disabling category " << category;
  945. if (inactive_categories.count(category) != 0)
  946. return SUCCESS;
  947. inactive_categories.insert(category);
  948. dat_state_ = UPDATED;
  949. for (auto file : dictionary_) {
  950. auto file_id = file.first;
  951. if (orig_dict_.count(file_id) && orig_dict_[file_id]->category == category) {
  952. file.second->file_offset_ = orig_dict_[file_id]->file_offset_;
  953. file.second->file_size_ = orig_dict_[file_id]->file_size_;
  954. file.second->block_size_ = orig_dict_[file_id]->block_size_;
  955. file.second->timestamp_ = orig_dict_[file_id]->timestamp_;
  956. file.second->version_ = orig_dict_[file_id]->version_;
  957. patched_list.insert(file_id);
  958. }
  959. }
  960. LOG(INFO) << "Category " << category << " disabled successfully";
  961. return SUCCESS;
  962. }
// Returns a read-only reference to the set of currently disabled categories.
// The reference stays valid for the lifetime of this DatFile.
const std::set<long long> &DatFile::GetInactiveCategoriesList() {
    return inactive_categories;
}
  966. bool DatFile::CheckIfNotPatched() {
  967. LOG(INFO) << "DatFile " << (patch_dict_.empty() ? "HASN'T " : "HAS already")
  968. << " been patched by LEGACY launcher!";
  969. return patch_dict_.empty();
  970. }
  971. bool DatFile::CheckIfPatchedByOldLauncher() {
  972. LOG(INFO) << "DatFile " << (dictionary_.count(620750000) == 0 ? "HASN'T " : "HAS already")
  973. << " been patched by OLD LAUNCHER!";
  974. return dictionary_.count(620750000) > 0;
  975. }
// Accessor for the path of the currently opened .dat file.
// Set to "none" after CloseDatFile().
const std::string &DatFile::filename() const {
    return filename_;
}
  979. }
  980. }