DatFile.cpp 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095
  1. //
  2. // Created by Иван_Архипов on 31.10.2017.
  3. //
  4. #define NOMINMAX
  5. #include "DatFile.h"
  6. #include "BinaryData.h"
  7. #include "SubDirectory.h"
  8. #include "SubFile.h"
  9. #include "SubfileData.h"
  10. #include "DatOperationResult.h"
  11. #include <EasyLogging++/easylogging++.h>
  12. //#include <unistd.h>
#include <algorithm>
#include <iterator>
#include <locale>
#include <vector>
  16. #define ELPP_FEATURE_CRASH_LOG
  17. INITIALIZE_EASYLOGGINGPP
  18. #ifdef WIN32
  19. #define fseek _fseeki64
  20. #define ftell _ftelli64
  21. #endif
  22. extern "C++"
  23. {
  24. namespace LOTRO_DAT {
  25. //------------------------------------------------//
  26. // INIT SECTION
  27. //------------------------------------------------//
  28. DatFile::DatFile() : io(*this) {
  29. dat_state_ = CLOSED;
  30. free_buffered_size_ = 0;
  31. orig_dict_.clear();
  32. patch_dict_.clear();
  33. dictionary_.clear();
  34. el::Configurations defaultConf;
  35. defaultConf.setToDefault();
  36. defaultConf.setGlobally(el::ConfigurationType::Format,
  37. "%datetime %level %fbase (line %line) : %msg (function: %func)");
  38. defaultConf.setGlobally(el::ConfigurationType::ToFile, "true");
  39. defaultConf.setGlobally(el::ConfigurationType::Filename, "dat_library.log");
  40. defaultConf.setGlobally(el::ConfigurationType::ToStandardOutput, "false");
  41. defaultConf.setGlobally(el::ConfigurationType::PerformanceTracking, "true");
  42. defaultConf.setGlobally(el::ConfigurationType::MaxLogFileSize, "5242880"); // 5MB
  43. defaultConf.setGlobally(el::ConfigurationType::LogFlushThreshold, "1"); // Flush after every one log
  44. defaultConf.set(el::Level::Debug, el::ConfigurationType::Enabled, "false");
  45. defaultConf.set(el::Level::Debug, el::ConfigurationType::Filename, "dat_library_debug.log");
  46. el::Loggers::reconfigureAllLoggers(defaultConf);
  47. LOG(INFO) << "==================================================================";
  48. LOG(INFO) << "Starting new DatFile class instance";
  49. }
  50. DAT_RESULT DatFile::InitDatFile(const std::string &filename, int dat_id) {
  51. LOG(DEBUG) << "Started initialisation of DatFile " << filename;
  52. if (dat_state_ != CLOSED && filename == filename_) {
  53. LOG(DEBUG) << "Trying to reopen the same file: " << filename << ". Doing nothing.";
  54. return SUCCESS;
  55. }
  56. if (dat_state_ != CLOSED && filename != filename_) {
  57. LOG(DEBUG) << "DatFile wasn't closed yet. Closing in order to reopen.";
  58. if (CloseDatFile() != SUCCESS) {
  59. LOG(ERROR) << "Unable to perform CloseDatFile()! Aborting initialization!";
  60. return FAILED;
  61. }
  62. }
  63. dat_id_ = dat_id;
  64. dat_state_ = CLOSED;
  65. current_locale_ = ORIGINAL;
  66. free_buffered_size_ = 0;
  67. filename_ = "none";
  68. DAT_RESULT result;
  69. DAT_RESULT return_value = SUCCESS;
  70. auto res = io.Init(filename);
  71. if (res.result == DatOperationResult::ERROR) {
  72. LOG(ERROR) << "ERROR! Unable to initialize input-output!";
  73. CloseDatFile();
  74. return NO_FILE_ERROR;
  75. }
  76. LOG(INFO) << "Starting MakeDictionary";
  77. result = MakeDictionary();
  78. if (result <= 0) {
  79. LOG(ERROR) << "Unable to make dictionary. Aborting.";
  80. CloseDatFile();
  81. return result;
  82. }
  83. return_value = std::max(return_value, result);
  84. LOG(INFO) << "Starting InitLocales";
  85. result = InitLocales();
  86. if (result <= 0) {
  87. LOG(ERROR) << "Unable to initialize locales. Aborting.";
  88. CloseDatFile();
  89. return result;
  90. }
  91. return_value = std::max(return_value, result);
  92. LOG(INFO) << "File " << filename << " opened successfully!";
  93. filename_ = filename;
  94. dat_state_ = READY;
  95. LOG(INFO) << "Making last preparations...";
  96. return_value = std::max(return_value, result);
  97. PerformDictionaryCheck();
  98. if (return_value >= 2) {
  99. LOG(WARNING) << "Dat file could be corrupted. Trying to delete corrupted dictionary rows";
  100. if (RepairDatFile() != SUCCESS)
  101. return CRITICAL_DAT_ERROR;
  102. }
  103. if (CheckIfUpdatedByGame()) {
  104. LOG(INFO) << ".dat file was updated by game! Need to reinitialize files and directories!";
  105. CloseDatFile();
  106. InitDatFile(filename, dat_id);
  107. }
  108. std::cout << "Visited subdirs: " << SubDirectory::visited_subdirectories_.size() << std::endl;
  109. std::cout << "Visited files: " << SubDirectory::visited_subfiles_.size() << std::endl;
  110. dat_without_patches_ = CheckIfNotPatched();
  111. LOG(INFO) << "Preparations made successfully! Init return value = " << return_value;
  112. return return_value;
  113. }
  114. DAT_RESULT DatFile::MakeDictionary() {
  115. LOG(DEBUG) << "Started making dictionary";
  116. if (dat_state_ != SUCCESS_DIRECTORIES) {
  117. LOG(ERROR) << "Dat state isn't SUCCESS_DIRECTORIES. Cannot make directories.";
  118. return INCORRECT_STATE_ERROR;
  119. }
  120. if (io.GetRootDirectory() == nullptr) {
  121. LOG(ERROR) << "root_directory is nullptr!!";
  122. return INIT_ERROR;
  123. }
  124. io.GetRootDirectory()->MakeDictionary(dictionary_);
  125. dat_state_ = SUCCESS_DICTIONARY;
  126. LOG(DEBUG) << "Dictionary made successfull";
  127. return SUCCESS;
  128. }
// Loads the library's own locale dictionaries (original vs. patched subfile
// headers) from the auxiliary block whose offset is stored at byte 300 of the
// .dat file. Any absent, old-version, or malformed block is treated as
// "no locales" and the function still returns SUCCESS -- locales are optional.
DAT_RESULT DatFile::InitLocales() {
    LOG(INFO) << "Initialising locales...";
    BinaryData dicts_data(4);
    // The 4 bytes at offset 300 hold the offset of the locale block.
    io.ReadData(dicts_data, 4, 300);
    long long dict_offset = dicts_data.ToNumber<4>(0);
    if (dict_offset == 0 || dict_offset + 8 >= io.GetActualDatSize()) {
        LOG(INFO) << "Dictionary offset is empty or incorrect. Passing.";
        return SUCCESS;
    }
    // Block header: [size:4][version:4][localed .dat size:4], then the payload.
    io.ReadData(dicts_data, 4, dict_offset);
    long long dict_size = dicts_data.ToNumber<4>(0);
    io.ReadData(dicts_data, 4, dict_offset + 4);
    long long dict_version = dicts_data.ToNumber<4>(0);
    io.ReadData(dicts_data, 4, dict_offset + 8);
    io.file_size = dicts_data.ToNumber<4>(0);
    LOG(INFO) << "Dictionary size is " << dict_size << ". Version is " << dict_version << ". Localed .dat size = "
              << io.file_size;
    // Version 101 is the only supported layout; anything else is discarded and
    // the stored offset is zeroed so it is not read again.
    if (dict_version != 101) {
        LOG(WARNING) << "DICTIONARY IS OLD!!!";
        orig_dict_.clear();
        patch_dict_.clear();
        io.WriteData(BinaryData::FromNumber<4>(0), 4, 300);
        dat_state_ = UPDATED;
        dat_without_patches_ = true;
        return SUCCESS;
    }
    // Read the whole payload at once.
    dicts_data = BinaryData((unsigned)dict_size);
    io.ReadData(dicts_data, dict_size, dict_offset + 12);
    if (dicts_data.size() < 15) {
        LOG(ERROR) << "Incorrect dictionary. Passing without it.";
        orig_dict_.clear();
        patch_dict_.clear();
        io.WriteData(BinaryData::FromNumber<4>(0), 4, 300);
        dat_state_ = UPDATED;
        dat_without_patches_ = true;
        return SUCCESS;
    }
    // Magic greeting written by CommitLocales(); doubles as a format check.
    BinaryData hi_data = dicts_data.CutData(0, 15) + BinaryData("\0", 1);
    std::string hi = std::string((char *) (hi_data.data()));
    LOG(DEBUG) << "hi info is " << hi;
    if (hi != "Hi from Gi1dor!") {
        LOG(WARNING) << "Didn't receive 'hi' from Gi1dor... Initialising locale dicts as empty";
        LOG(INFO) << "Could't init locales' file... Continuing without them";
        return SUCCESS;
    }
    int offset = 15;
    // 4-byte tag of the locale that is currently active on disk.
    BinaryData current_locale_data = dicts_data.CutData(offset, offset + 4) + BinaryData("\0", 1);
    std::string locale((char *) (current_locale_data.data()));
    offset += 4;
    LOG(DEBUG) << "current locale:" << locale;
    if (locale != "PATC" && locale != "ORIG") {
        LOG(WARNING) << "Incorrect locale... Initialising locale dicts as empty";
        LOG(INFO) << "Could't recognize locale... Continuing without locales";
        return SUCCESS;
    }
    current_locale_ = (locale == "PATC" ? PATCHED : ORIGINAL);
    // Payload layout after the greeting:
    // 15 bytes for "Hi from Gi1dor"
    // 4 bytes for LOCALE
    // 4 bytes for orig_dict.size()
    // (32 + 4) * orig_dict.size() bytes for orig_dict data
    // 4 bytes for patch_dict.size()
    // (32 + 4) * patch_dict.size() bytes for patch_dict data
    // 4 bytes for inactive_categories dict
    // 4 * inactive_categories.size() bytes for inactive_categories data
    size_t orig_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    for (size_t i = 0; i < orig_dict_size; i++) {
        // Each entry: 32-byte subfile header followed by a 4-byte category id.
        auto file = std::make_shared<SubFile>(*this, dicts_data.CutData(offset, offset + 32));
        orig_dict_[file->file_id()] = file;
        offset += 32;
        orig_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
        offset += 4;
        if (orig_dict_[file->file_id()]->category == 0)
            LOG(DEBUG) << "file category is undefined (0)!";
    }
    size_t patch_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    for (size_t i = 0; i < patch_dict_size; i++) {
        auto file = std::make_shared<SubFile>(*this, dicts_data.CutData(offset, offset + 32));
        patch_dict_[file->file_id()] = file;
        offset += 32;
        patch_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
        offset += 4;
        if (patch_dict_[file->file_id()]->category == 0)
            LOG(DEBUG) << "file category is undefined (0)!";
    }
    // Ids of categories whose patches are currently switched off.
    size_t active_patches_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    for (size_t i = 0; i < active_patches_dict_size; i++) {
        inactive_categories.insert(dicts_data.ToNumber<4>(offset));
        offset += 4;
    }
    LOG(INFO) << "There are " << patch_dict_.size() << " files in patch locale dictionary";
    LOG(INFO) << "There are " << orig_dict_.size() << " files in original locale dictionary";
    std::string inactive_cat_s;
    for (auto i : inactive_categories) {
        inactive_cat_s += std::to_string(i) + " ";
    }
    LOG(INFO) << "Unactive patches now: " << inactive_cat_s;
    LOG(INFO) << "Finished initialising locales";
    return SUCCESS;
}
  231. DAT_RESULT DatFile::PerformDictionaryCheck() {
  232. for (const auto& mpair : dictionary_) {
  233. auto file = mpair.second;
  234. auto file_id = mpair.first;
  235. if (CorrectSubfile(file))
  236. continue;
  237. if (current_locale_ == PATCHED && orig_dict_.count(file_id) > 0) {
  238. LOG(WARNING) << "Potential incorrect patched version of file " << file_id << ". Switching to original.";
  239. dictionary_[file_id] = orig_dict_[file_id];
  240. }
  241. if (!CorrectSubfile(file)) {
  242. LOG(ERROR) << "Incorrect file " << file_id << ". It's offset is said as " << file->file_offset()
  243. << ". Erasing it from dictionary.";
  244. dictionary_.erase(file_id);
  245. }
  246. }
  247. return SUCCESS;
  248. }
  249. //------------------------------------------------//
  250. // CLOSE SECTION
  251. //------------------------------------------------//
// Destructor: flushes pending changes and closes the file via CloseDatFile().
DatFile::~DatFile() {
    CloseDatFile();
}
// Flushes pending changes (locale dictionaries and modified file headers) and
// resets every member to its pristine "no file open" state. Safe to call
// repeatedly: calling it on an already-closed instance is a no-op.
DAT_RESULT DatFile::CloseDatFile() {
    LOG(INFO) << "Closing DatFile";
    if (dat_state_ == CLOSED) {
        LOG(INFO) << "DatFile is already closed. Nothing to do";
        return SUCCESS;
    }
    // Committing changes and updating/writing locales and header info
    // NOTE(review): io.DeInit() runs before CommitLocales()/CommitDirectories(),
    // both of which still call io.WriteData() afterwards -- confirm DeInit
    // leaves the underlying stream writable.
    io.DeInit();
    if (!pending_dictionary_.empty() || dat_state_ == UPDATED) {
        CommitLocales();
        CommitDirectories();
        //ModifyFragmentationJournal();
        //free_dat_size = 128248;
        //fragmentation_journal_end = 0;
        //fragmentation_journal_size = 1;
        //UpdateHeader();
    }
    // Reset all cached state so this instance can be reused for another file.
    current_locale_ = ORIGINAL;
    SubDirectory::visited_subdirectories_.clear();
    free_buffered_size_ = 0;
    filename_ = "none";
    orig_dict_.clear();
    patch_dict_.clear();
    pending_patch_.clear();
    inactive_categories.clear();
    pending_dictionary_.clear();
    dictionary_.clear();
    dat_state_ = CLOSED;
    dat_id_ = -1;
    LOG(INFO) << "File closed successfully.";
    return SUCCESS;
}
// Serialises both locale dictionaries (original + patched subfile headers) and
// the inactive-categories set into one binary blob, then writes it either over
// the existing locale block or -- if it no longer fits -- appends a new,
// padded block at the end of the .dat file.
DAT_RESULT DatFile::CommitLocales() {
    LOG(INFO) << "Committing locales...";
    // Blob layout (mirrors what InitLocales() reads back):
    // 15 bytes for "Hi from Gi1dor"
    // 4 bytes for LOCALE
    // 4 bytes for orig_dict.size()
    // (32 + 4) * orig_dict.size() bytes for orig_dict data
    // 4 bytes for patch_dict.size()
    // (32 + 4) * patch_dict.size() bytes for patch_dict data
    // 4 bytes for inactive_categories list
    // 4 * inactive_categories.size() bytes for inactive_categories data
    // NOTE(review): the leading "14 +" below over-allocates the buffer by 14
    // bytes relative to the layout above -- presumably harmless slack; confirm.
    BinaryData binary_data = BinaryData(14 + 15 + 4
                                        + 4 + (32 + 4) * orig_dict_.size()
                                        + 4 + (32 + 4) * patch_dict_.size()
                                        + 4 + 4 * inactive_categories.size());
    size_t current_size = 0;
    // Greeting + currently active locale tag.
    binary_data.Append(BinaryData("Hi from Gi1dor!", 15), current_size);
    current_size += 15;
    binary_data.Append(BinaryData((current_locale_ == ORIGINAL ? "ORIG" : "PATC"), 4), current_size);
    current_size += 4;
    // Original-locale entries: count, then 32-byte header + 4-byte category each.
    binary_data.Append(BinaryData::FromNumber<4>(orig_dict_.size()), current_size);
    current_size += 4;
    for (const auto &file : orig_dict_) {
        binary_data.Append(file.second->MakeHeaderData(), current_size);
        current_size += 32;
        binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
        current_size += 4;
    }
    // Patched-locale entries, same shape.
    binary_data.Append(BinaryData::FromNumber<4>(patch_dict_.size()), current_size);
    current_size += 4;
    for (const auto &file : patch_dict_) {
        binary_data.Append(file.second->MakeHeaderData(), current_size);
        current_size += 32;
        binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
        current_size += 4;
    }
    // Inactive category ids.
    binary_data.Append(BinaryData::FromNumber<4>(inactive_categories.size()), current_size);
    current_size += 4;
    for (auto patch_id : inactive_categories) {
        binary_data.Append(BinaryData::FromNumber<4>(patch_id), current_size);
        current_size += 4;
    }
    // Locate the existing locale block (its offset is stored at byte 300).
    BinaryData dicts_data(4);
    io.ReadData(dicts_data, 4, 300);
    long long dict_offset = dicts_data.ToNumber<4>(0);
    io.ReadData(dicts_data, 4, dict_offset);
    long long dict_size = dicts_data.ToNumber<4>(0);
    if (binary_data.size() > dict_size || dict_offset == 0) {
        // Blob doesn't fit (or no block exists yet): append a new block at the
        // end of the file, version tag 101, and pad with ~20 MiB of zeroes so
        // future commits can be done in place.
        io.WriteData(BinaryData::FromNumber<4>(io.file_size), 4, 300);
        io.WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size() + 4, 20u * 1024u * 1024u)), 4,
                     io.file_size);
        io.WriteData(BinaryData::FromNumber<4>(101), 4, io.file_size + 4);
        io.WriteData(BinaryData::FromNumber<4>(io.file_size + binary_data.size() + 12 + 20 * 1024 * 1024), 4,
                     io.file_size + 8); // Writing current file size;
        io.WriteData(binary_data, binary_data.size(), io.file_size + 12);
        io.file_size += binary_data.size() + 12;
        // Adding space for 25 megabytes locales file in total.
        BinaryData nulls(unsigned(20 * 1024 * 1024));
        io.WriteData(nulls, nulls.size(), io.file_size);
        io.file_size += nulls.size();
    } else {
        // Blob fits: overwrite the existing block in place (version stays 101).
        io.WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size(), 20u * 1024u * 1024u)), 4, dict_offset);
        io.WriteData(BinaryData::FromNumber<4>(101), 4, dict_offset + 4);
        io.WriteData(BinaryData::FromNumber<4>(io.file_size), 4, dict_offset + 8); // Writing current file size;
        io.WriteData(binary_data, binary_data.size(), dict_offset + 12);
    }
    LOG(INFO) << "Locales commited successfully";
    return SUCCESS;
}
  355. DAT_RESULT DatFile::CommitDirectories() {
  356. for (auto file_id : pending_dictionary_) {
  357. if (dictionary_[file_id] == nullptr || !CorrectSubfile(dictionary_[file_id]))
  358. continue;
  359. io.WriteData(dictionary_[file_id]->MakeHeaderData(), 32, dictionary_[file_id]->dictionary_offset());
  360. }
  361. pending_dictionary_.clear();
  362. return SUCCESS;
  363. }
// Rewrites the free-block record that sits just past the fragmentation
// journal so that it points at freshly zeroed space appended to the end of
// the file. No-op when the journal is empty.
DAT_RESULT DatFile::ModifyFragmentationJournal() {
    if (io.fragmentation_journal_size == 0)
        return SUCCESS;
    LOG(DEBUG) << "Modifying fragmentation journal";
    BinaryData data(4);
    // Entries appear to be 8 bytes each ([size:4][offset:4]); this reads the
    // size field located right after the last entry -- TODO confirm layout.
    io.ReadData(data, 4, io.fragmentation_journal_offset + 8 * io.fragmentation_journal_size);
    LOG(INFO) << "FREE_SIZE BLOCK = " << data.ToNumber<4>(0);
    long long free_size = data.ToNumber<4>(0);
    long long free_offset = io.file_size;
    // Reserve `free_size` zero bytes at the end of the file...
    BinaryData nulldata = BinaryData(unsigned(free_size));
    io.WriteData(nulldata, nulldata.size(), io.file_size);
    io.file_size += nulldata.size();
    // ...and point the journal record at that freshly appended region.
    io.WriteData(BinaryData::FromNumber<4>(free_size), 4,
                 io.fragmentation_journal_offset + 8 * io.fragmentation_journal_size);
    io.WriteData(BinaryData::FromNumber<4>(free_offset), 4,
                 io.fragmentation_journal_offset + 8 * io.fragmentation_journal_size + 4);
    //nulldata = BinaryData(8);
    //io.WriteData(nulldata, nulldata.size(), fragmentation_journal_offset + 16);
    LOG(DEBUG) << "Finished modifying fragmentation journal";
    return SUCCESS;
}
// For every dictionary entry that fails validation, restores the header kept
// in the original-locale dictionary. Returns CRITICAL_DAT_ERROR when no
// usable original version exists (or it points at the same, broken offset).
DAT_RESULT DatFile::RepairDatFile() {
    for (const auto& file : dictionary_) {
        auto subfile = file.second;
        auto file_id = file.first;
        if (CorrectSubfile(subfile))
            continue;
        // No original fallback, or the "original" shares the broken offset --
        // nothing to restore from.
        if (orig_dict_.count(file_id) == 0 || subfile->file_offset() == orig_dict_[file_id]->file_offset())
            return CRITICAL_DAT_ERROR;
        // Copy the original header over the broken one in place, then drop
        // both per-locale entries so the file is treated as unpatched again.
        *dictionary_[file_id] = *orig_dict_[file_id];
        patch_dict_.erase(file_id);
        orig_dict_.erase(file_id);
    }
    return SUCCESS;
}
  399. //------------------------------------------------//
  400. // DAT INFO SECTION
  401. //------------------------------------------------//
// Current lifecycle state of this DatFile (e.g. CLOSED, READY, UPDATED).
DAT_STATE DatFile::DatFileState() const {
    return dat_state_;
}
  405. long long DatFile::files_number() const {
  406. return dictionary_.size();
  407. }
  408. //------------------------------------------------//
  409. // EXTRACT SECTION
  410. //------------------------------------------------//
  411. DAT_RESULT DatFile::ExtractFile(long long file_id, const std::string &path) {
  412. LOG(DEBUG) << "Extracting file " << file_id << " to path " << path;
  413. if (dat_state_ < READY) {
  414. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  415. return INCORRECT_STATE_ERROR;
  416. }
  417. BinaryData file_data = GetFileData(dictionary_[file_id], 8);
  418. if (file_data.size() == 0) {
  419. LOG(ERROR) << "File data is empty. Aborting extraction.";
  420. return NO_FILE_ERROR;
  421. }
  422. SubfileData export_data = dictionary_[file_id]->PrepareForExport(file_data);
  423. if (export_data.Empty()) {
  424. LOG(ERROR) << "Export data is empty. Aborting extraction.";
  425. return NO_FILE_ERROR;
  426. }
  427. if (export_data.binary_data.WriteToFile(path + export_data.options["ext"].as<std::string>()) != SUCCESS) {
  428. LOG(ERROR) << "Cannot write to file.";
  429. return WRITE_TO_FILE_ERROR;
  430. }
  431. LOG(DEBUG) << "File " << file_id << " extracted successfully";
  432. return SUCCESS;
  433. }
  434. DAT_RESULT DatFile::ExtractFile(long long file_id, Database *db) {
  435. LOG(DEBUG) << "Extracting file " << file_id << " to database.";
  436. if (dat_state_ < READY) {
  437. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  438. return INCORRECT_STATE_ERROR;
  439. }
  440. BinaryData file_data = GetFileData(dictionary_[file_id], 8);
  441. if (file_data.Empty()) {
  442. LOG(WARNING) << "File with id " << dictionary_[file_id]->file_id() << " is empty. Passing it.";
  443. return SUCCESS;
  444. }
  445. SubfileData export_data;
  446. export_data = dictionary_[file_id]->PrepareForExport(file_data);
  447. export_data.options["did"] = dat_id_;
  448. if (export_data == SubfileData()) {
  449. LOG(WARNING) << "File with id " << dictionary_[file_id]->file_id() << " is empty or incorrect.";
  450. return SUCCESS;
  451. }
  452. try {
  453. db->PushFile(export_data);
  454. } catch (std::exception &e) {
  455. LOG(ERROR) << "Caught " << e.what() << " exception.";
  456. return FAILED;
  457. }
  458. LOG(DEBUG) << "File " << file_id << " extracted successfully";
  459. return SUCCESS;
  460. }
  461. int DatFile::ExtractAllFilesByType(FILE_TYPE type, std::string path) {
  462. LOG(INFO) << "Extracting all files to path " << path;
  463. if (dat_state_ < READY) {
  464. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  465. return INCORRECT_STATE_ERROR;
  466. }
  467. int success = 0;
  468. for (const auto& i : dictionary_) {
  469. FILE_TYPE file_type = i.second->FileType();
  470. if (file_type == type) {
  471. success += (ExtractFile(i.second->file_id(), (path + std::to_string(i.second->file_id()))) == SUCCESS
  472. ? 1 : 0);
  473. }
  474. }
  475. LOG(INFO) << "Successfully extracted " << success << " files";
  476. return success;
  477. }
  478. int DatFile::ExtractAllFilesByType(FILE_TYPE type, Database *db) {
  479. LOG(INFO) << "Extracting all files to database...";
  480. if (dat_state_ < READY) {
  481. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  482. return INCORRECT_STATE_ERROR;
  483. }
  484. int success = 0;
  485. for (const auto& i : dictionary_) {
  486. FILE_TYPE file_type = i.second->FileType();
  487. if (file_type == type) {
  488. success += (ExtractFile(i.second->file_id(), db) == SUCCESS ? 1 : 0);
  489. }
  490. }
  491. LOG(INFO) << "Extracted " << success << " files";
  492. return success;
  493. }
  494. //------------------------------------------------//
  495. // PATCH SECTION
  496. //------------------------------------------------//
// Patches a single subfile using a database/export record. The target file is
// selected by data.options["fid"]; an optional "cat" option sets its patch
// category (default 1). The actual write is delegated to ApplyFilePatch().
DAT_RESULT DatFile::PatchFile(const SubfileData &data) {
    LOG(DEBUG) << "Patching file with id = " << data.options["fid"].as<long long>() << ".";
    if (!dat_without_patches_) {
        io.file_size = io.GetActualDatSize();
    }
    if (dat_state_ < READY) {
        LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
        return INCORRECT_STATE_ERROR;
    }
    auto file_id = data.options["fid"].as<long long>();
    if (dictionary_.count(file_id) == 0) {
        LOG(ERROR) << "Cannot patch file - there is no file in dictionary with file_id = " << file_id;
        return NO_FILE_ERROR;
    }
    std::shared_ptr<SubFile> file = dictionary_[file_id];
    // (header consistency check kept disabled by the original author)
//        if (!CorrectSubfile(file)) {
//            if (current_locale_ == PATCHED && patch_dict_.count(file_id) > 0) {
//                LOG(WARNING) << "Patched subfile header with id = " << file->file_id() << " differs from original version...";
//            } else {
//                LOG(ERROR) << "Incorrect subfile with id " << file->file_id()
//                           << " (headers do not match). Cannot patch it";
//                return FAILED;
//            }
//        }
    // If file has inactive category, then we should set it to patched state in order to commit patch and
    // then in ApplyFilePatch(), if new category is still inactive, return dictionary to its original state;
    // NOTE(review): id 2013266257 is special-cased throughout the patching
    // code -- presumably the locale/meta file itself; confirm.
    if (inactive_categories.count(file->category) != 0 && patch_dict_.count(file_id) != 0 && file_id != 2013266257) {
        *dictionary_[file_id] = *patch_dict_[file_id];
    }
    // Category comes from the record when present, otherwise defaults to 1.
    if (data.options["cat"].IsDefined()) {
        file->category = data.options["cat"].as<long long>();
    } else {
        file->category = 1;
    }
    // Build the patched bytes on top of the original-locale data when we have
    // it -- presumably so repeated patching starts from unpatched bytes.
    BinaryData old_data = GetFileData(orig_dict_.count(file->file_id()) == 0 ? file : orig_dict_[file->file_id_]);
    if (old_data.Empty()) {
        LOG(ERROR) << "GetFileData returned empty data. Aborting.";
        return DAT_PATCH_FILE_ERROR;
    }
    BinaryData patch_data = file->MakeForImport(old_data, data);
    DAT_RESULT result = ApplyFilePatch(file, patch_data);
    if (result != SUCCESS)
        return result;
    LOG(DEBUG) << "Patched successfully file " << data.options["fid"].as<long long>() << ".";
    return SUCCESS;
}
  543. DAT_RESULT DatFile::PatchAllDatabase(Database *db) {
  544. LOG(INFO) << "Patching all database";
  545. if (dat_state_ < READY) {
  546. LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
  547. return INCORRECT_STATE_ERROR;
  548. }
  549. SubfileData data;
  550. data = db->GetNextFile();
  551. while (!data.Empty()) {
  552. DAT_RESULT result = PatchFile(data);
  553. if (result != SUCCESS)
  554. LOG(ERROR) << "Cannot patch file " << data.options["fid"].as<long long>() << " continuing";
  555. data = db->GetNextFile();
  556. }
  557. LOG(INFO) << "Successfully patched whole database";
  558. return SUCCESS;
  559. }
  560. DAT_RESULT DatFile::WriteUnorderedDictionary(std::string path) const {
  561. LOG(INFO) << "Writing unordered dictionary to " << path << "dict.txt";
  562. FILE *f = nullptr;
  563. fopen_s(&f, (path + "dict.txt").c_str(), "w");
  564. if (f == nullptr) {
  565. LOG(ERROR) << "Cannot open file " << path + "dict.txt";
  566. return WRITE_TO_FILE_ERROR;
  567. }
  568. fprintf(f, "unk1 file_id offset size1 timestamp version size2 unknown2 type\n");
  569. for (const auto& i : dictionary_) {
  570. fprintf(f, "%lld %lld %lld %lld %lld %lld %lld %lld %s\n", i.second->unknown1(), i.second->file_id(),
  571. i.second->file_offset(), i.second->file_size(), i.second->timestamp(), i.second->version(),
  572. i.second->block_size(), i.second->unknown2(), i.second->Extension().c_str());
  573. }
  574. fclose(f);
  575. LOG(INFO) << "Unordered dictionary was written successfully to " << path << "dict.txt";
  576. return SUCCESS;
  577. }
// Writes the prepared patch bytes `data` for `file` into the .dat file,
// allocating a new block at the end of the file when the current block is too
// small, and keeps the locale dictionaries (orig_dict_/patch_dict_) in sync.
// The touched file id is queued in pending_dictionary_ so its header is
// flushed by CommitDirectories() on close.
DAT_RESULT DatFile::ApplyFilePatch(std::shared_ptr<SubFile> file, BinaryData &data) {
    LOG(DEBUG) << "Applying " << file->file_id() << " patch.";
    // (pre-allocation experiment kept disabled by the original author)
//        if (patch_dict_.size() == 0 && pending_dictionary_.size() == 0) {
//            BinaryData nulls(50 * 1024 * 1024);
//            io.WriteData(nulls, nulls.size(), file_size);
//            file_size += 50 * 1024 * 1024;
//        }
    if (data.Empty()) {
        LOG(ERROR) << "Error caused during making file for import. Cannot patch file " << file->file_id();
        return FAILED;
    }
    auto file_id = file->file_id();
    // NOTE(review): id 2013266257 is special-cased throughout the patching
    // code -- presumably the locale/meta file itself; confirm.
    if (current_locale() != PATCHED && file_id != 2013266257) {
        LOG(INFO) << "Changing locale to PATCHED(RU) in order to patch file";
        SetLocale(PATCHED);
    }
    dat_state_ = UPDATED;
    // Remember the pristine header the first time this file is ever patched.
    if (orig_dict_.count(file_id) == 0 && file_id != 2013266257) {
        orig_dict_[file_id] = std::make_shared<SubFile>(*this, file->MakeHeaderData());
    }
    // Allocate a fresh block at the end of the file when the file was never
    // patched before or the new data does not fit into the existing block.
    if ((patch_dict_.count(file_id) == 0 && file_id != 2013266257) || data.size() > file->block_size()
        || file->file_size() + 8 > file->block_size()) {
        file->file_offset_ = io.file_size;
        file->block_size_ = std::max((long long)data.size(), file->block_size_);
        free_buffered_size_ = std::max(0ll, free_buffered_size_ - file->block_size_ - 8);
        AddBufferedSize();
        io.file_size += file->block_size_ + 8;
    }
    file->file_size_ = data.size() - 8;
    data.Append(BinaryData::FromNumber<4>(0), 0); // set additional fragments count to zero
    // Sanity check: bytes 8..11 of the payload must carry the file id.
    if (file_id != data.ToNumber<4>(8)) {
        LOG(ERROR) << "Created data's file_id doesn't match to original! Patch wasn't written to .dat file";
        return INCORRECT_PATCH_FILE;
    }
    //data.ProtectData();
    //BinaryData data1(data.size());
    io.WriteData(data, data.size(), file->file_offset());
    //data.DeprotectData();
    patch_dict_.erase(file_id); // Dropped the old entry from the patched-locale dictionary
    if (file_id != 2013266257) {
        patch_dict_[file_id] = std::make_shared<SubFile>(*this, file->MakeHeaderData()); // Created the new entry
    }
    // If category is forbidden, then return file header data to original state
    if (inactive_categories.count(file->category) != 0) {
        file->file_offset_ = orig_dict_[file_id]->file_offset_;
        file->file_size_ = orig_dict_[file_id]->file_size_;
        file->block_size_ = orig_dict_[file_id]->block_size_;
        file->timestamp_ = orig_dict_[file_id]->timestamp_;
        file->version_ = orig_dict_[file_id]->version_;
    }
    // Propagate the (possibly new) category into both locale dictionaries.
    if (orig_dict_.count(file_id) != 0 && file_id != 2013266257)
        orig_dict_[file_id]->category = file->category;
    if (patch_dict_.count(file_id) != 0 && file_id != 2013266257)
        patch_dict_[file_id]->category = file->category;
    // Applying file info in directory
    pending_dictionary_.insert(file_id);
    LOG(DEBUG) << "Successfully applied file " << file->file_id() << " patch.";
    return SUCCESS;
}
  637. //------------------------------------------------//
  638. // INPUT-OUTPUT SECTION
  639. //------------------------------------------------//
BinaryData DatFile::GetFileData(const std::shared_ptr<SubFile>& file, long long int offset) {
    // Reads the (possibly fragmented) contents of `file` from the .dat file,
    // starting `offset` bytes into the subfile block, and returns them as one
    // contiguous BinaryData buffer. Returns an empty BinaryData(0) on error.
    LOG(DEBUG) << "Getting file " << file->file_id() << " data";
    // The 20 bytes after the 8-byte block prefix hold the subfile header;
    // its first 4 bytes are the stored file_id used for sanity checking.
    BinaryData mfile_id(20);
    io.ReadData(mfile_id, 20, file->file_offset() + 8);
    if (mfile_id.Empty()) {
        LOG(ERROR) << "Error while reading file " << file->file_id() << " header (offset = "
                   << file->file_offset() << "); Aborting.";
        return BinaryData(0);
    }
    // Compressed payloads don't repeat the file_id, so the id check only
    // applies to uncompressed subfiles.
    if (!mfile_id.CheckCompression() && file->file_id() != mfile_id.ToNumber<4>(0)) {
        LOG(ERROR) << "Bad DatFile::GetFileData() - file_id in SubFile ("
                   << file->file_id()
                   << ") doesn't match to file_id (" << mfile_id.ToNumber<4>(0) << ")in DatFile.";
        return BinaryData(0);
    }
    BinaryData data((unsigned)(file->file_size() + (8 - offset)));
    // Fast path: the whole file fits in a single block, one read suffices.
    if (file->block_size() >= file->file_size() + 8) {
        io.ReadData(data, file->file_size() + (8 - offset), file->file_offset() + offset);
        return data;
    }
    // Fragmented file: the first 4 bytes of the block store the number of
    // additional fragments that continue the data elsewhere in the file.
    BinaryData fragments_count(4);
    io.ReadData(fragments_count, 4, file->file_offset());
    long long fragments_number = fragments_count.ToNumber<4>(0);
    // The first block contains data up to the fragment dictionary, which
    // occupies the last 8 * fragments_number bytes of the block.
    long long current_block_size = file->block_size() - offset - 8 * fragments_number;
    io.ReadData(data, current_block_size, file->file_offset() + offset);
    // Fragment dictionary entries are (size, offset) pairs of 4 bytes each.
    BinaryData FragmentsDictionary(8 * unsigned(fragments_number));
    io.ReadData(FragmentsDictionary, 8 * unsigned(fragments_number),
                file->file_offset() + file->block_size() - 8 * fragments_number);
    // Append each fragment in order; the last fragment may carry fewer valid
    // bytes than its declared size, hence the min() with the remaining bytes.
    for (long long i = 0; i < fragments_number; i++) {
        long long fragment_size = FragmentsDictionary.ToNumber<4>(8 * i);
        long long fragment_offset = FragmentsDictionary.ToNumber<4>(8 * i + 4);
        io.ReadData(data, std::min(fragment_size, file->file_size() - current_block_size), fragment_offset,
                    current_block_size);
        current_block_size += fragment_size;
    }
    LOG(DEBUG) << "Successfully got file " << file->file_id() << " data";
    return data;
}
  678. //------------------------------------------------//
  679. // LOCALE SECTION
  680. //------------------------------------------------//
  681. DAT_RESULT DatFile::SetLocale(LOCALE locale) {
  682. LOG(INFO) << "Setting locale to " << (locale == PATCHED ? " PATCHED" : " ORIGINAL");
  683. if (dat_state_ < READY) {
  684. LOG(ERROR) << "Dat state isn't READY. Cannot set locale.";
  685. return INCORRECT_STATE_ERROR;
  686. }
  687. if (current_locale_ == locale) {
  688. return SUCCESS;
  689. }
  690. dat_state_ = UPDATED;
  691. auto dict = GetLocaleDictReference(locale);
  692. for (const auto& file : dict) {
  693. if (file.second == nullptr)
  694. continue;
  695. if (dictionary_.count(file.first) == 0) {
  696. LOG(WARNING) << "In locale dictionary there is file with file_id = " << file.first
  697. << "which is not in .dat file! Passing it and removing from locale dictionary";
  698. dict.erase(file.first);
  699. continue;
  700. }
  701. if (dictionary_[file.first]->MakeHeaderData().CutData(8, 16) ==
  702. file.second->MakeHeaderData().CutData(8, 16) ||
  703. inactive_categories.count(orig_dict_[file.first]->category) != 0)
  704. continue;
  705. long long file_id = file.first;
  706. std::shared_ptr<SubFile> new_file = file.second;
  707. *dictionary_[file_id] = *new_file;
  708. pending_dictionary_.insert(file_id);
  709. dat_state_ = UPDATED;
  710. }
  711. current_locale_ = locale;
  712. LOG(INFO) << "Locale set successfull";
  713. return SUCCESS;
  714. }
  715. LOCALE DatFile::current_locale() {
  716. if (dat_state_ < READY) {
  717. LOG(ERROR) << "dat_file is in incorrect state!";
  718. return ORIGINAL;
  719. }
  720. if (current_locale_ != PATCHED && current_locale_ != ORIGINAL) {
  721. LOG(ERROR) << "locale has incorrect value. Setting it to original";
  722. current_locale_ = ORIGINAL;
  723. }
  724. return current_locale_;
  725. }
  726. std::map<long long, std::shared_ptr<SubFile> > &DatFile::GetLocaleDictReference(LOCALE locale) {
  727. switch (locale) {
  728. case PATCHED:
  729. return patch_dict_;
  730. case ORIGINAL:
  731. return orig_dict_;
  732. default:
  733. LOG(ERROR) << "Unknown locale! Returning original";
  734. return orig_dict_;
  735. }
  736. }
  737. //------------------------------------------------//
  738. // CHECKERS SECTION
  739. //------------------------------------------------//
  740. bool DatFile::CorrectSubfile(std::shared_ptr<SubFile> file) {
  741. BinaryData mfile_id(20);
  742. io.ReadData(mfile_id, 20, file->file_offset() + 8);
  743. if (mfile_id.Empty())
  744. return false;
  745. return (mfile_id.CheckCompression() || file->file_id() == mfile_id.ToNumber<4>(0)) && file->file_size() < 50ll * 1024ll * 1024ll;
  746. }
bool DatFile::CheckIfUpdatedByGame() {
    // Game-side update detection is currently disabled and always reports
    // "not updated". The previous implementation (kept below for reference)
    // compared each dictionary entry against both locale dictionaries and
    // scheduled re-patching when neither matched.
//    LOG(INFO) << "Checking if DatFile was updated by LotRO";
//    if (!pending_patch_.empty())
//        return true;
//    bool updated = false;
//    for (const auto& i : dictionary_) {
//        long long file_id = i.first;
//        std::shared_ptr<SubFile> subfile = i.second;
//        if (patch_dict_.count(file_id) == 0)
//            continue;
//
//        if (*subfile != *patch_dict_[file_id] && *subfile != *orig_dict_[file_id]) {
//            //orig_dict_.clear();
//            //patch_dict_.clear();
//            LOG(INFO) << "DAT FILE WAS UPDATED!!!! CLEARING PATCH DATA";
//            pending_patch_.insert(file_id);
//            io.WriteData(BinaryData::FromNumber<4>(0), 4, 300);
//            return true;
//        }
//    }
//    return updated;
    return false;
}
  770. bool DatFile::CheckIfNotPatched() {
  771. LOG(INFO) << "DatFile " << (patch_dict_.empty() ? "HASN'T " : "HAS already")
  772. << " been patched by LEGACY launcher!";
  773. return patch_dict_.empty();
  774. }
  775. bool DatFile::CheckIfPatchedByOldLauncher() {
  776. LOG(INFO) << "DatFile " << (dictionary_.count(620750000) == 0 ? "HASN'T " : "HAS already")
  777. << " been patched by OLD LAUNCHER!";
  778. return dictionary_.count(620750000) > 0;
  779. }
  780. //------------------------------------------------//
  781. // CATEGORY SECTION
  782. //------------------------------------------------//
  783. DAT_RESULT DatFile::EnableCategory(int category) {
  784. LOG(INFO) << "Enabling category " << category;
  785. if (inactive_categories.count(category) == 0)
  786. return SUCCESS;
  787. inactive_categories.erase(category);
  788. dat_state_ = UPDATED;
  789. for (auto &file : dictionary_) {
  790. auto file_id = file.first;
  791. if (patch_dict_.count(file_id) > 0 && patch_dict_[file_id]->category == category) {
  792. *file.second = *patch_dict_[file_id];
  793. pending_dictionary_.insert(file_id);
  794. }
  795. }
  796. LOG(INFO) << "Category " << category << " enabled successfully";
  797. return SUCCESS;
  798. }
  799. DAT_RESULT DatFile::DisableCategory(int category) {
  800. LOG(INFO) << "Disabling category " << category;
  801. if (inactive_categories.count(category) != 0)
  802. return SUCCESS;
  803. inactive_categories.insert(category);
  804. dat_state_ = UPDATED;
  805. for (auto &file : dictionary_) {
  806. auto file_id = file.first;
  807. if (orig_dict_.count(file_id) && orig_dict_[file_id]->category == category) {
  808. *file.second = *orig_dict_[file_id];
  809. pending_dictionary_.insert(file_id);
  810. }
  811. }
  812. LOG(INFO) << "Category " << category << " disabled successfully";
  813. return SUCCESS;
  814. }
// Returns a read-only view of the set of currently disabled category ids.
const std::set<long long> &DatFile::GetInactiveCategoriesList() {
    return inactive_categories;
}
// Returns the path of the .dat file this object was initialised with.
const std::string &DatFile::filename() const {
    return filename_;
}
  821. void DatFile::AddBufferedSize() {
  822. if (free_buffered_size_ >= MIN_BUFFERED_SIZE)
  823. return;
  824. BinaryData nulls(MAX_BUFFERED_SIZE);
  825. io.WriteData(nulls, MAX_BUFFERED_SIZE, io.file_size);
  826. free_buffered_size_ = MAX_BUFFERED_SIZE;
  827. }
  828. //------------------------------------------------//
  829. // BACKUP SECTION
  830. //------------------------------------------------//
  831. bool DatFile::CheckIfBackupExists(const std::string &backup_datname) {
  832. std::ifstream dst("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
  833. return !dst.fail();
  834. }
  835. DAT_RESULT DatFile::RemoveBackup(const std::string &backup_datname) {
  836. if (!CheckIfBackupExists(backup_datname))
  837. return SUCCESS;
  838. if (remove(("DAT_LIBRARY_BACKUP/" + backup_datname).c_str()) == 0)
  839. return SUCCESS;
  840. return REMOVE_FILE_ERROR;
  841. }
  842. DAT_RESULT DatFile::CreateBackup(const std::string &backup_datname) {
  843. auto filename = filename_;
  844. auto dat_id = dat_id_;
  845. LOG(INFO) << "Restoring .dat file " << filename << " from backup " << backup_datname;
  846. LOG(INFO) << " Closing DatFile...";
  847. CloseDatFile();
  848. LOG(INFO) << " Copying " << filename << " to " << backup_datname;
  849. mkdir("DAT_LIBRARY_BACKUP");
  850. std::ifstream src(filename, std::ios::binary);
  851. std::ofstream dst("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
  852. std::istreambuf_iterator<char> begin_source(src);
  853. std::istreambuf_iterator<char> end_source;
  854. std::ostreambuf_iterator<char> begin_dest(dst);
  855. std::copy(begin_source, end_source, begin_dest);
  856. src.close();
  857. dst.close();
  858. LOG(INFO) << " Done copying. Initializing restored" << filename << " DatFile...";
  859. InitDatFile(filename, dat_id);
  860. LOG(INFO) << "Restoring .dat file success!";
  861. return SUCCESS;
  862. }
  863. DAT_RESULT DatFile::RestoreFromBackup(const std::string &backup_datname) {
  864. auto filename = filename_;
  865. auto dat_id = dat_id_;
  866. LOG(INFO) << "Restoring .dat file " << filename << " from backup " << backup_datname;
  867. LOG(INFO) << " Closing DatFile...";
  868. CloseDatFile();
  869. LOG(INFO) << " Copying " << filename << " to " << backup_datname;
  870. mkdir("DAT_LIBRARY_BACKUP");
  871. std::ifstream src("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
  872. std::ofstream dst(filename, std::ios::binary);
  873. if (src.fail()) {
  874. LOG(ERROR) << "CANNOT RESTORE FILE FROM BACKUP - no backup specified with name " << backup_datname;
  875. return NO_BACKUP_ERROR;
  876. }
  877. std::istreambuf_iterator<char> begin_source(src);
  878. std::istreambuf_iterator<char> end_source;
  879. std::ostreambuf_iterator<char> begin_dest(dst);
  880. std::copy(begin_source, end_source, begin_dest);
  881. src.close();
  882. dst.close();
  883. LOG(INFO) << " Done copying. Initializing restored" << filename << " DatFile...";
  884. InitDatFile(filename, dat_id);
  885. LOG(INFO) << "Restoring .dat file success!";
  886. return SUCCESS;
  887. }
  888. }
  889. }