DatFile.cpp 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097
  1. //
  2. // Created by Иван_Архипов on 31.10.2017.
  3. //
  4. #define NOMINMAX
  5. #include "DatFile.h"
  6. #include "BinaryData.h"
  7. #include "SubDirectory.h"
  8. #include "SubFile.h"
  9. #include "SubfileData.h"
  10. #include "DatOperationResult.h"
  11. #include <EasyLogging++/easylogging++.h>
  12. //#include <unistd.h>
  13. #include <algorithm>
  14. #include <iterator>
  15. #include <locale>
  16. #define ELPP_FEATURE_CRASH_LOG
  17. INITIALIZE_EASYLOGGINGPP
  18. #ifdef WIN32
  19. #define fseek _fseeki64
  20. #define ftell _ftelli64
  21. #endif
  22. extern "C++"
  23. {
  24. namespace LOTRO_DAT {
  25. //------------------------------------------------//
  26. // INIT SECTION
  27. //------------------------------------------------//
// Constructs a closed DatFile and configures the easylogging++ backend:
// all records go to "dat_library.log" (rotated at 5 MB, flushed after every
// record); DEBUG level is disabled by default and, when enabled, is routed
// to a separate "dat_library_debug.log".
DatFile::DatFile() : io(*this) {
    dat_state_ = CLOSED;
    free_buffered_size_ = 0;
    // Start from empty dictionaries; InitDatFile() populates them.
    orig_dict_.clear();
    patch_dict_.clear();
    dictionary_.clear();
    el::Configurations defaultConf;
    defaultConf.setToDefault();
    defaultConf.setGlobally(el::ConfigurationType::Format,
                            "%datetime %level %fbase (line %line) : %msg (function: %func)");
    defaultConf.setGlobally(el::ConfigurationType::ToFile, "true");
    defaultConf.setGlobally(el::ConfigurationType::Filename, "dat_library.log");
    defaultConf.setGlobally(el::ConfigurationType::ToStandardOutput, "false");
    defaultConf.setGlobally(el::ConfigurationType::PerformanceTracking, "true");
    defaultConf.setGlobally(el::ConfigurationType::MaxLogFileSize, "5242880"); // 5MB
    defaultConf.setGlobally(el::ConfigurationType::LogFlushThreshold, "1"); // Flush after every one log
    defaultConf.set(el::Level::Debug, el::ConfigurationType::Enabled, "false");
    defaultConf.set(el::Level::Debug, el::ConfigurationType::Filename, "dat_library_debug.log");
    el::Loggers::reconfigureAllLoggers(defaultConf);
    LOG(INFO) << "==================================================================";
    LOG(INFO) << "Starting new DatFile class instance";
}
  50. DAT_RESULT DatFile::InitDatFile(const std::string &filename, int dat_id) {
  51. LOG(DEBUG) << "Started initialisation of DatFile " << filename;
  52. if (dat_state_ != CLOSED && filename == filename_) {
  53. LOG(DEBUG) << "Trying to reopen the same file: " << filename << ". Doing nothing.";
  54. return SUCCESS;
  55. }
  56. if (dat_state_ != CLOSED && filename != filename_) {
  57. LOG(DEBUG) << "DatFile wasn't closed yet. Closing in order to reopen.";
  58. if (CloseDatFile() != SUCCESS) {
  59. LOG(ERROR) << "Unable to perform CloseDatFile()! Aborting initialization!";
  60. return FAILED;
  61. }
  62. }
  63. dat_id_ = dat_id;
  64. dat_state_ = CLOSED;
  65. current_locale_ = ORIGINAL;
  66. free_buffered_size_ = 0;
  67. filename_ = "none";
  68. DAT_RESULT result;
  69. DAT_RESULT return_value = SUCCESS;
  70. auto res = io.Init(filename);
  71. if (res.result == DatOperationResult::ERROR) {
  72. LOG(ERROR) << "ERROR! Unable to initialize input-output!";
  73. CloseDatFile();
  74. return NO_FILE_ERROR;
  75. }
  76. dat_state_ = SUCCESS_DIRECTORIES;
  77. LOG(INFO) << "Starting MakeDictionary";
  78. result = MakeDictionary();
  79. if (result <= 0) {
  80. LOG(ERROR) << "Unable to make dictionary. Aborting.";
  81. CloseDatFile();
  82. return result;
  83. }
  84. return_value = std::max(return_value, result);
  85. LOG(INFO) << "Starting InitLocales";
  86. result = InitLocales();
  87. if (result <= 0) {
  88. LOG(ERROR) << "Unable to initialize locales. Aborting.";
  89. CloseDatFile();
  90. return result;
  91. }
  92. return_value = std::max(return_value, result);
  93. LOG(INFO) << "File " << filename << " opened successfully!";
  94. filename_ = filename;
  95. dat_state_ = READY;
  96. LOG(INFO) << "Making last preparations...";
  97. return_value = std::max(return_value, result);
  98. PerformDictionaryCheck();
  99. if (return_value >= 2) {
  100. LOG(WARNING) << "Dat file could be corrupted. Trying to delete corrupted dictionary rows";
  101. if (RepairDatFile() != SUCCESS)
  102. return CRITICAL_DAT_ERROR;
  103. }
  104. if (CheckIfUpdatedByGame()) {
  105. LOG(INFO) << ".dat file was updated by game! Need to reinitialize files and directories!";
  106. CloseDatFile();
  107. InitDatFile(filename, dat_id);
  108. }
  109. std::cout << "Visited subdirs: " << SubDirectory::visited_subdirectories_.size() << std::endl;
  110. std::cout << "Visited files: " << SubDirectory::visited_subfiles_.size() << std::endl;
  111. dat_without_patches_ = CheckIfNotPatched();
  112. LOG(INFO) << "Preparations made successfully! Init return value = " << return_value;
  113. return return_value;
  114. }
  115. DAT_RESULT DatFile::MakeDictionary() {
  116. LOG(DEBUG) << "Started making dictionary";
  117. if (dat_state_ != SUCCESS_DIRECTORIES) {
  118. LOG(ERROR) << "Dat state isn't SUCCESS_DIRECTORIES. Cannot make directories.";
  119. return INCORRECT_STATE_ERROR;
  120. }
  121. if (io.GetRootDirectory() == nullptr) {
  122. LOG(ERROR) << "root_directory is nullptr!!";
  123. return INIT_ERROR;
  124. }
  125. io.GetRootDirectory()->MakeDictionary(dictionary_);
  126. dat_state_ = SUCCESS_DICTIONARY;
  127. LOG(DEBUG) << "Dictionary made successfull";
  128. return SUCCESS;
  129. }
// Loads the locale dictionaries (original/patched) that this library stores
// past the regular end of the .dat file. The on-disk layout is documented in
// the block comment inside the body. On any inconsistency the locales are
// treated as absent and SUCCESS is still returned — the .dat file itself
// remains usable without them.
DAT_RESULT DatFile::InitLocales() {
    LOG(INFO) << "Initialising locales...";
    BinaryData dicts_data(4);
    // Header offset 300 holds the offset of the locale block (0 = none).
    io.ReadData(dicts_data, 4, 300);
    long long dict_offset = dicts_data.ToNumber<4>(0);
    if (dict_offset == 0 || dict_offset + 8 >= io.GetActualDatSize()) {
        LOG(INFO) << "Dictionary offset is empty or incorrect. Passing.";
        return SUCCESS;
    }
    // Locale block header: [size:4][version:4][localed .dat size:4].
    io.ReadData(dicts_data, 4, dict_offset);
    long long dict_size = dicts_data.ToNumber<4>(0);
    io.ReadData(dicts_data, 4, dict_offset + 4);
    long long dict_version = dicts_data.ToNumber<4>(0);
    io.ReadData(dicts_data, 4, dict_offset + 8);
    io.file_size = dicts_data.ToNumber<4>(0);
    LOG(INFO) << "Dictionary size is " << dict_size << ". Version is " << dict_version << ". Localed .dat size = "
              << io.file_size;
    if (dict_version != 101) {
        // Unsupported dictionary version: detach the locale block by zeroing
        // its offset in the header and continue without locales.
        LOG(WARNING) << "DICTIONARY IS OLD!!!";
        orig_dict_.clear();
        patch_dict_.clear();
        io.WriteData(BinaryData::FromNumber<4>(0), 4, 300);
        dat_state_ = UPDATED;
        dat_without_patches_ = true;
        return SUCCESS;
    }
    // Read the entire locale block into memory for parsing.
    dicts_data = BinaryData((unsigned)dict_size);
    io.ReadData(dicts_data, dict_size, dict_offset + 12);
    if (dicts_data.size() < 15) {
        LOG(ERROR) << "Incorrect dictionary. Passing without it.";
        orig_dict_.clear();
        patch_dict_.clear();
        io.WriteData(BinaryData::FromNumber<4>(0), 4, 300);
        dat_state_ = UPDATED;
        dat_without_patches_ = true;
        return SUCCESS;
    }
    // Magic greeting marks a locale block written by this library.
    BinaryData hi_data = dicts_data.CutData(0, 15) + BinaryData("\0", 1);
    std::string hi = std::string((char *) (hi_data.data()));
    LOG(DEBUG) << "hi info is " << hi;
    if (hi != "Hi from Gi1dor!") {
        LOG(WARNING) << "Didn't receive 'hi' from Gi1dor... Initialising locale dicts as empty";
        LOG(INFO) << "Could't init locales' file... Continuing without them";
        return SUCCESS;
    }
    int offset = 15;
    // 4-byte locale tag follows the greeting: "PATC" or "ORIG".
    BinaryData current_locale_data = dicts_data.CutData(offset, offset + 4) + BinaryData("\0", 1);
    std::string locale((char *) (current_locale_data.data()));
    offset += 4;
    LOG(DEBUG) << "current locale:" << locale;
    if (locale != "PATC" && locale != "ORIG") {
        LOG(WARNING) << "Incorrect locale... Initialising locale dicts as empty";
        LOG(INFO) << "Could't recognize locale... Continuing without locales";
        return SUCCESS;
    }
    current_locale_ = (locale == "PATC" ? PATCHED : ORIGINAL);
    // 15 bytes for "Hi from Gi1dor"
    // 4 bytes for LOCALE
    // 4 bytes for orig_dict.size()
    // (32 + 4) * orig_dict.size() bytes for orig_dict data
    // 4 bytes for patch_dict.size()
    // (32 + 4) * patch_dict.size() bytes for patch_dict data
    // 4 bytes for inactive_categories dict
    // 4 * inactive_categories.size() bytes for inactive_categories data
    size_t orig_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    // Each entry: 32-byte SubFile header followed by a 4-byte category id.
    for (size_t i = 0; i < orig_dict_size; i++) {
        auto file = std::make_shared<SubFile>(*this, dicts_data.CutData(offset, offset + 32));
        orig_dict_[file->file_id()] = file;
        offset += 32;
        orig_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
        offset += 4;
        if (orig_dict_[file->file_id()]->category == 0)
            LOG(DEBUG) << "file category is undefined (0)!";
    }
    size_t patch_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    for (size_t i = 0; i < patch_dict_size; i++) {
        auto file = std::make_shared<SubFile>(*this, dicts_data.CutData(offset, offset + 32));
        patch_dict_[file->file_id()] = file;
        offset += 32;
        patch_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
        offset += 4;
        if (patch_dict_[file->file_id()]->category == 0)
            LOG(DEBUG) << "file category is undefined (0)!";
    }
    // Trailing list of category ids whose patches are currently disabled.
    size_t active_patches_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    for (size_t i = 0; i < active_patches_dict_size; i++) {
        inactive_categories.insert(dicts_data.ToNumber<4>(offset));
        offset += 4;
    }
    LOG(INFO) << "There are " << patch_dict_.size() << " files in patch locale dictionary";
    LOG(INFO) << "There are " << orig_dict_.size() << " files in original locale dictionary";
    std::string inactive_cat_s;
    for (auto i : inactive_categories) {
        inactive_cat_s += std::to_string(i) + " ";
    }
    LOG(INFO) << "Unactive patches now: " << inactive_cat_s;
    LOG(INFO) << "Finished initialising locales";
    return SUCCESS;
}
  232. DAT_RESULT DatFile::PerformDictionaryCheck() {
  233. for (const auto& mpair : dictionary_) {
  234. auto file = mpair.second;
  235. auto file_id = mpair.first;
  236. if (CorrectSubfile(file))
  237. continue;
  238. if (current_locale_ == PATCHED && orig_dict_.count(file_id) > 0) {
  239. LOG(WARNING) << "Potential incorrect patched version of file " << file_id << ". Switching to original.";
  240. dictionary_[file_id] = orig_dict_[file_id];
  241. }
  242. if (!CorrectSubfile(file)) {
  243. LOG(ERROR) << "Incorrect file " << file_id << ". It's offset is said as " << file->file_offset()
  244. << ". Erasing it from dictionary.";
  245. dictionary_.erase(file_id);
  246. }
  247. }
  248. return SUCCESS;
  249. }
  250. //------------------------------------------------//
  251. // CLOSE SECTION
  252. //------------------------------------------------//
// Ensures pending changes are committed and resources released by closing
// the file (see CloseDatFile()).
DatFile::~DatFile() {
    CloseDatFile();
}
  256. DAT_RESULT DatFile::CloseDatFile() {
  257. LOG(INFO) << "Closing DatFile";
  258. if (dat_state_ == CLOSED) {
  259. LOG(INFO) << "DatFile is already closed. Nothing to do";
  260. return SUCCESS;
  261. }
  262. // Committing changes and updating/writing locales and header info
  263. io.DeInit();
  264. if (!pending_dictionary_.empty() || dat_state_ == UPDATED) {
  265. CommitLocales();
  266. CommitDirectories();
  267. //ModifyFragmentationJournal();
  268. //free_dat_size = 128248;
  269. //fragmentation_journal_end = 0;
  270. //fragmentation_journal_size = 1;
  271. //UpdateHeader();
  272. }
  273. current_locale_ = ORIGINAL;
  274. SubDirectory::visited_subdirectories_.clear();
  275. free_buffered_size_ = 0;
  276. filename_ = "none";
  277. orig_dict_.clear();
  278. patch_dict_.clear();
  279. pending_patch_.clear();
  280. inactive_categories.clear();
  281. pending_dictionary_.clear();
  282. dictionary_.clear();
  283. dat_state_ = CLOSED;
  284. dat_id_ = -1;
  285. LOG(INFO) << "File closed successfully.";
  286. return SUCCESS;
  287. }
// Serialises both locale dictionaries (plus the inactive-category list) and
// writes them back into the .dat file, either in-place over the previous
// locale block or appended at the end when the block grew (reserving extra
// headroom so subsequent commits usually fit in-place).
DAT_RESULT DatFile::CommitLocales() {
    LOG(INFO) << "Committing locales...";
    // 15 bytes for "Hi from Gi1dor"
    // 4 bytes for LOCALE
    // 4 bytes for orig_dict.size()
    // (32 + 4) * orig_dict.size() bytes for orig_dict data
    // 4 bytes for patch_dict.size()
    // (32 + 4) * patch_dict.size() bytes for patch_dict data
    // 4 bytes for inactive_categories list
    // 4 * inactive_categories.size() bytes for inactive_categories data
    // NOTE(review): the leading "14 +" is not accounted for by the layout
    // comment above — presumably slack/padding; confirm before changing.
    BinaryData binary_data = BinaryData(14 + 15 + 4
                                        + 4 + (32 + 4) * orig_dict_.size()
                                        + 4 + (32 + 4) * patch_dict_.size()
                                        + 4 + 4 * inactive_categories.size());
    size_t current_size = 0;
    binary_data.Append(BinaryData("Hi from Gi1dor!", 15), current_size);
    current_size += 15;
    binary_data.Append(BinaryData((current_locale_ == ORIGINAL ? "ORIG" : "PATC"), 4), current_size);
    current_size += 4;
    binary_data.Append(BinaryData::FromNumber<4>(orig_dict_.size()), current_size);
    current_size += 4;
    // Each dictionary entry: 32-byte SubFile header + 4-byte category id.
    for (const auto &file : orig_dict_) {
        binary_data.Append(file.second->MakeHeaderData(), current_size);
        current_size += 32;
        binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
        current_size += 4;
    }
    binary_data.Append(BinaryData::FromNumber<4>(patch_dict_.size()), current_size);
    current_size += 4;
    for (const auto &file : patch_dict_) {
        binary_data.Append(file.second->MakeHeaderData(), current_size);
        current_size += 32;
        binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
        current_size += 4;
    }
    binary_data.Append(BinaryData::FromNumber<4>(inactive_categories.size()), current_size);
    current_size += 4;
    for (auto patch_id : inactive_categories) {
        binary_data.Append(BinaryData::FromNumber<4>(patch_id), current_size);
        current_size += 4;
    }
    // Locate the previous locale block via the header pointer at offset 300.
    BinaryData dicts_data(4);
    io.ReadData(dicts_data, 4, 300);
    long long dict_offset = dicts_data.ToNumber<4>(0);
    io.ReadData(dicts_data, 4, dict_offset);
    long long dict_size = dicts_data.ToNumber<4>(0);
    if (binary_data.size() > dict_size || dict_offset == 0) {
        // Block does not fit (or never existed): append a new one at the end
        // of the file and point the header at it.
        io.WriteData(BinaryData::FromNumber<4>(io.file_size), 4, 300);
        io.WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size() + 4, 20u * 1024u * 1024u)), 4,
                     io.file_size);
        io.WriteData(BinaryData::FromNumber<4>(101), 4, io.file_size + 4);
        io.WriteData(BinaryData::FromNumber<4>(io.file_size + binary_data.size() + 12 + 20 * 1024 * 1024), 4,
                     io.file_size + 8); // Writing current file size;
        io.WriteData(binary_data, binary_data.size(), io.file_size + 12);
        io.file_size += binary_data.size() + 12;
        // Adding space for 25 megabytes locales file in total.
        BinaryData nulls(unsigned(20 * 1024 * 1024));
        io.WriteData(nulls, nulls.size(), io.file_size);
        io.file_size += nulls.size();
    } else {
        // Rewrite the existing block in place (version stays 101).
        io.WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size(), 20u * 1024u * 1024u)), 4, dict_offset);
        io.WriteData(BinaryData::FromNumber<4>(101), 4, dict_offset + 4);
        io.WriteData(BinaryData::FromNumber<4>(io.file_size), 4, dict_offset + 8); // Writing current file size;
        io.WriteData(binary_data, binary_data.size(), dict_offset + 12);
    }
    LOG(INFO) << "Locales commited successfully";
    return SUCCESS;
}
  356. DAT_RESULT DatFile::CommitDirectories() {
  357. for (auto file_id : pending_dictionary_) {
  358. if (dictionary_[file_id] == nullptr || !CorrectSubfile(dictionary_[file_id]))
  359. continue;
  360. io.WriteData(dictionary_[file_id]->MakeHeaderData(), 32, dictionary_[file_id]->dictionary_offset());
  361. }
  362. pending_dictionary_.clear();
  363. return SUCCESS;
  364. }
// Rewrites the last fragmentation-journal entry so that its recorded free
// block points at freshly appended, zero-filled space at the end of the
// file (keeping the free size, moving the free offset).
DAT_RESULT DatFile::ModifyFragmentationJournal() {
    if (io.fragmentation_journal_size == 0)
        return SUCCESS;
    LOG(DEBUG) << "Modifying fragmentation journal";
    BinaryData data(4);
    // Journal entries are 8 bytes each: [size:4][offset:4].
    io.ReadData(data, 4, io.fragmentation_journal_offset + 8 * io.fragmentation_journal_size);
    LOG(INFO) << "FREE_SIZE BLOCK = " << data.ToNumber<4>(0);
    long long free_size = data.ToNumber<4>(0);
    long long free_offset = io.file_size;
    // Reserve the free block as zeroed space appended to the file.
    BinaryData nulldata = BinaryData(unsigned(free_size));
    io.WriteData(nulldata, nulldata.size(), io.file_size);
    io.file_size += nulldata.size();
    io.WriteData(BinaryData::FromNumber<4>(free_size), 4,
                 io.fragmentation_journal_offset + 8 * io.fragmentation_journal_size);
    io.WriteData(BinaryData::FromNumber<4>(free_offset), 4,
                 io.fragmentation_journal_offset + 8 * io.fragmentation_journal_size + 4);
    //nulldata = BinaryData(8);
    //io.WriteData(nulldata, nulldata.size(), fragmentation_journal_offset + 16);
    LOG(DEBUG) << "Finished modifying fragmentation journal";
    return SUCCESS;
}
  386. DAT_RESULT DatFile::RepairDatFile() {
  387. for (const auto& file : dictionary_) {
  388. auto subfile = file.second;
  389. auto file_id = file.first;
  390. if (CorrectSubfile(subfile))
  391. continue;
  392. if (orig_dict_.count(file_id) == 0 || subfile->file_offset() == orig_dict_[file_id]->file_offset())
  393. return CRITICAL_DAT_ERROR;
  394. *dictionary_[file_id] = *orig_dict_[file_id];
  395. patch_dict_.erase(file_id);
  396. orig_dict_.erase(file_id);
  397. }
  398. return SUCCESS;
  399. }
  400. //------------------------------------------------//
  401. // DAT INFO SECTION
  402. //------------------------------------------------//
// Returns the current lifecycle state of this DatFile instance.
DAT_STATE DatFile::DatFileState() const {
    return dat_state_;
}
// Returns the number of subfiles in the active dictionary.
long long DatFile::files_number() const {
    return dictionary_.size();
}
  409. //------------------------------------------------//
  410. // EXTRACT SECTION
  411. //------------------------------------------------//
  412. DAT_RESULT DatFile::ExtractFile(long long file_id, const std::string &path) {
  413. LOG(DEBUG) << "Extracting file " << file_id << " to path " << path;
  414. if (dat_state_ < READY) {
  415. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  416. return INCORRECT_STATE_ERROR;
  417. }
  418. BinaryData file_data = GetFileData(dictionary_[file_id], 8);
  419. if (file_data.size() == 0) {
  420. LOG(ERROR) << "File data is empty. Aborting extraction.";
  421. return NO_FILE_ERROR;
  422. }
  423. SubfileData export_data = dictionary_[file_id]->PrepareForExport(file_data);
  424. if (export_data.Empty()) {
  425. LOG(ERROR) << "Export data is empty. Aborting extraction.";
  426. return NO_FILE_ERROR;
  427. }
  428. if (export_data.binary_data.WriteToFile(path + export_data.options["ext"].as<std::string>()) != SUCCESS) {
  429. LOG(ERROR) << "Cannot write to file.";
  430. return WRITE_TO_FILE_ERROR;
  431. }
  432. LOG(DEBUG) << "File " << file_id << " extracted successfully";
  433. return SUCCESS;
  434. }
  435. DAT_RESULT DatFile::ExtractFile(long long file_id, Database *db) {
  436. LOG(DEBUG) << "Extracting file " << file_id << " to database.";
  437. if (dat_state_ < READY) {
  438. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  439. return INCORRECT_STATE_ERROR;
  440. }
  441. BinaryData file_data = GetFileData(dictionary_[file_id], 8);
  442. if (file_data.Empty()) {
  443. LOG(WARNING) << "File with id " << dictionary_[file_id]->file_id() << " is empty. Passing it.";
  444. return SUCCESS;
  445. }
  446. SubfileData export_data;
  447. export_data = dictionary_[file_id]->PrepareForExport(file_data);
  448. export_data.options["did"] = dat_id_;
  449. if (export_data == SubfileData()) {
  450. LOG(WARNING) << "File with id " << dictionary_[file_id]->file_id() << " is empty or incorrect.";
  451. return SUCCESS;
  452. }
  453. try {
  454. db->PushFile(export_data);
  455. } catch (std::exception &e) {
  456. LOG(ERROR) << "Caught " << e.what() << " exception.";
  457. return FAILED;
  458. }
  459. LOG(DEBUG) << "File " << file_id << " extracted successfully";
  460. return SUCCESS;
  461. }
  462. int DatFile::ExtractAllFilesByType(FILE_TYPE type, std::string path) {
  463. LOG(INFO) << "Extracting all files to path " << path;
  464. if (dat_state_ < READY) {
  465. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  466. return INCORRECT_STATE_ERROR;
  467. }
  468. int success = 0;
  469. for (const auto& i : dictionary_) {
  470. FILE_TYPE file_type = i.second->FileType();
  471. if (file_type == type) {
  472. success += (ExtractFile(i.second->file_id(), (path + std::to_string(i.second->file_id()))) == SUCCESS
  473. ? 1 : 0);
  474. }
  475. }
  476. LOG(INFO) << "Successfully extracted " << success << " files";
  477. return success;
  478. }
  479. int DatFile::ExtractAllFilesByType(FILE_TYPE type, Database *db) {
  480. LOG(INFO) << "Extracting all files to database...";
  481. if (dat_state_ < READY) {
  482. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  483. return INCORRECT_STATE_ERROR;
  484. }
  485. int success = 0;
  486. for (const auto& i : dictionary_) {
  487. FILE_TYPE file_type = i.second->FileType();
  488. if (file_type == type) {
  489. success += (ExtractFile(i.second->file_id(), db) == SUCCESS ? 1 : 0);
  490. }
  491. }
  492. LOG(INFO) << "Extracted " << success << " files";
  493. return success;
  494. }
  495. //------------------------------------------------//
  496. // PATCH SECTION
  497. //------------------------------------------------//
// Applies one patch record to the subfile identified by data.options["fid"]:
// prepares the patched payload from the current on-disk data and delegates
// the actual write to ApplyFilePatch().
// NOTE(review): 2013266257 appears throughout as a special subfile id that
// is exempt from locale-dictionary handling — confirm its meaning.
DAT_RESULT DatFile::PatchFile(const SubfileData &data) {
    LOG(DEBUG) << "Patching file with id = " << data.options["fid"].as<long long>() << ".";
    // Refresh the cached file size from disk unless the file has no patches.
    if (!dat_without_patches_) {
        io.file_size = io.GetActualDatSize();
    }
    if (dat_state_ < READY) {
        LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
        return INCORRECT_STATE_ERROR;
    }
    auto file_id = data.options["fid"].as<long long>();
    if (dictionary_.count(file_id) == 0) {
        LOG(ERROR) << "Cannot patch file - there is no file in dictionary with file_id = " << file_id;
        return NO_FILE_ERROR;
    }
    std::shared_ptr<SubFile> file = dictionary_[file_id];
    // if (!CorrectSubfile(file)) {
    //     if (current_locale_ == PATCHED && patch_dict_.count(file_id) > 0) {
    //         LOG(WARNING) << "Patched subfile header with id = " << file->file_id() << " differs from original version...";
    //     } else {
    //         LOG(ERROR) << "Incorrect subfile with id " << file->file_id()
    //                    << " (headers do not match). Cannot patch it";
    //         return FAILED;
    //     }
    // }
    // If file has inactive category, then we should set it to patched state in order to commit patch and
    // then in ApplyFilePatch(), if new category is still inactive, return dictionary to its original state;
    if (inactive_categories.count(file->category) != 0 && patch_dict_.count(file_id) != 0 && file_id != 2013266257) {
        *dictionary_[file_id] = *patch_dict_[file_id];
    }
    // Category from the patch record; default category is 1.
    if (data.options["cat"].IsDefined()) {
        file->category = data.options["cat"].as<long long>();
    } else {
        file->category = 1;
    }
    // Base the patch on the original-locale data when available.
    BinaryData old_data = GetFileData(orig_dict_.count(file->file_id()) == 0 ? file : orig_dict_[file->file_id_]);
    if (old_data.Empty()) {
        LOG(ERROR) << "GetFileData returned empty data. Aborting.";
        return DAT_PATCH_FILE_ERROR;
    }
    BinaryData patch_data = file->MakeForImport(old_data, data);
    DAT_RESULT result = ApplyFilePatch(file, patch_data);
    if (result != SUCCESS)
        return result;
    LOG(DEBUG) << "Patched successfully file " << data.options["fid"].as<long long>() << ".";
    return SUCCESS;
}
  544. DAT_RESULT DatFile::PatchAllDatabase(Database *db) {
  545. LOG(INFO) << "Patching all database";
  546. if (dat_state_ < READY) {
  547. LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
  548. return INCORRECT_STATE_ERROR;
  549. }
  550. SubfileData data;
  551. data = db->GetNextFile();
  552. while (!data.Empty()) {
  553. DAT_RESULT result = PatchFile(data);
  554. if (result != SUCCESS)
  555. LOG(ERROR) << "Cannot patch file " << data.options["fid"].as<long long>() << " continuing";
  556. data = db->GetNextFile();
  557. }
  558. LOG(INFO) << "Successfully patched whole database";
  559. return SUCCESS;
  560. }
  561. DAT_RESULT DatFile::WriteUnorderedDictionary(std::string path) const {
  562. LOG(INFO) << "Writing unordered dictionary to " << path << "dict.txt";
  563. FILE *f = nullptr;
  564. fopen_s(&f, (path + "dict.txt").c_str(), "w");
  565. if (f == nullptr) {
  566. LOG(ERROR) << "Cannot open file " << path + "dict.txt";
  567. return WRITE_TO_FILE_ERROR;
  568. }
  569. fprintf(f, "unk1 file_id offset size1 timestamp version size2 unknown2 type\n");
  570. for (const auto& i : dictionary_) {
  571. fprintf(f, "%lld %lld %lld %lld %lld %lld %lld %lld %s\n", i.second->unknown1(), i.second->file_id(),
  572. i.second->file_offset(), i.second->file_size(), i.second->timestamp(), i.second->version(),
  573. i.second->block_size(), i.second->unknown2(), i.second->Extension().c_str());
  574. }
  575. fclose(f);
  576. LOG(INFO) << "Unordered dictionary was written successfully to " << path << "dict.txt";
  577. return SUCCESS;
  578. }
// Writes prepared patch data into the .dat file and updates the in-memory
// locale dictionaries: orig_dict_ keeps the pre-patch header, patch_dict_
// the patched one. Switches the active locale to PATCHED when needed and
// registers the file id in pending_dictionary_ for the next commit.
// NOTE(review): 2013266257 looks like a special subfile id exempt from the
// locale-dictionary bookkeeping — confirm.
DAT_RESULT DatFile::ApplyFilePatch(std::shared_ptr<SubFile> file, BinaryData &data) {
    LOG(DEBUG) << "Applying " << file->file_id() << " patch.";
    // if (patch_dict_.size() == 0 && pending_dictionary_.size() == 0) {
    //     BinaryData nulls(50 * 1024 * 1024);
    //     io.WriteData(nulls, nulls.size(), file_size);
    //     file_size += 50 * 1024 * 1024;
    // }
    if (data.Empty()) {
        LOG(ERROR) << "Error caused during making file for import. Cannot patch file " << file->file_id();
        return FAILED;
    }
    auto file_id = file->file_id();
    if (current_locale() != PATCHED && file_id != 2013266257) {
        LOG(INFO) << "Changing locale to PATCHED(RU) in order to patch file";
        SetLocale(PATCHED);
    }
    dat_state_ = UPDATED;
    // Preserve the unpatched header so the original can be restored later.
    if (orig_dict_.count(file_id) == 0 && file_id != 2013266257) {
        orig_dict_[file_id] = std::make_shared<SubFile>(*this, file->MakeHeaderData());
    }
    // New data does not fit the existing block (or the file was never
    // patched): allocate a fresh block at the end of the .dat file.
    if ((patch_dict_.count(file_id) == 0 && file_id != 2013266257) || data.size() > file->block_size()
        || file->file_size() + 8 > file->block_size()) {
        file->file_offset_ = io.file_size;
        file->block_size_ = std::max((long long)data.size(), file->block_size_);
        free_buffered_size_ = std::max(0ll, free_buffered_size_ - file->block_size_ - 8);
        AddBufferedSize();
        io.file_size += file->block_size_ + 8;
    }
    file->file_size_ = data.size() - 8;
    data.Append(BinaryData::FromNumber<4>(0), 0); // set additional fragments count to zero
    if (file_id != data.ToNumber<4>(8)) {
        LOG(ERROR) << "Created data's file_id doesn't match to original! Patch wasn't written to .dat file";
        return INCORRECT_PATCH_FILE;
    }
    //data.ProtectData();
    //BinaryData data1(data.size());
    io.WriteData(data, data.size(), file->file_offset());
    //data.DeprotectData();
    patch_dict_.erase(file_id); // drop the old entry from the patched (RU) dictionary
    if (file_id != 2013266257) {
        patch_dict_[file_id] = std::make_shared<SubFile>(*this, file->MakeHeaderData()); // create the new entry
    }
    // If category is forbidden, then return file header data to original state
    if (inactive_categories.count(file->category) != 0) {
        file->file_offset_ = orig_dict_[file_id]->file_offset_;
        file->file_size_ = orig_dict_[file_id]->file_size_;
        file->block_size_ = orig_dict_[file_id]->block_size_;
        file->timestamp_ = orig_dict_[file_id]->timestamp_;
        file->version_ = orig_dict_[file_id]->version_;
    }
    if (orig_dict_.count(file_id) != 0 && file_id != 2013266257)
        orig_dict_[file_id]->category = file->category;
    if (patch_dict_.count(file_id) != 0 && file_id != 2013266257)
        patch_dict_[file_id]->category = file->category;
    // Applying file info in directory
    pending_dictionary_.insert(file_id);
    LOG(DEBUG) << "Successfully applied file " << file->file_id() << " patch.";
    return SUCCESS;
}
  638. //------------------------------------------------//
  639. // INPUT-OUTPUT SECTION
  640. //------------------------------------------------//
// Reads the complete payload of `file` out of the .dat container, handling
// both unfragmented and fragmented storage. `offset` is the byte position
// inside the subfile block at which reading starts (presumably 8 to skip the
// fragments-count prefix — TODO confirm against callers).
// Returns an empty BinaryData(0) when the header cannot be read or the
// stored file_id does not match the directory entry.
BinaryData DatFile::GetFileData(const std::shared_ptr<SubFile>& file, long long int offset) {
    LOG(DEBUG) << "Getting file " << file->file_id() << " data";
    // The 20 bytes after the 8-byte block prefix hold the subfile header.
    BinaryData mfile_id(20);
    io.ReadData(mfile_id, 20, file->file_offset() + 8);
    if (mfile_id.Empty()) {
        LOG(ERROR) << "Error while reading file " << file->file_id() << " header (offset = "
        << file->file_offset() << "); Aborting.";
        return BinaryData(0);
    }
    // For uncompressed data the on-disk file_id must match the directory
    // entry; compressed blocks carry no plain id, so the check is skipped.
    if (!mfile_id.CheckCompression() && file->file_id() != mfile_id.ToNumber<4>(0)) {
        LOG(ERROR) << "Bad DatFile::GetFileData() - file_id in SubFile ("
        << file->file_id()
        << ") doesn't match to file_id (" << mfile_id.ToNumber<4>(0) << ")in DatFile.";
        return BinaryData(0);
    }
    // Destination buffer: payload size plus the 8-byte prefix, minus what the
    // caller skips via `offset`.
    BinaryData data((unsigned)(file->file_size() + (8 - offset)));
    // Unfragmented case: the whole payload fits inside a single block.
    if (file->block_size() >= file->file_size() + 8) {
        io.ReadData(data, file->file_size() + (8 - offset), file->file_offset() + offset);
        return data;
    }
    // Fragmented case: the first 4 bytes of the block store the number of
    // additional fragments that continue the payload elsewhere in the file.
    BinaryData fragments_count(4);
    io.ReadData(fragments_count, 4, file->file_offset());
    long long fragments_number = fragments_count.ToNumber<4>(0);
    // The tail of the block is occupied by the fragment dictionary
    // (8 bytes per fragment), so only the remainder holds payload bytes.
    long long current_block_size = file->block_size() - offset - 8 * fragments_number;
    io.ReadData(data, current_block_size, file->file_offset() + offset);
    // Fragment dictionary: per fragment, 4 bytes size followed by 4 bytes
    // absolute offset within the .dat file.
    BinaryData FragmentsDictionary(8 * unsigned(fragments_number));
    io.ReadData(FragmentsDictionary, 8 * unsigned(fragments_number),
    file->file_offset() + file->block_size() - 8 * fragments_number);
    // Append each fragment after the bytes gathered so far; the final
    // fragment is clamped so we never read past file_size.
    for (long long i = 0; i < fragments_number; i++) {
        long long fragment_size = FragmentsDictionary.ToNumber<4>(8 * i);
        long long fragment_offset = FragmentsDictionary.ToNumber<4>(8 * i + 4);
        io.ReadData(data, std::min(fragment_size, file->file_size() - current_block_size), fragment_offset,
        current_block_size);
        current_block_size += fragment_size;
    }
    LOG(DEBUG) << "Successfully got file " << file->file_id() << " data";
    return data;
}
  679. //------------------------------------------------//
  680. // LOCALE SECTION
  681. //------------------------------------------------//
  682. DAT_RESULT DatFile::SetLocale(LOCALE locale) {
  683. LOG(INFO) << "Setting locale to " << (locale == PATCHED ? " PATCHED" : " ORIGINAL");
  684. if (dat_state_ < READY) {
  685. LOG(ERROR) << "Dat state isn't READY. Cannot set locale.";
  686. return INCORRECT_STATE_ERROR;
  687. }
  688. if (current_locale_ == locale) {
  689. return SUCCESS;
  690. }
  691. dat_state_ = UPDATED;
  692. auto dict = GetLocaleDictReference(locale);
  693. for (const auto& file : dict) {
  694. if (file.second == nullptr)
  695. continue;
  696. if (dictionary_.count(file.first) == 0) {
  697. LOG(WARNING) << "In locale dictionary there is file with file_id = " << file.first
  698. << "which is not in .dat file! Passing it and removing from locale dictionary";
  699. dict.erase(file.first);
  700. continue;
  701. }
  702. if (dictionary_[file.first]->MakeHeaderData().CutData(8, 16) ==
  703. file.second->MakeHeaderData().CutData(8, 16) ||
  704. inactive_categories.count(orig_dict_[file.first]->category) != 0)
  705. continue;
  706. long long file_id = file.first;
  707. std::shared_ptr<SubFile> new_file = file.second;
  708. *dictionary_[file_id] = *new_file;
  709. pending_dictionary_.insert(file_id);
  710. dat_state_ = UPDATED;
  711. }
  712. current_locale_ = locale;
  713. LOG(INFO) << "Locale set successfull";
  714. return SUCCESS;
  715. }
  716. LOCALE DatFile::current_locale() {
  717. if (dat_state_ < READY) {
  718. LOG(ERROR) << "dat_file is in incorrect state!";
  719. return ORIGINAL;
  720. }
  721. if (current_locale_ != PATCHED && current_locale_ != ORIGINAL) {
  722. LOG(ERROR) << "locale has incorrect value. Setting it to original";
  723. current_locale_ = ORIGINAL;
  724. }
  725. return current_locale_;
  726. }
  727. std::map<long long, std::shared_ptr<SubFile> > &DatFile::GetLocaleDictReference(LOCALE locale) {
  728. switch (locale) {
  729. case PATCHED:
  730. return patch_dict_;
  731. case ORIGINAL:
  732. return orig_dict_;
  733. default:
  734. LOG(ERROR) << "Unknown locale! Returning original";
  735. return orig_dict_;
  736. }
  737. }
  738. //------------------------------------------------//
  739. // CHECKERS SECTION
  740. //------------------------------------------------//
  741. bool DatFile::CorrectSubfile(std::shared_ptr<SubFile> file) {
  742. BinaryData mfile_id(20);
  743. io.ReadData(mfile_id, 20, file->file_offset() + 8);
  744. if (mfile_id.Empty())
  745. return false;
  746. return (mfile_id.CheckCompression() || file->file_id() == mfile_id.ToNumber<4>(0)) && file->file_size() < 50ll * 1024ll * 1024ll;
  747. }
// Game-side update detection is currently DISABLED: this always reports
// "not updated". The original header-comparison heuristic is kept below,
// commented out, for reference — presumably it misfired and was switched
// off deliberately (TODO confirm with project history).
bool DatFile::CheckIfUpdatedByGame() {
    // LOG(INFO) << "Checking if DatFile was updated by LotRO";
    // if (!pending_patch_.empty())
    // return true;
    // bool updated = false;
    // for (const auto& i : dictionary_) {
    // long long file_id = i.first;
    // std::shared_ptr<SubFile> subfile = i.second;
    // if (patch_dict_.count(file_id) == 0)
    // continue;
    //
    // if (*subfile != *patch_dict_[file_id] && *subfile != *orig_dict_[file_id]) {
    // //orig_dict_.clear();
    // //patch_dict_.clear();
    // LOG(INFO) << "DAT FILE WAS UPDATED!!!! CLEARING PATCH DATA";
    // pending_patch_.insert(file_id);
    // io.WriteData(BinaryData::FromNumber<4>(0), 4, 300);
    // return true;
    // }
    // }
    // return updated;
    return false;
}
  771. bool DatFile::CheckIfNotPatched() {
  772. LOG(INFO) << "DatFile " << (patch_dict_.empty() ? "HASN'T " : "HAS already")
  773. << " been patched by LEGACY launcher!";
  774. return patch_dict_.empty();
  775. }
  776. bool DatFile::CheckIfPatchedByOldLauncher() {
  777. LOG(INFO) << "DatFile " << (dictionary_.count(620750000) == 0 ? "HASN'T " : "HAS already")
  778. << " been patched by OLD LAUNCHER!";
  779. return dictionary_.count(620750000) > 0;
  780. }
  781. //------------------------------------------------//
  782. // CATEGORY SECTION
  783. //------------------------------------------------//
  784. DAT_RESULT DatFile::EnableCategory(int category) {
  785. LOG(INFO) << "Enabling category " << category;
  786. if (inactive_categories.count(category) == 0)
  787. return SUCCESS;
  788. inactive_categories.erase(category);
  789. dat_state_ = UPDATED;
  790. for (auto &file : dictionary_) {
  791. auto file_id = file.first;
  792. if (patch_dict_.count(file_id) > 0 && patch_dict_[file_id]->category == category) {
  793. *file.second = *patch_dict_[file_id];
  794. pending_dictionary_.insert(file_id);
  795. }
  796. }
  797. LOG(INFO) << "Category " << category << " enabled successfully";
  798. return SUCCESS;
  799. }
  800. DAT_RESULT DatFile::DisableCategory(int category) {
  801. LOG(INFO) << "Disabling category " << category;
  802. if (inactive_categories.count(category) != 0)
  803. return SUCCESS;
  804. inactive_categories.insert(category);
  805. dat_state_ = UPDATED;
  806. for (auto &file : dictionary_) {
  807. auto file_id = file.first;
  808. if (orig_dict_.count(file_id) && orig_dict_[file_id]->category == category) {
  809. *file.second = *orig_dict_[file_id];
  810. pending_dictionary_.insert(file_id);
  811. }
  812. }
  813. LOG(INFO) << "Category " << category << " disabled successfully";
  814. return SUCCESS;
  815. }
// Read-only view of the categories currently disabled via DisableCategory().
const std::set<long long> &DatFile::GetInactiveCategoriesList() {
    return inactive_categories;
}
// Path of the .dat file this object was initialised with.
const std::string &DatFile::filename() const {
    return filename_;
}
// Tops up the pre-allocated run of zero bytes at the end of the .dat file
// that new subfile blocks are written into, so the file doesn't have to
// grow on every single patch.
void DatFile::AddBufferedSize() {
    // Enough reserve left — nothing to do.
    if (free_buffered_size_ >= MIN_BUFFERED_SIZE)
        return;
    // Append MAX_BUFFERED_SIZE zero bytes at the current end of file.
    // NOTE(review): io.file_size is not advanced here — presumably the
    // patch writer consumes this tail and bumps io.file_size itself;
    // confirm against the patch-apply code.
    BinaryData nulls(MAX_BUFFERED_SIZE);
    io.WriteData(nulls, MAX_BUFFERED_SIZE, io.file_size);
    free_buffered_size_ = MAX_BUFFERED_SIZE;
}
  829. //------------------------------------------------//
  830. // BACKUP SECTION
  831. //------------------------------------------------//
  832. bool DatFile::CheckIfBackupExists(const std::string &backup_datname) {
  833. std::ifstream dst("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
  834. return !dst.fail();
  835. }
  836. DAT_RESULT DatFile::RemoveBackup(const std::string &backup_datname) {
  837. if (!CheckIfBackupExists(backup_datname))
  838. return SUCCESS;
  839. if (remove(("DAT_LIBRARY_BACKUP/" + backup_datname).c_str()) == 0)
  840. return SUCCESS;
  841. return REMOVE_FILE_ERROR;
  842. }
  843. DAT_RESULT DatFile::CreateBackup(const std::string &backup_datname) {
  844. auto filename = filename_;
  845. auto dat_id = dat_id_;
  846. LOG(INFO) << "Restoring .dat file " << filename << " from backup " << backup_datname;
  847. LOG(INFO) << " Closing DatFile...";
  848. CloseDatFile();
  849. LOG(INFO) << " Copying " << filename << " to " << backup_datname;
  850. mkdir("DAT_LIBRARY_BACKUP");
  851. std::ifstream src(filename, std::ios::binary);
  852. std::ofstream dst("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
  853. std::istreambuf_iterator<char> begin_source(src);
  854. std::istreambuf_iterator<char> end_source;
  855. std::ostreambuf_iterator<char> begin_dest(dst);
  856. std::copy(begin_source, end_source, begin_dest);
  857. src.close();
  858. dst.close();
  859. LOG(INFO) << " Done copying. Initializing restored" << filename << " DatFile...";
  860. InitDatFile(filename, dat_id);
  861. LOG(INFO) << "Restoring .dat file success!";
  862. return SUCCESS;
  863. }
  864. DAT_RESULT DatFile::RestoreFromBackup(const std::string &backup_datname) {
  865. auto filename = filename_;
  866. auto dat_id = dat_id_;
  867. LOG(INFO) << "Restoring .dat file " << filename << " from backup " << backup_datname;
  868. LOG(INFO) << " Closing DatFile...";
  869. CloseDatFile();
  870. LOG(INFO) << " Copying " << filename << " to " << backup_datname;
  871. mkdir("DAT_LIBRARY_BACKUP");
  872. std::ifstream src("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
  873. std::ofstream dst(filename, std::ios::binary);
  874. if (src.fail()) {
  875. LOG(ERROR) << "CANNOT RESTORE FILE FROM BACKUP - no backup specified with name " << backup_datname;
  876. return NO_BACKUP_ERROR;
  877. }
  878. std::istreambuf_iterator<char> begin_source(src);
  879. std::istreambuf_iterator<char> end_source;
  880. std::ostreambuf_iterator<char> begin_dest(dst);
  881. std::copy(begin_source, end_source, begin_dest);
  882. src.close();
  883. dst.close();
  884. LOG(INFO) << " Done copying. Initializing restored" << filename << " DatFile...";
  885. InitDatFile(filename, dat_id);
  886. LOG(INFO) << "Restoring .dat file success!";
  887. return SUCCESS;
  888. }
  889. }
  890. }