DatFile.cpp 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254
  1. //
  2. // Created by Иван_Архипов on 31.10.2017.
  3. //
  4. #include "DatFile.h"
  5. #include "BinaryData.h"
  6. #include "SubDirectory.h"
  7. #include "Subfile.h"
  8. #include "SubfileData.h"
  9. #include <EasyLogging++/easylogging++.h>
  10. #include <unistd.h>
  11. #include <algorithm>
  12. #include <iterator>
  13. #include <locale>
  14. #define ELPP_FEATURE_CRASH_LOG
  15. INITIALIZE_EASYLOGGINGPP
  16. #ifdef WIN32
  17. #define fseek _fseeki64
  18. #define ftell _ftelli64
  19. #endif
  20. extern "C++"
  21. {
  22. namespace LOTRO_DAT {
  23. //------------------------------------------------//
  24. // INIT SECTION
  25. //------------------------------------------------//
// Constructs a DatFile in the CLOSED state with empty locale dictionaries
// and configures the EasyLogging++ backend for the whole library:
// file-only logging, 5 MB rotation, flush after every record, and a
// separate (disabled by default) debug log file.
DatFile::DatFile() {
    dat_state_ = CLOSED;
    root_directory_ = nullptr;
    file_handler_ = nullptr;
    free_buffered_size_ = 0;
    orig_dict_.clear();
    patch_dict_.clear();
    dictionary_.clear();

    // Global logger configuration applied to all loggers.
    el::Configurations defaultConf;
    defaultConf.setToDefault();
    defaultConf.setGlobally(el::ConfigurationType::Format,
                            "%datetime %level %fbase (line %line) : %msg (function: %func)");
    defaultConf.setGlobally(el::ConfigurationType::ToFile, "true");
    defaultConf.setGlobally(el::ConfigurationType::Filename, "dat_library.log");
    defaultConf.setGlobally(el::ConfigurationType::ToStandardOutput, "false");
    defaultConf.setGlobally(el::ConfigurationType::PerformanceTracking, "true");
    defaultConf.setGlobally(el::ConfigurationType::MaxLogFileSize, "5242880"); // 5MB
    defaultConf.setGlobally(el::ConfigurationType::LogFlushThreshold, "1"); // Flush after every one log
    // Debug output goes to its own file and stays off unless re-enabled.
    defaultConf.set(el::Level::Debug, el::ConfigurationType::Enabled, "false");
    defaultConf.set(el::Level::Debug, el::ConfigurationType::Filename, "dat_library_debug.log");
    el::Loggers::reconfigureAllLoggers(defaultConf);
    LOG(INFO) << "==================================================================";
    LOG(INFO) << "Starting new DatFile class instance";
}
// Fully initialises this DatFile from the .dat archive at `filename`:
// opens the file, reads the superblock, builds the directory tree and the
// file-id dictionary, loads locale dictionaries, then validates and (if
// needed) repairs the result. `dat_id` is an opaque identifier stored for
// later export tagging. Reopening the already-open file is a no-op.
// Returns the worst (largest) DAT_RESULT seen across the init stages;
// values <= 0 are errors, >= 2 indicate possible corruption — presumably
// per the DAT_RESULT enum ordering declared in the header (TODO confirm).
DAT_RESULT DatFile::InitDatFile(const std::string &filename, int dat_id) {
    LOG(DEBUG) << "Started initialisation of DatFile " << filename;
    // Same file already open: nothing to do.
    if (dat_state_ != CLOSED && filename == filename_) {
        LOG(DEBUG) << "Trying to reopen the same file: " << filename << ". Doing nothing.";
        return SUCCESS;
    }
    // A different file is open: close it before reopening.
    if (dat_state_ != CLOSED && filename != filename_) {
        LOG(DEBUG) << "DatFile wasn't closed yet. Closing in order to reopen.";
        if (CloseDatFile() != SUCCESS) {
            LOG(ERROR) << "Unable to perform CloseDatFile()! Aborting initialization!";
            return FAILED;
        }
    }
    // Reset all per-file state to a known baseline before opening.
    dat_id_ = dat_id;
    dat_state_ = CLOSED;
    current_locale_ = ORIGINAL;
    root_directory_ = nullptr;
    file_handler_ = nullptr;
    free_buffered_size_ = 0;
    filename_ = "none";
    DAT_RESULT result;
    DAT_RESULT return_value = SUCCESS;
    // Stage 1: open the file handle and measure its size.
    result = OpenDatFile(filename.c_str());
    if (result != SUCCESS) {
        LOG(ERROR) << "Unable to perform opening file. Aborting.";
        CloseDatFile();
        return result;
    }
    return_value = std::max(return_value, result);
    // Stage 2: read and validate the superblock header.
    result = ReadSuperBlock();
    if (result <= 0) {
        LOG(ERROR) << "Unable to read super block. Aborting.";
        CloseDatFile();
        return result;
    }
    return_value = std::max(return_value, result);
    // Stage 3: build the directory tree from the root directory offset.
    result = MakeDirectories();
    if (result <= 0) {
        LOG(ERROR) << "Unable to make directories. Aborting.";
        CloseDatFile();
        return result;
    }
    return_value = std::max(return_value, result);
    // Stage 4: flatten the directory tree into the file-id dictionary.
    result = MakeDictionary();
    if (result <= 0) {
        LOG(ERROR) << "Unable to make dictionary. Aborting.";
        CloseDatFile();
        return result;
    }
    return_value = std::max(return_value, result);
    // Stage 5: load the ORIG/PATC locale dictionaries stored in the file.
    result = InitLocales();
    if (result <= 0) {
        LOG(ERROR) << "Unable to initialize locales. Aborting.";
        CloseDatFile();
        return result;
    }
    return_value = std::max(return_value, result);
    LOG(INFO) << "File " << filename << " opened successfully!";
    filename_ = filename;
    dat_state_ = READY;
    LOG(INFO) << "Making last preparations...";
    return_value = std::max(return_value, result);
    PerformDictionaryCheck();
    // A return value >= 2 signals potential corruption; attempt a repair.
    if (return_value >= 2) {
        LOG(WARNING) << "Dat file could be corrupted. Trying to delete corrupted dictionary rows";
        if (RepairDatFile() != SUCCESS)
            return CRITICAL_DAT_ERROR;
    }
    // If the game patched the archive since our locale data was written,
    // re-run initialisation from scratch. NOTE(review): the recursive call's
    // return value is discarded, so return_value reflects the first pass.
    if (CheckIfUpdatedByGame()) {
        LOG(INFO) << ".dat file was updated by game! Need to reinitialize files and directories!";
        CloseDatFile();
        InitDatFile(filename, dat_id);
    }
    dat_without_patches_ = CheckIfNotPatched();
    LOG(INFO) << "Preparations made successfully! Init return value = " << return_value;
    return return_value;
}
  127. DAT_RESULT DatFile::OpenDatFile(const char *dat_name) {
  128. LOG(DEBUG) << "Started opening DatFile";
  129. if (dat_state_ != CLOSED) {
  130. CloseDatFile();
  131. }
  132. file_handler_ = fopen(dat_name, "r+b");
  133. if (file_handler_ == nullptr) {
  134. LOG(ERROR) << "Unable to open file " << dat_name;
  135. return NO_FILE_ERROR;
  136. }
  137. fseek(file_handler_, 0, SEEK_END);
  138. actual_dat_size_ = ftell(file_handler_);
  139. fseek(file_handler_, 0, SEEK_SET);
  140. dat_state_ = SUCCESS_OPENED;
  141. LOG(DEBUG) << "Successfully opened DatFile";
  142. return SUCCESS;
  143. }
// Reads the 1 KB superblock at the start of the archive and caches its
// fields. Validates the two magic constants; a file-size mismatch is only
// logged (the hard failure is deliberately commented out). On success the
// state advances to SUCCESS_SUPERBLOCK.
DAT_RESULT DatFile::ReadSuperBlock() {
    LOG(DEBUG) << "Started reading superblock";
    if (dat_state_ != SUCCESS_OPENED) {
        LOG(ERROR) << "Dat state isn't SUCCESS_OPENED. Cannot perform extraction.";
        return INCORRECT_STATE_ERROR;
    }
    BinaryData data(1024);
    ReadData(data, 1024);
    // Superblock layout (4-byte little-endian fields, per the offsets below):
    constant1_ = data.ToNumber<4>(0x100);                     // magic #1
    constant2_ = data.ToNumber<4>(0x140);                     // magic #2
    version1_ = data.ToNumber<4>(0x14C);
    file_size_ = data.ToNumber<4>(0x148);                     // recorded file size
    version2_ = data.ToNumber<4>(0x150);
    fragmentation_journal_offset_ = data.ToNumber<4>(0x154);
    fragmentation_journal_end_ = data.ToNumber<4>(0x158);
    fragmentation_journal_size_ = data.ToNumber<4>(0x15C);
    root_directory_offset_ = data.ToNumber<4>(0x160);         // root of dir tree
    free_dat_size_ = data.ToNumber<4>(0x19C);
    if (constant1_ != 0x4C5000) {
        LOG(ERROR) << "variable at position 0x100 is not equal to .dat file constant!";
        return INCORRECT_SUPERBLOCK_ERROR;
    }
    if (constant2_ != 0x5442) {
        LOG(ERROR) << "variable at position 0x140 is not equal to .dat file constant!";
        return INCORRECT_SUPERBLOCK_ERROR;
    }
    // Size mismatch is common after game updates; warn but continue.
    if (file_size_ != actual_dat_size_) {
        LOG(ERROR) << "variable at 0x148 position is not equal to .dat file size!";
        //return CORRUPTED_FILE_WARNING;
    }
    dat_state_ = SUCCESS_SUPERBLOCK;
    LOG(DEBUG) << "Superblock read successfully";
    return SUCCESS;
}
  178. DAT_RESULT DatFile::MakeDirectories() {
  179. LOG(DEBUG) << "Started making directories";
  180. if (dat_state_ != SUCCESS_SUPERBLOCK) {
  181. LOG(ERROR) << "Dat state isn't SUCCESS_SUPERBLOCK. Cannot make directories.";
  182. return INCORRECT_STATE_ERROR;
  183. }
  184. root_directory_ = new SubDirectory((unsigned) root_directory_offset_, this);
  185. dat_state_ = SUCCESS_DIRECTORIES;
  186. LOG(DEBUG) << "Directories made successfully";
  187. return SUCCESS;
  188. }
  189. DAT_RESULT DatFile::MakeDictionary() {
  190. LOG(DEBUG) << "Started making dictionary";
  191. if (dat_state_ != SUCCESS_DIRECTORIES) {
  192. LOG(ERROR) << "Dat state isn't SUCCESS_DIRECTORIES. Cannot make directories.";
  193. return INCORRECT_STATE_ERROR;
  194. }
  195. if (root_directory_ == nullptr) {
  196. LOG(ERROR) << "root_directory is nullptr!!";
  197. return INIT_ERROR;
  198. }
  199. root_directory_->MakeDictionary(dictionary_);
  200. dat_state_ = SUCCESS_DICTIONARY;
  201. LOG(DEBUG) << "Dictionary made successfull";
  202. return SUCCESS;
  203. }
// Loads the library's own locale dictionaries, stored as an extra block
// inside the .dat file. The 4-byte pointer at offset 300 gives the block's
// offset; the block starts with [size:4][version:4] followed by a payload:
//   15 bytes  "Hi from Gi1dor!" signature
//    4 bytes  current locale tag ("ORIG" or "PATC")
//    4 bytes  orig_dict size, then (32 header + 4 category) per entry
//    4 bytes  patch_dict size, then (32 header + 4 category) per entry
//    4 bytes  inactive-category count, then 4 bytes per category id
// Any structural problem is non-fatal: the function logs and returns
// SUCCESS with empty locale dictionaries.
DAT_RESULT DatFile::InitLocales() {
    LOG(INFO) << "Initialising locales...";
    BinaryData dicts_data(4);
    // Pointer to the locale block lives at fixed offset 300.
    ReadData(dicts_data, 4, 300);
    long long dict_offset = dicts_data.ToNumber<4>(0);
    if (dict_offset == 0 || dict_offset + 8 >= actual_dat_size_) {
        LOG(INFO) << "Dictionary offset is empty or incorrect. Passing.";
        return SUCCESS;
    }
    ReadData(dicts_data, 4, dict_offset);
    long long dict_size = dicts_data.ToNumber<4>(0);
    ReadData(dicts_data, 4, dict_offset + 4);
    long long dict_version = dicts_data.ToNumber<4>(0);
    LOG(INFO) << "Dictionary size is " << dict_size << ". Version is " << dict_version;
    // Only format version 100 is understood.
    if (dict_version != 100) {
        LOG(WARNING) << "DICTIONARY IS OLD!!!";
        return SUCCESS;
    }
    dicts_data = BinaryData((unsigned)dict_size);
    ReadData(dicts_data, dict_size, dict_offset + 8);
    // Too small to even hold the signature: wipe the stale pointer at 300
    // and continue without locale data.
    if (dicts_data.size() < 15) {
        LOG(ERROR) << "Incorrect dictionary. Passing without it.";
        orig_dict_.clear();
        patch_dict_.clear();
        WriteData(BinaryData::FromNumber<4>(0), 4, 300);
        dat_state_ = UPDATED;
        dat_without_patches_ = true;
        return SUCCESS;
    }
    // Verify the 15-byte signature (NUL-terminated for string compare).
    BinaryData hi_data = dicts_data.CutData(0, 15) + BinaryData("\0", 1);
    std::string hi = std::string((char *) (hi_data.data()));
    LOG(DEBUG) << "hi info is " << hi;
    if (hi != "Hi from Gi1dor!") {
        LOG(WARNING) << "Didn't receive 'hi' from Gi1dor... Initialising locale dicts as empty";
        LOG(INFO) << "Could't init locales' file... Continuing without them";
        return SUCCESS;
    }
    int offset = 15;
    // 4-byte locale tag: "ORIG" or "PATC".
    BinaryData current_locale_data = dicts_data.CutData(offset, offset + 4) + BinaryData("\0", 1);
    std::string locale((char *) (current_locale_data.data()));
    offset += 4;
    LOG(DEBUG) << "current locale:" << locale;
    if (locale != "PATC" && locale != "ORIG") {
        LOG(WARNING) << "Incorrect locale... Initialising locale dicts as empty";
        LOG(INFO) << "Could't recognize locale... Continuing without locales";
        return SUCCESS;
    }
    current_locale_ = (locale == "PATC" ? PATCHED : ORIGINAL);
    // 15 bytes for "Hi from Gi1dor"
    // 4 bytes for LOCALE
    // 4 bytes for orig_dict.size()
    // (32 + 4) * orig_dict.size() bytes for orig_dict data
    // 4 bytes for patch_dict.size()
    // (32 + 4) * patch_dict.size() bytes for patch_dict data
    // 4 bytes for inactive_categories dict
    // 4 * inactive_categories.size() bytes for inactive_categories data
    size_t orig_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    // Original-locale entries: 32-byte Subfile header + 4-byte category.
    // NOTE(review): a duplicate file_id would leak the earlier Subfile*.
    for (size_t i = 0; i < orig_dict_size; i++) {
        auto file = new Subfile(this, dicts_data.CutData(offset, offset + 32));
        orig_dict_[file->file_id()] = file;
        offset += 32;
        orig_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
        offset += 4;
        if (orig_dict_[file->file_id()]->category == 0)
            LOG(DEBUG) << "file category is undefined (0)!";
    }
    // Patched-locale entries, same layout as above.
    size_t patch_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    for (size_t i = 0; i < patch_dict_size; i++) {
        auto file = new Subfile(this, dicts_data.CutData(offset, offset + 32));
        patch_dict_[file->file_id()] = file;
        offset += 32;
        patch_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
        offset += 4;
        if (patch_dict_[file->file_id()]->category == 0)
            LOG(DEBUG) << "file category is undefined (0)!";
    }
    // Categories whose patches are currently deactivated.
    size_t active_patches_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    for (size_t i = 0; i < active_patches_dict_size; i++) {
        inactive_categories.insert(dicts_data.ToNumber<4>(offset));
        offset += 4;
    }
    LOG(INFO) << "There are " << patch_dict_.size() << " files in patch locale dictionary";
    LOG(INFO) << "There are " << orig_dict_.size() << " files in original locale dictionary";
    std::string inactive_cat_s;
    for (auto i : inactive_categories) {
        inactive_cat_s += std::to_string(i) + " ";
    }
    LOG(INFO) << "Unactive patches now: " << inactive_cat_s;
    LOG(INFO) << "Finished initialising locales";
    return SUCCESS;
}
  298. DAT_RESULT DatFile::PerformDictionaryCheck() {
  299. for (auto mpair : dictionary_) {
  300. auto file = mpair.second;
  301. auto file_id = mpair.first;
  302. if (CorrectSubfile(file))
  303. continue;
  304. if (current_locale_ == PATCHED && orig_dict_.count(file_id) > 0) {
  305. LOG(WARNING) << "Potential incorrect patched version of file " << file_id << ". Switching to original.";
  306. dictionary_[file_id] = orig_dict_[file_id];
  307. }
  308. if (!CorrectSubfile(file)) {
  309. LOG(ERROR) << "Incorrect file " << file_id << ". It's offset is said as " << file->file_offset()
  310. << ". Erasing it from dictionary.";
  311. dictionary_.erase(file_id);
  312. }
  313. }
  314. return SUCCESS;
  315. }
  316. //------------------------------------------------//
  317. // CLOSE SECTION
  318. //------------------------------------------------//
// Destructor: commits any pending changes and releases the file handle and
// directory tree via CloseDatFile().
DatFile::~DatFile() {
    CloseDatFile();
}
// Closes the archive: commits locale data and pending directory headers
// if anything changed, closes the file handle, frees the directory tree,
// and resets every member to its CLOSED-state default. Safe to call when
// already closed (returns SUCCESS immediately).
DAT_RESULT DatFile::CloseDatFile() {
    LOG(INFO) << "Closing DatFile";
    if (dat_state_ == CLOSED) {
        LOG(INFO) << "DatFile is already closed. Nothing to do";
        return SUCCESS;
    }
    // Committing changes and updating/writing locales and header info
    // (must happen before the file handle is closed below).
    if (!pending_dictionary_.empty() || dat_state_ == UPDATED) {
        CommitLocales();
        CommitDirectories();
        //ModifyFragmentationJournal();
        //free_dat_size_ = 128248;
        //fragmentation_journal_end_ = 0;
        //fragmentation_journal_size_ = 1;
        //UpdateHeader();
    }
    current_locale_ = ORIGINAL;
    if (file_handler_ != nullptr) {
        fclose(file_handler_);
    }
    // Shared traversal cache must be reset for the next open.
    SubDirectory::visited_subdirectories_.clear();
    delete root_directory_;
    //truncate64(filename_.c_str(), file_size_);
    // Reset all bookkeeping to the freshly-constructed state.
    // NOTE(review): Subfile* values in orig_dict_/patch_dict_/dictionary_
    // are not deleted here — presumably owned elsewhere; confirm.
    free_buffered_size_ = 0;
    filename_ = "none";
    orig_dict_.clear();
    patch_dict_.clear();
    pending_patch_.clear();
    inactive_categories.clear();
    file_handler_ = nullptr;
    root_directory_ = nullptr;
    pending_dictionary_.clear();
    dictionary_.clear();
    constant1_ = 0;
    constant2_ = 0;
    file_size_ = 0;
    version1_ = 0;
    version2_ = 0;
    fragmentation_journal_size_ = 0;
    fragmentation_journal_end_ = 0;
    root_directory_offset_ = 0;
    fragmentation_journal_offset_ = 0;
    dat_state_ = CLOSED;
    dat_id_ = -1;
    LOG(INFO) << "File closed successfully.";
    return SUCCESS;
}
// Serialises the locale dictionaries back into the .dat file in the layout
// read by InitLocales(). If the existing locale block is too small (or
// absent) a new block is appended at the end of the file, its offset is
// written to the fixed pointer at offset 300, and extra zero-filled space
// is reserved; otherwise the block is overwritten in place.
DAT_RESULT DatFile::CommitLocales() {
    LOG(INFO) << "Committing locales...";
    // 15 bytes for "Hi from Gi1dor"
    // 4 bytes for LOCALE
    // 4 bytes for orig_dict.size()
    // (32 + 4) * orig_dict.size() bytes for orig_dict data
    // 4 bytes for patch_dict.size()
    // (32 + 4) * patch_dict.size() bytes for patch_dict data
    // 4 bytes for inactive_categories list
    // 4 * inactive_categories.size() bytes for inactive_categories data
    // NOTE(review): the leading 14 exceeds the itemised layout above —
    // presumably deliberate slack; confirm before changing.
    BinaryData binary_data = BinaryData(14 + 15 + 4
                                        + 4 + (32 + 4) * orig_dict_.size()
                                        + 4 + (32 + 4) * patch_dict_.size()
                                        + 4 + 4 * inactive_categories.size());
    size_t current_size = 0;
    binary_data.Append(BinaryData("Hi from Gi1dor!", 15), current_size);
    current_size += 15;
    binary_data.Append(BinaryData((current_locale_ == ORIGINAL ? "ORIG" : "PATC"), 4), current_size);
    current_size += 4;
    // Original-locale entries: 32-byte header + 4-byte category each.
    binary_data.Append(BinaryData::FromNumber<4>(orig_dict_.size()), current_size);
    current_size += 4;
    for (auto file : orig_dict_) {
        binary_data.Append(file.second->MakeHeaderData(), current_size);
        current_size += 32;
        binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
        current_size += 4;
    }
    // Patched-locale entries, same layout.
    binary_data.Append(BinaryData::FromNumber<4>(patch_dict_.size()), current_size);
    current_size += 4;
    for (auto file : patch_dict_) {
        binary_data.Append(file.second->MakeHeaderData(), current_size);
        current_size += 32;
        binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
        current_size += 4;
    }
    // Deactivated category ids.
    binary_data.Append(BinaryData::FromNumber<4>(inactive_categories.size()), current_size);
    current_size += 4;
    for (auto patch_id : inactive_categories) {
        binary_data.Append(BinaryData::FromNumber<4>(patch_id), current_size);
        current_size += 4;
    }
    // Locate the existing locale block via the pointer at offset 300.
    BinaryData dicts_data(4);
    ReadData(dicts_data, 4, 300);
    long long dict_offset = dicts_data.ToNumber<4>(0);
    ReadData(dicts_data, 4, dict_offset);
    long long dict_size = dicts_data.ToNumber<4>(0);
    if (binary_data.size() > dict_size || dict_offset == 0) {
        // Existing block missing or too small: append a new one at EOF and
        // repoint offset 300 at it. The recorded size is padded to at least
        // 20 MB so future commits can reuse the block in place.
        WriteData(BinaryData::FromNumber<4>(file_size_), 4, 300);
        WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size() + 4, 20u * 1024u * 1024u)), 4, file_size_);
        WriteData(BinaryData::FromNumber<4>(100), 4, file_size_ + 4);
        WriteData(binary_data, binary_data.size(), file_size_ + 8);
        file_size_ += binary_data.size() + 8;
        // Adding space for 25 megabytes locales file in total.
        BinaryData nulls(unsigned(20 * 1024 * 1024));
        WriteData(nulls, nulls.size(), file_size_);
        file_size_ += nulls.size();
    } else {
        // Block is large enough: overwrite size, version (100) and payload.
        WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size(), 20u * 1024u * 1024u)), 4, dict_offset);
        WriteData(BinaryData::FromNumber<4>(100), 4, dict_offset + 4);
        WriteData(binary_data, binary_data.size(), dict_offset + 8);
    }
    LOG(INFO) << "Locales commited successfully";
    return SUCCESS;
}
  433. DAT_RESULT DatFile::CommitDirectories() {
  434. for (auto file_id : pending_dictionary_) {
  435. if (dictionary_[file_id] == nullptr || !CorrectSubfile(dictionary_[file_id]))
  436. continue;
  437. WriteData(dictionary_[file_id]->MakeHeaderData(), 32, dictionary_[file_id]->dictionary_offset());
  438. }
  439. pending_dictionary_.clear();
  440. return SUCCESS;
  441. }
// Rewrites the last fragmentation-journal entry so the archive's free
// block points at freshly zero-filled space appended to the end of the
// file. No-op when the journal is empty. (Currently only invoked from
// commented-out code in CloseDatFile.)
DAT_RESULT DatFile::ModifyFragmentationJournal() {
    if (fragmentation_journal_size_ == 0)
        return SUCCESS;
    LOG(DEBUG) << "Modifying fragmentation journal";
    BinaryData data(4);
    // Each journal entry is 8 bytes: [size:4][offset:4].
    ReadData(data, 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_);
    LOG(INFO) << "FREE_SIZE BLOCK = " << data.ToNumber<4>(0);
    long long free_size = data.ToNumber<4>(0);
    long long free_offset = file_size_;
    // Append a zero-filled block of that size at the end of the file.
    BinaryData nulldata = BinaryData(unsigned(free_size));
    WriteData(nulldata, nulldata.size(), file_size_);
    file_size_ += nulldata.size();
    // Point the journal entry at the newly appended free space.
    WriteData(BinaryData::FromNumber<4>(free_size), 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_);
    WriteData(BinaryData::FromNumber<4>(free_offset), 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_ + 4);
    //nulldata = BinaryData(8);
    //WriteData(nulldata, nulldata.size(), fragmentation_journal_offset_ + 16);
    LOG(DEBUG) << "Finished modifying fragmentation journal";
    return SUCCESS;
}
// Writes the cached superblock fields back to their fixed offsets in the
// archive header (the inverse of ReadSuperBlock).
DAT_RESULT DatFile::UpdateHeader() {
    LOG(DEBUG) << "Updating header";
    WriteData(BinaryData::FromNumber<4>(constant1_), 4, 0x100);   // magic #1
    WriteData(BinaryData::FromNumber<4>(constant2_), 4, 0x140);   // magic #2
    //WriteData(BinaryData::FromNumber<4>( 0 ), 4, 0x144);
    WriteData(BinaryData::FromNumber<4>(file_size_), 4, 0x148);
    WriteData(BinaryData::FromNumber<4>(version1_ ), 4, 0x14C);
    WriteData(BinaryData::FromNumber<4>(version2_ ), 4, 0x150);
    WriteData(BinaryData::FromNumber<4>(fragmentation_journal_offset_), 4, 0x154);
    WriteData(BinaryData::FromNumber<4>(fragmentation_journal_end_), 4, 0x158);
    WriteData(BinaryData::FromNumber<4>(fragmentation_journal_size_), 4, 0x15C);
    WriteData(BinaryData::FromNumber<4>(root_directory_offset_), 4, 0x160);
    WriteData(BinaryData::FromNumber<4>(free_dat_size_), 4, 0x19C);
    LOG(DEBUG) << "Finished updating header";
    return SUCCESS;
}
  477. DAT_RESULT DatFile::RepairDatFile() {
  478. for (auto file : dictionary_) {
  479. auto subfile = file.second;
  480. auto file_id = file.first;
  481. if (CorrectSubfile(subfile))
  482. continue;
  483. orig_dict_.clear();
  484. patch_dict_.clear();
  485. return SUCCESS;
  486. if (orig_dict_.count(file_id) == 0 || subfile->file_offset() == orig_dict_[file_id]->file_offset())
  487. return CRITICAL_DAT_ERROR;
  488. *dictionary_[file_id] = *orig_dict_[file_id];
  489. patch_dict_.erase(file_id);
  490. orig_dict_.erase(file_id);
  491. }
  492. return SUCCESS;
  493. }
  494. //------------------------------------------------//
  495. // DAT INFO SECTION
  496. //------------------------------------------------//
// Returns the current lifecycle state of this DatFile (CLOSED ... READY).
DAT_STATE DatFile::DatFileState() const {
    return dat_state_;
}
// Returns the number of subfiles currently known to the dictionary.
long long DatFile::files_number() const {
    return dictionary_.size();
}
  503. //------------------------------------------------//
  504. // EXTRACT SECTION
  505. //------------------------------------------------//
  506. DAT_RESULT DatFile::ExtractFile(long long file_id, const std::string &path) {
  507. LOG(DEBUG) << "Extracting file " << file_id << " to path " << path;
  508. if (dat_state_ < READY) {
  509. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  510. return INCORRECT_STATE_ERROR;
  511. }
  512. BinaryData file_data = GetFileData(dictionary_[file_id], 8);
  513. if (file_data.size() == 0) {
  514. LOG(ERROR) << "File data is empty. Aborting extraction.";
  515. return NO_FILE_ERROR;
  516. }
  517. SubfileData export_data = dictionary_[file_id]->PrepareForExport(file_data);
  518. if (export_data.Empty()) {
  519. LOG(ERROR) << "Export data is empty. Aborting extraction.";
  520. return NO_FILE_ERROR;
  521. }
  522. if (export_data.binary_data.WriteToFile(path + export_data.options["ext"].as<std::string>()) != SUCCESS) {
  523. LOG(ERROR) << "Cannot write to file.";
  524. return WRITE_TO_FILE_ERROR;
  525. }
  526. LOG(DEBUG) << "File " << file_id << " extracted successfully";
  527. return SUCCESS;
  528. }
  529. DAT_RESULT DatFile::ExtractFile(long long file_id, Database *db) {
  530. LOG(DEBUG) << "Extracting file " << file_id << " to database.";
  531. if (dat_state_ < READY) {
  532. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  533. return INCORRECT_STATE_ERROR;
  534. }
  535. BinaryData file_data = GetFileData(dictionary_[file_id], 8);
  536. if (file_data.Empty()) {
  537. LOG(WARNING) << "File with id " << dictionary_[file_id]->file_id() << " is empty. Passing it.";
  538. return SUCCESS;
  539. }
  540. SubfileData export_data;
  541. export_data = dictionary_[file_id]->PrepareForExport(file_data);
  542. export_data.options["did"] = dat_id_;
  543. if (export_data == SubfileData()) {
  544. LOG(WARNING) << "File with id " << dictionary_[file_id]->file_id() << " is empty or incorrect.";
  545. return SUCCESS;
  546. }
  547. try {
  548. db->PushFile(export_data);
  549. } catch (std::exception &e) {
  550. LOG(ERROR) << "Caught " << e.what() << " exception.";
  551. return FAILED;
  552. }
  553. LOG(DEBUG) << "File " << file_id << " extracted successfully";
  554. return SUCCESS;
  555. }
  556. int DatFile::ExtractAllFilesByType(FILE_TYPE type, std::string path) {
  557. LOG(INFO) << "Extracting all files to path " << path;
  558. if (dat_state_ < READY) {
  559. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  560. return INCORRECT_STATE_ERROR;
  561. }
  562. int success = 0;
  563. for (auto i : dictionary_) {
  564. FILE_TYPE file_type = i.second->FileType();
  565. if (file_type == type) {
  566. success += (ExtractFile(i.second->file_id(), (path + std::to_string(i.second->file_id()))) == SUCCESS
  567. ? 1 : 0);
  568. }
  569. }
  570. LOG(INFO) << "Successfully extracted " << success << " files";
  571. return success;
  572. }
  573. int DatFile::ExtractAllFilesByType(FILE_TYPE type, Database *db) {
  574. LOG(INFO) << "Extracting all files to database...";
  575. if (dat_state_ < READY) {
  576. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  577. return INCORRECT_STATE_ERROR;
  578. }
  579. int success = 0;
  580. for (auto i : dictionary_) {
  581. FILE_TYPE file_type = i.second->FileType();
  582. if (file_type == type) {
  583. success += (ExtractFile(i.second->file_id(), db) == SUCCESS ? 1 : 0);
  584. }
  585. }
  586. LOG(INFO) << "Extracted " << success << " files";
  587. return success;
  588. }
  589. //------------------------------------------------//
  590. // PATCH SECTION
  591. //------------------------------------------------//
// Patches one subfile from imported SubfileData. Expects options["fid"]
// (file id) and optionally options["cat"] (category, defaults to 1).
// Rebuilds the import payload against the original-locale data when
// available and applies it via ApplyFilePatch(). The magic id 2013266257
// is exempted from locale handling throughout — presumably a special
// bookkeeping file; confirm against the rest of the library.
DAT_RESULT DatFile::PatchFile(const SubfileData &data) {
    LOG(DEBUG) << "Patching file with id = " << data.options["fid"].as<long long>() << ".";
    // Keep the recorded file size in sync with the real on-disk size.
    actual_dat_size_ = std::max(file_size_, actual_dat_size_);
    if (!dat_without_patches_) {
        file_size_ = actual_dat_size_;
    }
    if (dat_state_ < READY) {
        LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
        return INCORRECT_STATE_ERROR;
    }
    auto file_id = data.options["fid"].as<long long>();
    if (dictionary_.count(file_id) == 0) {
        LOG(ERROR) << "Cannot patch file - there is no file in dictionary with file_id = " << file_id;
        return NO_FILE_ERROR;
    }
    Subfile *file = dictionary_[file_id];
//    if (!CorrectSubfile(file)) {
//        if (current_locale_ == PATCHED && patch_dict_.count(file_id) > 0) {
//            LOG(WARNING) << "Patched subfile header with id = " << file->file_id() << " differs from original version...";
//        } else {
//            LOG(ERROR) << "Incorrect subfile with id " << file->file_id()
//                       << " (headers do not match). Cannot patch it";
//            return FAILED;
//        }
//    }
    // If file has inactive category, then we should set it to patched state in order to commit patch and
    // then in ApplyFilePatch(), if new category is still inactive, return dictionary to its original state;
    if (inactive_categories.count(file->category) != 0 && patch_dict_.count(file_id) != 0 && file_id != 2013266257) {
        *dictionary_[file_id] = *patch_dict_[file_id];
    }
    // Category comes from the patch data when present, else defaults to 1.
    if (data.options["cat"].IsDefined()) {
        file->category = data.options["cat"].as<long long>();
    } else {
        file->category = 1;
    }
    // Base the patch on the pristine original-locale data when we have it.
    BinaryData old_data = GetFileData(orig_dict_.count(file->file_id()) == 0 ? file : orig_dict_[file->file_id_]);
    if (old_data.Empty()) {
        LOG(ERROR) << "GetFileData returned empty data. Aborting.";
        return DAT_PATCH_FILE_ERROR;
    }
    BinaryData patch_data = file->MakeForImport(old_data, data);
    DAT_RESULT result = ApplyFilePatch(file, patch_data);
    if (result != SUCCESS)
        return result;
    LOG(DEBUG) << "Patched successfully file " << data.options["fid"].as<long long>() << ".";
    return SUCCESS;
}
  639. DAT_RESULT DatFile::PatchAllDatabase(Database *db) {
  640. LOG(INFO) << "Patching all database";
  641. if (dat_state_ < READY) {
  642. LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
  643. return INCORRECT_STATE_ERROR;
  644. }
  645. SubfileData data;
  646. data = db->GetNextFile();
  647. while (!data.Empty()) {
  648. DAT_RESULT result = PatchFile(data);
  649. if (result != SUCCESS)
  650. LOG(ERROR) << "Cannot patch file " << data.options["fid"].as<long long>() << " continuing";
  651. data = db->GetNextFile();
  652. }
  653. LOG(INFO) << "Successfully patched whole database";
  654. return SUCCESS;
  655. }
  656. DAT_RESULT DatFile::WriteUnorderedDictionary(std::string path) const {
  657. LOG(INFO) << "Writing unordered dictionary to " << path << "dict.txt";
  658. FILE *f = nullptr;
  659. fopen_s(&f, (path + "dict.txt").c_str(), "w");
  660. if (f == nullptr) {
  661. LOG(ERROR) << "Cannot open file " << path + "dict.txt";
  662. return WRITE_TO_FILE_ERROR;
  663. }
  664. fprintf(f, "unk1 file_id offset size1 timestamp version size2 unknown2 type\n");
  665. for (auto i : dictionary_) {
  666. fprintf(f, "%lld %lld %lld %lld %lld %lld %lld %lld %s\n", i.second->unknown1(), i.second->file_id(),
  667. i.second->file_offset(), i.second->file_size(), i.second->timestamp(), i.second->version(),
  668. i.second->block_size(), i.second->unknown2(), i.second->Extension().c_str());
  669. }
  670. fclose(f);
  671. LOG(INFO) << "Unordered dictionary was written successfully to " << path << "dict.txt";
  672. return SUCCESS;
  673. }
// Writes the prepared patch image `data` for `file` into the .dat archive and
// synchronizes the ORIGINAL/PATCHED locale dictionaries with the new header.
// `data` layout (established by the checks below): bytes [8..12) repeat the
// file_id, and 4 bytes appended here act as the "additional fragments" count.
// file_id 2013266257 is special-cased throughout — it bypasses locale and
// dictionary bookkeeping (NOTE(review): presumably a service subfile; confirm
// against the rest of DatFile).
DAT_RESULT DatFile::ApplyFilePatch(Subfile *file, BinaryData &data) {
    LOG(DEBUG) << "Applying " << file->file_id() << " patch.";
//    if (patch_dict_.size() == 0 && pending_dictionary_.size() == 0) {
//        BinaryData nulls(50 * 1024 * 1024);
//        WriteData(nulls, nulls.size(), file_size_);
//        file_size_ += 50 * 1024 * 1024;
//    }
    if (data.Empty()) {
        LOG(ERROR) << "Error caused during making file for import. Cannot patch file " << file->file_id();
        return FAILED;
    }
    auto file_id = file->file_id();
    // Patching is only meaningful in the PATCHED (RU) locale; switch if needed.
    if (current_locale() != PATCHED && file_id != 2013266257) {
        LOG(INFO) << "Changing locale to PATCHED(RU) in order to patch file";
        SetLocale(PATCHED);
    }
    dat_state_ = UPDATED;
    // First time this file is patched: snapshot its pristine header so the
    // ORIGINAL locale can be restored later.
    if (orig_dict_.count(file_id) == 0 && file_id != 2013266257) {
        orig_dict_[file_id] = new Subfile(this, file->MakeHeaderData());
    }
    // Relocate the subfile to the end of the archive when it has never been
    // patched before or the new data no longer fits its current block.
    if ((patch_dict_.count(file_id) == 0 && file_id != 2013266257) || data.size() > file->block_size()
        || file->file_size() + 8 > file->block_size()) {
        file->file_offset_ = file_size_;
        file->block_size_ = std::max((long long)data.size(), file->block_size_);
        // Consume slack from the zero-filled buffer at the archive tail and
        // top it up if it runs low.
        free_buffered_size_ = std::max(0ll, free_buffered_size_ - file->block_size_ - 8);
        AddBufferedSize();
        this->file_size_ += file->block_size_ + 8;
    }
    file->file_size_ = data.size() - 8;
    data.Append(BinaryData::FromNumber<4>(0), 0); // set additional fragments count to zero
    // Sanity check: the image must carry the expected file_id at offset 8.
    if (file_id != data.ToNumber<4>(8)) {
        LOG(ERROR) << "Created data's file_id doesn't match to original! Patch wasn't written to .dat file";
        return INCORRECT_PATCH_FILE;
    }
    //data.ProtectData();
    //BinaryData data1(data.size());
    WriteData(data, data.size(), file->file_offset());
    //data.DeprotectData();
    patch_dict_.erase(file_id); // dropped the stale entry from the RU (patched) dictionary
    if (file_id != 2013266257) {
        patch_dict_[file_id] = new Subfile(this, file->MakeHeaderData()); // store the fresh header
    }
    // If category is forbidden, then return file header data to original state
    if (inactive_categories.count(file->category) != 0) {
        file->file_offset_ = orig_dict_[file_id]->file_offset_;
        file->file_size_ = orig_dict_[file_id]->file_size_;
        file->block_size_ = orig_dict_[file_id]->block_size_;
        file->timestamp_ = orig_dict_[file_id]->timestamp_;
        file->version_ = orig_dict_[file_id]->version_;
    }
    // Mirror the (possibly updated) category into both locale dictionaries.
    if (orig_dict_.count(file_id) != 0 && file_id != 2013266257)
        orig_dict_[file_id]->category = file->category;
    if (patch_dict_.count(file_id) != 0 && file_id != 2013266257)
        patch_dict_[file_id]->category = file->category;
    // Applying file info in directory
    pending_dictionary_.insert(file_id);
    LOG(DEBUG) << "Successfully applied file " << file->file_id() << " patch.";
    return SUCCESS;
}
  733. //------------------------------------------------//
  734. // INPUT-OUTPUT SECTION
  735. //------------------------------------------------//
// Reads the full contents of `file` from the archive, following the fragment
// chain when the subfile does not fit in a single contiguous block.
// `offset` is the number of leading bytes to skip from the block start; the
// returned buffer holds file_size() + 8 - offset bytes. Returns an empty
// BinaryData on any read/consistency error.
BinaryData DatFile::GetFileData(const Subfile *file, long long int offset) {
    LOG(DEBUG) << "Getting file " << file->file_id() << " data";
    // Consistency check: the 4 bytes at file_offset()+8 must repeat the
    // file_id, unless the stored data is compressed.
    BinaryData mfile_id(20);
    ReadData(mfile_id, 20, file->file_offset() + 8);
    if (mfile_id.Empty()) {
        LOG(ERROR) << "Error while reading file " << file->file_id() << " header (offset = "
                   << file->file_offset() << "); Aborting.";
        return BinaryData(0);
    }
    if (!mfile_id.CheckCompression() && file->file_id() != mfile_id.ToNumber<4>(0)) {
        LOG(ERROR) << "Bad DatFile::GetFileData() - file_id in Subfile ("
                   << file->file_id()
                   << ") doesn't match to file_id (" << mfile_id.ToNumber<4>(0) << ")in DatFile.";
        return BinaryData(0);
    }
    BinaryData data((unsigned)(file->file_size() + (8 - offset)));
    // Fast path: whole file fits inside its block — one contiguous read.
    if (file->block_size() >= file->file_size() + 8) {
        ReadData(data, file->file_size() + (8 - offset), file->file_offset() + offset);
        return data;
    }
    // Fragmented file: the first 4 bytes of the block hold the fragment count.
    BinaryData fragments_count(4);
    ReadData(fragments_count, 4, file->file_offset());
    long long fragments_number = fragments_count.ToNumber<4>(0);
    // The primary block ends where the fragment dictionary (8 bytes per
    // fragment: size + offset) begins, at the tail of the block.
    long long current_block_size = file->block_size() - offset - 8 * fragments_number;
    ReadData(data, current_block_size, file->file_offset() + offset);
    BinaryData FragmentsDictionary(8 * unsigned(fragments_number));
    ReadData(FragmentsDictionary, 8 * unsigned(fragments_number),
             file->file_offset() + file->block_size() - 8 * fragments_number);
    // Append each fragment after the data already read; the final fragment is
    // clamped so the total never exceeds the recorded file size.
    for (long long i = 0; i < fragments_number; i++) {
        long long fragment_size = FragmentsDictionary.ToNumber<4>(8 * i);
        long long fragment_offset = FragmentsDictionary.ToNumber<4>(8 * i + 4);
        ReadData(data, std::min(fragment_size, file->file_size() - current_block_size), fragment_offset,
                 current_block_size);
        current_block_size += fragment_size;
    }
    LOG(DEBUG) << "Successfully got file " << file->file_id() << " data";
    return data;
}
  774. DAT_RESULT DatFile::ReadData(BinaryData &data, long long size, long long offset, long long data_offset) {
  775. if (dat_state_ == CLOSED) {
  776. LOG(ERROR) << "Dat state is CLOSED. Cannot read data.";
  777. data = BinaryData(0);
  778. return INIT_ERROR;
  779. }
  780. if (data_offset + size > data.size()) {
  781. LOG(ERROR) << "Trying to read more than BinaryData size: Reading " << size << " bytes from " << offset
  782. << " position.";
  783. data = BinaryData(0);
  784. return DAT_READ_ERROR;
  785. }
  786. if (offset + size > actual_dat_size_) {
  787. LOG(ERROR) << "Trying to read more than DatFile size elapsed: Reading " << size << " bytes from " << offset
  788. << " position.";
  789. data = BinaryData(0);
  790. return DAT_READ_ERROR;
  791. }
  792. fseek(file_handler_, offset, SEEK_SET);
  793. fread(data.data() + data_offset, unsigned(size), 1, file_handler_);
  794. return SUCCESS;
  795. }
  796. DAT_RESULT DatFile::WriteData(const BinaryData &data, long long size, long long offset, long long data_offset) {
  797. if (dat_state_ < SUCCESS_DICTIONARY) {
  798. LOG(ERROR) << "Dat state isn't READY. Cannot write data.";
  799. return INCORRECT_STATE_ERROR;
  800. }
  801. fseek(file_handler_, offset, SEEK_SET);
  802. if (data_offset + size > data.size()) {
  803. LOG(ERROR) << "Trying to write more than BinaryData size";
  804. return DAT_WRITE_ERROR;
  805. }
  806. fwrite(data.data() + data_offset, unsigned(size), 1, file_handler_);
  807. actual_dat_size_ = std::max(file_size_, actual_dat_size_);
  808. return SUCCESS;
  809. }
  810. //------------------------------------------------//
  811. // LOCALE SECTION
  812. //------------------------------------------------//
  813. DAT_RESULT DatFile::SetLocale(LOCALE locale) {
  814. LOG(INFO) << "Setting locale to " << (locale == PATCHED ? " PATCHED" : " ORIGINAL");
  815. if (dat_state_ < READY) {
  816. LOG(ERROR) << "Dat state isn't READY. Cannot set locale.";
  817. return INCORRECT_STATE_ERROR;
  818. }
  819. if (current_locale_ == locale) {
  820. return SUCCESS;
  821. }
  822. dat_state_ = UPDATED;
  823. auto dict = GetLocaleDictReference(locale);
  824. for (auto file : *dict) {
  825. if (file.second == nullptr)
  826. continue;
  827. if (dictionary_.count(file.first) == 0) {
  828. LOG(WARNING) << "In locale dictionary there is file with file_id = " << file.first
  829. << "which is not in .dat file! Passing it and removing from locale dictionary";
  830. dict->erase(file.first);
  831. continue;
  832. }
  833. if (dictionary_[file.first]->MakeHeaderData().CutData(8, 16) ==
  834. file.second->MakeHeaderData().CutData(8, 16) ||
  835. inactive_categories.count(orig_dict_[file.first]->category) != 0)
  836. continue;
  837. long long file_id = file.first;
  838. Subfile *new_file = file.second;
  839. *dictionary_[file_id] = *new_file;
  840. pending_dictionary_.insert(file_id);
  841. dat_state_ = UPDATED;
  842. }
  843. current_locale_ = locale;
  844. LOG(INFO) << "Locale set successfull";
  845. return SUCCESS;
  846. }
  847. LOCALE DatFile::current_locale() {
  848. if (dat_state_ < READY) {
  849. LOG(ERROR) << "dat_file is in incorrect state!";
  850. return ORIGINAL;
  851. }
  852. if (current_locale_ != PATCHED && current_locale_ != ORIGINAL) {
  853. LOG(ERROR) << "locale has incorrect value. Setting it to original";
  854. current_locale_ = ORIGINAL;
  855. }
  856. return current_locale_;
  857. }
  858. std::map<long long, Subfile *> *DatFile::GetLocaleDictReference(LOCALE locale) {
  859. switch (locale) {
  860. case PATCHED:
  861. return &patch_dict_;
  862. case ORIGINAL:
  863. return &orig_dict_;
  864. default:
  865. LOG(ERROR) << "Unknown locale! Returning original";
  866. return &orig_dict_;
  867. }
  868. }
  869. //------------------------------------------------//
  870. // CHECKERS SECTION
  871. //------------------------------------------------//
  872. bool DatFile::CorrectSubfile(Subfile *file) {
  873. BinaryData mfile_id(20);
  874. ReadData(mfile_id, 20, file->file_offset() + 8);
  875. if (mfile_id.Empty())
  876. return false;
  877. return (mfile_id.CheckCompression() || file->file_id() == mfile_id.ToNumber<4>(0)) && file->file_size() < 50ll * 1024ll * 1024ll;
  878. }
  879. bool DatFile::CheckIfUpdatedByGame() {
  880. LOG(INFO) << "Checking if DatFile was updated by LotRO";
  881. if (!pending_patch_.empty())
  882. return true;
  883. bool updated = false;
  884. for (auto i : dictionary_) {
  885. long long file_id = i.first;
  886. Subfile *subfile = i.second;
  887. if (patch_dict_.count(file_id) == 0)
  888. continue;
  889. if (*subfile != *patch_dict_[file_id] && *subfile != *orig_dict_[file_id]) {
  890. orig_dict_.clear();
  891. patch_dict_.clear();
  892. LOG(INFO) << "DAT FILE WAS UPDATED!!!! CLEARING PATCH DATA";
  893. pending_patch_.insert(file_id);
  894. WriteData(BinaryData::FromNumber<4>(0), 4, 300);
  895. return true;
  896. }
  897. }
  898. return updated;
  899. }
  900. bool DatFile::CheckIfNotPatched() {
  901. LOG(INFO) << "DatFile " << (patch_dict_.empty() ? "HASN'T " : "HAS already")
  902. << " been patched by LEGACY launcher!";
  903. return patch_dict_.empty();
  904. }
  905. bool DatFile::CheckIfPatchedByOldLauncher() {
  906. LOG(INFO) << "DatFile " << (dictionary_.count(620750000) == 0 ? "HASN'T " : "HAS already")
  907. << " been patched by OLD LAUNCHER!";
  908. return dictionary_.count(620750000) > 0;
  909. }
  910. //------------------------------------------------//
  911. // CATEGORY SECTION
  912. //------------------------------------------------//
  913. DAT_RESULT DatFile::EnableCategory(int category) {
  914. LOG(INFO) << "Enabling category " << category;
  915. if (inactive_categories.count(category) == 0)
  916. return SUCCESS;
  917. inactive_categories.erase(category);
  918. dat_state_ = UPDATED;
  919. for (auto file : dictionary_) {
  920. auto file_id = file.first;
  921. if (patch_dict_.count(file_id) > 0 && patch_dict_[file_id]->category == category) {
  922. *file.second = *patch_dict_[file_id];
  923. pending_dictionary_.insert(file_id);
  924. }
  925. }
  926. LOG(INFO) << "Category " << category << " enabled successfully";
  927. return SUCCESS;
  928. }
  929. DAT_RESULT DatFile::DisableCategory(int category) {
  930. LOG(INFO) << "Disabling category " << category;
  931. if (inactive_categories.count(category) != 0)
  932. return SUCCESS;
  933. inactive_categories.insert(category);
  934. dat_state_ = UPDATED;
  935. for (auto file : dictionary_) {
  936. auto file_id = file.first;
  937. if (orig_dict_.count(file_id) && orig_dict_[file_id]->category == category) {
  938. *file.second = *orig_dict_[file_id];
  939. pending_dictionary_.insert(file_id);
  940. }
  941. }
  942. LOG(INFO) << "Category " << category << " disabled successfully";
  943. return SUCCESS;
  944. }
// Returns the set of category ids currently disabled in this archive.
const std::set<long long> &DatFile::GetInactiveCategoriesList() {
return inactive_categories;
}
// Returns the path of the .dat file this object operates on.
const std::string &DatFile::filename() const {
return filename_;
}
  951. void DatFile::AddBufferedSize() {
  952. if (free_buffered_size_ >= MIN_BUFFERED_SIZE)
  953. return;
  954. BinaryData nulls(MAX_BUFFERED_SIZE);
  955. WriteData(nulls, MAX_BUFFERED_SIZE, file_size_);
  956. free_buffered_size_ = MAX_BUFFERED_SIZE;
  957. }
  958. //------------------------------------------------//
  959. // BACKUP SECTION
  960. //------------------------------------------------//
  961. bool DatFile::CheckIfBackupExists(const std::string &backup_datname) {
  962. std::ifstream dst("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
  963. return !dst.fail();
  964. }
  965. DAT_RESULT DatFile::RemoveBackup(const std::string &backup_datname) {
  966. if (!CheckIfBackupExists(backup_datname))
  967. return SUCCESS;
  968. if (remove(("DAT_LIBRARY_BACKUP/" + backup_datname).c_str()) == 0)
  969. return SUCCESS;
  970. return REMOVE_FILE_ERROR;
  971. }
  972. DAT_RESULT DatFile::CreateBackup(const std::string &backup_datname) {
  973. auto filename = filename_;
  974. auto dat_id = dat_id_;
  975. LOG(INFO) << "Restoring .dat file " << filename << " from backup " << backup_datname;
  976. LOG(INFO) << " Closing DatFile...";
  977. CloseDatFile();
  978. LOG(INFO) << " Copying " << filename << " to " << backup_datname;
  979. mkdir("DAT_LIBRARY_BACKUP");
  980. std::ifstream src(filename, std::ios::binary);
  981. std::ofstream dst("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
  982. std::istreambuf_iterator<char> begin_source(src);
  983. std::istreambuf_iterator<char> end_source;
  984. std::ostreambuf_iterator<char> begin_dest(dst);
  985. std::copy(begin_source, end_source, begin_dest);
  986. src.close();
  987. dst.close();
  988. LOG(INFO) << " Done copying. Initializing restored" << filename << " DatFile...";
  989. InitDatFile(filename, dat_id);
  990. LOG(INFO) << "Restoring .dat file success!";
  991. return SUCCESS;
  992. }
  993. DAT_RESULT DatFile::RestoreFromBackup(const std::string &backup_datname) {
  994. auto filename = filename_;
  995. auto dat_id = dat_id_;
  996. LOG(INFO) << "Restoring .dat file " << filename << " from backup " << backup_datname;
  997. LOG(INFO) << " Closing DatFile...";
  998. CloseDatFile();
  999. LOG(INFO) << " Copying " << filename << " to " << backup_datname;
  1000. mkdir("DAT_LIBRARY_BACKUP");
  1001. std::ifstream src("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
  1002. std::ofstream dst(filename, std::ios::binary);
  1003. if (src.fail()) {
  1004. LOG(ERROR) << "CANNOT RESTORE FILE FROM BACKUP - no backup specified with name " << backup_datname;
  1005. return NO_BACKUP_ERROR;
  1006. }
  1007. std::istreambuf_iterator<char> begin_source(src);
  1008. std::istreambuf_iterator<char> end_source;
  1009. std::ostreambuf_iterator<char> begin_dest(dst);
  1010. std::copy(begin_source, end_source, begin_dest);
  1011. src.close();
  1012. dst.close();
  1013. LOG(INFO) << " Done copying. Initializing restored" << filename << " DatFile...";
  1014. InitDatFile(filename, dat_id);
  1015. LOG(INFO) << "Restoring .dat file success!";
  1016. return SUCCESS;
  1017. }
  1018. }
  1019. }