DatFile.cpp 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113
  1. //
  2. // Created by Иван_Архипов on 31.10.2017.
  3. //
  4. #include "DatFile.h"
  5. #include "BinaryData.h"
  6. #include "DatException.h"
  7. #include "SubDirectory.h"
  8. #include "Subfile.h"
  9. #include "SubfileData.h"
  10. #include <EasyLogging++/easylogging++.h>
  11. #include <unistd.h>
  12. #include <locale>
  13. #define ELPP_FEATURE_CRASH_LOG
  14. INITIALIZE_EASYLOGGINGPP
  15. #ifdef WIN32
  16. #define fseek _fseeki64
  17. #define ftell _ftelli64
  18. #endif
  19. extern "C++"
  20. {
  21. namespace LOTRO_DAT {
  22. DatFile::DatFile() {
  23. dat_state_ = CLOSED;
  24. root_directory_ = nullptr;
  25. file_handler_ = nullptr;
  26. free_buffered_size_ = 0;
  27. el::Configurations defaultConf;
  28. defaultConf.setToDefault();
  29. defaultConf.setGlobally(el::ConfigurationType::Format,
  30. "%datetime %level %fbase (line %line) : %msg (function: %func)");
  31. defaultConf.setGlobally(el::ConfigurationType::ToFile, "true");
  32. defaultConf.setGlobally(el::ConfigurationType::Filename, "dat_library.log");
  33. defaultConf.setGlobally(el::ConfigurationType::ToStandardOutput, "false");
  34. defaultConf.setGlobally(el::ConfigurationType::PerformanceTracking, "true");
  35. defaultConf.setGlobally(el::ConfigurationType::MaxLogFileSize, "5242880"); // 5MB
  36. defaultConf.setGlobally(el::ConfigurationType::LogFlushThreshold, "1"); // Flush after every one log
  37. defaultConf.set(el::Level::Debug, el::ConfigurationType::Enabled, "false");
  38. defaultConf.set(el::Level::Debug, el::ConfigurationType::Filename, "dat_library_debug.log");
  39. el::Loggers::reconfigureAllLoggers(defaultConf);
  40. LOG(INFO) << "==================================================================";
  41. LOG(INFO) << "Starting new DatFile class instance";
  42. }
/// Initialises this DatFile from the .dat file at `filename`, tagging it with `dat_id`.
/// Pipeline: OpenDatFile -> ReadSuperBlock -> MakeDirectories -> MakeDictionary -> InitLocales.
/// Steps returning an error code (<= 0) abort initialisation and close the file;
/// non-error codes accumulate into `return_value` via max(), so the worst warning wins.
/// A final value >= 2 means the file may be corrupted, in which case RepairDatFile()
/// is attempted before returning.
DAT_RESULT DatFile::InitDatFile(const std::string &filename, int dat_id) {
    LOG(DEBUG) << "Started initialisation of DatFile " << filename;
    // Reopening the file that is already open is a no-op.
    if (dat_state_ != CLOSED && filename == filename_) {
        LOG(DEBUG) << "Trying to reopen the same file: " << filename << ". Doing nothing.";
        return SUCCESS;
    }
    // A different file is currently open: commit and close it first.
    if (dat_state_ != CLOSED && filename != filename_) {
        LOG(DEBUG) << "DatFile wasn't closed yet. Closing in order to reopen.";
        if (CloseDatFile() != SUCCESS) {
            LOG(ERROR) << "Unable to perform CloseDatFile()! Aborting initialization!";
            return FAILED;
        }
    }
    // Reset all per-file state before opening the new file.
    dat_id_ = dat_id;
    dat_state_ = CLOSED;
    current_locale_ = ORIGINAL;
    root_directory_ = nullptr;
    file_handler_ = nullptr;
    free_buffered_size_ = 0;
    filename_ = "none";
    DAT_RESULT result;
    DAT_RESULT return_value = SUCCESS; // worst (maximal) result seen so far
    result = OpenDatFile(filename.c_str());
    if (result != SUCCESS) {
        LOG(ERROR) << "Unable to perform opening file. Aborting.";
        CloseDatFile();
        return result;
    }
    return_value = std::max(return_value, result);
    result = ReadSuperBlock();
    if (result <= 0) {
        LOG(ERROR) << "Unable to read super block. Aborting.";
        CloseDatFile();
        return result;
    }
    return_value = std::max(return_value, result);
    result = MakeDirectories();
    if (result <= 0) {
        LOG(ERROR) << "Unable to make directories. Aborting.";
        CloseDatFile();
        return result;
    }
    return_value = std::max(return_value, result);
    result = MakeDictionary();
    if (result <= 0) {
        LOG(ERROR) << "Unable to make dictionary. Aborting.";
        CloseDatFile();
        return result;
    }
    return_value = std::max(return_value, result);
    result = InitLocales();
    if (result <= 0) {
        LOG(ERROR) << "Unable to initialize locales. Aborting.";
        CloseDatFile();
        return result;
    }
    return_value = std::max(return_value, result);
    LOG(INFO) << "File " << filename << " opened successfully!";
    filename_ = filename;
    dat_state_ = READY;
    LOG(INFO) << "Making last preparations...";
    return_value = std::max(return_value, result); // NOTE(review): duplicates the line above — redundant but harmless
    if (return_value >= 2) {
        LOG(WARNING) << "Dat file could be corrupted. Trying to delete corrupted dictionary rows";
        if (RepairDatFile() != SUCCESS)
            return CRITICAL_DAT_ERROR;
    }
    LOG(INFO) << "Preparations made successfully! Init return value = " << return_value;
    return return_value;
}
  113. DAT_STATE DatFile::DatFileState() const {
  114. return dat_state_;
  115. }
/// Destructor: commits any pending changes and releases all resources
/// by delegating to CloseDatFile().
DatFile::~DatFile() {
    CloseDatFile();
}
  119. /// Extracts file with file_id.
  120. /// If path is undefined then it will be recognised as current working directory
  121. /// Output file path consists of "path + file_id + file_extension";
  122. /// NOTICE: The directory, mentioned in "std::string path" variable SHOULD BE ALREADY CREATED;
  123. /// Otherwise DatException() will be thrown.
/// Returns SUCCESS if the file was successfully extracted;
/// otherwise returns an error code (e.g. NO_FILE_ERROR, WRITE_TO_FILE_ERROR).
  126. DAT_RESULT DatFile::ExtractFile(long long file_id, const std::string &path) {
  127. LOG(DEBUG) << "Extracting file " << file_id << " to path " << path;
  128. if (dat_state_ < READY) {
  129. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  130. return INCORRECT_STATE_ERROR;
  131. }
  132. BinaryData file_data = GetFileData(dictionary_[file_id], 8);
  133. if (file_data.size() == 0) {
  134. LOG(ERROR) << "File data is empty. Aborting extraction.";
  135. return NO_FILE_ERROR;
  136. }
  137. SubfileData export_data = dictionary_[file_id]->PrepareForExport(file_data);
  138. if (export_data.Empty()) {
  139. LOG(ERROR) << "Export data is empty. Aborting extraction.";
  140. return NO_FILE_ERROR;
  141. }
  142. if (export_data.binary_data.WriteToFile(path + export_data.options["ext"].as<std::string>()) != SUCCESS) {
  143. LOG(ERROR) << "Cannot write to file.";
  144. return WRITE_TO_FILE_ERROR;
  145. }
  146. LOG(DEBUG) << "File " << file_id << " extracted successfully";
  147. return SUCCESS;
  148. }
  149. /// Extracts file with file_id to database "db".
  150. /// DATABASE SHOULD BE ALREADY CREATED; Otherwise DatException will be called.
  151. /// NOTICE: The directory, mentioned in "std::string path" variable SHOULD BE ALREADY CREATED;
  152. /// Otherwise DatException() will be thrown.
/// Returns SUCCESS if the file was successfully pushed to the database;
/// otherwise returns an error code (e.g. INCORRECT_STATE_ERROR, FAILED).
  155. DAT_RESULT DatFile::ExtractFile(long long file_id, Database *db) {
  156. LOG(DEBUG) << "Extracting file " << file_id << " to database.";
  157. if (dat_state_ < READY) {
  158. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  159. return INCORRECT_STATE_ERROR;
  160. }
  161. BinaryData file_data;
  162. try {
  163. file_data = GetFileData(dictionary_[file_id], 8);
  164. } catch (std::exception &e) {
  165. LOG(ERROR) << "Caught" << e.what() << " exception.";
  166. return FAILED;
  167. }
  168. SubfileData export_data;
  169. try {
  170. export_data = dictionary_[file_id]->PrepareForExport(file_data);
  171. export_data.options["did"] = dat_id_;
  172. } catch (std::exception &e) {
  173. LOG(ERROR) << "Caught" << e.what() << " exception.";
  174. return FAILED;
  175. }
  176. if (export_data == SubfileData()) {
  177. LOG(WARNING) << "file with id " << dictionary_[file_id]->file_id() << " is empty. Passing it.";
  178. return SUCCESS;
  179. }
  180. try {
  181. db->PushFile(export_data);
  182. } catch (std::exception &e) {
  183. LOG(ERROR) << "Caught " << e.what() << " exception.";
  184. return FAILED;
  185. }
  186. LOG(DEBUG) << "File " << file_id << " extracted successfully";
  187. return SUCCESS;
  188. }
  189. /// Extracts all files with specific type to "path + type + file_id + file_part + extension" files;
  190. /// If path is undefined then it will be recognised as current working directory
  191. /// NOTICE: The directory, mentioned in "std::string path" variable SHOULD BE ALREADY CREATED;
  192. /// Otherwise DatException() will be thrown.
  193. /// Returns number of successfully extracted files
  194. /// Throws DatException() if undefined behaviour happened
  195. int DatFile::ExtractAllFilesByType(FILE_TYPE type, std::string path) {
  196. LOG(INFO) << "Extracting all files to path " << path;
  197. if (dat_state_ < READY) {
  198. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  199. return INCORRECT_STATE_ERROR;
  200. }
  201. int success = 0;
  202. for (auto i : dictionary_) {
  203. FILE_TYPE file_type = i.second->FileType();
  204. if (file_type == type) {
  205. success += (ExtractFile(i.second->file_id(), (path + std::to_string(i.second->file_id()))) == SUCCESS
  206. ? 1 : 0);
  207. }
  208. }
  209. LOG(INFO) << "Successfully extracted " << success << " files";
  210. return success;
  211. }
  212. /// Extracts all files with specific type to database "db";
  213. /// DATABASE SHOULD BE ALREADY CREATED; Otherwise DatException will be called.
  214. /// Returns number of successfully extracted files
  215. /// Throws DatException() if undefined behaviour happened
  216. int DatFile::ExtractAllFilesByType(FILE_TYPE type, Database *db) {
  217. LOG(INFO) << "Extracting all files to database...";
  218. if (dat_state_ < READY) {
  219. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  220. return INCORRECT_STATE_ERROR;
  221. }
  222. int success = 0;
  223. for (auto i : dictionary_) {
  224. FILE_TYPE file_type = i.second->FileType();
  225. if (file_type == type) {
  226. success += (ExtractFile(i.second->file_id(), db) == SUCCESS ? 1 : 0);
  227. }
  228. }
  229. LOG(INFO) << "Extracted " << success << " files";
  230. return success;
  231. }
  232. // TODO: Write description and make asserts
  233. DAT_RESULT DatFile::PatchFile(const SubfileData &data) {
  234. LOG(DEBUG) << "Patching file with id = " << data.options["fid"].as<long long>() << ".";
  235. if (dat_state_ < READY) {
  236. LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
  237. return INCORRECT_STATE_ERROR;
  238. }
  239. auto file_id = data.options["fid"].as<long long>();
  240. Subfile *file = dictionary_[file_id];
  241. if (file == nullptr) {
  242. LOG(ERROR) << "Cannot patch file - there is no file in dictionary with file_id = " << file_id;
  243. return NO_FILE_ERROR;
  244. }
  245. if (!CorrectSubfile(file)) {
  246. LOG(ERROR) << "Incorrect subfile with id " << file->file_id() << " (headers do not match). Cannot patch it";
  247. return FAILED;
  248. }
  249. // If file has inactive category, then we should set it to patched state in order to commit patch and
  250. // then in ApplyFilePatch(), if new category is still inactive, return dictionary to its original state;
  251. if (inactive_categories.count(file->category) != 0 && patch_dict_.count(file_id) != 0 && file_id != 2013266257) {
  252. dictionary_[file_id]->file_offset_ = patch_dict_[file_id]->file_offset_;
  253. dictionary_[file_id]->file_size_ = patch_dict_[file_id]->file_size_;
  254. dictionary_[file_id]->block_size_ = patch_dict_[file_id]->block_size_;
  255. dictionary_[file_id]->timestamp_ = patch_dict_[file_id]->timestamp_;
  256. dictionary_[file_id]->version_ = patch_dict_[file_id]->version_;
  257. }
  258. if (data.options["cat"].IsDefined()) {
  259. file->category = data.options["cat"].as<long long>();
  260. } else {
  261. file->category = 1;
  262. }
  263. BinaryData old_data = GetFileData(file);
  264. if (old_data.Empty()) {
  265. LOG(ERROR) << "GetFileData returned empty data. Aborting.";
  266. return DAT_PATCH_FILE_ERROR;
  267. }
  268. BinaryData patch_data = file->MakeForImport(old_data, data);
  269. DAT_RESULT result = ApplyFilePatch(file, patch_data);
  270. if (result != SUCCESS)
  271. return result;
  272. LOG(DEBUG) << "Patched successfully file " << data.options["fid"].as<long long>() << ".";
  273. return SUCCESS;
  274. }
  275. // TODO: Write description
  276. DAT_RESULT DatFile::PatchAllDatabase(Database *db) {
  277. LOG(INFO) << "Patching all database";
  278. if (dat_state_ < READY) {
  279. LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
  280. return INCORRECT_STATE_ERROR;
  281. }
  282. SubfileData data;
  283. data = db->GetNextFile();
  284. while (!data.Empty()) {
  285. DAT_RESULT result = PatchFile(data);
  286. if (result != SUCCESS)
  287. LOG(ERROR) << "Cannot patch file" << data.options["fid"].as<long long>() << " continuing";
  288. data = db->GetNextFile();
  289. }
  290. LOG(INFO) << "Successfully patched whole database";
  291. return SUCCESS;
  292. }
  293. /// DatFile::WriteUnorderedDictionary(...);
  294. /// Prints list of all found files with some information about them to file.
  295. /// Gets std::string path - path to directory, where the file will be written with name "dict.txt"
  296. DAT_RESULT DatFile::WriteUnorderedDictionary(std::string path) const {
  297. LOG(INFO) << "Writing unordered dictionary to " << path << "dict.txt";
  298. FILE *f = nullptr;
  299. fopen_s(&f, (path + "dict.txt").c_str(), "w");
  300. if (f == nullptr) {
  301. LOG(ERROR) << "Cannot open file " << path + "dict.txt";
  302. return WRITE_TO_FILE_ERROR;
  303. }
  304. fprintf(f, "unk1 file_id offset size1 timestamp version size2 unknown2 type\n");
  305. for (auto i : dictionary_) {
  306. fprintf(f, "%lld %lld %lld %lld %lld %lld %lld %lld %s\n", i.second->unknown1(), i.second->file_id(),
  307. i.second->file_offset(), i.second->file_size(), i.second->timestamp(), i.second->version(),
  308. i.second->block_size(), i.second->unknown2(), i.second->Extension().c_str());
  309. }
  310. fclose(f);
  311. LOG(INFO) << "Unordered dictionary was written successfully to " << path << "dict.txt";
  312. return SUCCESS;
  313. }
  314. /// DatFile::files_number();
  315. /// Returns amount of files, found in dictionaries of DatFile. Some if them may be empty or erased.
  316. long long DatFile::files_number() const {
  317. return dictionary_.size();
  318. }
  319. /// DatFile::GetFileData()
  320. /// Returns BinaryData, which contains of subfile data, made from parts of file in DatFile
  321. // TODO: ASSERTS
/// Reassembles and returns the raw contents of `file`, starting `offset` bytes
/// into its first block. A subfile is either stored in a single contiguous block
/// or split into fragments listed in a dictionary at the end of its first block.
/// Returns BinaryData(0) on header read failure or on a file_id mismatch.
BinaryData DatFile::GetFileData(const Subfile *file, long long int offset) {
    LOG(DEBUG) << "Getting file " << file->file_id() << " data";
    // Read the 20-byte header that follows the 8-byte block prefix.
    BinaryData mfile_id(20);
    ReadData(mfile_id, 20, file->file_offset() + 8);
    if (mfile_id.Empty()) {
        LOG(ERROR) << "Error while reading file " << file->file_id() << " header (offset = "
                   << file->file_offset() << "); Aborting.";
        return BinaryData(0);
    }
    // For uncompressed files the header must echo the dictionary's file_id.
    if (!mfile_id.CheckCompression() && file->file_id() != mfile_id.ToNumber<4>(0)) {
        LOG(ERROR) << "Bad DatFile::GetFileData() - file_id in Subfile ("
                   << file->file_id()
                   << ") doesn't match to file_id (" << mfile_id.ToNumber<4>(0) << ")in DatFile.";
        return BinaryData(0);
    }
    BinaryData data((unsigned)(file->file_size() + (8 - offset)));
    // Simple case: the whole file fits into its first block — one read suffices.
    if (file->block_size() >= file->file_size() + 8) {
        ReadData(data, file->file_size() + (8 - offset), file->file_offset() + offset);
        return data;
    }
    // Fragmented case: the first 4 bytes of the block hold the fragment count.
    BinaryData fragments_count(4);
    ReadData(fragments_count, 4, file->file_offset());
    long long fragments_number = fragments_count.ToNumber<4>(0);
    // The first block ends with an 8-byte (size, offset) entry per fragment.
    long long current_block_size = file->block_size() - offset - 8 * fragments_number;
    ReadData(data, current_block_size, file->file_offset() + offset);
    BinaryData FragmentsDictionary(8 * unsigned(fragments_number));
    ReadData(FragmentsDictionary, 8 * unsigned(fragments_number),
             file->file_offset() + file->block_size() - 8 * fragments_number);
    // Append each fragment; the last one is clipped to the remaining file size.
    for (long long i = 0; i < fragments_number; i++) {
        long long fragment_size = FragmentsDictionary.ToNumber<4>(8 * i);
        long long fragment_offset = FragmentsDictionary.ToNumber<4>(8 * i + 4);
        ReadData(data, std::min(fragment_size, file->file_size() - current_block_size), fragment_offset,
                 current_block_size);
        current_block_size += fragment_size;
    }
    LOG(DEBUG) << "Successfully got file " << file->file_id() << " data";
    return data;
}
  360. /// DatFile special functions for opening and reading/writing raw data.
  361. /// Shouldn't be used by any external classes except Subfile and Subdirectory.
/// Opens the .dat file at `dat_name` for binary read/write, records its size in
/// file_size_ and moves the state to SUCCESS_OPENED. Closes any previously open
/// file first. Returns SUCCESS or NO_FILE_ERROR.
/// NOTE: fseek/ftell are remapped to their 64-bit variants on WIN32 (see top of file).
DAT_RESULT DatFile::OpenDatFile(const char *dat_name) {
    LOG(DEBUG) << "Started opening DatFile";
    if (dat_state_ != CLOSED) {
        CloseDatFile();
    }
    file_handler_ = fopen(dat_name, "r+b");
    if (file_handler_ == nullptr) {
        LOG(ERROR) << "Unable to open file " << dat_name;
        return NO_FILE_ERROR;
    }
    // Measure the file size by seeking to the end, then rewind to the start.
    fseek(file_handler_, 0, SEEK_END);
    file_size_ = ftell(file_handler_);
    fseek(file_handler_, 0, SEEK_SET);
    dat_state_ = SUCCESS_OPENED;
    LOG(DEBUG) << "Successfully opened DatFile";
    return SUCCESS;
}
/// Reads and validates the 1024-byte superblock at the start of the .dat file.
/// Superblock layout used here (4-byte little-endian fields):
///   0x100 - magic constant, must be 0x4C5000
///   0x140 - magic constant, must be 0x5442
///   0x148 - recorded total file size
///   0x14C - version1, 0x150 - version2
///   0x154 - fragmentation journal offset
///   0x160 - root directory offset
/// Returns SUCCESS, INCORRECT_STATE_ERROR, INCORRECT_SUPERBLOCK_ERROR, or
/// CORRUPTED_FILE_WARNING when the recorded size mismatches the real size
/// (in that case the recorded size is adopted and the state still advances).
DAT_RESULT DatFile::ReadSuperBlock() {
    LOG(DEBUG) << "Started reading superblock";
    if (dat_state_ != SUCCESS_OPENED) {
        LOG(ERROR) << "Dat state isn't SUCCESS_OPENED. Cannot perform extraction.";
        return INCORRECT_STATE_ERROR;
    }
    BinaryData data(1024);
    ReadData(data, 1024);
    constant1_ = data.ToNumber<4>(0x100);
    constant2_ = data.ToNumber<4>(0x140);
    version1_ = data.ToNumber<4>(0x14C);
    version2_ = data.ToNumber<4>(0x150);
    fragmentation_journal_offset_ = data.ToNumber<4>(0x154);
    root_directory_offset_ = data.ToNumber<4>(0x160);
    auto size1 = data.ToNumber<4>(0x148);
    if (constant1_ != 0x4C5000) {
        LOG(ERROR) << "variable at position 0x100 is not equal to .dat file constant!";
        return INCORRECT_SUPERBLOCK_ERROR;
    }
    if (constant2_ != 0x5442) {
        LOG(ERROR) << "variable at position 0x140 is not equal to .dat file constant!";
        return INCORRECT_SUPERBLOCK_ERROR;
    }
    // Size mismatch is recoverable: trust the superblock value and warn the caller.
    if (file_size_ != size1) {
        LOG(ERROR) << "variable at 0x148 position is not equal to .dat file size!";
        file_size_ = size1;
        dat_state_ = SUCCESS_SUPERBLOCK;
        return CORRUPTED_FILE_WARNING;
    }
    dat_state_ = SUCCESS_SUPERBLOCK;
    LOG(DEBUG) << "Superblock read successfully";
    return SUCCESS;
}
/// Builds the directory tree, rooted at the offset read from the superblock.
/// Requires ReadSuperBlock() to have succeeded (state SUCCESS_SUPERBLOCK);
/// advances the state to SUCCESS_DIRECTORIES.
DAT_RESULT DatFile::MakeDirectories() {
    LOG(DEBUG) << "Started making directories";
    if (dat_state_ != SUCCESS_SUPERBLOCK) {
        LOG(ERROR) << "Dat state isn't SUCCESS_SUPERBLOCK. Cannot make directories.";
        return INCORRECT_STATE_ERROR;
    }
    // SubDirectory's constructor recursively reads the whole tree from the file.
    root_directory_ = new SubDirectory((unsigned) root_directory_offset_, this);
    dat_state_ = SUCCESS_DIRECTORIES;
    LOG(DEBUG) << "Directories made successfully";
    return SUCCESS;
}
  423. DAT_RESULT DatFile::MakeDictionary() {
  424. LOG(DEBUG) << "Started making dictionary";
  425. if (dat_state_ != SUCCESS_DIRECTORIES) {
  426. LOG(ERROR) << "Dat state isn't SUCCESS_DIRECTORIES. Cannot make directories.";
  427. return INCORRECT_STATE_ERROR;
  428. }
  429. if (root_directory_ == nullptr) {
  430. LOG(ERROR) << "root_directory is nullptr!!";
  431. return INIT_ERROR;
  432. }
  433. root_directory_->MakeDictionary(dictionary_);
  434. dat_state_ = SUCCESS_DICTIONARY;
  435. LOG(DEBUG) << "Dictionary made successfull";
  436. return SUCCESS;
  437. }
  438. DAT_RESULT DatFile::ReadData(BinaryData &data, long long size, long long offset, long long data_offset) {
  439. if (dat_state_ == CLOSED) {
  440. LOG(ERROR) << "Dat state is CLOSED. Cannot read data.";
  441. data = BinaryData(0);
  442. return INIT_ERROR;
  443. }
  444. if (data_offset + size > data.size()) {
  445. LOG(ERROR) << "Trying to read more than BinaryData size: Reading " << size << " bytes from " << offset
  446. << " position.";
  447. data = BinaryData(0);
  448. return DAT_READ_ERROR;
  449. }
  450. if (offset + size > file_size_) {
  451. LOG(ERROR) << "Trying to read more than DatFile size elapsed: Reading " << size << " bytes from " << offset
  452. << " position.";
  453. data = BinaryData(0);
  454. return DAT_READ_ERROR;
  455. }
  456. fseek(file_handler_, offset, SEEK_SET);
  457. fread(data.data() + data_offset, unsigned(size), 1, file_handler_);
  458. return SUCCESS;
  459. }
  460. DAT_RESULT DatFile::WriteData(const BinaryData &data, long long size, long long offset, long long data_offset) {
  461. if (dat_state_ < READY) {
  462. LOG(ERROR) << "Dat state isn't READY. Cannot write data.";
  463. return INCORRECT_STATE_ERROR;
  464. }
  465. fseek(file_handler_, offset, SEEK_SET);
  466. if (data_offset + size > data.size()) {
  467. LOG(ERROR) << "Trying to write more than BinaryData size";
  468. return DAT_WRITE_ERROR;
  469. }
  470. fwrite(data.data() + data_offset, unsigned(size), 1, file_handler_);
  471. return SUCCESS;
  472. }
  473. /// Special functions used by patch process.
  474. /// Shouldn't be used by any external class.
/// Writes `data` (a fully prepared import blob) into the .dat file for `file`.
/// Side effects: may switch the locale to PATCHED, snapshots the original header
/// into orig_dict_ on first patch, relocates the file to end-of-file when the
/// existing block is too small, refreshes patch_dict_, and queues the id in
/// pending_dictionary_ for the directory commit.
/// File id 2013266257 (the locale dictionary file itself) is deliberately kept
/// out of the orig/patch dictionaries.
DAT_RESULT DatFile::ApplyFilePatch(Subfile *file, BinaryData &data) {
    LOG(DEBUG) << "Applying " << file->file_id() << " patch.";
    if (data.Empty()) {
        LOG(ERROR) << "Error caused during making file for import. Cannot patch file " << file->file_id();
        return FAILED;
    }
    auto file_id = file->file_id();
    // Patching is only performed in the PATCHED (RU) locale.
    if (current_locale() != PATCHED && file_id != 2013266257) {
        LOG(INFO) << "Changing locale to PATCHED(RU) in order to patch file";
        SetLocale(PATCHED);
    }
    dat_state_ = UPDATED;
    // First patch of this file: remember the original header so it can be restored.
    if (orig_dict_.count(file_id) == 0 && file_id != 2013266257) {
        orig_dict_[file_id] = new Subfile(this, file->MakeHeaderData());
    }
    // Relocate to end-of-file when there is no previous patch slot or the new
    // data does not fit into the existing block.
    if ((patch_dict_.count(file_id) == 0 && file_id != 2013266257) || data.size() > file->block_size()
        || file->file_size() + 8 > file->block_size()) {
        file->file_offset_ = file_size_;
        file->block_size_ = std::max((long long)data.size(), file->block_size_);
        free_buffered_size_ = std::max(0ll, free_buffered_size_ - file->block_size_);
        AddBufferedSize();
        this->file_size_ += file->block_size_;
    }
    file->file_size_ = data.size() - 8;
    data.Append(BinaryData::FromNumber<4>(0), 0); // set additional fragments count to zero
    // Sanity check: the blob must carry the same file_id at offset 8.
    if (file_id != data.ToNumber<4>(8)) {
        LOG(ERROR) << "Created data's file_id doesn't match to original! Patch wasn't written to .dat file";
        return INCORRECT_PATCH_FILE;
    }
    WriteData(data, data.size(), file->file_offset());
    patch_dict_.erase(file_id); // Drop the old entry from the patched (RU) dictionary
    if (file_id != 2013266257) {
        patch_dict_[file_id] = new Subfile(this, file->MakeHeaderData()); // Create the fresh entry
    }
    // If category is forbidden, then return file header data to original state
    if (inactive_categories.count(file->category) != 0) {
        file->file_offset_ = orig_dict_[file_id]->file_offset_;
        file->file_size_ = orig_dict_[file_id]->file_size_;
        file->block_size_ = orig_dict_[file_id]->block_size_;
        file->timestamp_ = orig_dict_[file_id]->timestamp_;
        file->version_ = orig_dict_[file_id]->version_;
    }
    if (orig_dict_.count(file_id) != 0 && file_id != 2013266257)
        orig_dict_[file_id]->category = file->category;
    if (patch_dict_.count(file_id) != 0 && file_id != 2013266257)
        patch_dict_[file_id]->category = file->category;
    // Applying file info in directory
    pending_dictionary_.insert(file_id);
    LOG(DEBUG) << "Successfully applied file " << file->file_id() << " patch.";
    return SUCCESS;
}
/// Zeroes out fragmentation-journal entries (32 bytes each), starting 8 bytes
/// past fragmentation_journal_offset_, stopping at the first all-zero entry.
/// NOTE(review): a failed ReadData inside the loop resets `data` to an empty
/// buffer, which also terminates the loop — confirm that is the intended exit.
DAT_RESULT DatFile::ClearFragmentationJournal() {
    LOG(DEBUG) << "Clearing fragmentation journal";
    long long offset = 0;
    BinaryData data(32);
    DAT_RESULT res = ReadData(data, 32, fragmentation_journal_offset_ + 8 + offset);
    if (res != SUCCESS) {
        LOG(ERROR) << "Error " << res << " while reading data";
        return FAILED;
    }
    BinaryData nulls = BinaryData(32);
    // Overwrite each non-empty entry with zeros and advance to the next one.
    while (data != nulls && !data.Empty()) {
        WriteData(nulls, 32, fragmentation_journal_offset_ + 8 + offset);
        offset += 32;
        ReadData(data, 32, fragmentation_journal_offset_ + 8 + offset);
    }
    //fragmentation_journal_.emplace_back(std::make_pair(data.ToNumber<4>(0), data.ToNumber<4>(4)));
    LOG(DEBUG) << "Finished getting fragmentation journal";
    return SUCCESS;
}
  545. DAT_RESULT DatFile::UpdateHeader() {
  546. LOG(DEBUG) << "Updating header";
  547. WriteData(BinaryData::FromNumber<4>(constant1_), 4, 0x100);
  548. WriteData(BinaryData::FromNumber<4>(constant2_), 4, 0x140);
  549. WriteData(BinaryData::FromNumber<4>(file_size_), 4, 0x148);
  550. WriteData(BinaryData::FromNumber<4>(version1_), 4, 0x14C);
  551. WriteData(BinaryData::FromNumber<4>(version2_), 4, 0x150);
  552. WriteData(BinaryData::FromNumber<4>(fragmentation_journal_offset_), 4, 0x154);
  553. WriteData(BinaryData::FromNumber<4>(root_directory_offset_), 4, 0x160);
  554. LOG(DEBUG) << "Finished updating header";
  555. return SUCCESS;
  556. }
  557. DAT_RESULT DatFile::CloseDatFile() {
  558. LOG(INFO) << "Closing DatFile";
  559. if (dat_state_ == CLOSED) {
  560. LOG(INFO) << "DatFile is already closed. Nothing to do";
  561. return SUCCESS;
  562. }
  563. // Commiting changes and updating/writing locales and header info
  564. CommitLocales();
  565. CommitDirectories();
  566. UpdateHeader();
  567. ClearFragmentationJournal();
  568. orig_dict_.clear();
  569. pending_patch_.clear();
  570. current_locale_ = ORIGINAL;
  571. if (file_handler_ != nullptr) {
  572. fclose(file_handler_);
  573. }
  574. delete root_directory_;
  575. dictionary_.clear();
  576. free_buffered_size_ = 0;
  577. truncate64(filename_.c_str(), file_size_);
  578. filename_ = "none";
  579. dat_state_ = CLOSED;
  580. LOG(INFO) << "File closed successfully.";
  581. return SUCCESS;
  582. }
  583. // LOCALE MANAGING SECTION
/// Reads the locale dictionaries stored inside the .dat file as subfile
/// 2013266257. If that file is missing, corrupted, or fails its magic-string
/// check, the locale dictionaries are simply left empty and SUCCESS is returned.
DAT_RESULT DatFile::InitLocales() {
    LOG(INFO) << "Initialising locales...";
    BinaryData dicts_data;
    if (dictionary_.count(2013266257) != 0)
        dicts_data = GetFileData(dictionary_[2013266257]);
    if (dicts_data.size() < 29) {
        LOG(WARNING) << "Locales file is empty.. Initialising locale dicts as empty";
        LOG(INFO) << "Could't find locales file or it's corrupted/empty... Continuing without locales";
        return SUCCESS;
    }
    // Magic string "Hi from Gi1dor!" at bytes [14, 29) marks a valid locales file.
    BinaryData hi_data = dicts_data.CutData(14, 29) + BinaryData("\0", 1);
    std::string hi = std::string((char *) (hi_data.data()));
    LOG(DEBUG) << "hi info is " << hi;
    if (hi != "Hi from Gi1dor!") {
        LOG(WARNING) << "Didn't receive 'hi' from Gi1dor... Initialising locale dicts as empty";
        LOG(INFO) << "Could't init locales' file... Continuing without them";
        return SUCCESS;
    }
    int offset = 29;
    // 4-byte tag of the locale that was active when the file was written.
    BinaryData current_locale_data = dicts_data.CutData(offset, offset + 4) + BinaryData("\0", 1);
    std::string locale((char *) (current_locale_data.data()));
    offset += 4;
    LOG(DEBUG) << "current locale:" << locale;
    if (locale != "PATC" && locale != "ORIG") {
        LOG(WARNING) << "Incorrect locale... Initialising locale dicts as empty";
        LOG(INFO) << "Could't recognize locale... Continuing without locales";
        return SUCCESS;
    }
    current_locale_ = (locale == "PATC" ? PATCHED : ORIGINAL);
    // Layout of the locales file:
    // 14 bytes for old data
    // 15 bytes for "Hi from Gi1dor"
    // 4 bytes for LOCALE
    // 4 bytes for orig_dict.size()
    // (32 + 4) * orig_dict.size() bytes for orig_dict data
    // 4 bytes for patch_dict.size()
    // (32 + 4) * patch_dict.size() bytes for patch_dict data
    // 4 bytes for inactive_categories dict
    // 4 * inactive_categories.size() bytes for inactive_categories data
    size_t orig_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    // Each entry: 32-byte serialized Subfile header followed by a 4-byte category.
    for (size_t i = 0; i < orig_dict_size; i++) {
        auto file = new Subfile(this, dicts_data.CutData(offset, offset + 32));
        orig_dict_[file->file_id()] = file;
        offset += 32;
        orig_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
        offset += 4;
        if (orig_dict_[file->file_id()]->category == 0)
            LOG(DEBUG) << "file category is undefined (0)!";
    }
    size_t patch_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    for (size_t i = 0; i < patch_dict_size; i++) {
        auto file = new Subfile(this, dicts_data.CutData(offset, offset + 32));
        patch_dict_[file->file_id()] = file;
        offset += 32;
        patch_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
        offset += 4;
        if (patch_dict_[file->file_id()]->category == 0)
            LOG(DEBUG) << "file category is undefined (0)!";
    }
    // Trailing list of category ids whose patches are currently deactivated.
    size_t active_patches_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    for (size_t i = 0; i < active_patches_dict_size; i++) {
        inactive_categories.insert(dicts_data.ToNumber<4>(offset));
        offset += 4;
    }
    LOG(INFO) << "There are " << patch_dict_.size() << " files in patch locale dictionary";
    LOG(INFO) << "There are " << orig_dict_.size() << " files in original locale dictionary";
    std::string inactive_cat_s;
    for (auto i : inactive_categories) {
        inactive_cat_s += std::to_string(i) + " ";
    }
    LOG(INFO) << "Unactive patches now: " << inactive_cat_s;
    LOG(INFO) << "Finished initialising locales";
    return SUCCESS;
}
  660. std::map<long long, Subfile *> *DatFile::GetLocaleDictReference(LOCALE locale) {
  661. switch (locale) {
  662. case PATCHED:
  663. return &patch_dict_;
  664. case ORIGINAL:
  665. return &orig_dict_;
  666. default:
  667. LOG(ERROR) << "Unknown locale! Returning original";
  668. return &orig_dict_;
  669. }
  670. }
  671. bool DatFile::CorrectSubfile(Subfile *file) {
  672. BinaryData mfile_id(20);
  673. ReadData(mfile_id, 20, file->file_offset() + 8);
  674. if (mfile_id.Empty())
  675. return false;
  676. return mfile_id.CheckCompression() || file->file_id() == mfile_id.ToNumber<4>(0);
  677. }
  678. DAT_RESULT DatFile::RepairDatFile() {
  679. for (auto file : dictionary_) {
  680. auto subfile = file.second;
  681. auto file_id = file.first;
  682. if (CorrectSubfile(subfile))
  683. continue;
  684. if (orig_dict_.count(file_id) == 0 || subfile->file_offset() == orig_dict_[file_id]->file_offset())
  685. return CRITICAL_DAT_ERROR;
  686. dictionary_[file_id]->file_offset_ = orig_dict_[file_id]->file_offset_;
  687. dictionary_[file_id]->file_size_ = orig_dict_[file_id]->file_size_;
  688. dictionary_[file_id]->block_size_ = orig_dict_[file_id]->block_size_;
  689. dictionary_[file_id]->timestamp_ = orig_dict_[file_id]->timestamp_;
  690. dictionary_[file_id]->version_ = orig_dict_[file_id]->version_;
  691. patch_dict_.erase(file_id);
  692. orig_dict_.erase(file_id);
  693. }
  694. return SUCCESS;
  695. }
  696. DAT_RESULT DatFile::SetLocale(LOCALE locale) {
  697. LOG(INFO) << "Setting locale to " << (locale == PATCHED ? " PATCHED" : " ORIGINAL");
  698. if (dat_state_ < READY) {
  699. LOG(ERROR) << "Dat state isn't READY. Cannot set locale.";
  700. return INCORRECT_STATE_ERROR;
  701. }
  702. if (current_locale_ == locale) {
  703. return SUCCESS;
  704. }
  705. dat_state_ = UPDATED;
  706. auto dict = GetLocaleDictReference(locale);
  707. for (auto file : *dict) {
  708. if (dictionary_[file.first] == nullptr) {
  709. LOG(WARNING) << "In locale dictionary there is file with file_id = " << file.first
  710. << "which is not in .dat file! Passing it and removing from locale dictionary";
  711. dict->erase(file.first);
  712. continue;
  713. }
  714. if (dictionary_[file.first]->MakeHeaderData().CutData(8, 16) ==
  715. file.second->MakeHeaderData().CutData(8, 16) ||
  716. inactive_categories.count(orig_dict_[file.first]->category) != 0)
  717. continue;
  718. long long file_id = file.first;
  719. Subfile *new_file = file.second;
  720. dictionary_[file_id]->file_offset_ = new_file->file_offset_;
  721. dictionary_[file_id]->file_size_ = new_file->file_size_;
  722. dictionary_[file_id]->block_size_ = new_file->block_size_;
  723. dictionary_[file_id]->timestamp_ = new_file->timestamp_;
  724. dictionary_[file_id]->version_ = new_file->version_;
  725. pending_dictionary_.insert(file_id);
  726. dat_state_ = UPDATED;
  727. }
  728. current_locale_ = locale;
  729. LOG(INFO) << "Locale set successfull";
  730. return SUCCESS;
  731. }
  732. bool DatFile::CheckIfUpdatedByGame() {
  733. LOG(INFO) << "Checking if DatFile was updated by LotRO";
  734. if (!pending_patch_.empty())
  735. return true;
  736. if (current_locale_ == ORIGINAL)
  737. return false;
  738. bool updated = false;
  739. for (auto i : dictionary_) {
  740. long long file_id = i.first;
  741. Subfile *subfile = i.second;
  742. if (inactive_categories.count(subfile->category) > 0)
  743. continue;
  744. if (patch_dict_.count(file_id) > 0
  745. && (subfile->file_size() != patch_dict_[file_id]->file_size()
  746. || subfile->file_offset() != patch_dict_[file_id]->file_offset()
  747. || subfile->block_size() != patch_dict_[file_id]->block_size())) {
  748. orig_dict_.erase(file_id);
  749. patch_dict_.erase(file_id);
  750. pending_patch_.insert(file_id);
  751. updated = true;
  752. dat_state_ = UPDATED;
  753. }
  754. }
  755. LOG(INFO) << "Dat file " << (updated ? "WAS " : "WASN'T ") << "updated by game.";
  756. return updated;
  757. }
  758. DAT_RESULT DatFile::RepairPatches(Database *db) {
  759. // TODO: Find errors
  760. LOG(INFO) << "Repairing patches";
  761. SubfileData data;
  762. data = db->GetNextFile();
  763. while (!data.Empty()) {
  764. if (pending_patch_.count(data.options["fid"].as<long long>()) > 0) {
  765. PatchFile(data);
  766. }
  767. data = db->GetNextFile();
  768. }
  769. LOG(INFO) << "Successfully repaired with database";
  770. return SUCCESS;
  771. }
  772. DAT_RESULT DatFile::FinishRepairingPatches() {
  773. LOG(INFO) << "Finishing repairing patches";
  774. pending_patch_.clear();
  775. return SUCCESS;
  776. }
  777. LOCALE DatFile::current_locale() {
  778. if (dat_state_ < READY) {
  779. LOG(ERROR) << "dat_file is in incorrect state!";
  780. return ORIGINAL;
  781. }
  782. if (current_locale_ != PATCHED && current_locale_ != ORIGINAL) {
  783. LOG(ERROR) << "locale has incorrect value. Setting it to original";
  784. current_locale_ = ORIGINAL;
  785. }
  786. return current_locale_;
  787. }
// Serialises the current locale state (active locale tag, both locale
// dictionaries and the inactive-category set) into service subfile
// 2013266257 and patches it into the .dat file so the state survives
// between launcher runs. The binary layout below must stay in sync with
// the parser that reads this subfile back on startup.
DAT_RESULT DatFile::CommitLocales() {
    LOG(INFO) << "Committing locales...";
    // NOTE(review): dictionary_[2013266257] is dereferenced without a null
    // check — presumably this service subfile always exists by the time
    // locales are committed; confirm for fresh/corrupted .dat files.
    SubfileData data = dictionary_[2013266257]->PrepareForExport(GetFileData(dictionary_[2013266257]));
    data.options["fid"] = "2013266257";
    data.options["ext"] = ".unknown";
    BinaryData old_data = BinaryData(GetFileData(dictionary_[2013266257u]));
    // 14 bytes for old data
    // 15 bytes for "Hi from Gi1dor"
    // 4 bytes for LOCALE
    // 4 bytes for orig_dict.size()
    // (32 + 4) * orig_dict.size() bytes for orig_dict data
    // 4 bytes for patch_dict.size()
    // (32 + 4) * patch_dict.size() bytes for patch_dict data
    // 4 bytes for inactive_categories list
    // 4 * inactive_categories.size() bytes for inactive_categories data
    data.binary_data = BinaryData(14 + 15 + 4
                                  + 4 + (32 + 4) * orig_dict_.size()
                                  + 4 + (32 + 4) * patch_dict_.size()
                                  + 4 + 4 * inactive_categories.size());
    size_t current_size = 0;
    // Preserve the original first 14 bytes of the service subfile.
    data.binary_data.Append(GetFileData(dictionary_[2013266257u]).CutData(0, 14), current_size);
    current_size += 14;
    // Magic marker the reader checks before trusting the rest of the blob.
    data.binary_data.Append(BinaryData("Hi from Gi1dor!", 15), current_size);
    current_size += 15;
    data.binary_data.Append(BinaryData((current_locale_ == ORIGINAL ? "ORIG" : "PATC"), 4), current_size);
    current_size += 4;
    data.binary_data.Append(BinaryData::FromNumber<4>(orig_dict_.size()), current_size);
    current_size += 4;
    // Each dictionary entry: 32-byte header + 4-byte category id.
    for (auto file : orig_dict_) {
        data.binary_data.Append(file.second->MakeHeaderData(), current_size);
        current_size += 32;
        data.binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
        current_size += 4;
    }
    data.binary_data.Append(BinaryData::FromNumber<4>(patch_dict_.size()), current_size);
    current_size += 4;
    for (auto file : patch_dict_) {
        data.binary_data.Append(file.second->MakeHeaderData(), current_size);
        current_size += 32;
        data.binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
        current_size += 4;
    }
    // Trailing list of disabled category ids.
    data.binary_data.Append(BinaryData::FromNumber<4>(inactive_categories.size()), current_size);
    current_size += 4;
    for (auto patch_id : inactive_categories) {
        data.binary_data.Append(BinaryData::FromNumber<4>(patch_id), current_size);
        current_size += 4;
    }
    DAT_RESULT result = PatchFile(data);
    if (result != SUCCESS)
        return result;
    LOG(INFO) << "Locales commited successfully";
    return SUCCESS;
}
  842. DAT_RESULT DatFile::EnableCategory(int category) {
  843. LOG(INFO) << "Enabling category " << category;
  844. if (inactive_categories.count(category) == 0)
  845. return SUCCESS;
  846. inactive_categories.erase(category);
  847. dat_state_ = UPDATED;
  848. for (auto file : dictionary_) {
  849. auto file_id = file.first;
  850. if (patch_dict_.count(file_id) > 0 && patch_dict_[file_id]->category == category) {
  851. file.second->file_offset_ = patch_dict_[file_id]->file_offset_;
  852. file.second->file_size_ = patch_dict_[file_id]->file_size_;
  853. file.second->block_size_ = patch_dict_[file_id]->block_size_;
  854. file.second->timestamp_ = patch_dict_[file_id]->timestamp_;
  855. file.second->version_ = patch_dict_[file_id]->version_;
  856. pending_dictionary_.insert(file_id);
  857. }
  858. }
  859. LOG(INFO) << "Category " << category << " enabled successfully";
  860. return SUCCESS;
  861. }
  862. DAT_RESULT DatFile::DisableCategory(int category) {
  863. LOG(INFO) << "Disabling category " << category;
  864. if (inactive_categories.count(category) != 0)
  865. return SUCCESS;
  866. inactive_categories.insert(category);
  867. dat_state_ = UPDATED;
  868. for (auto file : dictionary_) {
  869. auto file_id = file.first;
  870. if (orig_dict_.count(file_id) && orig_dict_[file_id]->category == category) {
  871. file.second->file_offset_ = orig_dict_[file_id]->file_offset_;
  872. file.second->file_size_ = orig_dict_[file_id]->file_size_;
  873. file.second->block_size_ = orig_dict_[file_id]->block_size_;
  874. file.second->timestamp_ = orig_dict_[file_id]->timestamp_;
  875. file.second->version_ = orig_dict_[file_id]->version_;
  876. pending_dictionary_.insert(file_id);
  877. }
  878. }
  879. LOG(INFO) << "Category " << category << " disabled successfully";
  880. return SUCCESS;
  881. }
// Read-only view of the set of currently disabled patch categories.
const std::set<long long> &DatFile::GetInactiveCategoriesList() {
    return inactive_categories;
}
  885. bool DatFile::CheckIfNotPatched() {
  886. LOG(INFO) << "DatFile " << (patch_dict_.empty() ? "HASN'T " : "HAS already")
  887. << " been patched by LEGACY launcher!";
  888. return patch_dict_.empty();
  889. }
  890. bool DatFile::CheckIfPatchedByOldLauncher() {
  891. LOG(INFO) << "DatFile " << (dictionary_.count(620750000) == 0 ? "HASN'T " : "HAS already")
  892. << " been patched by OLD LAUNCHER!";
  893. return dictionary_.count(620750000) > 0;
  894. }
// Path of the .dat file this object operates on.
const std::string &DatFile::filename() const {
    return filename_;
}
  898. DAT_RESULT DatFile::CommitDirectories() {
  899. for (auto file_id : pending_dictionary_) {
  900. WriteData(dictionary_[file_id]->MakeHeaderData(), 32, dictionary_[file_id]->dictionary_offset());
  901. }
  902. pending_dictionary_.clear();
  903. return SUCCESS;
  904. }
  905. void DatFile::AddBufferedSize() {
  906. if (free_buffered_size_ >= MIN_BUFFERED_SIZE)
  907. return;
  908. BinaryData nulls(MAX_BUFFERED_SIZE);
  909. WriteData(nulls, MAX_BUFFERED_SIZE, file_size_);
  910. free_buffered_size_ = MAX_BUFFERED_SIZE;
  911. }
  912. }
  913. }