// DatFile.cpp
  1. //
  2. // Created by Иван_Архипов on 31.10.2017.
  3. //
  4. #include "DatFile.h"
  5. #include "BinaryData.h"
  6. #include "SubDirectory.h"
  7. #include "Subfile.h"
  8. #include "SubfileData.h"
  9. #include <EasyLogging++/easylogging++.h>
  10. #include <unistd.h>
  11. #include <algorithm>
  12. #include <iterator>
  13. #include <locale>
  14. #define ELPP_FEATURE_CRASH_LOG
  15. INITIALIZE_EASYLOGGINGPP
  16. #ifdef WIN32
  17. #define fseek _fseeki64
  18. #define ftell _ftelli64
  19. #endif
  20. extern "C++"
  21. {
  22. namespace LOTRO_DAT {
  23. //------------------------------------------------//
  24. // INIT SECTION
  25. //------------------------------------------------//
  26. DatFile::DatFile() {
  27. dat_state_ = CLOSED;
  28. root_directory_ = nullptr;
  29. file_handler_ = nullptr;
  30. free_buffered_size_ = 0;
  31. orig_dict_.clear();
  32. patch_dict_.clear();
  33. dictionary_.clear();
  34. el::Configurations defaultConf;
  35. defaultConf.setToDefault();
  36. defaultConf.setGlobally(el::ConfigurationType::Format,
  37. "%datetime %level %fbase (line %line) : %msg (function: %func)");
  38. defaultConf.setGlobally(el::ConfigurationType::ToFile, "true");
  39. defaultConf.setGlobally(el::ConfigurationType::Filename, "dat_library.log");
  40. defaultConf.setGlobally(el::ConfigurationType::ToStandardOutput, "false");
  41. defaultConf.setGlobally(el::ConfigurationType::PerformanceTracking, "true");
  42. defaultConf.setGlobally(el::ConfigurationType::MaxLogFileSize, "5242880"); // 5MB
  43. defaultConf.setGlobally(el::ConfigurationType::LogFlushThreshold, "1"); // Flush after every one log
  44. defaultConf.set(el::Level::Debug, el::ConfigurationType::Enabled, "false");
  45. defaultConf.set(el::Level::Debug, el::ConfigurationType::Filename, "dat_library_debug.log");
  46. el::Loggers::reconfigureAllLoggers(defaultConf);
  47. LOG(INFO) << "==================================================================";
  48. LOG(INFO) << "Starting new DatFile class instance";
  49. }
  50. DAT_RESULT DatFile::InitDatFile(const std::string &filename, int dat_id) {
  51. LOG(DEBUG) << "Started initialisation of DatFile " << filename;
  52. if (dat_state_ != CLOSED && filename == filename_) {
  53. LOG(DEBUG) << "Trying to reopen the same file: " << filename << ". Doing nothing.";
  54. return SUCCESS;
  55. }
  56. if (dat_state_ != CLOSED && filename != filename_) {
  57. LOG(DEBUG) << "DatFile wasn't closed yet. Closing in order to reopen.";
  58. if (CloseDatFile() != SUCCESS) {
  59. LOG(ERROR) << "Unable to perform CloseDatFile()! Aborting initialization!";
  60. return FAILED;
  61. }
  62. }
  63. dat_id_ = dat_id;
  64. dat_state_ = CLOSED;
  65. current_locale_ = ORIGINAL;
  66. root_directory_ = nullptr;
  67. file_handler_ = nullptr;
  68. free_buffered_size_ = 0;
  69. filename_ = "none";
  70. DAT_RESULT result;
  71. DAT_RESULT return_value = SUCCESS;
  72. result = OpenDatFile(filename.c_str());
  73. if (result != SUCCESS) {
  74. LOG(ERROR) << "Unable to perform opening file. Aborting.";
  75. CloseDatFile();
  76. return result;
  77. }
  78. return_value = std::max(return_value, result);
  79. result = ReadSuperBlock();
  80. if (result <= 0) {
  81. LOG(ERROR) << "Unable to read super block. Aborting.";
  82. CloseDatFile();
  83. return result;
  84. }
  85. return_value = std::max(return_value, result);
  86. result = MakeDirectories();
  87. if (result <= 0) {
  88. LOG(ERROR) << "Unable to make directories. Aborting.";
  89. CloseDatFile();
  90. return result;
  91. }
  92. return_value = std::max(return_value, result);
  93. result = MakeDictionary();
  94. if (result <= 0) {
  95. LOG(ERROR) << "Unable to make dictionary. Aborting.";
  96. CloseDatFile();
  97. return result;
  98. }
  99. return_value = std::max(return_value, result);
  100. result = InitLocales();
  101. if (result <= 0) {
  102. LOG(ERROR) << "Unable to initialize locales. Aborting.";
  103. CloseDatFile();
  104. return result;
  105. }
  106. return_value = std::max(return_value, result);
  107. LOG(INFO) << "File " << filename << " opened successfully!";
  108. filename_ = filename;
  109. dat_state_ = READY;
  110. LOG(INFO) << "Making last preparations...";
  111. return_value = std::max(return_value, result);
  112. if (return_value >= 2) {
  113. LOG(WARNING) << "Dat file could be corrupted. Trying to delete corrupted dictionary rows";
  114. if (RepairDatFile() != SUCCESS)
  115. return CRITICAL_DAT_ERROR;
  116. }
  117. if (CheckIfUpdatedByGame()) {
  118. LOG(INFO) << ".dat file was updated by game! Need to reinitialize files and directories!";
  119. CloseDatFile();
  120. InitDatFile(filename, dat_id);
  121. }
  122. dat_without_patches_ = CheckIfNotPatched();
  123. LOG(INFO) << "Preparations made successfully! Init return value = " << return_value;
  124. return return_value;
  125. }
  126. DAT_RESULT DatFile::OpenDatFile(const char *dat_name) {
  127. LOG(DEBUG) << "Started opening DatFile";
  128. if (dat_state_ != CLOSED) {
  129. CloseDatFile();
  130. }
  131. file_handler_ = fopen(dat_name, "r+b");
  132. if (file_handler_ == nullptr) {
  133. LOG(ERROR) << "Unable to open file " << dat_name;
  134. return NO_FILE_ERROR;
  135. }
  136. fseek(file_handler_, 0, SEEK_END);
  137. actual_dat_size_ = ftell(file_handler_);
  138. fseek(file_handler_, 0, SEEK_SET);
  139. dat_state_ = SUCCESS_OPENED;
  140. LOG(DEBUG) << "Successfully opened DatFile";
  141. return SUCCESS;
  142. }
  143. DAT_RESULT DatFile::ReadSuperBlock() {
  144. LOG(DEBUG) << "Started reading superblock";
  145. if (dat_state_ != SUCCESS_OPENED) {
  146. LOG(ERROR) << "Dat state isn't SUCCESS_OPENED. Cannot perform extraction.";
  147. return INCORRECT_STATE_ERROR;
  148. }
  149. BinaryData data(1024);
  150. ReadData(data, 1024);
  151. constant1_ = data.ToNumber<4>(0x100);
  152. constant2_ = data.ToNumber<4>(0x140);
  153. version1_ = data.ToNumber<4>(0x14C);
  154. file_size_ = data.ToNumber<4>(0x148);
  155. version2_ = data.ToNumber<4>(0x150);
  156. fragmentation_journal_offset_ = data.ToNumber<4>(0x154);
  157. fragmentation_journal_end_ = data.ToNumber<4>(0x158);
  158. fragmentation_journal_size_ = data.ToNumber<4>(0x15C);
  159. root_directory_offset_ = data.ToNumber<4>(0x160);
  160. free_dat_size_ = data.ToNumber<4>(0x19C);
  161. if (constant1_ != 0x4C5000) {
  162. LOG(ERROR) << "variable at position 0x100 is not equal to .dat file constant!";
  163. return INCORRECT_SUPERBLOCK_ERROR;
  164. }
  165. if (constant2_ != 0x5442) {
  166. LOG(ERROR) << "variable at position 0x140 is not equal to .dat file constant!";
  167. return INCORRECT_SUPERBLOCK_ERROR;
  168. }
  169. if (file_size_ != actual_dat_size_) {
  170. LOG(ERROR) << "variable at 0x148 position is not equal to .dat file size!";
  171. //return CORRUPTED_FILE_WARNING;
  172. }
  173. dat_state_ = SUCCESS_SUPERBLOCK;
  174. LOG(DEBUG) << "Superblock read successfully";
  175. return SUCCESS;
  176. }
// Builds the in-memory directory tree rooted at root_directory_offset_.
// Requires the superblock to have been read (state SUCCESS_SUPERBLOCK).
DAT_RESULT DatFile::MakeDirectories() {
    LOG(DEBUG) << "Started making directories";
    if (dat_state_ != SUCCESS_SUPERBLOCK) {
        LOG(ERROR) << "Dat state isn't SUCCESS_SUPERBLOCK. Cannot make directories.";
        return INCORRECT_STATE_ERROR;
    }
    // The SubDirectory constructor reads the directory (and, presumably, its
    // nested subdirectories) from the file — see SubDirectory.cpp.
    root_directory_ = new SubDirectory((unsigned) root_directory_offset_, this);
    dat_state_ = SUCCESS_DIRECTORIES;
    LOG(DEBUG) << "Directories made successfully";
    return SUCCESS;
}
  188. DAT_RESULT DatFile::MakeDictionary() {
  189. LOG(DEBUG) << "Started making dictionary";
  190. if (dat_state_ != SUCCESS_DIRECTORIES) {
  191. LOG(ERROR) << "Dat state isn't SUCCESS_DIRECTORIES. Cannot make directories.";
  192. return INCORRECT_STATE_ERROR;
  193. }
  194. if (root_directory_ == nullptr) {
  195. LOG(ERROR) << "root_directory is nullptr!!";
  196. return INIT_ERROR;
  197. }
  198. root_directory_->MakeDictionary(dictionary_);
  199. dat_state_ = SUCCESS_DICTIONARY;
  200. LOG(DEBUG) << "Dictionary made successfull";
  201. return SUCCESS;
  202. }
// Reads the locale dictionaries (original/patched subfile headers) that this
// library stores inside the .dat file itself, at the offset recorded at
// absolute position 300.  If no dictionary is present, or it is outdated or
// unrecognised, the locale dicts stay empty and SUCCESS is returned — locales
// are an optional feature.
DAT_RESULT DatFile::InitLocales() {
    LOG(INFO) << "Initialising locales...";
    BinaryData dicts_data(4);
    // Offset of the locale dictionary block is stored at absolute position 300.
    ReadData(dicts_data, 4, 300);
    long long dict_offset = dicts_data.ToNumber<4>(0);
    if (dict_offset == 0 || dict_offset + 8 >= actual_dat_size_) {
        LOG(INFO) << "Dictionary offset is empty or incorrect. Passing.";
        return SUCCESS;
    }
    // First 8 bytes of the block: payload size and format version.
    ReadData(dicts_data, 4, dict_offset);
    long long dict_size = dicts_data.ToNumber<4>(0);
    ReadData(dicts_data, 4, dict_offset + 4);
    long long dict_version = dicts_data.ToNumber<4>(0);
    LOG(INFO) << "Dictionary size is " << dict_size << ". Version is " << dict_version;
    // Only format version 100 (written by CommitLocales()) is understood.
    if (dict_version != 100) {
        LOG(WARNING) << "DICTIONARY IS OLD!!!";
        return SUCCESS;
    }
    dicts_data = BinaryData((unsigned)dict_size);
    ReadData(dicts_data, dict_size, dict_offset + 8);
    if (dicts_data.size() < 15) {
        LOG(ERROR) << "Incorrect dictionary. Passing.";
        return FAILED;
    }
    // 15-byte magic marker written by CommitLocales(); its absence means the
    // block was not produced by this library.
    BinaryData hi_data = dicts_data.CutData(0, 15) + BinaryData("\0", 1);
    std::string hi = std::string((char *) (hi_data.data()));
    LOG(DEBUG) << "hi info is " << hi;
    if (hi != "Hi from Gi1dor!") {
        LOG(WARNING) << "Didn't receive 'hi' from Gi1dor... Initialising locale dicts as empty";
        LOG(INFO) << "Could't init locales' file... Continuing without them";
        return SUCCESS;
    }
    int offset = 15;
    // 4-byte tag of the locale the .dat file was last left in.
    BinaryData current_locale_data = dicts_data.CutData(offset, offset + 4) + BinaryData("\0", 1);
    std::string locale((char *) (current_locale_data.data()));
    offset += 4;
    LOG(DEBUG) << "current locale:" << locale;
    if (locale != "PATC" && locale != "ORIG") {
        LOG(WARNING) << "Incorrect locale... Initialising locale dicts as empty";
        LOG(INFO) << "Could't recognize locale... Continuing without locales";
        return SUCCESS;
    }
    current_locale_ = (locale == "PATC" ? PATCHED : ORIGINAL);
    // Layout of the remaining payload (mirrors CommitLocales()):
    // 15 bytes for "Hi from Gi1dor"
    // 4 bytes for LOCALE
    // 4 bytes for orig_dict.size()
    // (32 + 4) * orig_dict.size() bytes for orig_dict data
    // 4 bytes for patch_dict.size()
    // (32 + 4) * patch_dict.size() bytes for patch_dict data
    // 4 bytes for inactive_categories dict
    // 4 * inactive_categories.size() bytes for inactive_categories data
    size_t orig_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    for (size_t i = 0; i < orig_dict_size; i++) {
        // Each entry: 32-byte subfile header followed by a 4-byte category id.
        // NOTE(review): if two entries share a file_id the earlier `new Subfile`
        // is overwritten in the map and leaks — confirm ids are unique on disk.
        auto file = new Subfile(this, dicts_data.CutData(offset, offset + 32));
        orig_dict_[file->file_id()] = file;
        offset += 32;
        orig_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
        offset += 4;
        if (orig_dict_[file->file_id()]->category == 0)
            LOG(DEBUG) << "file category is undefined (0)!";
    }
    size_t patch_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    for (size_t i = 0; i < patch_dict_size; i++) {
        auto file = new Subfile(this, dicts_data.CutData(offset, offset + 32));
        patch_dict_[file->file_id()] = file;
        offset += 32;
        patch_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
        offset += 4;
        if (patch_dict_[file->file_id()]->category == 0)
            LOG(DEBUG) << "file category is undefined (0)!";
    }
    // Trailing list of category ids whose patches are currently disabled.
    size_t active_patches_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
    offset += 4;
    for (size_t i = 0; i < active_patches_dict_size; i++) {
        inactive_categories.insert(dicts_data.ToNumber<4>(offset));
        offset += 4;
    }
    LOG(INFO) << "There are " << patch_dict_.size() << " files in patch locale dictionary";
    LOG(INFO) << "There are " << orig_dict_.size() << " files in original locale dictionary";
    std::string inactive_cat_s;
    for (auto i : inactive_categories) {
        inactive_cat_s += std::to_string(i) + " ";
    }
    LOG(INFO) << "Unactive patches now: " << inactive_cat_s;
    LOG(INFO) << "Finished initialising locales";
    return SUCCESS;
}
  292. //------------------------------------------------//
  293. // CLOSE SECTION
  294. //------------------------------------------------//
// Destructor: commits any pending changes and releases all resources
// by delegating to CloseDatFile().
DatFile::~DatFile() {
    CloseDatFile();
}
  298. DAT_RESULT DatFile::CloseDatFile() {
  299. LOG(INFO) << "Closing DatFile";
  300. if (dat_state_ == CLOSED) {
  301. LOG(INFO) << "DatFile is already closed. Nothing to do";
  302. return SUCCESS;
  303. }
  304. // Committing changes and updating/writing locales and header info
  305. if (!pending_dictionary_.empty() || dat_state_ == UPDATED) {
  306. CommitLocales();
  307. CommitDirectories();
  308. //ModifyFragmentationJournal();
  309. //free_dat_size_ = 128248;
  310. //fragmentation_journal_end_ = 0;
  311. //fragmentation_journal_size_ = 1;
  312. //UpdateHeader();
  313. }
  314. current_locale_ = ORIGINAL;
  315. if (file_handler_ != nullptr) {
  316. fclose(file_handler_);
  317. }
  318. SubDirectory::visited_subdirectories_.clear();
  319. delete root_directory_;
  320. //truncate64(filename_.c_str(), file_size_);
  321. free_buffered_size_ = 0;
  322. filename_ = "none";
  323. orig_dict_.clear();
  324. patch_dict_.clear();
  325. pending_patch_.clear();
  326. inactive_categories.clear();
  327. file_handler_ = nullptr;
  328. root_directory_ = nullptr;
  329. pending_dictionary_.clear();
  330. dictionary_.clear();
  331. constant1_ = 0;
  332. constant2_ = 0;
  333. file_size_ = 0;
  334. version1_ = 0;
  335. version2_ = 0;
  336. fragmentation_journal_size_ = 0;
  337. fragmentation_journal_end_ = 0;
  338. root_directory_offset_ = 0;
  339. fragmentation_journal_offset_ = 0;
  340. dat_state_ = CLOSED;
  341. dat_id_ = -1;
  342. LOG(INFO) << "File closed successfully.";
  343. return SUCCESS;
  344. }
// Serialises the locale dictionaries (orig_dict_, patch_dict_, the current
// locale tag and the inactive-category list) into a single block and writes it
// back into the .dat file; the block's offset is recorded at position 300.
// The format written here is the one InitLocales() parses.
DAT_RESULT DatFile::CommitLocales() {
    LOG(INFO) << "Committing locales...";
    // 15 bytes for "Hi from Gi1dor"
    // 4 bytes for LOCALE
    // 4 bytes for orig_dict.size()
    // (32 + 4) * orig_dict.size() bytes for orig_dict data
    // 4 bytes for patch_dict.size()
    // (32 + 4) * patch_dict.size() bytes for patch_dict data
    // 4 bytes for inactive_categories list
    // 4 * inactive_categories.size() bytes for inactive_categories data
    // NOTE(review): the leading "14 +" is not accounted for in the layout
    // comment above — looks like extra slack; confirm it is intentional.
    BinaryData binary_data = BinaryData(14 + 15 + 4
                                        + 4 + (32 + 4) * orig_dict_.size()
                                        + 4 + (32 + 4) * patch_dict_.size()
                                        + 4 + 4 * inactive_categories.size());
    size_t current_size = 0;
    binary_data.Append(BinaryData("Hi from Gi1dor!", 15), current_size);
    current_size += 15;
    binary_data.Append(BinaryData((current_locale_ == ORIGINAL ? "ORIG" : "PATC"), 4), current_size);
    current_size += 4;
    binary_data.Append(BinaryData::FromNumber<4>(orig_dict_.size()), current_size);
    current_size += 4;
    for (auto file : orig_dict_) {
        // 32-byte subfile header followed by its 4-byte category id.
        binary_data.Append(file.second->MakeHeaderData(), current_size);
        current_size += 32;
        binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
        current_size += 4;
    }
    binary_data.Append(BinaryData::FromNumber<4>(patch_dict_.size()), current_size);
    current_size += 4;
    for (auto file : patch_dict_) {
        binary_data.Append(file.second->MakeHeaderData(), current_size);
        current_size += 32;
        binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
        current_size += 4;
    }
    binary_data.Append(BinaryData::FromNumber<4>(inactive_categories.size()), current_size);
    current_size += 4;
    for (auto patch_id : inactive_categories) {
        binary_data.Append(BinaryData::FromNumber<4>(patch_id), current_size);
        current_size += 4;
    }
    // Locate the existing locale block (offset stored at position 300) to see
    // whether the new data fits into the previously reserved space.
    BinaryData dicts_data(4);
    ReadData(dicts_data, 4, 300);
    long long dict_offset = dicts_data.ToNumber<4>(0);
    ReadData(dicts_data, 4, dict_offset);
    long long dict_size = dicts_data.ToNumber<4>(0);
    if (binary_data.size() > dict_size) {
        // Doesn't fit: append a new block at the end of the file and reserve
        // generous extra space so future commits can reuse it in place.
        // NOTE(review): the size field is written as max(actual, 20MB) while
        // only binary_data.size() bytes are meaningful — InitLocales() relies
        // on the magic marker to cope with the padding; confirm.
        WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size(), 20u * 1024u * 1024u)), 4, file_size_);
        WriteData(BinaryData::FromNumber<4>(100), 4, file_size_ + 4);
        WriteData(binary_data, binary_data.size(), file_size_ + 8);
        WriteData(BinaryData::FromNumber<4>(file_size_), 4, 300);
        file_size_ += binary_data.size();
        // Adding space for 25 megabytes locales file in total.
        BinaryData nulls(unsigned(20 * 1024 * 1024));
        WriteData(nulls, nulls.size(), file_size_);
        file_size_ += nulls.size();
    } else {
        // Fits: overwrite the existing block in place.
        WriteData(BinaryData::FromNumber<4>(std::max(binary_data.size(), 20u * 1024u * 1024u)), 4, dict_offset);
        WriteData(BinaryData::FromNumber<4>(100), 4, dict_offset + 4);
        WriteData(binary_data, binary_data.size(), dict_offset + 8);
    }
    LOG(INFO) << "Locales commited successfully";
    return SUCCESS;
}
  409. DAT_RESULT DatFile::CommitDirectories() {
  410. for (auto file_id : pending_dictionary_) {
  411. if (dictionary_[file_id] == nullptr || !CorrectSubfile(dictionary_[file_id]))
  412. continue;
  413. WriteData(dictionary_[file_id]->MakeHeaderData(), 32, dictionary_[file_id]->dictionary_offset());
  414. }
  415. pending_dictionary_.clear();
  416. return SUCCESS;
  417. }
  418. DAT_RESULT DatFile::ModifyFragmentationJournal() {
  419. if (fragmentation_journal_size_ == 0)
  420. return SUCCESS;
  421. LOG(DEBUG) << "Modifying fragmentation journal";
  422. BinaryData data(4);
  423. ReadData(data, 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_);
  424. LOG(INFO) << "FREE_SIZE BLOCK = " << data.ToNumber<4>(0);
  425. long long free_size = data.ToNumber<4>(0);
  426. long long free_offset = file_size_;
  427. BinaryData nulldata = BinaryData(unsigned(free_size));
  428. WriteData(nulldata, nulldata.size(), file_size_);
  429. file_size_ += nulldata.size();
  430. WriteData(BinaryData::FromNumber<4>(free_size), 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_);
  431. WriteData(BinaryData::FromNumber<4>(free_offset), 4, fragmentation_journal_offset_ + 8 * fragmentation_journal_size_ + 4);
  432. //nulldata = BinaryData(8);
  433. //WriteData(nulldata, nulldata.size(), fragmentation_journal_offset_ + 16);
  434. LOG(DEBUG) << "Finished modifying fragmentation journal";
  435. return SUCCESS;
  436. }
  437. DAT_RESULT DatFile::UpdateHeader() {
  438. LOG(DEBUG) << "Updating header";
  439. WriteData(BinaryData::FromNumber<4>(constant1_), 4, 0x100);
  440. WriteData(BinaryData::FromNumber<4>(constant2_), 4, 0x140);
  441. //WriteData(BinaryData::FromNumber<4>( 0 ), 4, 0x144);
  442. WriteData(BinaryData::FromNumber<4>(file_size_), 4, 0x148);
  443. WriteData(BinaryData::FromNumber<4>(version1_ ), 4, 0x14C);
  444. WriteData(BinaryData::FromNumber<4>(version2_ ), 4, 0x150);
  445. WriteData(BinaryData::FromNumber<4>(fragmentation_journal_offset_), 4, 0x154);
  446. WriteData(BinaryData::FromNumber<4>(fragmentation_journal_end_), 4, 0x158);
  447. WriteData(BinaryData::FromNumber<4>(fragmentation_journal_size_), 4, 0x15C);
  448. WriteData(BinaryData::FromNumber<4>(root_directory_offset_), 4, 0x160);
  449. WriteData(BinaryData::FromNumber<4>(free_dat_size_), 4, 0x19C);
  450. LOG(DEBUG) << "Finished updating header";
  451. return SUCCESS;
  452. }
  453. DAT_RESULT DatFile::RepairDatFile() {
  454. for (auto file : dictionary_) {
  455. auto subfile = file.second;
  456. auto file_id = file.first;
  457. if (CorrectSubfile(subfile))
  458. continue;
  459. orig_dict_.clear();
  460. patch_dict_.clear();
  461. return SUCCESS;
  462. if (orig_dict_.count(file_id) == 0 || subfile->file_offset() == orig_dict_[file_id]->file_offset())
  463. return CRITICAL_DAT_ERROR;
  464. *dictionary_[file_id] = *orig_dict_[file_id];
  465. patch_dict_.erase(file_id);
  466. orig_dict_.erase(file_id);
  467. }
  468. return SUCCESS;
  469. }
  470. //------------------------------------------------//
  471. // DAT INFO SECTION
  472. //------------------------------------------------//
// Returns the current stage of the open/parse lifecycle
// (CLOSED ... SUCCESS_OPENED ... READY / UPDATED).
DAT_STATE DatFile::DatFileState() const {
    return dat_state_;
}
// Returns the number of subfiles currently present in the dictionary.
long long DatFile::files_number() const {
    return dictionary_.size();
}
  479. //------------------------------------------------//
  480. // EXTRACT SECTION
  481. //------------------------------------------------//
  482. DAT_RESULT DatFile::ExtractFile(long long file_id, const std::string &path) {
  483. LOG(DEBUG) << "Extracting file " << file_id << " to path " << path;
  484. if (dat_state_ < READY) {
  485. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  486. return INCORRECT_STATE_ERROR;
  487. }
  488. BinaryData file_data = GetFileData(dictionary_[file_id], 8);
  489. if (file_data.size() == 0) {
  490. LOG(ERROR) << "File data is empty. Aborting extraction.";
  491. return NO_FILE_ERROR;
  492. }
  493. SubfileData export_data = dictionary_[file_id]->PrepareForExport(file_data);
  494. if (export_data.Empty()) {
  495. LOG(ERROR) << "Export data is empty. Aborting extraction.";
  496. return NO_FILE_ERROR;
  497. }
  498. if (export_data.binary_data.WriteToFile(path + export_data.options["ext"].as<std::string>()) != SUCCESS) {
  499. LOG(ERROR) << "Cannot write to file.";
  500. return WRITE_TO_FILE_ERROR;
  501. }
  502. LOG(DEBUG) << "File " << file_id << " extracted successfully";
  503. return SUCCESS;
  504. }
  505. DAT_RESULT DatFile::ExtractFile(long long file_id, Database *db) {
  506. LOG(DEBUG) << "Extracting file " << file_id << " to database.";
  507. if (dat_state_ < READY) {
  508. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  509. return INCORRECT_STATE_ERROR;
  510. }
  511. BinaryData file_data = GetFileData(dictionary_[file_id], 8);
  512. if (file_data.Empty()) {
  513. LOG(WARNING) << "File with id " << dictionary_[file_id]->file_id() << " is empty. Passing it.";
  514. return SUCCESS;
  515. }
  516. SubfileData export_data;
  517. export_data = dictionary_[file_id]->PrepareForExport(file_data);
  518. export_data.options["did"] = dat_id_;
  519. if (export_data == SubfileData()) {
  520. LOG(WARNING) << "File with id " << dictionary_[file_id]->file_id() << " is empty or incorrect.";
  521. return SUCCESS;
  522. }
  523. try {
  524. db->PushFile(export_data);
  525. } catch (std::exception &e) {
  526. LOG(ERROR) << "Caught " << e.what() << " exception.";
  527. return FAILED;
  528. }
  529. LOG(DEBUG) << "File " << file_id << " extracted successfully";
  530. return SUCCESS;
  531. }
  532. int DatFile::ExtractAllFilesByType(FILE_TYPE type, std::string path) {
  533. LOG(INFO) << "Extracting all files to path " << path;
  534. if (dat_state_ < READY) {
  535. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  536. return INCORRECT_STATE_ERROR;
  537. }
  538. int success = 0;
  539. for (auto i : dictionary_) {
  540. FILE_TYPE file_type = i.second->FileType();
  541. if (file_type == type) {
  542. success += (ExtractFile(i.second->file_id(), (path + std::to_string(i.second->file_id()))) == SUCCESS
  543. ? 1 : 0);
  544. }
  545. }
  546. LOG(INFO) << "Successfully extracted " << success << " files";
  547. return success;
  548. }
  549. int DatFile::ExtractAllFilesByType(FILE_TYPE type, Database *db) {
  550. LOG(INFO) << "Extracting all files to database...";
  551. if (dat_state_ < READY) {
  552. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  553. return INCORRECT_STATE_ERROR;
  554. }
  555. int success = 0;
  556. for (auto i : dictionary_) {
  557. FILE_TYPE file_type = i.second->FileType();
  558. if (file_type == type) {
  559. success += (ExtractFile(i.second->file_id(), db) == SUCCESS ? 1 : 0);
  560. }
  561. }
  562. LOG(INFO) << "Extracted " << success << " files";
  563. return success;
  564. }
  565. //------------------------------------------------//
  566. // PATCH SECTION
  567. //------------------------------------------------//
// Replaces the contents of the subfile identified by data.options["fid"] with
// the patch payload, switching the archive into the PATCHED locale first (done
// inside ApplyFilePatch()). Category handling mirrors the inactive-category
// bookkeeping described below.
DAT_RESULT DatFile::PatchFile(const SubfileData &data) {
    LOG(DEBUG) << "Patching file with id = " << data.options["fid"].as<long long>() << ".";
    // Keep the cached sizes consistent before appending patched data.
    actual_dat_size_ = std::max(file_size_, actual_dat_size_);
    if (!dat_without_patches_) {
        file_size_ = actual_dat_size_;
    }
    if (dat_state_ < READY) {
        LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
        return INCORRECT_STATE_ERROR;
    }
    auto file_id = data.options["fid"].as<long long>();
    if (dictionary_.count(file_id) == 0) {
        LOG(ERROR) << "Cannot patch file - there is no file in dictionary with file_id = " << file_id;
        return NO_FILE_ERROR;
    }
    Subfile *file = dictionary_[file_id];
    if (!CorrectSubfile(file)) {
        LOG(ERROR) << "Incorrect subfile with id " << file->file_id() << " (headers do not match). Cannot patch it";
        return FAILED;
    }
    // If file has inactive category, then we should set it to patched state in order to commit patch and
    // then in ApplyFilePatch(), if new category is still inactive, return dictionary to its original state;
    // NOTE(review): 2013266257 is special-cased throughout this class — its
    // meaning is not visible here; confirm before touching these conditions.
    if (inactive_categories.count(file->category) != 0 && patch_dict_.count(file_id) != 0 && file_id != 2013266257) {
        *dictionary_[file_id] = *patch_dict_[file_id];
    }
    // Category comes from the patch when provided; 1 is the fallback category.
    if (data.options["cat"].IsDefined()) {
        file->category = data.options["cat"].as<long long>();
    } else {
        file->category = 1;
    }
    BinaryData old_data = GetFileData(file);
    if (old_data.Empty()) {
        LOG(ERROR) << "GetFileData returned empty data. Aborting.";
        return DAT_PATCH_FILE_ERROR;
    }
    // Merge the old on-disk data with the patch payload, then write it back.
    BinaryData patch_data = file->MakeForImport(old_data, data);
    DAT_RESULT result = ApplyFilePatch(file, patch_data);
    if (result != SUCCESS)
        return result;
    LOG(DEBUG) << "Patched successfully file " << data.options["fid"].as<long long>() << ".";
    return SUCCESS;
}
  610. DAT_RESULT DatFile::PatchAllDatabase(Database *db) {
  611. LOG(INFO) << "Patching all database";
  612. if (dat_state_ < READY) {
  613. LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
  614. return INCORRECT_STATE_ERROR;
  615. }
  616. SubfileData data;
  617. data = db->GetNextFile();
  618. while (!data.Empty()) {
  619. DAT_RESULT result = PatchFile(data);
  620. if (result != SUCCESS)
  621. LOG(ERROR) << "Cannot patch file" << data.options["fid"].as<long long>() << " continuing";
  622. data = db->GetNextFile();
  623. }
  624. LOG(INFO) << "Successfully patched whole database";
  625. return SUCCESS;
  626. }
  627. DAT_RESULT DatFile::WriteUnorderedDictionary(std::string path) const {
  628. LOG(INFO) << "Writing unordered dictionary to " << path << "dict.txt";
  629. FILE *f = nullptr;
  630. fopen_s(&f, (path + "dict.txt").c_str(), "w");
  631. if (f == nullptr) {
  632. LOG(ERROR) << "Cannot open file " << path + "dict.txt";
  633. return WRITE_TO_FILE_ERROR;
  634. }
  635. fprintf(f, "unk1 file_id offset size1 timestamp version size2 unknown2 type\n");
  636. for (auto i : dictionary_) {
  637. fprintf(f, "%lld %lld %lld %lld %lld %lld %lld %lld %s\n", i.second->unknown1(), i.second->file_id(),
  638. i.second->file_offset(), i.second->file_size(), i.second->timestamp(), i.second->version(),
  639. i.second->block_size(), i.second->unknown2(), i.second->Extension().c_str());
  640. }
  641. fclose(f);
  642. LOG(INFO) << "Unordered dictionary was written successfully to " << path << "dict.txt";
  643. return SUCCESS;
  644. }
// Writes one imported subfile's bytes into the .dat file and updates the
// in-memory locale dictionaries (orig_dict_ / patch_dict_) to match.
// `data` is the complete subfile block (8-byte block header included) and
// is modified in place: a zero fragment counter is prepended.
//
// @param file  dictionary entry to patch; its offset/size fields may be
//              rewritten when the file is relocated.
// @param data  the new subfile contents (header included).
// @return FAILED on empty data, INCORRECT_PATCH_FILE when the payload's
//         embedded file_id doesn't match, SUCCESS otherwise.
DAT_RESULT DatFile::ApplyFilePatch(Subfile *file, BinaryData &data) {
    LOG(DEBUG) << "Applying " << file->file_id() << " patch.";
//    if (patch_dict_.size() == 0 && pending_dictionary_.size() == 0) {
//        BinaryData nulls(50 * 1024 * 1024);
//        WriteData(nulls, nulls.size(), file_size_);
//        file_size_ += 50 * 1024 * 1024;
//    }
    if (data.Empty()) {
        LOG(ERROR) << "Error caused during making file for import. Cannot patch file " << file->file_id();
        return FAILED;
    }
    auto file_id = file->file_id();
    // 2013266257 is treated as a special id throughout this function: it is
    // never recorded in the locale dictionaries. NOTE(review): confirm what
    // this subfile is (version/marker file?).
    if (current_locale() != PATCHED && file_id != 2013266257) {
        LOG(INFO) << "Changing locale to PATCHED(RU) in order to patch file";
        SetLocale(PATCHED);
    }
    dat_state_ = UPDATED;
    // First time this file is touched: keep its original header so the
    // ORIGINAL locale can be restored later.
    if (orig_dict_.count(file_id) == 0 && file_id != 2013266257) {
        orig_dict_[file_id] = new Subfile(this, file->MakeHeaderData());
    }
    // Relocate the file to the end of the .dat when it has no patched slot
    // yet, or the new contents no longer fit into the current block.
    if ((patch_dict_.count(file_id) == 0 && file_id != 2013266257) || data.size() > file->block_size()
        || file->file_size() + 8 > file->block_size()) {
        file->file_offset_ = file_size_;
        file->block_size_ = std::max((long long)data.size(), file->block_size_);
        // Consume pre-allocated tail space; replenish it if it ran low.
        free_buffered_size_ = std::max(0ll, free_buffered_size_ - file->block_size_ - 8);
        AddBufferedSize();
        this->file_size_ += file->block_size_ + 8;
    }
    file->file_size_ = data.size() - 8;
    data.Append(BinaryData::FromNumber<4>(0), 0); // set additional fragments count to zero
    // Sanity check: bytes 8..11 of the block must repeat the file_id.
    if (file_id != data.ToNumber<4>(8)) {
        LOG(ERROR) << "Created data's file_id doesn't match to original! Patch wasn't written to .dat file";
        return INCORRECT_PATCH_FILE;
    }
    //data.ProtectData();
    //BinaryData data1(data.size());
    WriteData(data, data.size(), file->file_offset());
    //data.DeprotectData();
    patch_dict_.erase(file_id); // Drop the stale entry from the patched (RU) dictionary
    if (file_id != 2013266257) {
        patch_dict_[file_id] = new Subfile(this, file->MakeHeaderData()); // Record the freshly written header
    }
    // If category is forbidden, then return file header data to original state
    if (inactive_categories.count(file->category) != 0) {
        file->file_offset_ = orig_dict_[file_id]->file_offset_;
        file->file_size_ = orig_dict_[file_id]->file_size_;
        file->block_size_ = orig_dict_[file_id]->block_size_;
        file->timestamp_ = orig_dict_[file_id]->timestamp_;
        file->version_ = orig_dict_[file_id]->version_;
    }
    // Keep the category field mirrored into both locale dictionaries.
    if (orig_dict_.count(file_id) != 0 && file_id != 2013266257)
        orig_dict_[file_id]->category = file->category;
    if (patch_dict_.count(file_id) != 0 && file_id != 2013266257)
        patch_dict_[file_id]->category = file->category;
    // Applying file info in directory
    pending_dictionary_.insert(file_id);
    LOG(DEBUG) << "Successfully applied file " << file->file_id() << " patch.";
    return SUCCESS;
}
  704. //------------------------------------------------//
  705. // INPUT-OUTPUT SECTION
  706. //------------------------------------------------//
// Reassembles the raw bytes of a subfile, starting `offset` bytes into its
// on-disk block. Handles both contiguous files and files split into extra
// fragments (the fragment table sits at the end of the block).
//
// @param file    dictionary entry describing where the subfile lives.
// @param offset  how many bytes of the block to skip (header skip).
// @return the assembled bytes, or an empty BinaryData on validation failure.
BinaryData DatFile::GetFileData(const Subfile *file, long long int offset) {
    LOG(DEBUG) << "Getting file " << file->file_id() << " data";
    BinaryData mfile_id(20);
    ReadData(mfile_id, 20, file->file_offset() + 8);
    if (mfile_id.Empty()) {
        LOG(ERROR) << "Error while reading file " << file->file_id() << " header (offset = "
                   << file->file_offset() << "); Aborting.";
        return BinaryData(0);
    }
    // The first 4 bytes after the block header must repeat the file_id,
    // unless the payload is compressed (then the field holds other data).
    if (!mfile_id.CheckCompression() && file->file_id() != mfile_id.ToNumber<4>(0)) {
        LOG(ERROR) << "Bad DatFile::GetFileData() - file_id in Subfile ("
                   << file->file_id()
                   << ") doesn't match to file_id (" << mfile_id.ToNumber<4>(0) << ")in DatFile.";
        return BinaryData(0);
    }
    BinaryData data((unsigned)(file->file_size() + (8 - offset)));
    // Simple case: the whole file fits inside its block - one contiguous read.
    if (file->block_size() >= file->file_size() + 8) {
        ReadData(data, file->file_size() + (8 - offset), file->file_offset() + offset);
        return data;
    }
    // Fragmented case: the first 4 bytes of the block hold the fragment
    // count; the block's tail holds (size, offset) pairs, 8 bytes each.
    BinaryData fragments_count(4);
    ReadData(fragments_count, 4, file->file_offset());
    long long fragments_number = fragments_count.ToNumber<4>(0);
    // Bytes of actual payload stored in-block (excludes the fragment table).
    long long current_block_size = file->block_size() - offset - 8 * fragments_number;
    ReadData(data, current_block_size, file->file_offset() + offset);
    BinaryData FragmentsDictionary(8 * unsigned(fragments_number));
    ReadData(FragmentsDictionary, 8 * unsigned(fragments_number),
             file->file_offset() + file->block_size() - 8 * fragments_number);
    // Append each fragment after what has been read so far, clamping the
    // final fragment to the remaining file size.
    for (long long i = 0; i < fragments_number; i++) {
        long long fragment_size = FragmentsDictionary.ToNumber<4>(8 * i);
        long long fragment_offset = FragmentsDictionary.ToNumber<4>(8 * i + 4);
        ReadData(data, std::min(fragment_size, file->file_size() - current_block_size), fragment_offset,
                 current_block_size);
        current_block_size += fragment_size;
    }
    LOG(DEBUG) << "Successfully got file " << file->file_id() << " data";
    return data;
}
  745. DAT_RESULT DatFile::ReadData(BinaryData &data, long long size, long long offset, long long data_offset) {
  746. if (dat_state_ == CLOSED) {
  747. LOG(ERROR) << "Dat state is CLOSED. Cannot read data.";
  748. data = BinaryData(0);
  749. return INIT_ERROR;
  750. }
  751. if (data_offset + size > data.size()) {
  752. LOG(ERROR) << "Trying to read more than BinaryData size: Reading " << size << " bytes from " << offset
  753. << " position.";
  754. data = BinaryData(0);
  755. return DAT_READ_ERROR;
  756. }
  757. if (offset + size > actual_dat_size_) {
  758. LOG(ERROR) << "Trying to read more than DatFile size elapsed: Reading " << size << " bytes from " << offset
  759. << " position.";
  760. data = BinaryData(0);
  761. return DAT_READ_ERROR;
  762. }
  763. fseek(file_handler_, offset, SEEK_SET);
  764. fread(data.data() + data_offset, unsigned(size), 1, file_handler_);
  765. return SUCCESS;
  766. }
  767. DAT_RESULT DatFile::WriteData(const BinaryData &data, long long size, long long offset, long long data_offset) {
  768. if (dat_state_ < READY) {
  769. LOG(ERROR) << "Dat state isn't READY. Cannot write data.";
  770. return INCORRECT_STATE_ERROR;
  771. }
  772. fseek(file_handler_, offset, SEEK_SET);
  773. if (data_offset + size > data.size()) {
  774. LOG(ERROR) << "Trying to write more than BinaryData size";
  775. return DAT_WRITE_ERROR;
  776. }
  777. fwrite(data.data() + data_offset, unsigned(size), 1, file_handler_);
  778. actual_dat_size_ = std::max(file_size_, actual_dat_size_);
  779. return SUCCESS;
  780. }
  781. //------------------------------------------------//
  782. // LOCALE SECTION
  783. //------------------------------------------------//
  784. DAT_RESULT DatFile::SetLocale(LOCALE locale) {
  785. LOG(INFO) << "Setting locale to " << (locale == PATCHED ? " PATCHED" : " ORIGINAL");
  786. if (dat_state_ < READY) {
  787. LOG(ERROR) << "Dat state isn't READY. Cannot set locale.";
  788. return INCORRECT_STATE_ERROR;
  789. }
  790. if (current_locale_ == locale) {
  791. return SUCCESS;
  792. }
  793. dat_state_ = UPDATED;
  794. auto dict = GetLocaleDictReference(locale);
  795. for (auto file : *dict) {
  796. if (file.second == nullptr)
  797. continue;
  798. if (dictionary_.count(file.first) == 0) {
  799. LOG(WARNING) << "In locale dictionary there is file with file_id = " << file.first
  800. << "which is not in .dat file! Passing it and removing from locale dictionary";
  801. dict->erase(file.first);
  802. continue;
  803. }
  804. if (dictionary_[file.first]->MakeHeaderData().CutData(8, 16) ==
  805. file.second->MakeHeaderData().CutData(8, 16) ||
  806. inactive_categories.count(orig_dict_[file.first]->category) != 0)
  807. continue;
  808. long long file_id = file.first;
  809. Subfile *new_file = file.second;
  810. *dictionary_[file_id] = *new_file;
  811. pending_dictionary_.insert(file_id);
  812. dat_state_ = UPDATED;
  813. }
  814. current_locale_ = locale;
  815. LOG(INFO) << "Locale set successfull";
  816. return SUCCESS;
  817. }
  818. LOCALE DatFile::current_locale() {
  819. if (dat_state_ < READY) {
  820. LOG(ERROR) << "dat_file is in incorrect state!";
  821. return ORIGINAL;
  822. }
  823. if (current_locale_ != PATCHED && current_locale_ != ORIGINAL) {
  824. LOG(ERROR) << "locale has incorrect value. Setting it to original";
  825. current_locale_ = ORIGINAL;
  826. }
  827. return current_locale_;
  828. }
  829. std::map<long long, Subfile *> *DatFile::GetLocaleDictReference(LOCALE locale) {
  830. switch (locale) {
  831. case PATCHED:
  832. return &patch_dict_;
  833. case ORIGINAL:
  834. return &orig_dict_;
  835. default:
  836. LOG(ERROR) << "Unknown locale! Returning original";
  837. return &orig_dict_;
  838. }
  839. }
  840. //------------------------------------------------//
  841. // CHECKERS SECTION
  842. //------------------------------------------------//
  843. bool DatFile::CorrectSubfile(Subfile *file) {
  844. BinaryData mfile_id(20);
  845. ReadData(mfile_id, 20, file->file_offset() + 8);
  846. if (mfile_id.Empty())
  847. return false;
  848. return (mfile_id.CheckCompression() || file->file_id() == mfile_id.ToNumber<4>(0)) && file->file_size() < 50ll * 1024ll * 1024ll;
  849. }
  850. bool DatFile::CheckIfUpdatedByGame() {
  851. LOG(INFO) << "Checking if DatFile was updated by LotRO";
  852. if (!pending_patch_.empty())
  853. return true;
  854. bool updated = false;
  855. for (auto i : dictionary_) {
  856. long long file_id = i.first;
  857. Subfile *subfile = i.second;
  858. if (patch_dict_.count(file_id) == 0)
  859. continue;
  860. if (*subfile != *patch_dict_[file_id] && *subfile != *orig_dict_[file_id]) {
  861. orig_dict_.clear();
  862. patch_dict_.clear();
  863. LOG(INFO) << "DAT FILE WAS UPDATED!!!! CLEARING PATCH DATA";
  864. pending_patch_.insert(file_id);
  865. truncate64(filename_.c_str(), file_size_);
  866. return true;
  867. orig_dict_.erase(file_id);
  868. patch_dict_.erase(file_id);
  869. updated = true;
  870. dat_state_ = UPDATED;
  871. }
  872. }
  873. return updated;
  874. }
  875. bool DatFile::CheckIfNotPatched() {
  876. LOG(INFO) << "DatFile " << (patch_dict_.empty() ? "HASN'T " : "HAS already")
  877. << " been patched by LEGACY launcher!";
  878. return patch_dict_.empty();
  879. }
  880. bool DatFile::CheckIfPatchedByOldLauncher() {
  881. LOG(INFO) << "DatFile " << (dictionary_.count(620750000) == 0 ? "HASN'T " : "HAS already")
  882. << " been patched by OLD LAUNCHER!";
  883. return dictionary_.count(620750000) > 0;
  884. }
  885. //------------------------------------------------//
  886. // CATEGORY SECTION
  887. //------------------------------------------------//
  888. DAT_RESULT DatFile::EnableCategory(int category) {
  889. LOG(INFO) << "Enabling category " << category;
  890. if (inactive_categories.count(category) == 0)
  891. return SUCCESS;
  892. inactive_categories.erase(category);
  893. dat_state_ = UPDATED;
  894. for (auto file : dictionary_) {
  895. auto file_id = file.first;
  896. if (patch_dict_.count(file_id) > 0 && patch_dict_[file_id]->category == category) {
  897. *file.second = *patch_dict_[file_id];
  898. pending_dictionary_.insert(file_id);
  899. }
  900. }
  901. LOG(INFO) << "Category " << category << " enabled successfully";
  902. return SUCCESS;
  903. }
  904. DAT_RESULT DatFile::DisableCategory(int category) {
  905. LOG(INFO) << "Disabling category " << category;
  906. if (inactive_categories.count(category) != 0)
  907. return SUCCESS;
  908. inactive_categories.insert(category);
  909. dat_state_ = UPDATED;
  910. for (auto file : dictionary_) {
  911. auto file_id = file.first;
  912. if (orig_dict_.count(file_id) && orig_dict_[file_id]->category == category) {
  913. *file.second = *orig_dict_[file_id];
  914. pending_dictionary_.insert(file_id);
  915. }
  916. }
  917. LOG(INFO) << "Category " << category << " disabled successfully";
  918. return SUCCESS;
  919. }
// Returns the set of category ids currently disabled in this .dat file.
const std::set<long long> &DatFile::GetInactiveCategoriesList() {
    return inactive_categories;
}
// Returns the path of the .dat file this object operates on.
const std::string &DatFile::filename() const {
    return filename_;
}
  926. void DatFile::AddBufferedSize() {
  927. if (free_buffered_size_ >= MIN_BUFFERED_SIZE)
  928. return;
  929. BinaryData nulls(MAX_BUFFERED_SIZE);
  930. WriteData(nulls, MAX_BUFFERED_SIZE, file_size_);
  931. free_buffered_size_ = MAX_BUFFERED_SIZE;
  932. }
  933. //------------------------------------------------//
  934. // BACKUP SECTION
  935. //------------------------------------------------//
  936. bool DatFile::CheckIfBackupExists(const std::string &backup_datname) {
  937. std::ifstream dst("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
  938. return !dst.fail();
  939. }
  940. DAT_RESULT DatFile::RemoveBackup(const std::string &backup_datname) {
  941. if (!CheckIfBackupExists(backup_datname))
  942. return SUCCESS;
  943. if (remove(("DAT_LIBRARY_BACKUP/" + backup_datname).c_str()) == 0)
  944. return SUCCESS;
  945. return REMOVE_FILE_ERROR;
  946. }
  947. DAT_RESULT DatFile::CreateBackup(const std::string &backup_datname) {
  948. auto filename = filename_;
  949. auto dat_id = dat_id_;
  950. LOG(INFO) << "Restoring .dat file " << filename << " from backup " << backup_datname;
  951. LOG(INFO) << " Closing DatFile...";
  952. CloseDatFile();
  953. LOG(INFO) << " Copying " << filename << " to " << backup_datname;
  954. mkdir("DAT_LIBRARY_BACKUP");
  955. std::ifstream src(filename, std::ios::binary);
  956. std::ofstream dst("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
  957. std::istreambuf_iterator<char> begin_source(src);
  958. std::istreambuf_iterator<char> end_source;
  959. std::ostreambuf_iterator<char> begin_dest(dst);
  960. std::copy(begin_source, end_source, begin_dest);
  961. src.close();
  962. dst.close();
  963. LOG(INFO) << " Done copying. Initializing restored" << filename << " DatFile...";
  964. InitDatFile(filename, dat_id);
  965. LOG(INFO) << "Restoring .dat file success!";
  966. return SUCCESS;
  967. }
  968. DAT_RESULT DatFile::RestoreFromBackup(const std::string &backup_datname) {
  969. auto filename = filename_;
  970. auto dat_id = dat_id_;
  971. LOG(INFO) << "Restoring .dat file " << filename << " from backup " << backup_datname;
  972. LOG(INFO) << " Closing DatFile...";
  973. CloseDatFile();
  974. LOG(INFO) << " Copying " << filename << " to " << backup_datname;
  975. mkdir("DAT_LIBRARY_BACKUP");
  976. std::ifstream src("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
  977. std::ofstream dst(filename, std::ios::binary);
  978. if (src.fail()) {
  979. LOG(ERROR) << "CANNOT RESTORE FILE FROM BACKUP - no backup specified with name " << backup_datname;
  980. return NO_BACKUP_ERROR;
  981. }
  982. std::istreambuf_iterator<char> begin_source(src);
  983. std::istreambuf_iterator<char> end_source;
  984. std::ostreambuf_iterator<char> begin_dest(dst);
  985. std::copy(begin_source, end_source, begin_dest);
  986. src.close();
  987. dst.close();
  988. LOG(INFO) << " Done copying. Initializing restored" << filename << " DatFile...";
  989. InitDatFile(filename, dat_id);
  990. LOG(INFO) << "Restoring .dat file success!";
  991. return SUCCESS;
  992. }
  993. }
  994. }