DatFile.cpp 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136
  1. //
  2. // Created by Иван_Архипов on 31.10.2017.
  3. //
  4. #include "DatFile.h"
  5. #include "BinaryData.h"
  6. #include "DatException.h"
  7. #include "SubDirectory.h"
  8. #include "Subfile.h"
  9. #include "SubfileData.h"
  10. #include <EasyLogging++/easylogging++.h>
  11. #include <unistd.h>
  12. #include <locale>
  13. #define ELPP_FEATURE_CRASH_LOG
  14. INITIALIZE_EASYLOGGINGPP
  15. #ifdef WIN32
  16. #define fseek _fseeki64
  17. #define ftell _ftelli64
  18. #endif
  19. extern "C++"
  20. {
  21. namespace LOTRO_DAT {
  22. DatFile::DatFile() {
  23. dat_state_ = CLOSED;
  24. root_directory_ = nullptr;
  25. file_handler_ = nullptr;
  26. free_buffered_size_ = 0;
  27. el::Configurations defaultConf;
  28. defaultConf.setToDefault();
  29. defaultConf.setGlobally(el::ConfigurationType::Format,
  30. "%datetime %level %fbase (line %line) : %msg (function: %func)");
  31. defaultConf.setGlobally(el::ConfigurationType::ToFile, "true");
  32. defaultConf.setGlobally(el::ConfigurationType::Filename, "dat_library.log");
  33. defaultConf.setGlobally(el::ConfigurationType::ToStandardOutput, "false");
  34. defaultConf.setGlobally(el::ConfigurationType::PerformanceTracking, "true");
  35. defaultConf.setGlobally(el::ConfigurationType::MaxLogFileSize, "5242880"); // 5MB
  36. defaultConf.setGlobally(el::ConfigurationType::LogFlushThreshold, "1"); // Flush after every one log
  37. defaultConf.set(el::Level::Debug, el::ConfigurationType::Enabled, "false");
  38. defaultConf.set(el::Level::Debug, el::ConfigurationType::Filename, "dat_library_debug.log");
  39. el::Loggers::reconfigureAllLoggers(defaultConf);
  40. LOG(INFO) << "==================================================================";
  41. LOG(INFO) << "Starting new DatFile class instance";
  42. }
  43. DAT_RESULT DatFile::InitDatFile(const std::string &filename, int dat_id) {
  44. LOG(DEBUG) << "Started initialisation of DatFile " << filename;
  45. if (dat_state_ != CLOSED && filename == filename_) {
  46. LOG(DEBUG) << "Trying to reopen the same file: " << filename << ". Doing nothing.";
  47. return SUCCESS;
  48. }
  49. if (dat_state_ != CLOSED && filename != filename_) {
  50. LOG(DEBUG) << "DatFile wasn't closed yet. Closing in order to reopen.";
  51. if (CloseDatFile() != SUCCESS) {
  52. LOG(ERROR) << "Unable to perform CloseDatFile()! Aborting initialization!";
  53. return FAILED;
  54. }
  55. }
  56. dat_id_ = dat_id;
  57. dat_state_ = CLOSED;
  58. current_locale_ = ORIGINAL;
  59. root_directory_ = nullptr;
  60. file_handler_ = nullptr;
  61. free_buffered_size_ = 0;
  62. filename_ = "none";
  63. DAT_RESULT result;
  64. DAT_RESULT return_value = SUCCESS;
  65. result = OpenDatFile(filename.c_str());
  66. if (result != SUCCESS) {
  67. LOG(ERROR) << "Unable to perform opening file. Aborting.";
  68. CloseDatFile();
  69. return result;
  70. }
  71. return_value = std::max(return_value, result);
  72. result = ReadSuperBlock();
  73. if (result <= 0) {
  74. LOG(ERROR) << "Unable to read super block. Aborting.";
  75. CloseDatFile();
  76. return result;
  77. }
  78. return_value = std::max(return_value, result);
  79. result = MakeDirectories();
  80. if (result <= 0) {
  81. LOG(ERROR) << "Unable to make directories. Aborting.";
  82. CloseDatFile();
  83. return result;
  84. }
  85. return_value = std::max(return_value, result);
  86. result = MakeDictionary();
  87. if (result <= 0) {
  88. LOG(ERROR) << "Unable to make dictionary. Aborting.";
  89. CloseDatFile();
  90. return result;
  91. }
  92. return_value = std::max(return_value, result);
  93. result = InitLocales();
  94. if (result <= 0) {
  95. LOG(ERROR) << "Unable to initialize locales. Aborting.";
  96. CloseDatFile();
  97. return result;
  98. }
  99. return_value = std::max(return_value, result);
  100. LOG(INFO) << "File " << filename << " opened successfully!";
  101. filename_ = filename;
  102. dat_state_ = READY;
  103. LOG(INFO) << "Making last preparations...";
  104. return_value = std::max(return_value, result);
  105. if (return_value >= 2) {
  106. LOG(WARNING) << "Dat file could be corrupted. Trying to delete corrupted dictionary rows";
  107. if (RepairDatFile() != SUCCESS)
  108. return CRITICAL_DAT_ERROR;
  109. }
  110. LOG(INFO) << "Preparations made successfully! Init return value = " << return_value;
  111. return return_value;
  112. }
  113. DAT_STATE DatFile::DatFileState() const {
  114. return dat_state_;
  115. }
  116. DatFile::~DatFile() {
  117. CloseDatFile();
  118. }
  119. /// Extracts file with file_id.
  120. /// If path is undefined then it will be recognised as current working directory
  121. /// Output file path consists of "path + file_id + file_extension";
  122. /// NOTICE: The directory, mentioned in "std::string path" variable SHOULD BE ALREADY CREATED;
  123. /// Otherwise DatException() will be thrown.
  124. /// Returns true, if file was successfully extracted;
  125. /// Throws DatException() if undefined behaviour happened
  126. DAT_RESULT DatFile::ExtractFile(long long file_id, const std::string &path) {
  127. LOG(DEBUG) << "Extracting file " << file_id << " to path " << path;
  128. if (dat_state_ < READY) {
  129. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  130. return INCORRECT_STATE_ERROR;
  131. }
  132. BinaryData file_data = GetFileData(dictionary_[file_id], 8);
  133. if (file_data.size() == 0) {
  134. LOG(ERROR) << "File data is empty. Aborting extraction.";
  135. return NO_FILE_ERROR;
  136. }
  137. SubfileData export_data = dictionary_[file_id]->PrepareForExport(file_data);
  138. if (export_data.Empty()) {
  139. LOG(ERROR) << "Export data is empty. Aborting extraction.";
  140. return NO_FILE_ERROR;
  141. }
  142. if (export_data.binary_data.WriteToFile(path + export_data.options["ext"].as<std::string>()) != SUCCESS) {
  143. LOG(ERROR) << "Cannot write to file.";
  144. return WRITE_TO_FILE_ERROR;
  145. }
  146. LOG(DEBUG) << "File " << file_id << " extracted successfully";
  147. return SUCCESS;
  148. }
  149. /// Extracts file with file_id to database "db".
  150. /// DATABASE SHOULD BE ALREADY CREATED; Otherwise DatException will be called.
  151. /// NOTICE: The directory, mentioned in "std::string path" variable SHOULD BE ALREADY CREATED;
  152. /// Otherwise DatException() will be thrown.
  153. /// Returns true, if file was successfully extracted;
  154. /// Throws DatException() if undefined behaviour happened
  155. DAT_RESULT DatFile::ExtractFile(long long file_id, Database *db) {
  156. LOG(DEBUG) << "Extracting file " << file_id << " to database.";
  157. if (dat_state_ < READY) {
  158. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  159. return INCORRECT_STATE_ERROR;
  160. }
  161. BinaryData file_data;
  162. try {
  163. file_data = GetFileData(dictionary_[file_id], 8);
  164. } catch (std::exception &e) {
  165. LOG(ERROR) << "Caught" << e.what() << " exception.";
  166. return FAILED;
  167. }
  168. SubfileData export_data;
  169. try {
  170. export_data = dictionary_[file_id]->PrepareForExport(file_data);
  171. export_data.options["did"] = dat_id_;
  172. } catch (std::exception &e) {
  173. LOG(ERROR) << "Caught" << e.what() << " exception.";
  174. return FAILED;
  175. }
  176. if (export_data == SubfileData()) {
  177. LOG(WARNING) << "file with id " << dictionary_[file_id]->file_id() << " is empty. Passing it.";
  178. return SUCCESS;
  179. }
  180. try {
  181. db->PushFile(export_data);
  182. } catch (std::exception &e) {
  183. LOG(ERROR) << "Caught " << e.what() << " exception.";
  184. return FAILED;
  185. }
  186. LOG(DEBUG) << "File " << file_id << " extracted successfully";
  187. return SUCCESS;
  188. }
  189. /// Extracts all files with specific type to "path + type + file_id + file_part + extension" files;
  190. /// If path is undefined then it will be recognised as current working directory
  191. /// NOTICE: The directory, mentioned in "std::string path" variable SHOULD BE ALREADY CREATED;
  192. /// Otherwise DatException() will be thrown.
  193. /// Returns number of successfully extracted files
  194. /// Throws DatException() if undefined behaviour happened
  195. int DatFile::ExtractAllFilesByType(FILE_TYPE type, std::string path) {
  196. LOG(INFO) << "Extracting all files to path " << path;
  197. if (dat_state_ < READY) {
  198. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  199. return INCORRECT_STATE_ERROR;
  200. }
  201. int success = 0;
  202. for (auto i : dictionary_) {
  203. FILE_TYPE file_type = i.second->FileType();
  204. if (file_type == type) {
  205. success += (ExtractFile(i.second->file_id(), (path + std::to_string(i.second->file_id()))) == SUCCESS
  206. ? 1 : 0);
  207. }
  208. }
  209. LOG(INFO) << "Successfully extracted " << success << " files";
  210. return success;
  211. }
  212. /// Extracts all files with specific type to database "db";
  213. /// DATABASE SHOULD BE ALREADY CREATED; Otherwise DatException will be called.
  214. /// Returns number of successfully extracted files
  215. /// Throws DatException() if undefined behaviour happened
  216. int DatFile::ExtractAllFilesByType(FILE_TYPE type, Database *db) {
  217. LOG(INFO) << "Extracting all files to database...";
  218. if (dat_state_ < READY) {
  219. LOG(ERROR) << "Dat state isn't READY. Cannot perform extraction.";
  220. return INCORRECT_STATE_ERROR;
  221. }
  222. int success = 0;
  223. for (auto i : dictionary_) {
  224. FILE_TYPE file_type = i.second->FileType();
  225. if (file_type == type) {
  226. success += (ExtractFile(i.second->file_id(), db) == SUCCESS ? 1 : 0);
  227. }
  228. }
  229. LOG(INFO) << "Extracted " << success << " files";
  230. return success;
  231. }
/// Patches a single subfile of the .dat file from "data" (as produced by a
/// patch database). data.options must carry the target id under key "fid";
/// an optional "cat" key assigns the file category (defaults to 1).
/// Returns SUCCESS, or INCORRECT_STATE_ERROR / NO_FILE_ERROR / FAILED /
/// DAT_PATCH_FILE_ERROR, or a code propagated from ApplyFilePatch().
// TODO: make asserts
DAT_RESULT DatFile::PatchFile(const SubfileData &data) {
    LOG(DEBUG) << "Patching file with id = " << data.options["fid"].as<long long>() << ".";
    if (dat_state_ < READY) {
        LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
        return INCORRECT_STATE_ERROR;
    }
    auto file_id = data.options["fid"].as<long long>();
    if (dictionary_.count(file_id) == 0) {
        LOG(ERROR) << "Cannot patch file - there is no file in dictionary with file_id = " << file_id;
        return NO_FILE_ERROR;
    }
    Subfile *file = dictionary_[file_id];
    // Sanity check: the on-disk header must match the dictionary entry.
    if (!CorrectSubfile(file)) {
        LOG(ERROR) << "Incorrect subfile with id " << file->file_id() << " (headers do not match). Cannot patch it";
        return FAILED;
    }
    // If file has inactive category, then we should set it to patched state in order to commit patch and
    // then in ApplyFilePatch(), if new category is still inactive, return dictionary to its original state;
    // NOTE(review): file id 2013266257 is special-cased throughout the
    // patching code and is excluded from the locale bookkeeping — confirm why.
    if (inactive_categories.count(file->category) != 0 && patch_dict_.count(file_id) != 0 && file_id != 2013266257) {
        dictionary_[file_id]->file_offset_ = patch_dict_[file_id]->file_offset_;
        dictionary_[file_id]->file_size_ = patch_dict_[file_id]->file_size_;
        dictionary_[file_id]->block_size_ = patch_dict_[file_id]->block_size_;
        dictionary_[file_id]->timestamp_ = patch_dict_[file_id]->timestamp_;
        dictionary_[file_id]->version_ = patch_dict_[file_id]->version_;
    }
    // Category comes from the patch data when present, otherwise defaults to 1.
    if (data.options["cat"].IsDefined()) {
        file->category = data.options["cat"].as<long long>();
    } else {
        file->category = 1;
    }
    // Re-read the current on-disk contents; MakeForImport() merges them with
    // the patch payload before the result is written back.
    BinaryData old_data = GetFileData(file);
    if (old_data.Empty()) {
        LOG(ERROR) << "GetFileData returned empty data. Aborting.";
        return DAT_PATCH_FILE_ERROR;
    }
    BinaryData patch_data = file->MakeForImport(old_data, data);
    DAT_RESULT result = ApplyFilePatch(file, patch_data);
    if (result != SUCCESS)
        return result;
    LOG(DEBUG) << "Patched successfully file " << data.options["fid"].as<long long>() << ".";
    return SUCCESS;
}
  275. // TODO: Write description
  276. DAT_RESULT DatFile::PatchAllDatabase(Database *db) {
  277. LOG(INFO) << "Patching all database";
  278. if (dat_state_ < READY) {
  279. LOG(ERROR) << "Dat state isn't READY. Cannot patch.";
  280. return INCORRECT_STATE_ERROR;
  281. }
  282. SubfileData data;
  283. data = db->GetNextFile();
  284. while (!data.Empty()) {
  285. DAT_RESULT result = PatchFile(data);
  286. if (result != SUCCESS)
  287. LOG(ERROR) << "Cannot patch file" << data.options["fid"].as<long long>() << " continuing";
  288. data = db->GetNextFile();
  289. }
  290. LOG(INFO) << "Successfully patched whole database";
  291. return SUCCESS;
  292. }
  293. /// DatFile::WriteUnorderedDictionary(...);
  294. /// Prints list of all found files with some information about them to file.
  295. /// Gets std::string path - path to directory, where the file will be written with name "dict.txt"
  296. DAT_RESULT DatFile::WriteUnorderedDictionary(std::string path) const {
  297. LOG(INFO) << "Writing unordered dictionary to " << path << "dict.txt";
  298. FILE *f = nullptr;
  299. fopen_s(&f, (path + "dict.txt").c_str(), "w");
  300. if (f == nullptr) {
  301. LOG(ERROR) << "Cannot open file " << path + "dict.txt";
  302. return WRITE_TO_FILE_ERROR;
  303. }
  304. fprintf(f, "unk1 file_id offset size1 timestamp version size2 unknown2 type\n");
  305. for (auto i : dictionary_) {
  306. fprintf(f, "%lld %lld %lld %lld %lld %lld %lld %lld %s\n", i.second->unknown1(), i.second->file_id(),
  307. i.second->file_offset(), i.second->file_size(), i.second->timestamp(), i.second->version(),
  308. i.second->block_size(), i.second->unknown2(), i.second->Extension().c_str());
  309. }
  310. fclose(f);
  311. LOG(INFO) << "Unordered dictionary was written successfully to " << path << "dict.txt";
  312. return SUCCESS;
  313. }
  314. /// DatFile::files_number();
  315. /// Returns amount of files, found in dictionaries of DatFile. Some if them may be empty or erased.
  316. long long DatFile::files_number() const {
  317. return dictionary_.size();
  318. }
/// DatFile::GetFileData()
/// Reads subfile "file" out of the .dat file and returns its contents as one
/// contiguous BinaryData, reassembled from the main block and (for fragmented
/// files) every extra fragment listed in the fragment dictionary.
/// "offset" is the number of leading block bytes to skip.
/// Returns BinaryData(0) on failure.
// TODO: ASSERTS
BinaryData DatFile::GetFileData(const Subfile *file, long long int offset) {
    LOG(DEBUG) << "Getting file " << file->file_id() << " data";
    // The 20 bytes located 8 bytes into the block contain the stored file id.
    BinaryData mfile_id(20);
    ReadData(mfile_id, 20, file->file_offset() + 8);
    if (mfile_id.Empty()) {
        LOG(ERROR) << "Error while reading file " << file->file_id() << " header (offset = "
                   << file->file_offset() << "); Aborting.";
        return BinaryData(0);
    }
    // For uncompressed data the id stored in the block must match the
    // dictionary entry; a mismatch indicates a corrupted dictionary row.
    if (!mfile_id.CheckCompression() && file->file_id() != mfile_id.ToNumber<4>(0)) {
        LOG(ERROR) << "Bad DatFile::GetFileData() - file_id in Subfile ("
                   << file->file_id()
                   << ") doesn't match to file_id (" << mfile_id.ToNumber<4>(0) << ")in DatFile.";
        return BinaryData(0);
    }
    BinaryData data((unsigned)(file->file_size() + (8 - offset)));
    // Unfragmented case: the whole file fits inside its single block.
    if (file->block_size() >= file->file_size() + 8) {
        ReadData(data, file->file_size() + (8 - offset), file->file_offset() + offset);
        return data;
    }
    // Fragmented case: the first 4 bytes of the block hold the fragment count.
    BinaryData fragments_count(4);
    ReadData(fragments_count, 4, file->file_offset());
    long long fragments_number = fragments_count.ToNumber<4>(0);
    // The main block holds the leading part of the file; the fragment
    // dictionary (8 bytes per fragment) occupies the tail of the block.
    long long current_block_size = file->block_size() - offset - 8 * fragments_number;
    ReadData(data, current_block_size, file->file_offset() + offset);
    BinaryData FragmentsDictionary(8 * unsigned(fragments_number));
    ReadData(FragmentsDictionary, 8 * unsigned(fragments_number),
             file->file_offset() + file->block_size() - 8 * fragments_number);
    // Each dictionary record is (size, offset); append fragments in order.
    for (long long i = 0; i < fragments_number; i++) {
        long long fragment_size = FragmentsDictionary.ToNumber<4>(8 * i);
        long long fragment_offset = FragmentsDictionary.ToNumber<4>(8 * i + 4);
        ReadData(data, std::min(fragment_size, file->file_size() - current_block_size), fragment_offset,
                 current_block_size);
        current_block_size += fragment_size;
    }
    LOG(DEBUG) << "Successfully got file " << file->file_id() << " data";
    return data;
}
  360. /// DatFile special functions for opening and reading/writing raw data.
  361. /// Shouldn't be used by any external classes except Subfile and Subdirectory.
  362. DAT_RESULT DatFile::OpenDatFile(const char *dat_name) {
  363. LOG(DEBUG) << "Started opening DatFile";
  364. if (dat_state_ != CLOSED) {
  365. CloseDatFile();
  366. }
  367. file_handler_ = fopen(dat_name, "r+b");
  368. if (file_handler_ == nullptr) {
  369. LOG(ERROR) << "Unable to open file " << dat_name;
  370. return NO_FILE_ERROR;
  371. }
  372. fseek(file_handler_, 0, SEEK_END);
  373. file_size_ = ftell(file_handler_);
  374. fseek(file_handler_, 0, SEEK_SET);
  375. dat_state_ = SUCCESS_OPENED;
  376. LOG(DEBUG) << "Successfully opened DatFile";
  377. return SUCCESS;
  378. }
/// Parses the .dat superblock (first 1024 bytes) and validates its magic
/// constants. Offsets read here:
///   0x100 : magic constant, must be 0x4C5000
///   0x140 : magic constant, must be 0x5442
///   0x148 : file size recorded in the header
///   0x14C : version1
///   0x150 : version2
///   0x154 : fragmentation journal offset
///   0x160 : root directory offset
/// Returns SUCCESS, CORRUPTED_FILE_WARNING (recorded size differs from the
/// actual size - recoverable, state still advances), or
/// INCORRECT_STATE_ERROR / INCORRECT_SUPERBLOCK_ERROR.
DAT_RESULT DatFile::ReadSuperBlock() {
    LOG(DEBUG) << "Started reading superblock";
    if (dat_state_ != SUCCESS_OPENED) {
        LOG(ERROR) << "Dat state isn't SUCCESS_OPENED. Cannot perform extraction.";
        return INCORRECT_STATE_ERROR;
    }
    BinaryData data(1024);
    ReadData(data, 1024);
    constant1_ = data.ToNumber<4>(0x100);
    constant2_ = data.ToNumber<4>(0x140);
    version1_ = data.ToNumber<4>(0x14C);
    version2_ = data.ToNumber<4>(0x150);
    fragmentation_journal_offset_ = data.ToNumber<4>(0x154);
    root_directory_offset_ = data.ToNumber<4>(0x160);
    auto size1 = data.ToNumber<4>(0x148);
    if (constant1_ != 0x4C5000) {
        LOG(ERROR) << "variable at position 0x100 is not equal to .dat file constant!";
        return INCORRECT_SUPERBLOCK_ERROR;
    }
    if (constant2_ != 0x5442) {
        LOG(ERROR) << "variable at position 0x140 is not equal to .dat file constant!";
        return INCORRECT_SUPERBLOCK_ERROR;
    }
    // A size mismatch is tolerated: trust the header's size, keep going, but
    // report the file as possibly corrupted to the caller.
    if (file_size_ != size1) {
        LOG(ERROR) << "variable at 0x148 position is not equal to .dat file size!";
        file_size_ = size1;
        dat_state_ = SUCCESS_SUPERBLOCK;
        return CORRUPTED_FILE_WARNING;
    }
    dat_state_ = SUCCESS_SUPERBLOCK;
    LOG(DEBUG) << "Superblock read successfully";
    return SUCCESS;
}
  412. DAT_RESULT DatFile::MakeDirectories() {
  413. LOG(DEBUG) << "Started making directories";
  414. if (dat_state_ != SUCCESS_SUPERBLOCK) {
  415. LOG(ERROR) << "Dat state isn't SUCCESS_SUPERBLOCK. Cannot make directories.";
  416. return INCORRECT_STATE_ERROR;
  417. }
  418. root_directory_ = new SubDirectory((unsigned) root_directory_offset_, this);
  419. dat_state_ = SUCCESS_DIRECTORIES;
  420. LOG(DEBUG) << "Directories made successfully";
  421. return SUCCESS;
  422. }
  423. DAT_RESULT DatFile::MakeDictionary() {
  424. LOG(DEBUG) << "Started making dictionary";
  425. if (dat_state_ != SUCCESS_DIRECTORIES) {
  426. LOG(ERROR) << "Dat state isn't SUCCESS_DIRECTORIES. Cannot make directories.";
  427. return INCORRECT_STATE_ERROR;
  428. }
  429. if (root_directory_ == nullptr) {
  430. LOG(ERROR) << "root_directory is nullptr!!";
  431. return INIT_ERROR;
  432. }
  433. root_directory_->MakeDictionary(dictionary_);
  434. dat_state_ = SUCCESS_DICTIONARY;
  435. LOG(DEBUG) << "Dictionary made successfull";
  436. return SUCCESS;
  437. }
/// Reads "size" bytes from position "offset" of the .dat file into "data",
/// placing them at position "data_offset" inside data's buffer.
/// On any failure "data" is replaced by an empty BinaryData(0) and an error
/// code is returned.
DAT_RESULT DatFile::ReadData(BinaryData &data, long long size, long long offset, long long data_offset) {
    if (dat_state_ == CLOSED) {
        LOG(ERROR) << "Dat state is CLOSED. Cannot read data.";
        data = BinaryData(0);
        return INIT_ERROR;
    }
    // Destination bounds check: the requested bytes must fit in the buffer.
    if (data_offset + size > data.size()) {
        LOG(ERROR) << "Trying to read more than BinaryData size: Reading " << size << " bytes from " << offset
                   << " position.";
        data = BinaryData(0);
        return DAT_READ_ERROR;
    }
    // Source bounds check: do not read past the end of the .dat file.
    if (offset + size > file_size_) {
        LOG(ERROR) << "Trying to read more than DatFile size elapsed: Reading " << size << " bytes from " << offset
                   << " position.";
        data = BinaryData(0);
        return DAT_READ_ERROR;
    }
    fseek(file_handler_, offset, SEEK_SET);
    // NOTE(review): the fread return value is not checked, so a short read
    // would go unnoticed here — confirm whether that is acceptable.
    fread(data.data() + data_offset, unsigned(size), 1, file_handler_);
    return SUCCESS;
}
  460. DAT_RESULT DatFile::WriteData(const BinaryData &data, long long size, long long offset, long long data_offset) {
  461. if (dat_state_ < READY) {
  462. LOG(ERROR) << "Dat state isn't READY. Cannot write data.";
  463. return INCORRECT_STATE_ERROR;
  464. }
  465. fseek(file_handler_, offset, SEEK_SET);
  466. if (data_offset + size > data.size()) {
  467. LOG(ERROR) << "Trying to write more than BinaryData size";
  468. return DAT_WRITE_ERROR;
  469. }
  470. fwrite(data.data() + data_offset, unsigned(size), 1, file_handler_);
  471. return SUCCESS;
  472. }
/// Special functions used by patch process.
/// Shouldn't be used by any external class.

/// Writes the prepared patch bytes "data" for subfile "file" into the .dat
/// file, maintaining the original/patched locale dictionaries
/// (orig_dict_ / patch_dict_) and queueing the header for commit via
/// pending_dictionary_. Relocates the file to the end of the archive when the
/// new data no longer fits its current block.
/// Returns SUCCESS, FAILED (empty patch data) or INCORRECT_PATCH_FILE.
DAT_RESULT DatFile::ApplyFilePatch(Subfile *file, BinaryData &data) {
    LOG(DEBUG) << "Applying " << file->file_id() << " patch.";
//        if (patch_dict_.size() == 0 && pending_dictionary_.size() == 0) {
//            BinaryData nulls(50 * 1024 * 1024);
//            WriteData(nulls, nulls.size(), file_size_);
//            file_size_ += 50 * 1024 * 1024;
//        }
    if (data.Empty()) {
        LOG(ERROR) << "Error caused during making file for import. Cannot patch file " << file->file_id();
        return FAILED;
    }
    auto file_id = file->file_id();
    // NOTE(review): file id 2013266257 is special-cased throughout this
    // function and bypasses the locale dictionaries — confirm its meaning.
    if (current_locale() != PATCHED && file_id != 2013266257) {
        LOG(INFO) << "Changing locale to PATCHED(RU) in order to patch file";
        SetLocale(PATCHED);
    }
    dat_state_ = UPDATED;
    // Remember the pristine header the first time this file is ever patched.
    if (orig_dict_.count(file_id) == 0 && file_id != 2013266257) {
        orig_dict_[file_id] = new Subfile(this, file->MakeHeaderData());
    }
    // Relocate to the end of the archive when the file was never patched
    // before or the new payload no longer fits into its current block.
    if ((patch_dict_.count(file_id) == 0 && file_id != 2013266257) || data.size() > file->block_size()
        || file->file_size() + 8 > file->block_size()) {
        file->file_offset_ = file_size_;
        file->block_size_ = std::max((long long)data.size(), file->block_size_);
        free_buffered_size_ = std::max(0ll, free_buffered_size_ - file->block_size_ - 8);
        AddBufferedSize();
        this->file_size_ += file->block_size_ + 8;
    }
    file->file_size_ = data.size() - 8;
    data.Append(BinaryData::FromNumber<4>(0), 0); // set additional fragments count to zero
    // The payload embeds the file id at offset 8; refuse to write a mismatch.
    if (file_id != data.ToNumber<4>(8)) {
        LOG(ERROR) << "Created data's file_id doesn't match to original! Patch wasn't written to .dat file";
        return INCORRECT_PATCH_FILE;
    }
    WriteData(data, data.size(), file->file_offset());
    patch_dict_.erase(file_id); // drop the stale entry from the patched-locale dictionary
    if (file_id != 2013266257) {
        patch_dict_[file_id] = new Subfile(this, file->MakeHeaderData()); // register the new header
    }
    // If category is forbidden, then return file header data to original state
    if (inactive_categories.count(file->category) != 0) {
        file->file_offset_ = orig_dict_[file_id]->file_offset_;
        file->file_size_ = orig_dict_[file_id]->file_size_;
        file->block_size_ = orig_dict_[file_id]->block_size_;
        file->timestamp_ = orig_dict_[file_id]->timestamp_;
        file->version_ = orig_dict_[file_id]->version_;
    }
    // Keep the category mirrored into both locale dictionaries.
    if (orig_dict_.count(file_id) != 0 && file_id != 2013266257)
        orig_dict_[file_id]->category = file->category;
    if (patch_dict_.count(file_id) != 0 && file_id != 2013266257)
        patch_dict_[file_id]->category = file->category;
    // Applying file info in directory
    pending_dictionary_.insert(file_id);
    LOG(DEBUG) << "Successfully applied file " << file->file_id() << " patch.";
    return SUCCESS;
}
/// Zeroes out every used 32-byte record of the fragmentation journal, which
/// starts 8 bytes past fragmentation_journal_offset_. Scanning stops at the
/// first all-zero record (a failed mid-loop read yields an empty BinaryData
/// and also terminates the loop).
/// Returns SUCCESS, or FAILED if the very first read fails.
DAT_RESULT DatFile::ClearFragmentationJournal() {
    LOG(DEBUG) << "Clearing fragmentation journal";
    long long offset = 0;
    BinaryData data(32);
    DAT_RESULT res = ReadData(data, 32, fragmentation_journal_offset_ + 8 + offset);
    if (res != SUCCESS) {
        LOG(ERROR) << "Error " << res << " while reading data";
        return FAILED;
    }
    BinaryData nulls = BinaryData(32);
    // Overwrite records one by one until an all-zero (unused) record is met.
    while (data != nulls && !data.Empty()) {
        WriteData(nulls, 32, fragmentation_journal_offset_ + 8 + offset);
        offset += 32;
        ReadData(data, 32, fragmentation_journal_offset_ + 8 + offset);
    }
    //fragmentation_journal_.emplace_back(std::make_pair(data.ToNumber<4>(0), data.ToNumber<4>(4)));
    LOG(DEBUG) << "Finished getting fragmentation journal";
    return SUCCESS;
}
  550. DAT_RESULT DatFile::UpdateHeader() {
  551. LOG(DEBUG) << "Updating header";
  552. WriteData(BinaryData::FromNumber<4>(constant1_), 4, 0x100);
  553. WriteData(BinaryData::FromNumber<4>(constant2_), 4, 0x140);
  554. WriteData(BinaryData::FromNumber<4>(file_size_), 4, 0x148);
  555. WriteData(BinaryData::FromNumber<4>(version1_), 4, 0x14C);
  556. WriteData(BinaryData::FromNumber<4>(version2_), 4, 0x150);
  557. WriteData(BinaryData::FromNumber<4>(fragmentation_journal_offset_), 4, 0x154);
  558. WriteData(BinaryData::FromNumber<4>(root_directory_offset_), 4, 0x160);
  559. LOG(DEBUG) << "Finished updating header";
  560. return SUCCESS;
  561. }
  562. DAT_RESULT DatFile::CloseDatFile() {
  563. LOG(INFO) << "Closing DatFile";
  564. if (dat_state_ == CLOSED) {
  565. LOG(INFO) << "DatFile is already closed. Nothing to do";
  566. return SUCCESS;
  567. }
  568. // Commiting changes and updating/writing locales and header info
  569. if (!pending_dictionary_.empty()) {
  570. CommitLocales();
  571. CommitDirectories();
  572. UpdateHeader();
  573. }
  574. //ClearFragmentationJournal();
  575. orig_dict_.clear();
  576. pending_patch_.clear();
  577. current_locale_ = ORIGINAL;
  578. if (file_handler_ != nullptr) {
  579. fclose(file_handler_);
  580. }
  581. delete root_directory_;
  582. dictionary_.clear();
  583. free_buffered_size_ = 0;
  584. truncate64(filename_.c_str(), file_size_);
  585. filename_ = "none";
  586. dat_state_ = CLOSED;
  587. LOG(INFO) << "File closed successfully.";
  588. return SUCCESS;
  589. }
  590. // LOCALE MANAGING SECTION
  591. DAT_RESULT DatFile::InitLocales() {
  592. LOG(INFO) << "Initialising locales...";
  593. BinaryData dicts_data(4);
  594. ReadData(dicts_data, 4, 300);
  595. long long dict_offset = dicts_data.ToNumber<4>(0);
  596. if (dict_offset == 0) {
  597. LOG(INFO) << "Dictionary offset is empty. Passing.";
  598. return SUCCESS;
  599. }
  600. ReadData(dicts_data, 4, dict_offset);
  601. long long dict_size = dicts_data.ToNumber<4>(0);
  602. ReadData(dicts_data, 4, dict_offset + 4);
  603. long long dict_version = dicts_data.ToNumber<4>(0);
  604. LOG(INFO) << "Dictionary size is " << dict_size << ". Version is " << dict_version;
  605. if (dict_version != 100) {
  606. LOG(WARNING) << "DICTIONARY IS OLD!!!";
  607. return SUCCESS;
  608. }
  609. dicts_data = BinaryData((unsigned)dict_size);
  610. ReadData(dicts_data, dict_size, dict_offset + 8);
  611. BinaryData hi_data = dicts_data.CutData(0, 15) + BinaryData("\0", 1);
  612. std::string hi = std::string((char *) (hi_data.data()));
  613. LOG(DEBUG) << "hi info is " << hi;
  614. if (hi != "Hi from Gi1dor!") {
  615. LOG(WARNING) << "Didn't receive 'hi' from Gi1dor... Initialising locale dicts as empty";
  616. LOG(INFO) << "Could't init locales' file... Continuing without them";
  617. return SUCCESS;
  618. }
  619. int offset = 15;
  620. BinaryData current_locale_data = dicts_data.CutData(offset, offset + 4) + BinaryData("\0", 1);
  621. std::string locale((char *) (current_locale_data.data()));
  622. offset += 4;
  623. LOG(DEBUG) << "current locale:" << locale;
  624. if (locale != "PATC" && locale != "ORIG") {
  625. LOG(WARNING) << "Incorrect locale... Initialising locale dicts as empty";
  626. LOG(INFO) << "Could't recognize locale... Continuing without locales";
  627. return SUCCESS;
  628. }
  629. current_locale_ = (locale == "PATC" ? PATCHED : ORIGINAL);
  630. // 15 bytes for "Hi from Gi1dor"
  631. // 4 bytes for LOCALE
  632. // 4 bytes for orig_dict.size()
  633. // (32 + 4) * orig_dict.size() bytes for orig_dict data
  634. // 4 bytes for patch_dict.size()
  635. // (32 + 4) * patch_dict.size() bytes for patch_dict data
  636. // 4 bytes for inactive_categories dict
  637. // 4 * inactive_categories.size() bytes for inactive_categories data
  638. size_t orig_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
  639. offset += 4;
  640. for (size_t i = 0; i < orig_dict_size; i++) {
  641. auto file = new Subfile(this, dicts_data.CutData(offset, offset + 32));
  642. orig_dict_[file->file_id()] = file;
  643. offset += 32;
  644. orig_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
  645. offset += 4;
  646. if (orig_dict_[file->file_id()]->category == 0)
  647. LOG(DEBUG) << "file category is undefined (0)!";
  648. }
  649. size_t patch_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
  650. offset += 4;
  651. for (size_t i = 0; i < patch_dict_size; i++) {
  652. auto file = new Subfile(this, dicts_data.CutData(offset, offset + 32));
  653. patch_dict_[file->file_id()] = file;
  654. offset += 32;
  655. patch_dict_[file->file_id()]->category = dicts_data.ToNumber<4>(offset);
  656. offset += 4;
  657. if (patch_dict_[file->file_id()]->category == 0)
  658. LOG(DEBUG) << "file category is undefined (0)!";
  659. }
  660. size_t active_patches_dict_size = size_t(dicts_data.CutData(offset, offset + 4).ToNumber<4>(0));
  661. offset += 4;
  662. for (size_t i = 0; i < active_patches_dict_size; i++) {
  663. inactive_categories.insert(dicts_data.ToNumber<4>(offset));
  664. offset += 4;
  665. }
  666. LOG(INFO) << "There are " << patch_dict_.size() << " files in patch locale dictionary";
  667. LOG(INFO) << "There are " << orig_dict_.size() << " files in original locale dictionary";
  668. std::string inactive_cat_s;
  669. for (auto i : inactive_categories) {
  670. inactive_cat_s += std::to_string(i) + " ";
  671. }
  672. LOG(INFO) << "Unactive patches now: " << inactive_cat_s;
  673. LOG(INFO) << "Finished initialising locales";
  674. return SUCCESS;
  675. }
  676. std::map<long long, Subfile *> *DatFile::GetLocaleDictReference(LOCALE locale) {
  677. switch (locale) {
  678. case PATCHED:
  679. return &patch_dict_;
  680. case ORIGINAL:
  681. return &orig_dict_;
  682. default:
  683. LOG(ERROR) << "Unknown locale! Returning original";
  684. return &orig_dict_;
  685. }
  686. }
  687. bool DatFile::CorrectSubfile(Subfile *file) {
  688. BinaryData mfile_id(20);
  689. ReadData(mfile_id, 20, file->file_offset() + 8);
  690. if (mfile_id.Empty())
  691. return false;
  692. return mfile_id.CheckCompression() || file->file_id() == mfile_id.ToNumber<4>(0);
  693. }
  694. DAT_RESULT DatFile::RepairDatFile() {
  695. for (auto file : dictionary_) {
  696. auto subfile = file.second;
  697. auto file_id = file.first;
  698. if (CorrectSubfile(subfile))
  699. continue;
  700. if (orig_dict_.count(file_id) == 0 || subfile->file_offset() == orig_dict_[file_id]->file_offset())
  701. return CRITICAL_DAT_ERROR;
  702. dictionary_[file_id]->file_offset_ = orig_dict_[file_id]->file_offset_;
  703. dictionary_[file_id]->file_size_ = orig_dict_[file_id]->file_size_;
  704. dictionary_[file_id]->block_size_ = orig_dict_[file_id]->block_size_;
  705. dictionary_[file_id]->timestamp_ = orig_dict_[file_id]->timestamp_;
  706. dictionary_[file_id]->version_ = orig_dict_[file_id]->version_;
  707. patch_dict_.erase(file_id);
  708. orig_dict_.erase(file_id);
  709. }
  710. return SUCCESS;
  711. }
  712. DAT_RESULT DatFile::SetLocale(LOCALE locale) {
  713. LOG(INFO) << "Setting locale to " << (locale == PATCHED ? " PATCHED" : " ORIGINAL");
  714. if (dat_state_ < READY) {
  715. LOG(ERROR) << "Dat state isn't READY. Cannot set locale.";
  716. return INCORRECT_STATE_ERROR;
  717. }
  718. if (current_locale_ == locale) {
  719. return SUCCESS;
  720. }
  721. dat_state_ = UPDATED;
  722. auto dict = GetLocaleDictReference(locale);
  723. for (auto file : *dict) {
  724. if (dictionary_[file.first] == nullptr) {
  725. LOG(WARNING) << "In locale dictionary there is file with file_id = " << file.first
  726. << "which is not in .dat file! Passing it and removing from locale dictionary";
  727. dict->erase(file.first);
  728. continue;
  729. }
  730. if (dictionary_[file.first]->MakeHeaderData().CutData(8, 16) ==
  731. file.second->MakeHeaderData().CutData(8, 16) ||
  732. inactive_categories.count(orig_dict_[file.first]->category) != 0)
  733. continue;
  734. long long file_id = file.first;
  735. Subfile *new_file = file.second;
  736. dictionary_[file_id]->file_offset_ = new_file->file_offset_;
  737. dictionary_[file_id]->file_size_ = new_file->file_size_;
  738. dictionary_[file_id]->block_size_ = new_file->block_size_;
  739. dictionary_[file_id]->timestamp_ = new_file->timestamp_;
  740. dictionary_[file_id]->version_ = new_file->version_;
  741. pending_dictionary_.insert(file_id);
  742. dat_state_ = UPDATED;
  743. }
  744. current_locale_ = locale;
  745. LOG(INFO) << "Locale set successfull";
  746. return SUCCESS;
  747. }
  748. bool DatFile::CheckIfUpdatedByGame() {
  749. LOG(INFO) << "Checking if DatFile was updated by LotRO";
  750. if (!pending_patch_.empty())
  751. return true;
  752. if (current_locale_ == ORIGINAL)
  753. return false;
  754. bool updated = false;
  755. for (auto i : dictionary_) {
  756. long long file_id = i.first;
  757. Subfile *subfile = i.second;
  758. if (inactive_categories.count(subfile->category) > 0)
  759. continue;
  760. if (patch_dict_.count(file_id) > 0
  761. && (subfile->file_size() != patch_dict_[file_id]->file_size()
  762. || subfile->file_offset() != patch_dict_[file_id]->file_offset()
  763. || subfile->block_size() != patch_dict_[file_id]->block_size())) {
  764. orig_dict_.erase(file_id);
  765. patch_dict_.erase(file_id);
  766. pending_patch_.insert(file_id);
  767. updated = true;
  768. dat_state_ = UPDATED;
  769. }
  770. }
  771. LOG(INFO) << "Dat file " << (updated ? "WAS " : "WASN'T ") << "updated by game.";
  772. return updated;
  773. }
  774. DAT_RESULT DatFile::RepairPatches(Database *db) {
  775. LOG(INFO) << "Repairing patches";
  776. SubfileData data;
  777. data = db->GetNextFile();
  778. while (!data.Empty()) {
  779. if (pending_patch_.count(data.options["fid"].as<long long>()) > 0) {
  780. PatchFile(data);
  781. }
  782. data = db->GetNextFile();
  783. }
  784. LOG(INFO) << "Successfully repaired with database";
  785. return SUCCESS;
  786. }
// Clears the pending-patch queue once RepairPatches() has been run.
// Always returns SUCCESS.
DAT_RESULT DatFile::FinishRepairingPatches() {
    LOG(INFO) << "Finishing repairing patches";
    pending_patch_.clear();
    return SUCCESS;
}
  792. LOCALE DatFile::current_locale() {
  793. if (dat_state_ < READY) {
  794. LOG(ERROR) << "dat_file is in incorrect state!";
  795. return ORIGINAL;
  796. }
  797. if (current_locale_ != PATCHED && current_locale_ != ORIGINAL) {
  798. LOG(ERROR) << "locale has incorrect value. Setting it to original";
  799. current_locale_ = ORIGINAL;
  800. }
  801. return current_locale_;
  802. }
// Serialises both locale dictionaries, the active-locale tag and the
// inactive-category set into one blob and appends it at the end of the .dat
// file, in the exact layout InitLocales() reads back:
//   at file_size_:      [4] blob size  [4] version (100)  [blob]
//   at offset 300:      [4] pointer to the blob (file_size_ before append)
// The running `current_size` cursor must advance in lock-step with every
// Append below — do not reorder these statements.
DAT_RESULT DatFile::CommitLocales() {
    LOG(INFO) << "Committing locales...";
    // 15 bytes for "Hi from Gi1dor"
    // 4 bytes for LOCALE
    // 4 bytes for orig_dict.size()
    // (32 + 4) * orig_dict.size() bytes for orig_dict data
    // 4 bytes for patch_dict.size()
    // (32 + 4) * patch_dict.size() bytes for patch_dict data
    // 4 bytes for inactive_categories list
    // 4 * inactive_categories.size() bytes for inactive_categories data
    // NOTE(review): the leading "14 +" is never written to by the appends
    // below — presumably deliberate slack; confirm before removing it.
    BinaryData binary_data = BinaryData(14 + 15 + 4
                                        + 4 + (32 + 4) * orig_dict_.size()
                                        + 4 + (32 + 4) * patch_dict_.size()
                                        + 4 + 4 * inactive_categories.size());
    size_t current_size = 0;
    // Magic greeting the reader validates before trusting the blob.
    binary_data.Append(BinaryData("Hi from Gi1dor!", 15), current_size);
    current_size += 15;
    // 4-char locale tag: "ORIG" or "PATC".
    binary_data.Append(BinaryData((current_locale_ == ORIGINAL ? "ORIG" : "PATC"), 4), current_size);
    current_size += 4;
    binary_data.Append(BinaryData::FromNumber<4>(orig_dict_.size()), current_size);
    current_size += 4;
    for (auto file : orig_dict_) {
        // Each entry: 32-byte subfile header followed by its 4-byte category.
        binary_data.Append(file.second->MakeHeaderData(), current_size);
        current_size += 32;
        binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
        current_size += 4;
    }
    binary_data.Append(BinaryData::FromNumber<4>(patch_dict_.size()), current_size);
    current_size += 4;
    for (auto file : patch_dict_) {
        binary_data.Append(file.second->MakeHeaderData(), current_size);
        current_size += 32;
        binary_data.Append(BinaryData::FromNumber<4>(file.second->category), current_size);
        current_size += 4;
    }
    binary_data.Append(BinaryData::FromNumber<4>(inactive_categories.size()), current_size);
    current_size += 4;
    for (auto patch_id : inactive_categories) {
        binary_data.Append(BinaryData::FromNumber<4>(patch_id), current_size);
        current_size += 4;
    }
    // Write trailer + blob, then point offset 300 at it and grow file_size_.
    WriteData(BinaryData::FromNumber<4>(binary_data.size()), 4, file_size_);
    WriteData(BinaryData::FromNumber<4>(100), 4, file_size_ + 4);
    WriteData(binary_data, binary_data.size(), file_size_ + 8);
    WriteData(BinaryData::FromNumber<4>(file_size_), 4, 300);
    file_size_ += binary_data.size() + 8;
    LOG(INFO) << "Locales commited successfully";
    return SUCCESS;
}
  852. DAT_RESULT DatFile::EnableCategory(int category) {
  853. LOG(INFO) << "Enabling category " << category;
  854. if (inactive_categories.count(category) == 0)
  855. return SUCCESS;
  856. inactive_categories.erase(category);
  857. dat_state_ = UPDATED;
  858. for (auto file : dictionary_) {
  859. auto file_id = file.first;
  860. if (patch_dict_.count(file_id) > 0 && patch_dict_[file_id]->category == category) {
  861. file.second->file_offset_ = patch_dict_[file_id]->file_offset_;
  862. file.second->file_size_ = patch_dict_[file_id]->file_size_;
  863. file.second->block_size_ = patch_dict_[file_id]->block_size_;
  864. file.second->timestamp_ = patch_dict_[file_id]->timestamp_;
  865. file.second->version_ = patch_dict_[file_id]->version_;
  866. pending_dictionary_.insert(file_id);
  867. }
  868. }
  869. LOG(INFO) << "Category " << category << " enabled successfully";
  870. return SUCCESS;
  871. }
  872. DAT_RESULT DatFile::DisableCategory(int category) {
  873. LOG(INFO) << "Disabling category " << category;
  874. if (inactive_categories.count(category) != 0)
  875. return SUCCESS;
  876. inactive_categories.insert(category);
  877. dat_state_ = UPDATED;
  878. for (auto file : dictionary_) {
  879. auto file_id = file.first;
  880. if (orig_dict_.count(file_id) && orig_dict_[file_id]->category == category) {
  881. file.second->file_offset_ = orig_dict_[file_id]->file_offset_;
  882. file.second->file_size_ = orig_dict_[file_id]->file_size_;
  883. file.second->block_size_ = orig_dict_[file_id]->block_size_;
  884. file.second->timestamp_ = orig_dict_[file_id]->timestamp_;
  885. file.second->version_ = orig_dict_[file_id]->version_;
  886. pending_dictionary_.insert(file_id);
  887. }
  888. }
  889. LOG(INFO) << "Category " << category << " disabled successfully";
  890. return SUCCESS;
  891. }
// Read-only accessor for the set of currently deactivated patch categories.
const std::set<long long> &DatFile::GetInactiveCategoriesList() {
    return inactive_categories;
}
  895. bool DatFile::CheckIfNotPatched() {
  896. LOG(INFO) << "DatFile " << (patch_dict_.empty() ? "HASN'T " : "HAS already")
  897. << " been patched by LEGACY launcher!";
  898. return patch_dict_.empty();
  899. }
  900. bool DatFile::CheckIfPatchedByOldLauncher() {
  901. LOG(INFO) << "DatFile " << (dictionary_.count(620750000) == 0 ? "HASN'T " : "HAS already")
  902. << " been patched by OLD LAUNCHER!";
  903. return dictionary_.count(620750000) > 0;
  904. }
// Read-only accessor for the path/name this DatFile was opened with.
const std::string &DatFile::filename() const {
    return filename_;
}
  908. DAT_RESULT DatFile::CommitDirectories() {
  909. // for (auto i : dictionary_) {
  910. // if (i.second == nullptr) {
  911. // LOG(WARNING) << "WHAT?? " << i.first;
  912. // continue;
  913. // }
  914. // //i.second->block_size_ = 8;
  915. // //WriteData(i.second->MakeHeaderData(), 32, i.second->dictionary_offset());
  916. // }
  917. for (auto file_id : pending_dictionary_) {
  918. if (dictionary_[file_id] == nullptr)
  919. continue;
  920. WriteData(dictionary_[file_id]->MakeHeaderData(), 32, dictionary_[file_id]->dictionary_offset());
  921. }
  922. pending_dictionary_.clear();
  923. return SUCCESS;
  924. }
  925. void DatFile::AddBufferedSize() {
  926. if (free_buffered_size_ >= MIN_BUFFERED_SIZE)
  927. return;
  928. BinaryData nulls(MAX_BUFFERED_SIZE);
  929. WriteData(nulls, MAX_BUFFERED_SIZE, file_size_);
  930. free_buffered_size_ = MAX_BUFFERED_SIZE;
  931. }
  932. }
  933. }