@@ -5,14 +5,14 @@
 #include "DatFile.h"
 #include "BinaryData.h"

-#include "DatException.h"
 #include "SubDirectory.h"
 #include "Subfile.h"
 #include "SubfileData.h"

 #include <EasyLogging++/easylogging++.h>
 #include <unistd.h>
-
+#include <algorithm>
+#include <iterator>
 #include <locale>

 #define ELPP_FEATURE_CRASH_LOG
@@ -33,6 +33,10 @@ namespace LOTRO_DAT {
         file_handler_ = nullptr;
         free_buffered_size_ = 0;

+        orig_dict_.clear();
+        patch_dict_.clear();
+        dictionary_.clear();
+
         el::Configurations defaultConf;
         defaultConf.setToDefault();
         defaultConf.setGlobally(el::ConfigurationType::Format,
@@ -125,6 +129,8 @@ namespace LOTRO_DAT {
         LOG(INFO) << "Making last preparations...";
         return_value = std::max(return_value, result);

+        CheckIfUpdatedByGame();
+
         if (return_value >= 2) {
             LOG(WARNING) << "Dat file could be corrupted. Trying to delete corrupted dictionary rows";
             if (RepairDatFile() != SUCCESS)
@@ -143,14 +149,6 @@ namespace LOTRO_DAT {
         CloseDatFile();
     }

-    /// Extracts file with file_id.
-    /// If path is undefined then it will be recognised as current working directory
-    /// Output file path consists of "path + file_id + file_extension";
-    /// NOTICE: The directory, mentioned in "std::string path" variable SHOULD BE ALREADY CREATED;
-    /// Otherwise DatException() will be thrown.
-    /// Returns true, if file was successfully extracted;
-    /// Throws DatException() if undefined behaviour happened
-
     DAT_RESULT DatFile::ExtractFile(long long file_id, const std::string &path) {
         LOG(DEBUG) << "Extracting file " << file_id << " to path " << path;
         if (dat_state_ < READY) {
@@ -179,13 +177,6 @@ namespace LOTRO_DAT {
         return SUCCESS;
     }

-    /// Extracts file with file_id to database "db".
-    /// DATABASE SHOULD BE ALREADY CREATED; Otherwise DatException will be called.
-    /// NOTICE: The directory, mentioned in "std::string path" variable SHOULD BE ALREADY CREATED;
-    /// Otherwise DatException() will be thrown.
-    /// Returns true, if file was successfully extracted;
-    /// Throws DatException() if undefined behaviour happened
-
     DAT_RESULT DatFile::ExtractFile(long long file_id, Database *db) {
         LOG(DEBUG) << "Extracting file " << file_id << " to database.";

@@ -228,13 +219,6 @@ namespace LOTRO_DAT {
         return SUCCESS;
     }

-    /// Extracts all files with specific type to "path + type + file_id + file_part + extension" files;
-    /// If path is undefined then it will be recognised as current working directory
-    /// NOTICE: The directory, mentioned in "std::string path" variable SHOULD BE ALREADY CREATED;
-    /// Otherwise DatException() will be thrown.
-    /// Returns number of successfully extracted files
-    /// Throws DatException() if undefined behaviour happened
-
     int DatFile::ExtractAllFilesByType(FILE_TYPE type, std::string path) {
         LOG(INFO) << "Extracting all files to path " << path;
         if (dat_state_ < READY) {
@@ -254,11 +238,6 @@ namespace LOTRO_DAT {
         return success;
     }

-    /// Extracts all files with specific type to database "db";
-    /// DATABASE SHOULD BE ALREADY CREATED; Otherwise DatException will be called.
-    /// Returns number of successfully extracted files
-    /// Throws DatException() if undefined behaviour happened
-
     int DatFile::ExtractAllFilesByType(FILE_TYPE type, Database *db) {
         LOG(INFO) << "Extracting all files to database...";

@@ -278,7 +257,6 @@ namespace LOTRO_DAT {
         return success;
     }

-    // TODO: Write description and make asserts
     DAT_RESULT DatFile::PatchFile(const SubfileData &data) {
         LOG(DEBUG) << "Patching file with id = " << data.options["fid"].as<long long>() << ".";

@@ -304,11 +282,7 @@ namespace LOTRO_DAT {
         // then in ApplyFilePatch(), if new category is still inactive, return dictionary to its original state;

         if (inactive_categories.count(file->category) != 0 && patch_dict_.count(file_id) != 0 && file_id != 2013266257) {
-            dictionary_[file_id]->file_offset_ = patch_dict_[file_id]->file_offset_;
-            dictionary_[file_id]->file_size_ = patch_dict_[file_id]->file_size_;
-            dictionary_[file_id]->block_size_ = patch_dict_[file_id]->block_size_;
-            dictionary_[file_id]->timestamp_ = patch_dict_[file_id]->timestamp_;
-            dictionary_[file_id]->version_ = patch_dict_[file_id]->version_;
+            *dictionary_[file_id] = *patch_dict_[file_id];
         }

         if (data.options["cat"].IsDefined()) {
@@ -327,12 +301,13 @@ namespace LOTRO_DAT {
         DAT_RESULT result = ApplyFilePatch(file, patch_data);
         if (result != SUCCESS)
             return result;
+        file->timestamp_ = std::time(0);
+        patch_dict_[file->file_id_]->timestamp_ = file->timestamp_;

         LOG(DEBUG) << "Patched successfully file " << data.options["fid"].as<long long>() << ".";
         return SUCCESS;
     }

-    // TODO: Write description
     DAT_RESULT DatFile::PatchAllDatabase(Database *db) {
         LOG(INFO) << "Patching all database";
         if (dat_state_ < READY) {
@@ -353,10 +328,6 @@ namespace LOTRO_DAT {
         return SUCCESS;
     }

-    /// DatFile::WriteUnorderedDictionary(...);
-    /// Prints list of all found files with some information about them to file.
-    /// Gets std::string path - path to directory, where the file will be written with name "dict.txt"
-
     DAT_RESULT DatFile::WriteUnorderedDictionary(std::string path) const {
         LOG(INFO) << "Writing unordered dictionary to " << path << "dict.txt";
         FILE *f = nullptr;
@@ -378,16 +349,10 @@ namespace LOTRO_DAT {
         return SUCCESS;
     }

-    /// DatFile::files_number();
-    /// Returns amount of files, found in dictionaries of DatFile. Some if them may be empty or erased.
-
     long long DatFile::files_number() const {
         return dictionary_.size();
     }

-    /// DatFile::GetFileData()
-    /// Returns BinaryData, which contains of subfile data, made from parts of file in DatFile
-    // TODO: ASSERTS
     BinaryData DatFile::GetFileData(const Subfile *file, long long int offset) {
         LOG(DEBUG) << "Getting file " << file->file_id() << " data";
         BinaryData mfile_id(20);
@@ -436,9 +401,6 @@ namespace LOTRO_DAT {
         return data;
     }

-    /// DatFile special functions for opening and reading/writing raw data.
-    /// Shouldn't be used by any external classes except Subfile and Subdirectory.
-
     DAT_RESULT DatFile::OpenDatFile(const char *dat_name) {
         LOG(DEBUG) << "Started opening DatFile";
         if (dat_state_ != CLOSED) {
@@ -479,6 +441,7 @@ namespace LOTRO_DAT {
         fragmentation_journal_end_ = data.ToNumber<4>(0x158);
         fragmentation_journal_size_ = data.ToNumber<4>(0x15C);
         root_directory_offset_ = data.ToNumber<4>(0x160);
+        free_dat_size_ = data.ToNumber<4>(0x19C);
         auto size1 = data.ToNumber<4>(0x148);

         if (constant1_ != 0x4C5000) {
@@ -492,9 +455,9 @@ namespace LOTRO_DAT {

         if (file_size_ != size1) {
             LOG(ERROR) << "variable at 0x148 position is not equal to .dat file size!";
-            file_size_ = size1;
+            //file_size_ = size1;
             dat_state_ = SUCCESS_SUPERBLOCK;
-            return CORRUPTED_FILE_WARNING;
+            //return CORRUPTED_FILE_WARNING;
         }

         dat_state_ = SUCCESS_SUPERBLOCK;
@@ -576,9 +539,6 @@ namespace LOTRO_DAT {
         return SUCCESS;
     }

-    /// Special functions used by patch process.
-    /// Shouldn't be used by any external class.
-
     DAT_RESULT DatFile::ApplyFilePatch(Subfile *file, BinaryData &data) {
         LOG(DEBUG) << "Applying " << file->file_id() << " patch.";

@@ -657,27 +617,22 @@ namespace LOTRO_DAT {
         return SUCCESS;
     }

-    DAT_RESULT DatFile::ClearFragmentationJournal() {
-        LOG(DEBUG) << "Clearing fragmentation journal";
+    DAT_RESULT DatFile::ModifyFragmentationJournal() {
+        LOG(DEBUG) << "Modifying fragmentation journal";

-        long long offset = 0;
-        BinaryData data(32);
-        DAT_RESULT res = ReadData(data, 32, fragmentation_journal_offset_ + 8 + offset);
+        long long free_size = 128256;
+        long long free_offset = file_size_;

-        if (res != SUCCESS) {
-            LOG(ERROR) << "Error " << res << " while reading data";
-            return FAILED;
-        }
+        BinaryData nulldata = BinaryData(unsigned(free_size));
+        WriteData(nulldata, nulldata.size(), file_size_);
+        file_size_ += nulldata.size();

-        BinaryData nulls = BinaryData(32);
+        WriteData(BinaryData::FromNumber<4>(free_size), 4, fragmentation_journal_offset_ + 8);
+        WriteData(BinaryData::FromNumber<4>(free_offset), 4, fragmentation_journal_offset_ + 12);

-        while (data != nulls && !data.Empty()) {
-            WriteData(nulls, 32, fragmentation_journal_offset_ + 8 + offset);
-            offset += 32;
-            ReadData(data, 32, fragmentation_journal_offset_ + 8 + offset);
-        }
-        //fragmentation_journal_.emplace_back(std::make_pair(data.ToNumber<4>(0), data.ToNumber<4>(4)));
-        LOG(DEBUG) << "Finished getting fragmentation journal";
+        nulldata = BinaryData(8);
+        WriteData(nulldata, nulldata.size(), fragmentation_journal_offset_ + 16);
+        LOG(DEBUG) << "Finished modifying fragmentation journal";
         return SUCCESS;
     }

@@ -685,13 +640,15 @@ namespace LOTRO_DAT {
         LOG(DEBUG) << "Updating header";
         WriteData(BinaryData::FromNumber<4>(constant1_), 4, 0x100);
         WriteData(BinaryData::FromNumber<4>(constant2_), 4, 0x140);
+        //WriteData(BinaryData::FromNumber<4>( 0 ), 4, 0x144);
         WriteData(BinaryData::FromNumber<4>(file_size_), 4, 0x148);
-        WriteData(BinaryData::FromNumber<4>(version1_), 4, 0x14C);
-        WriteData(BinaryData::FromNumber<4>(version2_), 4, 0x150);
+        WriteData(BinaryData::FromNumber<4>(version1_ ), 4, 0x14C);
+        WriteData(BinaryData::FromNumber<4>(version2_ ), 4, 0x150);
         WriteData(BinaryData::FromNumber<4>(fragmentation_journal_offset_), 4, 0x154);
         WriteData(BinaryData::FromNumber<4>(fragmentation_journal_end_), 4, 0x158);
         WriteData(BinaryData::FromNumber<4>(fragmentation_journal_size_), 4, 0x15C);
         WriteData(BinaryData::FromNumber<4>(root_directory_offset_), 4, 0x160);
+        WriteData(BinaryData::FromNumber<4>(free_dat_size_), 4, 0x19C);
         LOG(DEBUG) << "Finished updating header";
         return SUCCESS;
     }
@@ -705,33 +662,56 @@ namespace LOTRO_DAT {

         // Commiting changes and updating/writing locales and header info

-        if (!pending_dictionary_.empty()) {
+        if (!pending_dictionary_.empty() || dat_state_ == UPDATED) {
             CommitLocales();
             CommitDirectories();
+            ModifyFragmentationJournal();
+            free_dat_size_ = 128254;
             fragmentation_journal_end_ = 0;
-            fragmentation_journal_size_ = 0;
+            fragmentation_journal_size_ = 1;
             UpdateHeader();
         }
-        ClearFragmentationJournal();
-
-        orig_dict_.clear();
-        pending_patch_.clear();

         current_locale_ = ORIGINAL;

         if (file_handler_ != nullptr) {
             fclose(file_handler_);
         }
-
+        SubDirectory::visited_subdirectories_.clear();
         delete root_directory_;
+        truncate64(filename_.c_str(), file_size_);

-        dictionary_.clear();
         free_buffered_size_ = 0;

-        truncate64(filename_.c_str(), file_size_);
         filename_ = "none";

+        orig_dict_.clear();
+        patch_dict_.clear();
+        pending_patch_.clear();
+        inactive_categories.clear();
+
+        file_handler_ = nullptr;
+        root_directory_ = nullptr;
+
+
+        pending_dictionary_.clear();
+        dictionary_.clear();
+
+        constant1_ = 0;
+        constant2_ = 0;
+        file_size_ = 0;
+        version1_ = 0;
+        version2_ = 0;
+        fragmentation_journal_size_ = 0;
+        fragmentation_journal_end_ = 0;
+        root_directory_offset_ = 0;
+        fragmentation_journal_offset_ = 0;
+
         dat_state_ = CLOSED;
+
+        dat_id_ = -1;
+
+
         LOG(INFO) << "File closed successfully.";
         return SUCCESS;
     }
@@ -878,11 +858,7 @@ namespace LOTRO_DAT {
             if (orig_dict_.count(file_id) == 0 || subfile->file_offset() == orig_dict_[file_id]->file_offset())
                 return CRITICAL_DAT_ERROR;

-            dictionary_[file_id]->file_offset_ = orig_dict_[file_id]->file_offset_;
-            dictionary_[file_id]->file_size_ = orig_dict_[file_id]->file_size_;
-            dictionary_[file_id]->block_size_ = orig_dict_[file_id]->block_size_;
-            dictionary_[file_id]->timestamp_ = orig_dict_[file_id]->timestamp_;
-            dictionary_[file_id]->version_ = orig_dict_[file_id]->version_;
+            *dictionary_[file_id] = *orig_dict_[file_id];
             patch_dict_.erase(file_id);
             orig_dict_.erase(file_id);
         }
@@ -903,7 +879,10 @@ namespace LOTRO_DAT {
         dat_state_ = UPDATED;
         auto dict = GetLocaleDictReference(locale);
         for (auto file : *dict) {
-            if (dictionary_[file.first] == nullptr) {
+            if (file.second == nullptr)
+                continue;
+
+            if (dictionary_.count(file.first) == 0) {
                 LOG(WARNING) << "In locale dictionary there is file with file_id = " << file.first
                              << "which is not in .dat file! Passing it and removing from locale dictionary";
                 dict->erase(file.first);
@@ -917,11 +896,7 @@ namespace LOTRO_DAT {
             long long file_id = file.first;
             Subfile *new_file = file.second;

-            dictionary_[file_id]->file_offset_ = new_file->file_offset_;
-            dictionary_[file_id]->file_size_ = new_file->file_size_;
-            dictionary_[file_id]->block_size_ = new_file->block_size_;
-            dictionary_[file_id]->timestamp_ = new_file->timestamp_;
-            dictionary_[file_id]->version_ = new_file->version_;
+            *dictionary_[file_id] = *new_file;

             pending_dictionary_.insert(file_id);
             dat_state_ = UPDATED;
@@ -936,20 +911,16 @@ namespace LOTRO_DAT {
         LOG(INFO) << "Checking if DatFile was updated by LotRO";
         if (!pending_patch_.empty())
             return true;
-        if (current_locale_ == ORIGINAL)
-            return false;

         bool updated = false;

         for (auto i : dictionary_) {
             long long file_id = i.first;
             Subfile *subfile = i.second;
-            if (inactive_categories.count(subfile->category) > 0)
+            if (patch_dict_.count(file_id) == 0)
                 continue;
-            if (patch_dict_.count(file_id) > 0
-                && (subfile->file_size() != patch_dict_[file_id]->file_size()
-                    || subfile->file_offset() != patch_dict_[file_id]->file_offset()
-                    || subfile->block_size() != patch_dict_[file_id]->block_size())) {
+
+            if (*subfile != *patch_dict_[file_id] && *subfile != *orig_dict_[file_id]) {
                 orig_dict_.erase(file_id);
                 patch_dict_.erase(file_id);
                 pending_patch_.insert(file_id);
|
|
|
dat_state_ = UPDATED;
|
|
|
}
|
|
|
}
|
|
|
- LOG(INFO) << "Dat file " << (updated ? "WAS " : "WASN'T ") << "updated by game.";
|
|
|
return updated;
|
|
|
}
|
|
|
|
|
@@ -1082,11 +1052,7 @@ namespace LOTRO_DAT {
         for (auto file : dictionary_) {
             auto file_id = file.first;
             if (patch_dict_.count(file_id) > 0 && patch_dict_[file_id]->category == category) {
-                file.second->file_offset_ = patch_dict_[file_id]->file_offset_;
-                file.second->file_size_ = patch_dict_[file_id]->file_size_;
-                file.second->block_size_ = patch_dict_[file_id]->block_size_;
-                file.second->timestamp_ = patch_dict_[file_id]->timestamp_;
-                file.second->version_ = patch_dict_[file_id]->version_;
+                *file.second = *patch_dict_[file_id];
                 pending_dictionary_.insert(file_id);
             }
         }
@@ -1104,11 +1070,7 @@ namespace LOTRO_DAT {
         for (auto file : dictionary_) {
             auto file_id = file.first;
             if (orig_dict_.count(file_id) && orig_dict_[file_id]->category == category) {
-                file.second->file_offset_ = orig_dict_[file_id]->file_offset_;
-                file.second->file_size_ = orig_dict_[file_id]->file_size_;
-                file.second->block_size_ = orig_dict_[file_id]->block_size_;
-                file.second->timestamp_ = orig_dict_[file_id]->timestamp_;
-                file.second->version_ = orig_dict_[file_id]->version_;
+                *file.second = *orig_dict_[file_id];
                 pending_dictionary_.insert(file_id);
             }
         }
@@ -1137,15 +1099,6 @@ namespace LOTRO_DAT {
     }

     DAT_RESULT DatFile::CommitDirectories() {
-//        for (auto i : dictionary_) {
-//            if (i.second == nullptr) {
-//                LOG(WARNING) << "WHAT?? " << i.first;
-//                continue;
-//            }
-//            //i.second->block_size_ = 8;
-//            //WriteData(i.second->MakeHeaderData(), 32, i.second->dictionary_offset());
-//        }
-
         for (auto file_id : pending_dictionary_) {
             if (dictionary_[file_id] == nullptr)
                 continue;
@@ -1162,5 +1115,72 @@ namespace LOTRO_DAT {
         WriteData(nulls, MAX_BUFFERED_SIZE, file_size_);
         free_buffered_size_ = MAX_BUFFERED_SIZE;
     }
+
+    bool DatFile::CheckIfBackupExists(const std::string &backup_datname) {
+        std::ifstream dst("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
+        return !dst.fail();
+    }
+
+    DAT_RESULT DatFile::RemoveBackup(const std::string &backup_datname) {
+        if (!CheckIfBackupExists(backup_datname))
+            return SUCCESS;
+        if (remove(("DAT_LIBRARY_BACKUP/" + backup_datname).c_str()) == 0)
+            return SUCCESS;
+        return REMOVE_FILE_ERROR;
+    }
+
+    DAT_RESULT DatFile::CreateBackup(const std::string &backup_datname) {
+        auto filename = filename_;
+        auto dat_id = dat_id_;
+        LOG(INFO) << "Creating backup of .dat file " << filename << " to " << backup_datname;
+        LOG(INFO) << " Closing DatFile...";
+        CloseDatFile();
+        LOG(INFO) << " Copying " << filename << " to " << backup_datname;
+        mkdir("DAT_LIBRARY_BACKUP");
+        std::ifstream src(filename, std::ios::binary);
+        std::ofstream dst("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
+
+        std::istreambuf_iterator<char> begin_source(src);
+        std::istreambuf_iterator<char> end_source;
+        std::ostreambuf_iterator<char> begin_dest(dst);
+        std::copy(begin_source, end_source, begin_dest);
+
+        src.close();
+        dst.close();
+
+        LOG(INFO) << " Done copying. Initializing " << filename << " DatFile...";
+        InitDatFile(filename, dat_id);
+        LOG(INFO) << "Backup created successfully!";
+        return SUCCESS;
+    }
+
+    DAT_RESULT DatFile::RestoreFromBackup(const std::string &backup_datname) {
+        auto filename = filename_;
+        auto dat_id = dat_id_;
+        LOG(INFO) << "Restoring .dat file " << filename << " from backup " << backup_datname;
+        LOG(INFO) << " Closing DatFile...";
+        CloseDatFile();
+        LOG(INFO) << " Copying backup " << backup_datname << " to " << filename;
+        mkdir("DAT_LIBRARY_BACKUP");
+        std::ifstream src("DAT_LIBRARY_BACKUP/" + backup_datname, std::ios::binary);
+        if (src.fail()) {
+            LOG(ERROR) << "CANNOT RESTORE FILE FROM BACKUP - no backup found with name " << backup_datname;
+            return NO_BACKUP_ERROR;
+        }
+        std::ofstream dst(filename, std::ios::binary);
+
+        std::istreambuf_iterator<char> begin_source(src);
+        std::istreambuf_iterator<char> end_source;
+        std::ostreambuf_iterator<char> begin_dest(dst);
+        std::copy(begin_source, end_source, begin_dest);
+
+        src.close();
+        dst.close();
+
+        LOG(INFO) << " Done copying. Initializing restored " << filename << " DatFile...";
+        InitDatFile(filename, dat_id);
+        LOG(INFO) << "Restoring .dat file succeeded!";
+        return SUCCESS;
+    }
 }
 }