text
stringlengths
0
357
}
private_key = buf;
free(buf2);
buf2 = dc_strdup(buf);
if (dc_split_armored_data(buf2, &buf2_headerline, NULL, NULL, NULL)
&& strcmp(buf2_headerline, "-----BEGIN PGP PUBLIC KEY BLOCK-----")==0) {
/* This file starts with a Public Key.
* However some programs (Thunderbird/Enigmail) put public and private key
* in the same file, so we check if there is a private key following */
private_key = strstr(buf, "-----BEGIN PGP PRIVATE KEY BLOCK");
if (private_key==NULL) {
continue; /* this is no error but quite normal as we always export the public keys together with the private ones */
}
}
set_default = 1;
if (strstr(dir_entry->d_name, "legacy")!=NULL) {
dc_log_info(context, 0, "Treating \"%s\" as a legacy private key.", path_plus_name);
set_default = 0; /* a key with "legacy" in its name is not made default; this may result in a keychain with _no_ default, however, this is no problem, as this will create a default key later */
}
if (!set_self_key(context, private_key, set_default)) {
continue;
}
imported_cnt++;
}
if (imported_cnt==0) {
dc_log_error(context, 0, "No private keys found in \"%s\".", dir_name);
goto cleanup;
}
cleanup:
if (dir_handle) { closedir(dir_handle); }
free(suffix);
free(path_plus_name);
free(buf);
free(buf2);
return imported_cnt;
}
/*******************************************************************************
* Export backup
******************************************************************************/
/* The FILE_PROGRESS macro reports progress to the UI by emitting
DC_EVENT_IMEX_PROGRESS with the permille of files processed so far.
The value is clamped to the range 10..990 so the UI never shows the
misleading extremes 0% or 100% while the job is still running.
Notes for reviewers:
- Wrapped in do { } while (0) so the expansion behaves as a single
  statement (safe inside an unbraced if/else) and `permille` stays
  scoped to the macro, allowing multiple uses in one caller scope.
- The trailing semicolon is kept on purpose: existing call sites use
  a bare `FILE_PROGRESS` (no semicolon of their own).
- Guards against total_files_cnt==0 to avoid division by zero; in
  that case no progress event is emitted. */
#define FILE_PROGRESS \
	do { \
		processed_files_cnt++; \
		if (total_files_cnt > 0) { \
			int permille = (processed_files_cnt*1000)/total_files_cnt; \
			if (permille < 10)  { permille = 10; } \
			if (permille > 990) { permille = 990; } \
			context->cb(context, DC_EVENT_IMEX_PROGRESS, permille, 0); \
		} \
	} while (0);
static int export_backup(dc_context_t* context, const char* dir)
{
int success = 0;
int closed = 0;
char* dest_pathNfilename = NULL;
dc_sqlite3_t* dest_sql = NULL;
time_t now = time(NULL);
DIR* dir_handle = NULL;
struct dirent* dir_entry = NULL;
int prefix_len = strlen(DC_BAK_PREFIX);
int suffix_len = strlen(DC_BAK_SUFFIX);
char* curr_pathNfilename = NULL;
void* buf = NULL;
size_t buf_bytes = 0;
sqlite3_stmt* stmt = NULL;
int total_files_cnt = 0;
int processed_files_cnt = 0;
int delete_dest_file = 0;
/* get a fine backup file name (the name includes the date so that multiple backup instances are possible)
FIXME: we should write to a temporary file first and rename it on success. this would guarantee the backup is complete. however, currently it is not clear if the import will exist in the long run (it may be replaced by a restore-from-imap)*/
{
struct tm* timeinfo;
char buffer[256];
timeinfo = localtime(&now);
strftime(buffer, 256, DC_BAK_PREFIX "-%Y-%m-%d." DC_BAK_SUFFIX, timeinfo);
if ((dest_pathNfilename=dc_get_fine_pathNfilename(context, dir, buffer))==NULL) {
dc_log_error(context, 0, "Cannot get backup file name.");
goto cleanup;
}
}
/* delete unreferenced files before export */
dc_housekeeping(context);
/* vacuum before export; this fixes VACUUM failures seen after a previous import */
dc_sqlite3_try_execute(context->sql, "VACUUM;");
/* temporary lock and close the source (we just make a copy of the whole file, this is the fastest and easiest approach) */
dc_sqlite3_close(context->sql);
closed = 1;