//! Import data from ZIP archives exported by the export module.
//!
//! Import actions:
//! - AddAsIdentity: create a new identity from the export's key + data
//! - ImportPublicPosts: import only public posts into the current identity (new PostIds)
//! - MergeWithKey: decrypt encrypted posts using provided key, re-encrypt for current identity

use std::io::Read;
use std::path::Path;

use serde::{Deserialize, Serialize};
use tracing::{debug, info, warn};

use crate::blob::BlobStore;
use crate::content::compute_post_id;
use crate::export::{ExportManifest, ExportedPost};
use crate::storage::StoragePool;
use crate::types::{Attachment, NodeId, Post, PostVisibility};

/// What to do with the imported data.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ImportAction {
    /// Create a new identity from the export's key and restore all data.
    AddAsIdentity,
    /// Import public posts into the current identity with new PostIds.
    ImportPublicPosts,
    /// Decrypt with the provided key, re-create posts under current identity.
    MergeWithKey { key_hex: String },
}
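
// With `rename_all = "snake_case"` and serde's default external tagging, an
// action arrives over IPC as e.g. "add_as_identity" or
// {"merge_with_key":{"key_hex":"…"}} — a sketch of the JSON shape, not a spec.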

/// Summary of what an import ZIP contains (shown to user before importing).
#[derive(Debug, Serialize, Deserialize)]
pub struct ImportSummary {
    pub node_id: String,
    pub scope: String,
    pub export_date: u64,
    pub post_count: usize,
    pub blob_count: usize,
    pub has_identity_key: bool,
    pub has_follows: bool,
    pub has_settings: bool,
}

/// Result of an import operation.
#[derive(Debug, Serialize, Deserialize)]
pub struct ImportResult {
    pub posts_imported: usize,
    pub posts_skipped: usize,
    pub blobs_imported: usize,
    pub message: String,
}

/// Read a ZIP and return a summary of its contents (without importing).
pub fn read_import_summary(zip_path: &Path) -> anyhow::Result<ImportSummary> {
    let file = std::fs::File::open(zip_path)?;
    let mut archive = zip::ZipArchive::new(file)?;

    // Read manifest
    let manifest: ExportManifest = {
        let mut entry = archive.by_name("itsgoin-export/manifest.json")?;
        let mut buf = String::new();
        entry.read_to_string(&mut buf)?;
        serde_json::from_str(&buf)?
    };

    let has_key = archive.by_name("itsgoin-export/identity.key").is_ok();
    let has_follows = archive.by_name("itsgoin-export/follows.json").is_ok();
    let has_settings = archive.by_name("itsgoin-export/settings.json").is_ok();

    Ok(ImportSummary {
        node_id: manifest.node_id,
        scope: format!("{:?}", manifest.scope),
        export_date: manifest.export_date,
        post_count: manifest.post_count,
        blob_count: manifest.blob_count,
        has_identity_key: has_key,
        has_follows,
        has_settings,
    })
}
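
// Example (a sketch; the path is hypothetical):
//
//     let summary = read_import_summary(Path::new("backup.zip"))?;
//     println!("{} posts from {}", summary.post_count, summary.node_id);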

/// Parsed data from a ZIP ready for async import.
struct ParsedImport {
    posts: Vec<(Post, PostVisibility, Vec<(Attachment, Vec<u8>)>)>,
    skipped: usize,
}

/// Import public posts from a ZIP into the current identity.
/// Creates new posts with the current node_id as author, preserving original timestamps.
pub async fn import_public_posts(
    zip_path: &Path,
    storage: &StoragePool,
    blob_store: &BlobStore,
    our_node_id: &NodeId,
) -> anyhow::Result<ImportResult> {
    // Phase 1: Read everything from ZIP synchronously (no Send requirement)
    let parsed = {
        let zip_path = zip_path.to_path_buf();
        let our_node_id = *our_node_id;
        tokio::task::spawn_blocking(move || -> anyhow::Result<ParsedImport> {
            let file = std::fs::File::open(&zip_path)?;
            let mut archive = zip::ZipArchive::new(file)?;

            let posts: Vec<ExportedPost> = {
                let mut entry = archive.by_name("itsgoin-export/posts.json")?;
                let mut buf = String::new();
                entry.read_to_string(&mut buf)?;
                serde_json::from_str(&buf)?
            };

            let mut result_posts = Vec::new();
            let mut skipped = 0usize;

            for ep in &posts {
                let vis: PostVisibility =
                    serde_json::from_str(&ep.visibility_json).unwrap_or(PostVisibility::Public);
                if !matches!(vis, PostVisibility::Public) {
                    skipped += 1;
                    continue;
                }

                let attachments: Vec<Attachment> =
                    serde_json::from_str(&ep.attachments_json).unwrap_or_default();
                let new_post = Post {
                    author: our_node_id,
                    content: ep.content.clone(),
                    attachments: attachments.clone(),
                    timestamp_ms: ep.timestamp_ms,
                };

                // Read blob data from archive
                let mut blob_data = Vec::new();
                for att in &attachments {
                    let cid_hex = hex::encode(att.cid);
                    let blob_path = format!("itsgoin-export/blobs/{}", cid_hex);
                    if let Ok(mut blob_entry) = archive.by_name(&blob_path) {
                        let mut data = Vec::new();
                        blob_entry.read_to_end(&mut data)?;
                        blob_data.push((att.clone(), data));
                    }
                }

                result_posts.push((new_post, vis, blob_data));
            }

            Ok(ParsedImport { posts: result_posts, skipped })
        })
        .await??
    };

    // Phase 2: Store to DB + blob store (async — needs storage.get().await)
    let mut imported = 0usize;
    let mut blobs_imported = 0usize;

    info!(post_count = parsed.posts.len(), skipped = parsed.skipped, "Import phase 2: storing to DB");

    // Ensure we follow ourselves so imported posts appear in feed
    {
        let s = storage.get().await;
        let _ = s.add_follow(our_node_id);
    }

    let now = now_ms();

    for (new_post, _vis, blob_data) in &parsed.posts {
        let new_id = compute_post_id(new_post);

        let s = storage.get().await;
        if s.get_post(&new_id).ok().flatten().is_some() {
            drop(s);
            debug!(post = hex::encode(new_id), "Import: skipping duplicate post");
            continue;
        }
        // Store post with intent (matches create_post_with_visibility behavior)
        s.store_post_with_intent(&new_id, new_post, &PostVisibility::Public, &crate::types::VisibilityIntent::Public)?;

        // Store blobs + record them, matching normal post creation
        for (att, data) in blob_data {
            if !blob_store.has(&att.cid) {
                blob_store.store(&att.cid, data)?;
            }
            s.record_blob(&att.cid, &new_id, our_node_id, data.len() as u64, &att.mime_type, att.size_bytes)?;
            let _ = s.pin_blob(&att.cid);
            blobs_imported += 1;
        }

        // Create BlobHeader (matches what engagement/sync expects)
        let header = crate::types::BlobHeader {
            post_id: new_id,
            author: *our_node_id,
            reactions: vec![],
            comments: vec![],
            policy: crate::types::CommentPolicy::default(),
            updated_at: now,
            thread_splits: vec![],
            receipt_slots: vec![],
            comment_slots: vec![],
            prior_author: None,
        };
        let header_json = serde_json::to_string(&header).unwrap_or_default();
        let _ = s.store_blob_header(&new_id, our_node_id, &header_json, now);
        drop(s);

        imported += 1;
        debug!(imported, post = hex::encode(new_id), "Import: stored post");
    }

    info!(imported, skipped = parsed.skipped, blobs = blobs_imported, "Public post import complete");

    Ok(ImportResult {
        posts_imported: imported,
        posts_skipped: parsed.skipped,
        blobs_imported,
        message: format!("Imported {} posts ({} skipped), {} blobs", imported, parsed.skipped, blobs_imported),
    })
}
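
// Sketch of a caller — `pool`, `blobs`, and `me` are hypothetical handles,
// not names defined in this module:
//
//     let result = import_public_posts(Path::new("backup.zip"), &pool, &blobs, &me).await?;
//     info!("{}", result.message);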

/// Import a ZIP as a new identity (create identity subdir, extract everything).
pub fn import_as_identity(
    zip_path: &Path,
    base_dir: &Path,
) -> anyhow::Result<String> {
    let file = std::fs::File::open(zip_path)?;
    let mut archive = zip::ZipArchive::new(file)?;

    // Read manifest
    let manifest: ExportManifest = {
        let mut entry = archive.by_name("itsgoin-export/manifest.json")?;
        let mut buf = String::new();
        entry.read_to_string(&mut buf)?;
        serde_json::from_str(&buf)?
    };

    // Read identity key
    let key_data = {
        let mut entry = archive
            .by_name("itsgoin-export/identity.key")
            .map_err(|_| anyhow::anyhow!("Export doesn't contain an identity key"))?;
        let mut buf = Vec::new();
        entry.read_to_end(&mut buf)?;
        buf
    };

    // Create identity directory
    let id_dir = base_dir.join("identities").join(&manifest.node_id);
    if id_dir.exists() {
        anyhow::bail!("Identity {} already exists", &manifest.node_id[..12]);
    }
    std::fs::create_dir_all(&id_dir)?;

    // Write identity key
    let key_path = id_dir.join("identity.key");
    std::fs::write(&key_path, &key_data)?;
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        let _ = std::fs::set_permissions(&key_path, std::fs::Permissions::from_mode(0o600));
    }

    // Write metadata
    let now = now_ms();
    let meta = serde_json::json!({
        "display_name": format!("Imported {}", &manifest.node_id[..12]),
        "created_at": now,
        "last_used_at": now,
    });
    std::fs::write(id_dir.join("meta.json"), serde_json::to_string_pretty(&meta)?)?;

    info!(identity = manifest.node_id, "Imported identity from ZIP — switch to it to restore data");

    // Note: posts, blobs, follows, settings will be restored when the user switches to this
    // identity and opens the node. The full DB restore could be done here, but it's simpler
    // to let the user switch and then import posts via the import wizard.

    Ok(manifest.node_id)
}
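
// Resulting layout (a sketch of what the code above writes):
//
//     <base_dir>/identities/<node_id>/identity.key   (mode 0600 on unix)
//     <base_dir>/identities/<node_id>/meta.json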

/// Merge posts from another identity into the current one using the original key for decryption.
/// Decrypts encrypted posts, creates new posts under the current identity, preserves timestamps.
/// BlobHeader gets `prior_author` set for provenance.
pub async fn merge_with_key(
    zip_path: &Path,
    original_key_hex: &str,
    storage: &StoragePool,
    blob_store: &BlobStore,
    our_node_id: &NodeId,
    _our_seed: &[u8; 32],
) -> anyhow::Result<ImportResult> {
    // Derive the original identity from the provided key
    let original_seed_bytes = hex::decode(original_key_hex)?;
    let original_seed: [u8; 32] = original_seed_bytes
        .try_into()
        .map_err(|_| anyhow::anyhow!("key must be 32 bytes (64 hex chars)"))?;
    let original_secret_key = iroh::SecretKey::from_bytes(&original_seed);
    let original_node_id: NodeId = *original_secret_key.public().as_bytes();

    // Phase 1: Read and decrypt everything from ZIP synchronously
    let parsed = {
        let zip_path = zip_path.to_path_buf();
        let our_nid = *our_node_id;
        let orig_seed = original_seed;
        let orig_nid = original_node_id;

        tokio::task::spawn_blocking(move || -> anyhow::Result<ParsedImport> {
            let file = std::fs::File::open(&zip_path)?;
            let mut archive = zip::ZipArchive::new(file)?;

            let posts: Vec<ExportedPost> = {
                let mut entry = archive.by_name("itsgoin-export/posts.json")?;
                let mut buf = String::new();
                entry.read_to_string(&mut buf)?;
                serde_json::from_str(&buf)?
            };

            let mut result_posts = Vec::new();
            let mut skipped = 0usize;

            for ep in &posts {
                let vis: PostVisibility = serde_json::from_str(&ep.visibility_json)
                    .unwrap_or(PostVisibility::Public);
                let attachments: Vec<Attachment> = serde_json::from_str(&ep.attachments_json)
                    .unwrap_or_default();

                // Decrypt content if encrypted
                let plaintext = match &vis {
                    PostVisibility::Public => ep.content.clone(),
                    PostVisibility::Encrypted { recipients } => {
                        match crate::crypto::decrypt_post(
                            &ep.content, &orig_seed, &orig_nid, &orig_nid, recipients,
                        ) {
                            Ok(Some(text)) => text,
                            Ok(None) => {
                                debug!(post = ep.id, "Not a recipient of this post — skipping");
                                skipped += 1;
                                continue;
                            }
                            Err(e) => {
                                warn!(post = ep.id, error = %e, "Failed to decrypt post — skipping");
                                skipped += 1;
                                continue;
                            }
                        }
                    }
                    PostVisibility::GroupEncrypted { .. } => {
                        // Group decryption needs the group seed — skip for now
                        debug!(post = ep.id, "Group-encrypted post — skipping (group merge not yet supported)");
                        skipped += 1;
                        continue;
                    }
                };

                // Create new post under our identity
                let new_post = Post {
                    author: our_nid,
                    content: plaintext,
                    attachments: attachments.clone(),
                    timestamp_ms: ep.timestamp_ms,
                };

                // Read blob data from archive (may need decryption for encrypted posts)
                let mut blob_data = Vec::new();
                for att in &attachments {
                    let cid_hex = hex::encode(att.cid);
                    let blob_path = format!("itsgoin-export/blobs/{}", cid_hex);
                    if let Ok(mut blob_entry) = archive.by_name(&blob_path) {
                        let mut data = Vec::new();
                        blob_entry.read_to_end(&mut data)?;

                        // If the post was encrypted, blobs are also encrypted with the same CEK
                        if let PostVisibility::Encrypted { ref recipients } = vis {
                            if let Ok(Some(cek)) = crate::crypto::unwrap_cek_for_recipient(
                                &orig_seed, &orig_nid, &orig_nid, recipients,
                            ) {
                                if let Ok(decrypted) = crate::crypto::decrypt_bytes_with_cek(&data, &cek) {
                                    data = decrypted;
                                }
                            }
                        }

                        blob_data.push((att.clone(), data));
                    }
                }

                // Merged posts go in as public (decrypted content, new author)
                result_posts.push((new_post, PostVisibility::Public, blob_data));
            }

            Ok(ParsedImport { posts: result_posts, skipped })
        })
        .await??
    };

    // Phase 2: Store with prior_author provenance
    let mut imported = 0usize;
    let mut blobs_imported = 0usize;

    for (new_post, _vis, blob_data) in &parsed.posts {
        let new_id = compute_post_id(new_post);

        let s = storage.get().await;
        if s.get_post(&new_id).ok().flatten().is_some() {
            continue;
        }
        s.store_post_with_visibility(&new_id, new_post, &PostVisibility::Public)?;

        // Create BlobHeader with prior_author
        let now = now_ms();
        let header = crate::types::BlobHeader {
            post_id: new_id,
            author: *our_node_id,
            reactions: vec![],
            comments: vec![],
            policy: crate::types::CommentPolicy::default(),
            updated_at: now,
            thread_splits: vec![],
            receipt_slots: vec![],
            comment_slots: vec![],
            prior_author: Some(original_node_id),
        };
        let header_json = serde_json::to_string(&header).unwrap_or_default();
        let _ = s.store_blob_header(&new_id, our_node_id, &header_json, now);
        drop(s);

        for (att, data) in blob_data {
            if !blob_store.has(&att.cid) {
                blob_store.store(&att.cid, data)?;
            }
            // Record the blob against the new post even when the bytes are already
            // in the store — otherwise re-merged posts lose their blob association
            // (mirrors import_public_posts).
            let s = storage.get().await;
            let _ = s.record_blob(&att.cid, &new_id, our_node_id, data.len() as u64, &att.mime_type, att.size_bytes);
            blobs_imported += 1;
        }

        imported += 1;
    }

    info!(
        imported, skipped = parsed.skipped, blobs = blobs_imported,
        original = hex::encode(original_node_id),
        "Merge with key complete"
    );

    Ok(ImportResult {
        posts_imported: imported,
        posts_skipped: parsed.skipped,
        blobs_imported,
        message: format!(
            "Merged {} posts from {} ({} skipped), {} blobs",
            imported, &hex::encode(original_node_id)[..12], parsed.skipped, blobs_imported
        ),
    })
}
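
// Sketch of a caller — handle names are hypothetical:
//
//     let result = merge_with_key(Path::new("backup.zip"), "<64 hex chars>",
//                                 &pool, &blobs, &me, &seed).await?;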

fn now_ms() -> u64 {
    std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis() as u64
}
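
#[cfg(test)]
mod tests {
    use super::*;

    // A minimal sketch: pins down the snake_case wire format callers rely on.
    #[test]
    fn import_action_serde_round_trip() {
        let json = serde_json::to_string(&ImportAction::MergeWithKey { key_hex: "ab".into() }).unwrap();
        assert_eq!(json, r#"{"merge_with_key":{"key_hex":"ab"}}"#);
        assert!(matches!(
            serde_json::from_str::<ImportAction>(&json).unwrap(),
            ImportAction::MergeWithKey { .. }
        ));
    }

    #[test]
    fn now_ms_is_past_2020() {
        // 2020-01-01T00:00:00Z in milliseconds since the epoch.
        assert!(now_ms() > 1_577_836_800_000);
    }
}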