v0.5.0-beta: merge-with-key import, prior_author provenance, beta versioning
Merge-with-key: decrypt exported posts using the original identity's seed, then re-create them under the current identity with `prior_author` set in the BlobHeader for provenance tracking. The download page is restructured into stable (v0.4.4) and beta (v0.5.0-beta) sections, and the version is bumped across all crates.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
parent 8ef32e6df6
commit 97dc83f9f1
13 changed files with 311 additions and 10 deletions
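From the application side the flow is: the user supplies the export ZIP plus the original identity's 32-byte seed as hex, and the importer decrypts what it can and republishes under the current identity. A minimal caller sketch, assuming the `merge_with_key` signature and the `StoragePool`/`BlobStore`/`NodeId` types shown in the diff below; the wrapper function, the path, and the key value are hypothetical:

```rust
use std::path::Path;

// Hypothetical wiring: `storage`, `blob_store`, `node_id`, and `seed` come from
// an already-initialized node; only `merge_with_key` itself is from this commit.
async fn merge_old_identity(
    storage: &StoragePool,
    blob_store: &BlobStore,
    node_id: &NodeId,
    seed: &[u8; 32],
) -> anyhow::Result<()> {
    let zip = Path::new("/path/to/itsgoin-export.zip");
    // 64 hex chars = the original identity's 32-byte secret seed (placeholder value)
    let original_key_hex = "aabbcc...";

    let result = itsgoin_core::import::merge_with_key(
        zip, original_key_hex, storage, blob_store, node_id, seed,
    ).await?;

    // ImportResult carries counts plus a human-readable summary
    println!("{}", result.message);
    Ok(())
}
```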
@@ -1,6 +1,6 @@
 [package]
 name = "itsgoin-cli"
-version = "0.3.0"
+version = "0.5.0"
 edition = "2021"
 
 [[bin]]
@@ -1,6 +1,6 @@
 [package]
 name = "itsgoin-core"
-version = "0.3.0"
+version = "0.5.0"
 edition = "2021"
 
 [dependencies]
@@ -6334,6 +6334,7 @@ impl ConnectionManager {
             thread_splits: vec![],
             receipt_slots: vec![],
             comment_slots: vec![],
+            prior_author: None,
         }
     });
     header.reactions = reactions;
@@ -241,6 +241,181 @@ pub fn import_as_identity(
     Ok(manifest.node_id)
 }
 
+/// Merge posts from another identity into the current one using the original key for decryption.
+/// Decrypts encrypted posts, creates new posts under the current identity, preserves timestamps.
+/// BlobHeader gets `prior_author` set for provenance.
+pub async fn merge_with_key(
+    zip_path: &Path,
+    original_key_hex: &str,
+    storage: &StoragePool,
+    blob_store: &BlobStore,
+    our_node_id: &NodeId,
+    _our_seed: &[u8; 32],
+) -> anyhow::Result<ImportResult> {
+    // Derive the original identity from the provided key
+    let original_seed_bytes = hex::decode(original_key_hex)?;
+    let original_seed: [u8; 32] = original_seed_bytes.try_into()
+        .map_err(|_| anyhow::anyhow!("key must be 32 bytes (64 hex chars)"))?;
+    let original_secret_key = iroh::SecretKey::from_bytes(&original_seed);
+    let original_node_id: NodeId = *original_secret_key.public().as_bytes();
+
+    // Phase 1: Read and decrypt everything from ZIP synchronously
+    let parsed = {
+        let zip_path = zip_path.to_path_buf();
+        let our_nid = *our_node_id;
+        let orig_seed = original_seed;
+        let orig_nid = original_node_id;
+
+        tokio::task::spawn_blocking(move || -> anyhow::Result<ParsedImport> {
+            let file = std::fs::File::open(&zip_path)?;
+            let mut archive = zip::ZipArchive::new(file)?;
+
+            let posts: Vec<ExportedPost> = {
+                let mut entry = archive.by_name("itsgoin-export/posts.json")?;
+                let mut buf = String::new();
+                entry.read_to_string(&mut buf)?;
+                serde_json::from_str(&buf)?
+            };
+
+            let mut result_posts = Vec::new();
+            let mut skipped = 0usize;
+
+            for ep in &posts {
+                let vis: PostVisibility = serde_json::from_str(&ep.visibility_json)
+                    .unwrap_or(PostVisibility::Public);
+                let attachments: Vec<Attachment> = serde_json::from_str(&ep.attachments_json)
+                    .unwrap_or_default();
+
+                // Decrypt content if encrypted
+                let plaintext = match &vis {
+                    PostVisibility::Public => ep.content.clone(),
+                    PostVisibility::Encrypted { recipients } => {
+                        match crate::crypto::decrypt_post(
+                            &ep.content, &orig_seed, &orig_nid, &orig_nid, recipients,
+                        ) {
+                            Ok(Some(text)) => text,
+                            Ok(None) => {
+                                debug!(post = ep.id, "Not a recipient of this post — skipping");
+                                skipped += 1;
+                                continue;
+                            }
+                            Err(e) => {
+                                warn!(post = ep.id, error = %e, "Failed to decrypt post — skipping");
+                                skipped += 1;
+                                continue;
+                            }
+                        }
+                    }
+                    PostVisibility::GroupEncrypted { .. } => {
+                        // Group decryption needs the group seed — skip for now
+                        debug!(post = ep.id, "Group-encrypted post — skipping (group merge not yet supported)");
+                        skipped += 1;
+                        continue;
+                    }
+                };
+
+                // Create new post under our identity
+                let new_post = Post {
+                    author: our_nid,
+                    content: plaintext,
+                    attachments: attachments.clone(),
+                    timestamp_ms: ep.timestamp_ms,
+                };
+
+                // Read blob data from archive (may need decryption for encrypted posts)
+                let mut blob_data = Vec::new();
+                for att in &attachments {
+                    let cid_hex = hex::encode(att.cid);
+                    let blob_path = format!("itsgoin-export/blobs/{}", cid_hex);
+                    if let Ok(mut blob_entry) = archive.by_name(&blob_path) {
+                        let mut data = Vec::new();
+                        blob_entry.read_to_end(&mut data)?;
+
+                        // If the post was encrypted, blobs are also encrypted with the same CEK
+                        if let PostVisibility::Encrypted { ref recipients } = vis {
+                            if let Ok(Some(cek)) = crate::crypto::unwrap_cek_for_recipient(
+                                &orig_seed, &orig_nid, &orig_nid, recipients,
+                            ) {
+                                if let Ok(decrypted) = crate::crypto::decrypt_bytes_with_cek(&data, &cek) {
+                                    data = decrypted;
+                                }
+                            }
+                        }
+
+                        blob_data.push((att.clone(), data));
+                    }
+                }
+
+                // Merged posts go in as public (decrypted content, new author)
+                result_posts.push((new_post, PostVisibility::Public, blob_data));
+            }
+
+            Ok(ParsedImport { posts: result_posts, skipped })
+        }).await??
+    };
+
+    // Phase 2: Store with prior_author provenance
+    let mut imported = 0usize;
+    let mut blobs_imported = 0usize;
+
+    for (new_post, _vis, blob_data) in &parsed.posts {
+        let new_id = compute_post_id(new_post);
+
+        let s = storage.get().await;
+        if s.get_post(&new_id).ok().flatten().is_some() {
+            continue;
+        }
+        s.store_post_with_visibility(&new_id, new_post, &PostVisibility::Public)?;
+
+        // Create BlobHeader with prior_author
+        let now = now_ms();
+        let header = crate::types::BlobHeader {
+            post_id: new_id,
+            author: *our_node_id,
+            reactions: vec![],
+            comments: vec![],
+            policy: crate::types::CommentPolicy::default(),
+            updated_at: now,
+            thread_splits: vec![],
+            receipt_slots: vec![],
+            comment_slots: vec![],
+            prior_author: Some(original_node_id),
+        };
+        let header_json = serde_json::to_string(&header).unwrap_or_default();
+        let _ = s.store_blob_header(&new_id, our_node_id, &header_json, now);
+        drop(s);
+
+        for (att, data) in blob_data {
+            if !blob_store.has(&att.cid) {
+                blob_store.store(&att.cid, data)?;
+                let s = storage.get().await;
+                let _ = s.record_blob(&att.cid, &new_id, our_node_id, data.len() as u64, &att.mime_type, att.size_bytes);
+                blobs_imported += 1;
+            }
+        }
+
+        imported += 1;
+    }
+
+    info!(
+        imported, skipped = parsed.skipped, blobs = blobs_imported,
+        original = hex::encode(original_node_id),
+        "Merge with key complete"
+    );
+
+    Ok(ImportResult {
+        posts_imported: imported,
+        posts_skipped: parsed.skipped,
+        blobs_imported,
+        message: format!(
+            "Merged {} posts from {} ({} skipped), {} blobs",
+            imported, &hex::encode(original_node_id)[..12], parsed.skipped, blobs_imported
+        ),
+    })
+}
+
 fn now_ms() -> u64 {
     std::time::SystemTime::now()
         .duration_since(std::time::UNIX_EPOCH)
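The importer reads a fixed layout from the export archive: post metadata at `itsgoin-export/posts.json` and raw (possibly CEK-encrypted) attachment bytes at `itsgoin-export/blobs/<cid-hex>`, both visible in the hunk above. A small pre-flight sketch using only the `zip` crate calls the diff itself relies on; the helper function is hypothetical, not part of this commit:

```rust
use std::fs::File;

// Sketch: verify a ZIP looks like an itsgoin export before attempting a merge.
// The entry path is taken from the hunk above.
fn looks_like_export(zip_path: &std::path::Path) -> anyhow::Result<bool> {
    let file = File::open(zip_path)?;
    let mut archive = zip::ZipArchive::new(file)?;
    // by_name returns Err for a missing entry, so a failed lookup means "not an export"
    Ok(archive.by_name("itsgoin-export/posts.json").is_ok())
}
```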
@@ -533,6 +533,10 @@ impl Node {
 
     // ---- Identity export/import ----
 
+    pub fn secret_seed(&self) -> [u8; 32] {
+        self.secret_seed
+    }
+
     pub fn export_identity_hex(&self) -> anyhow::Result<String> {
         let key_path = self.data_dir.join("identity.key");
         let key_bytes = std::fs::read(&key_path)?;
@@ -753,6 +757,7 @@ impl Node {
             thread_splits: vec![],
             receipt_slots,
             comment_slots,
+            prior_author: None,
         };
         let header_json = serde_json::to_string(&blob_header)?;
         storage.store_blob_header(&post_id, &self.node_id, &header_json, now)?;
@@ -3912,6 +3917,7 @@ impl Node {
                 thread_splits: vec![],
                 receipt_slots: vec![],
                 comment_slots: vec![],
+                prior_author: None,
             })
         } else {
             crate::types::BlobHeader {
@@ -3924,6 +3930,7 @@ impl Node {
                 thread_splits: vec![],
                 receipt_slots: vec![],
                 comment_slots: vec![],
+                prior_author: None,
             }
         };
 
@@ -4001,6 +4008,7 @@ impl Node {
                 thread_splits: vec![],
                 receipt_slots: vec![],
                 comment_slots: vec![],
+                prior_author: None,
             })
         } else {
             crate::types::BlobHeader {
@@ -4013,6 +4021,7 @@ impl Node {
                 thread_splits: vec![],
                 receipt_slots: vec![],
                 comment_slots: vec![],
+                prior_author: None,
             }
         };
 
@@ -859,6 +859,9 @@ pub struct BlobHeader {
     /// Encrypted comment slots (each 256 bytes) — only for encrypted posts
     #[serde(default)]
    pub comment_slots: Vec<Vec<u8>>,
+    /// Original author NodeId before post merge (set during cross-identity import)
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub prior_author: Option<NodeId>,
 }
 
 /// Receipt slot state byte values
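The serde attributes keep the header wire-compatible in both directions: old headers without the field deserialize with `prior_author: None`, and new headers with `None` serialize without the key, so pre-merge peers see unchanged JSON. A toy round-trip illustrating the same attribute pair; the `Header` struct here is a stand-in, not the real `BlobHeader`:

```rust
use serde::{Deserialize, Serialize};

// Toy stand-in for BlobHeader, just to show the attribute behavior.
#[derive(Serialize, Deserialize, Debug)]
struct Header {
    author: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    prior_author: Option<String>,
}

fn main() {
    // Old-format JSON (no prior_author) still deserializes: the field defaults to None.
    let old: Header = serde_json::from_str(r#"{"author":"a"}"#).unwrap();
    assert!(old.prior_author.is_none());

    // None round-trips back to JSON without the key.
    assert_eq!(serde_json::to_string(&old).unwrap(), r#"{"author":"a"}"#);
}
```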
@@ -1,6 +1,6 @@
 [package]
 name = "itsgoin-desktop"
-version = "0.4.4"
+version = "0.5.0"
 edition = "2021"
 
 [lib]
@@ -2216,6 +2216,24 @@ async fn import_as_new_identity(
     Ok(format!("Identity {} imported — switch to it in Settings", &node_id[..12]))
 }
 
+#[tauri::command]
+async fn import_merge_with_key(
+    state: State<'_, AppNode>,
+    zip_path: String,
+    key_hex: String,
+) -> Result<String, String> {
+    let node = get_node(&state).await;
+    let result = itsgoin_core::import::merge_with_key(
+        std::path::Path::new(&zip_path),
+        &key_hex,
+        &node.storage,
+        &node.blob_store,
+        &node.node_id,
+        &node.secret_seed(),
+    ).await.map_err(|e| e.to_string())?;
+    Ok(result.message)
+}
+
 #[cfg_attr(mobile, tauri::mobile_entry_point)]
 pub fn run() {
     tracing_subscriber::fmt()
@@ -2408,6 +2426,7 @@ pub fn run() {
             import_summary,
             import_public_posts,
             import_as_new_identity,
+            import_merge_with_key,
         ])
         .build(tauri::generate_context!())
         .expect("error while building tauri application")
@@ -1,6 +1,6 @@
 {
   "productName": "itsgoin",
-  "version": "0.4.4",
+  "version": "0.5.0",
   "identifier": "com.itsgoin.app",
   "build": {
     "frontendDist": "../../frontend",