From bb6f2b64b0cf7e59241aeb087a5d9974a3c93136 Mon Sep 17 00:00:00 2001 From: Scott Reimers Date: Sat, 21 Mar 2026 19:30:38 -0400 Subject: [PATCH] v0.4.1: Security hardening, lock contention fixes, data cleanup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Security: - Reaction signatures: ed25519 sign/verify (sign_reaction, verify_reaction_signature) Backward-compatible — unsigned reactions from old nodes still accepted - Comment signature verification: verify_comment_signature now called on receipt - Reaction removal authorization: only reactor or post author can remove - BlobHeader author verification: lookup actual author from storage, don't trust payload Lock contention (4 fixes): - ManifestPush discovery: cm lock released before PostFetch I/O - Pull request handler: load under lock, filter without lock, brief re-lock for is_deleted - Pull sender: split into two brief locks (store posts, then batch upstream+sync) - Engagement checker: batch all chunk results, single lock for writes Data cleanup: - Post deletion cleans post_downstream, post_upstream, seen_engagement tables - Added TODO-hardening.md documenting remaining DOS/security/lock/data issues Co-Authored-By: Claude Opus 4.6 (1M context) --- Cargo.lock | 2 +- crates/core/src/connection.rs | 336 +++++++++++++++++++------------ crates/core/src/crypto.rs | 42 ++++ crates/core/src/node.rs | 2 + crates/core/src/storage.rs | 13 +- crates/core/src/types.rs | 3 + crates/tauri-app/Cargo.toml | 2 +- crates/tauri-app/tauri.conf.json | 2 +- docs/TODO-hardening.md | 204 +++++++++++++++++++ website/design.html | 3 +- website/download.html | 29 ++- 11 files changed, 500 insertions(+), 138 deletions(-) create mode 100644 docs/TODO-hardening.md diff --git a/Cargo.lock b/Cargo.lock index e5d45d5..cb0d0b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2746,7 +2746,7 @@ dependencies = [ [[package]] name = "itsgoin-desktop" -version = "0.4.0" +version = "0.4.1" dependencies = [ 
"anyhow", "base64 0.22.1", diff --git a/crates/core/src/connection.rs b/crates/core/src/connection.rs index 158d64d..80ae712 100644 --- a/crates/core/src/connection.rs +++ b/crates/core/src/connection.rs @@ -1596,32 +1596,42 @@ impl ConnectionManager { .as_millis() as u64; let mut stored = false; let mut new_post_ids: Vec = Vec::new(); - let storage = self.storage.lock().await; let mut synced_authors: HashSet = HashSet::new(); - for sp in response.posts { - if verify_post_id(&sp.id, &sp.post) && !storage.is_deleted(&sp.id)? { - let _ = storage.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility); - let prio = storage.get_post_upstreams(&sp.id).map(|v| v.len() as u8).unwrap_or(0); - let _ = storage.add_post_upstream(&sp.id, from, prio); - new_post_ids.push(sp.id); - synced_authors.insert(sp.post.author); - if sp.id == notification.post_id { - stored = true; + + // Brief lock 1: store posts + { + let storage = self.storage.lock().await; + for sp in &response.posts { + if verify_post_id(&sp.id, &sp.post) && !storage.is_deleted(&sp.id)? { + let _ = storage.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility); + new_post_ids.push(sp.id); + synced_authors.insert(sp.post.author); + if sp.id == notification.post_id { + stored = true; + } } } } - // Protocol v4: update last_sync_ms for authors whose posts were received - for author in &synced_authors { - let _ = storage.update_follow_last_sync(author, now_ms); - } - for vu in response.visibility_updates { - if let Some(post) = storage.get_post(&vu.post_id)? 
{ - if post.author == vu.author { - let _ = storage.update_post_visibility(&vu.post_id, &vu.visibility); + // Lock RELEASED + + // Brief lock 2: upstream + last_sync + visibility updates + { + let storage = self.storage.lock().await; + for pid in &new_post_ids { + let prio = storage.get_post_upstreams(pid).map(|v| v.len() as u8).unwrap_or(0); + let _ = storage.add_post_upstream(pid, from, prio); + } + for author in &synced_authors { + let _ = storage.update_follow_last_sync(author, now_ms); + } + for vu in &response.visibility_updates { + if let Some(post) = storage.get_post(&vu.post_id)? { + if post.author == vu.author { + let _ = storage.update_post_visibility(&vu.post_id, &vu.visibility); + } } } } - drop(storage); // Register as downstream for new posts (cap at 50 to avoid flooding) if !new_post_ids.is_empty() { @@ -1679,32 +1689,35 @@ impl ConnectionManager { .unwrap_or_default() .as_millis() as u64; + // Brief lock 1: store posts + let mut synced_authors: HashSet = HashSet::new(); { let storage = self.storage.lock().await; - // Track which authors had posts received for last_sync_ms update - let mut synced_authors: HashSet = HashSet::new(); - for sp in &response.posts { if storage.is_deleted(&sp.id)? { continue; } if verify_post_id(&sp.id, &sp.post) { if storage.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility)? 
{ - // Record who we got this post from (upstream for engagement propagation) - let prio = storage.get_post_upstreams(&sp.id).map(|v| v.len() as u8).unwrap_or(0); - let _ = storage.add_post_upstream(&sp.id, peer_id, prio); new_post_ids.push(sp.id); posts_received += 1; } synced_authors.insert(sp.post.author); } } + } + // Lock RELEASED - // Protocol v4: update last_sync_ms for authors whose posts were received + // Brief lock 2: upstream + last_sync + visibility updates + { + let storage = self.storage.lock().await; + for pid in &new_post_ids { + let prio = storage.get_post_upstreams(pid).map(|v| v.len() as u8).unwrap_or(0); + let _ = storage.add_post_upstream(pid, peer_id, prio); + } for author in &synced_authors { let _ = storage.update_follow_last_sync(author, now_ms); } - for vu in response.visibility_updates { if vu.author != *peer_id { // Only accept visibility updates authored by the responding peer @@ -1775,8 +1788,10 @@ impl ConnectionManager { let mut updated = 0; // Request headers in batches to avoid opening too many streams for chunk in post_headers.chunks(20) { + // Collect all results for this chunk WITHOUT holding the lock + let mut results: Vec<([u8; 32], Option<(String, crate::types::BlobHeader)>)> = Vec::new(); for (post_id, current_ts) in chunk { - let result: anyhow::Result<()> = async { + let result: anyhow::Result> = async { let (mut send, mut recv) = pc.connection.open_bi().await?; let request = BlobHeaderRequestPayload { post_id: *post_id, @@ -1792,45 +1807,52 @@ impl ConnectionManager { let response: BlobHeaderResponsePayload = read_payload(&mut recv, MAX_PAYLOAD).await?; - // Brief re-lock for writes - let storage = self.storage.lock().await; - // Always update last_check_ms regardless of whether engagement changed - let _ = storage.update_post_last_check(post_id, now_ms); - if response.updated { - if let Some(json) = &response.header_json { - if let Ok(header) = serde_json::from_str::(json) { - // Store the full header JSON - let _ = 
storage.store_blob_header( - &header.post_id, - &header.author, - json, - header.updated_at, - ); - // Apply individual reactions and comments. - // store_reaction / store_comment are tombstone-aware: - // they compare timestamps and respect deleted_at fields. - for reaction in &header.reactions { - let _ = storage.store_reaction(reaction); - } - for comment in &header.comments { - let _ = storage.store_comment(comment); - } - let _ = storage.set_comment_policy(&header.post_id, &header.policy); - // Update last_engagement_ms when new engagement arrives - let _ = storage.update_post_last_engagement(post_id, now_ms); - updated += 1; + if let Some(json) = response.header_json { + if let Ok(header) = serde_json::from_str::(&json) { + return Ok(Some((json, header))); } } } - drop(storage); - Ok(()) + Ok(None) } .await; - if let Err(e) = result { - trace!(post_id = hex::encode(post_id), error = %e, "Failed to fetch engagement header"); + match result { + Ok(header_opt) => results.push((*post_id, header_opt)), + Err(e) => { + trace!(post_id = hex::encode(post_id), error = %e, "Failed to fetch engagement header"); + } } } + + // Single lock for ALL writes in this chunk + if !results.is_empty() { + let storage = self.storage.lock().await; + for (post_id, header_opt) in &results { + let _ = storage.update_post_last_check(post_id, now_ms); + if let Some((json, header)) = header_opt { + let _ = storage.store_blob_header( + &header.post_id, + &header.author, + json, + header.updated_at, + ); + // store_reaction / store_comment are tombstone-aware: + // they compare timestamps and respect deleted_at fields. 
+ for reaction in &header.reactions { + let _ = storage.store_reaction(reaction); + } + for comment in &header.comments { + let _ = storage.store_comment(comment); + } + let _ = storage.set_comment_policy(&header.post_id, &header.policy); + let _ = storage.update_post_last_engagement(post_id, now_ms); + updated += 1; + } + } + drop(storage); + } + // Lock RELEASED before next chunk } Ok(updated) @@ -1852,55 +1874,61 @@ impl ConnectionManager { let since_ms_map: HashMap = request.since_ms.into_iter().collect(); let use_since_ms = !since_ms_map.is_empty(); - let (posts, vis_updates) = { + // Phase 1: Brief lock — load data + let (all_posts, group_members) = { let storage = self.storage.lock().await; - let all_posts = storage.list_posts_with_visibility()?; - let group_members = storage.get_all_group_members().unwrap_or_default(); + let posts = storage.list_posts_with_visibility()?; + let members = storage.get_all_group_members().unwrap_or_default(); + (posts, members) + }; + // Lock RELEASED - let mut posts_to_send = Vec::new(); - let mut vis_updates_to_send = Vec::new(); + // Phase 2: Filter without lock (pure CPU) + let mut candidates_to_send = Vec::new(); + let mut vis_updates_to_send = Vec::new(); - for (id, post, visibility) in all_posts { - let should_send = - crate::network::should_send_post(&post, &visibility, &remote_node_id, &their_follows, &group_members); + for (id, post, visibility) in all_posts { + let should_send = + crate::network::should_send_post(&post, &visibility, &remote_node_id, &their_follows, &group_members); - if !should_send { - continue; - } - - // Determine if peer already has this post - let peer_has_post = if use_since_ms { - // v4 path: filter by per-author timestamp (60s fudge for clock skew) - if let Some(&since) = since_ms_map.get(&post.author) { - post.timestamp_ms <= since + 60_000 - } else { - false // no since_ms for this author — they want everything - } - } else { - // Legacy path: use have_post_ids - 
their_post_ids.contains(&id) - }; - - if !peer_has_post { - if !storage.is_deleted(&id)? { - posts_to_send.push(SyncPost { - id, - post, - visibility, - }); - } - } else { - // They already have the post — send visibility update if we authored it - if post.author == self.our_node_id { - vis_updates_to_send.push(crate::types::VisibilityUpdate { - post_id: id, - author: self.our_node_id, - visibility, - }); - } - } + if !should_send { + continue; } + // Determine if peer already has this post + let peer_has_post = if use_since_ms { + // v4 path: filter by per-author timestamp (60s fudge for clock skew) + if let Some(&since) = since_ms_map.get(&post.author) { + post.timestamp_ms <= since + 60_000 + } else { + false // no since_ms for this author — they want everything + } + } else { + // Legacy path: use have_post_ids + their_post_ids.contains(&id) + }; + + if !peer_has_post { + candidates_to_send.push((id, post, visibility)); + } else { + // They already have the post — send visibility update if we authored it + if post.author == self.our_node_id { + vis_updates_to_send.push(crate::types::VisibilityUpdate { + post_id: id, + author: self.our_node_id, + visibility, + }); + } + } + } + + // Phase 3: Brief re-lock for is_deleted checks on filtered posts + let (posts, vis_updates) = { + let storage = self.storage.lock().await; + let posts_to_send: Vec = candidates_to_send.into_iter() + .filter(|(id, _, _)| !storage.is_deleted(id).unwrap_or(false)) + .map(|(id, post, visibility)| SyncPost { id, post, visibility }) + .collect(); (posts_to_send, vis_updates_to_send) }; @@ -4940,32 +4968,63 @@ impl ConnectionManager { let cm_arc = conn_mgr.clone(); let sender_id = remote_node_id; tokio::spawn(async move { - let cm = cm_arc.lock().await; + // Brief lock: get connection handle only + let conn = { + let cm = cm_arc.lock().await; + cm.connections_ref().get(&sender_id).map(|pc| pc.connection.clone()) + }; + // cm lock RELEASED + + let Some(conn) = conn else { return }; + let mut 
fetched = 0usize; for (post_id, _author) in &discovery_posts { if fetched >= 10 { break; } - match cm.send_post_fetch(&sender_id, post_id).await { + + // PostFetch network I/O WITHOUT any lock + let result = async { + use crate::protocol::{PostFetchRequestPayload, PostFetchResponsePayload}; + let (mut send, mut recv) = conn.open_bi().await?; + let req = PostFetchRequestPayload { post_id: *post_id }; + write_typed_message(&mut send, MessageType::PostFetchRequest, &req).await?; + send.finish()?; + let msg_type = tokio::time::timeout( + std::time::Duration::from_secs(10), + read_message_type(&mut recv), + ).await??; + if msg_type != MessageType::PostFetchResponse { + return anyhow::Ok(None); + } + let resp: PostFetchResponsePayload = read_payload(&mut recv, MAX_PAYLOAD).await?; + anyhow::Ok(resp.post) + }.await; + + match result { Ok(Some(sync_post)) => { if crate::content::verify_post_id(&sync_post.id, &sync_post.post) { - let storage = cm.storage.lock().await; - if storage.store_post_with_visibility(&sync_post.id, &sync_post.post, &sync_post.visibility).unwrap_or(false) { - // Set upstream + register as downstream - let prio = storage.get_post_upstreams(&sync_post.id).map(|v| v.len() as u8).unwrap_or(0); - let _ = storage.add_post_upstream(&sync_post.id, &sender_id, prio); - // Update last_sync_ms for the author - let now = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap_or_default() - .as_millis() as u64; - let _ = storage.update_follow_last_sync(&sync_post.post.author, now); - drop(storage); - // Register as downstream with the sender - if let Some(pc) = cm.connections_ref().get(&sender_id) { - let reg = crate::protocol::PostDownstreamRegisterPayload { post_id: sync_post.id }; - if let Ok(mut send) = pc.connection.open_uni().await { - let _ = write_typed_message(&mut send, MessageType::PostDownstreamRegister, ®).await; - let _ = send.finish(); - } + // Brief re-acquire for storage writes only + let stored = { + let cm = 
cm_arc.lock().await; + let storage = cm.storage.lock().await; + if storage.store_post_with_visibility(&sync_post.id, &sync_post.post, &sync_post.visibility).unwrap_or(false) { + let prio = storage.get_post_upstreams(&sync_post.id).map(|v| v.len() as u8).unwrap_or(0); + let _ = storage.add_post_upstream(&sync_post.id, &sender_id, prio); + let now = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_default() + .as_millis() as u64; + let _ = storage.update_follow_last_sync(&sync_post.post.author, now); + true + } else { + false + } + }; + // cm lock RELEASED — register downstream without lock + if stored { + let reg = crate::protocol::PostDownstreamRegisterPayload { post_id: sync_post.id }; + if let Ok(mut send) = conn.open_uni().await { + let _ = write_typed_message(&mut send, MessageType::PostDownstreamRegister, ®).await; + let _ = send.finish(); } fetched += 1; } @@ -5703,10 +5762,22 @@ impl ConnectionManager { if let crate::types::ReactPermission::None = policy.allow_reacts { continue; } + // Verify signature (skip if empty for backward compat with old nodes) + if !reaction.signature.is_empty() && !crate::crypto::verify_reaction_signature( + &reaction.reactor, + &payload.post_id, + &reaction.emoji, + reaction.timestamp_ms, + &reaction.signature, + ) { + continue; // Skip forged reactions + } let _ = storage.store_reaction(reaction); } BlobHeaderDiffOp::RemoveReaction { reactor, emoji, post_id } => { - let _ = storage.remove_reaction(reactor, post_id, emoji); + if *reactor == sender || sender == payload.author { + let _ = storage.remove_reaction(reactor, post_id, emoji); + } } BlobHeaderDiffOp::AddComment(comment) => { if policy.blocklist.contains(&comment.author) { @@ -5721,6 +5792,15 @@ impl ConnectionManager { } crate::types::CommentPermission::Public => {} } + if !crate::crypto::verify_comment_signature( + &comment.author, + &payload.post_id, + &comment.content, + comment.timestamp_ms, + &comment.signature, + ) { + continue; // 
Skip forged comments + } let _ = storage.store_comment(comment); } BlobHeaderDiffOp::EditComment { author, post_id, timestamp_ms, new_content } => { @@ -5832,8 +5912,14 @@ impl ConnectionManager { header.comments = comments; header.policy = policy; header.updated_at = payload.timestamp_ms; + // Look up actual post author (don't trust payload.author) + let actual_author = storage.get_post(&payload.post_id) + .ok().flatten() + .map(|p| p.author) + .unwrap_or(payload.author); // fallback if post not stored yet + header.author = actual_author; if let Ok(json) = serde_json::to_string(&header) { - let _ = storage.store_blob_header(&payload.post_id, &payload.author, &json, payload.timestamp_ms); + let _ = storage.store_blob_header(&payload.post_id, &actual_author, &json, payload.timestamp_ms); } // Phase 4: Update last_engagement_ms when engagement arrives via diff let _ = storage.update_post_last_engagement(&payload.post_id, payload.timestamp_ms); diff --git a/crates/core/src/crypto.rs b/crates/core/src/crypto.rs index a8205b2..ef48683 100644 --- a/crates/core/src/crypto.rs +++ b/crates/core/src/crypto.rs @@ -549,6 +549,7 @@ pub fn random_slot_noise(size: usize) -> Vec { const REACTION_WRAP_CONTEXT: &str = "itsgoin/private-reaction/v1"; const COMMENT_SIGN_CONTEXT: &str = "itsgoin/comment-sig/v1"; +const REACTION_SIGN_CONTEXT: &str = "itsgoin/reaction-sig/v1"; /// Encrypt a private reaction payload (only the post author can decrypt). /// Uses X25519 DH between reactor and author, then ChaCha20-Poly1305. @@ -645,6 +646,47 @@ pub fn verify_comment_signature( verifying_key.verify(digest.as_bytes(), &sig).is_ok() } +/// Sign a reaction: ed25519 over BLAKE3(reactor || post_id || emoji || timestamp_ms). 
+pub fn sign_reaction( + seed: &[u8; 32], + reactor: &NodeId, + post_id: &PostId, + emoji: &str, + timestamp_ms: u64, +) -> Vec<u8> { + let signing_key = SigningKey::from_bytes(seed); + let mut hasher = blake3::Hasher::new_derive_key(REACTION_SIGN_CONTEXT); + hasher.update(reactor); + hasher.update(post_id); + hasher.update(emoji.as_bytes()); + hasher.update(&timestamp_ms.to_le_bytes()); + let digest = hasher.finalize(); + signing_key.sign(digest.as_bytes()).to_bytes().to_vec() +} + +/// Verify a reaction's ed25519 signature. +pub fn verify_reaction_signature( + reactor: &NodeId, + post_id: &PostId, + emoji: &str, + timestamp_ms: u64, + signature: &[u8], +) -> bool { + let Ok(verifying_key) = VerifyingKey::from_bytes(reactor) else { + return false; + }; + let Ok(sig) = ed25519_dalek::Signature::from_slice(signature) else { + return false; + }; + let mut hasher = blake3::Hasher::new_derive_key(REACTION_SIGN_CONTEXT); + hasher.update(reactor); + hasher.update(post_id); + hasher.update(emoji.as_bytes()); + hasher.update(&timestamp_ms.to_le_bytes()); + let digest = hasher.finalize(); + verifying_key.verify(digest.as_bytes(), &sig).is_ok() +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/core/src/node.rs b/crates/core/src/node.rs index 1950ede..0838651 100644 --- a/crates/core/src/node.rs +++ b/crates/core/src/node.rs @@ -3493,6 +3493,7 @@ impl Node { None }; + let signature = crate::crypto::sign_reaction(&self.secret_seed, &our_node_id, &post_id, &emoji, now); let reaction = crate::types::Reaction { reactor: our_node_id, emoji: emoji.clone(), @@ -3500,6 +3501,7 @@ impl Node { timestamp_ms: now, encrypted_payload, deleted_at: None, + signature, }; // Store locally diff --git a/crates/core/src/storage.rs b/crates/core/src/storage.rs index 7f3e958..ec082a9 100644 --- a/crates/core/src/storage.rs +++ b/crates/core/src/storage.rs @@ -2197,12 +2197,18 @@ impl Storage { Ok(inserted > 0) } - /// Apply a delete: remove the post from the posts table if author matches.
+ /// Apply a delete: remove the post from the posts table if author matches, + /// and clean up associated downstream/upstream/engagement tracking rows. pub fn apply_delete(&self, record: &DeleteRecord) -> anyhow::Result { let deleted = self.conn.execute( "DELETE FROM posts WHERE id = ?1 AND author = ?2", params![record.post_id.as_slice(), record.author.as_slice()], )?; + if deleted > 0 { + self.conn.execute("DELETE FROM post_downstream WHERE post_id = ?1", params![record.post_id.as_slice()])?; + self.conn.execute("DELETE FROM post_upstream WHERE post_id = ?1", params![record.post_id.as_slice()])?; + self.conn.execute("DELETE FROM seen_engagement WHERE post_id = ?1", params![record.post_id.as_slice()])?; + } Ok(deleted > 0) } @@ -4253,6 +4259,7 @@ impl Storage { timestamp_ms: ts as u64, encrypted_payload: enc, deleted_at: None, + signature: vec![], }); } Ok(result) @@ -4286,6 +4293,7 @@ impl Storage { timestamp_ms: ts as u64, encrypted_payload: enc, deleted_at: del.map(|v| v as u64), + signature: vec![], }); } Ok(result) @@ -5894,6 +5902,7 @@ mod tests { timestamp_ms: 1000, encrypted_payload: None, deleted_at: None, + signature: vec![], }).unwrap(); s.store_reaction(&Reaction { @@ -5903,6 +5912,7 @@ mod tests { timestamp_ms: 1001, encrypted_payload: None, deleted_at: None, + signature: vec![], }).unwrap(); s.store_reaction(&Reaction { @@ -5912,6 +5922,7 @@ mod tests { timestamp_ms: 1002, encrypted_payload: None, deleted_at: None, + signature: vec![], }).unwrap(); let reactions = s.get_reactions(&post_id).unwrap(); diff --git a/crates/core/src/types.rs b/crates/core/src/types.rs index 57352b1..ddc7611 100644 --- a/crates/core/src/types.rs +++ b/crates/core/src/types.rs @@ -724,6 +724,9 @@ pub struct Reaction { /// Tombstone timestamp — if set, this reaction has been soft-deleted #[serde(default)] pub deleted_at: Option, + /// ed25519 signature over BLAKE3(reactor || post_id || emoji || timestamp_ms) + #[serde(default)] + pub signature: Vec, } /// An inline comment 
on a post diff --git a/crates/tauri-app/Cargo.toml b/crates/tauri-app/Cargo.toml index aa52e2e..60b9464 100644 --- a/crates/tauri-app/Cargo.toml +++ b/crates/tauri-app/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "itsgoin-desktop" -version = "0.4.0" +version = "0.4.1" edition = "2021" [lib] diff --git a/crates/tauri-app/tauri.conf.json b/crates/tauri-app/tauri.conf.json index 4ffe45a..1c18925 100644 --- a/crates/tauri-app/tauri.conf.json +++ b/crates/tauri-app/tauri.conf.json @@ -1,6 +1,6 @@ { "productName": "itsgoin", - "version": "0.4.0", + "version": "0.4.1", "identifier": "com.itsgoin.app", "build": { "frontendDist": "../../frontend", diff --git a/docs/TODO-hardening.md b/docs/TODO-hardening.md new file mode 100644 index 0000000..25fba59 --- /dev/null +++ b/docs/TODO-hardening.md @@ -0,0 +1,204 @@ +# DOS Hardening TODO + +Identified during v0.4.0 audit (2026-03-21). Implement before v0.4.1. + +## CRITICAL — Lock Contention (v4-introduced) + +### L1. ManifestPush discovery holds cm lock during network I/O (connection.rs:4847-4981) +- Spawned task grabs cm lock, calls send_post_fetch (QUIC I/O), waits for response — all while locked +- Every connection operation queues behind it (5s+ freeze possible) +- **Fix:** Gather connection handle before locking cm. PostFetch outside lock. Brief re-acquire for DB writes. + +### L2. Pull request handler holds lock during filtering (connection.rs:1855-1905) +- Loads ALL posts, loops through checking visibility + timestamps while holding storage lock +- 50K posts = 500ms+ lock hold +- **Fix:** Load posts under lock (brief), release, filter without lock (CPU only), re-acquire briefly for is_deleted() on filtered subset. + +### L3. Pull sender's second lock too long (connection.rs:1650-1721, 1572-1624) +- After receiving posts: store + add_upstream (count query each) + update_last_sync — all under one lock +- 100 posts = 100 inserts + 100 count queries + 20 author updates +- **Fix:** Split into two brief locks. 
First: bulk store posts. Second: batch upstream adds + last_sync updates. Collect unique authors during first lock. + +### L4. Per-post engagement lock acquisitions (connection.rs:1777-1833) +- Lock acquired/released 100 times in tight loop (once per post) +- Each acquisition blocks behind other tasks +- **Fix:** Batch writes. Collect all engagement results, acquire lock once, write all. Network I/O already outside lock. + +### L5. Stale follows query (node.rs:2528-2530) — LOW +- get_stale_follows every 60s, brief query, acceptable +- **Fix (optional):** Add index on follows(last_sync_ms) if missing + +## HIGH Priority — DOS + +### 1. Stream handler cap (connection.rs:4252, 4270) +- Max 10 concurrent workers per connection via Semaphore +- Excess streams wait, not spawned unbounded + +### 2. Slot index memory bomb (connection.rs:5754-5792) +- Soft 1K slot limit per post +- Author can sign capacity increase that propagates via BlobHeaderDiff +- Without author signature, cap stays at 1K +- Consider thread-split pattern for overflow (already exists for 16KB comments) + +### 3. ManifestPush amplification (connection.rs:4877-4936) +- Custom ManifestPush for new posts: only deliver [new_post_id, previous_post_id] +- Each CDN partner updates their own local manifest copy +- Same diff pattern useful for N+10 updates +- Lower bandwidth, low-priority background task + +## MEDIUM Priority + +### 4. Post list pagination (connection.rs:1857) +- Limit to 200 posts per pull response +- ~100KB memory, <5ms lock hold +- Next sync cycle catches remainder via since_ms timestamps + +### 5. Eviction candidate cap (storage.rs:3678-3737) +- Limit to 100 candidates per batch +- ~40KB memory, <5ms lock hold +- Next 5-min cycle catches more if needed + +### 6. Payload element abuse — CDN consensus check +- Before accepting a large engagement update, check 1-2 CDN neighbors +- "Does your header for this post look like this?" 
If not → reject +- Attacker must compromise multiple CDN nodes to pass +- No trust scoring needed — just peer corroboration + +### 7. Lock acquisition timeouts (connection.rs: throughout) +- 5-second timeout on storage lock acquisition +- On timeout: skip operation, try next cycle +- Log: operation name, wait duration, who holds the lock +- Add `last_lock_holder: AtomicU64` storing hash of acquiring function name + +### 8. Discovery task cap (connection.rs:4938-4980) +- One discovery task per peer at a time +- AtomicBool flag per connection, skip if already running + +## LOW Priority + +### 9. Engagement rate limiting +- Self-claimed: max 3 emoji + 1 comment per 10 seconds +- Chain-propagated: CDN consensus check from #6 applies +- Process only first 100 ops per BlobHeaderDiff message + +### 10. Mesh stream spawn cap (connection.rs) +- Same as #1 — 10 max concurrent handlers per connection +- Supplements auth rate limiter (which handles connection-level, not stream-level) + +### 11. Retry backoff per target +- Start at 5 seconds, triple on each failure +- 5s → 15s → 45s → 135s → 405s → 1215s → 3645s → 10935s → 14400s (cap at 4hr) +- 8 failures to hit max backoff +- Reset to 5s on success +- Track per target peer, not global + +--- + +# Security Hardening TODO + +Identified during v0.4.0 security audit (2026-03-21). + +## CRITICAL — Immediate (before next public release) + +### S1. Comment signature verification — ONE LINE FIX (connection.rs:5711-5724) +- `verify_comment_signature()` exists in crypto.rs but is NEVER called on receipt +- Add `if !crypto::verify_comment_signature(...) { continue; }` before `store_comment()` +- Infrastructure exists, just not wired up + +### S2. Reaction removal auth check — TWO LINE FIX (connection.rs:5708-5709) +- `RemoveReaction` accepts from any sender, no auth +- Add: `if *reactor == sender || sender == payload.author { ... }` +- Same pattern already used in EditComment/DeleteComment + +### S3. 
Reaction signature — ~30 lines (types.rs, crypto.rs, connection.rs) +- `Reaction` has no signature field — anyone can fake reactions from any NodeId +- Add `signature: Vec` to Reaction struct (#[serde(default)] for compat) +- Sign `(reactor + post_id + emoji + timestamp)` with reactor's ed25519 key +- Verify in handle_blob_header_diff before storing +- Follow existing `sign_comment` / `verify_comment_signature` pattern + +### S4. BlobHeader author verification — ~5 lines (connection.rs:5821-5836) +- Header rebuild uses `payload.author` without checking against stored post author +- Look up actual author from `storage.get_post(&payload.post_id)` +- Use stored author, not payload-claimed author + +## HIGH — Short-term + +### S5. PostId verification in all paths (connection.rs) +- PostPush verifies with `verify_post_id()` but some pull paths don't +- Audit all `store_post_with_visibility` call sites +- Ensure `verify_post_id()` called before each store + +### S6. Slot write protection — self-healing signature system (connection.rs:5749-5803) +- Problem: any peer can overwrite encrypted slots with garbage +- Solution (two layers): + 1. CDN tree membership check: only accept slot writes from peers in post_downstream or post_upstream for that post. Rejects random peers. + 2. Self-healing signatures: participants sign their own slot writes with the slot key (derived from CEK) and keep a local copy. On diff check, if their slot was overwritten with something they didn't sign, they re-write their signed version. Other participants verify signatures — keep the signed version, discard unsigned garbage. The legitimate version propagates through the CDN tree. Attacker must keep overwriting forever; the real version keeps coming back from every CDN node that received it. +- Relay nodes can't verify signatures (don't have CEK) but pass through all writes — participants do client-side verification on decrypt + +### S7. 
Comment edit/delete cryptographic proof (connection.rs:5726-5736) +- Currently "trust-based" — checks sender == author at transport layer +- QUIC connection IS authenticated (iroh ed25519), so sender identity is verified +- Risk: compromised relay node +- Fix: require new signature over edited content (editor proves they hold private key) +- For post-author deletes: require post author's signature over delete request + +### S8. Pull sync follow list privacy (connection.rs:1846-1915) +- PullSyncRequest sends entire follow list unencrypted to every sync peer +- Every mesh peer learns your complete social graph +- Options: + - Accept and document (mesh peers are semi-trusted infrastructure) — RECOMMENDED for now + - Bloom filter: probabilistic set, leaks less, some irrelevant posts received (acceptable bandwidth cost) + - Long-term: oblivious transfer / PIR (heavy crypto, probably not worth it for social network) + +## MEDIUM — Design review + +### S9. Nonce reuse guard (crypto.rs:54-56) +- ChaCha20-Poly1305 catastrophic on nonce reuse +- RNG is reliable on modern OS (getrandom syscall) +- Add sanity check: if nonce is all zeros after generation, panic rather than encrypt +- One-line guard + +### S10. Slot timing metadata leakage (connection.rs:5757, 5776) +- `header.updated_at` changes on slot writes, leaking WHEN engagement occurs on private posts +- Passive observer can correlate timestamps with known user behavior +- Fix: round updated_at to 10-minute buckets for private posts +- Or batch slot writes on fixed schedule rather than immediately + +### S11. Per-author engagement rate limiting (connection.rs:5699-5725) +- A peer can send 10,000 fake reactions in one BlobHeaderDiff +- Cap ops per message (100 max per DOS hardening #9) +- Deduplicate by (reactor, post_id, emoji) — storage already does ON CONFLICT DO UPDATE +- Combined with reaction signatures (S3), fake NodeId reactions become impossible + +## LOW + +--- + +# Data Cleanup TODO + +### D1. 
post_downstream not cleaned on post delete (storage.rs delete_post) +- When a post is deleted, downstream registrations stay forever +- Fix: add `DELETE FROM post_downstream WHERE post_id = ?1` in delete_post() +- Also add: `DELETE FROM post_upstream WHERE post_id = ?1` +- Also add: `DELETE FROM seen_engagement WHERE post_id = ?1` +- One-line fixes each + +### D2. Document BlobHeader-table relationship (storage.rs store_blob_header) +- Header JSON is a snapshot, reactions/comments tables are authoritative +- They can temporarily diverge (BlobHeaderResponse arrives with newer header than tables) +- Header rebuilt from tables on next engagement op +- Add clarifying comment to store_blob_header + +--- + +# Low Priority + +### S12. Hex parse error logging (web.rs:110-119) +- Malformed hex strings silently return 404 +- Add debug logging for malformed inputs + +### S13. Edit comment signature consistency (storage.rs:4338-4343) +- edit_comment updates content without updating signature +- If signature verification (S1) is enabled, edited comments would have invalid signatures +- Fix: add signature parameter to edit_comment, re-sign edited content diff --git a/website/design.html b/website/design.html index 5a86b40..5c938b7 100644 --- a/website/design.html +++ b/website/design.html @@ -44,7 +44,8 @@

This is the canonical technical reference for ItsGoin. It describes the vision, the architecture, and the current state of every subsystem — with full implementation detail. This document is versioned; each update records what changed.

Changelog -

v0.4.0 (2026-03-21): Protocol v4 — header-driven sync. ManifestPush as primary post notification. Slim PullSyncRequest (per-author timestamps, not full post ID list). Tiered engagement checks (5min/1hr/4hr/24hr by content age). Multi-upstream (3 max) with fallback chain. Auto-prefetch followed authors <90d. Self Last Encounter per-author tracking. Encrypted-but-not-for-us CDN caching. Serial engagement polling. ~90% bandwidth reduction for established nodes.

+

v0.4.1 (2026-03-21): Security hardening — reaction signatures (ed25519), comment signature verification on receipt, reaction removal authorization, BlobHeader author verification. Lock contention fixes — ManifestPush discovery (cm lock released during I/O), pull request handler (filter without lock), pull sender (split into brief locks), engagement checker (batch writes per chunk). Data cleanup — post deletion cleans downstream/upstream/seen tables.

+

v0.4.0 (2026-03-21): Protocol v4 — header-driven sync. ManifestPush as primary post notification. Slim PullSyncRequest (per-author timestamps, not full post ID list). Tiered engagement checks (5min/1hr/4hr/24hr by content age). Multi-upstream (3 max) with fallback chain. Auto-prefetch followed authors <90d. Self Last Encounter per-author tracking. Encrypted-but-not-for-us CDN caching. Serial engagement polling. ~90% bandwidth reduction for established nodes.

v0.3.6 (2026-03-20): Active CDN replication — all devices proactively replicate recent posts to peers (desktops > anchors > phones priority). ReplicationRequest/Response (0xE1/0xE2). Device roles (Intermittent/Available/Persistent) advertised in InitialExchange. Bandwidth budgets: replication (pull to cache) + delivery (serve requests), hourly auto-reset, phones 100MB/1GB, desktops 200MB/2GB, anchors 200MB/1GB. Cache management: 1GB default, configurable, eviction cycle activated with share-link priority boost. Engagement distribution fix — BlobHeader JSON rebuilt after diff ops. Tombstone system — deleted reactions/comments tombstoned, propagate via pull sync. Persistent notifications via seen_engagement/seen_messages tables. DOS hardening: fan-out cap (10), prefetch cap (20), downstream registration cap (50), delivery budget enforcement. Pull preference reordered: non-anchors first. Network indicator — header dot (black/red/yellow/green) + capability labels. Tab badges — contextual counts (new posts, engagement, online, unread). Message read tracking on open/close/send. Stats bar removed.

v0.3.5 (2026-03-20): Private blob encryption — attachments on encrypted posts (Friends/Circle/Direct) now encrypted with same CEK as post text; public blobs unchanged; CID on ciphertext. Blob prefetch on sync — attachments eagerly fetched after post pull for offline availability. Crypto refactoring — extracted reusable primitives (encrypt/decrypt_bytes_with_cek, unwrap_cek_for_recipient, unwrap_group_cek). Intent-based post filtering — feed/myposts/messages filter on intentKind instead of encryption state. Blob decryption API (get_blob_for_post). Download filename sanitization. Encrypted receipt & comment slots — private posts carry noise-prefilled encrypted slots in BlobHeader for delivery/read/react receipts and private comments; CDN-propagated as opaque bytes; slot key derived from post CEK; 3 new BlobHeaderDiffOps (WriteReceiptSlot, WriteCommentSlot, AddCommentSlots). Message UI — DM delivery indicators (checkmark/double/blue/emoji), auto-seen on view, react button on messages.

v0.3.4 (2026-03-18): Comment edit & delete with trust-based propagation. Native notifications via Tauri plugin (messages, posts, reactions, comments). Forward-compatible BlobHeaderDiffOp::Unknown variant. Following Online/Offline lightbox. Comment threading scoping fix. Dropdown text legibility fix. Mobile hamburger nav for website.

diff --git a/website/download.html b/website/download.html index e160377..39d9320 100644 --- a/website/download.html +++ b/website/download.html @@ -25,16 +25,16 @@

Download ItsGoin

Available for Android and Linux. Free and open source.

-

Version 0.4.0 — March 15, 2026

+

Version 0.4.1 — March 21, 2026

@@ -46,7 +46,7 @@

Android

  1. Download the APK — Tap the button above. Your browser may warn that this type of file can be harmful — tap Download anyway.
  2. -
  3. Open the file — When the download finishes, tap the notification or find itsgoin-0.4.0.apk in your Downloads folder and tap it.
  4. +
  5. Open the file — When the download finishes, tap the notification or find itsgoin-0.4.1.apk in your Downloads folder and tap it.
  6. Allow installation — Android will ask you to allow installs from this source. Tap Settings, toggle "Allow from this source", then go back and tap Install.
  7. Launch the app — Once installed, tap Open or find ItsGoin in your app drawer.
@@ -59,8 +59,8 @@

Linux (AppImage)

  1. Download the AppImage — Click the button above to download.
  2. -
  3. Make it executable — Open a terminal and run:
    chmod +x itsgoin_0.4.0_amd64.AppImage
  4. -
  5. Run it — Double-click the file, or from the terminal:
    ./itsgoin_0.4.0_amd64.AppImage
  6. +
  7. Make it executable — Open a terminal and run:
    chmod +x itsgoin_0.4.1_amd64.AppImage
  8. +
  9. Run it — Double-click the file, or from the terminal:
    ./itsgoin_0.4.1_amd64.AppImage
Note: If it doesn't launch, you may need to install FUSE:
sudo apt install libfuse2 (Debian/Ubuntu) or sudo dnf install fuse (Fedora). @@ -71,6 +71,19 @@

Changelog

+
v0.4.1 — March 21, 2026
+
    +
  • Security: Reaction signatures — Reactions now carry ed25519 signatures. Forged reactions from other NodeIds are rejected. Backward-compatible with unsigned reactions from older nodes.
  • +
  • Security: Comment signature verification — Comment signatures (already present) are now verified on receipt. Forged comments rejected.
  • +
  • Security: Reaction removal auth — Only the reactor or post author can remove reactions. Previously any peer could strip reactions.
  • +
  • Security: BlobHeader author verification — Header rebuild verifies author against stored post, not trusted from payload.
  • +
  • Lock contention: ManifestPush discovery — cm lock released before PostFetch network I/O. Was holding lock during entire discovery (5s+ freeze).
  • +
  • Lock contention: Pull request handler — Load posts under lock, filter without lock, brief re-lock for is_deleted. Was holding lock during full post list iteration.
  • +
  • Lock contention: Pull sender — Split into two brief locks (store, then batch upstream+sync). Was holding one long lock for all operations.
  • +
  • Lock contention: Engagement checker — Batch writes per chunk with single lock. Was acquiring lock per post (100+ times).
  • +
  • Data cleanup — Post deletion now cleans up post_downstream, post_upstream, and seen_engagement tables.
  • +
+
v0.4.0 — March 21, 2026
  • Protocol v4: Header-driven sync — Major sync protocol revision. ManifestPush now triggers post discovery from CDN tree headers. Bandwidth reduced ~90% for established nodes.