v0.4.1: Security hardening, lock contention fixes, data cleanup

Security:
- Reaction signatures: ed25519 sign/verify (sign_reaction, verify_reaction_signature)
  Backward-compatible — unsigned reactions from old nodes still accepted
- Comment signature verification: verify_comment_signature now called on receipt
- Reaction removal authorization: only reactor or post author can remove
- BlobHeader author verification: lookup actual author from storage, don't trust payload

Lock contention (4 fixes):
- ManifestPush discovery: cm lock released before PostFetch I/O
- Pull request handler: load under lock, filter without lock, brief re-lock for is_deleted
- Pull sender: split into two brief locks (store posts, then batch upstream+sync)
- Engagement checker: batch all chunk results, single lock for writes

Data cleanup:
- Post deletion cleans post_downstream, post_upstream, seen_engagement tables
- Added TODO-hardening.md documenting remaining DoS/security/lock/data issues

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Scott Reimers 2026-03-21 19:30:38 -04:00
parent bbaacf9b6c
commit bb6f2b64b0
11 changed files with 500 additions and 138 deletions

2
Cargo.lock generated
View file

@ -2746,7 +2746,7 @@ dependencies = [
[[package]] [[package]]
name = "itsgoin-desktop" name = "itsgoin-desktop"
version = "0.4.0" version = "0.4.1"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"base64 0.22.1", "base64 0.22.1",

View file

@ -1596,32 +1596,42 @@ impl ConnectionManager {
.as_millis() as u64; .as_millis() as u64;
let mut stored = false; let mut stored = false;
let mut new_post_ids: Vec<PostId> = Vec::new(); let mut new_post_ids: Vec<PostId> = Vec::new();
let storage = self.storage.lock().await;
let mut synced_authors: HashSet<NodeId> = HashSet::new(); let mut synced_authors: HashSet<NodeId> = HashSet::new();
for sp in response.posts {
if verify_post_id(&sp.id, &sp.post) && !storage.is_deleted(&sp.id)? { // Brief lock 1: store posts
let _ = storage.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility); {
let prio = storage.get_post_upstreams(&sp.id).map(|v| v.len() as u8).unwrap_or(0); let storage = self.storage.lock().await;
let _ = storage.add_post_upstream(&sp.id, from, prio); for sp in &response.posts {
new_post_ids.push(sp.id); if verify_post_id(&sp.id, &sp.post) && !storage.is_deleted(&sp.id)? {
synced_authors.insert(sp.post.author); let _ = storage.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility);
if sp.id == notification.post_id { new_post_ids.push(sp.id);
stored = true; synced_authors.insert(sp.post.author);
if sp.id == notification.post_id {
stored = true;
}
} }
} }
} }
// Protocol v4: update last_sync_ms for authors whose posts were received // Lock RELEASED
for author in &synced_authors {
let _ = storage.update_follow_last_sync(author, now_ms); // Brief lock 2: upstream + last_sync + visibility updates
} {
for vu in response.visibility_updates { let storage = self.storage.lock().await;
if let Some(post) = storage.get_post(&vu.post_id)? { for pid in &new_post_ids {
if post.author == vu.author { let prio = storage.get_post_upstreams(pid).map(|v| v.len() as u8).unwrap_or(0);
let _ = storage.update_post_visibility(&vu.post_id, &vu.visibility); let _ = storage.add_post_upstream(pid, from, prio);
}
for author in &synced_authors {
let _ = storage.update_follow_last_sync(author, now_ms);
}
for vu in &response.visibility_updates {
if let Some(post) = storage.get_post(&vu.post_id)? {
if post.author == vu.author {
let _ = storage.update_post_visibility(&vu.post_id, &vu.visibility);
}
} }
} }
} }
drop(storage);
// Register as downstream for new posts (cap at 50 to avoid flooding) // Register as downstream for new posts (cap at 50 to avoid flooding)
if !new_post_ids.is_empty() { if !new_post_ids.is_empty() {
@ -1679,32 +1689,35 @@ impl ConnectionManager {
.unwrap_or_default() .unwrap_or_default()
.as_millis() as u64; .as_millis() as u64;
// Brief lock 1: store posts
let mut synced_authors: HashSet<NodeId> = HashSet::new();
{ {
let storage = self.storage.lock().await; let storage = self.storage.lock().await;
// Track which authors had posts received for last_sync_ms update
let mut synced_authors: HashSet<NodeId> = HashSet::new();
for sp in &response.posts { for sp in &response.posts {
if storage.is_deleted(&sp.id)? { if storage.is_deleted(&sp.id)? {
continue; continue;
} }
if verify_post_id(&sp.id, &sp.post) { if verify_post_id(&sp.id, &sp.post) {
if storage.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility)? { if storage.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility)? {
// Record who we got this post from (upstream for engagement propagation)
let prio = storage.get_post_upstreams(&sp.id).map(|v| v.len() as u8).unwrap_or(0);
let _ = storage.add_post_upstream(&sp.id, peer_id, prio);
new_post_ids.push(sp.id); new_post_ids.push(sp.id);
posts_received += 1; posts_received += 1;
} }
synced_authors.insert(sp.post.author); synced_authors.insert(sp.post.author);
} }
} }
}
// Lock RELEASED
// Protocol v4: update last_sync_ms for authors whose posts were received // Brief lock 2: upstream + last_sync + visibility updates
{
let storage = self.storage.lock().await;
for pid in &new_post_ids {
let prio = storage.get_post_upstreams(pid).map(|v| v.len() as u8).unwrap_or(0);
let _ = storage.add_post_upstream(pid, peer_id, prio);
}
for author in &synced_authors { for author in &synced_authors {
let _ = storage.update_follow_last_sync(author, now_ms); let _ = storage.update_follow_last_sync(author, now_ms);
} }
for vu in response.visibility_updates { for vu in response.visibility_updates {
if vu.author != *peer_id { if vu.author != *peer_id {
// Only accept visibility updates authored by the responding peer // Only accept visibility updates authored by the responding peer
@ -1775,8 +1788,10 @@ impl ConnectionManager {
let mut updated = 0; let mut updated = 0;
// Request headers in batches to avoid opening too many streams // Request headers in batches to avoid opening too many streams
for chunk in post_headers.chunks(20) { for chunk in post_headers.chunks(20) {
// Collect all results for this chunk WITHOUT holding the lock
let mut results: Vec<([u8; 32], Option<(String, crate::types::BlobHeader)>)> = Vec::new();
for (post_id, current_ts) in chunk { for (post_id, current_ts) in chunk {
let result: anyhow::Result<()> = async { let result: anyhow::Result<Option<(String, crate::types::BlobHeader)>> = async {
let (mut send, mut recv) = pc.connection.open_bi().await?; let (mut send, mut recv) = pc.connection.open_bi().await?;
let request = BlobHeaderRequestPayload { let request = BlobHeaderRequestPayload {
post_id: *post_id, post_id: *post_id,
@ -1792,45 +1807,52 @@ impl ConnectionManager {
let response: BlobHeaderResponsePayload = let response: BlobHeaderResponsePayload =
read_payload(&mut recv, MAX_PAYLOAD).await?; read_payload(&mut recv, MAX_PAYLOAD).await?;
// Brief re-lock for writes
let storage = self.storage.lock().await;
// Always update last_check_ms regardless of whether engagement changed
let _ = storage.update_post_last_check(post_id, now_ms);
if response.updated { if response.updated {
if let Some(json) = &response.header_json { if let Some(json) = response.header_json {
if let Ok(header) = serde_json::from_str::<crate::types::BlobHeader>(json) { if let Ok(header) = serde_json::from_str::<crate::types::BlobHeader>(&json) {
// Store the full header JSON return Ok(Some((json, header)));
let _ = storage.store_blob_header(
&header.post_id,
&header.author,
json,
header.updated_at,
);
// Apply individual reactions and comments.
// store_reaction / store_comment are tombstone-aware:
// they compare timestamps and respect deleted_at fields.
for reaction in &header.reactions {
let _ = storage.store_reaction(reaction);
}
for comment in &header.comments {
let _ = storage.store_comment(comment);
}
let _ = storage.set_comment_policy(&header.post_id, &header.policy);
// Update last_engagement_ms when new engagement arrives
let _ = storage.update_post_last_engagement(post_id, now_ms);
updated += 1;
} }
} }
} }
drop(storage); Ok(None)
Ok(())
} }
.await; .await;
if let Err(e) = result { match result {
trace!(post_id = hex::encode(post_id), error = %e, "Failed to fetch engagement header"); Ok(header_opt) => results.push((*post_id, header_opt)),
Err(e) => {
trace!(post_id = hex::encode(post_id), error = %e, "Failed to fetch engagement header");
}
} }
} }
// Single lock for ALL writes in this chunk
if !results.is_empty() {
let storage = self.storage.lock().await;
for (post_id, header_opt) in &results {
let _ = storage.update_post_last_check(post_id, now_ms);
if let Some((json, header)) = header_opt {
let _ = storage.store_blob_header(
&header.post_id,
&header.author,
json,
header.updated_at,
);
// store_reaction / store_comment are tombstone-aware:
// they compare timestamps and respect deleted_at fields.
for reaction in &header.reactions {
let _ = storage.store_reaction(reaction);
}
for comment in &header.comments {
let _ = storage.store_comment(comment);
}
let _ = storage.set_comment_policy(&header.post_id, &header.policy);
let _ = storage.update_post_last_engagement(post_id, now_ms);
updated += 1;
}
}
drop(storage);
}
// Lock RELEASED before next chunk
} }
Ok(updated) Ok(updated)
@ -1852,55 +1874,61 @@ impl ConnectionManager {
let since_ms_map: HashMap<NodeId, u64> = request.since_ms.into_iter().collect(); let since_ms_map: HashMap<NodeId, u64> = request.since_ms.into_iter().collect();
let use_since_ms = !since_ms_map.is_empty(); let use_since_ms = !since_ms_map.is_empty();
let (posts, vis_updates) = { // Phase 1: Brief lock — load data
let (all_posts, group_members) = {
let storage = self.storage.lock().await; let storage = self.storage.lock().await;
let all_posts = storage.list_posts_with_visibility()?; let posts = storage.list_posts_with_visibility()?;
let group_members = storage.get_all_group_members().unwrap_or_default(); let members = storage.get_all_group_members().unwrap_or_default();
(posts, members)
};
// Lock RELEASED
let mut posts_to_send = Vec::new(); // Phase 2: Filter without lock (pure CPU)
let mut vis_updates_to_send = Vec::new(); let mut candidates_to_send = Vec::new();
let mut vis_updates_to_send = Vec::new();
for (id, post, visibility) in all_posts { for (id, post, visibility) in all_posts {
let should_send = let should_send =
crate::network::should_send_post(&post, &visibility, &remote_node_id, &their_follows, &group_members); crate::network::should_send_post(&post, &visibility, &remote_node_id, &their_follows, &group_members);
if !should_send { if !should_send {
continue; continue;
}
// Determine if peer already has this post
let peer_has_post = if use_since_ms {
// v4 path: filter by per-author timestamp (60s fudge for clock skew)
if let Some(&since) = since_ms_map.get(&post.author) {
post.timestamp_ms <= since + 60_000
} else {
false // no since_ms for this author — they want everything
}
} else {
// Legacy path: use have_post_ids
their_post_ids.contains(&id)
};
if !peer_has_post {
if !storage.is_deleted(&id)? {
posts_to_send.push(SyncPost {
id,
post,
visibility,
});
}
} else {
// They already have the post — send visibility update if we authored it
if post.author == self.our_node_id {
vis_updates_to_send.push(crate::types::VisibilityUpdate {
post_id: id,
author: self.our_node_id,
visibility,
});
}
}
} }
// Determine if peer already has this post
let peer_has_post = if use_since_ms {
// v4 path: filter by per-author timestamp (60s fudge for clock skew)
if let Some(&since) = since_ms_map.get(&post.author) {
post.timestamp_ms <= since + 60_000
} else {
false // no since_ms for this author — they want everything
}
} else {
// Legacy path: use have_post_ids
their_post_ids.contains(&id)
};
if !peer_has_post {
candidates_to_send.push((id, post, visibility));
} else {
// They already have the post — send visibility update if we authored it
if post.author == self.our_node_id {
vis_updates_to_send.push(crate::types::VisibilityUpdate {
post_id: id,
author: self.our_node_id,
visibility,
});
}
}
}
// Phase 3: Brief re-lock for is_deleted checks on filtered posts
let (posts, vis_updates) = {
let storage = self.storage.lock().await;
let posts_to_send: Vec<SyncPost> = candidates_to_send.into_iter()
.filter(|(id, _, _)| !storage.is_deleted(id).unwrap_or(false))
.map(|(id, post, visibility)| SyncPost { id, post, visibility })
.collect();
(posts_to_send, vis_updates_to_send) (posts_to_send, vis_updates_to_send)
}; };
@ -4940,32 +4968,63 @@ impl ConnectionManager {
let cm_arc = conn_mgr.clone(); let cm_arc = conn_mgr.clone();
let sender_id = remote_node_id; let sender_id = remote_node_id;
tokio::spawn(async move { tokio::spawn(async move {
let cm = cm_arc.lock().await; // Brief lock: get connection handle only
let conn = {
let cm = cm_arc.lock().await;
cm.connections_ref().get(&sender_id).map(|pc| pc.connection.clone())
};
// cm lock RELEASED
let Some(conn) = conn else { return };
let mut fetched = 0usize; let mut fetched = 0usize;
for (post_id, _author) in &discovery_posts { for (post_id, _author) in &discovery_posts {
if fetched >= 10 { break; } if fetched >= 10 { break; }
match cm.send_post_fetch(&sender_id, post_id).await {
// PostFetch network I/O WITHOUT any lock
let result = async {
use crate::protocol::{PostFetchRequestPayload, PostFetchResponsePayload};
let (mut send, mut recv) = conn.open_bi().await?;
let req = PostFetchRequestPayload { post_id: *post_id };
write_typed_message(&mut send, MessageType::PostFetchRequest, &req).await?;
send.finish()?;
let msg_type = tokio::time::timeout(
std::time::Duration::from_secs(10),
read_message_type(&mut recv),
).await??;
if msg_type != MessageType::PostFetchResponse {
return anyhow::Ok(None);
}
let resp: PostFetchResponsePayload = read_payload(&mut recv, MAX_PAYLOAD).await?;
anyhow::Ok(resp.post)
}.await;
match result {
Ok(Some(sync_post)) => { Ok(Some(sync_post)) => {
if crate::content::verify_post_id(&sync_post.id, &sync_post.post) { if crate::content::verify_post_id(&sync_post.id, &sync_post.post) {
let storage = cm.storage.lock().await; // Brief re-acquire for storage writes only
if storage.store_post_with_visibility(&sync_post.id, &sync_post.post, &sync_post.visibility).unwrap_or(false) { let stored = {
// Set upstream + register as downstream let cm = cm_arc.lock().await;
let prio = storage.get_post_upstreams(&sync_post.id).map(|v| v.len() as u8).unwrap_or(0); let storage = cm.storage.lock().await;
let _ = storage.add_post_upstream(&sync_post.id, &sender_id, prio); if storage.store_post_with_visibility(&sync_post.id, &sync_post.post, &sync_post.visibility).unwrap_or(false) {
// Update last_sync_ms for the author let prio = storage.get_post_upstreams(&sync_post.id).map(|v| v.len() as u8).unwrap_or(0);
let now = std::time::SystemTime::now() let _ = storage.add_post_upstream(&sync_post.id, &sender_id, prio);
.duration_since(std::time::UNIX_EPOCH) let now = std::time::SystemTime::now()
.unwrap_or_default() .duration_since(std::time::UNIX_EPOCH)
.as_millis() as u64; .unwrap_or_default()
let _ = storage.update_follow_last_sync(&sync_post.post.author, now); .as_millis() as u64;
drop(storage); let _ = storage.update_follow_last_sync(&sync_post.post.author, now);
// Register as downstream with the sender true
if let Some(pc) = cm.connections_ref().get(&sender_id) { } else {
let reg = crate::protocol::PostDownstreamRegisterPayload { post_id: sync_post.id }; false
if let Ok(mut send) = pc.connection.open_uni().await { }
let _ = write_typed_message(&mut send, MessageType::PostDownstreamRegister, &reg).await; };
let _ = send.finish(); // cm lock RELEASED — register downstream without lock
} if stored {
let reg = crate::protocol::PostDownstreamRegisterPayload { post_id: sync_post.id };
if let Ok(mut send) = conn.open_uni().await {
let _ = write_typed_message(&mut send, MessageType::PostDownstreamRegister, &reg).await;
let _ = send.finish();
} }
fetched += 1; fetched += 1;
} }
@ -5703,10 +5762,22 @@ impl ConnectionManager {
if let crate::types::ReactPermission::None = policy.allow_reacts { if let crate::types::ReactPermission::None = policy.allow_reacts {
continue; continue;
} }
// Verify signature (skip if empty for backward compat with old nodes)
if !reaction.signature.is_empty() && !crate::crypto::verify_reaction_signature(
&reaction.reactor,
&payload.post_id,
&reaction.emoji,
reaction.timestamp_ms,
&reaction.signature,
) {
continue; // Skip forged reactions
}
let _ = storage.store_reaction(reaction); let _ = storage.store_reaction(reaction);
} }
BlobHeaderDiffOp::RemoveReaction { reactor, emoji, post_id } => { BlobHeaderDiffOp::RemoveReaction { reactor, emoji, post_id } => {
let _ = storage.remove_reaction(reactor, post_id, emoji); if *reactor == sender || sender == payload.author {
let _ = storage.remove_reaction(reactor, post_id, emoji);
}
} }
BlobHeaderDiffOp::AddComment(comment) => { BlobHeaderDiffOp::AddComment(comment) => {
if policy.blocklist.contains(&comment.author) { if policy.blocklist.contains(&comment.author) {
@ -5721,6 +5792,15 @@ impl ConnectionManager {
} }
crate::types::CommentPermission::Public => {} crate::types::CommentPermission::Public => {}
} }
if !crate::crypto::verify_comment_signature(
&comment.author,
&payload.post_id,
&comment.content,
comment.timestamp_ms,
&comment.signature,
) {
continue; // Skip forged comments
}
let _ = storage.store_comment(comment); let _ = storage.store_comment(comment);
} }
BlobHeaderDiffOp::EditComment { author, post_id, timestamp_ms, new_content } => { BlobHeaderDiffOp::EditComment { author, post_id, timestamp_ms, new_content } => {
@ -5832,8 +5912,14 @@ impl ConnectionManager {
header.comments = comments; header.comments = comments;
header.policy = policy; header.policy = policy;
header.updated_at = payload.timestamp_ms; header.updated_at = payload.timestamp_ms;
// Look up actual post author (don't trust payload.author)
let actual_author = storage.get_post(&payload.post_id)
.ok().flatten()
.map(|p| p.author)
.unwrap_or(payload.author); // fallback if post not stored yet
header.author = actual_author;
if let Ok(json) = serde_json::to_string(&header) { if let Ok(json) = serde_json::to_string(&header) {
let _ = storage.store_blob_header(&payload.post_id, &payload.author, &json, payload.timestamp_ms); let _ = storage.store_blob_header(&payload.post_id, &actual_author, &json, payload.timestamp_ms);
} }
// Phase 4: Update last_engagement_ms when engagement arrives via diff // Phase 4: Update last_engagement_ms when engagement arrives via diff
let _ = storage.update_post_last_engagement(&payload.post_id, payload.timestamp_ms); let _ = storage.update_post_last_engagement(&payload.post_id, payload.timestamp_ms);

View file

@ -549,6 +549,7 @@ pub fn random_slot_noise(size: usize) -> Vec<u8> {
const REACTION_WRAP_CONTEXT: &str = "itsgoin/private-reaction/v1"; const REACTION_WRAP_CONTEXT: &str = "itsgoin/private-reaction/v1";
const COMMENT_SIGN_CONTEXT: &str = "itsgoin/comment-sig/v1"; const COMMENT_SIGN_CONTEXT: &str = "itsgoin/comment-sig/v1";
const REACTION_SIGN_CONTEXT: &str = "itsgoin/reaction-sig/v1";
/// Encrypt a private reaction payload (only the post author can decrypt). /// Encrypt a private reaction payload (only the post author can decrypt).
/// Uses X25519 DH between reactor and author, then ChaCha20-Poly1305. /// Uses X25519 DH between reactor and author, then ChaCha20-Poly1305.
@ -645,6 +646,47 @@ pub fn verify_comment_signature(
verifying_key.verify(digest.as_bytes(), &sig).is_ok() verifying_key.verify(digest.as_bytes(), &sig).is_ok()
} }
/// Sign a reaction: ed25519 over BLAKE3(reactor || post_id || emoji || timestamp_ms).
pub fn sign_reaction(
seed: &[u8; 32],
reactor: &NodeId,
post_id: &PostId,
emoji: &str,
timestamp_ms: u64,
) -> Vec<u8> {
let signing_key = SigningKey::from_bytes(seed);
let mut hasher = blake3::Hasher::new_derive_key(REACTION_SIGN_CONTEXT);
hasher.update(reactor);
hasher.update(post_id);
hasher.update(emoji.as_bytes());
hasher.update(&timestamp_ms.to_le_bytes());
let digest = hasher.finalize();
signing_key.sign(digest.as_bytes()).to_bytes().to_vec()
}
/// Verify a reaction's ed25519 signature.
pub fn verify_reaction_signature(
reactor: &NodeId,
post_id: &PostId,
emoji: &str,
timestamp_ms: u64,
signature: &[u8],
) -> bool {
let Ok(verifying_key) = VerifyingKey::from_bytes(reactor) else {
return false;
};
let Ok(sig) = ed25519_dalek::Signature::from_slice(signature) else {
return false;
};
let mut hasher = blake3::Hasher::new_derive_key(REACTION_SIGN_CONTEXT);
hasher.update(reactor);
hasher.update(post_id);
hasher.update(emoji.as_bytes());
hasher.update(&timestamp_ms.to_le_bytes());
let digest = hasher.finalize();
verifying_key.verify(digest.as_bytes(), &sig).is_ok()
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;

View file

@ -3493,6 +3493,7 @@ impl Node {
None None
}; };
let signature = crate::crypto::sign_reaction(&self.secret_seed, &our_node_id, &post_id, &emoji, now);
let reaction = crate::types::Reaction { let reaction = crate::types::Reaction {
reactor: our_node_id, reactor: our_node_id,
emoji: emoji.clone(), emoji: emoji.clone(),
@ -3500,6 +3501,7 @@ impl Node {
timestamp_ms: now, timestamp_ms: now,
encrypted_payload, encrypted_payload,
deleted_at: None, deleted_at: None,
signature,
}; };
// Store locally // Store locally

View file

@ -2197,12 +2197,18 @@ impl Storage {
Ok(inserted > 0) Ok(inserted > 0)
} }
/// Apply a delete: remove the post from the posts table if author matches. /// Apply a delete: remove the post from the posts table if author matches,
/// and clean up associated downstream/upstream/engagement tracking rows.
pub fn apply_delete(&self, record: &DeleteRecord) -> anyhow::Result<bool> { pub fn apply_delete(&self, record: &DeleteRecord) -> anyhow::Result<bool> {
let deleted = self.conn.execute( let deleted = self.conn.execute(
"DELETE FROM posts WHERE id = ?1 AND author = ?2", "DELETE FROM posts WHERE id = ?1 AND author = ?2",
params![record.post_id.as_slice(), record.author.as_slice()], params![record.post_id.as_slice(), record.author.as_slice()],
)?; )?;
if deleted > 0 {
self.conn.execute("DELETE FROM post_downstream WHERE post_id = ?1", params![record.post_id.as_slice()])?;
self.conn.execute("DELETE FROM post_upstream WHERE post_id = ?1", params![record.post_id.as_slice()])?;
self.conn.execute("DELETE FROM seen_engagement WHERE post_id = ?1", params![record.post_id.as_slice()])?;
}
Ok(deleted > 0) Ok(deleted > 0)
} }
@ -4253,6 +4259,7 @@ impl Storage {
timestamp_ms: ts as u64, timestamp_ms: ts as u64,
encrypted_payload: enc, encrypted_payload: enc,
deleted_at: None, deleted_at: None,
signature: vec![],
}); });
} }
Ok(result) Ok(result)
@ -4286,6 +4293,7 @@ impl Storage {
timestamp_ms: ts as u64, timestamp_ms: ts as u64,
encrypted_payload: enc, encrypted_payload: enc,
deleted_at: del.map(|v| v as u64), deleted_at: del.map(|v| v as u64),
signature: vec![],
}); });
} }
Ok(result) Ok(result)
@ -5894,6 +5902,7 @@ mod tests {
timestamp_ms: 1000, timestamp_ms: 1000,
encrypted_payload: None, encrypted_payload: None,
deleted_at: None, deleted_at: None,
signature: vec![],
}).unwrap(); }).unwrap();
s.store_reaction(&Reaction { s.store_reaction(&Reaction {
@ -5903,6 +5912,7 @@ mod tests {
timestamp_ms: 1001, timestamp_ms: 1001,
encrypted_payload: None, encrypted_payload: None,
deleted_at: None, deleted_at: None,
signature: vec![],
}).unwrap(); }).unwrap();
s.store_reaction(&Reaction { s.store_reaction(&Reaction {
@ -5912,6 +5922,7 @@ mod tests {
timestamp_ms: 1002, timestamp_ms: 1002,
encrypted_payload: None, encrypted_payload: None,
deleted_at: None, deleted_at: None,
signature: vec![],
}).unwrap(); }).unwrap();
let reactions = s.get_reactions(&post_id).unwrap(); let reactions = s.get_reactions(&post_id).unwrap();

View file

@ -724,6 +724,9 @@ pub struct Reaction {
/// Tombstone timestamp — if set, this reaction has been soft-deleted /// Tombstone timestamp — if set, this reaction has been soft-deleted
#[serde(default)] #[serde(default)]
pub deleted_at: Option<u64>, pub deleted_at: Option<u64>,
/// ed25519 signature over BLAKE3(reactor || post_id || emoji || timestamp_ms)
#[serde(default)]
pub signature: Vec<u8>,
} }
/// An inline comment on a post /// An inline comment on a post

View file

@ -1,6 +1,6 @@
[package] [package]
name = "itsgoin-desktop" name = "itsgoin-desktop"
version = "0.4.0" version = "0.4.1"
edition = "2021" edition = "2021"
[lib] [lib]

View file

@ -1,6 +1,6 @@
{ {
"productName": "itsgoin", "productName": "itsgoin",
"version": "0.4.0", "version": "0.4.1",
"identifier": "com.itsgoin.app", "identifier": "com.itsgoin.app",
"build": { "build": {
"frontendDist": "../../frontend", "frontendDist": "../../frontend",

204
docs/TODO-hardening.md Normal file
View file

@ -0,0 +1,204 @@
# DoS Hardening TODO
Identified during v0.4.0 audit (2026-03-21). Implement before v0.4.1.
## CRITICAL — Lock Contention (v4-introduced)
### L1. ManifestPush discovery holds cm lock during network I/O (connection.rs:4847-4981)
- Spawned task grabs cm lock, calls send_post_fetch (QUIC I/O), waits for response — all while locked
- Every connection operation queues behind it (5s+ freeze possible)
- **Fix:** Gather connection handle before locking cm. PostFetch outside lock. Brief re-acquire for DB writes.
### L2. Pull request handler holds lock during filtering (connection.rs:1855-1905)
- Loads ALL posts, loops through checking visibility + timestamps while holding storage lock
- 50K posts = 500ms+ lock hold
- **Fix:** Load posts under lock (brief), release, filter without lock (CPU only), re-acquire briefly for is_deleted() on filtered subset.
### L3. Pull sender's second lock too long (connection.rs:1650-1721, 1572-1624)
- After receiving posts: store + add_upstream (count query each) + update_last_sync — all under one lock
- 100 posts = 100 inserts + 100 count queries + 20 author updates
- **Fix:** Split into two brief locks. First: bulk store posts. Second: batch upstream adds + last_sync updates. Collect unique authors during first lock.
### L4. Per-post engagement lock acquisitions (connection.rs:1777-1833)
- Lock acquired/released 100 times in tight loop (once per post)
- Each acquisition blocks behind other tasks
- **Fix:** Batch writes. Collect all engagement results, acquire lock once, write all. Network I/O already outside lock.
### L5. Stale follows query (node.rs:2528-2530) — LOW
- get_stale_follows every 60s, brief query, acceptable
- **Fix (optional):** Add index on follows(last_sync_ms) if missing
## HIGH Priority — DoS
### 1. Stream handler cap (connection.rs:4252, 4270)
- Max 10 concurrent workers per connection via Semaphore
- Excess streams wait, not spawned unbounded
### 2. Slot index memory bomb (connection.rs:5754-5792)
- Soft 1K slot limit per post
- Author can sign capacity increase that propagates via BlobHeaderDiff
- Without author signature, cap stays at 1K
- Consider thread-split pattern for overflow (already exists for 16KB comments)
### 3. ManifestPush amplification (connection.rs:4877-4936)
- Custom ManifestPush for new posts: only deliver [new_post_id, previous_post_id]
- Each CDN partner updates their own local manifest copy
- Same diff pattern useful for N+10 updates
- Lower bandwidth, low-priority background task
## MEDIUM Priority
### 4. Post list pagination (connection.rs:1857)
- Limit to 200 posts per pull response
- ~100KB memory, <5ms lock hold
- Next sync cycle catches remainder via since_ms timestamps
### 5. Eviction candidate cap (storage.rs:3678-3737)
- Limit to 100 candidates per batch
- ~40KB memory, <5ms lock hold
- Next 5-min cycle catches more if needed
### 6. Payload element abuse — CDN consensus check
- Before accepting a large engagement update, check 1-2 CDN neighbors
- "Does your header for this post look like this?" If not → reject
- Attacker must compromise multiple CDN nodes to pass
- No trust scoring needed — just peer corroboration
### 7. Lock acquisition timeouts (connection.rs: throughout)
- 5-second timeout on storage lock acquisition
- On timeout: skip operation, try next cycle
- Log: operation name, wait duration, who holds the lock
- Add `last_lock_holder: AtomicU64` storing hash of acquiring function name
### 8. Discovery task cap (connection.rs:4938-4980)
- One discovery task per peer at a time
- AtomicBool flag per connection, skip if already running
## LOW Priority
### 9. Engagement rate limiting
- Self-claimed: max 3 emoji + 1 comment per 10 seconds
- Chain-propagated: CDN consensus check from #6 applies
- Process only first 100 ops per BlobHeaderDiff message
### 10. Mesh stream spawn cap (connection.rs)
- Same as #1 — 10 max concurrent handlers per connection
- Supplements auth rate limiter (which handles connection-level, not stream-level)
### 11. Retry backoff per target
- Start at 5 seconds, triple on each failure
- 5s → 15s → 45s → 135s → 405s → 1215s → 3645s → 10935s → 14400s (cap at 4hr)
- 8 failures to hit max backoff
- Reset to 5s on success
- Track per target peer, not global
---
# Security Hardening TODO
Identified during v0.4.0 security audit (2026-03-21).
## CRITICAL — Immediate (before next public release)
### S1. Comment signature verification — ONE LINE FIX (connection.rs:5711-5724)
- `verify_comment_signature()` exists in crypto.rs but is NEVER called on receipt
- Add `if !crypto::verify_comment_signature(...) { continue; }` before `store_comment()`
- Infrastructure exists, just not wired up
### S2. Reaction removal auth check — TWO LINE FIX (connection.rs:5708-5709)
- `RemoveReaction` accepts from any sender, no auth
- Add: `if *reactor == sender || sender == payload.author { ... }`
- Same pattern already used in EditComment/DeleteComment
### S3. Reaction signature — ~30 lines (types.rs, crypto.rs, connection.rs)
- `Reaction` has no signature field — anyone can fake reactions from any NodeId
- Add `signature: Vec<u8>` to Reaction struct (#[serde(default)] for compat)
- Sign `(reactor + post_id + emoji + timestamp)` with reactor's ed25519 key
- Verify in handle_blob_header_diff before storing
- Follow existing `sign_comment` / `verify_comment_signature` pattern
### S4. BlobHeader author verification — ~5 lines (connection.rs:5821-5836)
- Header rebuild uses `payload.author` without checking against stored post author
- Look up actual author from `storage.get_post(&payload.post_id)`
- Use stored author, not payload-claimed author
## HIGH — Short-term
### S5. PostId verification in all paths (connection.rs)
- PostPush verifies with `verify_post_id()` but some pull paths don't
- Audit all `store_post_with_visibility` call sites
- Ensure `verify_post_id()` called before each store
### S6. Slot write protection — self-healing signature system (connection.rs:5749-5803)
- Problem: any peer can overwrite encrypted slots with garbage
- Solution (two layers):
1. CDN tree membership check: only accept slot writes from peers in post_downstream or post_upstream for that post. Rejects random peers.
2. Self-healing signatures: participants sign their own slot writes with the slot key (derived from CEK) and keep a local copy. On diff check, if their slot was overwritten with something they didn't sign, they re-write their signed version. Other participants verify signatures — keep the signed version, discard unsigned garbage. The legitimate version propagates through the CDN tree. Attacker must keep overwriting forever; the real version keeps coming back from every CDN node that received it.
- Relay nodes can't verify signatures (don't have CEK) but pass through all writes — participants do client-side verification on decrypt
### S7. Comment edit/delete cryptographic proof (connection.rs:5726-5736)
- Currently "trust-based" — checks sender == author at transport layer
- QUIC connection IS authenticated (iroh ed25519), so sender identity is verified
- Risk: compromised relay node
- Fix: require new signature over edited content (editor proves they hold private key)
- For post-author deletes: require post author's signature over delete request
### S8. Pull sync follow list privacy (connection.rs:1846-1915)
- PullSyncRequest sends entire follow list unencrypted to every sync peer
- Every mesh peer learns your complete social graph
- Options:
- Accept and document (mesh peers are semi-trusted infrastructure) — RECOMMENDED for now
- Bloom filter: probabilistic set, leaks less, some irrelevant posts received (acceptable bandwidth cost)
- Long-term: oblivious transfer / PIR (heavy crypto, probably not worth it for social network)
## MEDIUM — Design review
### S9. Nonce reuse guard (crypto.rs:54-56)
- ChaCha20-Poly1305 catastrophic on nonce reuse
- RNG is reliable on modern OS (getrandom syscall)
- Add sanity check: if nonce is all zeros after generation, panic rather than encrypt
- One-line guard
### S10. Slot timing metadata leakage (connection.rs:5757, 5776)
- `header.updated_at` changes on slot writes, leaking WHEN engagement occurs on private posts
- Passive observer can correlate timestamps with known user behavior
- Fix: round updated_at to 10-minute buckets for private posts
- Or batch slot writes on fixed schedule rather than immediately
### S11. Per-author engagement rate limiting (connection.rs:5699-5725)
- A peer can send 10,000 fake reactions in one BlobHeaderDiff
- Cap ops per message (100 max per DoS hardening #9)
- Deduplicate by (reactor, post_id, emoji) — storage already does ON CONFLICT DO UPDATE
- Combined with reaction signatures (S3), fake NodeId reactions become impossible
## LOW
(No standalone items in this section — low-priority security items S12 and S13 are tracked in the "Low Priority" section at the end of this document.)
---
# Data Cleanup TODO
### D1. post_downstream not cleaned on post delete (storage.rs delete_post)
- When a post is deleted, downstream registrations stay forever
- Fix: add `DELETE FROM post_downstream WHERE post_id = ?1` in delete_post()
- Also add: `DELETE FROM post_upstream WHERE post_id = ?1`
- Also add: `DELETE FROM seen_engagement WHERE post_id = ?1`
- One-line fixes each
### D2. Document BlobHeader-table relationship (storage.rs store_blob_header)
- Header JSON is a snapshot, reactions/comments tables are authoritative
- They can temporarily diverge (BlobHeaderResponse arrives with newer header than tables)
- Header rebuilt from tables on next engagement op
- Add clarifying comment to store_blob_header
---
# Low Priority
### S12. Hex parse error logging (web.rs:110-119)
- Malformed hex strings silently return 404
- Add debug logging for malformed inputs
### S13. Edit comment signature consistency (storage.rs:4338-4343)
- edit_comment updates content without updating signature
- If signature verification (S1) is enabled, edited comments would have invalid signatures
- Fix: add signature parameter to edit_comment, re-sign edited content

View file

@ -44,7 +44,8 @@
<p>This is the canonical technical reference for ItsGoin. It describes the vision, the architecture, and the current state of every subsystem &mdash; with full implementation detail. This document is versioned; each update records what changed.</p> <p>This is the canonical technical reference for ItsGoin. It describes the vision, the architecture, and the current state of every subsystem &mdash; with full implementation detail. This document is versioned; each update records what changed.</p>
<div class="card" style="margin-top: 1rem;"> <div class="card" style="margin-top: 1rem;">
<strong style="font-size: 0.85rem; text-transform: uppercase; letter-spacing: 0.05em;">Changelog</strong> <strong style="font-size: 0.85rem; text-transform: uppercase; letter-spacing: 0.05em;">Changelog</strong>
<p style="margin-top: 0.5rem;"><strong>v0.4.0</strong> (2026-03-21): Protocol v4 &mdash; header-driven sync. ManifestPush as primary post notification. Slim PullSyncRequest (per-author timestamps, not full post ID list). Tiered engagement checks (5min/1hr/4hr/24hr by content age). Multi-upstream (3 max) with fallback chain. Auto-prefetch followed authors &lt;90d. Self Last Encounter per-author tracking. Encrypted-but-not-for-us CDN caching. Serial engagement polling. ~90% bandwidth reduction for established nodes.</p> <p style="margin-top: 0.5rem;"><strong>v0.4.1</strong> (2026-03-21): Security hardening &mdash; reaction signatures (ed25519), comment signature verification on receipt, reaction removal authorization, BlobHeader author verification. Lock contention fixes &mdash; ManifestPush discovery (cm lock released during I/O), pull request handler (filter without lock), pull sender (split into brief locks), engagement checker (batch writes per chunk). Data cleanup &mdash; post deletion cleans downstream/upstream/seen tables.</p>
<p><strong>v0.4.0</strong> (2026-03-21): Protocol v4 &mdash; header-driven sync. ManifestPush as primary post notification. Slim PullSyncRequest (per-author timestamps, not full post ID list). Tiered engagement checks (5min/1hr/4hr/24hr by content age). Multi-upstream (3 max) with fallback chain. Auto-prefetch followed authors &lt;90d. Self Last Encounter per-author tracking. Encrypted-but-not-for-us CDN caching. Serial engagement polling. ~90% bandwidth reduction for established nodes.</p>
<p><strong>v0.3.6</strong> (2026-03-20): Active CDN replication &mdash; all devices proactively replicate recent posts to peers (desktops &gt; anchors &gt; phones priority). ReplicationRequest/Response (0xE1/0xE2). Device roles (Intermittent/Available/Persistent) advertised in InitialExchange. Bandwidth budgets: replication (pull to cache) + delivery (serve requests), hourly auto-reset, phones 100MB/1GB, desktops 200MB/2GB, anchors 200MB/1GB. Cache management: 1GB default, configurable, eviction cycle activated with share-link priority boost. Engagement distribution fix &mdash; BlobHeader JSON rebuilt after diff ops. Tombstone system &mdash; deleted reactions/comments tombstoned, propagate via pull sync. Persistent notifications via seen_engagement/seen_messages tables. DOS hardening: fan-out cap (10), prefetch cap (20), downstream registration cap (50), delivery budget enforcement. Pull preference reordered: non-anchors first. Network indicator &mdash; header dot (black/red/yellow/green) + capability labels. Tab badges &mdash; contextual counts (new posts, engagement, online, unread). Message read tracking on open/close/send. Stats bar removed.</p> <p><strong>v0.3.6</strong> (2026-03-20): Active CDN replication &mdash; all devices proactively replicate recent posts to peers (desktops &gt; anchors &gt; phones priority). ReplicationRequest/Response (0xE1/0xE2). Device roles (Intermittent/Available/Persistent) advertised in InitialExchange. Bandwidth budgets: replication (pull to cache) + delivery (serve requests), hourly auto-reset, phones 100MB/1GB, desktops 200MB/2GB, anchors 200MB/1GB. Cache management: 1GB default, configurable, eviction cycle activated with share-link priority boost. Engagement distribution fix &mdash; BlobHeader JSON rebuilt after diff ops. Tombstone system &mdash; deleted reactions/comments tombstoned, propagate via pull sync. Persistent notifications via seen_engagement/seen_messages tables. 
DOS hardening: fan-out cap (10), prefetch cap (20), downstream registration cap (50), delivery budget enforcement. Pull preference reordered: non-anchors first. Network indicator &mdash; header dot (black/red/yellow/green) + capability labels. Tab badges &mdash; contextual counts (new posts, engagement, online, unread). Message read tracking on open/close/send. Stats bar removed.</p>
<p><strong>v0.3.5</strong> (2026-03-20): Private blob encryption &mdash; attachments on encrypted posts (Friends/Circle/Direct) now encrypted with same CEK as post text; public blobs unchanged; CID on ciphertext. Blob prefetch on sync &mdash; attachments eagerly fetched after post pull for offline availability. Crypto refactoring &mdash; extracted reusable primitives (encrypt/decrypt_bytes_with_cek, unwrap_cek_for_recipient, unwrap_group_cek). Intent-based post filtering &mdash; feed/myposts/messages filter on intentKind instead of encryption state. Blob decryption API (get_blob_for_post). Download filename sanitization. Encrypted receipt &amp; comment slots &mdash; private posts carry noise-prefilled encrypted slots in BlobHeader for delivery/read/react receipts and private comments; CDN-propagated as opaque bytes; slot key derived from post CEK; 3 new BlobHeaderDiffOps (WriteReceiptSlot, WriteCommentSlot, AddCommentSlots). Message UI &mdash; DM delivery indicators (checkmark/double/blue/emoji), auto-seen on view, react button on messages.</p> <p><strong>v0.3.5</strong> (2026-03-20): Private blob encryption &mdash; attachments on encrypted posts (Friends/Circle/Direct) now encrypted with same CEK as post text; public blobs unchanged; CID on ciphertext. Blob prefetch on sync &mdash; attachments eagerly fetched after post pull for offline availability. Crypto refactoring &mdash; extracted reusable primitives (encrypt/decrypt_bytes_with_cek, unwrap_cek_for_recipient, unwrap_group_cek). Intent-based post filtering &mdash; feed/myposts/messages filter on intentKind instead of encryption state. Blob decryption API (get_blob_for_post). Download filename sanitization. Encrypted receipt &amp; comment slots &mdash; private posts carry noise-prefilled encrypted slots in BlobHeader for delivery/read/react receipts and private comments; CDN-propagated as opaque bytes; slot key derived from post CEK; 3 new BlobHeaderDiffOps (WriteReceiptSlot, WriteCommentSlot, AddCommentSlots). 
Message UI &mdash; DM delivery indicators (checkmark/double/blue/emoji), auto-seen on view, react button on messages.</p>
<p><strong>v0.3.4</strong> (2026-03-18): Comment edit &amp; delete with trust-based propagation. Native notifications via Tauri plugin (messages, posts, reactions, comments). Forward-compatible BlobHeaderDiffOp::Unknown variant. Following Online/Offline lightbox. Comment threading scoping fix. Dropdown text legibility fix. Mobile hamburger nav for website.</p> <p><strong>v0.3.4</strong> (2026-03-18): Comment edit &amp; delete with trust-based propagation. Native notifications via Tauri plugin (messages, posts, reactions, comments). Forward-compatible BlobHeaderDiffOp::Unknown variant. Following Online/Offline lightbox. Comment threading scoping fix. Dropdown text legibility fix. Mobile hamburger nav for website.</p>

View file

@ -25,16 +25,16 @@
<section> <section>
<h1 style="font-size: 2rem; font-weight: 800; letter-spacing: -0.03em; margin-bottom: 0.25rem;">Download ItsGoin</h1> <h1 style="font-size: 2rem; font-weight: 800; letter-spacing: -0.03em; margin-bottom: 0.25rem;">Download ItsGoin</h1>
<p>Available for Android and Linux. Free and open source.</p> <p>Available for Android and Linux. Free and open source.</p>
<p style="color: var(--text-muted); font-size: 0.85rem;">Version 0.4.0 &mdash; March 15, 2026</p> <p style="color: var(--text-muted); font-size: 0.85rem;">Version 0.4.1 &mdash; March 21, 2026</p>
<div class="downloads"> <div class="downloads">
<a href="itsgoin-0.4.0.apk" class="download-btn btn-android"> <a href="itsgoin-0.4.1.apk" class="download-btn btn-android">
Android APK Android APK
<span class="sub">v0.4.0</span> <span class="sub">v0.4.1</span>
</a> </a>
<a href="itsgoin_0.4.0_amd64.AppImage" class="download-btn btn-linux"> <a href="itsgoin_0.4.1_amd64.AppImage" class="download-btn btn-linux">
Linux AppImage Linux AppImage
<span class="sub">v0.4.0</span> <span class="sub">v0.4.1</span>
</a> </a>
</div> </div>
</section> </section>
@ -46,7 +46,7 @@
<h3 style="color: var(--accent);">Android</h3> <h3 style="color: var(--accent);">Android</h3>
<ol class="steps"> <ol class="steps">
<li><strong>Download the APK</strong> &mdash; Tap the button above. Your browser may warn that this type of file can be harmful &mdash; tap <strong>Download anyway</strong>.</li> <li><strong>Download the APK</strong> &mdash; Tap the button above. Your browser may warn that this type of file can be harmful &mdash; tap <strong>Download anyway</strong>.</li>
<li><strong>Open the file</strong> &mdash; When the download finishes, tap the notification or find <code>itsgoin-0.4.0.apk</code> in your Downloads folder and tap it.</li> <li><strong>Open the file</strong> &mdash; When the download finishes, tap the notification or find <code>itsgoin-0.4.1.apk</code> in your Downloads folder and tap it.</li>
<li><strong>Allow installation</strong> &mdash; Android will ask you to allow installs from this source. Tap <strong>Settings</strong>, toggle <strong>"Allow from this source"</strong>, then go back and tap <strong>Install</strong>.</li> <li><strong>Allow installation</strong> &mdash; Android will ask you to allow installs from this source. Tap <strong>Settings</strong>, toggle <strong>"Allow from this source"</strong>, then go back and tap <strong>Install</strong>.</li>
<li><strong>Launch the app</strong> &mdash; Once installed, tap <strong>Open</strong> or find ItsGoin in your app drawer.</li> <li><strong>Launch the app</strong> &mdash; Once installed, tap <strong>Open</strong> or find ItsGoin in your app drawer.</li>
</ol> </ol>
@ -59,8 +59,8 @@
<h3 style="color: var(--green);">Linux (AppImage)</h3> <h3 style="color: var(--green);">Linux (AppImage)</h3>
<ol class="steps"> <ol class="steps">
<li><strong>Download the AppImage</strong> &mdash; Click the button above to download.</li> <li><strong>Download the AppImage</strong> &mdash; Click the button above to download.</li>
<li><strong>Make it executable</strong> &mdash; Open a terminal and run:<br><code>chmod +x itsgoin_0.4.0_amd64.AppImage</code></li> <li><strong>Make it executable</strong> &mdash; Open a terminal and run:<br><code>chmod +x itsgoin_0.4.1_amd64.AppImage</code></li>
<li><strong>Run it</strong> &mdash; Double-click the file, or from the terminal:<br><code>./itsgoin_0.4.0_amd64.AppImage</code></li> <li><strong>Run it</strong> &mdash; Double-click the file, or from the terminal:<br><code>./itsgoin_0.4.1_amd64.AppImage</code></li>
</ol> </ol>
<div class="note"> <div class="note">
<strong>Note:</strong> If it doesn't launch, you may need to install FUSE:<br><code>sudo apt install libfuse2</code> (Debian/Ubuntu) or <code>sudo dnf install fuse</code> (Fedora). <strong>Note:</strong> If it doesn't launch, you may need to install FUSE:<br><code>sudo apt install libfuse2</code> (Debian/Ubuntu) or <code>sudo dnf install fuse</code> (Fedora).
@ -71,6 +71,19 @@
<section> <section>
<h2>Changelog</h2> <h2>Changelog</h2>
<div class="changelog"> <div class="changelog">
<div class="changelog-date">v0.4.1 &mdash; March 21, 2026</div>
<ul>
<li><strong>Security: Reaction signatures</strong> &mdash; Reactions now carry ed25519 signatures. Forged reactions from other NodeIds are rejected. Backward-compatible with unsigned reactions from older nodes.</li>
<li><strong>Security: Comment signature verification</strong> &mdash; Comment signatures (already present) are now verified on receipt. Forged comments rejected.</li>
<li><strong>Security: Reaction removal auth</strong> &mdash; Only the reactor or post author can remove reactions. Previously any peer could strip reactions.</li>
<li><strong>Security: BlobHeader author verification</strong> &mdash; Header rebuild verifies author against stored post, not trusted from payload.</li>
<li><strong>Lock contention: ManifestPush discovery</strong> &mdash; cm lock released before PostFetch network I/O. Was holding lock during entire discovery (5s+ freeze).</li>
<li><strong>Lock contention: Pull request handler</strong> &mdash; Load posts under lock, filter without lock, brief re-lock for is_deleted. Was holding lock during full post list iteration.</li>
<li><strong>Lock contention: Pull sender</strong> &mdash; Split into two brief locks (store, then batch upstream+sync). Was holding one long lock for all operations.</li>
<li><strong>Lock contention: Engagement checker</strong> &mdash; Batch writes per chunk with single lock. Was acquiring lock per post (100+ times).</li>
<li><strong>Data cleanup</strong> &mdash; Post deletion now cleans up post_downstream, post_upstream, and seen_engagement tables.</li>
</ul>
<div class="changelog-date">v0.4.0 &mdash; March 21, 2026</div> <div class="changelog-date">v0.4.0 &mdash; March 21, 2026</div>
<ul> <ul>
<li><strong>Protocol v4: Header-driven sync</strong> &mdash; Major sync protocol revision. ManifestPush now triggers post discovery from CDN tree headers. Bandwidth reduced ~90% for established nodes.</li> <li><strong>Protocol v4: Header-driven sync</strong> &mdash; Major sync protocol revision. ManifestPush now triggers post discovery from CDN tree headers. Bandwidth reduced ~90% for established nodes.</li>