diff --git a/crates/core/src/connection.rs b/crates/core/src/connection.rs index 3f97624..76e751c 100644 --- a/crates/core/src/connection.rs +++ b/crates/core/src/connection.rs @@ -4935,41 +4935,31 @@ impl ConnectionManager { } } - // Send CDN delete notices (async, best-effort) + // Gather connections for CDN delete notices under lock, then send outside + let mut delete_notices: Vec<(iroh::endpoint::Connection, crate::protocol::BlobDeleteNoticePayload)> = Vec::new(); for (cid, downstream, upstream) in &blob_cleanup { - // Notify downstream (with our upstream info for tree healing) - let upstream_info = upstream.as_ref().map(|(nid, addrs)| { - PeerWithAddress { - n: hex::encode(nid), - a: addrs.clone(), - } - }); - let ds_payload = crate::protocol::BlobDeleteNoticePayload { - cid: *cid, - upstream_node: upstream_info, - }; + let upstream_info = upstream.as_ref().map(|(nid, addrs)| PeerWithAddress { n: hex::encode(nid), a: addrs.clone() }); + let ds_payload = crate::protocol::BlobDeleteNoticePayload { cid: *cid, upstream_node: upstream_info }; for (ds_nid, _) in downstream { if let Some(pc) = cm.connections_ref().get(ds_nid) { - if let Ok(mut send) = pc.connection.open_uni().await { - let _ = write_typed_message(&mut send, MessageType::BlobDeleteNotice, &ds_payload).await; - let _ = send.finish(); - } + delete_notices.push((pc.connection.clone(), ds_payload.clone())); } } - // Notify upstream (no upstream info — just "remove me") if let Some((up_nid, _)) = upstream { - let up_payload = crate::protocol::BlobDeleteNoticePayload { - cid: *cid, - upstream_node: None, - }; + let up_payload = crate::protocol::BlobDeleteNoticePayload { cid: *cid, upstream_node: None }; if let Some(pc) = cm.connections_ref().get(up_nid) { - if let Ok(mut send) = pc.connection.open_uni().await { - let _ = write_typed_message(&mut send, MessageType::BlobDeleteNotice, &up_payload).await; - let _ = send.finish(); - } + delete_notices.push((pc.connection.clone(), up_payload)); } } } + drop(cm); 
+ // Send outside lock + for (conn, payload) in &delete_notices { + if let Ok(mut send) = conn.open_uni().await { + let _ = write_typed_message(&mut send, MessageType::BlobDeleteNotice, payload).await; + let _ = send.finish(); + } + } } MessageType::VisibilityUpdate => { let payload: crate::protocol::VisibilityUpdatePayload = @@ -5167,13 +5157,18 @@ impl ConnectionManager { } drop(storage); - // Relay to downstream (best-effort via mesh connections) - for (ds_nid, relay_payload) in &relay_targets { - if let Some(pc) = cm.connections_ref().get(ds_nid) { - if let Ok(mut send) = pc.connection.open_uni().await { - let _ = write_typed_message(&mut send, MessageType::ManifestPush, relay_payload).await; - let _ = send.finish(); - } + // Gather relay connections under lock, then relay outside + let relay_conns: Vec<(iroh::endpoint::Connection, crate::protocol::ManifestPushPayload)> = relay_targets.iter() + .filter_map(|(ds_nid, payload)| { + cm.connections_ref().get(ds_nid).map(|pc| (pc.connection.clone(), payload.clone())) + }) + .collect(); + drop(cm); + // Relay outside lock + for (conn, relay_payload) in &relay_conns { + if let Ok(mut send) = conn.open_uni().await { + let _ = write_typed_message(&mut send, MessageType::ManifestPush, relay_payload).await; + let _ = send.finish(); } } @@ -5291,8 +5286,7 @@ impl ConnectionManager { }); } - drop(cm); - debug!(peer = hex::encode(remote_node_id), stored, relayed = relay_targets.len(), "Received manifest push"); + debug!(peer = hex::encode(remote_node_id), stored, relayed = relay_conns.len(), "Received manifest push"); } MessageType::SocialDisconnectNotice => { let payload: SocialDisconnectNoticePayload = read_payload(recv, MAX_PAYLOAD).await?; diff --git a/crates/core/src/identity.rs b/crates/core/src/identity.rs index bdf71b0..4775873 100644 --- a/crates/core/src/identity.rs +++ b/crates/core/src/identity.rs @@ -188,8 +188,10 @@ impl IdentityManager { let id_dir = self.base_dir.join("identities").join(&node_id_hex); 
std::fs::create_dir_all(&id_dir)?; - // Write identity key - std::fs::write(id_dir.join("identity.key"), seed)?; + // Write identity key with restricted permissions + let key_path = id_dir.join("identity.key"); + std::fs::write(&key_path, seed)?; + set_key_permissions(&key_path); // Write metadata let now = now_ms(); @@ -223,7 +225,9 @@ } std::fs::create_dir_all(&id_dir)?; - std::fs::write(id_dir.join("identity.key"), seed)?; + let key_path = id_dir.join("identity.key"); + std::fs::write(&key_path, seed)?; + set_key_permissions(&key_path); let now = now_ms(); let meta = IdentityMeta { @@ -390,3 +394,16 @@ .unwrap_or_default() .as_millis() as u64 } + +/// Set restrictive permissions on identity key files (user-only read/write). NOTE(review): the key is briefly on disk with default (umask) permissions before this runs; creating via OpenOptions + OpenOptionsExt::mode(0o600) would close that window. +fn set_key_permissions(path: &std::path::Path) { + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + let _ = std::fs::set_permissions(path, std::fs::Permissions::from_mode(0o600)); + } + #[cfg(not(unix))] + { + let _ = path; // no-op on non-Unix (Windows uses ACLs, Android sandboxes) + } +} diff --git a/crates/core/src/network.rs b/crates/core/src/network.rs index fe5cb67..446b2ef 100644 --- a/crates/core/src/network.rs +++ b/crates/core/src/network.rs @@ -61,11 +61,22 @@ fn is_public_ip(ip: IpAddr) -> bool { } } -/// Filter out addresses that are never useful to share (loopback, link-local, unspecified). -/// Keeps LAN addresses (192.168.x, 10.x, 172.16-31.x) since peers might be on the same LAN. +/// Filter out addresses that are never useful to share (loopback, link-local, unspecified, +/// Docker bridge). Keeps common LAN addresses (192.168.x, 10.x) for same-WiFi discovery. +/// Excludes 172.17-31.x (Docker/container bridges) to avoid topology disclosure.
pub(crate) fn is_shareable_addr(addr: &SocketAddr) -> bool { match addr.ip() { - IpAddr::V4(v4) => !v4.is_loopback() && !v4.is_link_local() && !v4.is_unspecified(), + IpAddr::V4(v4) => { + if v4.is_loopback() || v4.is_link_local() || v4.is_unspecified() { + return false; + } + // Exclude Docker bridge range (172.17.0.0 - 172.31.255.255) + let octets = v4.octets(); + if octets[0] == 172 && (17..=31).contains(&octets[1]) { + return false; + } + true + } IpAddr::V6(v6) => !v6.is_loopback() && !v6.is_unspecified(), } } diff --git a/frontend/app.js b/frontend/app.js index a56c46c..a2b95b9 100644 --- a/frontend/app.js +++ b/frontend/app.js @@ -3141,7 +3141,7 @@ $('#share-details-btn').addEventListener('click', () => { overlay.querySelector('#share-close-btn').addEventListener('click', () => overlay.remove()); overlay.addEventListener('click', (e) => { if (e.target === overlay) overlay.remove(); }); }); -syncBtn.addEventListener('click', doSyncAll); +if (syncBtn) syncBtn.addEventListener('click', doSyncAll); if (copyBtn) copyBtn.addEventListener('click', async () => { try { await navigator.clipboard.writeText(connectString); diff --git a/website/design.html b/website/design.html index 8c4f6a5..c9ccfdc 100644 --- a/website/design.html +++ b/website/design.html @@ -39,7 +39,7 @@
- v0.3.1 — 2026-03-13 + v0.4.4 — 2026-03-31

Design Document

This is the canonical technical reference for ItsGoin. It describes the vision, the architecture, and the current state of every subsystem — with full implementation detail. This document is versioned; each update records what changed.

@@ -274,7 +274,7 @@
  1. Dead connection removal: Remove connections with close_reason() set, or idle > 600s (zombie)
  2. Stale entry pruning: N2/N3 entries tagged to a peer that is no longer connected are pruned immediately (on disconnect and on startup sweep). Age-based fallback: entries older than 7 days. Social route watchers older than 30 days.
  3. -
  4. Priority 0 — Preferred peer reconnection: Iterate preferred_peers table, reconnect any that are disconnected. If at capacity, evict the lowest-diversity non-preferred peer to make room. Prune preferred peers unreachable for 7+ days (slot released, does NOT auto-return on reconnect — must re-negotiate via MeshPrefer). After 7 days, social checkin frequency drops from 1–3 hours to daily until the 30-day reconnect watcher expires.
  5. +
  6. Priority 0 — Preferred peer reconnection: Iterate preferred_peers table, reconnect any that are disconnected. If at capacity, evict the lowest-diversity non-preferred peer to make room. Prune preferred peers unreachable for 7+ days (slot released, does NOT auto-return on reconnect — must re-negotiate via MeshPrefer). After 7 days, social checkin frequency drops from 1–4 hours to daily until the 30-day reconnect watcher expires.
  7. Priority 1 — Reconnect recently dead: Re-establish dropped non-preferred connections. Skip blacklisted nodes — do not attempt reconnection to peers in mesh_blacklist.
  8. Priority 2 — Signal growth loop: Fill remaining empty slots via growth loop
  9. Idle session cleanup: Reap interactive sessions idle > 300s (5 min). Keep-alive sessions are NOT reaped by idle timeout.
  10. @@ -355,7 +355,7 @@ - +
    LayerPurposeConnectionsSync trigger
    MeshStructural backbone: N1/N2/N3 routing, diversity, discovery101 mesh slots (preferred + non-preferred)N/A — mesh is infrastructure, not content
    SocialFollows, audience, DMs — the human relationshipsSocial routes + keep-alive sessions as neededPull posts when Self Last Encounter > 3 hours
    SocialFollows, audience, DMs — the human relationshipsSocial routes + keep-alive sessions as neededPull posts when Self Last Encounter > 4 hours
    FileContent storage and distribution — blobs, CDN treesUpstream/downstream file peers + keep-alive sessions as neededPull on blob request, push on post creation
    @@ -1017,7 +1017,7 @@ FAILURE: C → B → A: AnchorProbeResult { reachable: false } v0.2.0 change: Pull sync pulls posts from social layer peers (follows, audience) and upstream file peers, NOT from mesh peers. Mesh connections exist for routing diversity, not content. This separates infrastructure from content flow.
-

Self Last Encounter: For each peer we sync with, we track the timestamp of our last successful sync. When Self Last Encounter ages beyond 3 hours, a pull sync is triggered. Self Last Encounter is updated to the newer of: (a) what's currently stored, or (b) the "file last update" timestamp from file headers received during blob transfers. Since file headers include the author's recent post list, downloading a blob from any peer hosting that author's content can update Self Last Encounter for the author.

+

Self Last Encounter: For each peer we sync with, we track the timestamp of our last successful sync. When Self Last Encounter ages beyond 4 hours, a pull sync is triggered. Self Last Encounter is updated to the newer of: (a) what's currently stored, or (b) the "file last update" timestamp from file headers received during blob transfers. Since file headers include the author's recent post list, downloading a blob from any peer hosting that author's content can update Self Last Encounter for the author.

Pull sync filtering