diff --git a/crates/core/src/connection.rs b/crates/core/src/connection.rs
index d4ea764..e375b69 100644
--- a/crates/core/src/connection.rs
+++ b/crates/core/src/connection.rs
@@ -25,7 +25,7 @@ use crate::protocol::{
     SyncPost, VisibilityUpdatePayload, WormQueryPayload, WormResponsePayload,
     ReplicationRequestPayload, ReplicationResponsePayload, ALPN_V2,
 };
-use crate::storage::Storage;
+use crate::storage::{Storage, StoragePool};
 use crate::types::{
     DeviceProfile, NodeId, PeerSlotKind, PeerWithAddress, PostId, PostVisibility,
     ReachMethod, SessionReachMethod, SocialRouteEntry, SocialStatus, WormId, WormResult,
@@ -489,10 +489,58 @@ struct ReferralEntry {
     disconnected_at: Option<u64>,
 }

+/// Data gathered under brief lock for anchor probe I/O.
+pub struct AnchorProbeData {
+    pub payload: crate::protocol::AnchorProbeRequestPayload,
+    pub reporter_conn: iroh::endpoint::Connection,
+}
+
+/// Result of anchor probe I/O (to be applied under brief re-lock).
+pub struct AnchorProbeResult {
+    pub reachable: bool,
+    pub timed_out: bool,
+    pub outcome: anyhow::Result<bool>,
+}
+
+/// Data gathered under brief lock for relay introduce forwarding.
+enum RelayGathered {
+    WeAreTarget {
+        our_addrs: Vec<String>,
+        endpoint: iroh::Endpoint,
+        storage: Arc<StoragePool>,
+        our_node_id: NodeId,
+        our_nat_type: crate::types::NatType,
+        our_http_capable: bool,
+        our_http_addr: Option<String>,
+        our_nat_profile: crate::types::NatProfile,
+        peer_nat_profile: crate::types::NatProfile,
+    },
+    WeAreRelay {
+        target_conn: Option<(iroh::endpoint::Connection, Option<std::net::SocketAddr>)>,
+        requester_observed: Option<std::net::SocketAddr>,
+        relay_available: bool,
+        activity_log: Arc<Mutex<ActivityLog>>,
+        ttl_reporters: Vec<(NodeId, iroh::endpoint::Connection)>,
+    },
+}
+
+/// Snapshot of ConnectionManager state needed for worm cascade — no lock required.
+struct WormContext {
+    our_node_id: NodeId,
+    storage: Arc<StoragePool>,
+    endpoint: iroh::Endpoint,
+    /// Snapshot of (node_id, connection) for all mesh peers
+    peer_conns: Vec<(NodeId, iroh::endpoint::Connection)>,
+    /// Set of connected node IDs for quick lookup
+    connected_ids: HashSet<NodeId>,
+    /// Arc to conn_mgr for resolve_address_unlocked
+    cm: Arc<Mutex<ConnectionManager>>,
+}
+
 pub struct ConnectionManager {
     connections: HashMap<NodeId, PeerConnection>,
     endpoint: iroh::Endpoint,
-    storage: Arc<Mutex<Storage>>,
+    storage: Arc<StoragePool>,
     our_node_id: NodeId,
     #[allow(dead_code)]
     is_anchor: Arc<AtomicBool>,
@@ -565,7 +613,7 @@ pub struct ConnectionManager {
 impl ConnectionManager {
     pub fn new(
         endpoint: iroh::Endpoint,
-        storage: Arc<Mutex<Storage>>,
+        storage: Arc<StoragePool>,
         our_node_id: NodeId,
         is_anchor: Arc<AtomicBool>,
         secret_seed: [u8; 32],
@@ -698,7 +746,7 @@ impl ConnectionManager {
         let our_connections: HashSet<NodeId> = self.connections.keys().copied().collect();
         let (witness, reporter) = {
-            let s = self.storage.lock().await;
+            let s = self.storage.get().await;
             match s.random_n2_stranger(&our_connections)? {
                 Some(pair) => pair,
                 None => {
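The `StoragePool` API itself is outside this diff; every migrated call site only assumes that `pool.get().await` yields a short-lived guard where the old code took the single global `Mutex<Storage>` lock. A minimal sketch of that shape, with hypothetical `StoragePool`/`Storage` stand-ins (a real pool would presumably hand out one of N pooled handles):

```rust
use std::sync::Arc;
use tokio::sync::{Mutex, MutexGuard};

// Hypothetical stand-ins; one tokio Mutex is enough to show the call-site shape.
struct Storage;
impl Storage {
    fn list_follows(&self) -> anyhow::Result<Vec<[u8; 32]>> {
        Ok(Vec::new())
    }
}

struct StoragePool {
    inner: Mutex<Storage>,
}

impl StoragePool {
    // The surface the diff relies on: `get().await` replaces `lock().await`.
    async fn get(&self) -> MutexGuard<'_, Storage> {
        self.inner.lock().await
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let pool = Arc::new(StoragePool { inner: Mutex::new(Storage) });
    // Scoped acquisition, exactly like the migrated call sites.
    let follows = {
        let s = pool.get().await;
        s.list_follows()?
    };
    println!("{} follows", follows.len());
    Ok(())
}
```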
@@ -811,6 +859,96 @@ impl ConnectionManager {
         }
     }

+    /// Gather all data needed for an anchor probe under brief lock.
+    pub async fn gather_anchor_probe_data(&self) -> Option<AnchorProbeData> {
+        use crate::protocol::AnchorProbeRequestPayload;
+
+        let our_connections: HashSet<NodeId> = self.connections.keys().copied().collect();
+        let (witness, reporter) = {
+            let s = self.storage.get().await;
+            s.random_n2_stranger(&our_connections).ok()??
+        };
+
+        let target_addr = self.build_anchor_advertised_addr()?;
+
+        let mut candidate_addrs: Vec<String> = self.endpoint.addr().ip_addrs()
+            .filter(|s| crate::network::is_publicly_routable(s))
+            .map(|s| s.to_string())
+            .collect();
+        if let Some(ref ext) = self.upnp_external_addr {
+            let ext_str = ext.to_string();
+            if !candidate_addrs.contains(&ext_str) { candidate_addrs.insert(0, ext_str); }
+        }
+
+        let probe_id: [u8; 16] = {
+            use rand::Rng;
+            let mut id = [0u8; 16];
+            rand::rng().fill(&mut id);
+            id
+        };
+
+        let reporter_conn = self.connections.get(&reporter)?.connection.clone();
+
+        Some(AnchorProbeData {
+            payload: AnchorProbeRequestPayload {
+                target_addr,
+                witness,
+                candidate: self.our_node_id,
+                candidate_addresses: candidate_addrs,
+                probe_id,
+            },
+            reporter_conn,
+        })
+    }
+
+    /// Run the anchor probe I/O — does NOT require the conn_mgr lock.
+    pub async fn run_anchor_probe_unlocked(data: AnchorProbeData) -> AnchorProbeResult {
+        use crate::protocol::{AnchorProbeResultPayload, MessageType, read_message_type, read_payload, write_typed_message};
+
+        let result = tokio::time::timeout(
+            std::time::Duration::from_secs(20),
+            async {
+                let (mut send, mut recv) = data.reporter_conn.open_bi().await?;
+                write_typed_message(&mut send, MessageType::AnchorProbeRequest, &data.payload).await?;
+                send.finish()?;
+                let msg_type = read_message_type(&mut recv).await?;
+                if msg_type != MessageType::AnchorProbeResult {
+                    anyhow::bail!("expected AnchorProbeResult, got {:?}", msg_type);
+                }
+                let result: AnchorProbeResultPayload = read_payload(&mut recv, 4096).await?;
+                Ok::<_, anyhow::Error>(result)
+            }
+        ).await;
+
+        match result {
+            Ok(Ok(r)) if r.reachable => AnchorProbeResult { reachable: true, timed_out: false, outcome: Ok(true) },
+            Ok(Ok(_)) => AnchorProbeResult { reachable: false, timed_out: false, outcome: Ok(false) },
+            Ok(Err(e)) => AnchorProbeResult { reachable: false, timed_out: false, outcome: Err(e) },
+            Err(_) => AnchorProbeResult { reachable: false, timed_out: true, outcome: Ok(false) },
+        }
+    }
+
+    /// Apply anchor probe result — brief lock to update state.
+    pub fn apply_anchor_probe_result(&mut self, result: &AnchorProbeResult) {
+        if result.reachable {
+            self.last_probe_success_ms = now_ms();
+            self.probe_failure_streak = 0;
+            self.log_activity(ActivityLevel::Info, ActivityCategory::Anchor,
+                "Anchor probe succeeded — confirmed reachable".to_string(), None);
+            info!("Anchor probe succeeded — confirmed reachable");
+        } else {
+            self.probe_failure_streak += 1;
+            self.log_activity(ActivityLevel::Warn, ActivityCategory::Anchor,
+                format!("Anchor probe failed (streak: {})", self.probe_failure_streak), None);
+            if self.probe_failure_streak >= 2 {
+                self.is_anchor.store(false, Ordering::Relaxed);
+                self.log_activity(ActivityLevel::Warn, ActivityCategory::Anchor,
+                    "Anchor status revoked (2 consecutive failures)".to_string(), None);
+                warn!("Anchor status revoked (2 consecutive probe failures)");
+            }
+        }
+    }
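The three functions above split one long-lock operation into gather (brief lock), I/O (no lock), and apply (brief re-lock). A runnable sketch of the same pattern, with hypothetical `Manager`/`ProbeData`/`ProbeResult` stand-ins:

```rust
use std::sync::Arc;
use tokio::sync::Mutex;

// Hypothetical stand-ins for ConnectionManager / AnchorProbeData / AnchorProbeResult.
struct Manager { probe_failure_streak: u32 }
struct ProbeData { reporter: &'static str }
struct ProbeResult { reachable: bool }

impl Manager {
    fn gather(&self) -> ProbeData { ProbeData { reporter: "peer-a" } }
    fn apply(&mut self, r: &ProbeResult) {
        if r.reachable { self.probe_failure_streak = 0; } else { self.probe_failure_streak += 1; }
    }
}

// All network I/O lives here, with no lock held.
async fn probe_io(data: ProbeData) -> ProbeResult {
    let _ = data.reporter;
    ProbeResult { reachable: true }
}

#[tokio::main]
async fn main() {
    let mgr = Arc::new(Mutex::new(Manager { probe_failure_streak: 0 }));
    let data = { mgr.lock().await.gather() };  // 1. brief lock
    let result = probe_io(data).await;         // 2. unlocked I/O
    mgr.lock().await.apply(&result);           // 3. brief re-lock
}
```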

     /// Handle an incoming AnchorProbeRequest — either as reporter (forward to witness) or as witness (cold connect)
     pub async fn handle_anchor_probe_request(
         &mut self,
@@ -1008,7 +1146,7 @@ impl ConnectionManager {
     /// already-connected peers, self, and unreachable peers.
     pub async fn score_n2_candidates(&self) -> Vec<(NodeId, f64)> {
         let candidates = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             storage.score_n2_candidates_batch().unwrap_or_default()
         };
@@ -1113,14 +1251,14 @@ impl ConnectionManager {
         // Persist address to peers table so it survives restart
         if !addrs.is_empty() {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let _ = storage.upsert_peer(&peer_id, addrs, None);
             drop(storage);
         }

         // Record in mesh_peers table + touch social route
         {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let _ = storage.add_mesh_peer(&peer_id, slot_kind, 0);
             if storage.has_social_route(&peer_id).unwrap_or(false) {
                 let _ = storage.touch_social_route_connect(&peer_id, addrs, ReachMethod::Direct);
@@ -1142,35 +1280,183 @@ impl ConnectionManager {
     }

     /// Establish an outgoing mesh connection with a 15s timeout on the QUIC connect.
-    /// Used by rebalance_slots() and reconnect_preferred() which hold &mut self.
-    /// Note: holds the conn_mgr lock during the connect. For lock-free connecting,
-    /// use Network::connect_to_peer() which connects outside the lock.
-    pub async fn connect_to(
+    /// Quick check + register only — call connect_to_unlocked() for the actual QUIC connect
+    /// outside the lock, then pass the resulting connection here.
+    pub async fn register_new_connection(
         &mut self,
         peer_id: NodeId,
-        addr: iroh::EndpointAddr,
+        conn: iroh::endpoint::Connection,
+        addrs: &[std::net::SocketAddr],
         slot_kind: PeerSlotKind,
-    ) -> anyhow::Result<()> {
+    ) {
         if self.connections.contains_key(&peer_id) {
-            return Ok(()); // Already connected
+            return; // Already connected
         }
+        self.register_connection(peer_id, conn, addrs, slot_kind).await;
+    }

-        let addrs: Vec<std::net::SocketAddr> = addr.ip_addrs().copied().collect();
-        if !addrs.is_empty() {
-            let storage = self.storage.lock().await;
-            let _ = storage.upsert_peer(&peer_id, &addrs, None);
-        }
-
-        // 15s timeout to limit lock contention (QUIC default can be 60+s)
+    /// QUIC connect with 15s timeout — call this OUTSIDE the conn_mgr lock.
+    pub async fn connect_to_unlocked(
+        endpoint: &iroh::Endpoint,
+        addr: iroh::EndpointAddr,
+    ) -> anyhow::Result<iroh::endpoint::Connection> {
         let conn = tokio::time::timeout(
             std::time::Duration::from_secs(15),
-            self.endpoint.connect(addr, ALPN_V2),
+            endpoint.connect(addr, ALPN_V2),
         ).await
         .map_err(|_| anyhow::anyhow!("connect timed out (15s)"))?
         .map_err(|e| anyhow::anyhow!("connect failed: {e}"))?;
+        Ok(conn)
+    }

-        self.register_connection(peer_id, conn, &addrs, slot_kind).await;
-        Ok(())
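A sketch of how a caller is expected to combine the two halves, dialing outside the lock and re-locking only to register; the `quic_connect` helper and `Manager` type here are hypothetical stand-ins:

```rust
use std::{collections::HashMap, sync::Arc, time::Duration};
use tokio::sync::Mutex;

struct Conn;                                  // stand-in for iroh::endpoint::Connection
struct Manager { conns: HashMap<u64, Conn> }  // stand-in for ConnectionManager

// Mirrors connect_to_unlocked: the 15s cap bounds the dial, not a lock hold.
async fn quic_connect(_peer: u64) -> anyhow::Result<Conn> {
    tokio::time::timeout(Duration::from_secs(15), async { Ok::<_, anyhow::Error>(Conn) })
        .await
        .map_err(|_| anyhow::anyhow!("connect timed out (15s)"))?
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let mgr = Arc::new(Mutex::new(Manager { conns: HashMap::new() }));
    let peer = 42;
    let conn = quic_connect(peer).await?;     // no lock held during the dial
    let mut m = mgr.lock().await;             // brief re-lock
    m.conns.entry(peer).or_insert(conn);      // register_new_connection equivalent
    Ok(())
}
```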
+    /// Pull posts from a peer — standalone version that doesn't require the conn_mgr lock.
+    /// Takes a cloned connection and the storage Arc.
+    pub async fn pull_from_peer_unlocked(
+        conn: iroh::endpoint::Connection,
+        storage: &Arc<StoragePool>,
+        peer_id: &NodeId,
+    ) -> anyhow::Result<PullSyncStats> {
+        let (our_follows, follows_sync) = {
+            let s = storage.get().await;
+            (s.list_follows()?, s.get_follows_with_last_sync().unwrap_or_default())
+        };
+
+        let request = PullSyncRequestPayload {
+            follows: our_follows,
+            have_post_ids: vec![],
+            since_ms: follows_sync,
+        };
+
+        let (mut send, mut recv) = conn.open_bi().await?;
+        write_typed_message(&mut send, MessageType::PullSyncRequest, &request).await?;
+        send.finish()?;
+
+        let msg_type = read_message_type(&mut recv).await?;
+        if msg_type != MessageType::PullSyncResponse {
+            anyhow::bail!("expected PullSyncResponse, got {:?}", msg_type);
+        }
+        let response: PullSyncResponsePayload = read_payload(&mut recv, MAX_PAYLOAD).await?;
+
+        let mut posts_received = 0;
+        let mut vis_updates = 0;
+        let mut new_post_ids: Vec<PostId> = Vec::new();
+        let now_ms = crate::connection::now_ms();
+        let mut synced_authors: HashSet<NodeId> = HashSet::new();
+
+        // Brief storage lock: store posts
+        {
+            let s = storage.get().await;
+            for sp in &response.posts {
+                if s.is_deleted(&sp.id)? { continue; }
+                if verify_post_id(&sp.id, &sp.post) {
+                    if s.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility)? {
+                        new_post_ids.push(sp.id);
+                        posts_received += 1;
+                    }
+                    synced_authors.insert(sp.post.author);
+                }
+            }
+        }
+
+        // Brief storage lock: upstream + last_sync + visibility updates
+        {
+            let s = storage.get().await;
+            for pid in &new_post_ids {
+                let prio = s.get_post_upstreams(pid).map(|v| v.len() as u8).unwrap_or(0);
+                let _ = s.add_post_upstream(pid, peer_id, prio);
+            }
+            for author in &synced_authors {
+                let _ = s.update_follow_last_sync(author, now_ms);
+            }
+            for vu in response.visibility_updates {
+                if let Some(post) = s.get_post(&vu.post_id)? {
+                    if post.author == vu.author {
+                        if s.update_post_visibility(&vu.post_id, &vu.visibility)? {
+                            vis_updates += 1;
+                        }
+                    }
+                }
+            }
+        }
+
+        // Register as downstream (spawned, no lock needed)
+        if !new_post_ids.is_empty() {
+            let c = conn.clone();
+            tokio::spawn(async move {
+                for post_id in new_post_ids.into_iter().take(50) {
+                    let payload = PostDownstreamRegisterPayload { post_id };
+                    if let Ok(mut send) = c.open_uni().await {
+                        let _ = write_typed_message(&mut send, MessageType::PostDownstreamRegister, &payload).await;
+                        let _ = send.finish();
+                    }
+                }
+            });
+        }
+
+        Ok(PullSyncStats { posts_received, visibility_updates: vis_updates })
+    }
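The downstream-register step above is fire-and-forget: the connection handle is cloned into a spawned task so neither the storage guard nor the manager lock outlives the sync call. The same shape with a hypothetical `Conn` stand-in:

```rust
// Hypothetical cheap-to-clone connection handle.
#[derive(Clone)]
struct Conn;
impl Conn {
    async fn notify(&self, _post_id: u64) {
        // The real code opens a uni stream and writes PostDownstreamRegister here.
    }
}

#[tokio::main]
async fn main() {
    let conn = Conn;
    let new_post_ids: Vec<u64> = (0..100).collect();
    // Clone the handle into the task; cap the batch; hold no locks while it runs.
    let c = conn.clone();
    let handle = tokio::spawn(async move {
        for post_id in new_post_ids.into_iter().take(50) {
            c.notify(post_id).await;
        }
    });
    let _ = handle.await;
}
```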
+    /// Fetch engagement headers from a peer — standalone version that doesn't require the conn_mgr lock.
+    pub async fn fetch_engagement_unlocked(
+        conn: iroh::endpoint::Connection,
+        storage: &Arc<StoragePool>,
+        _peer_id: &NodeId,
+    ) -> anyhow::Result<usize> {
+        let post_headers: Vec<([u8; 32], u64)> = {
+            let s = storage.get().await;
+            let due_ids = s.get_posts_due_for_engagement_check()?;
+            due_ids.into_iter().map(|pid| {
+                let ts = s.get_blob_header(&pid).ok().flatten().map(|(_, ts)| ts).unwrap_or(0);
+                (pid, ts)
+            }).collect()
+        };
+
+        let now_ms = crate::connection::now_ms();
+        let mut updated = 0;
+
+        for chunk in post_headers.chunks(20) {
+            let mut results: Vec<([u8; 32], Option<(String, crate::types::BlobHeader)>)> = Vec::new();
+            for (post_id, current_ts) in chunk {
+                let result: anyhow::Result<Option<(String, crate::types::BlobHeader)>> = async {
+                    let (mut send, mut recv) = conn.open_bi().await?;
+                    let request = BlobHeaderRequestPayload { post_id: *post_id, current_updated_at: *current_ts };
+                    write_typed_message(&mut send, MessageType::BlobHeaderRequest, &request).await?;
+                    send.finish()?;
+                    let msg_type = read_message_type(&mut recv).await?;
+                    if msg_type != MessageType::BlobHeaderResponse { anyhow::bail!("expected BlobHeaderResponse"); }
+                    let response: BlobHeaderResponsePayload = read_payload(&mut recv, MAX_PAYLOAD).await?;
+                    if response.updated {
+                        if let Some(json) = response.header_json {
+                            if let Ok(header) = serde_json::from_str::<crate::types::BlobHeader>(&json) {
+                                return Ok(Some((json, header)));
+                            }
+                        }
+                    }
+                    Ok(None)
+                }.await;
+                match result {
+                    Ok(header_opt) => results.push((*post_id, header_opt)),
+                    Err(e) => { trace!(post_id = hex::encode(post_id), error = %e, "Failed to fetch engagement header"); }
+                }
+            }
+
+            if !results.is_empty() {
+                let s = storage.get().await;
+                for (post_id, header_opt) in &results {
+                    let _ = s.update_post_last_check(post_id, now_ms);
+                    if let Some((json, header)) = header_opt {
+                        let _ = s.store_blob_header(&header.post_id, &header.author, json, header.updated_at);
+                        for reaction in &header.reactions { let _ = s.store_reaction(reaction); }
+                        for comment in &header.comments { let _ = s.store_comment(comment); }
+                        let _ = s.set_comment_policy(&header.post_id, &header.policy);
+                        let _ = s.update_post_last_engagement(post_id, now_ms);
+                        updated += 1;
+                    }
+                }
+            }
+        }
+
+        Ok(updated)
+    }

     /// Do the initial exchange after connecting: N1/N2 node lists + profile + deletes + peer addresses (both directions).
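`fetch_engagement_unlocked` batches its writes: each chunk of 20 does its network round-trips lock-free, then takes a single guard for all resulting writes. The same shape with a hypothetical `Store`:

```rust
use tokio::sync::Mutex;

struct Store { checked: usize }  // hypothetical storage stand-in

#[tokio::main]
async fn main() {
    let store = Mutex::new(Store { checked: 0 });
    let due: Vec<u32> = (0..45).collect();
    for chunk in due.chunks(20) {
        // Per-item network round-trips would run here with no guard held.
        let results: Vec<u32> = chunk.to_vec();
        // One guard per chunk: bounded hold time, one acquisition per 20 writes.
        let mut s = store.lock().await;
        s.checked += results.len();
    }
    assert_eq!(store.lock().await.checked, 45);
}
```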
@@ -1181,7 +1467,7 @@ impl ConnectionManager {
     ) -> anyhow::Result<()> {
         // Build our payload
         let our_payload = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let n1 = storage.build_n1_share()?;
             let n2 = storage.build_n2_share()?;
             let profile = storage.get_profile(&self.our_node_id)?;
@@ -1223,7 +1509,7 @@ impl ConnectionManager {
         let their_payload: InitialExchangePayload = read_payload(&mut recv, MAX_PAYLOAD).await?;

         // Process their data
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;

         // Their N1 → our N2 (tagged to this reporter)
         // Filter out our own ID and already-connected peers (they'd waste candidate slots)
@@ -1320,7 +1606,7 @@ impl ConnectionManager {

         // Build and send our payload
         let our_payload = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let n1 = storage.build_n1_share()?;
             let n2 = storage.build_n2_share()?;
             let profile = storage.get_profile(&self.our_node_id)?;
@@ -1351,7 +1637,7 @@ impl ConnectionManager {
         send.finish()?;

         // Process their data
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;

         // Their N1 → our N2 (filter out self + already-connected peers)
         let filtered_n1: Vec<NodeId> = their_payload.n1_node_ids.iter()
@@ -1404,7 +1690,7 @@ impl ConnectionManager {
         let seq = self.diff_seq.fetch_add(1, Ordering::Relaxed) + 1;

         let (current_n1, current_n2) = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let n1: HashSet<NodeId> = storage.build_n1_share()?.into_iter().collect();
             let n2: HashSet<NodeId> = storage.build_n2_share()?.into_iter().collect();
             (n1, n2)
@@ -1462,7 +1748,7 @@ impl ConnectionManager {
         reporter: &NodeId,
         diff: NodeListUpdatePayload,
     ) -> anyhow::Result<usize> {
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         let mut count = 0;

         // Their N1 added → add to our N2 (filter self + already-connected)
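The diff-sync hunks above compare the current N1/N2 shares against the previously sent snapshot as `HashSet`s, so the `NodeListUpdatePayload` carries only adds and removes rather than the full lists. A minimal illustration of that delta computation:

```rust
use std::collections::HashSet;

fn main() {
    let last_sent: HashSet<u32> = HashSet::from([1, 2, 3]);
    let current: HashSet<u32> = HashSet::from([2, 3, 4]);
    // Ship only the delta, as the node-list update payload does for N1/N2.
    let added: Vec<u32> = current.difference(&last_sent).copied().collect();
    let removed: Vec<u32> = last_sent.difference(&current).copied().collect();
    assert_eq!(added, [4]);
    assert_eq!(removed, [1]);
}
```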
@@ -1546,7 +1832,7 @@ impl ConnectionManager {
         conn: Option<&iroh::endpoint::Connection>,
     ) -> anyhow::Result<bool> {
         let dominated = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             // Already have this post?
             if storage.get_post(&notification.post_id)?.is_some() {
                 return Ok(false);
@@ -1570,7 +1856,7 @@ impl ConnectionManager {
         };

         let (our_follows, follows_sync) = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             (
                 storage.list_follows()?,
                 storage.get_follows_with_last_sync().unwrap_or_default(),
@@ -1600,7 +1886,7 @@ impl ConnectionManager {

         // Brief lock 1: store posts
         {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             for sp in &response.posts {
                 if verify_post_id(&sp.id, &sp.post)
                     && !storage.is_deleted(&sp.id)?
                 {
                     let _ = storage.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility);
@@ -1616,7 +1902,7 @@ impl ConnectionManager {

         // Brief lock 2: upstream + last_sync + visibility updates
         {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             for pid in &new_post_ids {
                 let prio = storage.get_post_upstreams(pid).map(|v| v.len() as u8).unwrap_or(0);
                 let _ = storage.add_post_upstream(pid, from, prio);
@@ -1658,7 +1944,7 @@ impl ConnectionManager {
             .ok_or_else(|| anyhow::anyhow!("not connected to {}", hex::encode(peer_id)))?;

         let (our_follows, follows_sync) = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             (
                 storage.list_follows()?,
                 storage.get_follows_with_last_sync().unwrap_or_default(),
@@ -1692,7 +1978,7 @@ impl ConnectionManager {
         // Brief lock 1: store posts
         let mut synced_authors: HashSet<NodeId> = HashSet::new();
         {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             for sp in &response.posts {
                 if storage.is_deleted(&sp.id)? {
                     continue;
@@ -1710,7 +1996,7 @@ impl ConnectionManager {

         // Brief lock 2: upstream + last_sync + visibility updates
         {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             for pid in &new_post_ids {
                 let prio = storage.get_post_upstreams(pid).map(|v| v.len() as u8).unwrap_or(0);
                 let _ = storage.add_post_upstream(pid, peer_id, prio);
@@ -1763,7 +2049,7 @@ impl ConnectionManager {

         // Brief lock: gather only posts DUE for engagement check (tiered frequency)
         let post_headers: Vec<([u8; 32], u64)> = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let due_ids = storage.get_posts_due_for_engagement_check()?;
             due_ids
                 .into_iter()
@@ -1827,7 +2113,7 @@ impl ConnectionManager {

         // Single lock for ALL writes in this chunk
         if !results.is_empty() {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             for (post_id, header_opt) in &results {
                 let _ = storage.update_post_last_check(post_id, now_ms);
                 if let Some((json, header)) = header_opt {
@@ -1876,7 +2162,7 @@ impl ConnectionManager {

         // Phase 1: Brief lock — load data
         let (all_posts, group_members) = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let posts = storage.list_posts_with_visibility()?;
             let members = storage.get_all_group_members().unwrap_or_default();
             (posts, members)
@@ -1924,7 +2210,7 @@ impl ConnectionManager {

         // Phase 3: Brief re-lock for is_deleted checks on filtered posts
         let (posts, vis_updates) = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let posts_to_send: Vec<SyncPost> = candidates_to_send.into_iter()
                 .filter(|(id, _, _)| !storage.is_deleted(id).unwrap_or(false))
                 .map(|(id, post, visibility)| SyncPost { id, post, visibility })
@@ -1956,7 +2242,7 @@ impl ConnectionManager {

         // Check if target is directly connected to us
         if let Some(_pc) = self.connections.get(&req.target) {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let addr = storage.get_peer_record(&req.target)?
                 .and_then(|r| r.addresses.first().map(|a| a.to_string()));
             let response = crate::protocol::AddressResponsePayload {
@@ -1970,7 +2256,7 @@ impl ConnectionManager {
             return Ok(());
         }

-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;

         // Check social routes (richer info)
         if let Some(route) = storage.get_social_route(&req.target)?
         {
@@ -2022,14 +2308,14 @@ impl ConnectionManager {
     pub async fn resolve_address(&self, target: &NodeId) -> anyhow::Result<Option<String>> {
         // Check if target is directly connected
         if self.connections.contains_key(target) {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             return Ok(storage.get_peer_record(target)?
                 .and_then(|r| r.addresses.first().map(|a| a.to_string())));
         }

         // Check social routes
         {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             if let Some(route) = storage.get_social_route(target)? {
                 if route.status == SocialStatus::Online {
                     if let Some(addr) = route.addresses.first() {
@@ -2041,7 +2327,7 @@ impl ConnectionManager {

         // N2 lookup: ask tagged reporter for address
         let n2_reporters = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             storage.find_in_n2(target)?
         };
         for reporter in &n2_reporters {
@@ -2064,7 +2350,7 @@ impl ConnectionManager {

         // N3 lookup: ask tagged reporter (chains one more hop)
         let n3_reporters = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             storage.find_in_n3(target)?
         };
         for reporter in &n3_reporters {
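`resolve_address` walks a fixed fallback chain: direct connection, then the social-route cache, then tagged N2 reporters, then N3 reporters one hop deeper. A compressed sketch of that ordering with hypothetical lookup helpers:

```rust
// Hypothetical lookups; the real versions hit the peers table, social routes,
// and tagged N2/N3 reporters in this order.
fn direct(_peer: u64) -> Option<String> { None }
fn social_route(_peer: u64) -> Option<String> { None }
fn ask_n2_reporters(_peer: u64) -> Option<String> { Some("203.0.113.7:4433".into()) }
fn ask_n3_reporters(_peer: u64) -> Option<String> { None }

fn resolve(peer: u64) -> Option<String> {
    direct(peer)
        .or_else(|| social_route(peer))
        .or_else(|| ask_n2_reporters(peer))
        .or_else(|| ask_n3_reporters(peer)) // one extra hop, so it goes last
}

fn main() {
    assert_eq!(resolve(7).as_deref(), Some("203.0.113.7:4433"));
}
```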
@@ -2092,7 +2378,7 @@ impl ConnectionManager {
     /// Checks: peers table → social route cache.
     pub async fn resolve_peer_addr_local(&self, peer_id: &NodeId) -> Option<iroh::EndpointAddr> {
         let endpoint_id = iroh::EndpointId::from_bytes(peer_id).ok()?;
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;

         // 1. Peers table
         if let Ok(Some(rec)) = storage.get_peer_record(peer_id) {
@@ -2118,7 +2404,7 @@ impl ConnectionManager {
         if candidates.is_empty() {
             return None;
         }
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         for nid in &candidates {
             if let Ok(Some(rec)) = storage.get_peer_record(nid) {
                 if let Some(addr) = rec.addresses.first() {
@@ -2144,7 +2430,7 @@ impl ConnectionManager {
     ) -> anyhow::Result<Option<WormResult>> {
         // Gather needle_peers: target's recent_peers from stored profile (up to 10)
         let needle_peers: Vec<NodeId> = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let mut rp = storage.get_recent_peers(target)?;
             rp.truncate(10);
             rp
@@ -2181,7 +2467,7 @@ impl ConnectionManager {
     pub async fn initiate_worm_lookup(&self, target: &NodeId) -> anyhow::Result<Option<WormResult>> {
         // Check cooldown
         {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             if storage.is_worm_cooldown(target, WORM_COOLDOWN_MS)? {
                 debug!(target = hex::encode(target), "Worm lookup on cooldown");
                 return Ok(None);
@@ -2190,7 +2476,7 @@ impl ConnectionManager {

         // Gather needle_peers: target's recent_peers from stored profile (up to 10)
         let needle_peers: Vec<NodeId> = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let mut rp = storage.get_recent_peers(target)?;
             rp.truncate(10);
             rp
@@ -2214,19 +2500,19 @@ impl ConnectionManager {
         match result {
             Ok(Ok(Some(wr))) => Ok(Some(wr)),
             Ok(Ok(None)) => {
-                let storage = self.storage.lock().await;
+                let storage = self.storage.get().await;
                 let _ = storage.record_worm_miss(target);
                 Ok(None)
             }
             Ok(Err(e)) => {
                 debug!(target = hex::encode(target), error = %e, "Worm lookup failed");
-                let storage = self.storage.lock().await;
+                let storage = self.storage.get().await;
                 let _ = storage.record_worm_miss(target);
                 Err(e)
             }
             Err(_) => {
                 debug!(target = hex::encode(target), "Worm lookup timed out");
-                let storage = self.storage.lock().await;
+                let storage = self.storage.get().await;
                 let _ = storage.record_worm_miss(target);
                 Ok(None)
             }
@@ -2249,7 +2535,7 @@ impl ConnectionManager {
         // Check direct connections first
         for needle in all_needles {
             if self.connections.contains_key(needle) {
-                let storage = self.storage.lock().await;
+                let storage = self.storage.get().await;
                 let addr = storage.get_peer_record(needle)?
                     .and_then(|r| r.addresses.first().map(|a| a.to_string()));
                 drop(storage);
@@ -2265,7 +2551,7 @@ impl ConnectionManager {
         }

         // Check N2/N3
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         let found_entries = storage.find_any_in_n2_n3(all_needles)?;
         if let Some((found_id, _reporter, _level)) = found_entries.first() {
             drop(storage);
@@ -2651,13 +2937,24 @@ impl ConnectionManager {
                 .retain(|_, ts| now - *ts < WORM_DEDUP_EXPIRY_MS);
         }

+        self.handle_worm_query_after_dedup(payload, send, from_peer).await
+    }
+
+    /// Worm query handler after the dedup check — can be called from a spawned task.
+    pub async fn handle_worm_query_after_dedup(
+        &mut self,
+        payload: WormQueryPayload,
+        mut send: iroh::endpoint::SendStream,
+        from_peer: NodeId,
+    ) -> anyhow::Result<()> {
+        // Check for post/blob content locally (CDN tree, replicas, blob store)
         let mut post_holder: Option<NodeId> = None;
         let mut blob_holder: Option<NodeId> = None;

         if let Some(ref post_id) = payload.post_id {
             let found = {
-                let store = self.storage.lock().await;
+                let store = self.storage.get().await;
                 // Direct: do we have this post?
                 if store.get_post_with_visibility(post_id).ok().flatten().is_some() {
                     Some(self.our_node_id)
@@ -2680,7 +2977,7 @@ impl ConnectionManager {
                 blob_holder = Some(self.our_node_id);
             } else {
                 // Check CDN: do we know who has it via blob post ownership?
-                let store = self.storage.lock().await;
+                let store = self.storage.get().await;
                 if let Ok(Some(pid)) = store.get_blob_post_id(blob_id) {
                     let downstream = store.get_post_downstream(&pid).unwrap_or_default();
                     if !downstream.is_empty() {
@@ -2700,7 +2997,7 @@ impl ConnectionManager {
         let mut found = None;
         for needle in &all_needles {
             if self.connections.contains_key(needle) {
-                let storage = self.storage.lock().await;
+                let storage = self.storage.get().await;
                 let addr = storage.get_peer_record(needle)?
                     .and_then(|r| r.addresses.first().map(|a| a.to_string()));
                 found = Some((*needle, addr.into_iter().collect::<Vec<String>>(), 0u64));
             }
         }
         if found.is_none() {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let entries = storage.find_any_in_n2_n3(&all_needles)?;
             if let Some((found_id, _reporter, _level)) = entries.first() {
                 drop(storage);
@@ -2856,7 +3153,7 @@ impl ConnectionManager {
         };

         // Try each candidate until we find one with a known address
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         for candidate in ordered {
             let nid = candidate.0;
             if let Ok(Some(rec)) = storage.get_peer_record(&nid) {
@@ -2878,7 +3175,7 @@ impl ConnectionManager {
         // Mark disconnected in referral list (anchor-side)
         self.mark_referral_disconnected(peer_id);

-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         // Remove their N2 contributions (their N1 share → our N2)
         let _ = storage.clear_peer_n2(peer_id);
         // Remove their N3 contributions (their N2 share → our N3)
@@ -2961,8 +3258,9 @@ impl ConnectionManager {
     }

     /// Rebalance connection slots: remove dead connections, prune stale N2/N3 entries.
-    /// Returns list of newly connected peer IDs (caller should spawn run_mesh_streams for them).
-    pub async fn rebalance_slots(&mut self) -> anyhow::Result<Vec<NodeId>> {
+    /// Returns (newly_connected, pending_connects). The caller should QUIC-connect the pending
+    /// list outside the lock, then register the results.
+    pub async fn rebalance_slots(&mut self) -> anyhow::Result<(Vec<NodeId>, Vec<(NodeId, iroh::EndpointAddr, String, PeerSlotKind)>)> {
         self.log_activity(ActivityLevel::Info, ActivityCategory::Rebalance, "Rebalance started".into(), None);

         // 1. Remove dead + zombie connections
@@ -2987,7 +3285,7 @@ impl ConnectionManager {

         // 2. Prune stale N2/N3 entries (5 hours) + stale watchers (30 days)
         {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let pruned = storage.prune_n2_n3(5 * 60 * 60 * 1000)?;
             if pruned > 0 {
                 info!(pruned, "Pruned stale N2/N3 entries");
@@ -2997,7 +3295,7 @@ impl ConnectionManager {
         // 3. Diversity scoring: find low-diversity peers for potential eviction
         {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let connected: Vec<NodeId> = self.connections.keys().copied().collect();
             let mut zero_diversity = Vec::new();
             for peer_id in &connected {
@@ -3011,12 +3309,13 @@ impl ConnectionManager {
             }
         }

-        let mut newly_connected: Vec<NodeId> = Vec::new();
+        let newly_connected: Vec<NodeId> = Vec::new();
+        let mut pending_connects: Vec<(NodeId, iroh::EndpointAddr, String, PeerSlotKind)> = Vec::new();

         // Priority 0 (NEW): Reconnect preferred peers
         {
             let preferred_peers = {
-                let storage = self.storage.lock().await;
+                let storage = self.storage.get().await;
                 storage.list_preferred_peers().unwrap_or_default()
             };
@@ -3029,7 +3328,7 @@ impl ConnectionManager {
                 if let Some(&failed_at) = self.unreachable_peers.get(peer_id) {
                     if now.saturating_sub(failed_at) > PREFERRED_UNREACHABLE_PRUNE_MS {
                         info!(peer = hex::encode(peer_id), "Removing preferred peer unreachable for 7 days+");
-                        let storage = self.storage.lock().await;
+                        let storage = self.storage.get().await;
                         let _ = storage.remove_preferred_peer(peer_id);
                         continue;
                     }
@@ -3053,10 +3352,28 @@ impl ConnectionManager {
                     }
                 }

-                if let Err(e) = self.reconnect_preferred(peer_id).await {
-                    debug!(peer = hex::encode(peer_id), error = %e, "Preferred reconnection failed");
-                } else if self.connections.contains_key(peer_id) {
-                    newly_connected.push(*peer_id);
+                // Collect for connection outside the lock
+                let addr_str = if !self.is_likely_unreachable(peer_id) {
+                    let storage = self.storage.get().await;
+                    let addr = storage.get_peer_record(peer_id).ok().flatten()
+                        .and_then(|r| r.addresses.first().map(|a| a.to_string()))
+                        .or_else(|| {
+                            storage.get_social_route(peer_id).ok().flatten()
+                                .and_then(|r| r.addresses.first().map(|a| a.to_string()))
+                        });
+                    drop(storage);
+                    addr
+                } else {
+                    None
+                };
+                if let Some(addr_s) = addr_str {
+                    if let Ok(eid) = iroh::EndpointId::from_bytes(peer_id) {
+                        let mut addr = iroh::EndpointAddr::from(eid);
+                        if let Ok(sock) = addr_s.parse::<std::net::SocketAddr>() {
+                            addr = addr.with_ip_addr(sock);
+                        }
+                        pending_connects.push((*peer_id, addr, addr_s, PeerSlotKind::Preferred));
+                    }
                 }
             }
         }
@@ -3065,7 +3382,7 @@ impl ConnectionManager {
         let local_count = self.count_kind(PeerSlotKind::Local);
         if local_count < self.local_slots {
             let candidates: Vec<(NodeId, Option<String>)> = {
-                let storage = self.storage.lock().await;
+                let storage = self.storage.get().await;
                 let mut cands = Vec::new();

                 // Priority 1: reconnect recently-dead non-preferred peers
@@ -3112,15 +3429,8 @@ impl ConnectionManager {
                 if let Ok(sock) = addr_s.parse::<std::net::SocketAddr>() {
                     addr = addr.with_ip_addr(sock);
                 }
-                match self.connect_to(peer_id, addr, PeerSlotKind::Local).await {
-                    Ok(()) => {
-                        info!(peer = hex::encode(peer_id), "Auto-connected to diverse peer");
-                        newly_connected.push(peer_id);
-                    }
-                    Err(e) => {
-                        debug!(peer = hex::encode(peer_id), error = %e, "Auto-connect failed");
-                    }
-                }
+                // Collect for connection outside the lock
+                pending_connects.push((peer_id, addr, addr_s, PeerSlotKind::Local));
             }
         }
     }
@@ -3150,12 +3460,12 @@ impl ConnectionManager {
         // Backstop: signal growth loop to fill any remaining local slots
         self.notify_growth();

-        Ok(newly_connected)
+        Ok((newly_connected, pending_connects))
     }
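`rebalance_slots` now returns work instead of performing it: the caller dials each pending address with no manager lock held, then re-locks briefly per successful connection. A sketch of that dispatch loop with hypothetical types:

```rust
use std::{collections::HashMap, sync::Arc};
use tokio::sync::Mutex;

struct Conn;
struct Manager { conns: HashMap<u64, Conn> }

impl Manager {
    // Stand-in for rebalance_slots: returns addresses to dial, does no dialing.
    fn rebalance(&mut self) -> Vec<u64> { vec![7, 9] }
    fn register(&mut self, peer: u64, conn: Conn) { self.conns.insert(peer, conn); }
}

async fn dial(_peer: u64) -> Option<Conn> { Some(Conn) }  // slow QUIC dial in reality

#[tokio::main]
async fn main() {
    let mgr = Arc::new(Mutex::new(Manager { conns: HashMap::new() }));
    let pending = { mgr.lock().await.rebalance() };  // brief lock
    for peer in pending {
        if let Some(conn) = dial(peer).await {       // unlocked dial
            mgr.lock().await.register(peer, conn);   // brief re-lock per success
        }
    }
}
```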
     /// Find the lowest-diversity non-preferred peer to evict.
     async fn find_non_preferred_eviction_candidate(&self) -> Option<NodeId> {
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         let mut worst: Option<(NodeId, usize)> = None;
         for (peer_id, mc) in &self.connections {
             if mc.slot_kind == PeerSlotKind::Preferred {
@@ -3232,7 +3542,7 @@ impl ConnectionManager {

         if response.accepted {
             // Persist agreement
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             storage.add_preferred_peer(peer_id)?;
             storage.add_mesh_peer(peer_id, PeerSlotKind::Preferred, 100)?;
             drop(storage);
@@ -3274,7 +3584,7 @@ impl ConnectionManager {

         let response = if can_accept {
             // Persist agreement
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             storage.add_preferred_peer(&from_peer)?;
             storage.add_mesh_peer(&from_peer, PeerSlotKind::Preferred, 100)?;
             drop(storage);
@@ -3310,95 +3620,9 @@ impl ConnectionManager {
         Ok(())
     }

-    /// Reconnect a preferred peer via relay introduction if direct connect fails.
-    pub async fn reconnect_preferred(&mut self, peer_id: &NodeId) -> anyhow::Result<()> {
-        if self.connections.contains_key(peer_id) {
-            return Ok(()); // Already connected
-        }
-
-        // Try direct connect first (from peers table or social route)
-        let addr_str = if !self.is_likely_unreachable(peer_id) {
-            let storage = self.storage.lock().await;
-            let addr = storage.get_peer_record(peer_id)?
-                .and_then(|r| r.addresses.first().map(|a| a.to_string()))
-                .or_else(|| {
-                    storage.get_social_route(peer_id).ok().flatten()
-                        .and_then(|r| r.addresses.first().map(|a| a.to_string()))
-                });
-            drop(storage);
-            addr
-        } else {
-            None
-        };
-
-        if let Some(addr_s) = addr_str {
-            let endpoint_id = iroh::EndpointId::from_bytes(peer_id)?;
-            let mut addr = iroh::EndpointAddr::from(endpoint_id);
-            if let Ok(sock) = addr_s.parse::<std::net::SocketAddr>() {
-                addr = addr.with_ip_addr(sock);
-            }
-            match tokio::time::timeout(
-                std::time::Duration::from_millis(HOLE_PUNCH_TIMEOUT_MS),
-                self.connect_to(*peer_id, addr, PeerSlotKind::Preferred),
-            ).await {
-                Ok(Ok(())) => {
-                    self.mark_reachable(peer_id);
-                    info!(peer = hex::encode(peer_id), "Preferred peer reconnected directly");
-                    return Ok(());
-                }
-                Ok(Err(e)) => {
-                    debug!(peer = hex::encode(peer_id), error = %e, "Direct reconnect failed, trying relay");
-                    self.mark_unreachable(peer_id);
-                }
-                Err(_) => {
-                    debug!(peer = hex::encode(peer_id), "Direct reconnect timed out, trying relay");
-                    self.mark_unreachable(peer_id);
-                }
-            }
-        }
-
-        // Try relay introduction (with timeout to avoid holding lock forever)
-        let relays = self.find_relays_for(peer_id).await;
-        for (relay_peer, ttl) in relays {
-            let introduce_result = match tokio::time::timeout(
-                std::time::Duration::from_millis(RELAY_INTRO_TIMEOUT_MS),
-                self.send_relay_introduce(&relay_peer, peer_id, ttl),
-            ).await {
-                Ok(r) => r,
-                Err(_) => {
-                    debug!(relay = hex::encode(relay_peer), "Relay introduce timed out");
-                    continue;
-                }
-            };
-            match introduce_result {
-                Ok(result) if result.accepted => {
-                    let our_profile = self.our_nat_profile();
-                    let peer_profile = {
-                        let s = self.storage.lock().await;
-                        s.get_peer_nat_profile(peer_id)
-                    };
-                    if let Some(conn) = hole_punch_with_scanning(&self.endpoint, peer_id, &result.target_addresses, our_profile, peer_profile).await {
-                        // Register as preferred mesh peer
-                        self.register_connection(*peer_id, conn, &[], PeerSlotKind::Preferred).await;
-                        self.mark_reachable(peer_id);
-                        info!(
-                            peer = hex::encode(peer_id),
-                            relay = hex::encode(relay_peer),
-                            "Preferred peer reconnected via relay hole punch"
-                        );
-                        return Ok(());
-                    }
-                }
-                Ok(_) => {} // Not accepted, try next relay
-                Err(e) => {
-                    debug!(relay = hex::encode(relay_peer), error = %e, "Relay introduce failed");
-                }
-            }
-        }
-
-        debug!(peer = hex::encode(peer_id), "Could not reconnect preferred peer");
-        Ok(())
-    }
+    // reconnect_preferred removed — direct connect now happens outside the lock
+    // via pending_connects in the actor dispatch. Relay fallback for preferred peers
+    // is handled by the growth loop's normal relay introduction path.

     // ---- Session connection management ----
@@ -3474,7 +3698,7 @@ impl ConnectionManager {

         // Client side: known anchors we're session-connected to (mesh was full)
         {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             for nid in self.sessions.keys() {
                 if storage.is_peer_anchor(nid).unwrap_or(false) && !self.connections.contains_key(nid) {
                     keep_alive.insert(*nid);
@@ -3545,7 +3769,7 @@ impl ConnectionManager {
     }

     /// Get a clone of the storage Arc (for standalone exchange functions).
-    pub fn storage_ref(&self) -> Arc<Mutex<Storage>> {
+    pub fn storage_ref(&self) -> Arc<StoragePool> {
         Arc::clone(&self.storage)
     }
@@ -3572,7 +3796,7 @@ impl ConnectionManager {
     /// Call after a successful hole punch to refine filtering classification.
     /// `used_scanning` = true if scanning was required (standard punch failed first).
     pub async fn infer_nat_filtering(&self, node_id: &NodeId, used_scanning: bool) {
-        let s = self.storage.lock().await;
+        let s = self.storage.get().await;
         let mut profile = s.get_peer_nat_profile(node_id);
         if profile.mapping == crate::types::NatMapping::EndpointDependent {
             if used_scanning {
@@ -3609,7 +3833,7 @@ impl ConnectionManager {
     /// knows target directly (N2), ttl=1 means relay chains through their peer (N3).
     pub async fn find_relays_for(&self, target: &NodeId) -> Vec<(NodeId, u8)> {
         let mut candidates = Vec::new();
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;

         // Step 1 (NEW): Check target's preferred_tree from social_routes (~100 NodeIds)
         // Intersect with our connections → TTL=0 candidates (they know target or are stably nearby)
@@ -3813,7 +4037,7 @@ impl ConnectionManager {
         let our_http_addr = self.http_addr.clone();
         let our_nat_profile = self.our_nat_profile();
         let peer_nat_profile = {
-            let s = self.storage.lock().await;
+            let s = self.storage.get().await;
             s.get_peer_nat_profile(&requester)
         };
         tokio::spawn(async move {
@@ -4016,7 +4240,7 @@ impl ConnectionManager {
         if payload.ttl > 0 {
             // Find an N2 reporter for the target and forward with ttl-1
             let reporters = {
-                let storage = self.storage.lock().await;
+                let storage = self.storage.get().await;
                 storage.find_in_n2(&payload.target).unwrap_or_default()
             };
@@ -4356,7 +4580,7 @@ impl ConnectionManager {

         // Prefer observed remote address (NAT-mapped public IP) over self-reported
         let addresses = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let observed = storage.get_peer_record(&payload.node_id)
                 .ok().flatten()
                 .map(|r| r.addresses).unwrap_or_default();
@@ -4689,7 +4913,7 @@ impl ConnectionManager {
             MessageType::ProfileUpdate => {
                 let payload: ProfileUpdatePayload = read_payload(recv, MAX_PAYLOAD).await?;
                 let cm = conn_mgr.lock().await;
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;
                 for profile in payload.profiles {
                     let _ = storage.store_profile(&profile);
                 }
@@ -4702,7 +4926,7 @@ impl ConnectionManager {
                 // Collect blob CIDs + CDN peers before async work
                 let mut blob_cleanup: Vec<([u8; 32], Vec<(NodeId, Vec<String>)>, Option<(NodeId, Vec<String>)>)> = Vec::new();
                 {
-                    let storage = cm.storage.lock().await;
+                    let storage = cm.storage.get().await;
                     for dr in &payload.records {
                         if crypto::verify_delete_signature(&dr.author, &dr.post_id, &dr.signature) {
                             // Collect blobs for CDN cleanup before deleting
@@ -4764,7 +4988,7 @@ impl ConnectionManager {
                 let payload: crate::protocol::VisibilityUpdatePayload =
                     read_payload(recv, MAX_PAYLOAD).await?;
                 let cm = conn_mgr.lock().await;
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;
                 for vu in payload.updates {
                     if let Some(post) = storage.get_post(&vu.post_id)? {
                         if post.author == vu.author {
@@ -4798,7 +5022,7 @@ impl ConnectionManager {
             MessageType::PostPush => {
                 let push: PostPushPayload = read_payload(recv, MAX_PAYLOAD).await?;
                 let cm = conn_mgr.lock().await;
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;
                 if !storage.is_deleted(&push.post.id)?
                     && storage.get_post(&push.post.id)?.is_none()
                     && crate::content::verify_post_id(&push.post.id, &push.post.post)
@@ -4825,7 +5049,7 @@ impl ConnectionManager {
                     "Received audience request"
                 );
                 let cm = conn_mgr.lock().await;
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;
                 // Store as inbound pending request
                 let _ = storage.store_audience(
                     &req.requester,
@@ -4843,7 +5067,7 @@ impl ConnectionManager {
                     "Received audience response"
                 );
                 let cm = conn_mgr.lock().await;
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;
                 let new_status = if resp.approved {
                     crate::types::AudienceStatus::Approved
                 } else {
@@ -4858,7 +5082,7 @@ impl ConnectionManager {
             MessageType::SocialAddressUpdate => {
                 let payload: SocialAddressUpdatePayload = read_payload(recv, MAX_PAYLOAD).await?;
                 let cm = conn_mgr.lock().await;
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;
                 if storage.has_social_route(&payload.node_id).unwrap_or(false) {
                     let addrs: Vec<std::net::SocketAddr> = payload.addresses.iter()
                         .filter_map(|a| a.parse().ok()).collect();
@@ -4878,7 +5102,7 @@ impl ConnectionManager {
             MessageType::ManifestPush => {
                 let payload: crate::protocol::ManifestPushPayload = read_payload(recv, MAX_PAYLOAD).await?;
                 let cm = conn_mgr.lock().await;
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;
                 let mut stored_entries: Vec<_> = Vec::new();
                 for entry in &payload.manifests {
                     if !crate::crypto::verify_manifest_signature(&entry.manifest.author_manifest) {
@@ -5008,7 +5232,7 @@ impl ConnectionManager {
                         // Brief re-acquire for storage writes only
                         let stored = {
                             let cm = cm_arc.lock().await;
-                            let storage = cm.storage.lock().await;
+                            let storage = cm.storage.get().await;
                             if storage.store_post_with_visibility(&sync_post.id, &sync_post.post, &sync_post.visibility).unwrap_or(false) {
                                 let prio = storage.get_post_upstreams(&sync_post.id).map(|v| v.len() as u8).unwrap_or(0);
                                 let _ = storage.add_post_upstream(&sync_post.id, &sender_id, prio);
@@ -5048,7 +5272,7 @@ impl ConnectionManager {
             MessageType::SocialDisconnectNotice => {
                 let payload: SocialDisconnectNoticePayload = read_payload(recv, MAX_PAYLOAD).await?;
                 let cm = conn_mgr.lock().await;
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;
                 if storage.has_social_route(&payload.node_id).unwrap_or(false) {
                     let _ = storage.set_social_route_status(&payload.node_id, SocialStatus::Disconnected);
                 }
@@ -5062,7 +5286,7 @@ impl ConnectionManager {
                 let payload: crate::protocol::BlobDeleteNoticePayload =
                     read_payload(recv, MAX_PAYLOAD).await?;
                 let cm = conn_mgr.lock().await;
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;
                 let cid = payload.cid;

                 // Check if sender was our upstream for this blob
@@ -5102,7 +5326,7 @@ impl ConnectionManager {
                 if payload.admin != remote_node_id {
                     warn!(peer = hex::encode(remote_node_id), "GroupKeyDistribute from non-admin, ignoring");
                 } else {
-                    let storage = cm.storage.lock().await;
+                    let storage = cm.storage.get().await;
                     let record = crate::types::GroupKeyRecord {
                         group_id: payload.group_id,
                         circle_name: payload.circle_name.clone(),
@@ -5143,7 +5367,7 @@ impl ConnectionManager {
                 let cm = conn_mgr.lock().await;

                 // Try to decrypt if we have the group seed
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;
                 let decrypted = storage
                     .get_group_seed(&payload.group_id, payload.epoch)
                     .ok()
@@ -5233,7 +5457,7 @@ impl ConnectionManager {
             MessageType::PostDownstreamRegister => {
                 let payload: PostDownstreamRegisterPayload = read_payload(recv, MAX_PAYLOAD).await?;
                 let cm = conn_mgr.lock().await;
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;
                 let _ = storage.add_post_downstream(&payload.post_id, &remote_node_id);
                 drop(storage);
                 trace!(
@@ -5290,7 +5514,7 @@ impl ConnectionManager {
                 let payload: SocialCheckinPayload = read_payload(&mut recv, MAX_PAYLOAD).await?;
                 let reply = {
                     let cm = conn_mgr.lock().await;
-                    let storage = cm.storage.lock().await;
+                    let storage = cm.storage.get().await;
                     // Update their social route
                     if storage.has_social_route(&payload.node_id).unwrap_or(false) {
                         let addrs: Vec<std::net::SocketAddr> = payload.addresses.iter()
@@ -5323,45 +5547,64 @@ impl ConnectionManager {
                     ttl = payload.ttl,
                     "Received worm query"
                 );
-                let mut cm = conn_mgr.lock().await;
-                cm.handle_worm_query(payload, send, remote_node_id).await?;
+                // Brief lock: dedup check only. Then spawn the heavy work.
+                let should_process = {
+                    let mut cm = conn_mgr.lock().await;
+                    let now = now_ms();
+                    if let Some(&seen_at) = cm.seen_worms.get(&payload.worm_id) {
+                        if now - seen_at < 30_000 { false } else { cm.seen_worms.insert(payload.worm_id, now); true }
+                    } else {
+                        cm.seen_worms.insert(payload.worm_id, now);
+                        true
+                    }
+                };
+                if should_process {
+                    // Snapshot everything under brief lock, then do all I/O outside
+                    let ctx = ConnectionActor::snapshot_worm_context(conn_mgr).await;
+                    let blob_store = {
+                        let cm = conn_mgr.lock().await;
+                        Arc::clone(&cm.blob_store)
+                    };
+                    // Also snapshot wide-referral candidates (node_id, slot_kind)
+                    let wide_candidates: Vec<(NodeId, PeerSlotKind)> = {
+                        let cm = conn_mgr.lock().await;
+                        cm.connections.iter()
+                            .filter(|(nid, _)| **nid != remote_node_id && **nid != cm.our_node_id)
+                            .map(|(nid, pc)| (*nid, pc.slot_kind))
+                            .collect()
+                    };
+                    tokio::spawn(async move {
+                        if let Err(e) = ConnectionActor::handle_worm_query_unlocked(ctx, blob_store, wide_candidates, payload, send, remote_node_id).await {
+                            debug!(error = %e, "Worm query handler failed");
+                        }
+                    });
+                } else {
+                    // Already seen — send empty response
+                    let resp = WormResponsePayload { worm_id: payload.worm_id, found: false, found_id: None, addresses: vec![], reporter: None, hop: None, wide_referral: None, post_holder: None, blob_holder: None };
+                    write_typed_message(&mut send, MessageType::WormResponse, &resp).await?;
+                    send.finish()?;
+                }
             }
             MessageType::PostFetchRequest => {
                 let payload: crate::protocol::PostFetchRequestPayload = read_payload(&mut recv, 4096).await?;
-                debug!(
-                    peer = hex::encode(remote_node_id),
-                    post = hex::encode(payload.post_id),
-                    "Received PostFetch request"
-                );
-                let cm = conn_mgr.lock().await;
+                debug!(peer = hex::encode(remote_node_id), post = hex::encode(payload.post_id), "Received PostFetch request");
+                // Brief lock: get the storage Arc, then query + respond without the conn_mgr lock
+                let storage = {
+                    let cm = conn_mgr.lock().await;
+                    Arc::clone(&cm.storage)
+                };
                 let result = {
-                    let store = cm.storage.lock().await;
+                    let store = storage.get().await;
                     store.get_post_with_visibility(&payload.post_id).ok().flatten()
                 };
                 let resp = if let Some((post, visibility)) = result {
                     if matches!(visibility, PostVisibility::Public) {
-                        crate::protocol::PostFetchResponsePayload {
-                            post_id: payload.post_id,
-                            found: true,
-                            post: Some(SyncPost {
-                                id: payload.post_id,
-                                post,
-                                visibility,
-                            }),
-                        }
+                        crate::protocol::PostFetchResponsePayload { post_id: payload.post_id, found: true, post: Some(SyncPost { id: payload.post_id, post, visibility }) }
                     } else {
-                        crate::protocol::PostFetchResponsePayload {
-                            post_id: payload.post_id,
-                            found: false,
-                            post: None,
-                        }
+                        crate::protocol::PostFetchResponsePayload { post_id: payload.post_id, found: false, post: None }
                    }
                 } else {
-                    crate::protocol::PostFetchResponsePayload {
-                        post_id: payload.post_id,
-                        found: false,
-                        post: None,
-                    }
+                    crate::protocol::PostFetchResponsePayload { post_id: payload.post_id, found: false, post: None }
                 };
                 write_typed_message(&mut send, MessageType::PostFetchResponse, &resp).await?;
                 send.finish()?;
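The worm arm above reduces the locked section to a dedup-window check plus a few snapshots; everything network-bound moves into a spawned task. The dedup shape in isolation, with hypothetical types:

```rust
use std::{collections::HashMap, sync::Arc};
use tokio::sync::Mutex;

fn now_ms() -> u64 { 1_000_000 } // stand-in clock

#[tokio::main]
async fn main() {
    let seen: Arc<Mutex<HashMap<u64, u64>>> = Arc::new(Mutex::new(HashMap::new()));
    let worm_id: u64 = 1;
    // Brief lock: the 30s window check is the only state this needs.
    let should_process = {
        let mut s = seen.lock().await;
        let now = now_ms();
        match s.get(&worm_id) {
            Some(&at) if now.saturating_sub(at) < 30_000 => false,
            _ => { s.insert(worm_id, now); true }
        }
    };
    if should_process {
        // The cascade runs on snapshots in its own task; the lock is long gone.
        tokio::spawn(async move { /* handle_worm_query_unlocked(ctx, ...) */ }).await.ok();
    }
}
```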
@@ -5374,20 +5617,18 @@ impl ConnectionManager {
                     post = hex::encode(payload.post_id),
                     "Received TcpPunch request"
                 );
-                // Validate: we hold this post and it's public
-                let (valid, http_port, http_addr) = {
+                // Brief lock: extract what we need, then validate + punch outside
+                let (storage, endpoint_port, http_capable, http_addr) = {
                     let cm = conn_mgr.lock().await;
-                    let has_post = {
-                        let store = cm.storage.lock().await;
-                        store.get_post_with_visibility(&payload.post_id)
-                            .ok().flatten()
-                            .map(|(_, v)| matches!(v, PostVisibility::Public))
-                            .unwrap_or(false)
-                    };
-                    let port = cm.endpoint.bound_sockets().first()
-                        .map(|s| s.port()).unwrap_or(0);
-                    (has_post && cm.http_capable, port, cm.http_addr.clone())
+                    let port = cm.endpoint.bound_sockets().first().map(|s| s.port()).unwrap_or(0);
+                    (Arc::clone(&cm.storage), port, cm.http_capable, cm.http_addr.clone())
                 };
+                let has_post = {
+                    let store = storage.get().await;
+                    store.get_post_with_visibility(&payload.post_id).ok().flatten()
+                        .map(|(_, v)| matches!(v, PostVisibility::Public)).unwrap_or(false)
+                };
+                let (valid, http_port) = (has_post && http_capable, endpoint_port);
                 let resp = if valid {
                     // Parse browser IP and execute TCP punch
                     if let Ok(browser_ip) = payload.browser_ip.parse::<std::net::IpAddr>() {
@@ -5407,141 +5648,67 @@ impl ConnectionManager {
             }
             MessageType::BlobRequest => {
                 let payload: BlobRequestPayload = read_payload(&mut recv, 4096).await?;
-                let cm = conn_mgr.lock().await;
-                let data = cm.blob_store.get(&payload.cid)?;
+                // Extract Arcs under brief lock — no conn_mgr lock needed for blob serving
+                let (blob_store, storage, our_node_id) = {
+                    let cm = conn_mgr.lock().await;
+                    (Arc::clone(&cm.blob_store), Arc::clone(&cm.storage), cm.our_node_id)
+                };
+                // All I/O outside the lock
+                let data = blob_store.get(&payload.cid)?;
                 let response = match data {
                     Some(bytes) => {
-                        // Check delivery budget before serving
-                        if !cm.blob_store.consume_delivery_budget(bytes.len() as u64) {
-                            debug!(
-                                peer = hex::encode(remote_node_id),
-                                cid = hex::encode(payload.cid),
-                                blob_size = bytes.len(),
-                                "Delivery budget exhausted, declining blob request"
-                            );
-                            BlobResponsePayload {
-                                cid: payload.cid,
-                                found: false,
-                                data_b64: String::new(),
-                                manifest: None,
-                                cdn_registered: false,
-                                cdn_redirect_peers: vec![],
-                            }
+                        if !blob_store.consume_delivery_budget(bytes.len() as u64) {
+                            debug!(peer = hex::encode(remote_node_id), cid = hex::encode(payload.cid), "Delivery budget exhausted");
+                            BlobResponsePayload { cid: payload.cid, found: false, data_b64: String::new(), manifest: None, cdn_registered: false, cdn_redirect_peers: vec![] }
                         } else {
-                            use base64::Engine;
-
-                            // Load manifest if available, wrap in CdnManifest
-                            let storage = cm.storage.lock().await;
-                            let manifest: Option<crate::types::CdnManifest> = storage
-                                .get_cdn_manifest(&payload.cid)
-                                .ok()
-                                .flatten()
-                                .and_then(|json| {
-                                    // Try as AuthorManifest first (author-side), then as CdnManifest (relay)
-                                    if let Ok(am) = serde_json::from_str::<crate::types::AuthorManifest>(&json) {
-                                        let ds_count = storage.get_blob_downstream_count(&payload.cid).unwrap_or(0);
-                                        Some(crate::types::CdnManifest {
-                                            author_manifest: am,
-                                            host: cm.our_node_id,
-                                            host_addresses: vec![], // Filled by caller if needed
-                                            source: cm.our_node_id,
-                                            source_addresses: vec![],
-                                            downstream_count: ds_count,
-                                        })
-                                    } else {
-                                        // Already a CdnManifest (from a relay/fetch)
-                                        serde_json::from_str(&json).ok()
-                                    }
-                                });
-
-                            // Try to register requester as downstream
-                            let (cdn_registered, cdn_redirect_peers) = if !payload.requester_addresses.is_empty() {
-                                let ok = storage.add_blob_downstream(
-                                    &payload.cid,
-                                    &remote_node_id,
-                                    &payload.requester_addresses,
-                                ).unwrap_or(false);
-                                if ok {
-                                    (true, vec![])
-                                } else {
-                                    // Full — provide downstream list as redirect candidates
-                                    let downstream = storage.get_blob_downstream(&payload.cid).unwrap_or_default();
-                                    let redirects: Vec<PeerWithAddress> = downstream.into_iter()
-                                        .map(|(nid, addrs)| PeerWithAddress {
-                                            n: hex::encode(nid),
-                                            a: addrs,
-                                        })
-                                        .collect();
-                                    (false, redirects)
-                                }
-                            } else {
-                                (false, vec![])
-                            };
-                            drop(storage);
-
-                            BlobResponsePayload {
-                                cid: payload.cid,
-                                found: true,
-                                data_b64: base64::engine::general_purpose::STANDARD.encode(&bytes),
-                                manifest,
-                                cdn_registered,
-                                cdn_redirect_peers,
-                            }
-                        } // end delivery budget else
+                            use base64::Engine;
+                            let storage = storage.get().await;
+                            let manifest: Option<crate::types::CdnManifest> = storage.get_cdn_manifest(&payload.cid).ok().flatten().and_then(|json| {
+                                // Try as AuthorManifest first (author-side), then as CdnManifest (relay)
+                                if let Ok(am) = serde_json::from_str::<crate::types::AuthorManifest>(&json) {
+                                    let ds_count = storage.get_blob_downstream_count(&payload.cid).unwrap_or(0);
+                                    Some(crate::types::CdnManifest { author_manifest: am, host: our_node_id, host_addresses: vec![], source: our_node_id, source_addresses: vec![], downstream_count: ds_count })
+                                } else { serde_json::from_str(&json).ok() }
+                            });
+                            let (cdn_registered, cdn_redirect_peers) = if !payload.requester_addresses.is_empty() {
+                                let ok = storage.add_blob_downstream(&payload.cid, &remote_node_id, &payload.requester_addresses).unwrap_or(false);
+                                if ok { (true, vec![]) } else {
+                                    let downstream = storage.get_blob_downstream(&payload.cid).unwrap_or_default();
+                                    let redirects: Vec<PeerWithAddress> = downstream.into_iter().map(|(nid, addrs)| PeerWithAddress { n: hex::encode(nid), a: addrs }).collect();
+                                    (false, redirects)
+                                }
+                            } else { (false, vec![]) };
+                            drop(storage);
+                            BlobResponsePayload { cid: payload.cid, found: true, data_b64: base64::engine::general_purpose::STANDARD.encode(&bytes), manifest, cdn_registered, cdn_redirect_peers }
+                        }
                     }
-                    None => BlobResponsePayload {
-                        cid: payload.cid,
-                        found: false,
-                        data_b64: String::new(),
-                        manifest: None,
-                        cdn_registered: false,
-                        cdn_redirect_peers: vec![],
-                    },
+                    None => BlobResponsePayload { cid: payload.cid, found: false, data_b64: String::new(), manifest: None, cdn_registered: false, cdn_redirect_peers: vec![] },
                 };
-                drop(cm);
-                // 15MB limit for base64 overhead on 10MB blobs + manifest
                 write_typed_message(&mut send, MessageType::BlobResponse, &response).await?;
                 send.finish()?;
                 debug!(peer = hex::encode(remote_node_id), found = response.found, cdn_reg = response.cdn_registered, "Handled blob request");
             }
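The blob arm clones the `Arc`s it needs inside a tiny critical section and serves the (potentially multi-megabyte) blob afterwards, so a slow reader never stalls the manager. The extraction shape with hypothetical stand-ins:

```rust
use std::sync::Arc;
use tokio::sync::Mutex;

struct BlobStore;
impl BlobStore {
    fn get(&self, _cid: &[u8; 32]) -> Option<Vec<u8>> { Some(vec![0u8; 4096]) }
}
struct Manager { blob_store: Arc<BlobStore> }  // hypothetical stand-in

#[tokio::main]
async fn main() {
    let mgr = Arc::new(Mutex::new(Manager { blob_store: Arc::new(BlobStore) }));
    let cid = [0u8; 32];
    // Clone the Arc under a brief lock; do the heavy read after releasing it.
    let blob_store = { Arc::clone(&mgr.lock().await.blob_store) };
    if let Some(bytes) = blob_store.get(&cid) {
        println!("serving {} bytes with no manager lock held", bytes.len());
    }
}
```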
             MessageType::ManifestRefreshRequest => {
                 let payload: crate::protocol::ManifestRefreshRequestPayload = read_payload(&mut recv, 1024).await?;
-                let cm = conn_mgr.lock().await;
-                let storage = cm.storage.lock().await;
-                let response = match storage.get_cdn_manifest(&payload.cid).ok().flatten() {
-                    Some(json) => {
-                        // Build CdnManifest from stored AuthorManifest
-                        let manifest = if let Ok(am) = serde_json::from_str::<crate::types::AuthorManifest>(&json) {
-                            if am.updated_at > payload.current_updated_at {
-                                let ds_count = storage.get_blob_downstream_count(&payload.cid).unwrap_or(0);
-                                Some(crate::types::CdnManifest {
-                                    author_manifest: am,
-                                    host: cm.our_node_id,
-                                    host_addresses: vec![],
-                                    source: cm.our_node_id,
-                                    source_addresses: vec![],
-                                    downstream_count: ds_count,
-                                })
-                            } else {
-                                None
-                            }
-                        } else {
-                            None
-                        };
-                        crate::protocol::ManifestRefreshResponsePayload {
-                            cid: payload.cid,
-                            updated: manifest.is_some(),
-                            manifest,
-                        }
-                    }
-                    None => crate::protocol::ManifestRefreshResponsePayload {
-                        cid: payload.cid,
-                        updated: false,
-                        manifest: None,
-                    },
-                };
-                drop(storage);
-                drop(cm);
+                // Brief lock: get the storage Arc + our_node_id
+                let (storage, our_node_id) = {
+                    let cm = conn_mgr.lock().await;
+                    (Arc::clone(&cm.storage), cm.our_node_id)
+                };
+                let response = {
+                    let store = storage.get().await;
+                    match store.get_cdn_manifest(&payload.cid).ok().flatten() {
+                        Some(json) => {
+                            let manifest = if let Ok(am) = serde_json::from_str::<crate::types::AuthorManifest>(&json) {
+                                if am.updated_at > payload.current_updated_at {
+                                    let ds_count = store.get_blob_downstream_count(&payload.cid).unwrap_or(0);
+                                    Some(crate::types::CdnManifest { author_manifest: am, host: our_node_id, host_addresses: vec![], source: our_node_id, source_addresses: vec![], downstream_count: ds_count })
+                                } else { None }
+                            } else { None };
+                            crate::protocol::ManifestRefreshResponsePayload { cid: payload.cid, updated: manifest.is_some(), manifest }
+                        }
+                        None => crate::protocol::ManifestRefreshResponsePayload { cid: payload.cid, updated: false, manifest: None },
+                    }
+                };
                 write_typed_message(&mut send, MessageType::ManifestRefreshResponse, &response).await?;
                 send.finish()?;
                 debug!(peer = hex::encode(remote_node_id), updated = response.updated, "Handled manifest refresh request");
             }
             MessageType::GroupKeyRequest => {
                 let payload: GroupKeyRequestPayload = read_payload(&mut recv, 4096).await?;
                 let cm = conn_mgr.lock().await;
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;

                 let response = match storage.get_group_key(&payload.group_id)? {
                     Some(record) if record.admin == cm.our_node_id => {
@@ -5615,9 +5782,165 @@ impl ConnectionManager {
                     ttl = payload.ttl,
                     "Received relay introduce"
                 );
+                // Brief lock: dedup + gather all data needed for forwarding
+                let relay_data = {
+                    let mut cm = conn_mgr.lock().await;
+                    let now = now_ms();
+                    // Dedup check
+                    if let Some(&seen_at) = cm.seen_intros.get(&payload.intro_id) {
+                        if now - seen_at < RELAY_INTRO_DEDUP_EXPIRY_MS {
+                            let result = RelayIntroduceResultPayload {
+                                intro_id: payload.intro_id, accepted: false, target_addresses: vec![],
+                                relay_available: false, reject_reason: Some("duplicate intro".to_string()),
+                            };
+                            write_typed_message(&mut send, MessageType::RelayIntroduceResult, &result).await?;
+                            send.finish()?;
+                            return Ok(());
+                        }
+                    }
+                    cm.seen_intros.insert(payload.intro_id, now);
+
+                    // Are we the target?
+                    if payload.target == cm.our_node_id {
+                        // Gather our addresses, then handle outside the lock
+                        let mut our_addrs: Vec<String> = cm.endpoint.addr().ip_addrs()
+                            .filter(|s| crate::network::is_publicly_routable(s)).map(|s| s.to_string()).collect();
+                        if let Some(ref ext) = cm.upnp_external_addr {
+                            let ext_str = ext.to_string();
+                            if !our_addrs.contains(&ext_str) { our_addrs.insert(0, ext_str); }
+                        }
+                        let endpoint = cm.endpoint.clone();
+                        let storage = Arc::clone(&cm.storage);
+                        let our_node_id = cm.our_node_id;
+                        let our_nat_type = cm.nat_type;
+                        let our_http_capable = cm.http_capable;
+                        let our_http_addr = cm.http_addr.clone();
+                        let our_nat_profile = cm.our_nat_profile();
+                        let peer_nat_profile = { let s = cm.storage.get().await; s.get_peer_nat_profile(&payload.requester) };
+                        Some(RelayGathered::WeAreTarget { our_addrs, endpoint, storage, our_node_id, our_nat_type, our_http_capable, our_http_addr, our_nat_profile, peer_nat_profile })
+                    } else {
+                        // We are the relay — gather target connection, requester observed addr, etc.
+                        let target_conn = cm.connections.get(&payload.target).map(|pc| (pc.connection.clone(), pc.remote_addr))
+                            .or_else(|| cm.sessions.get(&payload.target).map(|sc| (sc.connection.clone(), sc.remote_addr)));
+                        let requester_observed = cm.connections.get(&remote_node_id).and_then(|pc| pc.remote_addr)
+                            .or_else(|| cm.sessions.get(&remote_node_id).and_then(|s| s.remote_addr));
+                        let relay_available = cm.can_accept_relay_pipe();
+                        let activity_log = Arc::clone(&cm.activity_log);
+                        // TTL chain: N2 reporters + their connections
+                        let ttl_reporters = if target_conn.is_none() && payload.ttl > 0 {
+                            let storage = cm.storage.get().await;
+                            let reporters = storage.find_in_n2(&payload.target).unwrap_or_default();
+                            drop(storage);
+                            reporters.into_iter()
+                                .filter(|r| *r != remote_node_id && *r != cm.our_node_id)
+                                .filter_map(|r| cm.connections.get(&r).map(|pc| (r, pc.connection.clone())))
+                                .collect::<Vec<_>>()
+                        } else { vec![] };
+                        Some(RelayGathered::WeAreRelay { target_conn, requester_observed, relay_available, activity_log, ttl_reporters })
+                    }
+                };
+                // Lock DROPPED — all forwarding I/O happens without conn_mgr lock
                 let cm_arc = Arc::clone(conn_mgr);
-                let mut cm = conn_mgr.lock().await;
-                cm.handle_relay_introduce(payload, send, remote_node_id, cm_arc).await?;
+                match relay_data {
+                    Some(RelayGathered::WeAreTarget { our_addrs, endpoint, storage, our_node_id, our_nat_type, our_http_capable, our_http_addr, our_nat_profile, peer_nat_profile }) => {
+                        let result = RelayIntroduceResultPayload {
+                            intro_id: payload.intro_id, accepted: true, target_addresses: our_addrs,
+                            relay_available: false, reject_reason: None,
+                        };
+                        write_typed_message(&mut send, MessageType::RelayIntroduceResult, &result).await?;
+                        send.finish()?;
+                        let routable_addrs: Vec<String> = payload.requester_addresses.iter()
+                            .filter(|a| a.parse::<SocketAddr>().map_or(false, |s| crate::network::is_publicly_routable(&s)))
+                            .cloned().collect();
+                        let requester = payload.requester;
+                        tokio::spawn(async move {
+                            if let Some(conn) = hole_punch_with_scanning(&endpoint, &requester, &routable_addrs, our_nat_profile, peer_nat_profile).await {
+                                let remote_sock = routable_addrs.iter().filter_map(|a| a.parse::<SocketAddr>().ok()).find(|s| crate::network::is_publicly_routable(s));
+                                let mut cm = cm_arc.lock().await;
+                                if cm.is_connected(&requester) { return; }
+                                cm.add_session(requester, conn, SessionReachMethod::HolePunch, remote_sock);
+                                cm.mark_reachable(&requester);
+                                cm.log_activity(ActivityLevel::Info, ActivityCategory::Relay, format!("Target-side hole punch succeeded to {}", &hex::encode(requester)[..8]), Some(requester));
+                                if let Some(session) = cm.sessions.get(&requester) {
+                                    let session_conn = session.connection.clone();
+                                    drop(cm);
+                                    let _ = initial_exchange_connect(&storage, &our_node_id, &session_conn, requester, None, our_nat_type, our_http_capable, our_http_addr, None, None).await;
+                                }
+                            }
+                        });
+                    }
+                    Some(RelayGathered::WeAreRelay { target_conn, requester_observed, relay_available, activity_log, ttl_reporters }) => {
+                        // Build forwarded payload with requester's observed address
+                        let mut forwarded_payload = payload.clone();
+                        if let Some(addr) = requester_observed {
+                            let addr_str = addr.to_string();
+                            if !forwarded_payload.requester_addresses.contains(&addr_str) {
+                                forwarded_payload.requester_addresses.insert(0, addr_str);
+                            }
+                        }
+                        // Try direct target first, then TTL chain
+                        let forward_result = if let Some((target_conn, target_observed_addr)) = target_conn {
+                            let result = async {
+                                let (mut fwd_send, mut fwd_recv) = target_conn.open_bi().await?;
+                                write_typed_message(&mut fwd_send, MessageType::RelayIntroduce, &forwarded_payload).await?;
+                                fwd_send.finish()?;
+                                let msg_type = read_message_type(&mut fwd_recv).await?;
+                                if msg_type != MessageType::RelayIntroduceResult { anyhow::bail!("expected RelayIntroduceResult"); }
+                                let mut result: RelayIntroduceResultPayload = read_payload(&mut fwd_recv, MAX_PAYLOAD).await?;
+                                result.relay_available = relay_available;
+                                if let Some(addr) = target_observed_addr {
+                                    let addr_str = addr.to_string();
+                                    if !result.target_addresses.contains(&addr_str) { result.target_addresses.insert(0, addr_str); }
+                                }
+                                anyhow::Ok(result)
+                            }.await;
+                            Some(result)
+                        } else if !ttl_reporters.is_empty() && payload.ttl > 0 {
+                            // TTL chain: try forwarding to N2 reporters
+                            let mut chain_forwarded = forwarded_payload.clone();
+                            chain_forwarded.ttl = payload.ttl - 1;
+                            let mut chain_result = None;
+                            for (_reporter_id, reporter_conn) in &ttl_reporters {
+                                let result = async {
+                                    let (mut fwd_send, mut fwd_recv) = reporter_conn.open_bi().await?;
+                                    write_typed_message(&mut fwd_send, MessageType::RelayIntroduce, &chain_forwarded).await?;
+                                    fwd_send.finish()?;
+                                    let msg_type = read_message_type(&mut fwd_recv).await?;
+                                    if msg_type != MessageType::RelayIntroduceResult { anyhow::bail!("expected RelayIntroduceResult from chain"); }
+                                    let mut result: RelayIntroduceResultPayload = read_payload(&mut fwd_recv, MAX_PAYLOAD).await?;
+                                    result.relay_available = relay_available;
+                                    anyhow::Ok(result)
+                                }.await;
+                                if let Ok(ref r) = result { if r.accepted { chain_result = Some(result); break; } }
+                                chain_result = Some(result);
+                            }
+                            chain_result
+                        } else { None };
+
+                        match forward_result {
+                            Some(Ok(result)) => {
+                                write_typed_message(&mut send, MessageType::RelayIntroduceResult, &result).await?;
+                                send.finish()?;
+                                if let Ok(mut log) = activity_log.try_lock() {
+                                    log.log(ActivityLevel::Info, ActivityCategory::Relay, format!("Forwarded introduction {} -> {}", &hex::encode(payload.requester)[..8], &hex::encode(payload.target)[..8]), None);
+                                }
+                            }
+                            other => {
+                                let e_msg = match other {
+                                    Some(Err(e)) => format!("{}", e),
+                                    _ => "target not found".to_string(),
+                                };
+                                let result = RelayIntroduceResultPayload {
+                                    intro_id: payload.intro_id, accepted: false, target_addresses: vec![],
+                                    relay_available: false, reject_reason: Some(format!("relay forward failed: {}", e_msg)),
+                                };
+                                write_typed_message(&mut send, MessageType::RelayIntroduceResult, &result).await?;
+                                send.finish()?;
+                            }
+                        }
+                    }
+                    None => {}
+                }
             }
             MessageType::SessionRelay => {
                 let cm = Arc::clone(conn_mgr);
@@ -5652,7 +5975,7 @@ impl ConnectionManager {
                 let payload: BlobHeaderRequestPayload = read_payload(&mut recv, MAX_PAYLOAD).await?;
                 let (header_json, _updated_at) = {
                     let cm = conn_mgr.lock().await;
-                    let storage = cm.storage.lock().await;
+                    let storage = cm.storage.get().await;
                     match storage.get_blob_header(&payload.post_id) {
                         Ok(Some((json, ts))) if ts > payload.current_updated_at => (Some(json), ts),
                         Ok(_) => (None, 0),
@@ -5676,7 +5999,7 @@ impl ConnectionManager {
                 let payload: ReplicationRequestPayload = read_payload(&mut recv, MAX_PAYLOAD).await?;
                 let (accepted, rejected, needs_pull) = {
                     let cm = conn_mgr.lock().await;
-                    let storage = cm.storage.lock().await;
+                    let storage = cm.storage.get().await;
                     let mut acc = Vec::new();
                     let mut rej = Vec::new();
                     let mut to_pull = Vec::new();
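// [Editor's note] Illustrative sketch, not part of this patch. The RelayIntroduce
// handler above drops duplicates by keeping a `seen_intros` map of intro_id to
// first-seen milliseconds and rejecting repeats inside RELAY_INTRO_DEDUP_EXPIRY_MS.
// A minimal, self-contained version of that pattern (all names here are invented;
// unlike the patch, this version also prunes expired entries so the map stays bounded):
use std::collections::HashMap;

const DEDUP_EXPIRY_MS: u64 = 60_000; // assumed window; the real constant is not shown in this diff

/// Returns true if `id` was already seen inside the expiry window.
fn check_and_record(seen: &mut HashMap<[u8; 16], u64>, id: [u8; 16], now_ms: u64) -> bool {
    // Drop expired entries first so stale ids do not count as duplicates.
    seen.retain(|_, &mut t| now_ms.saturating_sub(t) < DEDUP_EXPIRY_MS);
    if seen.contains_key(&id) {
        true // duplicate: the caller replies with a reject, as the handler above does
    } else {
        seen.insert(id, now_ms);
        false
    }
}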
@@ -5752,7 +6075,7 @@ impl ConnectionManager {
                 let attachments = sp.post.attachments.clone();
                 let post_author = sp.post.author;
                 let cm = cm_arc.lock().await;
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;
                 let _ = storage.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility);
                 let prio = storage.get_post_upstreams(&sp.id).map(|v| v.len() as u8).unwrap_or(0);
                 let _ = storage.add_post_upstream(&sp.id, &sender, prio);
@@ -5780,7 +6103,7 @@ impl ConnectionManager {
                 let data = base64::engine::general_purpose::STANDARD.decode(resp.data_b64.as_bytes())?;
                 blob_store.store(&att.cid, &data)?;
                 let cm = cm_arc.lock().await;
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;
                 let _ = storage.record_blob(&att.cid, post_id, &post_author, data.len() as u64, &att.mime_type, att.size_bytes);
                 let _ = storage.add_post_upstream(&att.cid, &sender, 0);
             }
@@ -5813,7 +6136,7 @@ impl ConnectionManager {
 
         // Gather policy + audience data, then drop lock immediately
         let (policy, approved_audience, downstream, upstreams) = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let policy = storage.get_comment_policy(&payload.post_id)
                 .ok()
                 .flatten()
@@ -5836,7 +6159,7 @@ impl ConnectionManager {
 
         // Apply ops in a short lock acquisition
         {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             for op in &payload.ops {
                 match op {
                     BlobHeaderDiffOp::AddReaction(reaction) => {
@@ -6082,7 +6405,7 @@ pub enum ConnResponse {
     OptEndpointAddr(Option<iroh::EndpointAddr>),
     Endpoint(iroh::Endpoint),
     SecretSeed([u8; 32]),
-    Storage(Arc<Mutex<Storage>>),
+    Storage(Arc<StoragePool>),
     BlobStore(Arc<BlobStore>),
     ActiveRelayPipes(Arc),
     Referrals(Vec<ReferralEntry>),
@@ -6178,7 +6501,7 @@ pub enum ConnCommand {
         reply: oneshot::Sender<[u8; 32]>,
     },
     GetStorage {
-        reply: oneshot::Sender<Arc<Mutex<Storage>>>,
+        reply: oneshot::Sender<Arc<StoragePool>>,
    },
     GetBlobStore {
         reply: oneshot::Sender<Arc<BlobStore>>,
     },
@@ -6559,7 +6882,7 @@ impl ConnHandle {
         rx.await.unwrap_or([0u8; 32])
     }
 
-    pub async fn storage(&self) -> Arc<Mutex<Storage>> {
+    pub async fn storage(&self) -> Arc<StoragePool> {
         let (tx, rx) = oneshot::channel();
         let _ = self.tx.send(ConnCommand::GetStorage { reply: tx }).await;
         rx.await.expect("actor dropped")
@@ -6946,6 +7269,13 @@ impl ConnHandle {
 pub struct ConnectionActor {
     cm: Arc<Mutex<ConnectionManager>>,
     rx: mpsc::Receiver<ConnCommand>,
+    // Hoisted from ConnectionManager — accessible without the conn_mgr lock
+    storage: Arc<StoragePool>,
+    blob_store: Arc<BlobStore>,
+    endpoint: iroh::Endpoint,
+    our_node_id: NodeId,
+    activity_log: Arc<Mutex<ActivityLog>>,
+    is_anchor: Arc<AtomicBool>,
 }
 
 impl ConnectionActor {
@@ -6953,7 +7283,20 @@ impl ConnectionActor {
     /// During migration, both the actor and legacy lock-callers share state.
     pub fn spawn_with_arc(cm: Arc<Mutex<ConnectionManager>>) -> ConnHandle {
         let (tx, rx) = mpsc::channel(256);
-        let actor = ConnectionActor { cm, rx };
+        // Hoist frequently-needed Arcs so handlers can skip the conn_mgr lock
+        let (storage, blob_store, endpoint, our_node_id, activity_log, is_anchor) = {
+            // Brief lock just to clone the Arcs
+            let cm_guard = cm.blocking_lock();
+            (
+                Arc::clone(&cm_guard.storage),
+                Arc::clone(&cm_guard.blob_store),
+                cm_guard.endpoint.clone(),
+                cm_guard.our_node_id,
+                Arc::clone(&cm_guard.activity_log),
+                Arc::clone(&cm_guard.is_anchor),
+            )
+        };
+        let actor = ConnectionActor { cm, rx, storage, blob_store, endpoint, our_node_id, activity_log, is_anchor };
         tokio::spawn(actor.run());
         ConnHandle::new(tx)
     }
@@ -6970,6 +7313,414 @@ impl ConnectionActor {
         debug!("ConnectionActor shutting down");
     }
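// [Editor's note] Illustrative sketch, not part of this patch. spawn_with_arc
// above takes one brief lock to clone shared Arc handles out of the manager so
// later command handlers can reach storage, the endpoint, etc. without touching
// the conn_mgr lock again. The shape of that pattern, reduced to a toy (Manager
// and Worker are invented names):
use std::sync::Arc;
use tokio::sync::Mutex;

struct Manager {
    config: Arc<String>,
    counter: Arc<Mutex<u64>>,
}

struct Worker {
    config: Arc<String>,
    counter: Arc<Mutex<u64>>,
}

async fn hoist(mgr: &Arc<Mutex<Manager>>) -> Worker {
    // Brief lock: clone the Arcs, then release before doing any real work.
    let guard = mgr.lock().await;
    Worker { config: Arc::clone(&guard.config), counter: Arc::clone(&guard.counter) }
}
// A Worker can now lock `counter` during slow I/O while other tasks freely lock Manager.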
+
+    /// Resolve a peer's address without holding the conn_mgr lock.
+    /// Uses brief locks on storage and conn_mgr only to read data, with per-query timeouts.
+    async fn resolve_address_unlocked(
+        storage: &Arc<StoragePool>,
+        cm: &Arc<Mutex<ConnectionManager>>,
+        _endpoint: &iroh::Endpoint,
+        target: &NodeId,
+    ) -> anyhow::Result<Option<String>> {
+        // Check if directly connected — brief lock
+        {
+            let cm_guard = cm.lock().await;
+            if cm_guard.connections.contains_key(target) {
+                let s = storage.get().await;
+                return Ok(s.get_peer_record(target)?
+                    .and_then(|r| r.addresses.first().map(|a| a.to_string())));
+            }
+        }
+
+        // Check social routes — storage lock only
+        {
+            let s = storage.get().await;
+            if let Some(route) = s.get_social_route(target)? {
+                if route.status == SocialStatus::Online {
+                    if let Some(addr) = route.addresses.first() {
+                        return Ok(Some(addr.to_string()));
+                    }
+                }
+            }
+        }
+
+        // N2 lookup: brief lock to get reporters + their connections
+        let n2_queries: Vec<iroh::endpoint::Connection> = {
+            let s = storage.get().await;
+            let reporters = s.find_in_n2(target)?;
+            drop(s);
+            let cm_guard = cm.lock().await;
+            reporters.iter()
+                .filter_map(|r| cm_guard.connections.get(r).map(|pc| pc.connection.clone()))
+                .collect()
+        };
+        for conn in &n2_queries {
+            let result = tokio::time::timeout(std::time::Duration::from_secs(5), async {
+                let (mut send, mut recv) = conn.open_bi().await?;
+                let req = crate::protocol::AddressRequestPayload { target: *target };
+                write_typed_message(&mut send, MessageType::AddressRequest, &req).await?;
+                send.finish()?;
+                let _resp_type = read_message_type(&mut recv).await?;
+                let resp: crate::protocol::AddressResponsePayload = read_payload(&mut recv, MAX_PAYLOAD).await?;
+                anyhow::Ok(resp.address)
+            }).await;
+            if let Ok(Ok(Some(addr))) = result {
+                return Ok(Some(addr));
+            }
+        }
+
+        // N3 lookup: same pattern
+        let n3_queries: Vec<iroh::endpoint::Connection> = {
+            let s = storage.get().await;
+            let reporters = s.find_in_n3(target)?;
+            drop(s);
+            let cm_guard = cm.lock().await;
+            reporters.iter()
+                .filter_map(|r| cm_guard.connections.get(r).map(|pc| pc.connection.clone()))
+                .collect()
+        };
+        for conn in &n3_queries {
+            let result = tokio::time::timeout(std::time::Duration::from_secs(5), async {
+                let (mut send, mut recv) = conn.open_bi().await?;
+                let req = crate::protocol::AddressRequestPayload { target: *target };
+                write_typed_message(&mut send, MessageType::AddressRequest, &req).await?;
+                send.finish()?;
+                let _resp_type = read_message_type(&mut recv).await?;
+                let resp: crate::protocol::AddressResponsePayload = read_payload(&mut recv, MAX_PAYLOAD).await?;
+                anyhow::Ok(resp.address)
+            }).await;
+            if let Ok(Ok(Some(addr))) = result {
+                return Ok(Some(addr));
+            }
+        }
+
+        Ok(None)
+    }
+
+    /// Snapshot ConnectionManager state into a WormContext — brief lock.
+    async fn snapshot_worm_context(cm: &Arc<Mutex<ConnectionManager>>) -> WormContext {
+        let guard = cm.lock().await;
+        WormContext {
+            our_node_id: guard.our_node_id,
+            storage: Arc::clone(&guard.storage),
+            endpoint: guard.endpoint.clone(),
+            peer_conns: guard.connections.iter().map(|(nid, pc)| (*nid, pc.connection.clone())).collect(),
+            connected_ids: guard.connections.keys().copied().collect(),
+            cm: Arc::clone(cm),
+        }
+    }
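// [Editor's note] Illustrative sketch, not part of this patch. In
// resolve_address_unlocked above, each N2/N3 reporter is queried one at a time
// under its own 5s timeout, and the first usable answer wins; a dead reporter
// costs at most one timeout. The control flow in isolation (query() is a stub):
use std::time::Duration;
use tokio::time::timeout;

async fn query(reporter: &str) -> anyhow::Result<Option<String>> {
    let _ = reporter; // the real version opens a bi-stream and reads an AddressResponse
    Ok(None)
}

async fn first_answer(reporters: &[String]) -> Option<String> {
    for r in reporters {
        // Elapsed and I/O errors are both treated as "no answer, try the next one".
        if let Ok(Ok(Some(addr))) = timeout(Duration::from_secs(5), query(r)).await {
            return Some(addr);
        }
    }
    None
}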
+
+    /// Worm lookup without holding conn_mgr lock during I/O.
+    async fn worm_lookup_unlocked(ctx: WormContext, target: NodeId) -> anyhow::Result<Option<WormResult>> {
+        // Cooldown check
+        {
+            let s = ctx.storage.get().await;
+            if s.is_worm_cooldown(&target, WORM_COOLDOWN_MS)? {
+                return Ok(None);
+            }
+        }
+
+        let needle_peers: Vec<NodeId> = {
+            let s = ctx.storage.get().await;
+            let mut rp = s.get_recent_peers(&target)?;
+            rp.truncate(10);
+            rp
+        };
+
+        let mut all_needles = vec![target];
+        all_needles.extend_from_slice(&needle_peers);
+        let worm_id: WormId = rand::random();
+        let visited = vec![ctx.our_node_id];
+
+        let result = tokio::time::timeout(
+            std::time::Duration::from_millis(WORM_TOTAL_TIMEOUT_MS),
+            Self::worm_cascade_unlocked(&ctx, &target, &needle_peers, &worm_id, &visited, &all_needles, None, None),
+        ).await;
+
+        match result {
+            Ok(Ok(Some(wr))) => Ok(Some(wr)),
+            Ok(Ok(None)) | Ok(Err(_)) | Err(_) => {
+                let s = ctx.storage.get().await;
+                let _ = s.record_worm_miss(&target);
+                Ok(None)
+            }
+        }
+    }
+
+    /// Content search without holding conn_mgr lock during I/O.
+    async fn content_search_unlocked(ctx: WormContext, target: NodeId, post_id: Option<PostId>, blob_id: Option<[u8; 32]>) -> anyhow::Result<Option<WormResult>> {
+        let needle_peers: Vec<NodeId> = {
+            let s = ctx.storage.get().await;
+            let mut rp = s.get_recent_peers(&target)?;
+            rp.truncate(10);
+            rp
+        };
+
+        let mut all_needles = vec![target];
+        all_needles.extend_from_slice(&needle_peers);
+        let worm_id: WormId = rand::random();
+        let visited = vec![ctx.our_node_id];
+
+        let result = tokio::time::timeout(
+            std::time::Duration::from_millis(WORM_TOTAL_TIMEOUT_MS),
+            Self::worm_cascade_unlocked(&ctx, &target, &needle_peers, &worm_id, &visited, &all_needles, post_id, blob_id),
+        ).await;
+
+        match result {
+            Ok(Ok(x)) => Ok(x),
+            Ok(Err(e)) => { debug!(error = %e, "Content search failed"); Ok(None) }
+            Err(_) => { debug!("Content search timed out"); Ok(None) }
+        }
+    }
+
+    /// Worm cascade using snapshot — no conn_mgr lock held during I/O.
+    async fn worm_cascade_unlocked(
+        ctx: &WormContext,
+        target: &NodeId,
+        needle_peers: &[NodeId],
+        worm_id: &WormId,
+        visited: &[NodeId],
+        all_needles: &[NodeId],
+        post_id: Option<PostId>,
+        blob_id: Option<[u8; 32]>,
+    ) -> anyhow::Result<Option<WormResult>> {
+        // Step 0: Local check — find any needle in our connections snapshot
+        for needle in all_needles {
+            if ctx.connected_ids.contains(needle) {
+                let s = ctx.storage.get().await;
+                let addr = s.get_peer_record(needle)?.and_then(|r| r.addresses.first().map(|a| a.to_string()));
+                return Ok(Some(WormResult {
+                    node_id: *needle, addresses: addr.into_iter().collect(),
+                    reporter: ctx.our_node_id, freshness_ms: 0, post_holder: None, blob_holder: None,
+                }));
+            }
+        }
+        // Step 1: N2/N3 check
+        {
+            let s = ctx.storage.get().await;
+            let found_entries = s.find_any_in_n2_n3(all_needles)?;
+            if let Some((found_id, _reporter, _level)) = found_entries.first() {
+                drop(s);
+                let address = Self::resolve_address_unlocked(&ctx.storage, &ctx.cm, &ctx.endpoint, found_id).await.ok().flatten();
+                return Ok(Some(WormResult {
+                    node_id: *found_id, addresses: address.into_iter().collect(),
+                    reporter: ctx.our_node_id, freshness_ms: 0, post_holder: None, blob_holder: None,
+                }));
+            }
+        }
+
+        // Step 2: Fan-out
+        let visited_set: HashSet<NodeId> = visited.iter().copied().collect();
+        let fan_out_conns: Vec<(NodeId, iroh::endpoint::Connection)> = ctx.peer_conns.iter()
+            .filter(|(nid, _)| !visited_set.contains(nid))
+            .cloned()
+            .collect();
+
+        if !fan_out_conns.is_empty() {
+            let fan_out_payload = WormQueryPayload {
+                worm_id: *worm_id, target: *target, needle_peers: needle_peers.to_vec(),
+                ttl: 0, visited: visited.to_vec(), post_id: post_id.clone(), blob_id,
+            };
+
+            let (hit, wide_referrals) = tokio::time::timeout(
+                std::time::Duration::from_millis(WORM_FAN_OUT_TIMEOUT_MS),
+                ConnectionManager::fan_out_worm_query_all(&fan_out_conns, &fan_out_payload),
+            ).await.unwrap_or((None, vec![]));
+
+            if let Some(wr) = hit { return Ok(Some(wr)); }
+
+            // Step 3: Wide-bloom
+            if !wide_referrals.is_empty() {
+                let bloom_payload = WormQueryPayload {
+                    worm_id: *worm_id, target: *target, needle_peers: needle_peers.to_vec(),
+                    ttl: 1, visited: visited.to_vec(), post_id: post_id.clone(), blob_id,
+                };
+
+                let bloom_result = tokio::time::timeout(
+                    std::time::Duration::from_millis(WORM_BLOOM_TIMEOUT_MS),
+                    Self::bloom_to_wide_peers_unlocked(ctx, &wide_referrals, bloom_payload),
+                ).await;
+
+                if let Ok(Some(wr)) = bloom_result { return Ok(Some(wr)); }
+            }
+        }
+
+        Ok(None)
+    }
+
+    /// Bloom to wide peers using snapshot — no conn_mgr lock.
+    async fn bloom_to_wide_peers_unlocked(
+        ctx: &WormContext,
+        referrals: &[(NodeId, String)],
+        payload: WormQueryPayload,
+    ) -> Option<WormResult> {
+        use tokio::task::JoinSet;
+
+        let mut seen = HashSet::new();
+        let unique_referrals: Vec<&(NodeId, String)> = referrals.iter()
+            .filter(|(nid, _)| *nid != ctx.our_node_id && !ctx.connected_ids.contains(nid) && seen.insert(*nid))
+            .collect();
+
+        if unique_referrals.is_empty() { return None; }
+
+        let mut set = JoinSet::new();
+        let endpoint = ctx.endpoint.clone();
+
+        for (ref_id, ref_addr) in unique_referrals {
+            let endpoint = endpoint.clone();
+            let ref_id = *ref_id;
+            let ref_addr = ref_addr.clone();
+            let payload = payload.clone();
+
+            set.spawn(async move {
+                let endpoint_id = iroh::EndpointId::from_bytes(&ref_id).ok()?;
+                let mut addr = iroh::EndpointAddr::from(endpoint_id);
+                if let Ok(sock) = ref_addr.parse::<SocketAddr>() { addr = addr.with_ip_addr(sock); }
+                let conn = endpoint.connect(addr, ALPN_V2).await.ok()?;
+                let resp = ConnectionManager::send_worm_query_raw(&conn, &payload).await.ok()?;
+                let is_hit = resp.found || resp.post_holder.is_some() || resp.blob_holder.is_some();
+                if is_hit {
+                    Some(WormResult {
+                        node_id: resp.found_id.unwrap_or([0u8; 32]), addresses: resp.addresses,
+                        reporter: resp.reporter.unwrap_or([0u8; 32]), freshness_ms: 0,
+                        post_holder: resp.post_holder, blob_holder: resp.blob_holder,
+                    })
+                } else { None }
+            });
+        }
+
+        while let Some(result) = set.join_next().await {
+            if let Ok(Some(wr)) = result { set.abort_all(); return Some(wr); }
+        }
+        None
+    }
+
+    /// Pick a wide referral from a pre-snapshotted list of (node_id, slot_kind),
+    /// preferring Wide slots and taking the first candidate with a known address.
+    async fn pick_wide_referral(storage: &Arc<StoragePool>, candidates: &[(NodeId, PeerSlotKind)], exclude: &NodeId) -> Option<(NodeId, String)> {
+        let filtered: Vec<(NodeId, PeerSlotKind)> = candidates.iter().filter(|(nid, _)| nid != exclude).copied().collect();
+        if filtered.is_empty() { return None; }
+        let wide: Vec<(NodeId, PeerSlotKind)> = filtered.iter().filter(|(_, kind)| *kind == PeerSlotKind::Wide).copied().collect();
+        let ordered = if !wide.is_empty() { wide } else { filtered };
+        let s = storage.get().await;
+        for (nid, _) in &ordered {
+            if let Ok(Some(rec)) = s.get_peer_record(nid) {
+                if let Some(addr) = rec.addresses.first() { return Some((*nid, addr.to_string())); }
+            }
+        }
+        None
+    }
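// [Editor's note] Illustrative sketch, not part of this patch.
// bloom_to_wide_peers_unlocked above spawns one task per referral into a JoinSet,
// returns on the first hit, and aborts the stragglers. The core race pattern:
use std::time::Duration;
use tokio::task::JoinSet;

async fn race_first_hit(delays_ms: Vec<u64>) -> Option<u64> {
    let mut set = JoinSet::new();
    for d in delays_ms {
        set.spawn(async move {
            tokio::time::sleep(Duration::from_millis(d)).await;
            if d % 2 == 0 { Some(d) } else { None } // Some(_) stands in for a worm hit
        });
    }
    while let Some(res) = set.join_next().await {
        if let Ok(Some(hit)) = res {
            set.abort_all(); // first hit wins; cancel everything still in flight
            return Some(hit);
        }
    }
    None // every task finished with a miss
}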
+
+    /// Handle incoming WormQuery without holding conn_mgr lock during I/O.
+    async fn handle_worm_query_unlocked(
+        ctx: WormContext,
+        blob_store: Arc<BlobStore>,
+        wide_candidates: Vec<(NodeId, PeerSlotKind)>,
+        payload: WormQueryPayload,
+        mut send: iroh::endpoint::SendStream,
+        from_peer: NodeId,
+    ) -> anyhow::Result<()> {
+        // Check for post/blob content locally
+        let mut post_holder: Option<NodeId> = None;
+        let mut blob_holder: Option<NodeId> = None;
+
+        if let Some(ref post_id) = payload.post_id {
+            let s = ctx.storage.get().await;
+            if s.get_post_with_visibility(post_id).ok().flatten().is_some() {
+                post_holder = Some(ctx.our_node_id);
+            } else {
+                let downstream = s.get_post_downstream(post_id).unwrap_or_default();
+                if !downstream.is_empty() { post_holder = Some(downstream[0]); }
+            }
+        }
+
+        if let Some(ref blob_id) = payload.blob_id {
+            if blob_store.get(blob_id).ok().flatten().is_some() {
+                blob_holder = Some(ctx.our_node_id);
+            } else {
+                let s = ctx.storage.get().await;
+                if let Ok(Some(pid)) = s.get_blob_post_id(blob_id) {
+                    let downstream = s.get_post_downstream(&pid).unwrap_or_default();
+                    if !downstream.is_empty() { blob_holder = Some(downstream[0]); }
+                }
+            }
+        }
+
+        let mut all_needles = vec![payload.target];
+        all_needles.extend_from_slice(&payload.needle_peers);
+
+        // Check connections + N2/N3
+        let local_result = {
+            let mut found = None;
+            for needle in &all_needles {
+                if ctx.connected_ids.contains(needle) {
+                    let s = ctx.storage.get().await;
+                    let addr = s.get_peer_record(needle)?.and_then(|r| r.addresses.first().map(|a| a.to_string()));
+                    found = Some((*needle, addr.into_iter().collect::<Vec<String>>(), 0u64));
+                    break;
+                }
+            }
+            if found.is_none() {
+                let s = ctx.storage.get().await;
+                let entries = s.find_any_in_n2_n3(&all_needles)?;
+                if let Some((found_id, _, _)) = entries.first() {
+                    drop(s);
+                    let address = Self::resolve_address_unlocked(&ctx.storage, &ctx.cm, &ctx.endpoint, found_id).await.ok().flatten();
+                    found = Some((*found_id, address.into_iter().collect::<Vec<String>>(), 0u64));
+                }
+            }
+            found
+        };
+
+        let content_found = post_holder.is_some() || blob_holder.is_some();
+
+        if let Some((found_id, addresses, _)) = local_result {
+            let wide_referral = Self::pick_wide_referral(&ctx.storage, &wide_candidates, &from_peer).await;
+            let resp = WormResponsePayload { worm_id: payload.worm_id, found: true, found_id: Some(found_id), addresses, reporter: Some(ctx.our_node_id), hop: None, wide_referral, post_holder, blob_holder };
+            write_typed_message(&mut send, MessageType::WormResponse, &resp).await?;
+            send.finish()?;
+            return Ok(());
+        }
+
+        if content_found {
+            let wide_referral = Self::pick_wide_referral(&ctx.storage, &wide_candidates, &from_peer).await;
+            let resp = WormResponsePayload { worm_id: payload.worm_id, found: false, found_id: None, addresses: vec![], reporter: Some(ctx.our_node_id), hop: None, wide_referral, post_holder, blob_holder };
+            write_typed_message(&mut send, MessageType::WormResponse, &resp).await?;
+            send.finish()?;
+            return Ok(());
+        }
+
+        // Fan-out if ttl > 0
+        if payload.ttl > 0 {
+            let visited_set: HashSet<NodeId> = payload.visited.iter().copied().collect();
+            let fan_conns: Vec<(NodeId, iroh::endpoint::Connection)> = ctx.peer_conns.iter()
+                .filter(|(nid, _)| !visited_set.contains(nid) && *nid != from_peer)
+                .cloned().collect();
+
+            if !fan_conns.is_empty() {
+                let fan_payload = WormQueryPayload {
+                    worm_id: payload.worm_id, target: payload.target, needle_peers: payload.needle_peers.clone(),
+                    ttl: 0, visited: payload.visited.clone(), post_id: payload.post_id.clone(), blob_id: payload.blob_id,
+                };
+                let (hit, _) = tokio::time::timeout(
+                    std::time::Duration::from_millis(WORM_FAN_OUT_TIMEOUT_MS),
+                    ConnectionManager::fan_out_worm_query_all(&fan_conns, &fan_payload),
+                ).await.unwrap_or((None, vec![]));
+
+                if let Some(wr) = hit {
+                    let wide_referral = Self::pick_wide_referral(&ctx.storage, &wide_candidates, &from_peer).await;
+                    let resp = WormResponsePayload { worm_id: payload.worm_id, found: true, found_id: Some(wr.node_id), addresses: wr.addresses, reporter: Some(wr.reporter), hop: None, wide_referral, post_holder: wr.post_holder, blob_holder: wr.blob_holder };
+                    write_typed_message(&mut send, MessageType::WormResponse, &resp).await?;
+                    send.finish()?;
+                    return Ok(());
+                }
+            }
+        }
+
+        let wide_referral = Self::pick_wide_referral(&ctx.storage, &wide_candidates, &from_peer).await;
+        let resp = WormResponsePayload { worm_id: payload.worm_id, found: false, found_id: None, addresses: vec![], reporter: None, hop: None, wide_referral, post_holder: None, blob_holder: None };
+        write_typed_message(&mut send, MessageType::WormResponse, &resp).await?;
+        send.finish()?;
+        Ok(())
+    }
+
     async fn handle(&mut self, cmd: ConnCommand) {
         match cmd {
             // --- Reads ---
@@ -7265,7 +8016,7 @@ impl ConnectionActor {
                 cm.sticky_n1.retain(|_, expiry| *expiry > now);
                 let sticky_peers: Vec<NodeId> = cm.sticky_n1.keys().copied().collect();
                 // Compute diff snapshot
-                let storage = cm.storage.lock().await;
+                let storage = cm.storage.get().await;
                 let current_n1: HashSet<NodeId> = {
                     let mut set = HashSet::new();
                     for nid in cm.connections_ref().keys() {
@@ -7334,48 +8085,163 @@ impl ConnectionActor {
 
             // --- Complex operations ---
             ConnCommand::RebalanceSlots { reply } => {
-                let mut cm = self.cm.lock().await;
-                let r = cm.rebalance_slots().await;
-                let _ = reply.send(r);
+                let (endpoint, storage) = {
+                    let cm = self.cm.lock().await;
+                    (cm.endpoint.clone(), Arc::clone(&cm.storage))
+                };
+                let (mut newly_connected, pending_connects) = {
+                    let mut cm = self.cm.lock().await;
+                    cm.rebalance_slots().await.unwrap_or_default()
+                };
+                // Connect outside the lock — no 15s hold
+                for (peer_id, addr, _addr_s, slot_kind) in pending_connects {
+                    let addrs: Vec<SocketAddr> = addr.ip_addrs().copied().collect();
+                    if !addrs.is_empty() {
+                        let s = storage.get().await;
+                        let _ = s.upsert_peer(&peer_id, &addrs, None);
+                    }
+                    match ConnectionManager::connect_to_unlocked(&endpoint, addr).await {
+                        Ok(conn) => {
+                            let mut cm = self.cm.lock().await;
+                            cm.register_new_connection(peer_id, conn, &addrs, slot_kind).await;
+                            info!(peer = hex::encode(peer_id), "Auto-connected to peer");
+                            newly_connected.push(peer_id);
+                        }
+                        Err(e) => {
+                            debug!(peer = hex::encode(peer_id), error = %e, "Auto-connect failed");
+                        }
+                    }
+                }
+                let _ = reply.send(Ok(newly_connected));
             }
             ConnCommand::WormLookup { target, reply } => {
-                let cm = self.cm.lock().await;
-                let r = cm.initiate_worm_lookup(&target).await;
-                let _ = reply.send(r);
+                // Brief lock: snapshot, then all cascade I/O outside lock
+                let ctx = Self::snapshot_worm_context(&self.cm).await;
+                tokio::spawn(async move {
+                    let r = Self::worm_lookup_unlocked(ctx, target).await;
+                    let _ = reply.send(r);
+                });
            }
             ConnCommand::ContentSearch { target, post_id, blob_id, reply } => {
-                let cm = self.cm.lock().await;
-                let r = cm.initiate_content_search(&target, post_id, blob_id).await;
-                let _ = reply.send(r);
+                let ctx = Self::snapshot_worm_context(&self.cm).await;
+                tokio::spawn(async move {
+                    let r = Self::content_search_unlocked(ctx, target, post_id, blob_id).await;
+                    let _ = reply.send(r);
+                });
             }
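// [Editor's note] Illustrative sketch, not part of this patch. The WormLookup and
// ContentSearch arms above snapshot state and then tokio::spawn the slow cascade,
// moving the oneshot reply into the task, so the actor loop keeps draining its
// queue instead of stalling for up to WORM_TOTAL_TIMEOUT_MS. Skeleton of that shape:
use tokio::sync::{mpsc, oneshot};

enum Cmd {
    Slow { reply: oneshot::Sender<u64> },
}

async fn actor_loop(mut rx: mpsc::Receiver<Cmd>) {
    while let Some(cmd) = rx.recv().await {
        match cmd {
            Cmd::Slow { reply } => {
                // Do NOT await the slow work here; that would serialize every command.
                tokio::spawn(async move {
                    tokio::time::sleep(std::time::Duration::from_secs(2)).await;
                    let _ = reply.send(42); // receiver may be gone; ignore the error
                });
            }
        }
    }
}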
             ConnCommand::PostFetch { holder, post_id, reply } => {
-                let cm = self.cm.lock().await;
-                let r = cm.send_post_fetch(&holder, &post_id).await;
+                // Brief lock: grab connection or session clone
+                let conn = {
+                    let cm = self.cm.lock().await;
+                    cm.connections.get(&holder).map(|pc| pc.connection.clone())
+                        .or_else(|| cm.sessions.get(&holder).map(|sc| sc.connection.clone()))
+                };
+                // All I/O outside the lock
+                let r = async {
+                    use crate::protocol::{PostFetchRequestPayload, PostFetchResponsePayload};
+                    let conn = match conn {
+                        Some(c) => c,
+                        None => {
+                            // resolve + connect outside lock
+                            let addr = Self::resolve_address_unlocked(&self.storage, &self.cm, &self.endpoint, &holder).await?;
+                            let addr_str = addr.ok_or_else(|| anyhow::anyhow!("No address for post holder"))?;
+                            let eid = iroh::EndpointId::from_bytes(&holder).map_err(|_| anyhow::anyhow!("Invalid endpoint ID"))?;
+                            let mut ep_addr = iroh::EndpointAddr::from(eid);
+                            if let Ok(sock) = addr_str.parse::<SocketAddr>() { ep_addr = ep_addr.with_ip_addr(sock); }
+                            ConnectionManager::connect_to_unlocked(&self.endpoint, ep_addr).await?
+                        }
+                    };
+                    let (mut send, mut recv) = conn.open_bi().await?;
+                    write_typed_message(&mut send, MessageType::PostFetchRequest, &PostFetchRequestPayload { post_id }).await?;
+                    send.finish()?;
+                    let mt = tokio::time::timeout(std::time::Duration::from_secs(10), read_message_type(&mut recv)).await??;
+                    if mt != MessageType::PostFetchResponse { return Ok(None); }
+                    let resp: PostFetchResponsePayload = read_payload(&mut recv, MAX_PAYLOAD).await?;
+                    Ok(resp.post)
+                }.await;
                 let _ = reply.send(r);
             }
             ConnCommand::TcpPunch { holder, browser_ip, post_id, reply } => {
-                let cm = self.cm.lock().await;
-                let r = cm.send_tcp_punch(&holder, browser_ip, &post_id).await;
+                // Brief lock: grab connection or session clone
+                let conn = {
+                    let cm = self.cm.lock().await;
+                    cm.connections.get(&holder).map(|pc| pc.connection.clone())
+                        .or_else(|| cm.sessions.get(&holder).map(|sc| sc.connection.clone()))
+                };
+                let r = async {
+                    use crate::protocol::{TcpPunchRequestPayload, TcpPunchResultPayload};
+                    let conn = match conn {
+                        Some(c) => c,
+                        None => {
+                            let addr = Self::resolve_address_unlocked(&self.storage, &self.cm, &self.endpoint, &holder).await?;
+                            let addr_str = addr.ok_or_else(|| anyhow::anyhow!("No address for punch target"))?;
+                            let eid = iroh::EndpointId::from_bytes(&holder).map_err(|_| anyhow::anyhow!("Invalid endpoint ID"))?;
+                            let mut ep_addr = iroh::EndpointAddr::from(eid);
+                            if let Ok(sock) = addr_str.parse::<SocketAddr>() { ep_addr = ep_addr.with_ip_addr(sock); }
+                            ConnectionManager::connect_to_unlocked(&self.endpoint, ep_addr).await?
+                        }
+                    };
+                    let (mut send, mut recv) = conn.open_bi().await?;
+                    write_typed_message(&mut send, MessageType::TcpPunchRequest, &TcpPunchRequestPayload { browser_ip, post_id }).await?;
+                    send.finish()?;
+                    let mt = tokio::time::timeout(std::time::Duration::from_secs(5), read_message_type(&mut recv)).await??;
+                    if mt != MessageType::TcpPunchResult { return Ok(None); }
+                    let resp: TcpPunchResultPayload = read_payload(&mut recv, 4096).await?;
+                    Ok(if resp.success { resp.http_addr } else { None })
+                }.await;
                 let _ = reply.send(r);
             }
             ConnCommand::ResolveAddress { target, reply } => {
-                let cm = self.cm.lock().await;
-                let r = cm.resolve_address(&target).await;
+                // No conn_mgr lock — uses hoisted fields + brief locks as needed
+                let r = Self::resolve_address_unlocked(&self.storage, &self.cm, &self.endpoint, &target).await;
                 let _ = reply.send(r);
             }
             ConnCommand::PullFromPeer { peer, reply } => {
-                let cm = self.cm.lock().await;
-                let r = cm.pull_from_peer(&peer).await;
+                // Brief lock: grab connection clone + follows data
+                let gather = {
+                    let cm = self.cm.lock().await;
+                    cm.connections.get(&peer).map(|pc| pc.connection.clone())
+                };
+                let r = match gather {
+                    Some(conn) => {
+                        // All I/O outside the lock, storage accessed via hoisted Arc
+                        ConnectionManager::pull_from_peer_unlocked(conn, &self.storage, &peer).await
+                    }
+                    None => Err(anyhow::anyhow!("not connected to {}", hex::encode(peer))),
+                };
                 let _ = reply.send(r);
             }
             ConnCommand::FetchEngagement { peer, reply } => {
-                let cm = self.cm.lock().await;
-                let r = cm.fetch_engagement_from_peer(&peer).await;
+                // Brief lock: grab connection clone
+                let gather = {
+                    let cm = self.cm.lock().await;
+                    cm.connections.get(&peer).map(|pc| pc.connection.clone())
+                };
+                let r = match gather {
+                    Some(conn) => {
+                        ConnectionManager::fetch_engagement_unlocked(conn, &self.storage, &peer).await
+                    }
+                    None => Err(anyhow::anyhow!("not connected to {}", hex::encode(peer))),
+                };
                 let _ = reply.send(r);
             }
             ConnCommand::InitiateAnchorProbe { reply } => {
-                let mut cm = self.cm.lock().await;
-                let r = cm.initiate_anchor_probe().await;
+                // Brief lock: gather probe data
+                let probe_data = {
+                    let cm = self.cm.lock().await;
+                    cm.gather_anchor_probe_data().await
+                };
+                let r = match probe_data {
+                    Some(data) => {
+                        // All probe I/O outside the lock
+                        let result = ConnectionManager::run_anchor_probe_unlocked(data).await;
+                        // Brief re-lock to update probe state
+                        let mut cm = self.cm.lock().await;
+                        cm.apply_anchor_probe_result(&result);
+                        result.outcome
+                    }
+                    None => Err(anyhow::anyhow!("No probe data available")),
+                };
                 let _ = reply.send(r);
             }
             ConnCommand::GetPeerLastActivity { peer, reply } => {
@@ -7391,7 +8257,7 @@ impl ConnectionActor {
 
 /// Standalone initial exchange (connector side) — does NOT require conn_mgr lock.
 /// Opens a bi-stream, sends our N1/N2/profile/deletes/post_ids, reads theirs.
 pub async fn initial_exchange_connect(
-    storage: &Arc<Mutex<Storage>>,
+    storage: &Arc<StoragePool>,
     our_node_id: &NodeId,
     conn: &iroh::endpoint::Connection,
     remote_node_id: NodeId,
@@ -7403,7 +8269,7 @@ pub async fn initial_exchange_connect(
     our_cache_pressure: Option,
 ) -> anyhow::Result<ExchangeResult> {
     let our_payload = {
-        let storage = storage.lock().await;
+        let storage = storage.get().await;
         let n1 = storage.build_n1_share()?;
         let n2 = storage.build_n2_share()?;
         let profile = storage.get_profile(our_node_id)?;
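// [Editor's note] Illustrative sketch, not part of this patch. The PostFetch and
// TcpPunch arms above read responses with `timeout(..).await??`: the first `?`
// surfaces the timeout (Elapsed) as an error, the second surfaces the inner I/O
// error. The same shape in isolation:
use std::time::Duration;
use tokio::time::timeout;

async fn read_one() -> anyhow::Result<u8> {
    Ok(7) // stand-in for read_message_type(&mut recv)
}

async fn read_with_deadline() -> anyhow::Result<u8> {
    // Err(Elapsed)  -> bubbles out via the outer `?`
    // Ok(Err(e))    -> bubbles out via the inner `?`
    // Ok(Ok(byte))  -> byte
    let byte = timeout(Duration::from_secs(10), read_one()).await??;
    Ok(byte)
}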
@@ -7460,7 +8326,7 @@ pub async fn initial_exchange_connect(
 
 /// Standalone initial exchange (acceptor side) — does NOT require conn_mgr lock.
 /// Message type byte already consumed by caller.
 pub async fn initial_exchange_accept(
-    storage: &Arc<Mutex<Storage>>,
+    storage: &Arc<StoragePool>,
     our_node_id: &NodeId,
     mut send: iroh::endpoint::SendStream,
     mut recv: iroh::endpoint::RecvStream,
@@ -7476,7 +8342,7 @@ pub async fn initial_exchange_accept(
     let their_payload: InitialExchangePayload = read_payload(&mut recv, MAX_PAYLOAD).await?;
 
     let our_payload = {
-        let storage = storage.lock().await;
+        let storage = storage.get().await;
         let n1 = storage.build_n1_share()?;
         let n2 = storage.build_n2_share()?;
         let profile = storage.get_profile(our_node_id)?;
@@ -7511,12 +8377,12 @@ pub async fn initial_exchange_accept(
 
 /// Process the peer's initial exchange payload (shared between connect and accept sides).
 async fn process_exchange_payload(
-    storage: &Arc<Mutex<Storage>>,
+    storage: &Arc<StoragePool>,
     our_node_id: &NodeId,
     remote_node_id: &NodeId,
     payload: &InitialExchangePayload,
 ) -> anyhow::Result<()> {
-    let storage = storage.lock().await;
+    let storage = storage.get().await;
 
     // Filter out our own ID from their N1 before storing as our N2
     let filtered_n1: Vec<NodeId> = payload.n1_node_ids.iter()
diff --git a/crates/core/src/http.rs b/crates/core/src/http.rs
index 5efbe50..fa35631 100644
--- a/crates/core/src/http.rs
+++ b/crates/core/src/http.rs
@@ -12,7 +12,7 @@ use tokio::sync::Mutex;
 use tracing::{debug, info};
 
 use crate::blob::BlobStore;
-use crate::storage::Storage;
+use crate::storage::{Storage, StoragePool};
 use crate::types::PostVisibility;
 
 /// Connection budget: 5 content slots, 15 redirect slots, 1 per IP.
@@ -104,7 +104,7 @@ impl HttpBudget {
 /// Run the HTTP server on the given port. Blocks forever.
 pub async fn run_http_server(
     port: u16,
-    storage: Arc<Mutex<Storage>>,
+    storage: Arc<StoragePool>,
     blob_store: Arc<BlobStore>,
     downstream_addrs: Arc>>>,
 ) -> anyhow::Result<()> {
@@ -180,7 +180,7 @@ async fn handle_connection(
     mut stream: TcpStream,
     _ip: IpAddr,
     slot: SlotKind,
-    storage: &Arc<Mutex<Storage>>,
+    storage: &Arc<StoragePool>,
     blob_store: &Arc<BlobStore>,
     downstream_addrs: &Arc>>>,
 ) {
@@ -281,12 +281,12 @@ fn validate_hex64(s: &str) -> Option<[u8; 32]> {
 async fn serve_post(
     stream: &mut TcpStream,
     post_id: &[u8; 32],
-    storage: &Arc<Mutex<Storage>>,
+    storage: &Arc<StoragePool>,
     blob_store: &Arc<BlobStore>,
 ) -> bool {
     // Look up post + visibility
     let result = {
-        let store = storage.lock().await;
+        let store = storage.get().await;
         store.get_post_with_visibility(post_id)
     };
@@ -301,7 +301,7 @@ async fn serve_post(
 
     // Look up author name
     let author_name = {
-        let store = storage.lock().await;
+        let store = storage.get().await;
         store
             .get_profile(&post.author)
             .ok()
@@ -321,12 +321,12 @@ async fn serve_post(
 async fn serve_blob(
     stream: &mut TcpStream,
     blob_id: &[u8; 32],
-    storage: &Arc<Mutex<Storage>>,
+    storage: &Arc<StoragePool>,
     blob_store: &Arc<BlobStore>,
 ) -> bool {
     // Verify this blob belongs to a public post
     let (mime_type, _post_id) = {
-        let store = storage.lock().await;
+        let store = storage.get().await;
         match find_public_blob_info(&store, blob_id) {
             Some(info) => info,
             None => return false, // not found or not public — hard close
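// [Editor's note] Speculative sketch, not part of this patch. The diff swaps
// `storage.lock().await` for `storage.get().await` everywhere but never shows
// StoragePool itself; call sites such as `find_public_blob_info(&store, ..)`
// (which still takes `&Storage`) suggest get() yields a guard that derefs to a
// Storage-like connection. One plausible shape, purely a guess, is a round-robin
// checkout over several connections so readers stop contending on one Mutex:
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use tokio::sync::{Mutex, OwnedMutexGuard};

struct Conn; // stand-in for one database connection

pub struct Pool {
    conns: Vec<Arc<Mutex<Conn>>>,
    next: AtomicUsize,
}

impl Pool {
    pub async fn get(&self) -> OwnedMutexGuard<Conn> {
        // Each guard serializes a single connection only, so two callers on
        // different connections proceed in parallel.
        let i = self.next.fetch_add(1, Ordering::Relaxed) % self.conns.len();
        Arc::clone(&self.conns[i]).lock_owned().await
    }
}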
@@ -367,12 +367,12 @@ fn find_public_blob_info(store: &Storage, blob_id: &[u8; 32]) -> Option<(String,
 async fn try_redirect(
     stream: &mut TcpStream,
     post_id: &[u8; 32],
-    storage: &Arc<Mutex<Storage>>,
+    storage: &Arc<StoragePool>,
     _downstream_addrs: &Arc>>>,
 ) -> bool {
     // Get downstream peers for this post
     let downstream_peers = {
-        let store = storage.lock().await;
+        let store = storage.get().await;
         // Verify post exists and is public first
         match store.get_post_with_visibility(post_id) {
             Ok(Some((_, PostVisibility::Public))) => {}
@@ -383,7 +383,7 @@ async fn try_redirect(
 
     // Get addresses for downstream peers
     let candidates: Vec<String> = {
-        let store = storage.lock().await;
+        let store = storage.get().await;
         let mut addrs = Vec::new();
         for peer_id in &downstream_peers {
             if let Ok(Some(peer)) = store.get_peer_record(peer_id) {
diff --git a/crates/core/src/network.rs b/crates/core/src/network.rs
index 729906a..e36e885 100644
--- a/crates/core/src/network.rs
+++ b/crates/core/src/network.rs
@@ -18,7 +18,7 @@ use crate::protocol::{
     PullSyncRequestPayload, PullSyncResponsePayload, RefuseRedirectPayload,
     SocialAddressUpdatePayload, SocialDisconnectNoticePayload, SyncPost, ALPN_V2,
 };
-use crate::storage::Storage;
+use crate::storage::{Storage, StoragePool};
 use crate::types::{
     DeleteRecord, DeviceProfile, DeviceRole, NodeId, PeerSlotKind, PeerWithAddress, Post, PostId,
     PostVisibility, PublicProfile, SessionReachMethod, WormResult,
@@ -27,7 +27,7 @@ use crate::types::{
 /// The network layer: manages the iroh endpoint and mesh connections
 pub struct Network {
     endpoint: iroh::Endpoint,
-    storage: Arc<Mutex<Storage>>,
+    storage: Arc<StoragePool>,
     our_node_id: NodeId,
     is_anchor: Arc<AtomicBool>,
     conn_mgr: Arc<Mutex<ConnectionManager>>,
@@ -79,7 +79,7 @@ pub(crate) fn is_publicly_routable(addr: &SocketAddr) -> bool {
 impl Network {
     pub async fn new(
         secret_key: iroh::SecretKey,
-        storage: Arc<Mutex<Storage>>,
+        storage: Arc<StoragePool>,
         bind_addr: Option<SocketAddr>,
         secret_seed: [u8; 32],
         blob_store: Arc<BlobStore>,
@@ -469,7 +469,7 @@ impl Network {
 
         // Store peer with their address
         {
-            let storage = this.storage.lock().await;
+            let storage = this.storage.get().await;
             let _ = storage.upsert_peer(&remote_node_id, &[remote_sock], None);
         }
@@ -512,7 +512,7 @@ impl Network {
     async fn handle_incoming_connection(
         conn_mgr: Arc<Mutex<ConnectionManager>>,
         conn_handle: ConnHandle,
-        storage: Arc<Mutex<Storage>>,
+        storage: Arc<StoragePool>,
         conn: iroh::endpoint::Connection,
         remote_node_id: NodeId,
         remote_sock: SocketAddr,
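// [Editor's note] Illustrative sketch, not part of this patch. Several filters in
// network.rs lean on is_publicly_routable(); its body is outside this diff. A
// conservative stand-in that rejects loopback, unspecified, RFC 1918 and
// link-local v4, and unique-local v6 (the real function may check more or less):
use std::net::{IpAddr, SocketAddr};

fn publicly_routable(addr: &SocketAddr) -> bool {
    match addr.ip() {
        IpAddr::V4(v4) => {
            !(v4.is_loopback() || v4.is_unspecified() || v4.is_private() || v4.is_link_local())
        }
        IpAddr::V6(v6) => {
            let unique_local = (v6.segments()[0] & 0xfe00) == 0xfc00; // fc00::/7
            !(v6.is_loopback() || v6.is_unspecified() || unique_local)
        }
    }
}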
@@ -608,7 +608,7 @@ impl Network {
     /// Uses ConnHandle for all state access — no direct conn_mgr lock.
     async fn try_mesh_upgrade(
         conn_handle: &ConnHandle,
-        storage: &Arc<Mutex<Storage>>,
+        storage: &Arc<StoragePool>,
         conn: &iroh::endpoint::Connection,
         remote_node_id: NodeId,
         remote_sock: SocketAddr,
@@ -638,7 +638,7 @@ impl Network {
         }
 
         {
-            let s = storage.lock().await;
+            let s = storage.get().await;
             let _ = s.upsert_peer(&remote_node_id, &[remote_sock], None);
             let _ = s.add_mesh_peer(&remote_node_id, PeerSlotKind::Local, 0);
             if s.has_social_route(&remote_node_id).unwrap_or(false) {
@@ -663,6 +663,7 @@ impl Network {
             }
             Err(e) => {
                 error!(peer = hex::encode(remote_node_id), error = ?e, "Initial exchange failed");
+                return false;
             }
         }
 
@@ -692,13 +693,12 @@ impl Network {
         // Store addresses so they're available during initial exchange
         let addrs: Vec<SocketAddr> = addr.ip_addrs().copied().collect();
         if !addrs.is_empty() {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let _ = storage.upsert_peer(&peer_id, &addrs, None);
         }
 
-        // QUIC connect OUTSIDE the conn_mgr lock — this can take 60+ seconds
-        // on unreachable peers and must not block other tasks
-        let conn = self.endpoint.connect(addr, ALPN_V2).await?;
+        // QUIC connect OUTSIDE the conn_mgr lock with 15s timeout
+        let conn = ConnectionManager::connect_to_unlocked(&self.endpoint, addr).await?;
 
         // Register the established connection
         self.conn_handle.register_connection(peer_id, conn.clone(), addrs, PeerSlotKind::Local).await;
@@ -734,7 +734,7 @@ impl Network {
                     let addrs: Vec<SocketAddr> = redir.a.iter()
                         .filter_map(|a| a.parse::<SocketAddr>().ok())
                         .collect();
-                    let s = self.storage.lock().await;
+                    let s = self.storage.get().await;
                     let _ = s.upsert_peer(&redir_id, &addrs, None);
                     drop(s);
                     self.conn_handle.notify_growth();
@@ -837,7 +837,7 @@ impl Network {
         // Build full state: all current N1 and N2 as "added", nothing removed
         let all_n1 = self.conn_handle.connected_peers().await;
         let all_n2 = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             storage.build_n2_share().unwrap_or_default()
         };
@@ -891,7 +891,7 @@ impl Network {
             }
             PostVisibility::GroupEncrypted { group_id, .. } => {
                 // Push to all group members
-                match self.storage.lock().await.get_all_group_members() {
+                match self.storage.get().await.get_all_group_members() {
                     Ok(map) => map.get(group_id).cloned().unwrap_or_default().into_iter().collect(),
                     Err(_) => return 0,
                 }
@@ -1041,7 +1041,7 @@ impl Network {
         manifest: &crate::types::CdnManifest,
     ) -> usize {
         let downstream = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             storage.get_blob_downstream(cid).unwrap_or_default()
         };
         let payload = crate::protocol::ManifestPushPayload {
@@ -1156,7 +1156,7 @@ impl Network {
         }
 
         let audience_members: Vec<NodeId> = {
-            match self.storage.lock().await.list_audience_members() {
+            match self.storage.get().await.list_audience_members() {
                 Ok(m) => m,
                 Err(_) => return 0,
             }
@@ -1318,7 +1318,7 @@ impl Network {
             Ok(()) => Ok(()),
             Err(e) if e.to_string().contains("mesh refused") => {
                 // Anchor refused mesh — reconnect as session for registration
-                let conn = self.endpoint.connect(addr, ALPN_V2).await?;
+                let conn = ConnectionManager::connect_to_unlocked(&self.endpoint, addr).await?;
                 self.conn_handle.add_session(peer_id, conn, crate::types::SessionReachMethod::Direct, None).await;
                 self.conn_handle.log_activity(
                     ActivityLevel::Info,
@@ -1352,7 +1352,7 @@ impl Network {
             Ok(ExchangeResult::Accepted) => {
                 self.conn_handle.register_connection(peer_id, conn.clone(), vec![], PeerSlotKind::Local).await;
                 {
-                    let s = self.storage.lock().await;
+                    let s = self.storage.get().await;
                     let _ = s.add_mesh_peer(&peer_id, PeerSlotKind::Local, 0);
                 }
@@ -1464,7 +1464,7 @@ impl Network {
                 let addrs: Vec<SocketAddr> = redir.a.iter()
                     .filter_map(|a| a.parse::<SocketAddr>().ok())
                     .collect();
-                let _ = self.storage.lock().await.upsert_peer(&redir_id, &addrs, None);
+                let _ = self.storage.get().await.upsert_peer(&redir_id, &addrs, None);
             }
         }
     }
@@ -1568,7 +1568,7 @@ impl Network {
         } else {
             // Network resolution: get reporter connections, resolve outside lock
             let reporters_and_conns = {
-                let storage = self.storage.lock().await;
+                let storage = self.storage.get().await;
                 let n2 = storage.find_in_n2(&candidate_id).unwrap_or_default();
                 let n3 = storage.find_in_n3(&candidate_id).unwrap_or_default();
                 drop(storage);
@@ -1660,7 +1660,7 @@ impl Network {
 
         // Find N2 reporter(s) who told us about this peer — they can introduce us
         let reporters = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             storage.find_in_n2(&candidate_id).unwrap_or_default()
         };
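// [Editor's note] Illustrative sketch, not part of this patch. write_typed_message
// and read_payload are used throughout but defined elsewhere; the size arguments
// (1024, 4096, MAX_PAYLOAD) read as per-message caps enforced on receive. A
// generic length-prefixed frame in that spirit (the real wire format may differ):
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};

async fn write_frame<W: AsyncWrite + Unpin>(w: &mut W, msg_type: u8, body: &[u8]) -> anyhow::Result<()> {
    w.write_all(&[msg_type]).await?;
    w.write_all(&(body.len() as u32).to_be_bytes()).await?;
    w.write_all(body).await?;
    Ok(())
}

async fn read_frame<R: AsyncRead + Unpin>(r: &mut R, max: usize) -> anyhow::Result<(u8, Vec<u8>)> {
    let mut ty = [0u8; 1];
    r.read_exact(&mut ty).await?;
    let mut len = [0u8; 4];
    r.read_exact(&mut len).await?;
    let n = u32::from_be_bytes(len) as usize;
    // Reject oversized frames before allocating, mirroring read_payload's cap.
    anyhow::ensure!(n <= max, "frame of {n} bytes exceeds cap of {max}");
    let mut body = vec![0u8; n];
    r.read_exact(&mut body).await?;
    Ok((ty[0], body))
}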
@@ -1724,7 +1724,7 @@ impl Network {
     /// Send a uni-stream message to all audience members (persistent if available, ephemeral otherwise).
     async fn send_to_audience<T: serde::Serialize>(&self, msg_type: MessageType, payload: &T) -> usize {
-        let audience: Vec<NodeId> = match self.storage.lock().await.list_audience_members() {
+        let audience: Vec<NodeId> = match self.storage.get().await.list_audience_members() {
             Ok(m) => m,
             Err(_) => return 0,
         };
@@ -1741,7 +1741,7 @@ impl Network {
     pub async fn pull_from_peer(&self, peer_id: &NodeId) -> anyhow::Result {
         let conn = self.get_connection(peer_id).await?;
         let (our_follows, follows_sync) = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             (
                 storage.list_follows()?,
                 storage.get_follows_with_last_sync().unwrap_or_default(),
             )
@@ -1768,7 +1768,7 @@ impl Network {
             .duration_since(std::time::UNIX_EPOCH)
             .unwrap_or_default()
             .as_millis() as u64;
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         let mut posts_received = 0;
         let mut vis_updates = 0;
         for sp in &response.posts {
@@ -1968,7 +1968,7 @@ impl Network {
             Ok(Ok(result)) if result.accepted => {
                 let our_profile = self.conn_handle.our_nat_profile().await;
                 let peer_profile = {
-                    let s = self.storage.lock().await;
+                    let s = self.storage.get().await;
                     s.get_peer_nat_profile(peer_id)
                 };
                 if let Some(conn) = crate::connection::hole_punch_with_scanning(&self.endpoint, peer_id, &result.target_addresses, our_profile, peer_profile).await {
@@ -2010,7 +2010,7 @@ impl Network {
     pub async fn addr_from_storage(&self, peer_id: &NodeId) -> Option<iroh::EndpointAddr> {
         let endpoint_id = iroh::EndpointId::from_bytes(peer_id).ok()?;
         let mut addr = iroh::EndpointAddr::from(endpoint_id);
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         if let Ok(Some(rec)) = storage.get_peer_record(peer_id) {
             for sock in &rec.addresses {
                 addr = addr.with_ip_addr(*sock);
@@ -2158,7 +2158,7 @@ impl Network {
     /// Check if a peer is a known anchor.
     pub async fn is_anchor_peer(&self, node_id: &NodeId) -> bool {
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         storage.is_peer_anchor(node_id).unwrap_or(false)
     }
@@ -2222,7 +2222,7 @@ impl Network {
 
         let our_profile = self.conn_handle.our_nat_profile().await;
         let peer_profile = {
-            let s = self.storage.lock().await;
+            let s = self.storage.get().await;
             s.get_peer_nat_profile(&target)
         };
@@ -2301,7 +2301,7 @@ impl Network {
         exclude_peer: &crate::types::NodeId,
     ) -> usize {
         let downstream = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             storage.get_post_downstream(post_id).unwrap_or_default()
         };
         let mut sent = 0;
diff --git a/crates/core/src/node.rs b/crates/core/src/node.rs
index 0838651..31db343 100644
--- a/crates/core/src/node.rs
+++ b/crates/core/src/node.rs
@@ -11,7 +11,7 @@ use crate::blob::BlobStore;
 use crate::content::compute_post_id;
 use crate::crypto;
 use crate::network::Network;
-use crate::storage::Storage;
+use crate::storage::{Storage, StoragePool};
 use crate::types::{
     Attachment, AudienceDirection, AudienceRecord, AudienceStatus, Circle, DeleteRecord,
     DeviceProfile, DeviceRole, NodeId, PeerRecord, PeerSlotKind, PeerWithAddress, Post, PostId,
@@ -25,7 +25,7 @@ const DEFAULT_ANCHOR: &str = "17af141956ae0b50dc1cb9248cadf5fca371ea2d8531ac9add
 /// A distsoc node: ties together identity, storage, and networking
 pub struct Node {
     pub data_dir: PathBuf,
-    pub storage: Arc<Mutex<Storage>>,
+    pub storage: Arc<StoragePool>,
     pub network: Arc<Network>,
     pub node_id: NodeId,
     pub blob_store: Arc<BlobStore>,
@@ -82,11 +82,11 @@ impl Node {
 
         // Open storage
         let db_path = data_dir.join("itsgoin.db");
-        let storage = Arc::new(Mutex::new(Storage::open(&db_path)?));
+        let storage = Arc::new(StoragePool::open(&db_path)?);
 
         // Startup sweep: clear stale N2/N3 and mesh_peers from prior session
         {
-            let s = storage.lock().await;
+            let s = storage.get().await;
             let n_cleared = s.clear_all_n2_n3().unwrap_or(0);
             let m_cleared = s.clear_all_mesh_peers().unwrap_or(0);
             if n_cleared > 0 || m_cleared > 0 {
@@ -110,13 +110,13 @@ impl Node {
 
         // Auto-follow ourselves so our own posts show in the feed
         {
-            let s = storage.lock().await;
+            let s = storage.get().await;
             s.add_follow(&node_id)?;
         }
 
         // Bootstrap: if peers table is empty, try bootstrap.json then default anchor
         {
-            let s = storage.lock().await;
+            let s = storage.get().await;
             let has_peers = s.has_peers()?;
             drop(s);
@@ -145,7 +145,7 @@ impl Node {
                     info!(peer = hex::encode(nid), "Bootstrap: connecting to peer");
                     let ip_addrs: Vec<_> = addr.ip_addrs().copied().collect();
                     {
-                        let s = storage.lock().await;
+                        let s = storage.get().await;
                         if ip_addrs.is_empty() {
                             let _ = s.add_peer(&nid);
                         } else {
@@ -171,7 +171,7 @@ impl Node {
                 // Always store anchor in known_anchors (even before referrals)
                 // so the periodic cycle can re-register and request referrals later
                 {
-                    let s = storage.lock().await;
+                    let s = storage.get().await;
                     let anchor_addrs: Vec<SocketAddr> = s.get_peer_record(&nid)
                         .ok().flatten()
                         .map(|r| r.addresses).unwrap_or_default();
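// [Editor's note] Illustrative sketch, not part of this patch. The startup path
// above records anchors in known_anchors so the periodic cycle (below) can retry
// them, splitting discovered anchors (priority) from bootstrap ones (fallback).
// The ordering itself, as a pure function over an invented Anchor type:
#[derive(Clone)]
struct Anchor {
    id: [u8; 32],
    discovered: bool, // learned from the mesh, not from bootstrap.json
}

fn reconnect_order(known: &[Anchor]) -> Vec<Anchor> {
    let (mut discovered, bootstrap): (Vec<_>, Vec<_>) =
        known.iter().cloned().partition(|a| a.discovered);
    discovered.extend(bootstrap); // try discovered anchors first, bootstrap last
    discovered
}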
@@ -283,7 +283,7 @@ impl Node {
         // Without this, stale IPv6 addresses from previous sessions can block reconnection
         // on devices without IPv6 connectivity (see bugs-fixed.md #1).
         {
-            let s = storage.lock().await;
+            let s = storage.get().await;
             for (nid, addr) in &bootstrap_anchors {
                 let ip_addrs: Vec<SocketAddr> = addr.ip_addrs().copied().collect();
                 if !ip_addrs.is_empty() {
@@ -295,7 +295,7 @@ impl Node {
 
         // Rebuild social routes from follows + audience
         {
-            let s = storage.lock().await;
+            let s = storage.get().await;
             match s.rebuild_social_routes() {
                 Ok(count) if count > 0 => info!(count, "Rebuilt social routes on startup"),
                 _ => {}
@@ -309,7 +309,7 @@ impl Node {
             let conn_count = network.connection_count().await;
             if conn_count < 5 {
                 let known = {
-                    let s = storage.lock().await;
+                    let s = storage.get().await;
                     s.list_known_anchors().unwrap_or_default()
                 };
                 // Split into discovered anchors (priority) and bootstrap anchors (fallback)
@@ -617,7 +617,7 @@ impl Node {
             VisibilityIntent::Circle(circle_name) => {
                 // Try group encryption first
                 let group_info = {
-                    let storage = self.storage.lock().await;
+                    let storage = self.storage.get().await;
                     storage.get_group_key_by_circle(circle_name)?
                         .and_then(|gk| {
                             storage.get_group_seed(&gk.group_id, gk.epoch).ok().flatten()
@@ -709,7 +709,7 @@ impl Node {
         let post_id = compute_post_id(&post);
 
         {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             storage.store_post_with_intent(&post_id, &post, &visibility, &intent)?;
             for att in &post.attachments {
                 storage.record_blob(&att.cid, &post_id, &self.node_id, att.size_bytes, &att.mime_type, now)?;
@@ -762,7 +762,7 @@ impl Node {
 
         // Build and store CDN manifests for blobs
         if !post.attachments.is_empty() {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let (previous, _following) = storage.get_author_post_neighborhood(&self.node_id, now, 10)?;
             drop(storage);
@@ -782,7 +782,7 @@ impl Node {
                 let manifest_json = serde_json::to_string(&manifest)?;
                 {
-                    let storage = self.storage.lock().await;
+                    let storage = self.storage.get().await;
                     for att in &post.attachments {
                         storage.store_cdn_manifest(&att.cid, &manifest_json, &self.node_id, now)?;
                     }
@@ -793,7 +793,7 @@ impl Node {
 
         // Push updated manifests to downstream peers
         let manifests_to_push = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             storage.get_manifests_for_author_blobs(&self.node_id).unwrap_or_default()
         };
         let our_addrs = self.network.our_addresses();
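// [Editor's note] Illustrative sketch, not part of this patch. The manifest code
// above asks storage for the author's "post neighborhood" (up to 10 previous
// posts), and update_neighbor_manifests below splices a new post into older
// manifests' following lists. The windowing itself, over a timestamp-sorted
// timeline of post ids:
fn neighborhood(timeline: &[[u8; 32]], insert_idx: usize, k: usize) -> (Vec<[u8; 32]>, Vec<[u8; 32]>) {
    let i = insert_idx.min(timeline.len());
    let previous = timeline[..i].iter().rev().take(k).copied().collect(); // newest-first
    let following = timeline[i..].iter().take(k).copied().collect();
    (previous, following)
}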
@@ -825,7 +825,7 @@ impl Node {
     /// Update the manifests of recent prior posts to include a newly created post
     /// in their following_posts list. Re-signs each updated manifest.
     async fn update_neighbor_manifests(&self, new_post_id: &PostId, new_timestamp_ms: u64) {
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         let manifests = match storage.get_manifests_for_author_blobs(&self.node_id) {
             Ok(m) => m,
             Err(e) => {
@@ -866,14 +866,14 @@ impl Node {
                 Ok(j) => j,
                 Err(_) => continue,
             };
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let _ = storage.store_cdn_manifest(&cid, &updated_json, &self.node_id, new_timestamp_ms);
             drop(storage);
         }
     }
 
     async fn resolve_recipients(&self, intent: &VisibilityIntent) -> anyhow::Result<Vec<NodeId>> {
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         match intent {
             VisibilityIntent::Public => Ok(vec![]),
             VisibilityIntent::Friends => storage.list_public_follows(),
@@ -886,7 +886,7 @@ impl Node {
         &self,
     ) -> anyhow::Result)>> {
         let (raw, group_seeds) = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let posts = storage.get_feed()?;
             let seeds = storage.get_all_group_seeds_map().unwrap_or_default();
             (posts, seeds)
@@ -898,7 +898,7 @@ impl Node {
         &self,
     ) -> anyhow::Result)>> {
         let (raw, group_seeds) = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let posts = storage.list_posts_reverse_chron()?;
             let seeds = storage.get_all_group_seeds_map().unwrap_or_default();
             (posts, seeds)
@@ -947,7 +947,7 @@ impl Node {
 
     pub async fn follow(&self, node_id: &NodeId) -> anyhow::Result<()> {
         let connected = self.network.is_connected(node_id).await;
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         storage.add_follow(node_id)?;
 
         // Upsert social route
@@ -975,7 +975,7 @@ impl Node {
     }
 
     pub async fn unfollow(&self, node_id: &NodeId) -> anyhow::Result<()> {
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         storage.remove_follow(node_id)?;
 
         // Downgrade or remove social route
@@ -994,7 +994,7 @@ impl Node {
     }
 
     pub async fn list_follows(&self) -> anyhow::Result<Vec<NodeId>> {
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         storage.list_follows()
     }
@@ -1007,7 +1007,7 @@ impl Node {
         let recent_peers = self.current_recent_peers().await;
 
         let profile = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let existing_anchors = storage.get_peer_anchors(&self.node_id).unwrap_or_default();
             let preferred_peers = storage.list_preferred_peers().unwrap_or_default();
@@ -1048,7 +1048,7 @@ impl Node {
         let recent_peers = self.current_recent_peers().await;
 
         let profile = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let existing = storage.get_profile(&self.node_id)?;
             let (display_name, bio, public_visible, avatar_cid) = match existing {
                 Some(p) => (p.display_name, p.bio, p.public_visible, p.avatar_cid),
@@ -1081,27 +1081,27 @@ impl Node {
     }
 
     pub async fn get_peer_anchors(&self, node_id: &NodeId) -> anyhow::Result<Vec<NodeId>> {
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         storage.get_peer_anchors(node_id)
     }
 
     pub async fn get_profile(&self, node_id: &NodeId) -> anyhow::Result<Option<PublicProfile>> {
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         storage.get_profile(node_id)
     }
 
     pub async fn my_profile(&self) -> anyhow::Result<Option<PublicProfile>> {
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         storage.get_profile(&self.node_id)
     }
 
     pub async fn has_profile(&self) -> anyhow::Result<bool> {
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         Ok(storage.get_profile(&self.node_id)?.is_some())
     }
 
     pub async fn get_display_name(&self, node_id: &NodeId) -> anyhow::Result<Option<String>> {
-        let storage = self.storage.lock().await;
+        let storage = self.storage.get().await;
         storage.get_display_name(node_id)
     }
@@ -1111,7 +1111,7 @@ impl Node {
     pub async fn get_blob(&self, cid: &[u8; 32]) -> anyhow::Result<Option<Vec<u8>>> {
         let data = self.blob_store.get(cid)?;
         if data.is_some() {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let _ = storage.touch_blob_access(cid);
         }
         Ok(data)
@@ -1170,7 +1170,7 @@ impl Node {
 
         // Single lock acquisition for all DB reads
         let (post, visibility, group_seeds) = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let _ = storage.touch_blob_access(cid);
             match storage.get_post_with_visibility(post_id)? {
                 Some((post, vis)) => {
@@ -1199,7 +1199,7 @@ impl Node {
     pub async fn prefetch_blobs_from_peer(&self, peer_id: &NodeId) {
         // Brief lock: get post IDs and their attachment info
         let posts_with_atts: Vec<(PostId, NodeId, Vec<Attachment>)> = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let post_ids = storage.list_post_ids().unwrap_or_default();
             let mut result = Vec::new();
             for pid in post_ids {
@@ -1279,7 +1279,7 @@ impl Node {
         if let Some(ref data) = data {
             // Store blob locally
             self.blob_store.store(cid, data)?;
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             storage.record_blob(cid, post_id, author, data.len() as u64, mime_type, created_at)?;
 
             // Store AuthorManifest if provided (extract from CdnManifest wrapper)
@@ -1319,7 +1319,7 @@ impl Node {
     ) -> anyhow::Result<Option<Vec<u8>>> {
         // 1. Check local
         if let Some(data) = self.blob_store.get(cid)? {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             let _ = storage.touch_blob_access(cid);
             return Ok(Some(data));
         }
@@ -1329,7 +1329,7 @@ impl Node {
 
         // 2. Try existing upstream (if we previously fetched this blob)
         let upstream = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             storage.get_blob_upstream(cid)?
         };
         if let Some((upstream_nid, _upstream_addrs)) = upstream {
@@ -1343,7 +1343,7 @@ impl Node {
 
         // 3. Lateral N0-N2: mesh peers + N2 peers who have the author's posts
         //    (sorted by get_lateral_blob_sources: non-anchors first)
         let lateral_sources = {
-            let storage = self.storage.lock().await;
+            let storage = self.storage.get().await;
             storage.get_lateral_blob_sources(author, post_id).unwrap_or_default()
         };
         for lateral in lateral_sources {
@@ -1353,7 +1353,7 @@ impl Node {
             match self.network.fetch_blob_full(cid, &lateral).await {
                 Ok((Some(data), response)) => {
                     self.blob_store.store(cid, &data)?;
-                    let storage = self.storage.lock().await;
+                    let storage = self.storage.get().await;
                     storage.record_blob(cid, post_id, author, data.len() as u64, mime_type, created_at)?;
                     if let Some(ref cdn_manifest) = response.manifest {
                         if crypto::verify_manifest_signature(&cdn_manifest.author_manifest) {
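// [Editor's note] Illustrative sketch, not part of this patch. The numbered
// steps in this fetch path (1. local store, 2. remembered upstream, 3. lateral
// N0-N2 sources, 4. replicas below, then the author) form an ordered fallback
// chain where the first success short-circuits. Its control flow, with a stub:
async fn fetch_from(peer: u64) -> Option<Vec<u8>> {
    let _ = peer; // the real version does a network blob fetch
    None
}

async fn fetch_with_fallbacks(local: Option<Vec<u8>>, tiers: Vec<Vec<u64>>) -> Option<Vec<u8>> {
    if local.is_some() {
        return local; // cheapest tier: our own blob store
    }
    for tier in tiers {
        for peer in tier {
            if let Some(data) = fetch_from(peer).await {
                return Some(data); // first success wins; later tiers never run
            }
        }
    }
    None
}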
}; for replica in replicas { @@ -1410,7 +1410,7 @@ impl Node { // ---- Circles ---- pub async fn create_circle(&self, name: String) -> anyhow::Result<()> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.create_circle(&name)?; drop(storage); self.create_group_key_for_circle(&name).await?; @@ -1418,7 +1418,7 @@ impl Node { } pub async fn delete_circle(&self, name: String) -> anyhow::Result<()> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; // Delete group key and associated data if let Ok(Some(gk)) = storage.get_group_key_by_circle(&name) { let _ = storage.delete_group_key(&gk.group_id); @@ -1428,13 +1428,13 @@ impl Node { pub async fn add_to_circle(&self, circle_name: String, node_id: NodeId) -> anyhow::Result<()> { { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.add_circle_member(&circle_name, &node_id)?; } // Wrap current group key for new member and distribute let distribute_payload = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; if let Ok(Some(gk)) = storage.get_group_key_by_circle(&circle_name) { if gk.admin == self.node_id { if let Ok(Some(seed)) = storage.get_group_seed(&gk.group_id, gk.epoch) { @@ -1478,7 +1478,7 @@ impl Node { node_id: NodeId, ) -> anyhow::Result<()> { { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.remove_circle_member(&circle_name, &node_id)?; } @@ -1505,7 +1505,7 @@ impl Node { created_at: now, }; - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.create_group_key(&record, Some(&seed))?; storage.store_group_seed(&group_id, 1, &seed)?; @@ -1534,7 +1534,7 @@ impl Node { wrapped_group_key: wrapped, }; { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let _ = storage.store_group_member_key(&group_id, &mk); } let payload = crate::protocol::GroupKeyDistributePayload { @@ -1560,7 +1560,7 @@ impl Node { /// Rotate the group key for a circle (called on member removal). 
async fn rotate_group_key(&self, circle_name: &str) { let rotate_result = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let gk = match storage.get_group_key_by_circle(circle_name) { Ok(Some(gk)) if gk.admin == self.node_id => gk, _ => return, @@ -1588,7 +1588,7 @@ impl Node { if let Some((group_id, new_seed, new_pubkey, new_epoch, member_keys, circle_name)) = rotate_result { // Update storage { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let _ = storage.update_group_epoch(&group_id, new_epoch, &new_pubkey, Some(&new_seed)); let _ = storage.store_group_seed(&group_id, new_epoch, &new_seed); for mk in &member_keys { @@ -1617,7 +1617,7 @@ impl Node { } pub async fn list_circles(&self) -> anyhow::Result> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.list_circles() } @@ -1646,7 +1646,7 @@ impl Node { // Get group key for this circle let (encrypted_payload, wrapped_cek, group_id, epoch) = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; // Verify circle exists let circles = storage.list_circles()?; if !circles.iter().any(|c| c.name == circle_name) { @@ -1707,7 +1707,7 @@ impl Node { .as_millis() as u64; let payload = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let gk = storage.get_group_key_by_circle(&circle_name)? .ok_or_else(|| anyhow::anyhow!("no group key for circle '{}'", circle_name))?; let seed = storage.get_group_seed(&gk.group_id, gk.epoch)? @@ -1741,7 +1741,7 @@ impl Node { let recent_peers = self.current_recent_peers().await; let profile = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let existing = storage.get_profile(&self.node_id)?; let (display_name, bio, avatar_cid) = match existing { Some(p) => (p.display_name, p.bio, p.avatar_cid), @@ -1775,7 +1775,7 @@ impl Node { &self, author: &NodeId, ) -> anyhow::Result<(String, String, Option<[u8; 32]>)> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.resolve_display_for_peer(author, &self.node_id) } @@ -1784,13 +1784,13 @@ impl Node { &self, circle_name: &str, ) -> anyhow::Result> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.get_circle_profile(&self.node_id, circle_name) } /// Get the public_visible setting for our own profile. pub async fn get_public_visible(&self) -> anyhow::Result { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; Ok(storage .get_profile(&self.node_id)? .map(|p| p.public_visible) @@ -1801,13 +1801,13 @@ impl Node { /// Get a setting value by key. pub async fn get_setting(&self, key: &str) -> anyhow::Result> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.get_setting(key) } /// Set a setting value (upsert). pub async fn set_setting(&self, key: &str, value: &str) -> anyhow::Result<()> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.set_setting(key, value) } @@ -1816,7 +1816,7 @@ impl Node { /// Get cache statistics: (used_bytes, max_bytes, blob_count). /// max_bytes comes from the `cache_size_bytes` setting (default 1 GB, 0 = unlimited). 
pub async fn get_cache_stats(&self) -> anyhow::Result<(u64, u64, u64)> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let used = storage.total_blob_bytes()?; let count = storage.count_blobs()?; let max_str = storage.get_setting("cache_size_bytes")?.unwrap_or_default(); @@ -1836,7 +1836,7 @@ impl Node { let staleness_ms = 3600 * 1000; let (candidates, follows, audience_members) = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let candidates = storage.get_eviction_candidates(staleness_ms)?; let follows = storage.list_follows().unwrap_or_default(); let audience = storage.list_audience_members().unwrap_or_default(); @@ -1882,25 +1882,25 @@ impl Node { /// Get seen engagement counts for a post. pub async fn get_seen_engagement(&self, post_id: &PostId) -> anyhow::Result<(u32, u32)> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.get_seen_engagement(post_id) } /// Mark a post's engagement as seen (upsert). pub async fn set_seen_engagement(&self, post_id: &PostId, react_count: u32, comment_count: u32) -> anyhow::Result<()> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.set_seen_engagement(post_id, react_count, comment_count) } /// Get last-read timestamp for a conversation partner. pub async fn get_last_read_message(&self, partner_id: &NodeId) -> anyhow::Result { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.get_last_read_message(partner_id) } /// Mark a conversation as read up to the given timestamp. pub async fn set_last_read_message(&self, partner_id: &NodeId, timestamp_ms: u64) -> anyhow::Result<()> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.set_last_read_message(partner_id, timestamp_ms) } @@ -1908,7 +1908,7 @@ impl Node { pub async fn delete_post(&self, post_id: &PostId) -> anyhow::Result<()> { let post = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage .get_post(post_id)? .ok_or_else(|| anyhow::anyhow!("post not found"))? @@ -1931,7 +1931,7 @@ impl Node { // Collect blob CIDs + CDN peers before cleanup let blob_cdn_info: Vec<([u8; 32], Vec<(NodeId, Vec)>, Option<(NodeId, Vec)>)> = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let cids = storage.get_blobs_for_post(post_id).unwrap_or_default(); cids.into_iter().map(|cid| { let downstream = storage.get_blob_downstream(&cid).unwrap_or_default(); @@ -1942,7 +1942,7 @@ impl Node { // Clean up blobs (DB metadata + CDN metadata + filesystem) let blob_cids = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let cids = storage.delete_blobs_for_post(post_id)?; for cid in &cids { let _ = storage.cleanup_cdn_for_blob(cid); @@ -1956,7 +1956,7 @@ impl Node { } { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.store_delete(&record)?; storage.apply_delete(&record)?; } @@ -1978,7 +1978,7 @@ impl Node { mode: RevocationMode, ) -> anyhow::Result> { let (post, visibility) = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage .get_post_with_visibility(post_id)? .ok_or_else(|| anyhow::anyhow!("post not found"))? 
@@ -2018,7 +2018,7 @@ impl Node { recipients: new_wrapped, }; { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.update_post_visibility(post_id, &new_vis)?; } @@ -2052,7 +2052,7 @@ impl Node { let new_post_id = compute_post_id(&new_post); { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.store_post_with_visibility(&new_post_id, &new_post, &new_vis)?; } @@ -2079,7 +2079,7 @@ impl Node { mode: RevocationMode, ) -> anyhow::Result { let posts = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.find_posts_by_circle_intent(circle_name, &self.node_id)? }; @@ -2106,7 +2106,7 @@ impl Node { } pub async fn get_redundancy_summary(&self) -> anyhow::Result<(usize, usize, usize, usize)> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.get_redundancy_summary(&self.node_id, 3_600_000) } @@ -2139,7 +2139,7 @@ impl Node { // Step 0: Try social route cache (skipped for known-unreachable peers) if !skip_direct { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; if let Some(route) = storage.get_social_route(&peer_id)? { // Try cached addresses directly for addr in &route.addresses { @@ -2262,7 +2262,7 @@ impl Node { // Step 6: Relay introduction — find relay peer(s) and request introduction { let on_cooldown = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.is_relay_cooldown(&peer_id, 300_000).unwrap_or(false) }; @@ -2304,7 +2304,7 @@ impl Node { // Try direct connection to target's addresses (hole punch with scanning) let our_profile = self.network.conn_handle().our_nat_profile().await; let peer_profile = { - let s = self.storage.lock().await; + let s = self.storage.get().await; s.get_peer_nat_profile(&peer_id) }; if let Some(conn) = crate::connection::hole_punch_with_scanning( @@ -2375,7 +2375,7 @@ impl Node { // Record cooldown on failure (skip if all rejections were capacity-related) if !relay_candidates.is_empty() && !had_capacity_reject { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let _ = storage.record_relay_miss(&peer_id); } } @@ -2462,18 +2462,18 @@ impl Node { } pub async fn add_peer(&self, peer_id: NodeId) -> anyhow::Result<()> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.add_peer(&peer_id)?; Ok(()) } pub async fn list_peers(&self) -> anyhow::Result> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.list_peers() } pub async fn list_peer_records(&self) -> anyhow::Result> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.list_peer_records() } @@ -2487,7 +2487,7 @@ impl Node { } pub async fn stats(&self) -> anyhow::Result { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; Ok(NodeStats { post_count: storage.post_count()?, peer_count: storage.list_peers()?.len(), @@ -2526,7 +2526,7 @@ impl Node { // Tiered: only pull for stale authors (4-hour default) let stale_authors = { - let storage = node.storage.lock().await; + let storage = node.storage.get().await; storage.get_stale_follows(4 * 3600 * 1000).unwrap_or_default() }; @@ -2652,7 +2652,7 @@ impl Node { // Gather anchors: known_anchors table, then anchor peers fallback let anchors: Vec<(crate::types::NodeId, Vec)> = { - let s = storage.lock().await; + let s = storage.get().await; 
let known = s.list_known_anchors().unwrap_or_default(); if !known.is_empty() { known @@ -2742,7 +2742,7 @@ impl Node { loop { interval.tick().await; let stale = { - let s = storage.lock().await; + let s = storage.get().await; s.list_stale_social_routes(interval_secs as u64 * 1000).unwrap_or_default() }; for route in stale { @@ -2753,7 +2753,7 @@ impl Node { ).await; match result { Ok(reply) => { - let s = storage.lock().await; + let s = storage.get().await; let addrs: Vec = reply.addresses.iter() .filter_map(|a| a.parse().ok()).collect(); let _ = s.touch_social_route_connect( @@ -2855,7 +2855,7 @@ impl Node { if conn_count < 10 { log_evt(ActivityLevel::Info, ActivityCategory::Anchor, format!("Low connections ({}), requesting referrals", conn_count), None); let known = { - let s = storage.lock().await; + let s = storage.get().await; s.list_known_anchors().unwrap_or_default() }; for (anchor_nid, anchor_addrs) in known { @@ -2965,7 +2965,7 @@ impl Node { if connected { true } else { - let storage = node.storage.lock().await; + let storage = node.storage.get().await; let in_n2 = storage.find_in_n2(&bootstrap_nid).unwrap_or_default(); if !in_n2.is_empty() { true @@ -3029,13 +3029,13 @@ impl Node { .unwrap_or_default() .as_millis() as u64 - max_age_ms; let stale = { - let s = storage.lock().await; + let s = storage.get().await; s.get_stale_manifests(cutoff).unwrap_or_default() }; for (cid, upstream_nid, _upstream_addrs) in &stale { // Get current updated_at for this manifest let current_updated_at = { - let s = storage.lock().await; + let s = storage.get().await; s.get_cdn_manifest(cid).ok().flatten() .and_then(|json| serde_json::from_str::(&json).ok()) .map(|m| m.updated_at) @@ -3045,7 +3045,7 @@ impl Node { Ok(Some(cdn_manifest)) => { if crypto::verify_manifest_signature(&cdn_manifest.author_manifest) { let author_json = serde_json::to_string(&cdn_manifest.author_manifest).unwrap_or_default(); - let s = storage.lock().await; + let s = storage.get().await; let _ = s.store_cdn_manifest( cid, &author_json, @@ -3082,7 +3082,7 @@ impl Node { /// Build our N+10:Addresses (our connected peers with their addresses). pub async fn build_peer_addresses(&self) -> Vec { let conns = self.network.connection_info().await; - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let mut result = Vec::new(); for (nid, kind, _) in conns { if nid == self.node_id { @@ -3110,7 +3110,7 @@ impl Node { /// List all social routes (for CLI/Tauri display). 
pub async fn list_social_routes(&self) -> anyhow::Result> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.list_social_routes() } @@ -3118,7 +3118,7 @@ impl Node { pub async fn request_audience(&self, node_id: &NodeId) -> anyhow::Result<()> { { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.store_audience(node_id, AudienceDirection::Outbound, AudienceStatus::Pending)?; } @@ -3134,7 +3134,7 @@ impl Node { pub async fn approve_audience(&self, node_id: &NodeId) -> anyhow::Result<()> { let connected = self.network.is_connected(node_id).await; { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.store_audience(node_id, AudienceDirection::Inbound, AudienceStatus::Approved)?; // Upsert social route (Audience or Mutual) @@ -3169,13 +3169,13 @@ impl Node { } pub async fn deny_audience(&self, node_id: &NodeId) -> anyhow::Result<()> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.store_audience(node_id, AudienceDirection::Inbound, AudienceStatus::Denied)?; Ok(()) } pub async fn remove_audience(&self, node_id: &NodeId) -> anyhow::Result<()> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.remove_audience(node_id, AudienceDirection::Inbound)?; // Downgrade or remove social route @@ -3193,7 +3193,7 @@ impl Node { } pub async fn list_audience_members(&self) -> anyhow::Result> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.list_audience_members() } @@ -3202,7 +3202,7 @@ impl Node { direction: AudienceDirection, status: Option, ) -> anyhow::Result> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.list_audience(direction, status) } @@ -3223,7 +3223,7 @@ impl Node { pub async fn delete_blob_with_cdn_notify(&self, cid: &[u8; 32]) -> anyhow::Result<()> { // Gather CDN peers before cleanup let (downstream, upstream) = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let ds = storage.get_blob_downstream(cid).unwrap_or_default(); let up = storage.get_blob_upstream(cid).ok().flatten(); (ds, up) @@ -3234,7 +3234,7 @@ impl Node { // Clean up local storage { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.cleanup_cdn_for_blob(cid)?; storage.remove_blob(cid)?; } @@ -3248,7 +3248,7 @@ impl Node { /// Evict lowest-priority blobs until total storage is under max_bytes. pub async fn evict_blobs(&self, max_bytes: u64) -> anyhow::Result { let total = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.total_blob_bytes()? }; @@ -3264,7 +3264,7 @@ impl Node { let staleness_ms = 3600 * 1000; let (candidates, follows, audience_members) = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let candidates = storage.get_eviction_candidates(staleness_ms)?; let follows = storage.list_follows().unwrap_or_default(); let audience = storage.list_audience_members().unwrap_or_default(); @@ -3446,7 +3446,7 @@ impl Node { pub async fn generate_share_link(&self, post_id: &PostId) -> anyhow::Result> { // Look up the post to verify it's public and get the author let (post, visibility) = { - let store = self.storage.lock().await; + let store = self.storage.get().await; match store.get_post_with_visibility(post_id)? 
{ Some(pv) => pv, None => return Ok(None), @@ -3478,7 +3478,7 @@ impl Node { // For private reactions, look up the post author and encrypt let encrypted_payload = if private { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let post = storage.get_post(&post_id)? .ok_or_else(|| anyhow::anyhow!("post not found"))?; drop(storage); @@ -3505,7 +3505,7 @@ impl Node { }; // Store locally - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.store_reaction(&reaction)?; drop(storage); @@ -3521,7 +3521,7 @@ impl Node { network.propagate_engagement_diff(&post_id, &diff, &our_node_id).await; // Also send to all upstreams (toward author) — Phase 6 multi-upstream let upstreams = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.get_post_upstreams(&post_id).unwrap_or_default() }; for (up, _prio) in upstreams { @@ -3535,7 +3535,7 @@ impl Node { /// Remove a reaction from a post. pub async fn remove_reaction(&self, post_id: PostId, emoji: String) -> anyhow::Result<()> { let our_node_id = self.node_id; - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.remove_reaction(&our_node_id, &post_id, &emoji)?; drop(storage); @@ -3563,7 +3563,7 @@ impl Node { /// Get all reactions for a post. Decrypts private reactions if we're the post author. pub async fn get_reactions(&self, post_id: PostId) -> anyhow::Result> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let reactions = storage.get_reactions(&post_id)?; let post_info = storage.get_post(&post_id)?; drop(storage); @@ -3591,7 +3591,7 @@ impl Node { /// Get reaction counts grouped by emoji for a post. pub async fn get_reaction_counts(&self, post_id: PostId) -> anyhow::Result> { let our_node_id = self.node_id; - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let counts = storage.get_reaction_counts(&post_id, &our_node_id)?; Ok(counts) } @@ -3619,7 +3619,7 @@ impl Node { deleted_at: None, }; - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.store_comment(&comment)?; drop(storage); @@ -3635,7 +3635,7 @@ impl Node { network.propagate_engagement_diff(&post_id, &diff, &our_node_id).await; // Also send to all upstreams (toward author) — Phase 6 multi-upstream let upstreams = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.get_post_upstreams(&post_id).unwrap_or_default() }; for (up, _prio) in upstreams { @@ -3658,7 +3658,7 @@ impl Node { .duration_since(std::time::UNIX_EPOCH)? .as_millis() as u64; - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.edit_comment(&our_node_id, &post_id, timestamp_ms, &new_content)?; drop(storage); @@ -3679,7 +3679,7 @@ impl Node { network.propagate_engagement_diff(&post_id, &diff, &our_node_id).await; // Phase 6: send to all upstreams let upstreams = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.get_post_upstreams(&post_id).unwrap_or_default() }; for (up, _prio) in upstreams { @@ -3700,7 +3700,7 @@ impl Node { .duration_since(std::time::UNIX_EPOCH)? 
.as_millis() as u64; - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.delete_comment(&our_node_id, &post_id, timestamp_ms)?; drop(storage); @@ -3720,7 +3720,7 @@ impl Node { network.propagate_engagement_diff(&post_id, &diff, &our_node_id).await; // Phase 6: send to all upstreams let upstreams = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.get_post_upstreams(&post_id).unwrap_or_default() }; for (up, _prio) in upstreams { @@ -3732,7 +3732,7 @@ impl Node { /// Get all comments for a post. pub async fn get_comments(&self, post_id: PostId) -> anyhow::Result> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let comments = storage.get_comments(&post_id)?; Ok(comments) } @@ -3743,7 +3743,7 @@ impl Node { post_id: PostId, policy: crate::types::CommentPolicy, ) -> anyhow::Result<()> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.set_comment_policy(&post_id, &policy)?; drop(storage); @@ -3768,14 +3768,14 @@ impl Node { /// Get the comment policy for a post. pub async fn get_comment_policy(&self, post_id: PostId) -> anyhow::Result> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let policy = storage.get_comment_policy(&post_id)?; Ok(policy) } /// Get the full comment thread for a post (inline comments + split posts merged). pub async fn get_comment_thread(&self, post_id: PostId) -> anyhow::Result> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; // 1. Inline comments let mut comments = storage.get_comments(&post_id)?; @@ -3807,7 +3807,7 @@ impl Node { &self, post_id: &PostId, ) -> anyhow::Result)>> { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let (post, visibility) = match storage.get_post_with_visibility(post_id)? 
{ Some(pv) => pv, None => return Ok(None), @@ -3834,7 +3834,7 @@ impl Node { } } PostVisibility::GroupEncrypted { group_id, epoch, wrapped_cek } => { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let group_seeds = storage.get_all_group_seeds_map().unwrap_or_default(); let group_key_record = storage.get_group_key(group_id)?; let members = if let Some(ref gk) = group_key_record { @@ -3893,7 +3893,7 @@ impl Node { let encrypted = crypto::encrypt_slot(&plaintext, &slot_key)?; // Update the BlobHeader - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let header = storage.get_blob_header(&post_id)?; let mut blob_header = if let Some((json, _ts)) = header { serde_json::from_str::(&json) @@ -3947,7 +3947,7 @@ impl Node { self.network.propagate_engagement_diff(&post_id, &diff, &self.node_id).await; // Phase 6: send to all upstreams let upstreams = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.get_post_upstreams(&post_id).unwrap_or_default() }; for (up, _prio) in upstreams { @@ -3982,7 +3982,7 @@ impl Node { let encrypted = crypto::encrypt_slot(&plaintext, &slot_key)?; // Find first available comment slot or add new ones - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let header = storage.get_blob_header(&post_id)?; let mut blob_header = if let Some((json, _ts)) = header { serde_json::from_str::(&json) @@ -4067,7 +4067,7 @@ impl Node { self.network.propagate_engagement_diff(&post_id, &diff, &self.node_id).await; // Phase 6: send to all upstreams let upstreams = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; storage.get_post_upstreams(&post_id).unwrap_or_default() }; for (up, _prio) in upstreams { @@ -4086,7 +4086,7 @@ impl Node { .ok_or_else(|| anyhow::anyhow!("not a participant of this encrypted post"))?; let slot_key = crypto::derive_slot_key(&cek); - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let header = storage.get_blob_header(&post_id)?; drop(storage); @@ -4148,7 +4148,7 @@ impl Node { .ok_or_else(|| anyhow::anyhow!("not a participant of this encrypted post"))?; let slot_key = crypto::derive_slot_key(&cek); - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let header = storage.get_blob_header(&post_id)?; drop(storage); @@ -4296,7 +4296,7 @@ impl Node { // Single lock: get under-replicated posts AND peer roles/pressure let (under_replicated, suitable_peers) = { - let storage = self.storage.lock().await; + let storage = self.storage.get().await; let recent_ids = match storage.get_own_recent_post_ids(&self.node_id, since_ms) { Ok(ids) => ids, Err(e) => { diff --git a/crates/core/src/protocol.rs b/crates/core/src/protocol.rs index e5abb47..50e2565 100644 --- a/crates/core/src/protocol.rs +++ b/crates/core/src/protocol.rs @@ -287,7 +287,7 @@ pub struct RefuseRedirectPayload { } /// Worm lookup query (bi-stream) — searches for nodes, posts, or blobs -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct WormQueryPayload { pub worm_id: WormId, pub target: NodeId, diff --git a/crates/core/src/storage.rs b/crates/core/src/storage.rs index dbea1d4..98df0dc 100644 --- a/crates/core/src/storage.rs +++ b/crates/core/src/storage.rs @@ -30,6 +30,44 @@ pub struct Storage { conn: Connection, } +/// Pool of Storage connections for concurrent SQLite access in WAL mode. 
+/// Each connection is independently locked — readers don't block each other.
+/// Uses tokio::sync::Mutex so guards are Send (safe across .await points).
+pub struct StoragePool {
+    slots: Vec<tokio::sync::Mutex<Storage>>,
+}
+
+const STORAGE_POOL_SIZE: usize = 8;
+
+impl StoragePool {
+    /// Create a pool of Storage connections to the same database.
+    pub fn open(path: impl AsRef<Path>) -> anyhow::Result<Self> {
+        let mut slots = Vec::with_capacity(STORAGE_POOL_SIZE);
+        // First connection does schema init + migration
+        let first = Storage::open(path.as_ref())?;
+        slots.push(tokio::sync::Mutex::new(first));
+        // Additional connections just open + WAL mode (schema already exists)
+        for _ in 1..STORAGE_POOL_SIZE {
+            let conn = Connection::open(path.as_ref())?;
+            conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA busy_timeout=5000;")?;
+            slots.push(tokio::sync::Mutex::new(Storage { conn }));
+        }
+        Ok(Self { slots })
+    }
+
+    /// Get an available Storage connection. Tries each slot with try_lock;
+    /// if all busy, awaits the first (rare under normal load).
+    pub async fn get(&self) -> tokio::sync::MutexGuard<'_, Storage> {
+        for slot in &self.slots {
+            if let Ok(guard) = slot.try_lock() {
+                return guard;
+            }
+        }
+        // All busy — await the first
+        self.slots[0].lock().await
+    }
+}
+
 /// Current schema version. Bump this when making schema or data changes
 /// that require migration. Old databases with a lower version will be migrated.
 /// If the gap is too large (major version mismatch), the DB is reset instead.
diff --git a/crates/core/src/web.rs b/crates/core/src/web.rs
index 4fc5b52..b9fa902 100644
--- a/crates/core/src/web.rs
+++ b/crates/core/src/web.rs
@@ -126,7 +126,7 @@ async fn serve_post(stream: &mut TcpStream, path: &str, node: &Arc<Node>, browse
     // Single lock: gather holders, local post, AND author name if local
     let (holders, local_post, local_author_name) = {
-        let store = node.storage.lock().await;
+        let store = node.storage.get().await;
         let mut holders = Vec::new();
         if let Some(author) = author_id {
@@ -190,7 +190,7 @@ async fn serve_post(stream: &mut TcpStream, path: &str, node: &Arc<Node>, browse
         Ok(Ok(Some(sync_post))) => {
             // Single lock: store post AND get author name
             let author_name = {
-                let store = node.storage.lock().await;
+                let store = node.storage.get().await;
                 let _ = store.store_post_with_visibility(
                     &sync_post.id, &sync_post.post, &sync_post.visibility,
                 );
@@ -230,7 +230,7 @@ async fn try_redirect(
     use crate::types::NatMapping;
     let post_hex = hex::encode(post_id);
-    let store = node.storage.lock().await;
+    let store = node.storage.get().await;
     // Classify holders into tiers
     let mut direct_candidates: Vec<(NodeId, String)> = Vec::new(); // http_addr known
@@ -354,7 +354,7 @@ async fn serve_blob(stream: &mut TcpStream, path: &str, node: &Arc<Node>) {
     // Check blobs table first, then scan post attachments (for posts stored via PostFetch
    // which don't populate the blobs table).
let (mime_type, author_id) = { - let store = node.storage.lock().await; + let store = node.storage.get().await; // Try blobs table first if let Some(mime) = find_public_blob_mime(&store, &blob_id) { let author = store.get_blob_post_id(&blob_id).ok().flatten().and_then(|pid| { diff --git a/crates/tauri-app/Cargo.toml b/crates/tauri-app/Cargo.toml index 38291b4..567c027 100644 --- a/crates/tauri-app/Cargo.toml +++ b/crates/tauri-app/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "itsgoin-desktop" -version = "0.4.2" +version = "0.4.3" edition = "2021" [lib] diff --git a/crates/tauri-app/src/lib.rs b/crates/tauri-app/src/lib.rs index 1226695..21c246a 100644 --- a/crates/tauri-app/src/lib.rs +++ b/crates/tauri-app/src/lib.rs @@ -198,7 +198,7 @@ async fn post_to_dto( // Resolve intent kind from storage let intent_kind = { - let storage = node.storage.lock().await; + let storage = node.storage.get().await; match storage.get_post_intent(id) { Ok(Some(intent)) => match intent { VisibilityIntent::Public => "public".to_string(), @@ -237,14 +237,14 @@ async fn post_to_dto( .collect(); // Engagement data let reaction_counts = { - let storage = node.storage.lock().await; + let storage = node.storage.get().await; storage.get_reaction_counts(id, &node.node_id).unwrap_or_default() .into_iter() .map(|(emoji, count, reacted_by_me)| ReactionCountDto { emoji, count, reacted_by_me }) .collect() }; let comment_count = { - let storage = node.storage.lock().await; + let storage = node.storage.get().await; storage.get_comment_count(id).unwrap_or(0) }; @@ -286,7 +286,7 @@ async fn decrypt_just_created( } PostVisibility::GroupEncrypted { group_id, epoch, wrapped_cek } => { let seed_info = { - let storage = node.storage.lock().await; + let storage = node.storage.get().await; storage.get_all_group_seeds_map().ok() .and_then(|map| map.get(&(*group_id, *epoch)).copied()) }; @@ -319,7 +319,7 @@ async fn get_node_info(state: State<'_, AppState>) -> Result::try_from(pid_bytes.as_slice()) { - let storage = node.storage.lock().await; + let storage = node.storage.get().await; if let Ok(Some((_post, vis))) = storage.get_post_with_visibility(&post_id) { if !matches!(vis, PostVisibility::Public) { return Ok(None); @@ -524,7 +524,7 @@ async fn resolve_blob_data( // Try fetching from network if post_id provided if let Some(pid) = post_id { let post = { - let storage = node.storage.lock().await; + let storage = node.storage.get().await; storage.get_post(&pid).map_err(|e| e.to_string())? 
}; if let Some(post) = post { @@ -665,7 +665,7 @@ async fn connect_peer( // Store peer with addresses let ip_addrs: Vec<_> = addr.ip_addrs().copied().collect(); { - let storage = node.storage.lock().await; + let storage = node.storage.get().await; if ip_addrs.is_empty() { storage.add_peer(&nid).map_err(|e| e.to_string())?; } else { @@ -720,7 +720,7 @@ async fn list_follows(state: State<'_, AppState>) -> Result, String _ => None, }; // Try to get peer record for address info - let storage = node.storage.lock().await; + let storage = node.storage.get().await; let rec = storage.get_peer_record(nid).ok().flatten(); drop(storage); let is_online = node.network.is_connected(nid).await @@ -766,7 +766,7 @@ async fn list_peers(state: State<'_, AppState>) -> Result, String> .map(|(nid, _, _)| nid) .collect(); let (social_ids, n2_ids, n3_ids) = { - let storage = node.storage.lock().await; + let storage = node.storage.get().await; let social: std::collections::HashSet<_> = storage .list_social_routes() .unwrap_or_default() @@ -994,7 +994,7 @@ async fn set_anchors( #[tauri::command] async fn list_anchor_peers(state: State<'_, AppState>) -> Result, String> { let node = state.inner(); - let storage = node.storage.lock().await; + let storage = node.storage.get().await; let records = storage.list_anchor_peers().map_err(|e| e.to_string())?; drop(storage); let mut dtos = Vec::with_capacity(records.len()); @@ -1026,7 +1026,7 @@ struct KnownAnchorDto { #[tauri::command] async fn list_known_anchors(state: State<'_, AppState>) -> Result, String> { let node = state.inner(); - let storage = node.storage.lock().await; + let storage = node.storage.get().await; let anchors = storage.list_known_anchors().map_err(|e| e.to_string())?; drop(storage); let mut dtos = Vec::with_capacity(anchors.len()); @@ -1444,7 +1444,7 @@ async fn get_badge_counts( last_feed_view_ms: u64, ) -> Result { let node = state.inner(); - let storage = node.storage.lock().await; + let storage = node.storage.get().await; // Feed badge: count non-DM posts from others newer than last_feed_view_ms let feed_posts = storage.get_feed().map_err(|e| e.to_string())?; @@ -1588,7 +1588,7 @@ async fn get_network_summary(state: State<'_, AppState>) -> Result) -> Result // Try known_anchors table first (populated by anchor register cycle), // fall back to anchor peers from the peers table (is_anchor = true) let anchors: Vec<(NodeId, Vec)> = { - let storage = node.storage.lock().await; + let storage = node.storage.get().await; let known = storage.list_known_anchors().unwrap_or_default(); if !known.is_empty() { known @@ -2096,7 +2096,7 @@ pub fn run() { // Start blob eviction cycle (every 5 min) let cache_max_bytes: u64 = { - let storage = n.storage.lock().await; + let storage = n.storage.get().await; storage.get_setting("cache_size_bytes") .ok() .flatten() diff --git a/crates/tauri-app/tauri.conf.json b/crates/tauri-app/tauri.conf.json index 9d03507..101faf6 100644 --- a/crates/tauri-app/tauri.conf.json +++ b/crates/tauri-app/tauri.conf.json @@ -1,6 +1,6 @@ { "productName": "itsgoin", - "version": "0.4.2", + "version": "0.4.3", "identifier": "com.itsgoin.app", "build": { "frontendDist": "../../frontend", diff --git a/frontend/app.js b/frontend/app.js index 806acab..87eff6e 100644 --- a/frontend/app.js +++ b/frontend/app.js @@ -3007,22 +3007,20 @@ $('#circle-profiles-toggle').addEventListener('click', () => { // --- Notifications popover --- // Text size toggle -const TEXT_SIZE_SCALES = { small: '100%', normal: '150%', large: '200%' }; -// Apply text size 
immediately (default Normal = 150%) -document.documentElement.style.fontSize = '150%'; -(async () => { - const saved = await invoke('get_setting', { key: 'text_size' }).catch(() => null) || 'normal'; - document.documentElement.style.fontSize = TEXT_SIZE_SCALES[saved] || '150%'; - document.querySelectorAll('.text-size-opt').forEach(b => { - b.classList.toggle('active', b.dataset.size === saved); - }); -})(); +const TEXT_SIZE_SCALES = { xsmall: '75%', small: '100%', normal: '125%', large: '150%', xlarge: '200%' }; +// Apply text size immediately from localStorage cache (no async wait) +const _cachedTextSize = localStorage.getItem('text_size') || 'normal'; +document.documentElement.style.fontSize = TEXT_SIZE_SCALES[_cachedTextSize] || '125%'; +document.querySelectorAll('.text-size-opt').forEach(b => { + b.classList.toggle('active', b.dataset.size === _cachedTextSize); +}); document.querySelectorAll('.text-size-opt').forEach(btn => { btn.addEventListener('click', async () => { const size = btn.dataset.size; document.documentElement.style.fontSize = TEXT_SIZE_SCALES[size] || ''; document.querySelectorAll('.text-size-opt').forEach(b => b.classList.remove('active')); btn.classList.add('active'); + localStorage.setItem('text_size', size); await invoke('set_setting', { key: 'text_size', value: size }).catch(() => {}); toast('Text size updated'); }); diff --git a/frontend/index.html b/frontend/index.html index e170984..9cf898b 100644 --- a/frontend/index.html +++ b/frontend/index.html @@ -30,11 +30,11 @@
@@ -229,9 +229,11 @@

Text Size

-        <button class="text-size-opt" data-size="small">Small</button>
-        <button class="text-size-opt" data-size="normal">Normal</button>
-        <button class="text-size-opt" data-size="large">Large</button>
+        <button class="text-size-opt" data-size="xsmall">XS</button>
+        <button class="text-size-opt" data-size="small">S</button>
+        <button class="text-size-opt" data-size="normal">M</button>
+        <button class="text-size-opt" data-size="large">L</button>
+        <button class="text-size-opt" data-size="xlarge">XL</button>
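The node.rs hunks above all reduce to the same mechanical rewrite: `self.storage.lock().await` becomes `self.storage.get().await` against the new StoragePool. A minimal sketch of what that buys, assuming a hypothetical AppNode holder (the real Node wiring differs) but using the real StoragePool::get, Storage::list_follows, and Storage::get_feed from the diffs above: two readers proceed in parallel because each get() claims its own pool slot instead of contending on a single process-wide mutex.

    use std::sync::Arc;
    use crate::storage::StoragePool;

    // Hypothetical holder; the real Node keeps Arc<StoragePool> the same way.
    struct AppNode {
        storage: Arc<StoragePool>,
    }

    async fn concurrent_reads(node: Arc<AppNode>) -> anyhow::Result<()> {
        let follows = {
            let n = node.clone();
            // Claims the first free pool slot via try_lock.
            tokio::spawn(async move { n.storage.get().await.list_follows() })
        };
        let feed = {
            let n = node.clone();
            // If the first slot is still held, get() falls through to a free
            // slot, so neither read blocks the other.
            tokio::spawn(async move { n.storage.get().await.get_feed() })
        };
        let (_follows, _feed) = (follows.await??, feed.await??);
        Ok(())
    }

Writes still serialize, but at the SQLite level (WAL plus busy_timeout) rather than behind one Rust lock.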
diff --git a/frontend/style.css b/frontend/style.css index ab018c6..8b2a718 100644 --- a/frontend/style.css +++ b/frontend/style.css @@ -81,13 +81,25 @@ header h1 { font-size: clamp(1.4rem, 2.5vw, 2rem); color: #7fdbca; margin: 0; } .compose-right { display: flex; align-items: center; gap: 0.5rem; flex-shrink: 0; } .compose-left { display: flex; flex-direction: column; gap: 0.25rem; min-width: 0; } -/* Tabs */ +/* Tabs — desktop (top bar) */ #tabs { display: flex; gap: 0; margin-bottom: 1rem; border-bottom: 1px solid #333; } .tab { background: none; border: none; color: #99a; padding: 0.5rem 0.6rem; cursor: pointer; border-bottom: 2px solid transparent; font-size: 0.82rem; transition: color 0.15s, border-color 0.15s; position: relative; flex: 1; text-align: center; white-space: nowrap; } .tab:hover { color: #ccd; } .tab.active { color: #7fdbca; border-bottom-color: #7fdbca; } +.tab-icon { display: none; } .tab-badge { display: inline-flex; align-items: center; justify-content: center; background: #0f3460; color: #7fdbca; font-size: 0.6rem; min-width: 1.1rem; height: 1.1rem; border-radius: 0.55rem; padding: 0 0.3rem; margin-left: 0.25rem; font-family: system-ui, sans-serif; vertical-align: middle; } +/* Tabs — mobile/tablet (bottom nav bar) */ +@media (max-width: 768px) { + #tabs { position: fixed; bottom: 0; left: 0; right: 0; z-index: 900; background: #0a0a1a; border-bottom: none; border-top: 1px solid #333; margin-bottom: 0; padding: 0; padding-bottom: env(safe-area-inset-bottom, 0); } + .tab { flex-direction: column; align-items: center; padding: 0.4rem 0.2rem 0.3rem; border-bottom: none; border-top: 2px solid transparent; font-size: 0.6rem; gap: 0.15rem; display: flex; } + .tab.active { border-bottom: none; border-top-color: #7fdbca; } + .tab-icon { display: block; font-size: 1.2rem; line-height: 1; } + .tab-badge { position: absolute; top: 0.1rem; right: 0.2rem; margin-left: 0; font-size: 0.5rem; min-width: 0.9rem; height: 0.9rem; border-radius: 0.45rem; } + main { padding-bottom: 4rem; } + .toast { bottom: 4.5rem; } +} + /* Views / tab content transitions */ .view { display: none; animation: viewFadeIn 0.2s ease-out; } .view.active { display: block; } diff --git a/website/design.html b/website/design.html index 8aec760..f46608a 100644 --- a/website/design.html +++ b/website/design.html @@ -44,7 +44,8 @@

This is the canonical technical reference for ItsGoin. It describes the vision, the architecture, and the current state of every subsystem — with full implementation detail. This document is versioned; each update records what changed.

 Changelog
-v0.4.2 (2026-03-22): Welcome screen — startup shows “How’s it goin?” with staggered counters (connections, posts, messages, reacts, comments) while backend bootstraps. Status ticker — header ticker for new posts, messages, reactions, comments, connection changes. Notification improvements — Tauri plugin → Web Notification → notify-rust fallback chain, Linux native notifications. Responsive text scaling — Small/Normal/Large (100%/150%/200%), persisted via settings. Diagnostics popover — diagnostics moved from inline section to overlay, connections on-demand, timers removed. Share details lightbox with QR code. Connect string prefers external address (UPnP/public IPv6/observed). Stale N1 fix — disconnected social routes excluded from N1 share. Replication handler fix — actively fetches posts + blobs from requester after accepting replication. Hole punch fix — target-side registers publicly routable remote address for relay introduction. Replication semaphore (3 concurrent max). Peer labels show truncated node ID.
+v0.4.3 (2026-03-22): Lock contention overhaul — all conn_mgr lock holds during network I/O eliminated. PostFetch, TcpPunch, PullFromPeer, FetchEngagement, ResolveAddress, AnchorProbe, WormLookup, ContentSearch now use brief locks for data gathering only. Bi-stream handlers (BlobRequest, WormQuery, RelayIntroduce, PostFetchRequest, ManifestRefresh) fully lock-free for I/O. ConnectionActor hoists shared Arcs (storage, blob_store, endpoint) for lock-free access. ResolveAddress adds a 5s per-query timeout (was unbounded). Worm cascade uses connection snapshots. Initial exchange failure now aborts mesh upgrade (was silently continuing). connect_to_peer/connect_to_anchor use a 15s timeout. StoragePool — 8 concurrent SQLite connections in WAL mode replace the single Mutex<Storage>. Reads run fully parallel; writes serialize only at the SQLite level. Bottom nav bar for mobile/tablet (≤768px) with icon tabs. Text sizes: XS 75%, S 100%, M 125% (default), L 150%, XL 200%. Text size persisted to localStorage for instant restore.
+v0.4.2 (2026-03-22): Welcome screen — startup shows “How’s it goin?” with staggered counters (connections, posts, messages, reacts, comments) while backend bootstraps. Status ticker — header ticker for new posts, messages, reactions, comments, connection changes. Notification improvements — Tauri plugin → Web Notification → notify-rust fallback chain, Linux native notifications. Responsive text scaling — Small/Normal/Large (100%/150%/200%), persisted via settings. Diagnostics popover — diagnostics moved from inline section to overlay, connections on-demand, timers removed. Share details lightbox with QR code. Connect string prefers external address (UPnP/public IPv6/observed). Stale N1 fix — disconnected social routes excluded from N1 share. Replication handler fix — actively fetches posts + blobs from requester after accepting replication. Hole punch fix — target-side registers publicly routable remote address for relay introduction. Replication semaphore (3 concurrent max). Peer labels show truncated node ID.

v0.4.1 (2026-03-21): Security hardening — reaction signatures (ed25519), comment signature verification on receipt, reaction removal authorization, BlobHeader author verification. Lock contention fixes — ManifestPush discovery (cm lock released during I/O), pull request handler (filter without lock), pull sender (split into brief locks), engagement checker (batch writes per chunk). Data cleanup — post deletion cleans downstream/upstream/seen tables.

v0.4.0 (2026-03-21): Protocol v4 — header-driven sync. ManifestPush as primary post notification. Slim PullSyncRequest (per-author timestamps, not full post ID list). Tiered engagement checks (5min/1hr/4hr/24hr by content age). Multi-upstream (3 max) with fallback chain. Auto-prefetch followed authors <90d. Self Last Encounter per-author tracking. Encrypted-but-not-for-us CDN caching. Serial engagement polling. ~90% bandwidth reduction for established nodes.

v0.3.6 (2026-03-20): Active CDN replication — all devices proactively replicate recent posts to peers (desktops > anchors > phones priority). ReplicationRequest/Response (0xE1/0xE2). Device roles (Intermittent/Available/Persistent) advertised in InitialExchange. Bandwidth budgets: replication (pull to cache) + delivery (serve requests), hourly auto-reset, phones 100MB/1GB, desktops 200MB/2GB, anchors 200MB/1GB. Cache management: 1GB default, configurable, eviction cycle activated with share-link priority boost. Engagement distribution fix — BlobHeader JSON rebuilt after diff ops. Tombstone system — deleted reactions/comments tombstoned, propagate via pull sync. Persistent notifications via seen_engagement/seen_messages tables. DOS hardening: fan-out cap (10), prefetch cap (20), downstream registration cap (50), delivery budget enforcement. Pull preference reordered: non-anchors first. Network indicator — header dot (black/red/yellow/green) + capability labels. Tab badges — contextual counts (new posts, engagement, online, unread). Message read tracking on open/close/send. Stats bar removed.

diff --git a/website/download.html b/website/download.html index 9c703fb..e7269a2 100644 --- a/website/download.html +++ b/website/download.html @@ -25,16 +25,16 @@

Download ItsGoin

Available for Android and Linux. Free and open source.

-Version 0.4.2 — March 22, 2026
+Version 0.4.3 — March 22, 2026
@@ -46,7 +46,7 @@

Android

 1. Download the APK — Tap the button above. Your browser may warn that this type of file can be harmful — tap Download anyway.
-2. Open the file — When the download finishes, tap the notification or find itsgoin-0.4.2.apk in your Downloads folder and tap it.
+2. Open the file — When the download finishes, tap the notification or find itsgoin-0.4.3.apk in your Downloads folder and tap it.
 3. Allow installation — Android will ask you to allow installs from this source. Tap Settings, toggle "Allow from this source", then go back and tap Install.
 4. Launch the app — Once installed, tap Open or find ItsGoin in your app drawer.
@@ -59,8 +59,8 @@

Linux (AppImage)

 1. Download the AppImage — Click the button above to download.
-2. Make it executable — Open a terminal and run:
-   chmod +x itsgoin_0.4.2_amd64.AppImage
-3. Run it — Double-click the file, or from the terminal:
-   ./itsgoin_0.4.2_amd64.AppImage
+2. Make it executable — Open a terminal and run:
+   chmod +x itsgoin_0.4.3_amd64.AppImage
+3. Run it — Double-click the file, or from the terminal:
+   ./itsgoin_0.4.3_amd64.AppImage
Note: If it doesn't launch, you may need to install FUSE:
sudo apt install libfuse2 (Debian/Ubuntu) or sudo dnf install fuse (Fedora). @@ -71,6 +71,17 @@

Changelog

+v0.4.3 — March 22, 2026
+  • Lock contention overhaul — All conn_mgr lock holds during network I/O eliminated across 14 handlers. Brief locks for data gathering only; all network operations run lock-free.
+  • StoragePool — 8 concurrent SQLite connections in WAL mode replace the single Mutex<Storage>. Reads run fully parallel; writes serialize only at the SQLite level.
+  • Initial exchange fix — Failed initial exchanges now abort the mesh upgrade instead of silently continuing with a broken connection.
+  • Connect timeout — connect_to_peer and connect_to_anchor now use a consistent 15s timeout. ResolveAddress adds a 5s per-query timeout (was unbounded).
+  • Worm cascade unlock — WormLookup, ContentSearch, and WormQuery use connection snapshots for lock-free fan-out.
+  • Bottom nav bar — Mobile/tablet (≤768px) gets a fixed bottom navigation bar with icon tabs. Desktop keeps the top tab bar.
+  • Text size update — Five options: XS (75%), S (100%), M (125% default), L (150%), XL (200%). Persisted to localStorage for instant restore on startup.
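A sketch of the bounded-wait pattern behind the Connect timeout bullet above: tokio::time::timeout wrapped around the query future. The resolver here is a placeholder; the real code applies 5s per ResolveAddress query and 15s to connect_to_peer/connect_to_anchor.

    use std::time::Duration;

    // Placeholder for the real resolve/connect future.
    async fn resolve_query() -> anyhow::Result<String> {
        Ok("203.0.113.7:4433".into())
    }

    async fn resolve_bounded() -> anyhow::Result<String> {
        // Before v0.4.3 this await was unbounded; now it has a hard deadline.
        match tokio::time::timeout(Duration::from_secs(5), resolve_query()).await {
            Ok(result) => result,
            Err(_elapsed) => anyhow::bail!("ResolveAddress query timed out"),
        }
    }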
v0.4.2 — March 22, 2026
  • Welcome screen — Startup shows “How’s it goin?” with staggered counters (connections, posts, messages, reacts, comments) while the backend bootstraps. Replaces the blank-screen wait.