use std::net::SocketAddr; use std::path::Path; use rusqlite::{params, Connection}; use crate::types::{ Attachment, Circle, CircleProfile, CommentPolicy, DeleteRecord, FollowVisibility, GossipPeerInfo, GroupEpoch, GroupId, GroupKeyRecord, GroupMemberKey, InlineComment, ManifestEntry, NodeId, PeerRecord, PeerSlotKind, PeerWithAddress, Post, PostId, PostVisibility, PostingIdentity, PublicProfile, Reaction, ReachMethod, SocialRelation, SocialRouteEntry, SocialStatus, ThreadMeta, VisibilityIntent, }; /// Direction for file_holders entries: whether we sent the file to this peer, /// received it from them, or both. Not load-bearing for propagation decisions — /// any holder can serve as a diff target — but retained for potential reuse. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum HolderDirection { Sent, Received, Both, } impl HolderDirection { pub fn as_str(&self) -> &'static str { match self { HolderDirection::Sent => "sent", HolderDirection::Received => "received", HolderDirection::Both => "both", } } } /// Blob metadata for eviction scoring. pub struct EvictionCandidate { pub cid: [u8; 32], pub post_id: PostId, pub author: NodeId, pub size_bytes: u64, pub created_at: u64, pub last_accessed_at: u64, pub pinned: bool, pub peer_copies: u32, /// Number of downstream CDN peers — proxy for share-link popularity. pub downstream_count: u32, } pub struct Storage { conn: Connection, } /// Pool of Storage connections for concurrent SQLite access in WAL mode. /// Each connection is independently locked — readers don't block each other. /// Uses tokio::sync::Mutex so guards are Send (safe across .await points). pub struct StoragePool { slots: Vec>, } /// Pool size: 4 connections balances parallelism with resource constraints (Android fd limits). const STORAGE_POOL_SIZE: usize = 4; impl StoragePool { /// Create a pool of Storage connections to the same database. 
pub fn open(path: impl AsRef) -> anyhow::Result { let mut slots = Vec::with_capacity(STORAGE_POOL_SIZE); // First connection does schema init + migration let first = Storage::open(path.as_ref())?; slots.push(tokio::sync::Mutex::new(first)); // Additional connections just open + WAL mode (schema already exists) for _ in 1..STORAGE_POOL_SIZE { let conn = Connection::open(path.as_ref())?; conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA busy_timeout=5000;")?; slots.push(tokio::sync::Mutex::new(Storage { conn })); } Ok(Self { slots }) } /// Get an available Storage connection. Tries each slot with try_lock; /// if all busy, awaits the first (rare under normal load). pub async fn get(&self) -> tokio::sync::MutexGuard<'_, Storage> { for slot in &self.slots { if let Ok(guard) = slot.try_lock() { return guard; } } // All busy — await the first self.slots[0].lock().await } } /// Current schema version. Bump this when making schema or data changes /// that require migration. Old databases with a lower version will be migrated. /// If the gap is too large (major version mismatch), the DB is reset instead. const SCHEMA_VERSION: u32 = 2; /// Minimum schema version we can migrate from. Anything older gets a full reset. 
const MIN_MIGRATABLE_VERSION: u32 = 1; impl Storage { pub fn open(path: impl AsRef) -> anyhow::Result { let conn = Connection::open(path.as_ref())?; conn.execute_batch("PRAGMA journal_mode=WAL;")?; // Check schema version let db_version: u32 = conn.pragma_query_value(None, "user_version", |row| row.get(0))?; if db_version > 0 && db_version < MIN_MIGRATABLE_VERSION { // Too old to migrate — reset the database tracing::warn!( db_version, current = SCHEMA_VERSION, "Database schema too old to migrate, resetting" ); drop(conn); std::fs::remove_file(path.as_ref())?; let conn = Connection::open(path.as_ref())?; conn.execute_batch("PRAGMA journal_mode=WAL;")?; let storage = Self { conn }; storage.init_tables()?; storage.set_schema_version(SCHEMA_VERSION)?; return Ok(storage); } let storage = Self { conn }; storage.init_tables()?; storage.migrate()?; if db_version < SCHEMA_VERSION { // Run version-specific data migrations storage.migrate_data(db_version)?; storage.set_schema_version(SCHEMA_VERSION)?; tracing::info!(from = db_version, to = SCHEMA_VERSION, "Database schema upgraded"); } Ok(storage) } fn set_schema_version(&self, version: u32) -> anyhow::Result<()> { self.conn.pragma_update(None, "user_version", version)?; Ok(()) } /// Data migrations that run once when upgrading between schema versions. fn migrate_data(&self, from_version: u32) -> anyhow::Result<()> { if from_version < 2 { // v1 → v2: Clear stale N2/N3 entries and mesh_peers that may prevent // bootstrap reconnection. The node will rediscover peers on next startup. 
tracing::info!("Schema v2: clearing stale network state for fresh discovery"); let _ = self.conn.execute_batch( "DELETE FROM reachable_n2; DELETE FROM reachable_n3; DELETE FROM mesh_peers;" ); } Ok(()) } fn init_tables(&self) -> anyhow::Result<()> { self.conn.execute_batch( "CREATE TABLE IF NOT EXISTS posts ( id BLOB PRIMARY KEY, author BLOB NOT NULL, content TEXT NOT NULL, attachments TEXT NOT NULL DEFAULT '[]', timestamp_ms INTEGER NOT NULL ); CREATE TABLE IF NOT EXISTS peers ( node_id BLOB PRIMARY KEY, addresses TEXT NOT NULL DEFAULT '[]', last_seen INTEGER NOT NULL, introduced_by BLOB, is_anchor INTEGER NOT NULL DEFAULT 0, first_seen INTEGER NOT NULL DEFAULT 0 ); CREATE TABLE IF NOT EXISTS follows ( node_id BLOB PRIMARY KEY, visibility TEXT NOT NULL DEFAULT 'public' ); CREATE TABLE IF NOT EXISTS profiles ( node_id BLOB PRIMARY KEY, display_name TEXT NOT NULL, bio TEXT NOT NULL DEFAULT '', updated_at INTEGER NOT NULL ); CREATE TABLE IF NOT EXISTS circles ( name TEXT PRIMARY KEY, created_at INTEGER NOT NULL ); CREATE TABLE IF NOT EXISTS circle_members ( circle_name TEXT NOT NULL, node_id BLOB NOT NULL, added_at INTEGER NOT NULL, PRIMARY KEY (circle_name, node_id) ); CREATE TABLE IF NOT EXISTS deleted_posts ( post_id BLOB PRIMARY KEY, author BLOB NOT NULL, deleted_at INTEGER NOT NULL, signature BLOB NOT NULL ); CREATE TABLE IF NOT EXISTS post_replicas ( post_id BLOB NOT NULL, node_id BLOB NOT NULL, last_confirmed_ms INTEGER NOT NULL, PRIMARY KEY (post_id, node_id) ); CREATE TABLE IF NOT EXISTS peer_neighbors ( peer_id BLOB NOT NULL, neighbor_id BLOB NOT NULL, is_anchor INTEGER NOT NULL DEFAULT 0, reported_at INTEGER NOT NULL, PRIMARY KEY (peer_id, neighbor_id) ); CREATE INDEX IF NOT EXISTS idx_peer_neighbors_neighbor ON peer_neighbors(neighbor_id); -- v0.6.2: audience table removed. Upgraded DBs still have the -- orphan table; it's untouched by new code. New DBs don't get it. 
CREATE TABLE IF NOT EXISTS worm_cooldowns ( target_id BLOB PRIMARY KEY, failed_at INTEGER NOT NULL ); CREATE TABLE IF NOT EXISTS reachable_n2 ( reporter_node_id BLOB NOT NULL, reachable_node_id BLOB NOT NULL, updated_at INTEGER NOT NULL, PRIMARY KEY (reporter_node_id, reachable_node_id) ); CREATE INDEX IF NOT EXISTS idx_n2_reachable ON reachable_n2(reachable_node_id); CREATE TABLE IF NOT EXISTS reachable_n3 ( reporter_node_id BLOB NOT NULL, reachable_node_id BLOB NOT NULL, updated_at INTEGER NOT NULL, PRIMARY KEY (reporter_node_id, reachable_node_id) ); CREATE INDEX IF NOT EXISTS idx_n3_reachable ON reachable_n3(reachable_node_id); CREATE TABLE IF NOT EXISTS mesh_peers ( node_id BLOB NOT NULL PRIMARY KEY, slot_kind TEXT NOT NULL, priority INTEGER NOT NULL DEFAULT 0, connected_at INTEGER NOT NULL, last_diff_seq INTEGER NOT NULL DEFAULT 0 ); CREATE TABLE IF NOT EXISTS social_routes ( node_id BLOB NOT NULL PRIMARY KEY, addresses TEXT NOT NULL DEFAULT '[]', peer_addresses TEXT NOT NULL DEFAULT '[]', relation TEXT NOT NULL, status TEXT NOT NULL DEFAULT 'online', last_connected_ms INTEGER NOT NULL DEFAULT 0, last_seen_ms INTEGER NOT NULL DEFAULT 0, reach_method TEXT NOT NULL DEFAULT 'direct' ); CREATE TABLE IF NOT EXISTS reconnect_watchers ( target_node_id BLOB NOT NULL, watcher_node_id BLOB NOT NULL, added_at INTEGER NOT NULL, PRIMARY KEY (target_node_id, watcher_node_id) ); CREATE TABLE IF NOT EXISTS blobs ( cid BLOB PRIMARY KEY, post_id BLOB NOT NULL, author BLOB NOT NULL, size_bytes INTEGER NOT NULL, mime_type TEXT NOT NULL, created_at INTEGER NOT NULL, stored_at INTEGER NOT NULL, last_accessed_at INTEGER NOT NULL, pinned INTEGER NOT NULL DEFAULT 0 ); CREATE INDEX IF NOT EXISTS idx_blobs_post_id ON blobs(post_id); CREATE INDEX IF NOT EXISTS idx_posts_author ON posts(author); CREATE INDEX IF NOT EXISTS idx_posts_timestamp ON posts(timestamp_ms DESC); CREATE TABLE IF NOT EXISTS cdn_manifests ( cid BLOB PRIMARY KEY, manifest_json TEXT NOT NULL, author BLOB NOT NULL, 
updated_at INTEGER NOT NULL ); CREATE INDEX IF NOT EXISTS idx_cdn_manifests_author ON cdn_manifests(author); CREATE TABLE IF NOT EXISTS group_keys ( group_id BLOB PRIMARY KEY, circle_name TEXT NOT NULL, epoch INTEGER NOT NULL DEFAULT 1, group_public_key BLOB NOT NULL, group_seed BLOB, admin BLOB NOT NULL, created_at INTEGER NOT NULL ); CREATE INDEX IF NOT EXISTS idx_group_keys_circle ON group_keys(circle_name); CREATE TABLE IF NOT EXISTS group_member_keys ( group_id BLOB NOT NULL, member BLOB NOT NULL, epoch INTEGER NOT NULL, wrapped_group_key BLOB NOT NULL, PRIMARY KEY (group_id, member, epoch) ); CREATE TABLE IF NOT EXISTS group_seeds ( group_id BLOB NOT NULL, epoch INTEGER NOT NULL, group_seed BLOB NOT NULL, PRIMARY KEY (group_id, epoch) ); CREATE TABLE IF NOT EXISTS relay_cooldowns ( target_id BLOB PRIMARY KEY, failed_at INTEGER NOT NULL ); CREATE TABLE IF NOT EXISTS preferred_peers ( node_id BLOB PRIMARY KEY, agreed_at INTEGER NOT NULL ); CREATE TABLE IF NOT EXISTS circle_profiles ( author BLOB NOT NULL, circle_name TEXT NOT NULL, display_name TEXT NOT NULL DEFAULT '', bio TEXT NOT NULL DEFAULT '', avatar_cid BLOB, updated_at INTEGER NOT NULL, encrypted_payload TEXT, wrapped_cek BLOB, group_id BLOB, epoch INTEGER, PRIMARY KEY (author, circle_name) ); CREATE TABLE IF NOT EXISTS known_anchors ( node_id BLOB NOT NULL PRIMARY KEY, addresses TEXT NOT NULL DEFAULT '[]', last_seen_ms INTEGER NOT NULL, success_count INTEGER NOT NULL DEFAULT 0 ); CREATE TABLE IF NOT EXISTS blob_headers ( post_id BLOB PRIMARY KEY, author BLOB NOT NULL, header_json TEXT NOT NULL, updated_at INTEGER NOT NULL ); CREATE TABLE IF NOT EXISTS reactions ( reactor BLOB NOT NULL, post_id BLOB NOT NULL, emoji TEXT NOT NULL, timestamp_ms INTEGER NOT NULL, encrypted_payload TEXT, PRIMARY KEY (reactor, post_id, emoji) ); CREATE INDEX IF NOT EXISTS idx_reactions_post ON reactions(post_id); CREATE TABLE IF NOT EXISTS comments ( author BLOB NOT NULL, post_id BLOB NOT NULL, content TEXT NOT NULL, 
timestamp_ms INTEGER NOT NULL, signature BLOB NOT NULL, ref_post_id BLOB, PRIMARY KEY (author, post_id, timestamp_ms) ); CREATE INDEX IF NOT EXISTS idx_comments_post ON comments(post_id); CREATE TABLE IF NOT EXISTS comment_policies ( post_id BLOB PRIMARY KEY, policy_json TEXT NOT NULL ); CREATE TABLE IF NOT EXISTS thread_meta ( post_id BLOB PRIMARY KEY, parent_post_id BLOB NOT NULL ); CREATE INDEX IF NOT EXISTS idx_thread_meta_parent ON thread_meta(parent_post_id); CREATE TABLE IF NOT EXISTS settings ( key TEXT PRIMARY KEY, value TEXT NOT NULL ); CREATE TABLE IF NOT EXISTS post_hosts ( post_id BLOB NOT NULL, host TEXT NOT NULL, last_seen_ms INTEGER NOT NULL, PRIMARY KEY (post_id, host) ); CREATE TABLE IF NOT EXISTS seen_engagement ( post_id BLOB PRIMARY KEY, seen_react_count INTEGER NOT NULL DEFAULT 0, seen_comment_count INTEGER NOT NULL DEFAULT 0, updated_at INTEGER NOT NULL DEFAULT 0 ); CREATE TABLE IF NOT EXISTS seen_messages ( partner_id BLOB PRIMARY KEY, last_read_ms INTEGER NOT NULL DEFAULT 0 ); CREATE TABLE IF NOT EXISTS file_holders ( file_id BLOB NOT NULL, peer_id BLOB NOT NULL, peer_addresses TEXT NOT NULL DEFAULT '[]', last_interaction_ms INTEGER NOT NULL, direction TEXT NOT NULL, PRIMARY KEY (file_id, peer_id) ); CREATE INDEX IF NOT EXISTS idx_file_holders_recency ON file_holders(file_id, last_interaction_ms DESC); CREATE TABLE IF NOT EXISTS post_recipients ( post_id BLOB NOT NULL, recipient BLOB NOT NULL, PRIMARY KEY (post_id, recipient) ); CREATE INDEX IF NOT EXISTS idx_post_recipients_recipient ON post_recipients(recipient); CREATE TABLE IF NOT EXISTS posting_identities ( node_id BLOB PRIMARY KEY, secret_seed BLOB NOT NULL, display_name TEXT NOT NULL DEFAULT '', created_at INTEGER NOT NULL );", )?; Ok(()) } /// Run schema migrations for existing databases fn migrate(&self) -> anyhow::Result<()> { // Add attachments column if missing (old schema) let has_attachments = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('posts') WHERE 
name='attachments'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_attachments == 0 { self.conn.execute_batch( "ALTER TABLE posts ADD COLUMN attachments TEXT NOT NULL DEFAULT '[]';" )?; } // Add visibility column if missing (old schema) let has_visibility = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('follows') WHERE name='visibility'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_visibility == 0 { self.conn.execute_batch( "ALTER TABLE follows ADD COLUMN visibility TEXT NOT NULL DEFAULT 'public';" )?; } // Add visibility column to posts if missing (Phase D migration) let has_post_visibility = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('posts') WHERE name='visibility'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_post_visibility == 0 { self.conn.execute_batch( "ALTER TABLE posts ADD COLUMN visibility TEXT NOT NULL DEFAULT '\"Public\"';" )?; } // Add visibility_intent column to posts if missing (Phase D-2 migration) let has_visibility_intent = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('posts') WHERE name='visibility_intent'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_visibility_intent == 0 { self.conn.execute_batch( "ALTER TABLE posts ADD COLUMN visibility_intent TEXT;" )?; } // Add new peer columns if missing (Phase B migration) let has_addresses = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='addresses'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_addresses == 0 { self.conn.execute_batch( "ALTER TABLE peers ADD COLUMN addresses TEXT NOT NULL DEFAULT '[]'; ALTER TABLE peers ADD COLUMN introduced_by BLOB; ALTER TABLE peers ADD COLUMN is_anchor INTEGER NOT NULL DEFAULT 0; ALTER TABLE peers ADD COLUMN first_seen INTEGER NOT NULL DEFAULT 0;" )?; // Backfill first_seen from last_seen for existing rows self.conn.execute_batch( "UPDATE peers SET first_seen = last_seen WHERE first_seen = 0;" )?; } // Add anchors column to profiles if missing (Phase 
C migration) let has_anchors = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('profiles') WHERE name='anchors'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_anchors == 0 { self.conn.execute_batch( "ALTER TABLE profiles ADD COLUMN anchors TEXT NOT NULL DEFAULT '[]';" )?; } // Add is_wide_peer column to peers if missing (Phase F migration) let has_wide_peer = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='is_wide_peer'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_wide_peer == 0 { self.conn.execute_batch( "ALTER TABLE peers ADD COLUMN is_wide_peer INTEGER NOT NULL DEFAULT 0;" )?; } // Add recent_peers column to profiles if missing (Phase 7b migration) let has_recent_peers = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('profiles') WHERE name='recent_peers'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_recent_peers == 0 { self.conn.execute_batch( "ALTER TABLE profiles ADD COLUMN recent_peers TEXT NOT NULL DEFAULT '[]';" )?; } // Rename persistent_peers → routing_peers → mesh_peers (existing DBs) let _ = self.conn.execute_batch( "ALTER TABLE persistent_peers RENAME TO mesh_peers;" ); let _ = self.conn.execute_batch( "ALTER TABLE routing_peers RENAME TO mesh_peers;" ); // Add post_id/author/created_at/last_accessed_at columns to blobs if missing // (blobs are recoverable from filesystem + posts, so drop-and-recreate is safe) let has_blob_post_id = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('blobs') WHERE name='post_id'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_blob_post_id == 0 { self.conn.execute_batch( "DROP TABLE IF EXISTS blobs; CREATE TABLE blobs ( cid BLOB PRIMARY KEY, post_id BLOB NOT NULL, author BLOB NOT NULL, size_bytes INTEGER NOT NULL, mime_type TEXT NOT NULL, created_at INTEGER NOT NULL, stored_at INTEGER NOT NULL, last_accessed_at INTEGER NOT NULL, pinned INTEGER NOT NULL DEFAULT 0 );" )?; } // Add pinned column to blobs if missing let 
has_pinned = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('blobs') WHERE name='pinned'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_pinned == 0 { self.conn.execute_batch( "ALTER TABLE blobs ADD COLUMN pinned INTEGER NOT NULL DEFAULT 0;" )?; } // Add preferred_peers column to profiles if missing (Preferred Mesh Peers migration) let has_preferred_peers = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('profiles') WHERE name='preferred_peers'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_preferred_peers == 0 { self.conn.execute_batch( "ALTER TABLE profiles ADD COLUMN preferred_peers TEXT NOT NULL DEFAULT '[]';" )?; } // Add preferred_tree column to social_routes if missing (Preferred Tree migration) let has_pref_tree = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('social_routes') WHERE name='preferred_tree'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_pref_tree == 0 { self.conn.execute_batch( "ALTER TABLE social_routes ADD COLUMN preferred_tree TEXT NOT NULL DEFAULT '[]';" )?; } // Add public_visible column to profiles if missing (Phase D-4 migration) let has_public_visible = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('profiles') WHERE name='public_visible'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_public_visible == 0 { self.conn.execute_batch( "ALTER TABLE profiles ADD COLUMN public_visible INTEGER NOT NULL DEFAULT 1;" )?; } // Add avatar_cid column to profiles if missing (Phase D-4 migration) let has_avatar_cid = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('profiles') WHERE name='avatar_cid'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_avatar_cid == 0 { self.conn.execute_batch( "ALTER TABLE profiles ADD COLUMN avatar_cid BLOB;" )?; } // Add nat_type column to peers if missing (STUN NAT detection) let has_nat_type = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='nat_type'" )?.query_row([], |row| row.get::<_, 
i64>(0))?; if has_nat_type == 0 { self.conn.execute_batch( "ALTER TABLE peers ADD COLUMN nat_type TEXT DEFAULT 'unknown';" )?; } // Add nat_mapping column to peers if missing (Advanced NAT traversal) let has_nat_mapping = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='nat_mapping'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_nat_mapping == 0 { self.conn.execute_batch( "ALTER TABLE peers ADD COLUMN nat_mapping TEXT DEFAULT NULL;" )?; } // Add nat_filtering column to peers if missing (Advanced NAT traversal) let has_nat_filtering = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='nat_filtering'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_nat_filtering == 0 { self.conn.execute_batch( "ALTER TABLE peers ADD COLUMN nat_filtering TEXT DEFAULT NULL;" )?; } // Add http_capable and http_addr columns to peers (TCP direct serve) let has_http_capable = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='http_capable'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_http_capable == 0 { self.conn.execute_batch( "ALTER TABLE peers ADD COLUMN http_capable INTEGER DEFAULT 0; ALTER TABLE peers ADD COLUMN http_addr TEXT DEFAULT NULL;" )?; } // Add deleted_at column to reactions if missing (tombstone support) let has_deleted_at_react = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('reactions') WHERE name='deleted_at'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_deleted_at_react == 0 { self.conn.execute_batch( "ALTER TABLE reactions ADD COLUMN deleted_at INTEGER DEFAULT NULL;" )?; } // Add deleted_at column to comments if missing (tombstone support) let has_deleted_at_comment = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('comments') WHERE name='deleted_at'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_deleted_at_comment == 0 { self.conn.execute_batch( "ALTER TABLE comments ADD COLUMN deleted_at INTEGER DEFAULT NULL;" )?; } 
// v0.6.2: add ref_post_id for rich comments (preview-inline, // full-body-in-referenced-post). NULL for plain comments. let has_ref_post_id = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('comments') WHERE name='ref_post_id'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_ref_post_id == 0 { self.conn.execute_batch( "ALTER TABLE comments ADD COLUMN ref_post_id BLOB DEFAULT NULL;" )?; } // Add device_role column to peers if missing (Active CDN replication) let has_device_role = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='device_role'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_device_role == 0 { self.conn.execute_batch( "ALTER TABLE peers ADD COLUMN device_role TEXT DEFAULT NULL; ALTER TABLE peers ADD COLUMN cache_pressure INTEGER DEFAULT NULL;" )?; } // Protocol v4: Add last_sync_ms to follows if missing let has_last_sync = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('follows') WHERE name='last_sync_ms'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_last_sync == 0 { self.conn.execute_batch( "ALTER TABLE follows ADD COLUMN last_sync_ms INTEGER NOT NULL DEFAULT 0;" )?; } // Protocol v4: Add last_engagement_ms and last_check_ms to posts if missing let has_last_engagement = self.conn.prepare( "SELECT COUNT(*) FROM pragma_table_info('posts') WHERE name='last_engagement_ms'" )?.query_row([], |row| row.get::<_, i64>(0))?; if has_last_engagement == 0 { self.conn.execute_batch( "ALTER TABLE posts ADD COLUMN last_engagement_ms INTEGER NOT NULL DEFAULT 0; ALTER TABLE posts ADD COLUMN last_check_ms INTEGER NOT NULL DEFAULT 0;" )?; } // 0.6.1-beta: seed file_holders from legacy upstream/downstream tables // before they're dropped. Idempotent — only fires on an empty // file_holders table. self.seed_file_holders_from_legacy()?; // 0.6.1-beta: drop legacy directional tables — replaced by file_holders. 
self.conn.execute_batch( "DROP TABLE IF EXISTS blob_upstream; DROP TABLE IF EXISTS blob_downstream; DROP TABLE IF EXISTS post_upstream; DROP TABLE IF EXISTS post_downstream;", )?; // 0.6.2-beta: seed post_recipients index from existing encrypted posts. self.seed_post_recipients_from_posts()?; Ok(()) } // ---- Posts ---- /// Store a post with default Public visibility. Returns true if it was new. pub fn store_post(&self, id: &PostId, post: &Post) -> anyhow::Result { self.store_post_with_visibility(id, post, &PostVisibility::Public) } /// Store a post with explicit visibility. Returns true if it was new. pub fn store_post_with_visibility( &self, id: &PostId, post: &Post, visibility: &PostVisibility, ) -> anyhow::Result { let attachments_json = serde_json::to_string(&post.attachments)?; let visibility_json = serde_json::to_string(visibility)?; let inserted = self.conn.execute( "INSERT OR IGNORE INTO posts (id, author, content, attachments, timestamp_ms, visibility) VALUES (?1, ?2, ?3, ?4, ?5, ?6)", params![ id.as_slice(), post.author.as_slice(), post.content, attachments_json, post.timestamp_ms as i64, visibility_json, ], )?; if inserted > 0 { self.index_post_recipients(id, visibility)?; } Ok(inserted > 0) } pub fn get_post(&self, id: &PostId) -> anyhow::Result> { let mut stmt = self .conn .prepare("SELECT author, content, attachments, timestamp_ms FROM posts WHERE id = ?1")?; let mut rows = stmt.query(params![id.as_slice()])?; if let Some(row) = rows.next()? { let attachments: Vec = serde_json::from_str( &row.get::<_, String>(2)? ).unwrap_or_default(); Ok(Some(Post { author: blob_to_nodeid(row.get(0)?)?, content: row.get(1)?, attachments, timestamp_ms: row.get::<_, i64>(3)? 
as u64, })) } else { Ok(None) } } pub fn get_post_with_visibility( &self, id: &PostId, ) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT author, content, attachments, timestamp_ms, visibility FROM posts WHERE id = ?1", )?; let mut rows = stmt.query(params![id.as_slice()])?; if let Some(row) = rows.next()? { let attachments: Vec = serde_json::from_str(&row.get::<_, String>(2)?).unwrap_or_default(); let vis_json: String = row.get(4)?; let visibility: PostVisibility = serde_json::from_str(&vis_json).unwrap_or_default(); Ok(Some(( Post { author: blob_to_nodeid(row.get(0)?)?, content: row.get(1)?, attachments, timestamp_ms: row.get::<_, i64>(3)? as u64, }, visibility, ))) } else { Ok(None) } } /// Get the visibility intent for a post (if stored). pub fn get_post_intent(&self, id: &PostId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT visibility_intent FROM posts WHERE id = ?1", )?; let mut rows = stmt.query(params![id.as_slice()])?; if let Some(row) = rows.next()? { let intent_json: Option = row.get(0)?; match intent_json { Some(json) => Ok(serde_json::from_str(&json).ok()), None => Ok(None), } } else { Ok(None) } } /// Get post IDs authored by `author` with timestamp_ms >= `since_ms`. pub fn get_own_recent_post_ids(&self, author: &NodeId, since_ms: u64) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT id FROM posts WHERE author = ?1 AND timestamp_ms >= ?2" )?; let rows = stmt.query_map(params![author.as_slice(), since_ms as i64], |row| { let bytes: Vec = row.get(0)?; Ok(bytes) })?; let mut ids = Vec::new(); for row in rows { ids.push(blob_to_postid(row?)?); } Ok(ids) } /// Get a peer's cache_pressure score (0-255), or None if unknown. 
pub fn get_peer_cache_pressure(&self, node_id: &NodeId) -> anyhow::Result> { let result: Option = self.conn.query_row( "SELECT cache_pressure FROM peers WHERE node_id = ?1", params![node_id.as_slice()], |row| row.get(0), ).ok().flatten(); Ok(result.map(|p| p as u8)) } pub fn list_post_ids(&self) -> anyhow::Result> { let mut stmt = self.conn.prepare("SELECT id FROM posts")?; let rows = stmt.query_map([], |row| { let bytes: Vec = row.get(0)?; Ok(bytes) })?; let mut ids = Vec::new(); for row in rows { ids.push(blob_to_postid(row?)?); } Ok(ids) } /// All posts, newest first (with visibility) pub fn list_posts_reverse_chron(&self) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT id, author, content, attachments, timestamp_ms, visibility FROM posts WHERE (visibility_intent IS NULL OR (visibility_intent != '\"Control\"' AND visibility_intent != '\"Profile\"')) ORDER BY timestamp_ms DESC", )?; let rows = stmt.query_map([], |row| { let id_bytes: Vec = row.get(0)?; let author_bytes: Vec = row.get(1)?; let content: String = row.get(2)?; let attachments_json: String = row.get(3)?; let timestamp_ms: i64 = row.get(4)?; let vis_json: String = row.get(5)?; Ok((id_bytes, author_bytes, content, attachments_json, timestamp_ms, vis_json)) })?; let mut posts = Vec::new(); for row in rows { let (id_bytes, author_bytes, content, attachments_json, timestamp_ms, vis_json) = row?; let attachments: Vec = serde_json::from_str(&attachments_json).unwrap_or_default(); let visibility: PostVisibility = serde_json::from_str(&vis_json).unwrap_or_default(); posts.push(( blob_to_postid(id_bytes)?, Post { author: blob_to_nodeid(author_bytes)?, content, attachments, timestamp_ms: timestamp_ms as u64, }, visibility, )); } Ok(posts) } /// Feed: posts from followed users, reverse chronological (with visibility) pub fn get_feed(&self) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT p.id, p.author, p.content, p.attachments, p.timestamp_ms, p.visibility FROM posts p INNER JOIN 
follows f ON p.author = f.node_id WHERE (p.visibility_intent IS NULL OR (p.visibility_intent != '\"Control\"' AND p.visibility_intent != '\"Profile\"')) ORDER BY p.timestamp_ms DESC", )?; let rows = stmt.query_map([], |row| { let id_bytes: Vec = row.get(0)?; let author_bytes: Vec = row.get(1)?; let content: String = row.get(2)?; let attachments_json: String = row.get(3)?; let timestamp_ms: i64 = row.get(4)?; let vis_json: String = row.get(5)?; Ok((id_bytes, author_bytes, content, attachments_json, timestamp_ms, vis_json)) })?; let mut posts = Vec::new(); for row in rows { let (id_bytes, author_bytes, content, attachments_json, timestamp_ms, vis_json) = row?; let attachments: Vec = serde_json::from_str(&attachments_json).unwrap_or_default(); let visibility: PostVisibility = serde_json::from_str(&vis_json).unwrap_or_default(); posts.push(( blob_to_postid(id_bytes)?, Post { author: blob_to_nodeid(author_bytes)?, content, attachments, timestamp_ms: timestamp_ms as u64, }, visibility, )); } Ok(posts) } /// Feed: paginated — posts from followed users, cursor-based by timestamp pub fn get_feed_page(&self, before_ms: Option, limit: usize) -> anyhow::Result> { let sql = if before_ms.is_some() { "SELECT p.id, p.author, p.content, p.attachments, p.timestamp_ms, p.visibility FROM posts p INNER JOIN follows f ON p.author = f.node_id WHERE p.timestamp_ms < ?1 AND (p.visibility_intent IS NULL OR (p.visibility_intent != '\"Control\"' AND p.visibility_intent != '\"Profile\"')) ORDER BY p.timestamp_ms DESC LIMIT ?2" } else { "SELECT p.id, p.author, p.content, p.attachments, p.timestamp_ms, p.visibility FROM posts p INNER JOIN follows f ON p.author = f.node_id WHERE (p.visibility_intent IS NULL OR (p.visibility_intent != '\"Control\"' AND p.visibility_intent != '\"Profile\"')) ORDER BY p.timestamp_ms DESC LIMIT ?2" }; let mut stmt = self.conn.prepare(sql)?; let rows = if let Some(bms) = before_ms { stmt.query_map(rusqlite::params![bms as i64, limit as i64], Self::parse_post_row)? 
} else {
    stmt.query_map(rusqlite::params![i64::MAX, limit as i64], Self::parse_post_row)?
};
Self::collect_posts(rows)
}

/// All posts: paginated — cursor-based by timestamp
pub fn list_posts_page(&self, before_ms: Option<u64>, limit: usize) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
    // Control/Profile intents are infrastructure posts, not feed content — exclude them.
    let sql = if before_ms.is_some() {
        "SELECT id, author, content, attachments, timestamp_ms, visibility FROM posts WHERE timestamp_ms < ?1 AND (visibility_intent IS NULL OR (visibility_intent != '\"Control\"' AND visibility_intent != '\"Profile\"')) ORDER BY timestamp_ms DESC LIMIT ?2"
    } else {
        "SELECT id, author, content, attachments, timestamp_ms, visibility FROM posts WHERE (visibility_intent IS NULL OR (visibility_intent != '\"Control\"' AND visibility_intent != '\"Profile\"')) ORDER BY timestamp_ms DESC LIMIT ?2"
    };
    let mut stmt = self.conn.prepare(sql)?;
    // NOTE: the else-branch SQL references only ?2, yet we bind two params.
    // SQLite's bound-parameter count is the highest index used (?2), so the
    // unused ?1 slot is accepted — the i64::MAX binding is simply ignored.
    let rows = if let Some(bms) = before_ms {
        stmt.query_map(rusqlite::params![bms as i64, limit as i64], Self::parse_post_row)?
    } else {
        stmt.query_map(rusqlite::params![i64::MAX, limit as i64], Self::parse_post_row)?
    };
    Self::collect_posts(rows)
}

/// Batch: reaction counts for multiple posts at once.
/// Returns, per post: (emoji, total count, whether `our_node_id` reacted).
pub fn get_reaction_counts_batch(&self, post_ids: &[PostId], our_node_id: &NodeId) -> anyhow::Result<std::collections::HashMap<PostId, Vec<(String, u64, bool)>>> {
    use std::collections::HashMap;
    let mut result: HashMap<PostId, Vec<(String, u64, bool)>> = HashMap::new();
    if post_ids.is_empty() {
        return Ok(result);
    }
    // Build "?1,?2,…" placeholders for the IN clause; the reactor param goes last.
    let placeholders: String = (0..post_ids.len()).map(|i| format!("?{}", i + 1)).collect::<Vec<_>>().join(",");
    let sql = format!(
        "SELECT post_id, emoji, COUNT(*) as cnt, SUM(CASE WHEN reactor = ?{} THEN 1 ELSE 0 END) as my_count FROM reactions WHERE post_id IN ({}) AND deleted_at IS NULL GROUP BY post_id, emoji ORDER BY cnt DESC",
        post_ids.len() + 1,
        placeholders
    );
    let mut stmt = self.conn.prepare(&sql)?;
    let mut params: Vec<Box<dyn rusqlite::types::ToSql>> = post_ids.iter().map(|id| Box::new(id.to_vec()) as Box<dyn rusqlite::types::ToSql>).collect();
    params.push(Box::new(our_node_id.to_vec()));
    let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|p| p.as_ref()).collect();
    let rows = stmt.query_map(param_refs.as_slice(), |row| {
        let pid: Vec<u8> = row.get(0)?;
        let emoji: String = row.get(1)?;
        let count: i64 = row.get(2)?;
        let my_count: i64 = row.get(3)?;
        Ok((pid, emoji, count as u64, my_count > 0))
    })?;
    for row in rows {
        let (pid, emoji, count, reacted_by_me) = row?;
        // Silently skip malformed post_id blobs — they can't map to a PostId.
        if let Ok(id) = blob_to_postid(pid) {
            result.entry(id).or_default().push((emoji, count, reacted_by_me));
        }
    }
    Ok(result)
}

/// Batch: comment counts for multiple posts at once
pub fn get_comment_counts_batch(&self, post_ids: &[PostId]) -> anyhow::Result<std::collections::HashMap<PostId, u64>> {
    use std::collections::HashMap;
    let mut result: HashMap<PostId, u64> = HashMap::new();
    if post_ids.is_empty() {
        return Ok(result);
    }
    let placeholders: String = (0..post_ids.len()).map(|i| format!("?{}", i + 1)).collect::<Vec<_>>().join(",");
    let sql = format!(
        "SELECT post_id, COUNT(*) FROM comments WHERE post_id IN ({}) AND deleted_at IS NULL GROUP BY post_id",
        placeholders
    );
    let mut stmt = self.conn.prepare(&sql)?;
    let params: Vec<Box<dyn rusqlite::types::ToSql>> = post_ids.iter().map(|id| Box::new(id.to_vec()) as Box<dyn rusqlite::types::ToSql>).collect();
    let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|p| p.as_ref()).collect();
    let rows = stmt.query_map(param_refs.as_slice(), |row| {
        let pid: Vec<u8> = row.get(0)?;
        let count: i64 = row.get(1)?;
        Ok((pid, count as u64))
    })?;
    for row in rows {
        let (pid, count) = row?;
        if let Ok(id) = blob_to_postid(pid) {
            result.insert(id, count);
        }
    }
    Ok(result)
}

/// Batch: visibility intents for multiple posts at once
pub fn get_post_intents_batch(&self, post_ids: &[PostId]) -> anyhow::Result<std::collections::HashMap<PostId, String>> {
    use std::collections::HashMap;
    let mut result: HashMap<PostId, String> = HashMap::new();
    if post_ids.is_empty() {
        return Ok(result);
    }
    let placeholders: String = (0..post_ids.len()).map(|i| format!("?{}", i + 1)).collect::<Vec<_>>().join(",");
    let sql = format!(
        "SELECT id, visibility_intent FROM posts WHERE id IN ({})",
        placeholders
    );
    let mut stmt = self.conn.prepare(&sql)?;
    let params: Vec<Box<dyn rusqlite::types::ToSql>> = post_ids.iter().map(|id| Box::new(id.to_vec()) as Box<dyn rusqlite::types::ToSql>).collect();
    let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|p| p.as_ref()).collect();
    let rows = stmt.query_map(param_refs.as_slice(), |row| {
        let pid: Vec<u8> = row.get(0)?;
        let intent: Option<String> = row.get(1)?;
        // NULL intent is reported as an empty string.
        Ok((pid, intent.unwrap_or_default()))
    })?;
    for row in rows {
        let (pid, intent) = row?;
        if let Ok(id) = blob_to_postid(pid) {
            result.insert(id, intent);
        }
    }
    Ok(result)
}

/// Helper: parse a post row from a query
fn parse_post_row(row: &rusqlite::Row<'_>) -> rusqlite::Result<(Vec<u8>, Vec<u8>, String, String, i64, String)> {
    Ok((row.get(0)?, row.get(1)?, row.get(2)?, row.get(3)?, row.get(4)?, row.get(5)?))
}

/// Helper: collect parsed post rows into typed results
fn collect_posts(rows: rusqlite::MappedRows<'_, impl FnMut(&rusqlite::Row<'_>) -> rusqlite::Result<(Vec<u8>, Vec<u8>, String, String, i64, String)>>) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
    let mut posts = Vec::new();
    for row in rows {
        let (id_bytes, author_bytes, content, attachments_json, timestamp_ms, vis_json) = row?;
        // Corrupt JSON degrades to empty attachments / default visibility rather than failing.
        let attachments: Vec<Attachment> = serde_json::from_str(&attachments_json).unwrap_or_default();
        let visibility: PostVisibility = serde_json::from_str(&vis_json).unwrap_or_default();
        posts.push((
            blob_to_postid(id_bytes)?,
            Post {
                author: blob_to_nodeid(author_bytes)?,
                content,
                attachments,
                timestamp_ms: timestamp_ms as u64,
            },
            visibility,
        ));
    }
    Ok(posts)
}

/// All posts with visibility (for sync protocol and export).
/// Includes control/profile posts — they need to propagate through the
/// CDN like any other post.
pub fn list_posts_with_visibility(&self) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
    let mut stmt = self.conn.prepare(
        "SELECT id, author, content, attachments, timestamp_ms, visibility FROM posts ORDER BY timestamp_ms DESC",
    )?;
    let rows = stmt.query_map([], |row| {
        let id_bytes: Vec<u8> = row.get(0)?;
        let author_bytes: Vec<u8> = row.get(1)?;
        let content: String = row.get(2)?;
        let attachments_json: String = row.get(3)?;
        let timestamp_ms: i64 = row.get(4)?;
        let vis_json: String = row.get(5)?;
        Ok((id_bytes, author_bytes, content, attachments_json, timestamp_ms, vis_json))
    })?;
    let mut posts = Vec::new();
    for row in rows {
        let (id_bytes, author_bytes, content, attachments_json, timestamp_ms, vis_json) = row?;
        let attachments: Vec<Attachment> = serde_json::from_str(&attachments_json).unwrap_or_default();
        let visibility: PostVisibility = serde_json::from_str(&vis_json).unwrap_or_default();
        posts.push((
            blob_to_postid(id_bytes)?,
            Post {
                author: blob_to_nodeid(author_bytes)?,
                content,
                attachments,
                timestamp_ms: timestamp_ms as u64,
            },
            visibility,
        ));
    }
    Ok(posts)
}

// ---- Follows ----

/// Follow a node with the default 'public' visibility. No-op if already followed.
pub fn add_follow(&self, node_id: &NodeId) -> anyhow::Result<()> {
    self.conn.execute(
        "INSERT OR IGNORE INTO follows (node_id, visibility) VALUES (?1, 'public')",
        params![node_id.as_slice()],
    )?;
    Ok(())
}

/// Follow (or re-follow) a node with an explicit visibility, replacing any prior row.
pub fn add_follow_with_visibility(
    &self,
    node_id: &NodeId,
    visibility: FollowVisibility,
) -> anyhow::Result<()> {
    let vis_str = match visibility {
        FollowVisibility::Public => "public",
        FollowVisibility::Private => "private",
    };
    self.conn.execute(
        "INSERT OR REPLACE INTO follows (node_id, visibility) VALUES (?1, ?2)",
        params![node_id.as_slice(), vis_str],
    )?;
    Ok(())
}

/// Unfollow a node.
pub fn remove_follow(&self, node_id: &NodeId) -> anyhow::Result<()> {
    self.conn.execute(
        "DELETE FROM follows WHERE node_id = ?1",
        params![node_id.as_slice()],
    )?;
    Ok(())
}

/// List all follows, public and private.
pub fn list_follows(&self) -> anyhow::Result<Vec<NodeId>> {
    let mut stmt = self.conn.prepare("SELECT node_id FROM follows")?;
    let rows = stmt.query_map([], |row| {
        let bytes: Vec<u8> = row.get(0)?;
        Ok(bytes)
    })?;
    let mut ids = Vec::new();
    for row in rows {
        ids.push(blob_to_nodeid(row?)?);
    }
    Ok(ids)
}

/// List only public follows (for gossip)
pub fn list_public_follows(&self) -> anyhow::Result<Vec<NodeId>> {
    let mut stmt = self
        .conn
        .prepare("SELECT node_id FROM follows WHERE visibility = 'public'")?;
    let rows = stmt.query_map([], |row| {
        let bytes: Vec<u8> = row.get(0)?;
        Ok(bytes)
    })?;
    let mut ids = Vec::new();
    for row in rows {
        ids.push(blob_to_nodeid(row?)?);
    }
    Ok(ids)
}

// ---- Protocol v4: Per-Author Sync Tracking ----

/// Update the last_sync_ms timestamp for a followed author.
pub fn update_follow_last_sync(&self, node_id: &NodeId, timestamp_ms: u64) -> anyhow::Result<()> {
    self.conn.execute(
        "UPDATE follows SET last_sync_ms = ?2 WHERE node_id = ?1",
        params![node_id.as_slice(), timestamp_ms as i64],
    )?;
    Ok(())
}

/// Get all follows with their last_sync_ms timestamps.
pub fn get_follows_with_last_sync(&self) -> anyhow::Result<Vec<(NodeId, u64)>> {
    let mut stmt = self.conn.prepare("SELECT node_id, last_sync_ms FROM follows")?;
    let rows = stmt.query_map([], |row| {
        let bytes: Vec<u8> = row.get(0)?;
        let ts: i64 = row.get(1)?;
        Ok((bytes, ts))
    })?;
    let mut result = Vec::new();
    for row in rows {
        let (bytes, ts) = row?;
        result.push((blob_to_nodeid(bytes)?, ts as u64));
    }
    Ok(result)
}

/// Get follows whose last_sync_ms is older than max_age_ms from now.
pub fn get_stale_follows(&self, max_age_ms: u64) -> anyhow::Result> { let now = now_ms() as u64; let cutoff = now.saturating_sub(max_age_ms) as i64; let mut stmt = self.conn.prepare( "SELECT node_id FROM follows WHERE last_sync_ms < ?1" )?; let rows = stmt.query_map(params![cutoff], |row| { let bytes: Vec = row.get(0)?; Ok(bytes) })?; let mut ids = Vec::new(); for row in rows { ids.push(blob_to_nodeid(row?)?); } Ok(ids) } /// Get posts due for engagement check using tiered frequency: /// - Active (engagement within 72h): check every 5 min /// - Recent (engagement within 14d): check every 1 hour /// - Aging (engagement within 30d): check every 4 hours /// - Cold (older): check every 24 hours pub fn get_posts_due_for_engagement_check(&self) -> anyhow::Result> { let now = now_ms() as u64; let h72 = now.saturating_sub(72 * 3600 * 1000) as i64; let d14 = now.saturating_sub(14 * 24 * 3600 * 1000) as i64; let d30 = now.saturating_sub(30 * 24 * 3600 * 1000) as i64; let now_i64 = now as i64; let mut stmt = self.conn.prepare( "SELECT id FROM posts WHERE last_check_ms < ?1 - CASE WHEN last_engagement_ms > ?2 THEN 300000 WHEN last_engagement_ms > ?3 THEN 3600000 WHEN last_engagement_ms > ?4 THEN 14400000 ELSE 86400000 END" )?; let rows = stmt.query_map(params![now_i64, h72, d14, d30], |row| { let bytes: Vec = row.get(0)?; Ok(bytes) })?; let mut ids = Vec::new(); for row in rows { let bytes = row?; if bytes.len() == 32 { let mut id = [0u8; 32]; id.copy_from_slice(&bytes); ids.push(id); } } Ok(ids) } /// Update the last_check_ms timestamp for a post. pub fn update_post_last_check(&self, post_id: &PostId, timestamp_ms: u64) -> anyhow::Result<()> { self.conn.execute( "UPDATE posts SET last_check_ms = ?2 WHERE id = ?1", params![post_id.as_slice(), timestamp_ms as i64], )?; Ok(()) } /// Update the last_engagement_ms timestamp for a post. 
pub fn update_post_last_engagement(&self, post_id: &PostId, timestamp_ms: u64) -> anyhow::Result<()> { self.conn.execute( "UPDATE posts SET last_engagement_ms = ?2 WHERE id = ?1", params![post_id.as_slice(), timestamp_ms as i64], )?; Ok(()) } // ---- Peers ---- /// Add or update a peer (backward-compat: no addresses) pub fn add_peer(&self, node_id: &NodeId) -> anyhow::Result<()> { let now = now_ms(); self.conn.execute( "INSERT INTO peers (node_id, addresses, last_seen, first_seen) VALUES (?1, '[]', ?2, ?2) ON CONFLICT(node_id) DO UPDATE SET last_seen = ?2", params![node_id.as_slice(), now], )?; Ok(()) } /// List just node IDs (backward-compat) pub fn list_peers(&self) -> anyhow::Result> { let mut stmt = self .conn .prepare("SELECT node_id FROM peers ORDER BY last_seen DESC")?; let rows = stmt.query_map([], |row| { let bytes: Vec = row.get(0)?; Ok(bytes) })?; let mut ids = Vec::new(); for row in rows { ids.push(blob_to_nodeid(row?)?); } Ok(ids) } /// Insert or update a peer with full details pub fn upsert_peer( &self, node_id: &NodeId, addresses: &[SocketAddr], introduced_by: Option<&NodeId>, ) -> anyhow::Result<()> { let now = now_ms(); let addrs_json = serde_json::to_string( &addresses.iter().map(|a| a.to_string()).collect::>() )?; self.conn.execute( "INSERT INTO peers (node_id, addresses, last_seen, introduced_by, first_seen) VALUES (?1, ?2, ?3, ?4, ?3) ON CONFLICT(node_id) DO UPDATE SET addresses = ?2, last_seen = ?3, introduced_by = COALESCE(peers.introduced_by, ?4)", params![ node_id.as_slice(), addrs_json, now, introduced_by.map(|n| n.as_slice()), ], )?; Ok(()) } /// Get a single peer record pub fn get_peer_record(&self, node_id: &NodeId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT node_id, addresses, last_seen, introduced_by, is_anchor, first_seen FROM peers WHERE node_id = ?1", )?; let mut rows = stmt.query(params![node_id.as_slice()])?; if let Some(row) = rows.next()? 
{ Ok(Some(row_to_peer_record(row)?)) } else { Ok(None) } } /// List all peer records with full info pub fn list_peer_records(&self) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT node_id, addresses, last_seen, introduced_by, is_anchor, first_seen FROM peers ORDER BY last_seen DESC", )?; let mut records = Vec::new(); let mut rows = stmt.query([])?; while let Some(row) = rows.next()? { records.push(row_to_peer_record(row)?); } Ok(records) } /// Merge additional addresses into an existing peer's record pub fn merge_peer_addresses( &self, node_id: &NodeId, new_addrs: &[SocketAddr], ) -> anyhow::Result<()> { let now = now_ms(); // Get existing addresses let existing: Option = self.conn.query_row( "SELECT addresses FROM peers WHERE node_id = ?1", params![node_id.as_slice()], |row| row.get(0), ).ok(); if let Some(existing_json) = existing { let mut addrs: Vec = serde_json::from_str(&existing_json).unwrap_or_default(); for a in new_addrs { let s = a.to_string(); if !addrs.contains(&s) { addrs.push(s); } } let merged = serde_json::to_string(&addrs)?; self.conn.execute( "UPDATE peers SET addresses = ?1, last_seen = ?2 WHERE node_id = ?3", params![merged, now, node_id.as_slice()], )?; } else { // Peer doesn't exist yet — upsert self.upsert_peer(node_id, new_addrs, None)?; } Ok(()) } /// Set a peer's NAT type pub fn set_peer_nat_type(&self, node_id: &NodeId, nat_type: crate::types::NatType) -> anyhow::Result<()> { self.conn.execute( "UPDATE peers SET nat_type = ?1 WHERE node_id = ?2", params![nat_type.to_string(), node_id.as_slice()], )?; Ok(()) } /// Get a peer's NAT type pub fn get_peer_nat_type(&self, node_id: &NodeId) -> crate::types::NatType { self.conn.query_row( "SELECT nat_type FROM peers WHERE node_id = ?1", params![node_id.as_slice()], |row| row.get::<_, String>(0), ) .map(|s| crate::types::NatType::from_str_label(&s)) .unwrap_or(crate::types::NatType::Unknown) } /// Set a peer's NAT profile (mapping + filtering) pub fn set_peer_nat_profile(&self, 
node_id: &NodeId, profile: &crate::types::NatProfile) -> anyhow::Result<()> { self.conn.execute( "UPDATE peers SET nat_mapping = ?1, nat_filtering = ?2 WHERE node_id = ?3", params![profile.mapping.to_string(), profile.filtering.to_string(), node_id.as_slice()], )?; Ok(()) } /// Get a peer's NAT profile (mapping + filtering) pub fn get_peer_nat_profile(&self, node_id: &NodeId) -> crate::types::NatProfile { self.conn.query_row( "SELECT nat_mapping, nat_filtering FROM peers WHERE node_id = ?1", params![node_id.as_slice()], |row| { let mapping: Option = row.get(0)?; let filtering: Option = row.get(1)?; Ok((mapping, filtering)) }, ) .map(|(m, f)| { let mapping = m.map(|s| crate::types::NatMapping::from_str_label(&s)) .unwrap_or(crate::types::NatMapping::Unknown); let filtering = f.map(|s| crate::types::NatFiltering::from_str_label(&s)) .unwrap_or(crate::types::NatFiltering::Unknown); crate::types::NatProfile::new(mapping, filtering) }) .unwrap_or_else(|_| crate::types::NatProfile::unknown()) } /// Set a peer's HTTP capability info pub fn set_peer_http_info(&self, node_id: &NodeId, capable: bool, addr: Option<&str>) -> anyhow::Result<()> { self.conn.execute( "UPDATE peers SET http_capable = ?1, http_addr = ?2 WHERE node_id = ?3", params![capable as i32, addr, node_id.as_slice()], )?; Ok(()) } /// Get a peer's HTTP capability (http_capable, http_addr) pub fn get_peer_http_info(&self, node_id: &NodeId) -> (bool, Option) { self.conn.query_row( "SELECT http_capable, http_addr FROM peers WHERE node_id = ?1", params![node_id.as_slice()], |row| { let capable: i32 = row.get(0)?; let addr: Option = row.get(1)?; Ok((capable != 0, addr)) }, ) .unwrap_or((false, None)) } /// Get a random N2 stranger (node in reachable_n2 but not in our connections) /// Returns (witness_node_id, reporter_node_id) for anchor probe pub fn random_n2_stranger(&self, our_connections: &std::collections::HashSet) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT reachable_node_id, 
reporter_node_id FROM reachable_n2 ORDER BY RANDOM() LIMIT 10" )?; let rows = stmt.query_map([], |row| { let rn: Vec = row.get(0)?; let rep: Vec = row.get(1)?; Ok((rn, rep)) })?; for row in rows { if let Ok((rn_bytes, rep_bytes)) = row { if rn_bytes.len() == 32 && rep_bytes.len() == 32 { let mut witness: NodeId = [0u8; 32]; let mut reporter: NodeId = [0u8; 32]; witness.copy_from_slice(&rn_bytes); reporter.copy_from_slice(&rep_bytes); // Witness must NOT be in our connections (stranger requirement) if !our_connections.contains(&witness) { // Reporter MUST be in our connections (we need to send them the request) if our_connections.contains(&reporter) { return Ok(Some((witness, reporter))); } } } } } Ok(None) } /// Set a peer's anchor status pub fn set_peer_anchor(&self, node_id: &NodeId, is_anchor: bool) -> anyhow::Result<()> { self.conn.execute( "UPDATE peers SET is_anchor = ?1 WHERE node_id = ?2", params![is_anchor as i32, node_id.as_slice()], )?; Ok(()) } /// Check if the peers table has any rows pub fn has_peers(&self) -> anyhow::Result { let count: i64 = self .conn .query_row("SELECT COUNT(*) FROM peers", [], |row| row.get(0))?; Ok(count > 0) } /// Build GossipPeerInfo list from all peers seen in the last 7 days. /// Address-free since sync/6 — only node IDs and anchor status. pub fn build_gossip_list(&self) -> anyhow::Result> { let cutoff = now_ms() - 7 * 24 * 60 * 60 * 1000; let mut stmt = self.conn.prepare( "SELECT node_id, is_anchor FROM peers WHERE last_seen > ?1", )?; let mut result = Vec::new(); let mut rows = stmt.query(params![cutoff])?; while let Some(row) = rows.next()? { let node_id = blob_to_nodeid(row.get(0)?)?; let is_anchor = row.get::<_, i32>(1)? != 0; result.push(GossipPeerInfo { node_id, is_anchor, }); } Ok(result) } // ---- Profiles ---- /// Store or update a profile. Only updates if the new profile is newer. /// Returns true if the profile was stored/updated. 
pub fn store_profile(&self, profile: &PublicProfile) -> anyhow::Result { // Check if we already have a newer version let existing_ts: Option = self.conn.query_row( "SELECT updated_at FROM profiles WHERE node_id = ?1", params![profile.node_id.as_slice()], |row| row.get(0), ).ok(); if let Some(ts) = existing_ts { if ts as u64 > profile.updated_at { return Ok(false); } } let anchors_json = serde_json::to_string( &profile.anchors.iter().map(hex::encode).collect::>() )?; let recent_peers_json = serde_json::to_string( &profile.recent_peers.iter().map(hex::encode).collect::>() )?; let preferred_peers_json = serde_json::to_string( &profile.preferred_peers.iter().map(hex::encode).collect::>() )?; let avatar_cid_slice = profile.avatar_cid.as_ref().map(|c| c.as_slice()); self.conn.execute( "INSERT OR REPLACE INTO profiles (node_id, display_name, bio, updated_at, anchors, recent_peers, preferred_peers, public_visible, avatar_cid) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)", params![ profile.node_id.as_slice(), profile.display_name, profile.bio, profile.updated_at as i64, anchors_json, recent_peers_json, preferred_peers_json, profile.public_visible as i64, avatar_cid_slice, ], )?; Ok(true) } // ---- Settings ---- /// Get a setting value by key. pub fn get_setting(&self, key: &str) -> anyhow::Result> { let mut stmt = self.conn.prepare("SELECT value FROM settings WHERE key = ?1")?; let mut rows = stmt.query(params![key])?; if let Some(row) = rows.next()? { Ok(Some(row.get(0)?)) } else { Ok(None) } } /// Set a setting value (upsert). pub fn set_setting(&self, key: &str, value: &str) -> anyhow::Result<()> { self.conn.execute( "INSERT INTO settings (key, value) VALUES (?1, ?2) ON CONFLICT(key) DO UPDATE SET value = excluded.value", params![key, value], )?; Ok(()) } // --- Seen engagement tracking --- /// Get the seen engagement counts for a post (react_count, comment_count). 
pub fn get_seen_engagement(&self, post_id: &PostId) -> anyhow::Result<(u32, u32)> {
    let mut stmt = self.conn.prepare(
        "SELECT seen_react_count, seen_comment_count FROM seen_engagement WHERE post_id = ?1"
    )?;
    let result = stmt.query_row(params![post_id.as_slice()], |row| {
        let rc: i64 = row.get(0)?;
        let cc: i64 = row.get(1)?;
        Ok((rc as u32, cc as u32))
    });
    match result {
        Ok(r) => Ok(r),
        // Never-tracked post reads as zero engagement rather than an error.
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok((0, 0)),
        Err(e) => Err(e.into()),
    }
}

/// Set the seen engagement counts for a post (upsert).
pub fn set_seen_engagement(&self, post_id: &PostId, react_count: u32, comment_count: u32) -> anyhow::Result<()> {
    let now_ms = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map(|d| d.as_millis() as i64)
        .unwrap_or(0);
    self.conn.execute(
        "INSERT INTO seen_engagement (post_id, seen_react_count, seen_comment_count, updated_at) VALUES (?1, ?2, ?3, ?4) ON CONFLICT(post_id) DO UPDATE SET seen_react_count = excluded.seen_react_count, seen_comment_count = excluded.seen_comment_count, updated_at = excluded.updated_at",
        params![post_id.as_slice(), react_count as i64, comment_count as i64, now_ms],
    )?;
    Ok(())
}

/// Get the last-read timestamp for a conversation partner.
pub fn get_last_read_message(&self, partner_id: &NodeId) -> anyhow::Result<u64> {
    let mut stmt = self.conn.prepare(
        "SELECT last_read_ms FROM seen_messages WHERE partner_id = ?1"
    )?;
    let result = stmt.query_row(params![partner_id.as_slice()], |row| {
        let ts: i64 = row.get(0)?;
        Ok(ts as u64)
    });
    match result {
        Ok(r) => Ok(r),
        // Unknown partner means nothing has been read yet.
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(0),
        Err(e) => Err(e.into()),
    }
}

/// Set the last-read timestamp for a conversation partner (upsert).
pub fn set_last_read_message(&self, partner_id: &NodeId, timestamp_ms: u64) -> anyhow::Result<()> { self.conn.execute( "INSERT INTO seen_messages (partner_id, last_read_ms) VALUES (?1, ?2) ON CONFLICT(partner_id) DO UPDATE SET last_read_ms = excluded.last_read_ms", params![partner_id.as_slice(), timestamp_ms as i64], )?; Ok(()) } /// Initialize post_hosts table (called by web handler). pub fn init_post_hosts_table(&self) -> anyhow::Result<()> { // Already in init_tables, but safe to call again Ok(()) } /// Get known-good hosts for a post. pub fn get_post_hosts(&self, post_id: &PostId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT host FROM post_hosts WHERE post_id = ?1 AND last_seen_ms > ?2 ORDER BY last_seen_ms DESC LIMIT 10" )?; // Only return hosts seen in last 30 minutes let cutoff = std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap_or_default() .as_millis() as i64 - 30 * 60 * 1000; let hosts: Vec = stmt.query_map(params![post_id.as_slice(), cutoff], |row| { row.get::<_, String>(0) })?.filter_map(|r| r.ok()) .filter_map(|s| s.parse::().ok()) .collect(); Ok(hosts) } /// Upsert a known-good host for a post. pub fn upsert_post_host(&self, post_id: &PostId, host: &SocketAddr) -> anyhow::Result<()> { let now = std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap_or_default() .as_millis() as i64; self.conn.execute( "INSERT INTO post_hosts (post_id, host, last_seen_ms) VALUES (?1, ?2, ?3) \ ON CONFLICT(post_id, host) DO UPDATE SET last_seen_ms = excluded.last_seen_ms", params![post_id.as_slice(), host.to_string(), now], )?; Ok(()) } /// Get a profile by node ID pub fn get_profile(&self, node_id: &NodeId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT node_id, display_name, bio, updated_at, anchors, recent_peers, preferred_peers, public_visible, avatar_cid FROM profiles WHERE node_id = ?1", )?; let mut rows = stmt.query(params![node_id.as_slice()])?; if let Some(row) = rows.next()? 
{ let anchors = parse_anchors_json(&row.get::<_, String>(4)?); let recent_peers = parse_anchors_json(&row.get::<_, String>(5).unwrap_or_else(|_| "[]".to_string())); let preferred_peers = parse_anchors_json(&row.get::<_, String>(6).unwrap_or_else(|_| "[]".to_string())); let public_visible = row.get::<_, i64>(7).unwrap_or(1) != 0; let avatar_cid = row.get::<_, Option>>(8).unwrap_or(None) .and_then(|b| <[u8; 32]>::try_from(b.as_slice()).ok()); Ok(Some(PublicProfile { node_id: blob_to_nodeid(row.get(0)?)?, display_name: row.get(1)?, bio: row.get(2)?, updated_at: row.get::<_, i64>(3)? as u64, anchors, recent_peers, preferred_peers, public_visible, avatar_cid, })) } else { Ok(None) } } /// List all known profiles pub fn list_profiles(&self) -> anyhow::Result> { let mut stmt = self .conn .prepare("SELECT node_id, display_name, bio, updated_at, anchors, recent_peers, preferred_peers, public_visible, avatar_cid FROM profiles")?; let rows = stmt.query_map([], |row| { let node_id_bytes: Vec = row.get(0)?; let display_name: String = row.get(1)?; let bio: String = row.get(2)?; let updated_at: i64 = row.get(3)?; let anchors_json: String = row.get(4)?; let recent_peers_json: String = row.get::<_, String>(5).unwrap_or_else(|_| "[]".to_string()); let preferred_peers_json: String = row.get::<_, String>(6).unwrap_or_else(|_| "[]".to_string()); let public_visible: i64 = row.get::<_, i64>(7).unwrap_or(1); let avatar_cid_bytes: Option> = row.get::<_, Option>>(8).unwrap_or(None); Ok((node_id_bytes, display_name, bio, updated_at, anchors_json, recent_peers_json, preferred_peers_json, public_visible, avatar_cid_bytes)) })?; let mut profiles = Vec::new(); for row in rows { let (node_id_bytes, display_name, bio, updated_at, anchors_json, recent_peers_json, preferred_peers_json, public_visible, avatar_cid_bytes) = row?; let avatar_cid = avatar_cid_bytes.and_then(|b| <[u8; 32]>::try_from(b.as_slice()).ok()); profiles.push(PublicProfile { node_id: blob_to_nodeid(node_id_bytes)?, display_name, 
bio, updated_at: updated_at as u64, anchors: parse_anchors_json(&anchors_json), recent_peers: parse_anchors_json(&recent_peers_json), preferred_peers: parse_anchors_json(&preferred_peers_json), public_visible: public_visible != 0, avatar_cid, }); } Ok(profiles) } /// Get the anchor list from a peer's profile pub fn get_peer_anchors(&self, node_id: &NodeId) -> anyhow::Result> { let result: Option = self.conn.query_row( "SELECT anchors FROM profiles WHERE node_id = ?1", params![node_id.as_slice()], |row| row.get(0), ).ok(); Ok(result.map(|j| parse_anchors_json(&j)).unwrap_or_default()) } /// List peers that are known anchors (is_anchor = true in peers table) pub fn list_anchor_peers(&self) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT node_id, addresses, last_seen, introduced_by, is_anchor, first_seen FROM peers WHERE is_anchor = 1 ORDER BY last_seen DESC", )?; let mut records = Vec::new(); let mut rows = stmt.query([])?; while let Some(row) = rows.next()? { records.push(row_to_peer_record(row)?); } Ok(records) } // ---- Known anchors (persistent anchor cache for NAT traversal) ---- /// Upsert a known anchor. Increments success_count on conflict. Auto-prunes to 5. pub fn upsert_known_anchor(&self, node_id: &NodeId, addresses: &[SocketAddr]) -> anyhow::Result<()> { let addr_json = serde_json::to_string( &addresses.iter().map(|a| a.to_string()).collect::>(), )?; let now = now_ms(); self.conn.execute( "INSERT INTO known_anchors (node_id, addresses, last_seen_ms, success_count) VALUES (?1, ?2, ?3, 1) ON CONFLICT(node_id) DO UPDATE SET addresses = ?2, last_seen_ms = ?3, success_count = success_count + 1", params![node_id.as_slice(), addr_json, now], )?; self.prune_known_anchors(5)?; Ok(()) } /// List known anchors, ordered by success_count descending. 
pub fn list_known_anchors(&self) -> anyhow::Result)>> { let mut stmt = self.conn.prepare( "SELECT node_id, addresses FROM known_anchors ORDER BY success_count DESC LIMIT 5", )?; let mut result = Vec::new(); let mut rows = stmt.query([])?; while let Some(row) = rows.next()? { let node_id = blob_to_nodeid(row.get(0)?)?; let addr_json: String = row.get(1)?; let addrs: Vec = serde_json::from_str::>(&addr_json) .unwrap_or_default() .iter() .filter_map(|a| a.parse().ok()) .collect(); result.push((node_id, addrs)); } Ok(result) } /// Prune known anchors to keep at most `max` entries (by highest success_count). pub fn prune_known_anchors(&self, max: usize) -> anyhow::Result { let count: i64 = self.conn.query_row( "SELECT COUNT(*) FROM known_anchors", [], |row| row.get(0), )?; if count as usize <= max { return Ok(0); } let excess = count as usize - max; self.conn.execute( "DELETE FROM known_anchors WHERE node_id IN ( SELECT node_id FROM known_anchors ORDER BY success_count ASC, last_seen_ms ASC LIMIT ?1 )", params![excess as i64], )?; Ok(excess) } /// Check if a peer is marked as an anchor in the peers table. pub fn is_peer_anchor(&self, node_id: &NodeId) -> anyhow::Result { let result: Option = self.conn.query_row( "SELECT is_anchor FROM peers WHERE node_id = ?1", params![node_id.as_slice()], |row| row.get(0), ).ok(); Ok(result.unwrap_or(0) != 0) } /// Set a peer's CDN device role and cache pressure. pub fn set_peer_device_role(&self, node_id: &NodeId, role: Option<&str>, pressure: Option) -> anyhow::Result<()> { self.conn.execute( "UPDATE peers SET device_role = ?2, cache_pressure = ?3 WHERE node_id = ?1", params![node_id.as_slice(), role, pressure.map(|p| p as i32)], )?; Ok(()) } /// Get a peer's CDN device role (from InitialExchange). 
pub fn get_peer_device_role(&self, node_id: &NodeId) -> anyhow::Result> { let result: Option = self.conn.query_row( "SELECT device_role FROM peers WHERE node_id = ?1", params![node_id.as_slice()], |row| row.get(0), ).ok().flatten(); Ok(result) } /// Get the display name for a node, or None if no profile exists pub fn get_display_name(&self, node_id: &NodeId) -> anyhow::Result> { let result: Option = self.conn.query_row( "SELECT display_name FROM profiles WHERE node_id = ?1", params![node_id.as_slice()], |row| row.get(0), ).ok(); Ok(result) } // ---- Circles ---- pub fn create_circle(&self, name: &str) -> anyhow::Result<()> { let now = now_ms(); self.conn.execute( "INSERT OR IGNORE INTO circles (name, created_at) VALUES (?1, ?2)", params![name, now], )?; Ok(()) } pub fn delete_circle(&self, name: &str) -> anyhow::Result<()> { self.conn .execute("DELETE FROM circle_members WHERE circle_name = ?1", params![name])?; self.conn .execute("DELETE FROM circles WHERE name = ?1", params![name])?; Ok(()) } pub fn add_circle_member(&self, circle_name: &str, node_id: &NodeId) -> anyhow::Result<()> { let now = now_ms(); self.conn.execute( "INSERT OR IGNORE INTO circle_members (circle_name, node_id, added_at) VALUES (?1, ?2, ?3)", params![circle_name, node_id.as_slice(), now], )?; Ok(()) } pub fn remove_circle_member( &self, circle_name: &str, node_id: &NodeId, ) -> anyhow::Result<()> { self.conn.execute( "DELETE FROM circle_members WHERE circle_name = ?1 AND node_id = ?2", params![circle_name, node_id.as_slice()], )?; Ok(()) } pub fn get_circle_members(&self, name: &str) -> anyhow::Result> { let mut stmt = self .conn .prepare("SELECT node_id FROM circle_members WHERE circle_name = ?1")?; let rows = stmt.query_map(params![name], |row| { let bytes: Vec = row.get(0)?; Ok(bytes) })?; let mut members = Vec::new(); for row in rows { members.push(blob_to_nodeid(row?)?); } Ok(members) } pub fn list_circles(&self) -> anyhow::Result> { let mut stmt = self.conn.prepare("SELECT name, 
created_at FROM circles ORDER BY name")?;
        let mut circles = Vec::new();
        let mut rows = stmt.query([])?;
        while let Some(row) = rows.next()? {
            let name: String = row.get(0)?;
            let created_at = row.get::<_, i64>(1)? as u64;
            // One members query per circle (N+1); acceptable while circle counts stay small.
            let members = self.get_circle_members(&name)?;
            circles.push(Circle { name, members, created_at, });
        }
        Ok(circles)
    }

    // ---- Circle Profiles ----

    /// Upsert our own circle profile (plaintext, for local circles we admin)
    pub fn set_circle_profile(&self, profile: &CircleProfile) -> anyhow::Result<()> {
        let avatar_cid_slice = profile.avatar_cid.as_ref().map(|c| c.as_slice());
        self.conn.execute(
            "INSERT OR REPLACE INTO circle_profiles (author, circle_name, display_name, bio, avatar_cid, updated_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
            params![
                profile.author.as_slice(),
                profile.circle_name,
                profile.display_name,
                profile.bio,
                avatar_cid_slice,
                profile.updated_at as i64,
            ],
        )?;
        Ok(())
    }

    /// Store encrypted form alongside decrypted circle profile (for relay + remote profiles)
    pub fn store_remote_circle_profile(
        &self,
        author: &NodeId,
        circle_name: &str,
        cp: &CircleProfile,
        encrypted_payload: &str,
        wrapped_cek: &[u8],
        group_id: &GroupId,
        epoch: GroupEpoch,
    ) -> anyhow::Result<()> {
        let avatar_cid_slice = cp.avatar_cid.as_ref().map(|c| c.as_slice());
        self.conn.execute(
            "INSERT OR REPLACE INTO circle_profiles (author, circle_name, display_name, bio, avatar_cid, updated_at, encrypted_payload, wrapped_cek, group_id, epoch) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)",
            params![
                author.as_slice(),
                circle_name,
                cp.display_name,
                cp.bio,
                avatar_cid_slice,
                cp.updated_at as i64,
                encrypted_payload,
                wrapped_cek,
                group_id.as_slice(),
                epoch as i64,
            ],
        )?;
        Ok(())
    }

    /// Store only encrypted form (we don't have the group seed to decrypt)
    pub fn store_encrypted_circle_profile(
        &self,
        author: &NodeId,
        circle_name: &str,
        encrypted_payload: &str,
        wrapped_cek: &[u8],
        group_id: &GroupId,
        epoch: GroupEpoch,
        updated_at: u64,
    ) -> anyhow::Result<()> {
        // display_name/bio are stored as '' placeholders until the payload can be decrypted.
        self.conn.execute(
            "INSERT OR
             REPLACE INTO circle_profiles (author, circle_name, display_name, bio, updated_at, encrypted_payload, wrapped_cek, group_id, epoch) VALUES (?1, ?2, '', '', ?3, ?4, ?5, ?6, ?7)",
            params![
                author.as_slice(),
                circle_name,
                updated_at as i64,
                encrypted_payload,
                wrapped_cek,
                group_id.as_slice(),
                epoch as i64,
            ],
        )?;
        Ok(())
    }

    /// Get a circle profile by author + circle_name
    pub fn get_circle_profile(&self, author: &NodeId, circle_name: &str) -> anyhow::Result<Option<CircleProfile>> {
        let mut stmt = self.conn.prepare(
            "SELECT author, circle_name, display_name, bio, avatar_cid, updated_at FROM circle_profiles WHERE author = ?1 AND circle_name = ?2",
        )?;
        let mut rows = stmt.query(params![author.as_slice(), circle_name])?;
        if let Some(row) = rows.next()? {
            // A NULL or wrongly-sized avatar blob degrades to None rather than erroring.
            let avatar_cid = row.get::<_, Option<Vec<u8>>>(4).unwrap_or(None)
                .and_then(|b| <[u8; 32]>::try_from(b.as_slice()).ok());
            Ok(Some(CircleProfile {
                author: blob_to_nodeid(row.get(0)?)?,
                circle_name: row.get(1)?,
                display_name: row.get(2)?,
                bio: row.get(3)?,
                avatar_cid,
                updated_at: row.get::<_, i64>(5)? as u64,
            }))
        } else {
            Ok(None)
        }
    }

    /// List all circle profiles for a given author
    pub fn list_circle_profiles_for_author(&self, author: &NodeId) -> anyhow::Result<Vec<CircleProfile>> {
        let mut stmt = self.conn.prepare(
            "SELECT author, circle_name, display_name, bio, avatar_cid, updated_at FROM circle_profiles WHERE author = ?1 ORDER BY updated_at DESC",
        )?;
        let mut profiles = Vec::new();
        let mut rows = stmt.query(params![author.as_slice()])?;
        while let Some(row) = rows.next()? {
            let avatar_cid = row.get::<_, Option<Vec<u8>>>(4).unwrap_or(None)
                .and_then(|b| <[u8; 32]>::try_from(b.as_slice()).ok());
            profiles.push(CircleProfile {
                author: blob_to_nodeid(row.get(0)?)?,
                circle_name: row.get(1)?,
                display_name: row.get(2)?,
                bio: row.get(3)?,
                avatar_cid,
                updated_at: row.get::<_, i64>(5)? as u64,
            });
        }
        Ok(profiles)
    }

    /// Delete a circle profile
    pub fn delete_circle_profile(&self, author: &NodeId, circle_name: &str) -> anyhow::Result<()> {
        self.conn.execute(
            "DELETE FROM circle_profiles WHERE author = ?1 AND circle_name = ?2",
            params![author.as_slice(), circle_name],
        )?;
        Ok(())
    }

    /// Get the encrypted form of a circle profile for redistribution
    pub fn get_encrypted_circle_profile(
        &self,
        author: &NodeId,
        circle_name: &str,
    ) -> anyhow::Result<Option<(String, Vec<u8>, GroupId, GroupEpoch)>> {
        let mut stmt = self.conn.prepare(
            "SELECT encrypted_payload, wrapped_cek, group_id, epoch FROM circle_profiles WHERE author = ?1 AND circle_name = ?2 AND encrypted_payload IS NOT NULL",
        )?;
        let mut rows = stmt.query(params![author.as_slice(), circle_name])?;
        if let Some(row) = rows.next()? {
            let encrypted_payload: String = row.get(0)?;
            let wrapped_cek: Vec<u8> = row.get(1)?;
            let group_id = blob_to_nodeid(row.get::<_, Vec<u8>>(2)?)?;
            let epoch = row.get::<_, i64>(3)? as u64;
            Ok(Some((encrypted_payload, wrapped_cek, group_id, epoch)))
        } else {
            Ok(None)
        }
    }

    /// Resolve display info for a peer: check circle profiles the viewer belongs to,
    /// then fall back to public profile.
    /// Returns (display_name, bio, avatar_cid).
    pub fn resolve_display_for_peer(
        &self,
        author: &NodeId,
        viewer: &NodeId,
    ) -> anyhow::Result<(String, String, Option<[u8; 32]>)> {
        // Find circles where viewer is a member and author has a circle profile
        // NOTE(review): the join matches the author's circle_profiles to LOCAL circle_members
        // by circle_name only — a name collision between different authors' circles would
        // also match; confirm circle names are scoped appropriately.
        let mut stmt = self.conn.prepare(
            "SELECT cp.display_name, cp.bio, cp.avatar_cid, cp.updated_at FROM circle_profiles cp INNER JOIN circle_members cm ON cp.circle_name = cm.circle_name WHERE cp.author = ?1 AND cm.node_id = ?2 ORDER BY cp.updated_at DESC LIMIT 1",
        )?;
        let mut rows = stmt.query(params![author.as_slice(), viewer.as_slice()])?;
        if let Some(row) = rows.next()?
{
            let dn: String = row.get(0)?;
            // Only use circle profile if it has actual content
            if !dn.is_empty() {
                let bio: String = row.get(1)?;
                let avatar_cid = row.get::<_, Option<Vec<u8>>>(2).unwrap_or(None)
                    .and_then(|b| <[u8; 32]>::try_from(b.as_slice()).ok());
                return Ok((dn, bio, avatar_cid));
            }
        }
        // Fall back to public profile
        if let Some(profile) = self.get_profile(author)? {
            if profile.public_visible {
                return Ok((profile.display_name, profile.bio, profile.avatar_cid));
            }
            // Hidden profile — return empty
            return Ok((String::new(), String::new(), None));
        }
        Ok((String::new(), String::new(), None))
    }

    // ---- Group Keys ----

    /// Create or replace a group key record, optionally storing the secret seed.
    pub fn create_group_key(&self, record: &GroupKeyRecord, group_seed: Option<&[u8; 32]>) -> anyhow::Result<()> {
        self.conn.execute(
            "INSERT OR REPLACE INTO group_keys (group_id, circle_name, epoch, group_public_key, group_seed, admin, created_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
            params![
                record.group_id.as_slice(),
                record.circle_name,
                record.epoch as i64,
                record.group_public_key.as_slice(),
                group_seed.map(|s| s.as_slice()),
                record.admin.as_slice(),
                record.created_at as i64,
            ],
        )?;
        Ok(())
    }

    /// Build a GroupKeyRecord from the raw column tuple shared by the two getters below
    /// (deduplicates the mapping that was previously copy-pasted in both).
    fn group_key_record_from_parts(
        gid: Vec<u8>,
        circle_name: String,
        epoch: i64,
        gpk: Vec<u8>,
        admin: Vec<u8>,
        created_at: i64,
    ) -> anyhow::Result<GroupKeyRecord> {
        Ok(GroupKeyRecord {
            group_id: blob_to_nodeid(gid)?,
            circle_name,
            epoch: epoch as u64,
            group_public_key: <[u8; 32]>::try_from(gpk.as_slice())
                .map_err(|_| anyhow::anyhow!("invalid group public key"))?,
            admin: blob_to_nodeid(admin)?,
            created_at: created_at as u64,
        })
    }

    /// Look up a group key record by its group id. Returns None when absent.
    pub fn get_group_key(&self, group_id: &GroupId) -> anyhow::Result<Option<GroupKeyRecord>> {
        let result = self.conn.query_row(
            "SELECT group_id, circle_name, epoch, group_public_key, admin, created_at FROM group_keys WHERE group_id = ?1",
            params![group_id.as_slice()],
            |row| {
                let gid: Vec<u8> = row.get(0)?;
                let circle_name: String = row.get(1)?;
                let epoch: i64 = row.get(2)?;
                let gpk: Vec<u8> = row.get(3)?;
                let admin: Vec<u8> = row.get(4)?;
                let created_at: i64 = row.get(5)?;
                Ok((gid, circle_name, epoch, gpk, admin, created_at))
            },
        );
        match result {
            Ok((gid, circle_name, epoch, gpk, admin, created_at)) => {
                Self::group_key_record_from_parts(gid, circle_name, epoch, gpk, admin, created_at).map(Some)
            }
            Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
            Err(e) => Err(e.into()),
        }
    }

    /// Look up a group key record by the circle it protects. Returns None when absent.
    pub fn get_group_key_by_circle(&self, circle_name: &str) -> anyhow::Result<Option<GroupKeyRecord>> {
        let result = self.conn.query_row(
            "SELECT group_id, circle_name, epoch, group_public_key, admin, created_at FROM group_keys WHERE circle_name = ?1",
            params![circle_name],
            |row| {
                let gid: Vec<u8> = row.get(0)?;
                let circle_name: String = row.get(1)?;
                let epoch: i64 = row.get(2)?;
                let gpk: Vec<u8> = row.get(3)?;
                let admin: Vec<u8> = row.get(4)?;
                let created_at: i64 = row.get(5)?;
                Ok((gid, circle_name, epoch, gpk, admin, created_at))
            },
        );
        match result {
            Ok((gid, circle_name, epoch, gpk, admin, created_at)) => {
                Self::group_key_record_from_parts(gid, circle_name, epoch, gpk, admin, created_at).map(Some)
            }
            Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
            Err(e) => Err(e.into()),
        }
    }

    /// Advance a group to a new epoch: epoch, public key and (optional) seed are
    /// replaced in place on the existing row.
    pub fn update_group_epoch(
        &self,
        group_id: &GroupId,
        new_epoch: GroupEpoch,
        new_public_key: &[u8; 32],
        new_seed: Option<&[u8; 32]>,
    ) -> anyhow::Result<()> {
        self.conn.execute(
            "UPDATE group_keys SET epoch = ?1, group_public_key = ?2, group_seed = ?3 WHERE group_id = ?4",
            params![
                new_epoch as i64,
                new_public_key.as_slice(),
                new_seed.map(|s| s.as_slice()),
                group_id.as_slice(),
            ],
        )?;
        Ok(())
    }

    /// Remove a group entirely: member keys and seeds first, then the key record itself.
    pub fn delete_group_key(&self, group_id: &GroupId) -> anyhow::Result<()> {
        self.conn.execute("DELETE FROM group_member_keys WHERE group_id = ?1", params![group_id.as_slice()])?;
        self.conn.execute("DELETE FROM group_seeds WHERE group_id = ?1", params![group_id.as_slice()])?;
        self.conn.execute("DELETE FROM group_keys WHERE group_id = ?1", params![group_id.as_slice()])?;
        Ok(())
    }

    /// Store (upsert) one member's wrapped group key for an epoch.
    pub fn store_group_member_key(&self, group_id: &GroupId, mk: &GroupMemberKey) -> anyhow::Result<()> {
        self.conn.execute(
            "INSERT OR REPLACE INTO group_member_keys (group_id, member, epoch,
wrapped_group_key) VALUES (?1, ?2, ?3, ?4)",
            params![
                group_id.as_slice(),
                mk.member.as_slice(),
                mk.epoch as i64,
                mk.wrapped_group_key,
            ],
        )?;
        Ok(())
    }

    /// All wrapped member keys for a group at a specific epoch.
    pub fn get_group_member_keys(&self, group_id: &GroupId, epoch: GroupEpoch) -> anyhow::Result<Vec<GroupMemberKey>> {
        let mut stmt = self.conn.prepare(
            "SELECT member, epoch, wrapped_group_key FROM group_member_keys WHERE group_id = ?1 AND epoch = ?2",
        )?;
        let rows = stmt.query_map(params![group_id.as_slice(), epoch as i64], |row| {
            let member: Vec<u8> = row.get(0)?;
            let ep: i64 = row.get(1)?;
            let wrapped: Vec<u8> = row.get(2)?;
            Ok((member, ep, wrapped))
        })?;
        let mut keys = Vec::new();
        for row in rows {
            let (member, ep, wrapped) = row?;
            keys.push(GroupMemberKey {
                member: blob_to_nodeid(member)?,
                epoch: ep as u64,
                wrapped_group_key: wrapped,
            });
        }
        Ok(keys)
    }

    /// Our own wrapped group key for an epoch (None if we were never issued one).
    pub fn get_my_group_member_key(
        &self,
        group_id: &GroupId,
        epoch: GroupEpoch,
        our_node_id: &NodeId,
    ) -> anyhow::Result<Option<GroupMemberKey>> {
        let result = self.conn.query_row(
            "SELECT member, epoch, wrapped_group_key FROM group_member_keys WHERE group_id = ?1 AND epoch = ?2 AND member = ?3",
            params![group_id.as_slice(), epoch as i64, our_node_id.as_slice()],
            |row| {
                let member: Vec<u8> = row.get(0)?;
                let ep: i64 = row.get(1)?;
                let wrapped: Vec<u8> = row.get(2)?;
                Ok((member, ep, wrapped))
            },
        );
        match result {
            Ok((member, ep, wrapped)) => Ok(Some(GroupMemberKey {
                member: blob_to_nodeid(member)?,
                epoch: ep as u64,
                wrapped_group_key: wrapped,
            })),
            Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
            Err(e) => Err(e.into()),
        }
    }

    /// Store (upsert) the raw group seed for one epoch.
    pub fn store_group_seed(&self, group_id: &GroupId, epoch: GroupEpoch, seed: &[u8; 32]) -> anyhow::Result<()> {
        self.conn.execute(
            "INSERT OR REPLACE INTO group_seeds (group_id, epoch, group_seed) VALUES (?1, ?2, ?3)",
            params![group_id.as_slice(), epoch as i64, seed.as_slice()],
        )?;
        Ok(())
    }

    /// Fetch the group seed for one epoch; errors if the stored blob is not 32 bytes.
    pub fn get_group_seed(&self, group_id: &GroupId, epoch: GroupEpoch) -> anyhow::Result<Option<[u8; 32]>> {
        let result = self.conn.query_row(
            "SELECT group_seed FROM group_seeds WHERE group_id = ?1 AND epoch = ?2",
            params![group_id.as_slice(), epoch as i64],
            |row| row.get::<_, Vec<u8>>(0),
        );
        match result {
            Ok(bytes) => {
                let seed = <[u8; 32]>::try_from(bytes.as_slice())
                    .map_err(|_| anyhow::anyhow!("invalid group seed"))?;
                Ok(Some(seed))
            }
            Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
            Err(e) => Err(e.into()),
        }
    }

    /// Drop every stored seed (all epochs) for a group.
    pub fn delete_group_seeds(&self, group_id: &GroupId) -> anyhow::Result<()> {
        self.conn.execute("DELETE FROM group_seeds WHERE group_id = ?1", params![group_id.as_slice()])?;
        Ok(())
    }

    /// Get all group seeds we have (for batch decrypt). Returns (group_id, epoch, seed, public_key).
    pub fn get_all_group_seeds(&self) -> anyhow::Result<Vec<(GroupId, GroupEpoch, [u8; 32], [u8; 32])>> {
        let mut stmt = self.conn.prepare(
            "SELECT gs.group_id, gs.epoch, gs.group_seed, gk.group_public_key FROM group_seeds gs INNER JOIN group_keys gk ON gs.group_id = gk.group_id",
        )?;
        let rows = stmt.query_map([], |row| {
            let gid: Vec<u8> = row.get(0)?;
            let epoch: i64 = row.get(1)?;
            let seed: Vec<u8> = row.get(2)?;
            let pubkey: Vec<u8> = row.get(3)?;
            Ok((gid, epoch, seed, pubkey))
        })?;
        let mut results = Vec::new();
        for row in rows {
            let (gid, epoch, seed, pubkey) = row?;
            let group_id = blob_to_nodeid(gid)?;
            let seed_arr = <[u8; 32]>::try_from(seed.as_slice())
                .map_err(|_| anyhow::anyhow!("invalid seed"))?;
            let pubkey_arr = <[u8; 32]>::try_from(pubkey.as_slice())
                .map_err(|_| anyhow::anyhow!("invalid pubkey"))?;
            results.push((group_id, epoch as u64, seed_arr, pubkey_arr));
        }
        Ok(results)
    }

    /// Get all group seeds we have, including for older epochs (where pubkey comes from current record).
    /// For decrypting older-epoch posts, we need the pubkey that was current at that epoch.
    /// We store the seed per-epoch but not the pubkey per-epoch, so for now we use the current pubkey
    /// which only works when the group_id matches. For rotated keys we need the epoch's pubkey.
    /// Returns HashMap<(GroupId, GroupEpoch), (seed, pubkey)>.
    pub fn get_all_group_seeds_map(&self) -> anyhow::Result<std::collections::HashMap<(GroupId, GroupEpoch), ([u8; 32], [u8; 32])>> {
        // For each group_seed entry, join with group_keys to get pubkey.
// But after rotation, the pubkey changes. We need the pubkey for that specific epoch.
        // Since we store group_seed per epoch and the DH is seed×pubkey, we need the matching pubkey.
        // After rotation, the new seed has a new pubkey. The old seed had the old pubkey.
        // We derive pubkey from seed: SigningKey::from_bytes(seed).verifying_key().to_bytes()
        let mut stmt = self.conn.prepare(
            "SELECT group_id, epoch, group_seed FROM group_seeds",
        )?;
        let rows = stmt.query_map([], |row| {
            let gid: Vec<u8> = row.get(0)?;
            let epoch: i64 = row.get(1)?;
            let seed: Vec<u8> = row.get(2)?;
            Ok((gid, epoch, seed))
        })?;
        let mut map = std::collections::HashMap::new();
        for row in rows {
            let (gid, epoch, seed) = row?;
            let group_id = blob_to_nodeid(gid)?;
            let seed_arr = <[u8; 32]>::try_from(seed.as_slice())
                .map_err(|_| anyhow::anyhow!("invalid seed"))?;
            // Derive pubkey from seed
            let signing_key = ed25519_dalek::SigningKey::from_bytes(&seed_arr);
            let pubkey = signing_key.verifying_key().to_bytes();
            map.insert((group_id, epoch as u64), (seed_arr, pubkey));
        }
        Ok(map)
    }

    /// Get all group member sets: group_id → set of member NodeIds.
    pub fn get_all_group_members(&self) -> anyhow::Result<std::collections::HashMap<GroupId, std::collections::HashSet<NodeId>>> {
        // Get group_id → circle_name mapping, then circle_name → members
        let mut stmt = self.conn.prepare("SELECT group_id, circle_name FROM group_keys")?;
        let rows = stmt.query_map([], |row| {
            let gid: Vec<u8> = row.get(0)?;
            let circle_name: String = row.get(1)?;
            Ok((gid, circle_name))
        })?;
        let mut map = std::collections::HashMap::new();
        for row in rows {
            let (gid, circle_name) = row?;
            let group_id = blob_to_nodeid(gid)?;
            // One members query per group (N+1); fine for small group counts.
            let members = self.get_circle_members(&circle_name)?;
            let member_set: std::collections::HashSet<NodeId> = members.into_iter().collect();
            map.insert(group_id, member_set);
        }
        Ok(map)
    }

    // ---- Delete records ----

    /// Store a delete record. Returns true if it was new (not already stored).
    pub fn store_delete(&self, record: &DeleteRecord) -> anyhow::Result<bool> {
        // INSERT OR IGNORE: re-storing a known record is a no-op and reports false.
        let inserted = self.conn.execute(
            "INSERT OR IGNORE INTO deleted_posts (post_id, author, deleted_at, signature) VALUES (?1, ?2, ?3, ?4)",
            params![
                record.post_id.as_slice(),
                record.author.as_slice(),
                record.timestamp_ms as i64,
                record.signature,
            ],
        )?;
        Ok(inserted > 0)
    }

    /// Apply a delete: remove the post from the posts table if author matches,
    /// and clean up associated downstream/upstream/engagement tracking rows.
    pub fn apply_delete(&self, record: &DeleteRecord) -> anyhow::Result<bool> {
        // The author predicate means a forged delete for someone else's post removes nothing.
        let deleted = self.conn.execute(
            "DELETE FROM posts WHERE id = ?1 AND author = ?2",
            params![record.post_id.as_slice(), record.author.as_slice()],
        )?;
        if deleted > 0 {
            self.conn.execute("DELETE FROM file_holders WHERE file_id = ?1", params![record.post_id.as_slice()])?;
            self.conn.execute("DELETE FROM post_recipients WHERE post_id = ?1", params![record.post_id.as_slice()])?;
            self.conn.execute("DELETE FROM seen_engagement WHERE post_id = ?1", params![record.post_id.as_slice()])?;
        }
        Ok(deleted > 0)
    }

    /// Check if a post has been deleted.
    pub fn is_deleted(&self, post_id: &PostId) -> anyhow::Result<bool> {
        let count: i64 = self.conn.query_row(
            "SELECT COUNT(*) FROM deleted_posts WHERE post_id = ?1",
            params![post_id.as_slice()],
            |row| row.get(0),
        )?;
        Ok(count > 0)
    }

    /// List all delete records (for sync).
    pub fn list_delete_records(&self) -> anyhow::Result<Vec<DeleteRecord>> {
        let mut stmt = self.conn.prepare(
            "SELECT post_id, author, deleted_at, signature FROM deleted_posts",
        )?;
        let mut records = Vec::new();
        let mut rows = stmt.query([])?;
        while let Some(row) = rows.next()? {
            records.push(DeleteRecord {
                post_id: blob_to_postid(row.get(0)?)?,
                author: blob_to_nodeid(row.get(1)?)?,
                timestamp_ms: row.get::<_, i64>(2)? as u64,
                signature: row.get(3)?,
            });
        }
        Ok(records)
    }

    // ---- Visibility updates ----

    /// Update a post's visibility (e.g., after rewrap or re-encrypt).
pub fn update_post_visibility(
        &self,
        post_id: &PostId,
        new_visibility: &PostVisibility,
    ) -> anyhow::Result<bool> {
        let vis_json = serde_json::to_string(new_visibility)?;
        let updated = self.conn.execute(
            "UPDATE posts SET visibility = ?1 WHERE id = ?2",
            params![vis_json, post_id.as_slice()],
        )?;
        if updated > 0 {
            // Rebuild recipient index from new visibility
            self.conn.execute(
                "DELETE FROM post_recipients WHERE post_id = ?1",
                params![post_id.as_slice()],
            )?;
            self.index_post_recipients(post_id, new_visibility)?;
        }
        Ok(updated > 0)
    }

    // ---- Posts with intent ----

    /// Store a post with visibility and the original intent (for circle lookups).
    /// Returns true if the post was newly inserted (INSERT OR IGNORE dedupes by id).
    pub fn store_post_with_intent(
        &self,
        id: &PostId,
        post: &Post,
        visibility: &PostVisibility,
        intent: &VisibilityIntent,
    ) -> anyhow::Result<bool> {
        let attachments_json = serde_json::to_string(&post.attachments)?;
        let visibility_json = serde_json::to_string(visibility)?;
        let intent_json = serde_json::to_string(intent)?;
        let inserted = self.conn.execute(
            "INSERT OR IGNORE INTO posts (id, author, content, attachments, timestamp_ms, visibility, visibility_intent) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
            params![
                id.as_slice(),
                post.author.as_slice(),
                post.content,
                attachments_json,
                post.timestamp_ms as i64,
                visibility_json,
                intent_json,
            ],
        )?;
        if inserted > 0 {
            // Only index recipients for rows we actually inserted.
            self.index_post_recipients(id, visibility)?;
        }
        Ok(inserted > 0)
    }

    /// Find posts authored by us that were intended for a specific circle.
    pub fn find_posts_by_circle_intent(
        &self,
        circle_name: &str,
        our_node_id: &NodeId,
    ) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
        // Use LIKE to find posts whose visibility_intent JSON contains the circle name.
        // The serialized form is {"Circle":"name"} so we search for that pattern.
        // NOTE(review): circle_name is embedded unescaped — a name containing LIKE
        // metacharacters (% or _) or a double-quote would over/under-match; confirm
        // circle names are restricted upstream.
        let pattern = format!("%\"Circle\":\"{}\"%", circle_name);
        let mut stmt = self.conn.prepare(
            "SELECT id, author, content, attachments, timestamp_ms, visibility FROM posts WHERE author = ?1 AND visibility_intent LIKE ?2",
        )?;
        let mut posts = Vec::new();
        let mut rows = stmt.query(params![our_node_id.as_slice(), pattern])?;
        while let Some(row) = rows.next()? {
            // Malformed JSON degrades to empty attachments / default visibility
            // rather than failing the whole listing.
            let attachments: Vec<Attachment> = serde_json::from_str(&row.get::<_, String>(3)?).unwrap_or_default();
            let visibility: PostVisibility = serde_json::from_str(&row.get::<_, String>(5)?).unwrap_or_default();
            posts.push((
                blob_to_postid(row.get(0)?)?,
                Post {
                    author: blob_to_nodeid(row.get(1)?)?,
                    content: row.get(2)?,
                    attachments,
                    timestamp_ms: row.get::<_, i64>(4)? as u64,
                },
                visibility,
            ));
        }
        Ok(posts)
    }

    // ---- Replica tracking ----

    /// Record that a peer has a copy of a post (UPSERT).
    pub fn record_replica(&self, post_id: &PostId, node_id: &NodeId) -> anyhow::Result<()> {
        let now = now_ms();
        self.conn.execute(
            "INSERT INTO post_replicas (post_id, node_id, last_confirmed_ms) VALUES (?1, ?2, ?3) ON CONFLICT(post_id, node_id) DO UPDATE SET last_confirmed_ms = ?3",
            params![post_id.as_slice(), node_id.as_slice(), now],
        )?;
        Ok(())
    }

    /// Count how many peers have a replica of a post (excluding stale ones).
    pub fn get_replica_count(&self, post_id: &PostId, staleness_ms: u64) -> anyhow::Result<usize> {
        let cutoff = now_ms() - staleness_ms as i64;
        let count: i64 = self.conn.query_row(
            "SELECT COUNT(*) FROM post_replicas WHERE post_id = ?1 AND last_confirmed_ms >= ?2",
            params![post_id.as_slice(), cutoff],
            |row| row.get(0),
        )?;
        Ok(count as usize)
    }

    /// Get node IDs of peers that have replicas of a post (within staleness window).
pub fn get_replica_peers(&self, post_id: &PostId, staleness_ms: u64) -> anyhow::Result<Vec<NodeId>> {
        let cutoff = now_ms() - staleness_ms as i64;
        let mut stmt = self.conn.prepare(
            "SELECT node_id FROM post_replicas WHERE post_id = ?1 AND last_confirmed_ms >= ?2"
        )?;
        // Rows that fail to read, or whose blob is not exactly 32 bytes, are silently skipped.
        let peers: Vec<NodeId> = stmt.query_map(params![post_id.as_slice(), cutoff], |row| {
            let bytes: Vec<u8> = row.get(0)?;
            Ok(bytes)
        })?
        .filter_map(|r| r.ok())
        .filter_map(|bytes| bytes.try_into().ok())
        .collect();
        Ok(peers)
    }

    /// Get a summary of redundancy across all our authored posts.
    /// Returns (total, zero_replicas, one_replica, two_plus_replicas).
    pub fn get_redundancy_summary(
        &self,
        our_node_id: &NodeId,
        staleness_ms: u64,
    ) -> anyhow::Result<(usize, usize, usize, usize)> {
        let cutoff = now_ms() - staleness_ms as i64;
        let mut stmt = self.conn.prepare(
            "SELECT p.id FROM posts p WHERE p.author = ?1",
        )?;
        let post_ids: Vec<PostId> = {
            let mut rows = stmt.query(params![our_node_id.as_slice()])?;
            let mut ids = Vec::new();
            while let Some(row) = rows.next()? {
                ids.push(blob_to_postid(row.get(0)?)?);
            }
            ids
        };
        let total = post_ids.len();
        let mut zero = 0usize;
        let mut one = 0usize;
        let mut two_plus = 0usize;
        // One COUNT query per authored post (N+1); a GROUP BY join would do this in
        // one pass if this ever shows up in profiles.
        for pid in &post_ids {
            let count: i64 = self.conn.query_row(
                "SELECT COUNT(*) FROM post_replicas WHERE post_id = ?1 AND last_confirmed_ms >= ?2",
                params![pid.as_slice(), cutoff],
                |row| row.get(0),
            )?;
            match count {
                0 => zero += 1,
                1 => one += 1,
                _ => two_plus += 1,
            }
        }
        Ok((total, zero, one, two_plus))
    }

    // ---- Peer Neighbors (2-hop table) ----

    /// Store a neighbor relationship: peer_id reported knowing neighbor_id.
    pub fn store_peer_neighbor(
        &self,
        peer_id: &NodeId,
        neighbor_id: &NodeId,
        is_anchor: bool,
    ) -> anyhow::Result<()> {
        let now = now_ms();
        self.conn.execute(
            "INSERT INTO peer_neighbors (peer_id, neighbor_id, is_anchor, reported_at) VALUES (?1, ?2, ?3, ?4) ON CONFLICT(peer_id, neighbor_id) DO UPDATE SET is_anchor = ?3, reported_at = ?4",
            params![peer_id.as_slice(), neighbor_id.as_slice(), is_anchor as i32, now],
        )?;
        Ok(())
    }

    /// Store all neighbors reported by a peer during gossip.
    /// Returns the number of entries actually stored (self and the reporter are skipped).
    pub fn store_peer_neighbors(
        &self,
        reporting_peer: &NodeId,
        neighbors: &[GossipPeerInfo],
        our_node_id: &NodeId,
    ) -> anyhow::Result<usize> {
        let mut count = 0;
        for gp in neighbors {
            if &gp.node_id == our_node_id || &gp.node_id == reporting_peer {
                continue;
            }
            self.store_peer_neighbor(reporting_peer, &gp.node_id, gp.is_anchor)?;
            count += 1;
        }
        Ok(count)
    }

    /// Prune neighbor entries older than the given max age in milliseconds.
    pub fn prune_stale_neighbors(&self, max_age_ms: i64) -> anyhow::Result<usize> {
        let cutoff = now_ms() - max_age_ms;
        let deleted = self.conn.execute(
            "DELETE FROM peer_neighbors WHERE reported_at < ?1",
            params![cutoff],
        )?;
        Ok(deleted)
    }

    /// Find which direct peers reported a given neighbor (for address resolution).
    pub fn list_peers_with_neighbor(&self, neighbor_id: &NodeId) -> anyhow::Result<Vec<NodeId>> {
        let mut stmt = self.conn.prepare(
            "SELECT peer_id FROM peer_neighbors WHERE neighbor_id = ?1 ORDER BY reported_at DESC",
        )?;
        let mut result = Vec::new();
        let mut rows = stmt.query(params![neighbor_id.as_slice()])?;
        while let Some(row) = rows.next()? {
            result.push(blob_to_nodeid(row.get(0)?)?);
        }
        Ok(result)
    }

    /// Lookup a node in the 2-hop table. Returns true if found as a neighbor of any direct peer.
    pub fn lookup_in_two_hop(&self, target: &NodeId) -> anyhow::Result<bool> {
        let count: i64 = self.conn.query_row(
            "SELECT COUNT(*) FROM peer_neighbors WHERE neighbor_id = ?1",
            params![target.as_slice()],
            |row| row.get(0),
        )?;
        Ok(count > 0)
    }

    /// Get all neighbors reported by a specific peer.
pub fn get_peer_neighbor_ids(&self, peer_id: &NodeId) -> anyhow::Result<Vec<NodeId>> {
        let mut stmt = self.conn.prepare(
            "SELECT neighbor_id FROM peer_neighbors WHERE peer_id = ?1",
        )?;
        let mut result = Vec::new();
        let mut rows = stmt.query(params![peer_id.as_slice()])?;
        while let Some(row) = rows.next()? {
            result.push(blob_to_nodeid(row.get(0)?)?);
        }
        Ok(result)
    }

    /// Get the full 2-hop set: all unique neighbor IDs across all reporting peers.
    pub fn get_two_hop_set(&self) -> anyhow::Result<std::collections::HashSet<NodeId>> {
        let mut stmt = self.conn.prepare(
            "SELECT DISTINCT neighbor_id FROM peer_neighbors",
        )?;
        let mut result = std::collections::HashSet::new();
        let mut rows = stmt.query([])?;
        while let Some(row) = rows.next()? {
            result.insert(blob_to_nodeid(row.get(0)?)?);
        }
        Ok(result)
    }

    // ---- Wide Peers ----

    /// Set or clear the wide_peer flag on a peer.
    /// No-op if the peer row does not exist (UPDATE matches nothing).
    pub fn set_wide_peer(&self, node_id: &NodeId, is_wide: bool) -> anyhow::Result<()> {
        self.conn.execute(
            "UPDATE peers SET is_wide_peer = ?1 WHERE node_id = ?2",
            params![is_wide as i32, node_id.as_slice()],
        )?;
        Ok(())
    }

    /// Clear all wide_peer flags.
    pub fn clear_all_wide_peers(&self) -> anyhow::Result<()> {
        self.conn.execute("UPDATE peers SET is_wide_peer = 0", [])?;
        Ok(())
    }

    /// List peers marked as wide peers.
    pub fn list_wide_peers(&self) -> anyhow::Result<Vec<PeerRecord>> {
        let mut stmt = self.conn.prepare(
            "SELECT node_id, addresses, last_seen, introduced_by, is_anchor, first_seen FROM peers WHERE is_wide_peer = 1 ORDER BY last_seen DESC",
        )?;
        let mut records = Vec::new();
        let mut rows = stmt.query([])?;
        while let Some(row) = rows.next()? {
            records.push(row_to_peer_record(row)?);
        }
        Ok(records)
    }

    /// Compute diversity scores for all direct peers.
    /// Returns (node_id, score) pairs sorted by score descending.
    /// Score = |peer.neighbors − our_two_hop_set| / |peer.neighbors|
    pub fn compute_peer_diversity_scores(
        &self,
        our_two_hop_set: &std::collections::HashSet<NodeId>,
    ) -> anyhow::Result<Vec<(NodeId, f64)>> {
        let peers = self.list_peers()?;
        let mut scores = Vec::new();
        for peer_id in peers {
            let neighbors = self.get_peer_neighbor_ids(&peer_id)?;
            // Peers that reported nothing get no score at all (not a 0.0 entry).
            if neighbors.is_empty() {
                continue;
            }
            let neighbor_set: std::collections::HashSet<NodeId> = neighbors.into_iter().collect();
            let unique_count = neighbor_set.difference(our_two_hop_set).count();
            let score = unique_count as f64 / neighbor_set.len() as f64;
            scores.push((peer_id, score));
        }
        // partial_cmp handles the (impossible here) NaN case by treating it as Equal.
        scores.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
        Ok(scores)
    }

    // ---- Worm cooldowns ----

    /// Record that a worm search for a target failed (for cooldown).
    pub fn record_worm_miss(&self, target: &NodeId) -> anyhow::Result<()> {
        let now = now_ms();
        self.conn.execute(
            "INSERT INTO worm_cooldowns (target_id, failed_at) VALUES (?1, ?2) ON CONFLICT(target_id) DO UPDATE SET failed_at = ?2",
            params![target.as_slice(), now],
        )?;
        Ok(())
    }

    /// Check if a target is on worm cooldown (failed within cooldown_ms).
    pub fn is_worm_cooldown(&self, target: &NodeId, cooldown_ms: i64) -> anyhow::Result<bool> {
        let cutoff = now_ms() - cooldown_ms;
        let count: i64 = self.conn.query_row(
            "SELECT COUNT(*) FROM worm_cooldowns WHERE target_id = ?1 AND failed_at > ?2",
            params![target.as_slice(), cutoff],
            |row| row.get(0),
        )?;
        Ok(count > 0)
    }

    /// Clear expired worm cooldowns.
    pub fn prune_worm_cooldowns(&self, cooldown_ms: i64) -> anyhow::Result<usize> {
        let cutoff = now_ms() - cooldown_ms;
        let deleted = self.conn.execute(
            "DELETE FROM worm_cooldowns WHERE failed_at < ?1",
            params![cutoff],
        )?;
        Ok(deleted)
    }

    // ---- Relay cooldowns ----

    /// Record that a relay introduction for a target failed (for cooldown).
pub fn record_relay_miss(&self, target: &NodeId) -> anyhow::Result<()> {
        let now = now_ms();
        self.conn.execute(
            "INSERT INTO relay_cooldowns (target_id, failed_at) VALUES (?1, ?2) ON CONFLICT(target_id) DO UPDATE SET failed_at = ?2",
            params![target.as_slice(), now],
        )?;
        Ok(())
    }

    /// Check if a target is on relay cooldown (failed within cooldown_ms).
    pub fn is_relay_cooldown(&self, target: &NodeId, cooldown_ms: i64) -> anyhow::Result<bool> {
        let cutoff = now_ms() - cooldown_ms;
        let count: i64 = self.conn.query_row(
            "SELECT COUNT(*) FROM relay_cooldowns WHERE target_id = ?1 AND failed_at > ?2",
            params![target.as_slice(), cutoff],
            |row| row.get(0),
        )?;
        Ok(count > 0)
    }

    // ---- Reach: N2/N3 ----

    /// Replace a peer's entire N1 set in reachable_n2 (their N1 share → our N2).
    // NOTE(review): DELETE + per-row INSERT is not wrapped in a transaction — an
    // interruption mid-way leaves a partial set; confirm whether that is acceptable
    // (the startup sweep clear_all_n2_n3 may make it so).
    pub fn set_peer_n1(&self, reporter: &NodeId, node_ids: &[NodeId]) -> anyhow::Result<()> {
        let now = now_ms();
        self.conn.execute(
            "DELETE FROM reachable_n2 WHERE reporter_node_id = ?1",
            params![reporter.as_slice()],
        )?;
        let mut stmt = self.conn.prepare(
            "INSERT OR REPLACE INTO reachable_n2 (reporter_node_id, reachable_node_id, updated_at) VALUES (?1, ?2, ?3)",
        )?;
        for nid in node_ids {
            stmt.execute(params![reporter.as_slice(), nid.as_slice(), now])?;
        }
        Ok(())
    }

    /// Add NodeIds to a peer's N1 set in reachable_n2.
    pub fn add_peer_n1(&self, reporter: &NodeId, node_ids: &[NodeId]) -> anyhow::Result<()> {
        let now = now_ms();
        let mut stmt = self.conn.prepare(
            "INSERT OR REPLACE INTO reachable_n2 (reporter_node_id, reachable_node_id, updated_at) VALUES (?1, ?2, ?3)",
        )?;
        for nid in node_ids {
            stmt.execute(params![reporter.as_slice(), nid.as_slice(), now])?;
        }
        Ok(())
    }

    /// Remove NodeIds from a peer's N1 set in reachable_n2.
    pub fn remove_peer_n1(&self, reporter: &NodeId, node_ids: &[NodeId]) -> anyhow::Result<()> {
        let mut stmt = self.conn.prepare(
            "DELETE FROM reachable_n2 WHERE reporter_node_id = ?1 AND reachable_node_id = ?2",
        )?;
        for nid in node_ids {
            stmt.execute(params![reporter.as_slice(), nid.as_slice()])?;
        }
        Ok(())
    }

    /// Remove all N2 entries from a specific reporter (on disconnect).
    pub fn clear_peer_n2(&self, reporter: &NodeId) -> anyhow::Result<usize> {
        let deleted = self.conn.execute(
            "DELETE FROM reachable_n2 WHERE reporter_node_id = ?1",
            params![reporter.as_slice()],
        )?;
        Ok(deleted)
    }

    /// Replace a peer's N2-reported entries in reachable_n3 (their N2 share → our N3).
    pub fn set_peer_n2(&self, reporter: &NodeId, node_ids: &[NodeId]) -> anyhow::Result<()> {
        let now = now_ms();
        self.conn.execute(
            "DELETE FROM reachable_n3 WHERE reporter_node_id = ?1",
            params![reporter.as_slice()],
        )?;
        let mut stmt = self.conn.prepare(
            "INSERT OR REPLACE INTO reachable_n3 (reporter_node_id, reachable_node_id, updated_at) VALUES (?1, ?2, ?3)",
        )?;
        for nid in node_ids {
            stmt.execute(params![reporter.as_slice(), nid.as_slice(), now])?;
        }
        Ok(())
    }

    /// Add to N3 from a peer's N2 changes.
    pub fn add_peer_n2(&self, reporter: &NodeId, node_ids: &[NodeId]) -> anyhow::Result<()> {
        let now = now_ms();
        let mut stmt = self.conn.prepare(
            "INSERT OR REPLACE INTO reachable_n3 (reporter_node_id, reachable_node_id, updated_at) VALUES (?1, ?2, ?3)",
        )?;
        for nid in node_ids {
            stmt.execute(params![reporter.as_slice(), nid.as_slice(), now])?;
        }
        Ok(())
    }

    /// Remove from N3.
    pub fn remove_peer_n2(&self, reporter: &NodeId, node_ids: &[NodeId]) -> anyhow::Result<()> {
        let mut stmt = self.conn.prepare(
            "DELETE FROM reachable_n3 WHERE reporter_node_id = ?1 AND reachable_node_id = ?2",
        )?;
        for nid in node_ids {
            stmt.execute(params![reporter.as_slice(), nid.as_slice()])?;
        }
        Ok(())
    }

    /// Remove all N3 entries from a specific reporter.
pub fn clear_peer_n3(&self, reporter: &NodeId) -> anyhow::Result { let deleted = self.conn.execute( "DELETE FROM reachable_n3 WHERE reporter_node_id = ?1", params![reporter.as_slice()], )?; Ok(deleted) } /// Which reporters have this node in N2? pub fn find_in_n2(&self, node_id: &NodeId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT reporter_node_id FROM reachable_n2 WHERE reachable_node_id = ?1", )?; let mut result = Vec::new(); let mut rows = stmt.query(params![node_id.as_slice()])?; while let Some(row) = rows.next()? { result.push(blob_to_nodeid(row.get(0)?)?); } Ok(result) } /// Which reporters have this node in N3? pub fn find_in_n3(&self, node_id: &NodeId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT reporter_node_id FROM reachable_n3 WHERE reachable_node_id = ?1", )?; let mut result = Vec::new(); let mut rows = stmt.query(params![node_id.as_slice()])?; while let Some(row) = rows.next()? { result.push(blob_to_nodeid(row.get(0)?)?); } Ok(result) } /// Batch lookup: find any of the given node IDs in N2 or N3. /// Returns Vec<(target, reporter, level)> where level is 2 or 3, sorted by level ASC. pub fn find_any_in_n2_n3(&self, ids: &[NodeId]) -> anyhow::Result> { if ids.is_empty() { return Ok(vec![]); } let mut results = Vec::new(); // Check N2 first (closer) for id in ids { let reporters = self.find_in_n2(id)?; for r in reporters { results.push((*id, r, 2u8)); } } // Then N3 for id in ids { let reporters = self.find_in_n3(id)?; for r in reporters { results.push((*id, r, 3u8)); } } results.sort_by_key(|&(_, _, level)| level); Ok(results) } /// All NodeIds this peer can reach (from N2 table). pub fn list_n2_for_reporter(&self, reporter: &NodeId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT reachable_node_id FROM reachable_n2 WHERE reporter_node_id = ?1", )?; let mut result = Vec::new(); let mut rows = stmt.query(params![reporter.as_slice()])?; while let Some(row) = rows.next()? 
{ result.push(blob_to_nodeid(row.get(0)?)?); } Ok(result) } /// Build N1 share: merge mesh peers (connections) + social contacts NodeIds (deduplicated). pub fn build_n1_share(&self) -> anyhow::Result> { let mut ids = std::collections::HashSet::new(); // Add mesh peers (connections) let mesh_peers = self.list_mesh_peers()?; for (nid, _, _) in mesh_peers { ids.insert(nid); } // Add only ONLINE social routes (not disconnected) let routes = self.list_social_routes()?; for route in routes { if route.status == crate::types::SocialStatus::Online { ids.insert(route.node_id); } } Ok(ids.into_iter().collect()) } /// Build N2 share (reach): deduplicated unique NodeIds from all N2 entries. pub fn build_n2_share(&self) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT DISTINCT reachable_node_id FROM reachable_n2", )?; let mut result = Vec::new(); let mut rows = stmt.query([])?; while let Some(row) = rows.next()? { result.push(blob_to_nodeid(row.get(0)?)?); } Ok(result) } /// Count distinct reachable NodeIds in the N2 table. pub fn count_distinct_n2(&self) -> anyhow::Result { let count: i64 = self.conn.query_row( "SELECT COUNT(DISTINCT reachable_node_id) FROM reachable_n2", [], |row| row.get(0), )?; Ok(count as usize) } /// Count distinct reachable NodeIds in the N3 table. pub fn count_distinct_n3(&self) -> anyhow::Result { let count: i64 = self.conn.query_row( "SELECT COUNT(DISTINCT reachable_node_id) FROM reachable_n3", [], |row| row.get(0), )?; Ok(count as usize) } /// List distinct reachable NodeIds in the N3 table. pub fn list_distinct_n3(&self) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT DISTINCT reachable_node_id FROM reachable_n3", )?; let mut result = Vec::new(); let mut rows = stmt.query([])?; while let Some(row) = rows.next()? { result.push(blob_to_nodeid(row.get(0)?)?); } Ok(result) } /// Diversity score: how many unique NodeIds does this reporter contribute /// that no other reporter provides? 
pub fn count_unique_n2_for_reporter( &self, reporter: &NodeId, exclude_reporters: &[NodeId], ) -> anyhow::Result { // Get this reporter's N2 set let reporter_set: std::collections::HashSet = self.list_n2_for_reporter(reporter)?.into_iter().collect(); if reporter_set.is_empty() { return Ok(0); } // Get all other reporters' N2 sets (excluding specified reporters) let exclude_set: std::collections::HashSet = exclude_reporters.iter().copied().collect(); let mut other_nodes = std::collections::HashSet::new(); let mut stmt = self.conn.prepare( "SELECT reachable_node_id FROM reachable_n2 WHERE reporter_node_id != ?1", )?; let mut rows = stmt.query(params![reporter.as_slice()])?; while let Some(row) = rows.next()? { let rn: Vec = row.get(0)?; // Check if the reporter of this entry is excluded // (simplified: we just exclude the reporter itself) if let Ok(nid) = blob_to_nodeid(rn) { other_nodes.insert(nid); } } let unique = reporter_set.difference(&other_nodes).count(); let _ = exclude_set; // used for future filtering Ok(unique) } /// Remove stale N2/N3 entries. /// Clear ALL N2/N3 entries (startup sweep after unclean shutdown). pub fn clear_all_n2_n3(&self) -> anyhow::Result { let d1 = self.conn.execute("DELETE FROM reachable_n2", [])?; let d2 = self.conn.execute("DELETE FROM reachable_n3", [])?; Ok(d1 + d2) } /// Clear ALL mesh_peers entries (no connections exist at startup). pub fn clear_all_mesh_peers(&self) -> anyhow::Result { let deleted = self.conn.execute("DELETE FROM mesh_peers", [])?; Ok(deleted) } pub fn prune_n2_n3(&self, max_age_ms: u64) -> anyhow::Result { let cutoff = now_ms() - max_age_ms as i64; let d1 = self.conn.execute( "DELETE FROM reachable_n2 WHERE updated_at < ?1", params![cutoff], )?; let d2 = self.conn.execute( "DELETE FROM reachable_n3 WHERE updated_at < ?1", params![cutoff], )?; Ok(d1 + d2) } /// Score all N2 candidates for growth loop diversity selection. /// Returns (node_id, reporter_count, in_n3) for each unique N2 candidate. 
/// Lower reporter_count = more unique neighborhood = higher diversity value.
pub fn score_n2_candidates_batch(&self) -> anyhow::Result<Vec<(NodeId, usize, bool)>> {
    // One query: group N2 rows per reachable node, count distinct reporters,
    // and flag whether the node also appears anywhere in N3.
    let mut stmt = self.conn.prepare(
        "SELECT n2.reachable_node_id, COUNT(DISTINCT n2.reporter_node_id) as reporter_count, CASE WHEN n3.reachable_node_id IS NOT NULL THEN 1 ELSE 0 END as in_n3 FROM reachable_n2 n2 LEFT JOIN (SELECT DISTINCT reachable_node_id FROM reachable_n3) n3 ON n2.reachable_node_id = n3.reachable_node_id GROUP BY n2.reachable_node_id",
    )?;
    let mut results = Vec::new();
    let mut rows = stmt.query([])?;
    while let Some(row) = rows.next()? {
        let nid = blob_to_nodeid(row.get(0)?)?;
        let reporter_count: usize = row.get::<_, i64>(1)? as usize;
        let in_n3: bool = row.get::<_, i64>(2)? != 0;
        results.push((nid, reporter_count, in_n3));
    }
    Ok(results)
}

/// Get a peer's recent_peers from their stored profile.
/// Returns an empty list when no profile row exists. Real database errors are
/// propagated instead of silently reading as "no profile" (the previous `.ok()`
/// collapsed both cases), matching the error handling of get_blob_post_id.
pub fn get_recent_peers(&self, node_id: &NodeId) -> anyhow::Result<Vec<NodeId>> {
    let result = self.conn.query_row(
        "SELECT recent_peers FROM profiles WHERE node_id = ?1",
        params![node_id.as_slice()],
        |row| row.get::<_, String>(0),
    );
    match result {
        Ok(json) => Ok(parse_anchors_json(&json)),
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(Vec::new()),
        Err(e) => Err(e.into()),
    }
}

// ---- Mesh Peers ----

/// Add a mesh peer connection record.
pub fn add_mesh_peer(
    &self,
    node_id: &NodeId,
    slot_kind: PeerSlotKind,
    priority: i32,
) -> anyhow::Result<()> {
    let now = now_ms();
    // Upsert: reconnecting refreshes kind/priority/connected_at but
    // deliberately leaves last_diff_seq untouched.
    self.conn.execute(
        "INSERT INTO mesh_peers (node_id, slot_kind, priority, connected_at, last_diff_seq) VALUES (?1, ?2, ?3, ?4, 0) ON CONFLICT(node_id) DO UPDATE SET slot_kind = ?2, priority = ?3, connected_at = ?4",
        params![
            node_id.as_slice(),
            slot_kind.to_string(),
            priority,
            now,
        ],
    )?;
    Ok(())
}

/// Remove a mesh peer.
pub fn remove_mesh_peer(&self, node_id: &NodeId) -> anyhow::Result<()> {
    self.conn.execute(
        "DELETE FROM mesh_peers WHERE node_id = ?1",
        params![node_id.as_slice()],
    )?;
    Ok(())
}

/// List all mesh peers: (node_id, slot_kind_str, priority).
pub fn list_mesh_peers(&self) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT node_id, slot_kind, priority FROM mesh_peers ORDER BY connected_at DESC", )?; let mut result = Vec::new(); let mut rows = stmt.query([])?; while let Some(row) = rows.next()? { let node_id = blob_to_nodeid(row.get(0)?)?; let slot_kind: String = row.get(1)?; let priority: i32 = row.get(2)?; result.push((node_id, slot_kind, priority)); } Ok(result) } /// Count mesh peers of a given slot kind. pub fn count_mesh_peers_by_kind(&self, slot_kind: PeerSlotKind) -> anyhow::Result { let count: i64 = self.conn.query_row( "SELECT COUNT(*) FROM mesh_peers WHERE slot_kind = ?1", params![slot_kind.to_string()], |row| row.get(0), )?; Ok(count as usize) } /// Update last_diff_seq for a mesh peer. pub fn update_mesh_peer_seq(&self, node_id: &NodeId, seq: u64) -> anyhow::Result<()> { self.conn.execute( "UPDATE mesh_peers SET last_diff_seq = ?1 WHERE node_id = ?2", params![seq as i64, node_id.as_slice()], )?; Ok(()) } // ---- Preferred Peers ---- /// Add a bilateral preferred peer agreement. pub fn add_preferred_peer(&self, node_id: &NodeId) -> anyhow::Result<()> { let now = now_ms(); self.conn.execute( "INSERT OR REPLACE INTO preferred_peers (node_id, agreed_at) VALUES (?1, ?2)", params![node_id.as_slice(), now], )?; Ok(()) } /// Remove a preferred peer agreement. pub fn remove_preferred_peer(&self, node_id: &NodeId) -> anyhow::Result<()> { self.conn.execute( "DELETE FROM preferred_peers WHERE node_id = ?1", params![node_id.as_slice()], )?; Ok(()) } /// List all preferred peers. pub fn list_preferred_peers(&self) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT node_id FROM preferred_peers ORDER BY agreed_at DESC", )?; let mut result = Vec::new(); let mut rows = stmt.query([])?; while let Some(row) = rows.next()? { result.push(blob_to_nodeid(row.get(0)?)?); } Ok(result) } /// Check if a peer is a preferred peer. 
pub fn is_preferred_peer(&self, node_id: &NodeId) -> anyhow::Result<bool> {
    // A row in preferred_peers marks a bilateral agreement with this node.
    let hits: i64 = self.conn.query_row(
        "SELECT COUNT(*) FROM preferred_peers WHERE node_id = ?1",
        params![node_id.as_slice()],
        |row| row.get(0),
    )?;
    Ok(hits > 0)
}

/// Count preferred peers.
pub fn count_preferred_peers(&self) -> anyhow::Result<usize> {
    let total: i64 = self.conn.query_row(
        "SELECT COUNT(*) FROM preferred_peers",
        [],
        |row| row.get(0),
    )?;
    Ok(total as usize)
}

// ---- Preferred Tree ----

/// Build 2-layer preferred peer tree from stored profiles.
/// Layer 0: target. Layer 1: target's preferred_peers. Layer 2: each L1 peer's preferred_peers.
/// Returns ~100 unique NodeIds.
pub fn build_preferred_tree_for(&self, target: &NodeId) -> anyhow::Result<Vec<NodeId>> {
    let mut tree = std::collections::HashSet::new();
    tree.insert(*target); // layer 0: the target itself
    // Layer 1 comes from the target's stored profile; without a profile the
    // tree is just the singleton target.
    let layer1 = match self.get_profile(target)? {
        Some(profile) => profile.preferred_peers,
        None => return Ok(tree.into_iter().collect()),
    };
    tree.extend(layer1.iter().copied());
    // Layer 2: fan out one more hop through each layer-1 peer's profile.
    for peer in &layer1 {
        if let Some(profile) = self.get_profile(peer)? {
            tree.extend(profile.preferred_peers.iter().copied());
        }
    }
    Ok(tree.into_iter().collect())
}

/// Update the preferred_tree JSON for a social route.
pub fn update_social_route_preferred_tree(&self, node_id: &NodeId, tree: &[NodeId]) -> anyhow::Result<()> {
    // Stored as a JSON array of hex-encoded node ids.
    let hex_ids: Vec<String> = tree.iter().map(hex::encode).collect();
    let json = serde_json::to_string(&hex_ids)?;
    self.conn.execute(
        "UPDATE social_routes SET preferred_tree = ?1 WHERE node_id = ?2",
        params![json, node_id.as_slice()],
    )?;
    Ok(())
}

// ---- Social Routes ----

/// Insert or update a social route entry.
pub fn upsert_social_route(&self, entry: &SocialRouteEntry) -> anyhow::Result<()> { let addrs_json = serde_json::to_string( &entry.addresses.iter().map(|a| a.to_string()).collect::>() )?; let peer_addrs_json = serde_json::to_string(&entry.peer_addresses)?; let pref_tree_json = serde_json::to_string( &entry.preferred_tree.iter().map(hex::encode).collect::>() )?; self.conn.execute( "INSERT INTO social_routes (node_id, addresses, peer_addresses, relation, status, last_connected_ms, last_seen_ms, reach_method, preferred_tree) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9) ON CONFLICT(node_id) DO UPDATE SET addresses = ?2, peer_addresses = ?3, relation = ?4, status = ?5, last_connected_ms = MAX(social_routes.last_connected_ms, ?6), last_seen_ms = MAX(social_routes.last_seen_ms, ?7), reach_method = ?8, preferred_tree = ?9", params![ entry.node_id.as_slice(), addrs_json, peer_addrs_json, entry.relation.to_string(), entry.status.to_string(), entry.last_connected_ms as i64, entry.last_seen_ms as i64, entry.reach_method.to_string(), pref_tree_json, ], )?; Ok(()) } /// Get a single social route entry. pub fn get_social_route(&self, node_id: &NodeId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT node_id, addresses, peer_addresses, relation, status, last_connected_ms, last_seen_ms, reach_method, preferred_tree FROM social_routes WHERE node_id = ?1", )?; let mut rows = stmt.query(params![node_id.as_slice()])?; if let Some(row) = rows.next()? { Ok(Some(row_to_social_route(row)?)) } else { Ok(None) } } /// Remove a social route entry. pub fn remove_social_route(&self, node_id: &NodeId) -> anyhow::Result<()> { self.conn.execute( "DELETE FROM social_routes WHERE node_id = ?1", params![node_id.as_slice()], )?; Ok(()) } /// Update address + last_connected + status=online for a social route. 
pub fn touch_social_route_connect( &self, node_id: &NodeId, addrs: &[std::net::SocketAddr], method: ReachMethod, ) -> anyhow::Result<()> { let now = now_ms(); let addrs_json = serde_json::to_string( &addrs.iter().map(|a| a.to_string()).collect::>() )?; self.conn.execute( "UPDATE social_routes SET addresses = ?1, last_connected_ms = ?2, last_seen_ms = ?2, status = 'online', reach_method = ?3 WHERE node_id = ?4", params![addrs_json, now, method.to_string(), node_id.as_slice()], )?; Ok(()) } /// Update peer_addresses + last_seen for a social route. pub fn update_social_route_peer_addrs( &self, node_id: &NodeId, peer_addrs: &[PeerWithAddress], ) -> anyhow::Result<()> { let now = now_ms(); let json = serde_json::to_string(peer_addrs)?; self.conn.execute( "UPDATE social_routes SET peer_addresses = ?1, last_seen_ms = ?2 WHERE node_id = ?3", params![json, now, node_id.as_slice()], )?; Ok(()) } /// Update just the address of a social route. pub fn update_social_route_address( &self, node_id: &NodeId, addr: &str, ) -> anyhow::Result<()> { let now = now_ms(); let addrs_json = serde_json::to_string(&vec![addr])?; self.conn.execute( "UPDATE social_routes SET addresses = ?1, last_seen_ms = ?2 WHERE node_id = ?3", params![addrs_json, now, node_id.as_slice()], )?; Ok(()) } /// Mark a social route as online or disconnected. pub fn set_social_route_status(&self, node_id: &NodeId, status: SocialStatus) -> anyhow::Result<()> { self.conn.execute( "UPDATE social_routes SET status = ?1 WHERE node_id = ?2", params![status.to_string(), node_id.as_slice()], )?; Ok(()) } /// List all social routes, sorted by last_seen DESC. pub fn list_social_routes(&self) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT node_id, addresses, peer_addresses, relation, status, last_connected_ms, last_seen_ms, reach_method, preferred_tree FROM social_routes ORDER BY last_seen_ms DESC", )?; let mut entries = Vec::new(); let mut rows = stmt.query([])?; while let Some(row) = rows.next()? 
{ entries.push(row_to_social_route(row)?); } Ok(entries) } /// List social routes with last_seen older than threshold. pub fn list_stale_social_routes(&self, max_age_ms: u64) -> anyhow::Result> { let cutoff = now_ms() - max_age_ms as i64; let mut stmt = self.conn.prepare( "SELECT node_id, addresses, peer_addresses, relation, status, last_connected_ms, last_seen_ms, reach_method, preferred_tree FROM social_routes WHERE last_seen_ms < ?1 ORDER BY last_seen_ms ASC", )?; let mut entries = Vec::new(); let mut rows = stmt.query(params![cutoff])?; while let Some(row) = rows.next()? { entries.push(row_to_social_route(row)?); } Ok(entries) } /// Check if a social route exists for a node. pub fn has_social_route(&self, node_id: &NodeId) -> anyhow::Result { let count: i64 = self.conn.query_row( "SELECT COUNT(*) FROM social_routes WHERE node_id = ?1", params![node_id.as_slice()], |row| row.get(0), )?; Ok(count > 0) } /// Bulk-populate social_routes from follows + peers. /// Returns the number of routes created/updated. pub fn rebuild_social_routes(&self) -> anyhow::Result { let now = now_ms() as u64; let mut count = 0; // v0.6.2: audience removed; social routes are built purely from follows. let follows: std::collections::HashSet = self.list_follows()?.into_iter().collect(); for nid in follows { let relation = SocialRelation::Follow; // Look up addresses from peers table let addresses: Vec = self .get_peer_record(&nid)? .map(|r| r.addresses) .unwrap_or_default(); // Build peer_addresses from the contact's profile recent_peers let peer_addresses = self.build_peer_addresses_for(&nid)?; // Build preferred peer tree let preferred_tree = self.build_preferred_tree_for(&nid).unwrap_or_default(); // Only insert if not already present (don't overwrite runtime state) if !self.has_social_route(&nid)? 
{ self.upsert_social_route(&SocialRouteEntry { node_id: nid, addresses, peer_addresses, relation, status: SocialStatus::Disconnected, last_connected_ms: 0, last_seen_ms: now, reach_method: ReachMethod::Direct, preferred_tree, })?; count += 1; } else { // Update the preferred tree for existing routes self.update_social_route_preferred_tree(&nid, &preferred_tree)?; } } Ok(count) } /// Build peer_addresses for a contact from their profile's recent_peers. pub fn build_peer_addresses_for(&self, node_id: &NodeId) -> anyhow::Result> { let recent_peers = self.get_recent_peers(node_id)?; let mut result = Vec::new(); for rp in recent_peers.iter().take(10) { let addrs: Vec = self .get_peer_record(rp)? .map(|r| r.addresses.iter().map(|a| a.to_string()).collect()) .unwrap_or_default(); result.push(PeerWithAddress { n: hex::encode(rp), a: addrs, }); } Ok(result) } // ---- Reconnect Watchers ---- /// Register a watcher for a disconnected peer. pub fn add_reconnect_watcher(&self, target: &NodeId, watcher: &NodeId) -> anyhow::Result<()> { let now = now_ms(); self.conn.execute( "INSERT OR REPLACE INTO reconnect_watchers (target_node_id, watcher_node_id, added_at) VALUES (?1, ?2, ?3)", params![target.as_slice(), watcher.as_slice(), now], )?; Ok(()) } /// Get all watchers for a target. pub fn get_reconnect_watchers(&self, target: &NodeId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT watcher_node_id FROM reconnect_watchers WHERE target_node_id = ?1", )?; let mut result = Vec::new(); let mut rows = stmt.query(params![target.as_slice()])?; while let Some(row) = rows.next()? { result.push(blob_to_nodeid(row.get(0)?)?); } Ok(result) } /// Remove all watchers for a target (after notifying). pub fn clear_reconnect_watchers(&self, target: &NodeId) -> anyhow::Result<()> { self.conn.execute( "DELETE FROM reconnect_watchers WHERE target_node_id = ?1", params![target.as_slice()], )?; Ok(()) } /// Remove watchers older than max_age_ms. 
pub fn prune_stale_watchers(&self, max_age_ms: i64) -> anyhow::Result { let cutoff = now_ms() - max_age_ms; let deleted = self.conn.execute( "DELETE FROM reconnect_watchers WHERE added_at < ?1", params![cutoff], )?; Ok(deleted) } // ---- Stats ---- // ---- Blobs ---- /// Record blob metadata. INSERT OR IGNORE (idempotent). pub fn record_blob( &self, cid: &[u8; 32], post_id: &PostId, author: &NodeId, size_bytes: u64, mime_type: &str, created_at: u64, ) -> anyhow::Result<()> { let now = now_ms(); self.conn.execute( "INSERT OR IGNORE INTO blobs (cid, post_id, author, size_bytes, mime_type, created_at, stored_at, last_accessed_at, pinned) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?7, 0)", params![ cid.as_slice(), post_id.as_slice(), author.as_slice(), size_bytes as i64, mime_type, created_at as i64, now, ], )?; Ok(()) } /// Check if blob metadata exists. pub fn has_blob(&self, cid: &[u8; 32]) -> bool { self.conn .query_row( "SELECT 1 FROM blobs WHERE cid = ?1", params![cid.as_slice()], |_| Ok(()), ) .is_ok() } /// Remove blob metadata (for future eviction). pub fn remove_blob(&self, cid: &[u8; 32]) -> anyhow::Result<()> { self.conn.execute("DELETE FROM blobs WHERE cid = ?1", params![cid.as_slice()])?; Ok(()) } /// Update last_accessed_at for a blob (enables future LRU eviction). pub fn touch_blob_access(&self, cid: &[u8; 32]) -> anyhow::Result<()> { self.conn.execute( "UPDATE blobs SET last_accessed_at = ?1 WHERE cid = ?2", params![now_ms(), cid.as_slice()], )?; Ok(()) } /// Get the post_id associated with a blob. 
pub fn get_blob_post_id(&self, cid: &[u8; 32]) -> anyhow::Result> { let result = self.conn.query_row( "SELECT post_id FROM blobs WHERE cid = ?1", params![cid.as_slice()], |row| { let bytes: Vec = row.get(0)?; Ok(bytes) }, ); match result { Ok(bytes) => { let pid: PostId = bytes.try_into().map_err(|_| anyhow::anyhow!("invalid post_id in blobs"))?; Ok(Some(pid)) } Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), Err(e) => Err(e.into()), } } /// Search post attachments JSON for a blob CID. Returns (mime_type, author_node_id). /// Fallback for when the blobs table doesn't have an entry (e.g. posts stored via PostFetch). pub fn find_blob_in_post_attachments(&self, blob_id: &[u8; 32]) -> anyhow::Result> { // Attachment.cid is [u8; 32], serde serializes as JSON array of numbers e.g. [37,147,227,240,...] // Build a LIKE pattern from the first 8 bytes to narrow the search let byte_pattern: String = blob_id[..8].iter() .map(|b| b.to_string()) .collect::>() .join(","); let pattern = format!("%{}%", byte_pattern); let mut stmt = self.conn.prepare( "SELECT author, attachments, visibility FROM posts WHERE attachments LIKE ?1 LIMIT 10" )?; let mut rows = stmt.query(params![pattern])?; while let Some(row) = rows.next()? { let author_bytes: Vec = row.get(0)?; let att_json: String = row.get(1)?; let vis_json: String = row.get(2)?; let visibility: PostVisibility = match serde_json::from_str(&vis_json) { Ok(v) => v, Err(_) => continue, }; if !matches!(visibility, PostVisibility::Public) { continue; } let attachments: Vec = match serde_json::from_str(&att_json) { Ok(a) => a, Err(_) => continue, }; for att in &attachments { if att.cid == *blob_id { if let Ok(author) = author_bytes.clone().try_into() { return Ok(Some((att.mime_type.clone(), author))); } } } } Ok(None) } /// Delete all blob metadata for a post, returning the CIDs for filesystem cleanup. 
pub fn delete_blobs_for_post(&self, post_id: &PostId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT cid FROM blobs WHERE post_id = ?1" )?; let cids: Vec<[u8; 32]> = stmt.query_map(params![post_id.as_slice()], |row| { let bytes: Vec = row.get(0)?; Ok(bytes) })? .filter_map(|r| r.ok()) .filter_map(|bytes| bytes.try_into().ok()) .collect(); self.conn.execute( "DELETE FROM blobs WHERE post_id = ?1", params![post_id.as_slice()], )?; Ok(cids) } /// Total blob storage in bytes (for future quota). pub fn total_blob_bytes(&self) -> anyhow::Result { let total: i64 = self.conn.query_row( "SELECT COALESCE(SUM(size_bytes), 0) FROM blobs", [], |row| row.get(0), )?; Ok(total as u64) } /// Pin a blob (prevents eviction priority from being too low). pub fn pin_blob(&self, cid: &[u8; 32]) -> anyhow::Result<()> { self.conn.execute( "UPDATE blobs SET pinned = 1 WHERE cid = ?1", params![cid.as_slice()], )?; Ok(()) } /// Unpin a blob. pub fn unpin_blob(&self, cid: &[u8; 32]) -> anyhow::Result<()> { self.conn.execute( "UPDATE blobs SET pinned = 0 WHERE cid = ?1", params![cid.as_slice()], )?; Ok(()) } /// Check if a blob is pinned. pub fn is_blob_pinned(&self, cid: &[u8; 32]) -> bool { self.conn .query_row( "SELECT pinned FROM blobs WHERE cid = ?1", params![cid.as_slice()], |row| row.get::<_, i64>(0), ) .map(|v| v != 0) .unwrap_or(false) } /// Get eviction candidates with replica counts. /// Returns blob metadata + peer_copies from post_replicas (stale within staleness_ms). 
pub fn get_eviction_candidates(&self, staleness_ms: u64) -> anyhow::Result> { let cutoff = now_ms() - staleness_ms as i64; let mut stmt = self.conn.prepare( "SELECT b.cid, b.post_id, b.author, b.size_bytes, b.created_at, b.last_accessed_at, b.pinned, COALESCE(r.copies, 0) as peer_copies, COALESCE(d.ds_count, 0) as downstream_count FROM blobs b LEFT JOIN ( SELECT post_id, COUNT(*) as copies FROM post_replicas WHERE last_confirmed_ms >= ?1 GROUP BY post_id ) r ON b.post_id = r.post_id LEFT JOIN ( SELECT file_id, COUNT(*) as ds_count FROM file_holders GROUP BY file_id ) d ON b.cid = d.file_id" )?; let rows = stmt.query_map(params![cutoff], |row| { let cid_bytes: Vec = row.get(0)?; let post_id_bytes: Vec = row.get(1)?; let author_bytes: Vec = row.get(2)?; let size_bytes = row.get::<_, i64>(3)? as u64; let created_at = row.get::<_, i64>(4)? as u64; let last_accessed_at = row.get::<_, i64>(5)? as u64; let pinned = row.get::<_, i64>(6)? != 0; let peer_copies = row.get::<_, i64>(7)? as u32; let downstream_count = row.get::<_, i64>(8)? as u32; Ok((cid_bytes, post_id_bytes, author_bytes, size_bytes, created_at, last_accessed_at, pinned, peer_copies, downstream_count)) })?; let mut result = Vec::new(); for row in rows { let (cid_bytes, post_id_bytes, author_bytes, size_bytes, created_at, last_accessed_at, pinned, peer_copies, downstream_count) = row?; let cid: [u8; 32] = match cid_bytes.try_into() { Ok(c) => c, Err(_) => continue, }; let post_id: PostId = match post_id_bytes.try_into() { Ok(p) => p, Err(_) => continue, }; let author: NodeId = match author_bytes.try_into() { Ok(a) => a, Err(_) => continue, }; result.push(EvictionCandidate { cid, post_id, author, size_bytes, created_at, last_accessed_at, pinned, peer_copies, downstream_count, }); } Ok(result) } /// Count total number of blobs. 
pub fn count_blobs(&self) -> anyhow::Result { let count: i64 = self.conn.query_row( "SELECT COUNT(*) FROM blobs", [], |row| row.get(0), )?; Ok(count as u64) } /// Clean up all CDN metadata for a blob (manifests + file_holders). pub fn cleanup_cdn_for_blob(&self, cid: &[u8; 32]) -> anyhow::Result<()> { self.conn.execute("DELETE FROM cdn_manifests WHERE cid = ?1", params![cid.as_slice()])?; self.conn.execute("DELETE FROM file_holders WHERE file_id = ?1", params![cid.as_slice()])?; Ok(()) } /// Get all blob CIDs for a post (without deleting them). pub fn get_blobs_for_post(&self, post_id: &PostId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT cid FROM blobs WHERE post_id = ?1" )?; let cids: Vec<[u8; 32]> = stmt.query_map(params![post_id.as_slice()], |row| { let bytes: Vec = row.get(0)?; Ok(bytes) })? .filter_map(|r| r.ok()) .filter_map(|bytes| bytes.try_into().ok()) .collect(); Ok(cids) } pub fn post_count(&self) -> anyhow::Result { let count: i64 = self .conn .query_row("SELECT COUNT(*) FROM posts", [], |row| row.get(0))?; Ok(count as usize) } // ---- CDN Manifests ---- /// Store or update a CDN manifest for a blob CID. pub fn store_cdn_manifest( &self, cid: &[u8; 32], manifest_json: &str, author: &NodeId, updated_at: u64, ) -> anyhow::Result<()> { self.conn.execute( "INSERT INTO cdn_manifests (cid, manifest_json, author, updated_at) VALUES (?1, ?2, ?3, ?4) ON CONFLICT(cid) DO UPDATE SET manifest_json = ?2, updated_at = ?4", params![cid.as_slice(), manifest_json, author.as_slice(), updated_at as i64], )?; Ok(()) } /// Get the raw manifest JSON for a blob CID. 
pub fn get_cdn_manifest(&self, cid: &[u8; 32]) -> anyhow::Result> { let result = self.conn.query_row( "SELECT manifest_json FROM cdn_manifests WHERE cid = ?1", params![cid.as_slice()], |row| row.get::<_, String>(0), ); match result { Ok(json) => Ok(Some(json)), Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), Err(e) => Err(e.into()), } } /// Get all manifests for blobs by a specific author: (cid, manifest_json). pub fn get_manifests_for_author_blobs( &self, author: &NodeId, ) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT cid, manifest_json FROM cdn_manifests WHERE author = ?1" )?; let rows = stmt.query_map(params![author.as_slice()], |row| { let cid_bytes: Vec = row.get(0)?; let json: String = row.get(1)?; Ok((cid_bytes, json)) })?; let mut result = Vec::new(); for row in rows { let (cid_bytes, json) = row?; let cid: [u8; 32] = cid_bytes.try_into() .map_err(|_| anyhow::anyhow!("invalid cid in cdn_manifests"))?; result.push((cid, json)); } Ok(result) } /// Get CIDs of manifests older than a cutoff. Callers look up holders /// via file_holders to pick a refresh source. pub fn get_stale_manifest_cids(&self, older_than_ms: u64) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT cid FROM cdn_manifests WHERE updated_at < ?1", )?; let rows = stmt.query_map(params![older_than_ms as i64], |row| { let cid_bytes: Vec = row.get(0)?; Ok(cid_bytes) })?; let mut out = Vec::new(); for row in rows { let cid_bytes = row?; if let Ok(cid) = <[u8; 32]>::try_from(cid_bytes.as_slice()) { out.push(cid); } } Ok(out) } /// Get the 10 posts before and 10 posts after a reference timestamp for an author. /// Returns (previous, following) ManifestEntry vectors. 
pub fn get_author_post_neighborhood( &self, author: &NodeId, ref_timestamp_ms: u64, count: usize, ) -> anyhow::Result<(Vec, Vec)> { // Previous posts: timestamp < ref, descending, take `count` let mut prev_stmt = self.conn.prepare( "SELECT id, timestamp_ms, attachments FROM posts WHERE author = ?1 AND timestamp_ms < ?2 ORDER BY timestamp_ms DESC LIMIT ?3" )?; let prev_rows = prev_stmt.query_map( params![author.as_slice(), ref_timestamp_ms as i64, count as i64], |row| { let id_bytes: Vec = row.get(0)?; let ts: i64 = row.get(1)?; let att_json: String = row.get(2)?; Ok((id_bytes, ts, att_json)) }, )?; let mut previous = Vec::new(); for row in prev_rows { let (id_bytes, ts, att_json) = row?; let post_id = blob_to_postid(id_bytes)?; let attachments: Vec = serde_json::from_str(&att_json).unwrap_or_default(); previous.push(ManifestEntry { post_id, timestamp_ms: ts as u64, has_attachments: !attachments.is_empty(), }); } // Following posts: timestamp > ref, ascending, take `count` let mut next_stmt = self.conn.prepare( "SELECT id, timestamp_ms, attachments FROM posts WHERE author = ?1 AND timestamp_ms > ?2 ORDER BY timestamp_ms ASC LIMIT ?3" )?; let next_rows = next_stmt.query_map( params![author.as_slice(), ref_timestamp_ms as i64, count as i64], |row| { let id_bytes: Vec = row.get(0)?; let ts: i64 = row.get(1)?; let att_json: String = row.get(2)?; Ok((id_bytes, ts, att_json)) }, )?; let mut following = Vec::new(); for row in next_rows { let (id_bytes, ts, att_json) = row?; let post_id = blob_to_postid(id_bytes)?; let attachments: Vec = serde_json::from_str(&att_json).unwrap_or_default(); following.push(ManifestEntry { post_id, timestamp_ms: ts as u64, has_attachments: !attachments.is_empty(), }); } Ok((previous, following)) } /// Get mesh peers and N2 peers known to have an author's posts (from post_replicas overlap). /// Used by the lateral fetch cascade step. 
/// Results are sorted: non-anchor peers first (to save anchor delivery budget), /// then by specificity (peers with this exact post first) and recency. pub fn get_lateral_blob_sources(&self, author: &NodeId, post_id: &PostId) -> anyhow::Result> { // Find peers who have replicas of any post by this author, prioritizing those // who have this specific post, then any other posts by the same author. // Cross-reference with mesh_peers and reachable_n2 for reachability. // Sort: non-anchors first (COALESCE is_anchor default 0), then post match, then recency. let mut stmt = self.conn.prepare( "SELECT DISTINCT pr.node_id FROM post_replicas pr INNER JOIN posts p ON pr.post_id = p.id LEFT JOIN peers pe ON pr.node_id = pe.node_id WHERE p.author = ?1 AND ( pr.node_id IN (SELECT node_id FROM mesh_peers) OR pr.node_id IN (SELECT reachable_node_id FROM reachable_n2) ) ORDER BY COALESCE(pe.is_anchor, 0) ASC, CASE WHEN pr.post_id = ?2 THEN 0 ELSE 1 END, pr.last_confirmed_ms DESC LIMIT 10" )?; let rows = stmt.query_map(params![author.as_slice(), post_id.as_slice()], |row| { let bytes: Vec = row.get(0)?; Ok(bytes) })?; let mut result = Vec::new(); for row in rows { if let Ok(nid) = blob_to_nodeid(row?) { result.push(nid); } } Ok(result) } // --- Post recipients index (for merged-pull recipient-match) --- /// Insert all recipient NodeIds for an encrypted post into post_recipients. /// No-op for Public visibility. Called on post insert / visibility update. fn index_post_recipients( &self, post_id: &PostId, visibility: &PostVisibility, ) -> anyhow::Result<()> { match visibility { PostVisibility::Public => Ok(()), PostVisibility::Encrypted { recipients } => { for wk in recipients { self.conn.execute( "INSERT OR IGNORE INTO post_recipients (post_id, recipient) VALUES (?1, ?2)", params![post_id.as_slice(), wk.recipient.as_slice()], )?; } Ok(()) } PostVisibility::GroupEncrypted { group_id, .. } => { // For group-encrypted posts, index the group's members. 
let members = self.get_all_group_members() .ok() .and_then(|m| m.get(group_id).cloned()) .unwrap_or_default(); for member in members { self.conn.execute( "INSERT OR IGNORE INTO post_recipients (post_id, recipient) VALUES (?1, ?2)", params![post_id.as_slice(), member.as_slice()], )?; } Ok(()) } } } /// Return all post IDs for which any of the given NodeIds is a recipient. /// Uses the idx_post_recipients_recipient index. pub fn get_post_ids_for_recipients( &self, recipients: &[NodeId], ) -> anyhow::Result> { if recipients.is_empty() { return Ok(Vec::new()); } let placeholders: Vec<&str> = (0..recipients.len()).map(|_| "?").collect(); let sql = format!( "SELECT DISTINCT post_id FROM post_recipients WHERE recipient IN ({})", placeholders.join(",") ); let mut stmt = self.conn.prepare(&sql)?; let params = rusqlite::params_from_iter(recipients.iter().map(|r| r.to_vec())); let rows = stmt.query_map(params, |row| { let bytes: Vec = row.get(0)?; Ok(bytes) })?; let mut out = Vec::new(); for row in rows { if let Ok(pid) = <[u8; 32]>::try_from(row?.as_slice()) { out.push(pid); } } Ok(out) } /// Seed the post_recipients index from existing encrypted posts. /// One-time idempotent migration for users upgrading from pre-0.6.2. pub fn seed_post_recipients_from_posts(&self) -> anyhow::Result<()> { let existing: i64 = self.conn.prepare("SELECT COUNT(*) FROM post_recipients")? .query_row([], |row| row.get(0))?; if existing > 0 { return Ok(()); } // Scan all posts, parse visibility, index recipients. 
let mut stmt = self.conn.prepare("SELECT id, visibility FROM posts")?; let rows = stmt.query_map([], |row| { let id_bytes: Vec = row.get(0)?; let vis_json: String = row.get(1)?; Ok((id_bytes, vis_json)) })?; let entries: Vec<([u8; 32], PostVisibility)> = rows .filter_map(|r| r.ok()) .filter_map(|(id_bytes, vis_json)| { let pid = <[u8; 32]>::try_from(id_bytes.as_slice()).ok()?; let vis: PostVisibility = serde_json::from_str(&vis_json).ok()?; Some((pid, vis)) }) .collect(); drop(stmt); for (pid, vis) in entries { self.index_post_recipients(&pid, &vis)?; } Ok(()) } // --- Posting identities (multi-persona plumbing) --- pub fn upsert_posting_identity(&self, id: &PostingIdentity) -> anyhow::Result<()> { self.conn.execute( "INSERT INTO posting_identities (node_id, secret_seed, display_name, created_at) VALUES (?1, ?2, ?3, ?4) ON CONFLICT(node_id) DO UPDATE SET display_name = excluded.display_name", params![ id.node_id.as_slice(), id.secret_seed.as_slice(), id.display_name, id.created_at as i64, ], )?; Ok(()) } pub fn get_posting_identity(&self, node_id: &NodeId) -> anyhow::Result> { let result = self.conn.query_row( "SELECT node_id, secret_seed, display_name, created_at FROM posting_identities WHERE node_id = ?1", params![node_id.as_slice()], |row| { let nid: Vec = row.get(0)?; let seed: Vec = row.get(1)?; let name: String = row.get(2)?; let ts: i64 = row.get(3)?; Ok((nid, seed, name, ts)) }, ); match result { Ok((nid_bytes, seed_bytes, name, ts)) => { let nid: NodeId = nid_bytes.as_slice().try_into() .map_err(|_| anyhow::anyhow!("invalid posting identity node_id"))?; let seed: [u8; 32] = seed_bytes.as_slice().try_into() .map_err(|_| anyhow::anyhow!("invalid posting identity seed"))?; Ok(Some(PostingIdentity { node_id: nid, secret_seed: seed, display_name: name, created_at: ts as u64, })) } Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), Err(e) => Err(e.into()), } } pub fn list_posting_identities(&self) -> anyhow::Result> { let mut stmt = self.conn.prepare( 
"SELECT node_id, secret_seed, display_name, created_at FROM posting_identities ORDER BY created_at ASC", )?; let rows = stmt.query_map([], |row| { let nid: Vec = row.get(0)?; let seed: Vec = row.get(1)?; let name: String = row.get(2)?; let ts: i64 = row.get(3)?; Ok((nid, seed, name, ts)) })?; let mut out = Vec::new(); for row in rows { let (nid_bytes, seed_bytes, name, ts) = row?; let nid: NodeId = match nid_bytes.as_slice().try_into() { Ok(n) => n, Err(_) => continue, }; let seed: [u8; 32] = match seed_bytes.as_slice().try_into() { Ok(s) => s, Err(_) => continue, }; out.push(PostingIdentity { node_id: nid, secret_seed: seed, display_name: name, created_at: ts as u64, }); } Ok(out) } pub fn delete_posting_identity(&self, node_id: &NodeId) -> anyhow::Result<()> { self.conn.execute( "DELETE FROM posting_identities WHERE node_id = ?1", params![node_id.as_slice()], )?; Ok(()) } /// Get the NodeId of the currently default posting identity. /// Stored in `settings` under key `active_default_posting_id`. pub fn get_default_posting_id(&self) -> anyhow::Result> { match self.get_setting("active_default_posting_id")? { Some(hex_str) => { let bytes = hex::decode(&hex_str).unwrap_or_default(); if bytes.len() == 32 { let mut nid = [0u8; 32]; nid.copy_from_slice(&bytes); Ok(Some(nid)) } else { Ok(None) } } None => Ok(None), } } pub fn set_default_posting_id(&self, node_id: &NodeId) -> anyhow::Result<()> { self.set_setting("active_default_posting_id", &hex::encode(node_id)) } pub fn count_posting_identities(&self) -> anyhow::Result { let n: i64 = self.conn.prepare( "SELECT COUNT(*) FROM posting_identities", )?.query_row([], |row| row.get(0))?; Ok(n as u64) } // --- File holders (flat, per-file, LRU-capped at 5) --- // // A single table for PostId-keyed engagement propagation and CID-keyed // manifest/blob propagation. Any 32-byte content-addressed file_id fits. /// Upsert a holder for a file. Bumps last_interaction_ms to now and /// enforces an LRU cap of 5 holders per file. 
pub fn touch_file_holder( &self, file_id: &[u8; 32], peer_id: &NodeId, peer_addresses: &[String], direction: HolderDirection, ) -> anyhow::Result<()> { let addrs_json = serde_json::to_string(peer_addresses)?; let now = now_ms(); let new_dir = direction.as_str(); // Upsert. If the row exists with a different direction, promote to "both". self.conn.execute( "INSERT INTO file_holders (file_id, peer_id, peer_addresses, last_interaction_ms, direction) VALUES (?1, ?2, ?3, ?4, ?5) ON CONFLICT(file_id, peer_id) DO UPDATE SET peer_addresses = CASE WHEN length(?3) > 2 THEN ?3 ELSE peer_addresses END, last_interaction_ms = ?4, direction = CASE WHEN direction = ?5 THEN direction ELSE 'both' END", params![file_id.as_slice(), peer_id.as_slice(), addrs_json, now as i64, new_dir], )?; // Enforce LRU cap of 5. Oldest get dropped. self.conn.execute( "DELETE FROM file_holders WHERE file_id = ?1 AND peer_id NOT IN ( SELECT peer_id FROM file_holders WHERE file_id = ?1 ORDER BY last_interaction_ms DESC LIMIT 5 )", params![file_id.as_slice()], )?; Ok(()) } /// Count file holders (bounded at 5 by touch_file_holder's LRU cap). pub fn get_file_holder_count(&self, file_id: &[u8; 32]) -> anyhow::Result { let count: i64 = self.conn.prepare( "SELECT COUNT(*) FROM file_holders WHERE file_id = ?1", )?.query_row(params![file_id.as_slice()], |row| row.get(0))?; Ok(count as u32) } /// Return the up-to-5 most recently interacted holders of a file. 
pub fn get_file_holders(&self, file_id: &[u8; 32]) -> anyhow::Result)>> { let mut stmt = self.conn.prepare( "SELECT peer_id, peer_addresses FROM file_holders WHERE file_id = ?1 ORDER BY last_interaction_ms DESC LIMIT 5", )?; let rows = stmt.query_map(params![file_id.as_slice()], |row| { let peer_bytes: Vec = row.get(0)?; let addrs_json: String = row.get(1)?; Ok((peer_bytes, addrs_json)) })?; let mut out = Vec::new(); for row in rows { let (peer_bytes, addrs_json) = row?; if peer_bytes.len() != 32 { continue; } let mut peer = [0u8; 32]; peer.copy_from_slice(&peer_bytes); let addrs: Vec = serde_json::from_str(&addrs_json).unwrap_or_default(); out.push((NodeId::from(peer), addrs)); } Ok(out) } /// Remove all holders for a file (e.g. on post/blob deletion). pub fn delete_file_holders(&self, file_id: &[u8; 32]) -> anyhow::Result<()> { self.conn.execute( "DELETE FROM file_holders WHERE file_id = ?1", params![file_id.as_slice()], )?; Ok(()) } /// Remove a single peer's holder entry for a file. pub fn remove_file_holder(&self, file_id: &[u8; 32], peer_id: &NodeId) -> anyhow::Result<()> { self.conn.execute( "DELETE FROM file_holders WHERE file_id = ?1 AND peer_id = ?2", params![file_id.as_slice(), peer_id.as_slice()], )?; Ok(()) } /// One-time migration: seed file_holders from the legacy upstream/downstream /// tables so a user upgrading from pre-0.6.1 doesn't start with empty holder /// sets. Idempotent — inserts use ON CONFLICT DO NOTHING semantics via the /// PRIMARY KEY. Skips tables that don't exist on fresh installs. pub fn seed_file_holders_from_legacy(&self) -> anyhow::Result<()> { // Skip if file_holders already populated (idempotent re-run protection). let existing: i64 = self.conn.prepare("SELECT COUNT(*) FROM file_holders")? 
.query_row([], |row| row.get(0))?; if existing > 0 { return Ok(()); } let now = now_ms() as i64; let table_exists = |name: &str| -> anyhow::Result { let count: i64 = self.conn.prepare( "SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name=?1", )?.query_row(params![name], |row| row.get(0))?; Ok(count > 0) }; if table_exists("post_upstream")? { self.conn.execute( "INSERT OR IGNORE INTO file_holders (file_id, peer_id, peer_addresses, last_interaction_ms, direction) SELECT post_id, peer_node_id, '[]', ?1, 'received' FROM post_upstream", params![now], )?; } if table_exists("post_downstream")? { self.conn.execute( "INSERT OR IGNORE INTO file_holders (file_id, peer_id, peer_addresses, last_interaction_ms, direction) SELECT post_id, peer_node_id, '[]', ?1, 'sent' FROM post_downstream", params![now], )?; } if table_exists("blob_upstream")? { self.conn.execute( "INSERT OR IGNORE INTO file_holders (file_id, peer_id, peer_addresses, last_interaction_ms, direction) SELECT cid, source_node_id, source_addresses, ?1, 'received' FROM blob_upstream", params![now], )?; } if table_exists("blob_downstream")? { self.conn.execute( "INSERT OR IGNORE INTO file_holders (file_id, peer_id, peer_addresses, last_interaction_ms, direction) SELECT cid, peer_node_id, peer_addresses, ?1, 'sent' FROM blob_downstream", params![now], )?; } Ok(()) } // --- Engagement: reactions --- /// Store a reaction (upsert by reactor+post_id+emoji). /// Tombstone-aware: incoming reaction wins only if its timestamp is newer. 
pub fn store_reaction(&self, reaction: &Reaction) -> anyhow::Result<()> {
    // Last-writer-wins upsert keyed on (reactor, post_id, emoji): each updated
    // column takes the incoming value only when the incoming timestamp_ms is
    // strictly newer than the stored one, so stale or replayed reactions can
    // never overwrite a newer state (including a newer tombstone). Unqualified
    // column names in DO UPDATE refer to the existing row; excluded.* is the
    // incoming row.
    self.conn.execute(
        "INSERT INTO reactions (reactor, post_id, emoji, timestamp_ms, encrypted_payload, deleted_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6) ON CONFLICT(reactor, post_id, emoji) DO UPDATE SET timestamp_ms = CASE WHEN excluded.timestamp_ms > timestamp_ms THEN excluded.timestamp_ms ELSE timestamp_ms END, deleted_at = CASE WHEN excluded.timestamp_ms > timestamp_ms THEN excluded.deleted_at ELSE deleted_at END, encrypted_payload = CASE WHEN excluded.timestamp_ms > timestamp_ms THEN excluded.encrypted_payload ELSE encrypted_payload END",
        params![
            reaction.reactor.as_slice(),
            reaction.post_id.as_slice(),
            reaction.emoji,
            reaction.timestamp_ms as i64,
            reaction.encrypted_payload,
            reaction.deleted_at.map(|v| v as i64),
        ],
    )?;
    Ok(())
}

/// Tombstone a reaction (soft-delete by setting deleted_at).
pub fn remove_reaction(&self, reactor: &NodeId, post_id: &PostId, emoji: &str) -> anyhow::Result<()> {
    // Sets deleted_at to the current time; no-op (still Ok) when no row matches.
    self.conn.execute(
        "UPDATE reactions SET deleted_at = ?4 WHERE reactor = ?1 AND post_id = ?2 AND emoji = ?3",
        params![reactor.as_slice(), post_id.as_slice(), emoji, now_ms()],
    )?;
    Ok(())
}

/// Get live (non-tombstoned) reactions for a post. Used for UI display.
pub fn get_reactions(&self, post_id: &PostId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT reactor, post_id, emoji, timestamp_ms, encrypted_payload FROM reactions WHERE post_id = ?1 AND deleted_at IS NULL ORDER BY timestamp_ms ASC" )?; let rows = stmt.query_map(params![post_id.as_slice()], |row| { let reactor: Vec = row.get(0)?; let pid: Vec = row.get(1)?; let emoji: String = row.get(2)?; let ts: i64 = row.get(3)?; let enc: Option = row.get(4)?; Ok((reactor, pid, emoji, ts, enc)) })?; let mut result = Vec::new(); for row in rows { let (reactor_bytes, pid_bytes, emoji, ts, enc) = row?; let reactor = blob_to_nodeid(reactor_bytes)?; let post_id = blob_to_postid(pid_bytes)?; result.push(Reaction { reactor, emoji, post_id, timestamp_ms: ts as u64, encrypted_payload: enc, deleted_at: None, signature: vec![], }); } Ok(result) } /// Get ALL reactions for a post, including tombstoned ones. Used for header rebuild /// so tombstones propagate through pull-based sync. pub fn get_reactions_with_tombstones(&self, post_id: &PostId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT reactor, post_id, emoji, timestamp_ms, encrypted_payload, deleted_at FROM reactions WHERE post_id = ?1 ORDER BY timestamp_ms ASC" )?; let rows = stmt.query_map(params![post_id.as_slice()], |row| { let reactor: Vec = row.get(0)?; let pid: Vec = row.get(1)?; let emoji: String = row.get(2)?; let ts: i64 = row.get(3)?; let enc: Option = row.get(4)?; let del: Option = row.get(5)?; Ok((reactor, pid, emoji, ts, enc, del)) })?; let mut result = Vec::new(); for row in rows { let (reactor_bytes, pid_bytes, emoji, ts, enc, del) = row?; let reactor = blob_to_nodeid(reactor_bytes)?; let post_id = blob_to_postid(pid_bytes)?; result.push(Reaction { reactor, emoji, post_id, timestamp_ms: ts as u64, encrypted_payload: enc, deleted_at: del.map(|v| v as u64), signature: vec![], }); } Ok(result) } /// Get reaction counts grouped by emoji for a post (excludes tombstoned reactions). 
pub fn get_reaction_counts(&self, post_id: &PostId, my_node_id: &NodeId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT emoji, COUNT(*) as cnt, SUM(CASE WHEN reactor = ?2 THEN 1 ELSE 0 END) as my_count FROM reactions WHERE post_id = ?1 AND deleted_at IS NULL GROUP BY emoji ORDER BY cnt DESC" )?; let rows = stmt.query_map(params![post_id.as_slice(), my_node_id.as_slice()], |row| { let emoji: String = row.get(0)?; let count: i64 = row.get(1)?; let my_count: i64 = row.get(2)?; Ok((emoji, count as u64, my_count > 0)) })?; let mut result = Vec::new(); for row in rows { result.push(row?); } Ok(result) } // --- Engagement: comments --- /// Store a comment. Tombstone-aware upsert: if the incoming comment carries a /// deleted_at tombstone, store it so the tombstone propagates. pub fn store_comment(&self, comment: &InlineComment) -> anyhow::Result<()> { self.conn.execute( "INSERT INTO comments (author, post_id, content, timestamp_ms, signature, deleted_at, ref_post_id) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7) ON CONFLICT(author, post_id, timestamp_ms) DO UPDATE SET content = CASE WHEN excluded.deleted_at IS NOT NULL THEN content ELSE excluded.content END, deleted_at = CASE WHEN excluded.deleted_at IS NOT NULL THEN excluded.deleted_at ELSE deleted_at END, ref_post_id = COALESCE(excluded.ref_post_id, ref_post_id)", params![ comment.author.as_slice(), comment.post_id.as_slice(), comment.content, comment.timestamp_ms as i64, comment.signature, comment.deleted_at.map(|v| v as i64), comment.ref_post_id.as_ref().map(|r| r.as_slice()), ], )?; Ok(()) } /// Edit a comment (must match author + post_id + timestamp_ms). 
pub fn edit_comment(&self, author: &NodeId, post_id: &PostId, timestamp_ms: u64, new_content: &str) -> anyhow::Result<bool> {
    // Refuse to edit a tombstoned comment: store_comment's conflict clause
    // deliberately lets tombstones win over content, so an edit must not
    // silently resurrect deleted text. Returns true iff a live row was updated.
    let updated = self.conn.execute(
        "UPDATE comments SET content = ?4 WHERE author = ?1 AND post_id = ?2 AND timestamp_ms = ?3 AND deleted_at IS NULL",
        params![author.as_slice(), post_id.as_slice(), timestamp_ms as i64, new_content],
    )?;
    Ok(updated > 0)
}

/// Tombstone a comment (soft-delete by setting deleted_at).
pub fn delete_comment(&self, author: &NodeId, post_id: &PostId, timestamp_ms: u64) -> anyhow::Result<bool> {
    // Returns true iff a matching row existed and was tombstoned.
    let updated = self.conn.execute(
        "UPDATE comments SET deleted_at = ?4 WHERE author = ?1 AND post_id = ?2 AND timestamp_ms = ?3",
        params![author.as_slice(), post_id.as_slice(), timestamp_ms as i64, now_ms()],
    )?;
    Ok(updated > 0)
}

/// Get live (non-tombstoned) comments for a post. Used for UI display.
pub fn get_comments(&self, post_id: &PostId) -> anyhow::Result<Vec<InlineComment>> {
    let mut stmt = self.conn.prepare(
        "SELECT author, post_id, content, timestamp_ms, signature, ref_post_id FROM comments \
         WHERE post_id = ?1 AND deleted_at IS NULL ORDER BY timestamp_ms ASC"
    )?;
    let rows = stmt.query_map(params![post_id.as_slice()], |row| {
        let author: Vec<u8> = row.get(0)?;
        let pid: Vec<u8> = row.get(1)?;
        let content: String = row.get(2)?;
        let ts: i64 = row.get(3)?;
        let sig: Vec<u8> = row.get(4)?;
        let ref_post: Option<Vec<u8>> = row.get(5)?;
        Ok((author, pid, content, ts, sig, ref_post))
    })?;
    let mut result = Vec::new();
    for row in rows {
        let (author_bytes, pid_bytes, content, ts, sig, ref_post) = row?;
        let author = blob_to_nodeid(author_bytes)?;
        let post_id = blob_to_postid(pid_bytes)?;
        let ref_post_id = match ref_post {
            Some(b) => Some(blob_to_postid(b)?),
            None => None,
        };
        // deleted_at is always None here — the query filters tombstones.
        result.push(InlineComment {
            author,
            post_id,
            content,
            timestamp_ms: ts as u64,
            signature: sig,
            deleted_at: None,
            ref_post_id,
        });
    }
    Ok(result)
}

/// Get ALL comments for a post, including tombstoned ones. Used for header rebuild
/// so tombstones propagate through pull-based sync.
pub fn get_comments_with_tombstones(&self, post_id: &PostId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT author, post_id, content, timestamp_ms, signature, deleted_at, ref_post_id FROM comments WHERE post_id = ?1 ORDER BY timestamp_ms ASC" )?; let rows = stmt.query_map(params![post_id.as_slice()], |row| { let author: Vec = row.get(0)?; let pid: Vec = row.get(1)?; let content: String = row.get(2)?; let ts: i64 = row.get(3)?; let sig: Vec = row.get(4)?; let del: Option = row.get(5)?; let ref_post: Option> = row.get(6)?; Ok((author, pid, content, ts, sig, del, ref_post)) })?; let mut result = Vec::new(); for row in rows { let (author_bytes, pid_bytes, content, ts, sig, del, ref_post) = row?; let author = blob_to_nodeid(author_bytes)?; let post_id = blob_to_postid(pid_bytes)?; let ref_post_id = match ref_post { Some(b) => Some(blob_to_postid(b)?), None => None, }; result.push(InlineComment { author, post_id, content, timestamp_ms: ts as u64, signature: sig, deleted_at: del.map(|v| v as u64), ref_post_id, }); } Ok(result) } /// Get comment count for a post (excludes tombstoned comments). pub fn get_comment_count(&self, post_id: &PostId) -> anyhow::Result { let count: i64 = self.conn.prepare( "SELECT COUNT(*) FROM comments WHERE post_id = ?1 AND deleted_at IS NULL" )?.query_row(params![post_id.as_slice()], |row| row.get(0))?; Ok(count as u64) } // --- Engagement: comment policies --- /// Store or update a comment policy for a post. pub fn set_comment_policy(&self, post_id: &PostId, policy: &CommentPolicy) -> anyhow::Result<()> { let json = serde_json::to_string(policy)?; self.conn.execute( "INSERT INTO comment_policies (post_id, policy_json) VALUES (?1, ?2) ON CONFLICT(post_id) DO UPDATE SET policy_json = excluded.policy_json", params![post_id.as_slice(), json], )?; Ok(()) } /// Get the comment policy for a post. 
pub fn get_comment_policy(&self, post_id: &PostId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT policy_json FROM comment_policies WHERE post_id = ?1" )?; let result = stmt.query_row(params![post_id.as_slice()], |row| { row.get::<_, String>(0) }); match result { Ok(json) => Ok(Some(serde_json::from_str(&json)?)), Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), Err(e) => Err(e.into()), } } // --- Engagement: blob headers --- /// Store or update an aggregated blob header for a post. pub fn store_blob_header(&self, post_id: &PostId, author: &NodeId, header_json: &str, updated_at: u64) -> anyhow::Result<()> { self.conn.execute( "INSERT INTO blob_headers (post_id, author, header_json, updated_at) VALUES (?1, ?2, ?3, ?4) ON CONFLICT(post_id) DO UPDATE SET header_json = excluded.header_json, updated_at = excluded.updated_at", params![post_id.as_slice(), author.as_slice(), header_json, updated_at as i64], )?; Ok(()) } /// Get the blob header for a post. pub fn get_blob_header(&self, post_id: &PostId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT header_json, updated_at FROM blob_headers WHERE post_id = ?1" )?; let result = stmt.query_row(params![post_id.as_slice()], |row| { let json: String = row.get(0)?; let ts: i64 = row.get(1)?; Ok((json, ts as u64)) }); match result { Ok(r) => Ok(Some(r)), Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), Err(e) => Err(e.into()), } } // --- Engagement: thread metadata --- /// Store a thread split link (child → parent). pub fn store_thread_meta(&self, meta: &ThreadMeta) -> anyhow::Result<()> { self.conn.execute( "INSERT INTO thread_meta (post_id, parent_post_id) VALUES (?1, ?2) ON CONFLICT DO NOTHING", params![meta.post_id.as_slice(), meta.parent_post_id.as_slice()], )?; Ok(()) } /// Get all child posts for a parent (thread splits). 
pub fn get_thread_children(&self, parent_post_id: &PostId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT post_id FROM thread_meta WHERE parent_post_id = ?1" )?; let rows = stmt.query_map(params![parent_post_id.as_slice()], |row| row.get::<_, Vec>(0))?; let mut result = Vec::new(); for row in rows { result.push(blob_to_postid(row?)?); } Ok(result) } /// Get the parent post for a thread split child. pub fn get_thread_parent(&self, post_id: &PostId) -> anyhow::Result> { let mut stmt = self.conn.prepare( "SELECT parent_post_id FROM thread_meta WHERE post_id = ?1" )?; let result = stmt.query_row(params![post_id.as_slice()], |row| row.get::<_, Vec>(0)); match result { Ok(bytes) => Ok(Some(blob_to_postid(bytes)?)), Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), Err(e) => Err(e.into()), } } } /// Parse a JSON array of hex-encoded node IDs into Vec. fn parse_anchors_json(json: &str) -> Vec { let hex_ids: Vec = serde_json::from_str(json).unwrap_or_default(); hex_ids .iter() .filter_map(|h| hex::decode(h).ok()) .filter_map(|b| b.try_into().ok()) .collect() } fn blob_to_postid(bytes: Vec) -> anyhow::Result { bytes .try_into() .map_err(|v: Vec| anyhow::anyhow!("invalid post id length: {}", v.len())) } fn blob_to_nodeid(bytes: Vec) -> anyhow::Result { bytes .try_into() .map_err(|v: Vec| anyhow::anyhow!("invalid node id length: {}", v.len())) } fn now_ms() -> i64 { std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap_or_default() .as_millis() as i64 } fn row_to_peer_record(row: &rusqlite::Row) -> anyhow::Result { let node_id = blob_to_nodeid(row.get(0)?)?; let addrs_json: String = row.get(1)?; let addr_strings: Vec = serde_json::from_str(&addrs_json).unwrap_or_default(); let addresses: Vec = addr_strings .iter() .filter_map(|s| s.parse().ok()) .collect(); let last_seen = row.get::<_, i64>(2)? 
as u64; let introduced_by: Option> = row.get(3)?; let introduced_by = introduced_by .map(|b| blob_to_nodeid(b)) .transpose()?; let is_anchor = row.get::<_, i32>(4)? != 0; let first_seen = row.get::<_, i64>(5)? as u64; Ok(PeerRecord { node_id, addresses, last_seen, introduced_by, is_anchor, first_seen, }) } fn row_to_social_route(row: &rusqlite::Row) -> anyhow::Result { let node_id = blob_to_nodeid(row.get(0)?)?; let addrs_json: String = row.get(1)?; let addr_strings: Vec = serde_json::from_str(&addrs_json).unwrap_or_default(); let addresses: Vec = addr_strings .iter() .filter_map(|s| s.parse().ok()) .collect(); let peer_addrs_json: String = row.get(2)?; let peer_addresses: Vec = serde_json::from_str(&peer_addrs_json).unwrap_or_default(); let relation_str: String = row.get(3)?; let relation: SocialRelation = relation_str.parse().unwrap_or(SocialRelation::Follow); let status_str: String = row.get(4)?; let status: SocialStatus = status_str.parse().unwrap_or(SocialStatus::Disconnected); let last_connected_ms = row.get::<_, i64>(5)? as u64; let last_seen_ms = row.get::<_, i64>(6)? 
as u64; let method_str: String = row.get(7)?; let reach_method: ReachMethod = method_str.parse().unwrap_or(ReachMethod::Direct); let pref_tree_json: String = row.get::<_, String>(8).unwrap_or_else(|_| "[]".to_string()); let preferred_tree = parse_anchors_json(&pref_tree_json); Ok(SocialRouteEntry { node_id, addresses, peer_addresses, relation, status, last_connected_ms, last_seen_ms, reach_method, preferred_tree, }) } #[cfg(test)] mod tests { use super::*; use crate::types::DeviceProfile; fn make_node_id(byte: u8) -> NodeId { [byte; 32] } fn make_post_id(byte: u8) -> PostId { [byte; 32] } fn temp_storage() -> Storage { Storage::open(":memory:").unwrap() } fn set_peer_last_seen(storage: &Storage, node_id: &NodeId, last_seen: i64) { storage.conn.execute( "UPDATE peers SET last_seen = ?1 WHERE node_id = ?2", params![last_seen, node_id.as_slice()], ).unwrap(); } #[test] fn gossip_includes_recent_peer() { let s = temp_storage(); let nid = make_node_id(1); let addr: std::net::SocketAddr = "10.0.0.1:1234".parse().unwrap(); s.upsert_peer(&nid, &[addr], None).unwrap(); let gossip = s.build_gossip_list().unwrap(); assert_eq!(gossip.len(), 1); assert_eq!(gossip[0].node_id, nid); } #[test] fn gossip_excludes_stale_peer() { let s = temp_storage(); let nid = make_node_id(2); let addr: std::net::SocketAddr = "10.0.0.2:1234".parse().unwrap(); s.upsert_peer(&nid, &[addr], None).unwrap(); // Set last_seen to 8 days ago let eight_days_ago = now_ms() - 8 * 24 * 60 * 60 * 1000; set_peer_last_seen(&s, &nid, eight_days_ago); let gossip = s.build_gossip_list().unwrap(); assert!(gossip.is_empty()); } #[test] fn gossip_includes_peer_without_addresses() { let s = temp_storage(); let nid = make_node_id(3); // add_peer stores with empty addresses — still included in address-free gossip s.add_peer(&nid).unwrap(); let gossip = s.build_gossip_list().unwrap(); assert_eq!(gossip.len(), 1); } #[test] fn gossip_includes_non_followed_peer() { let s = temp_storage(); let nid = make_node_id(4); let 
addr: std::net::SocketAddr = "10.0.0.4:1234".parse().unwrap(); s.upsert_peer(&nid, &[addr], None).unwrap(); let gossip = s.build_gossip_list().unwrap(); assert_eq!(gossip.len(), 1); assert_eq!(gossip[0].node_id, nid); } // ---- Phase F: neighbor, wide peer, worm, audience tests ---- #[test] fn store_and_list_peer_neighbors() { let s = temp_storage(); let peer_a = make_node_id(1); let neighbor_b = make_node_id(2); let neighbor_c = make_node_id(3); let our_id = make_node_id(99); let gossip = vec![ GossipPeerInfo { node_id: neighbor_b, is_anchor: false }, GossipPeerInfo { node_id: neighbor_c, is_anchor: true }, GossipPeerInfo { node_id: our_id, is_anchor: false }, // should be skipped ]; let count = s.store_peer_neighbors(&peer_a, &gossip, &our_id).unwrap(); assert_eq!(count, 2); // lookup_in_two_hop assert!(s.lookup_in_two_hop(&neighbor_b).unwrap()); assert!(s.lookup_in_two_hop(&neighbor_c).unwrap()); assert!(!s.lookup_in_two_hop(&our_id).unwrap()); // list_peers_with_neighbor let reporters = s.list_peers_with_neighbor(&neighbor_b).unwrap(); assert_eq!(reporters, vec![peer_a]); // get_two_hop_set let two_hop = s.get_two_hop_set().unwrap(); assert_eq!(two_hop.len(), 2); assert!(two_hop.contains(&neighbor_b)); assert!(two_hop.contains(&neighbor_c)); } #[test] fn prune_stale_neighbors() { let s = temp_storage(); let peer = make_node_id(1); let neighbor = make_node_id(2); let our_id = make_node_id(99); s.store_peer_neighbors(&peer, &[GossipPeerInfo { node_id: neighbor, is_anchor: false }], &our_id).unwrap(); // Should find it assert!(s.lookup_in_two_hop(&neighbor).unwrap()); // Prune with 0ms max age (everything is stale) // We need to set reported_at in the past s.conn.execute( "UPDATE peer_neighbors SET reported_at = ?1", params![now_ms() - 7200_000], ).unwrap(); let pruned = s.prune_stale_neighbors(3600_000).unwrap(); assert_eq!(pruned, 1); assert!(!s.lookup_in_two_hop(&neighbor).unwrap()); } #[test] fn wide_peer_flag() { let s = temp_storage(); let nid = 
make_node_id(1); s.add_peer(&nid).unwrap(); s.set_wide_peer(&nid, true).unwrap(); let wide = s.list_wide_peers().unwrap(); assert_eq!(wide.len(), 1); assert_eq!(wide[0].node_id, nid); s.clear_all_wide_peers().unwrap(); let wide = s.list_wide_peers().unwrap(); assert!(wide.is_empty()); } #[test] fn worm_cooldown() { let s = temp_storage(); let target = make_node_id(1); assert!(!s.is_worm_cooldown(&target, 3600_000).unwrap()); s.record_worm_miss(&target).unwrap(); assert!(s.is_worm_cooldown(&target, 3600_000).unwrap()); } #[test] fn relay_cooldown() { let s = temp_storage(); let target = make_node_id(1); assert!(!s.is_relay_cooldown(&target, 3600_000).unwrap()); s.record_relay_miss(&target).unwrap(); assert!(s.is_relay_cooldown(&target, 3600_000).unwrap()); // Very short cooldown should not match (timestamp just set) // but since we just wrote it, it will be within 1ms, so 0 cooldown should still show assert!(s.is_relay_cooldown(&target, 1).unwrap()); } #[test] fn n2_n3_crud() { let s = temp_storage(); let reporter_a = make_node_id(1); let reporter_b = make_node_id(2); let node_x = make_node_id(10); let node_y = make_node_id(11); let node_z = make_node_id(12); // Set reporter_a's N1 (their connections) → our N2 s.set_peer_n1(&reporter_a, &[node_x, node_y]).unwrap(); let found = s.find_in_n2(&node_x).unwrap(); assert_eq!(found, vec![reporter_a]); // Set reporter_b's N1 → our N2 s.set_peer_n1(&reporter_b, &[node_y, node_z]).unwrap(); let found = s.find_in_n2(&node_y).unwrap(); assert_eq!(found.len(), 2); // Both reporters have node_y // Build N2 share (deduplicated) let n2_share = s.build_n2_share().unwrap(); assert_eq!(n2_share.len(), 3); // node_x, node_y, node_z // Clear reporter_a's N2 contributions let cleared = s.clear_peer_n2(&reporter_a).unwrap(); assert_eq!(cleared, 2); let found = s.find_in_n2(&node_x).unwrap(); assert!(found.is_empty()); // N3 operations s.set_peer_n2(&reporter_a, &[node_z]).unwrap(); let found = s.find_in_n3(&node_z).unwrap(); 
assert_eq!(found, vec![reporter_a]);
    // (tail of a test that starts before this chunk — left untouched)
    s.clear_peer_n3(&reporter_a).unwrap();
    let found = s.find_in_n3(&node_z).unwrap();
    assert!(found.is_empty());
}

// N1 share should include connected mesh peers and Online social routes,
// but never Disconnected social routes.
#[test]
fn n1_share_build() {
    let s = temp_storage();
    let peer_a = make_node_id(1);
    let follow_b = make_node_id(2);
    let addr: std::net::SocketAddr = "10.0.0.1:4433".parse().unwrap();
    // Add a mesh peer
    s.add_mesh_peer(&peer_a, PeerSlotKind::Local, 0).unwrap();
    // Add a follow with social route
    s.add_follow(&follow_b).unwrap();
    s.upsert_social_route(&SocialRouteEntry {
        node_id: follow_b,
        addresses: vec![addr],
        peer_addresses: vec![],
        relation: SocialRelation::Follow,
        status: SocialStatus::Disconnected,
        last_connected_ms: 0,
        last_seen_ms: 1000,
        reach_method: ReachMethod::Direct,
        preferred_tree: vec![],
    }).unwrap();
    // Disconnected routes should NOT be in N1 share
    let n1 = s.build_n1_share().unwrap();
    assert!(n1.contains(&peer_a));
    assert!(!n1.contains(&follow_b), "Disconnected social route should not be in N1");
    // Set to Online — now it should be included
    s.set_social_route_status(&follow_b, SocialStatus::Online).unwrap();
    let n1 = s.build_n1_share().unwrap();
    assert!(n1.contains(&peer_a));
    assert!(n1.contains(&follow_b), "Online social route should be in N1");
}

// A reporter's diversity score counts only nodes no other reporter also reports.
#[test]
fn diversity_scoring() {
    let s = temp_storage();
    let reporter_a = make_node_id(1);
    let reporter_b = make_node_id(2);
    let unique_node = make_node_id(10);
    let shared_node = make_node_id(11);
    // reporter_a has unique_node + shared_node
    s.set_peer_n1(&reporter_a, &[unique_node, shared_node]).unwrap();
    // reporter_b only has shared_node
    s.set_peer_n1(&reporter_b, &[shared_node]).unwrap();
    // reporter_a contributes 1 unique node (unique_node)
    let unique = s.count_unique_n2_for_reporter(&reporter_a, &[]).unwrap();
    assert_eq!(unique, 1);
    // reporter_b contributes 0 unique nodes
    let unique = s.count_unique_n2_for_reporter(&reporter_b, &[]).unwrap();
    assert_eq!(unique, 0);
}

// Lookup across both rings: results carry a distance tag in field .2
// (2 for N2, 3 for N3, per the asserts below); unknown nodes are omitted.
#[test]
fn find_any_in_n2_n3() {
    let s = temp_storage();
    let reporter = make_node_id(1);
    let node_n2 = make_node_id(10);
    let node_n3 = make_node_id(11);
    let node_nowhere = make_node_id(12);
    // NOTE(review): a peer's N1 appears in our N2, its N2 in our N3 —
    // inferred from the asserts below; confirm against Storage impl.
    s.set_peer_n1(&reporter, &[node_n2]).unwrap();
    s.set_peer_n2(&reporter, &[node_n3]).unwrap();
    let results = s.find_any_in_n2_n3(&[node_n2, node_n3, node_nowhere]).unwrap();
    assert_eq!(results.len(), 2);
    assert_eq!(results[0].2, 2); // N2 first
    assert_eq!(results[1].2, 3); // N3 second
}

// Add / list / count-by-kind / remove round-trip for mesh peer slots.
#[test]
fn mesh_peers_crud() {
    use crate::types::PeerSlotKind;
    let s = temp_storage();
    let nid = make_node_id(1);
    s.add_mesh_peer(&nid, PeerSlotKind::Local, 4).unwrap();
    let peers = s.list_mesh_peers().unwrap();
    assert_eq!(peers.len(), 1);
    assert_eq!(peers[0].0, nid);
    assert_eq!(peers[0].1, "local"); // kind is persisted as its string form
    assert_eq!(peers[0].2, 4);
    assert_eq!(s.count_mesh_peers_by_kind(PeerSlotKind::Local).unwrap(), 1);
    assert_eq!(s.count_mesh_peers_by_kind(PeerSlotKind::Wide).unwrap(), 0);
    s.remove_mesh_peer(&nid).unwrap();
    assert_eq!(s.count_mesh_peers_by_kind(PeerSlotKind::Local).unwrap(), 0);
}

// ---- Social routes tests ----

// Full upsert / get / list / status-update / remove cycle, including
// nested peer_addresses round-tripping.
#[test]
fn social_route_crud() {
    use crate::types::{PeerWithAddress, ReachMethod, SocialRelation, SocialRouteEntry, SocialStatus};
    let s = temp_storage();
    let nid = make_node_id(1);
    let addr: std::net::SocketAddr = "10.0.0.1:4433".parse().unwrap();
    let entry = SocialRouteEntry {
        node_id: nid,
        addresses: vec![addr],
        peer_addresses: vec![PeerWithAddress {
            n: hex::encode(make_node_id(2)),
            a: vec!["10.0.0.2:4433".to_string()],
        }],
        relation: SocialRelation::Follow,
        status: SocialStatus::Online,
        last_connected_ms: 1000,
        last_seen_ms: 2000,
        reach_method: ReachMethod::Direct,
        preferred_tree: vec![],
    };
    s.upsert_social_route(&entry).unwrap();
    assert!(s.has_social_route(&nid).unwrap());
    let got = s.get_social_route(&nid).unwrap().unwrap();
    assert_eq!(got.relation, SocialRelation::Follow);
    assert_eq!(got.status, SocialStatus::Online);
    assert_eq!(got.addresses.len(), 1);
    assert_eq!(got.peer_addresses.len(), 1);
    // List all
    let routes = s.list_social_routes().unwrap();
    assert_eq!(routes.len(), 1);
    // Update status
    s.set_social_route_status(&nid, SocialStatus::Disconnected).unwrap();
    let got = s.get_social_route(&nid).unwrap().unwrap();
    assert_eq!(got.status, SocialStatus::Disconnected);
    // Remove
    s.remove_social_route(&nid).unwrap();
    assert!(!s.has_social_route(&nid).unwrap());
}

// Rebuilding from the follows table produces one Follow route per follow.
#[test]
fn social_route_rebuild() {
    use crate::types::SocialRelation;
    let s = temp_storage();
    let follow_a = make_node_id(1);
    let follow_b = make_node_id(2);
    s.add_follow(&follow_a).unwrap();
    s.add_follow(&follow_b).unwrap();
    let count = s.rebuild_social_routes().unwrap();
    assert_eq!(count, 2);
    let route_a = s.get_social_route(&follow_a).unwrap().unwrap();
    assert_eq!(route_a.relation, SocialRelation::Follow);
    let route_b = s.get_social_route(&follow_b).unwrap().unwrap();
    assert_eq!(route_b.relation, SocialRelation::Follow);
}

// A route whose last_seen_ms is older than the staleness window shows up
// in the stale list.
#[test]
fn social_route_stale() {
    use crate::types::{ReachMethod, SocialRelation, SocialRouteEntry, SocialStatus};
    let s = temp_storage();
    let nid = make_node_id(1);
    let entry = SocialRouteEntry {
        node_id: nid,
        addresses: vec![],
        peer_addresses: vec![],
        relation: SocialRelation::Follow,
        status: SocialStatus::Online,
        last_connected_ms: 0,
        last_seen_ms: 0,
        reach_method: ReachMethod::Direct,
        preferred_tree: vec![],
    };
    s.upsert_social_route(&entry).unwrap();
    // With last_seen_ms=0, it should be stale relative to now
    let stale = s.list_stale_social_routes(1000).unwrap();
    assert_eq!(stale.len(), 1);
}

// Watchers accumulate per target and are removed together by clear.
#[test]
fn reconnect_watchers() {
    let s = temp_storage();
    let target = make_node_id(1);
    let watcher1 = make_node_id(2);
    let watcher2 = make_node_id(3);
    s.add_reconnect_watcher(&target, &watcher1).unwrap();
    s.add_reconnect_watcher(&target, &watcher2).unwrap();
    let watchers = s.get_reconnect_watchers(&target).unwrap();
    assert_eq!(watchers.len(), 2);
    s.clear_reconnect_watchers(&target).unwrap();
    let watchers = s.get_reconnect_watchers(&target).unwrap();
    assert!(watchers.is_empty());
}

// ---- CDN manifest tests ----

// Store / overwrite / miss / query-by-author for CDN manifests keyed by CID.
#[test]
fn
cdn_manifest_crud() {
    let s = temp_storage();
    let cid = [42u8; 32];
    let author = make_node_id(1);
    let manifest_json = r#"{"test": true}"#;
    // Store
    s.store_cdn_manifest(&cid, manifest_json, &author, 1000).unwrap();
    let got = s.get_cdn_manifest(&cid).unwrap().unwrap();
    assert_eq!(got, manifest_json);
    // Update
    let updated_json = r#"{"test": true, "updated": true}"#;
    s.store_cdn_manifest(&cid, updated_json, &author, 2000).unwrap();
    let got = s.get_cdn_manifest(&cid).unwrap().unwrap();
    assert_eq!(got, updated_json);
    // Missing
    assert!(s.get_cdn_manifest(&[99u8; 32]).unwrap().is_none());
    // By author
    let manifests = s.get_manifests_for_author_blobs(&author).unwrap();
    assert_eq!(manifests.len(), 1);
    assert_eq!(manifests[0].0, cid);
}

// Blobs start unpinned; pin/unpin toggles the flag.
#[test]
fn blob_pin_unpin() {
    let s = temp_storage();
    let cid = [42u8; 32];
    let post_id = make_node_id(1);
    let author = make_node_id(2);
    s.record_blob(&cid, &post_id, &author, 1000, "image/png", 100).unwrap();
    // Not pinned by default
    assert!(!s.is_blob_pinned(&cid));
    // Pin
    s.pin_blob(&cid).unwrap();
    assert!(s.is_blob_pinned(&cid));
    // Unpin
    s.unpin_blob(&cid).unwrap();
    assert!(!s.is_blob_pinned(&cid));
}

// Fresh post_replicas rows (confirmed "now") are counted into each
// candidate's peer_copies. Replicas are inserted via raw SQL since the
// test exercises the read path only.
#[test]
fn eviction_candidates_with_replicas() {
    let s = temp_storage();
    let author = make_node_id(1);
    let post_id = [10u8; 32];
    let cid1 = [20u8; 32];
    let cid2 = [30u8; 32];
    s.record_blob(&cid1, &post_id, &author, 500, "image/png", 100).unwrap();
    s.record_blob(&cid2, &post_id, &author, 300, "image/jpeg", 200).unwrap();
    // Add replicas for the post
    let peer1 = make_node_id(10);
    let peer2 = make_node_id(11);
    let now = super::now_ms();
    s.conn.execute(
        "INSERT INTO post_replicas (post_id, node_id, last_confirmed_ms) VALUES (?1, ?2, ?3)",
        params![post_id.as_slice(), peer1.as_slice(), now as i64],
    ).unwrap();
    s.conn.execute(
        "INSERT INTO post_replicas (post_id, node_id, last_confirmed_ms) VALUES (?1, ?2, ?3)",
        params![post_id.as_slice(), peer2.as_slice(), now as i64],
    ).unwrap();
    // 3600_000 ms = 1 hour staleness window
    let candidates = s.get_eviction_candidates(3600_000).unwrap();
    assert_eq!(candidates.len(), 2);
    // Both should have 2 peer_copies
    for c in &candidates {
        assert_eq!(c.peer_copies, 2);
        assert_eq!(c.author, author);
    }
}

// A replica confirmed outside the staleness window must not count
// toward peer_copies.
#[test]
fn eviction_candidates_stale_replicas_excluded() {
    let s = temp_storage();
    let author = make_node_id(1);
    let post_id = [10u8; 32];
    let cid = [20u8; 32];
    s.record_blob(&cid, &post_id, &author, 500, "image/png", 100).unwrap();
    // Add a stale replica (confirmed 2 hours ago)
    let peer = make_node_id(10);
    let two_hours_ago = super::now_ms() - 7200_000;
    s.conn.execute(
        "INSERT INTO post_replicas (post_id, node_id, last_confirmed_ms) VALUES (?1, ?2, ?3)",
        params![post_id.as_slice(), peer.as_slice(), two_hours_ago as i64],
    ).unwrap();
    // With 1-hour staleness, the replica should be excluded
    let candidates = s.get_eviction_candidates(3600_000).unwrap();
    assert_eq!(candidates.len(), 1);
    assert_eq!(candidates[0].peer_copies, 0);
}

// cleanup_cdn_for_blob removes both the manifest and all holder rows for a CID.
#[test]
fn cdn_cleanup_for_blob() {
    let s = temp_storage();
    let cid = [42u8; 32];
    let author = make_node_id(1);
    let peer = make_node_id(2);
    s.store_cdn_manifest(&cid, r#"{"test": true}"#, &author, 100).unwrap();
    s.touch_file_holder(&cid, &peer, &["10.0.0.1:4433".to_string()], HolderDirection::Received).unwrap();
    assert!(s.get_cdn_manifest(&cid).unwrap().is_some());
    assert_eq!(s.get_file_holder_count(&cid).unwrap(), 1);
    s.cleanup_cdn_for_blob(&cid).unwrap();
    assert!(s.get_cdn_manifest(&cid).unwrap().is_none());
    assert_eq!(s.get_file_holder_count(&cid).unwrap(), 0);
}

// All CIDs recorded against a post are returned, in any order.
#[test]
fn get_blobs_for_post() {
    let s = temp_storage();
    let author = make_node_id(1);
    let post_id = [10u8; 32];
    let cid1 = [20u8; 32];
    let cid2 = [30u8; 32];
    s.record_blob(&cid1, &post_id, &author, 500, "image/png", 100).unwrap();
    s.record_blob(&cid2, &post_id, &author, 300, "image/jpeg", 200).unwrap();
    let cids = s.get_blobs_for_post(&post_id).unwrap();
    assert_eq!(cids.len(), 2);
    assert!(cids.contains(&cid1));
    assert!(cids.contains(&cid2));
}

// Neighborhood query around a timestamp: prev is newest-first, next is
// oldest-first (per the asserts below). Post IDs are blake3 of the
// serialized post.
#[test]
fn author_post_neighborhood() {
    let s = temp_storage();
    let author = make_node_id(1);
    // Create posts at timestamps 100, 200, 300, 400, 500
    for ts in [100u64, 200, 300, 400, 500] {
        let post = Post {
            author,
            content: format!("post at {}", ts),
            attachments: vec![],
            timestamp_ms: ts,
        };
        let id = blake3::hash(&serde_json::to_vec(&post).unwrap());
        s.store_post(id.as_bytes(), &post).unwrap();
    }
    // Neighborhood around ts=300
    let (prev, next) = s.get_author_post_neighborhood(&author, 300, 10).unwrap();
    assert_eq!(prev.len(), 2); // ts=200, ts=100
    assert_eq!(next.len(), 2); // ts=400, ts=500
    assert_eq!(prev[0].timestamp_ms, 200); // most recent first in prev
    assert_eq!(next[0].timestamp_ms, 400); // oldest first in next
}

// End-to-end CRUD for group keys: create, fetch by id and circle name,
// member-key storage, seed storage, epoch update, and delete cascading
// to seeds.
#[test]
fn group_key_crud() {
    let s = temp_storage();
    let admin = make_node_id(1);
    let member = make_node_id(2);
    let group_id = [42u8; 32];
    let pubkey = [99u8; 32];
    let seed = [55u8; 32];
    // Create group key
    let record = crate::types::GroupKeyRecord {
        group_id,
        circle_name: "friends".to_string(),
        epoch: 1,
        group_public_key: pubkey,
        admin,
        created_at: 1000,
    };
    s.create_group_key(&record, Some(&seed)).unwrap();
    // Retrieve by group_id
    let got = s.get_group_key(&group_id).unwrap().unwrap();
    assert_eq!(got.circle_name, "friends");
    assert_eq!(got.epoch, 1);
    assert_eq!(got.group_public_key, pubkey);
    assert_eq!(got.admin, admin);
    // Retrieve by circle name
    let got2 = s.get_group_key_by_circle("friends").unwrap().unwrap();
    assert_eq!(got2.group_id, group_id);
    // Store member key
    let mk = crate::types::GroupMemberKey {
        member,
        epoch: 1,
        wrapped_group_key: vec![0u8; 60],
    };
    s.store_group_member_key(&group_id, &mk).unwrap();
    let keys = s.get_group_member_keys(&group_id, 1).unwrap();
    assert_eq!(keys.len(), 1);
    assert_eq!(keys[0].member, member);
    let my_key = s.get_my_group_member_key(&group_id, 1, &member).unwrap();
    assert!(my_key.is_some());
    let no_key = s.get_my_group_member_key(&group_id, 1, &admin).unwrap();
    assert!(no_key.is_none());
    // Store seed
    s.store_group_seed(&group_id, 1, &seed).unwrap();
    let got_seed =
s.get_group_seed(&group_id, 1).unwrap().unwrap();
    assert_eq!(got_seed, seed);
    // Update epoch
    let new_pubkey = [88u8; 32];
    let new_seed = [77u8; 32];
    s.update_group_epoch(&group_id, 2, &new_pubkey, Some(&new_seed)).unwrap();
    let updated = s.get_group_key(&group_id).unwrap().unwrap();
    assert_eq!(updated.epoch, 2);
    assert_eq!(updated.group_public_key, new_pubkey);
    // get_all_group_members requires circle + members
    s.create_circle("friends").unwrap();
    s.add_circle_member("friends", &member).unwrap();
    let all = s.get_all_group_members().unwrap();
    assert!(all.contains_key(&group_id));
    assert!(all[&group_id].contains(&member));
    // Delete
    s.delete_group_key(&group_id).unwrap();
    assert!(s.get_group_key(&group_id).unwrap().is_none());
    assert!(s.get_group_seed(&group_id, 1).unwrap().is_none());
}

// The (group_id, epoch) → (seed, pubkey) map derives the public key from
// the stored seed rather than returning the stored pubkey.
#[test]
fn group_seeds_map() {
    let s = temp_storage();
    let admin = make_node_id(1);
    let group_id = [42u8; 32];
    let pubkey = [99u8; 32];
    let seed = [55u8; 32];
    let record = crate::types::GroupKeyRecord {
        group_id,
        circle_name: "test".to_string(),
        epoch: 1,
        group_public_key: pubkey,
        admin,
        created_at: 1000,
    };
    s.create_group_key(&record, Some(&seed)).unwrap();
    s.store_group_seed(&group_id, 1, &seed).unwrap();
    let map = s.get_all_group_seeds_map().unwrap();
    assert!(map.contains_key(&(group_id, 1)));
    let (got_seed, got_pubkey) = map[&(group_id, 1)];
    assert_eq!(got_seed, seed);
    // pubkey is derived from seed, not the stored one
    let expected_pubkey = ed25519_dalek::SigningKey::from_bytes(&seed).verifying_key().to_bytes();
    assert_eq!(got_pubkey, expected_pubkey);
}

// ---- Preferred peers tests ----

// Add / check / list / count / remove cycle for the preferred-peer set.
#[test]
fn preferred_peers_crud() {
    let s = temp_storage();
    let peer_a = make_node_id(1);
    let peer_b = make_node_id(2);
    assert_eq!(s.count_preferred_peers().unwrap(), 0);
    assert!(!s.is_preferred_peer(&peer_a).unwrap());
    s.add_preferred_peer(&peer_a).unwrap();
    s.add_preferred_peer(&peer_b).unwrap();
    assert_eq!(s.count_preferred_peers().unwrap(), 2);
    assert!(s.is_preferred_peer(&peer_a).unwrap());
    assert!(s.is_preferred_peer(&peer_b).unwrap());
    let list = s.list_preferred_peers().unwrap();
    assert_eq!(list.len(), 2);
    assert!(list.contains(&peer_a));
    assert!(list.contains(&peer_b));
    s.remove_preferred_peer(&peer_a).unwrap();
    assert!(!s.is_preferred_peer(&peer_a).unwrap());
    assert_eq!(s.count_preferred_peers().unwrap(), 1);
}

// Double-adding the same peer neither errors nor double-counts.
#[test]
fn preferred_peers_idempotent() {
    let s = temp_storage();
    let peer = make_node_id(1);
    s.add_preferred_peer(&peer).unwrap();
    s.add_preferred_peer(&peer).unwrap(); // no error on duplicate
    assert_eq!(s.count_preferred_peers().unwrap(), 1);
}

// preferred_peers round-trips through profile storage along with the
// public_visible and avatar_cid fields.
#[test]
fn profile_stores_preferred_peers() {
    let s = temp_storage();
    let nid = make_node_id(1);
    let pref_a = make_node_id(10);
    let pref_b = make_node_id(11);
    let profile = PublicProfile {
        node_id: nid,
        display_name: "test".to_string(),
        bio: "".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![pref_a, pref_b],
        public_visible: true,
        avatar_cid: None,
    };
    s.store_profile(&profile).unwrap();
    let got = s.get_profile(&nid).unwrap().unwrap();
    assert_eq!(got.preferred_peers.len(), 2);
    assert!(got.preferred_peers.contains(&pref_a));
    assert!(got.preferred_peers.contains(&pref_b));
    assert!(got.public_visible);
    assert!(got.avatar_cid.is_none());
}

// Slot budgets per device profile; the grand total per device must stay fixed
// so carving out preferred slots doesn't change overall capacity.
#[test]
fn preferred_slot_counts() {
    assert_eq!(DeviceProfile::Desktop.preferred_slots(), 10);
    assert_eq!(DeviceProfile::Desktop.local_slots(), 71);
    assert_eq!(DeviceProfile::Mobile.preferred_slots(), 3);
    assert_eq!(DeviceProfile::Mobile.local_slots(), 7);
    // Total unchanged
    assert_eq!(
        DeviceProfile::Desktop.preferred_slots() + DeviceProfile::Desktop.local_slots() + DeviceProfile::Desktop.wide_slots(),
        101
    );
    assert_eq!(
        DeviceProfile::Mobile.preferred_slots() + DeviceProfile::Mobile.local_slots() + DeviceProfile::Mobile.wide_slots(),
        15
    );
}

// FromStr/Display round-trip for the "preferred" slot kind.
#[test]
fn peer_slot_kind_preferred_roundtrip() {
    let kind: PeerSlotKind = "preferred".parse().unwrap();
    assert_eq!(kind, PeerSlotKind::Preferred);
    assert_eq!(kind.to_string(), "preferred");
}

// ---- Preferred tree tests ----

// Without any stored profiles, the tree degenerates to just the target node.
#[test]
fn build_preferred_tree_empty() {
    let s = temp_storage();
    let target = make_node_id(1);
    // No profile stored — tree should just contain the target
    let tree = s.build_preferred_tree_for(&target).unwrap();
    assert_eq!(tree.len(), 1);
    assert!(tree.contains(&target));
}

// Two-layer expansion: the target's preferred peers plus each of their
// preferred peers are gathered into one set.
#[test]
fn build_preferred_tree_two_layers() {
    let s = temp_storage();
    let target = make_node_id(1);
    let l1_a = make_node_id(10);
    let l1_b = make_node_id(11);
    let l2_a1 = make_node_id(20);
    let l2_a2 = make_node_id(21);
    let l2_b1 = make_node_id(30);
    // Target's profile with 2 preferred peers
    s.store_profile(&PublicProfile {
        node_id: target,
        display_name: "target".to_string(),
        bio: "".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![l1_a, l1_b],
        public_visible: true,
        avatar_cid: None,
    }).unwrap();
    // L1 peer A's profile with 2 preferred peers
    s.store_profile(&PublicProfile {
        node_id: l1_a,
        display_name: "l1a".to_string(),
        bio: "".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![l2_a1, l2_a2],
        public_visible: true,
        avatar_cid: None,
    }).unwrap();
    // L1 peer B's profile with 1 preferred peer
    s.store_profile(&PublicProfile {
        node_id: l1_b,
        display_name: "l1b".to_string(),
        bio: "".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![l2_b1],
        public_visible: true,
        avatar_cid: None,
    }).unwrap();
    let tree = s.build_preferred_tree_for(&target).unwrap();
    // Should contain: target, l1_a, l1_b, l2_a1, l2_a2, l2_b1 = 6 unique nodes
    assert_eq!(tree.len(), 6);
    assert!(tree.contains(&target));
    assert!(tree.contains(&l1_a));
    assert!(tree.contains(&l1_b));
    assert!(tree.contains(&l2_a1));
    assert!(tree.contains(&l2_a2));
    assert!(tree.contains(&l2_b1));
}

// A node reachable through multiple paths (including a back-edge to the
// target itself) appears exactly once.
#[test]
fn build_preferred_tree_deduplicates() {
    let s = temp_storage();
    let target = make_node_id(1);
    let shared = make_node_id(10);
    let
l1_a = make_node_id(11);
    // Target's preferred peers include shared
    s.store_profile(&PublicProfile {
        node_id: target,
        display_name: "target".to_string(),
        bio: "".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![l1_a, shared],
        public_visible: true,
        avatar_cid: None,
    }).unwrap();
    // L1 peer's preferred peers also include shared
    s.store_profile(&PublicProfile {
        node_id: l1_a,
        display_name: "l1a".to_string(),
        bio: "".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![shared, target],
        public_visible: true,
        avatar_cid: None,
    }).unwrap();
    let tree = s.build_preferred_tree_for(&target).unwrap();
    // Should contain: target, l1_a, shared = 3 unique nodes (no duplicates)
    assert_eq!(tree.len(), 3);
}

// preferred_tree persists through upsert/get and can be replaced in place.
#[test]
fn social_route_preferred_tree_roundtrip() {
    use crate::types::{ReachMethod, SocialRelation, SocialRouteEntry, SocialStatus};
    let s = temp_storage();
    let nid = make_node_id(1);
    let tree_node = make_node_id(10);
    let entry = SocialRouteEntry {
        node_id: nid,
        addresses: vec![],
        peer_addresses: vec![],
        relation: SocialRelation::Follow,
        status: SocialStatus::Online,
        last_connected_ms: 0,
        last_seen_ms: 1000,
        reach_method: ReachMethod::Direct,
        preferred_tree: vec![tree_node],
    };
    s.upsert_social_route(&entry).unwrap();
    let got = s.get_social_route(&nid).unwrap().unwrap();
    assert_eq!(got.preferred_tree.len(), 1);
    assert!(got.preferred_tree.contains(&tree_node));
    // Update preferred tree
    let new_tree = vec![make_node_id(20), make_node_id(21)];
    s.update_social_route_preferred_tree(&nid, &new_tree).unwrap();
    let got2 = s.get_social_route(&nid).unwrap().unwrap();
    assert_eq!(got2.preferred_tree.len(), 2);
}

// ---- Circle Profile tests ----

// Set / get / list / overwrite / delete for per-circle profile overrides,
// keyed by (author, circle_name).
#[test]
fn circle_profile_crud() {
    let s = temp_storage();
    let author = make_node_id(1);
    let cp = CircleProfile {
        author,
        circle_name: "friends".to_string(),
        display_name: "Alice (friends)".to_string(),
        bio: "Hi friends!".to_string(),
        avatar_cid: None,
        updated_at: 1000,
    };
    s.set_circle_profile(&cp).unwrap();
    // Get
    let got = s.get_circle_profile(&author, "friends").unwrap().unwrap();
    assert_eq!(got.display_name, "Alice (friends)");
    assert_eq!(got.bio, "Hi friends!");
    assert!(got.avatar_cid.is_none());
    // List
    let list = s.list_circle_profiles_for_author(&author).unwrap();
    assert_eq!(list.len(), 1);
    // Update
    let cp2 = CircleProfile {
        author,
        circle_name: "friends".to_string(),
        display_name: "Alice Updated".to_string(),
        bio: "New bio".to_string(),
        avatar_cid: Some([42u8; 32]),
        updated_at: 2000,
    };
    s.set_circle_profile(&cp2).unwrap();
    let got2 = s.get_circle_profile(&author, "friends").unwrap().unwrap();
    assert_eq!(got2.display_name, "Alice Updated");
    assert_eq!(got2.avatar_cid, Some([42u8; 32]));
    // Delete
    s.delete_circle_profile(&author, "friends").unwrap();
    assert!(s.get_circle_profile(&author, "friends").unwrap().is_none());
}

// Display resolution: circle members get the circle profile, everyone
// else falls back to the public profile.
#[test]
fn resolve_display_for_peer_circle_member() {
    let s = temp_storage();
    let author = make_node_id(1);
    let viewer = make_node_id(2);
    let stranger = make_node_id(3);
    // Set up public profile
    s.store_profile(&PublicProfile {
        node_id: author,
        display_name: "Alice Public".to_string(),
        bio: "Public bio".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![],
        public_visible: true,
        avatar_cid: None,
    }).unwrap();
    // Create circle and add viewer as member
    s.create_circle("close-friends").unwrap();
    s.add_circle_member("close-friends", &viewer).unwrap();
    // Set circle profile
    let cp = CircleProfile {
        author,
        circle_name: "close-friends".to_string(),
        display_name: "Alice (CF)".to_string(),
        bio: "Circle bio".to_string(),
        avatar_cid: Some([99u8; 32]),
        updated_at: 2000,
    };
    s.set_circle_profile(&cp).unwrap();
    // Viewer in circle sees circle profile
    let (dn, bio, avatar) = s.resolve_display_for_peer(&author, &viewer).unwrap();
    assert_eq!(dn, "Alice (CF)");
    assert_eq!(bio, "Circle bio");
    assert_eq!(avatar, Some([99u8; 32]));
    // Stranger sees public profile
    let (dn2, bio2, avatar2) = s.resolve_display_for_peer(&author, &stranger).unwrap();
    assert_eq!(dn2, "Alice Public");
    assert_eq!(bio2, "Public bio");
    assert!(avatar2.is_none());
}

// public_visible=false hides everything from non-members: empty strings
// and no avatar rather than an error.
#[test]
fn resolve_display_hidden_profile() {
    let s = temp_storage();
    let author = make_node_id(1);
    let stranger = make_node_id(3);
    // Hidden public profile
    s.store_profile(&PublicProfile {
        node_id: author,
        display_name: "Alice Hidden".to_string(),
        bio: "Secret bio".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![],
        public_visible: false,
        avatar_cid: None,
    }).unwrap();
    // Stranger sees nothing
    let (dn, bio, avatar) = s.resolve_display_for_peer(&author, &stranger).unwrap();
    assert!(dn.is_empty());
    assert!(bio.is_empty());
    assert!(avatar.is_none());
}

// public_visible survives a store/get round-trip with its default of true.
#[test]
fn public_visible_migration_defaults_true() {
    let s = temp_storage();
    let nid = make_node_id(1);
    // Store a profile with public_visible=true (default)
    s.store_profile(&PublicProfile {
        node_id: nid,
        display_name: "test".to_string(),
        bio: "".to_string(),
        updated_at: 1000,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![],
        public_visible: true,
        avatar_cid: None,
    }).unwrap();
    let got = s.get_profile(&nid).unwrap().unwrap();
    assert!(got.public_visible);
}

// ---- Known anchors tests ----

// Repeated upserts bump an anchor's success count; listing orders by it
// (highest first, per the assert below).
#[test]
fn known_anchors_upsert_and_list() {
    let s = temp_storage();
    let a1 = make_node_id(1);
    let a2 = make_node_id(2);
    let a3 = make_node_id(3);
    let addr: SocketAddr = "10.0.0.1:4433".parse().unwrap();
    s.upsert_known_anchor(&a1, &[addr]).unwrap();
    s.upsert_known_anchor(&a2, &[addr]).unwrap();
    s.upsert_known_anchor(&a3, &[addr]).unwrap();
    // a1 gets extra success bumps
    s.upsert_known_anchor(&a1, &[addr]).unwrap();
    s.upsert_known_anchor(&a1, &[addr]).unwrap();
    let anchors = s.list_known_anchors().unwrap();
    assert_eq!(anchors.len(), 3);
    // a1 should be first (highest success_count = 3)
    assert_eq!(anchors[0].0, a1);
}

// Inserting more anchors than the retention cap auto-prunes down to 5.
#[test]
fn known_anchors_prune() {
    let s = temp_storage();
    let addr: SocketAddr =
"10.0.0.1:4433".parse().unwrap();
    for i in 0..10u8 {
        let nid = make_node_id(i + 1);
        s.upsert_known_anchor(&nid, &[addr]).unwrap();
    }
    // Auto-prune should keep only 5
    let anchors = s.list_known_anchors().unwrap();
    assert_eq!(anchors.len(), 5);
}

// The anchor flag on a peer row defaults to false, is settable, and a
// missing peer reads as not-an-anchor rather than erroring.
#[test]
fn is_peer_anchor_check() {
    let s = temp_storage();
    let nid = make_node_id(1);
    let addr: SocketAddr = "10.0.0.1:4433".parse().unwrap();
    s.upsert_peer(&nid, &[addr], None).unwrap();
    assert!(!s.is_peer_anchor(&nid).unwrap());
    s.set_peer_anchor(&nid, true).unwrap();
    assert!(s.is_peer_anchor(&nid).unwrap());
    // Non-existent peer
    let unknown = make_node_id(99);
    assert!(!s.is_peer_anchor(&unknown).unwrap());
}

// --- Engagement tests ---

// file_holders keeps at most 5 holders per file, evicting the
// least-recently-touched ones.
#[test]
fn file_holders_lru_cap() {
    let s = temp_storage();
    let file = [42u8; 32];
    // Sleep between inserts so last_interaction_ms actually differs (ms resolution).
    for i in 0..7u8 {
        s.touch_file_holder(&file, &make_node_id(i), &[], HolderDirection::Received).unwrap();
        std::thread::sleep(std::time::Duration::from_millis(2));
    }
    // Only 5 most-recent survive
    assert_eq!(s.get_file_holder_count(&file).unwrap(), 5);
    let holders = s.get_file_holders(&file).unwrap();
    assert_eq!(holders.len(), 5);
    let kept: std::collections::HashSet<_> = holders.iter().map(|(n, _)| *n).collect();
    // Oldest two (i=0, i=1) got evicted; most recent (i=6) survives
    assert!(!kept.contains(&make_node_id(0)));
    assert!(!kept.contains(&make_node_id(1)));
    assert!(kept.contains(&make_node_id(6)));
}

// Touching the same (file, peer) with Received then Sent stores "both".
// Verified via raw SQL since no read API exposes the direction column here.
#[test]
fn file_holders_direction_promotion() {
    let s = temp_storage();
    let file = [42u8; 32];
    let peer = make_node_id(1);
    s.touch_file_holder(&file, &peer, &[], HolderDirection::Received).unwrap();
    s.touch_file_holder(&file, &peer, &[], HolderDirection::Sent).unwrap();
    // Re-insert with opposite direction should promote to "both"
    let dir: String = s.conn.query_row(
        "SELECT direction FROM file_holders WHERE file_id = ?1 AND peer_id = ?2",
        rusqlite::params![file.as_slice(), peer.as_slice()],
        |row| row.get(0),
    ).unwrap();
    assert_eq!(dir, "both");
}

// Reactions: store several, count grouped by emoji with a "did I react"
// flag, then remove one. Note `me` deliberately equals reactor1.
#[test]
fn reaction_crud() {
    use crate::types::Reaction;
    let s = temp_storage();
    let post_id = make_post_id(1);
    let reactor1 = make_node_id(1);
    let reactor2 = make_node_id(2);
    let me = make_node_id(1);
    s.store_reaction(&Reaction {
        reactor: reactor1,
        emoji: "👍".to_string(),
        post_id,
        timestamp_ms: 1000,
        encrypted_payload: None,
        deleted_at: None,
        signature: vec![],
    }).unwrap();
    s.store_reaction(&Reaction {
        reactor: reactor2,
        emoji: "👍".to_string(),
        post_id,
        timestamp_ms: 1001,
        encrypted_payload: None,
        deleted_at: None,
        signature: vec![],
    }).unwrap();
    s.store_reaction(&Reaction {
        reactor: reactor1,
        emoji: "❤️".to_string(),
        post_id,
        timestamp_ms: 1002,
        encrypted_payload: None,
        deleted_at: None,
        signature: vec![],
    }).unwrap();
    let reactions = s.get_reactions(&post_id).unwrap();
    assert_eq!(reactions.len(), 3);
    let counts = s.get_reaction_counts(&post_id, &me).unwrap();
    assert_eq!(counts.len(), 2); // 👍 and ❤️
    // 👍 has 2 reactions, one from me
    let thumbs = counts.iter().find(|(e, _, _)| e == "👍").unwrap();
    assert_eq!(thumbs.1, 2);
    assert!(thumbs.2); // I reacted
    // Remove
    s.remove_reaction(&reactor1, &post_id, "👍").unwrap();
    let reactions = s.get_reactions(&post_id).unwrap();
    assert_eq!(reactions.len(), 2);
}

// Comments are returned oldest-first (per the asserts) and counted per post.
#[test]
fn comment_crud() {
    use crate::types::InlineComment;
    let s = temp_storage();
    let post_id = make_post_id(1);
    let author1 = make_node_id(1);
    let author2 = make_node_id(2);
    s.store_comment(&InlineComment {
        author: author1,
        post_id,
        content: "Nice post!".to_string(),
        timestamp_ms: 1000,
        signature: vec![0u8; 64],
        deleted_at: None,
        ref_post_id: None,
    }).unwrap();
    s.store_comment(&InlineComment {
        author: author2,
        post_id,
        content: "I agree".to_string(),
        timestamp_ms: 1001,
        signature: vec![1u8; 64],
        deleted_at: None,
        ref_post_id: None,
    }).unwrap();
    let comments = s.get_comments(&post_id).unwrap();
    assert_eq!(comments.len(), 2);
    assert_eq!(comments[0].content, "Nice post!");
    assert_eq!(comments[1].content, "I agree");
    assert_eq!(s.get_comment_count(&post_id).unwrap(), 2);
}

// ref_post_id (link from a preview comment to its full post) survives
// both the live and tombstone-inclusive read paths.
#[test]
fn rich_comment_ref_post_id_roundtrip() {
    use crate::types::InlineComment;
    let s = temp_storage();
    let post_id = make_post_id(1);
    let author = make_node_id(5);
    let ref_post = make_post_id(42);
    s.store_comment(&InlineComment {
        author,
        post_id,
        content: "(preview of a long body)".to_string(),
        timestamp_ms: 2000,
        signature: vec![9u8; 64],
        deleted_at: None,
        ref_post_id: Some(ref_post),
    }).unwrap();
    let live = s.get_comments(&post_id).unwrap();
    assert_eq!(live.len(), 1);
    assert_eq!(live[0].ref_post_id, Some(ref_post));
    let all = s.get_comments_with_tombstones(&post_id).unwrap();
    assert_eq!(all.len(), 1);
    assert_eq!(all[0].ref_post_id, Some(ref_post));
}

// Per-post comment policy: absent by default, then set / read / replace.
#[test]
fn comment_policy_crud() {
    use crate::types::{CommentPermission, CommentPolicy, ModerationMode, ReactPermission};
    let s = temp_storage();
    let post_id = make_post_id(1);
    // No policy initially
    assert!(s.get_comment_policy(&post_id).unwrap().is_none());
    let policy = CommentPolicy {
        allow_comments: CommentPermission::FollowersOnly,
        allow_reacts: ReactPermission::Public,
        moderation: ModerationMode::AuthorBlocklist,
        blocklist: vec![make_node_id(99)],
    };
    s.set_comment_policy(&post_id, &policy).unwrap();
    let loaded = s.get_comment_policy(&post_id).unwrap().unwrap();
    assert_eq!(loaded.allow_comments, CommentPermission::FollowersOnly);
    assert_eq!(loaded.allow_reacts, ReactPermission::Public);
    assert_eq!(loaded.blocklist.len(), 1);
    // Update
    let policy2 = CommentPolicy {
        allow_comments: CommentPermission::None,
        ..Default::default()
    };
    s.set_comment_policy(&post_id, &policy2).unwrap();
    let loaded2 = s.get_comment_policy(&post_id).unwrap().unwrap();
    assert_eq!(loaded2.allow_comments, CommentPermission::None);
}

// Blob header (JSON + timestamp) store / get / overwrite keyed by post id.
#[test]
fn blob_header_crud() {
    let s = temp_storage();
    let post_id = make_post_id(1);
    let author = make_node_id(1);
    assert!(s.get_blob_header(&post_id).unwrap().is_none());
    s.store_blob_header(&post_id, &author, "{\"test\":true}", 1000).unwrap();
    let (json, ts) = s.get_blob_header(&post_id).unwrap().unwrap();
    assert_eq!(json, "{\"test\":true}");
    assert_eq!(ts, 1000);
    // Update
    s.store_blob_header(&post_id, &author, "{\"test\":false}", 2000).unwrap();
    let (json2, ts2) = s.get_blob_header(&post_id).unwrap().unwrap();
    assert_eq!(json2, "{\"test\":false}");
    assert_eq!(ts2, 2000);
}

// Parent/child thread links: children listed under the parent, parent
// resolvable from a child, and a root post has no parent.
#[test]
fn thread_meta_crud() {
    use crate::types::ThreadMeta;
    let s = temp_storage();
    let parent = make_post_id(1);
    let child1 = make_post_id(2);
    let child2 = make_post_id(3);
    s.store_thread_meta(&ThreadMeta {
        post_id: child1,
        parent_post_id: parent,
    }).unwrap();
    s.store_thread_meta(&ThreadMeta {
        post_id: child2,
        parent_post_id: parent,
    }).unwrap();
    let children = s.get_thread_children(&parent).unwrap();
    assert_eq!(children.len(), 2);
    let found_parent = s.get_thread_parent(&child1).unwrap().unwrap();
    assert_eq!(found_parent, parent);
    assert!(s.get_thread_parent(&parent).unwrap().is_none());
}
}