Compare commits

de6aa06acfe404556888e4b189e2fb8053919ade..3c5b80d0177f64a5df5f0938a9b614dfcb684a14

No commits in common. "de6aa06acfe404556888e4b189e2fb8053919ade" and "3c5b80d0177f64a5df5f0938a9b614dfcb684a14" have entirely different histories.

21 changed files with 1542 additions and 2124 deletions

6
Cargo.lock generated
View file

@ -2732,7 +2732,7 @@ checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"
[[package]] [[package]]
name = "itsgoin-cli" name = "itsgoin-cli"
version = "0.6.2" version = "0.6.1"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"hex", "hex",
@ -2744,7 +2744,7 @@ dependencies = [
[[package]] [[package]]
name = "itsgoin-core" name = "itsgoin-core"
version = "0.6.2" version = "0.6.1"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"base64 0.22.1", "base64 0.22.1",
@ -2767,7 +2767,7 @@ dependencies = [
[[package]] [[package]]
name = "itsgoin-desktop" name = "itsgoin-desktop"
version = "0.6.2" version = "0.6.1"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"base64 0.22.1", "base64 0.22.1",

View file

@ -1,6 +1,6 @@
[package] [package]
name = "itsgoin-cli" name = "itsgoin-cli"
version = "0.6.2" version = "0.6.1"
edition = "2021" edition = "2021"
[[bin]] [[bin]]

View file

@ -138,6 +138,11 @@ async fn main() -> anyhow::Result<()> {
println!(" revoke <id> <node_id> [mode] Revoke access (mode: sync|reencrypt)"); println!(" revoke <id> <node_id> [mode] Revoke access (mode: sync|reencrypt)");
println!(" revoke-circle <circle> <nid> [m] Revoke circle access for a node"); println!(" revoke-circle <circle> <nid> [m] Revoke circle access for a node");
println!(" redundancy Show replica counts for your posts"); println!(" redundancy Show replica counts for your posts");
println!(" audience List audience members");
println!(" audience-request <node_id> Request to join peer's audience");
println!(" audience-pending Show pending audience requests");
println!(" audience-approve <node_id> Approve audience request");
println!(" audience-remove <node_id> Remove from audience");
println!(" worm <node_id> Worm lookup (find peer beyond 3-hop map)"); println!(" worm <node_id> Worm lookup (find peer beyond 3-hop map)");
println!(" connections Show mesh connections"); println!(" connections Show mesh connections");
println!(" social-routes Show social routing cache"); println!(" social-routes Show social routing cache");
@ -708,6 +713,91 @@ async fn main() -> anyhow::Result<()> {
} }
} }
"audience" => {
match node.list_audience_members().await {
Ok(members) => {
if members.is_empty() {
println!("(no audience members)");
} else {
println!("Audience members ({}):", members.len());
for nid in members {
let name = node.get_display_name(&nid).await.unwrap_or(None);
let label = name.unwrap_or_else(|| hex::encode(&nid)[..12].to_string());
println!(" {}", label);
}
}
}
Err(e) => println!("Error: {}", e),
}
}
"audience-request" => {
if let Some(id_hex) = arg {
match itsgoin_core::parse_node_id_hex(id_hex) {
Ok(nid) => {
match node.request_audience(&nid).await {
Ok(()) => println!("Audience request sent"),
Err(e) => println!("Error: {}", e),
}
}
Err(e) => println!("Invalid node ID: {}", e),
}
} else {
println!("Usage: audience-request <node_id_hex>");
}
}
"audience-pending" => {
use itsgoin_core::types::{AudienceDirection, AudienceStatus};
match node.list_audience(AudienceDirection::Inbound, Some(AudienceStatus::Pending)).await {
Ok(records) => {
if records.is_empty() {
println!("(no pending audience requests)");
} else {
println!("Pending audience requests ({}):", records.len());
for rec in records {
let name = node.get_display_name(&rec.node_id).await.unwrap_or(None);
let label = name.unwrap_or_else(|| hex::encode(&rec.node_id)[..12].to_string());
println!(" {}", label);
}
}
}
Err(e) => println!("Error: {}", e),
}
}
"audience-approve" => {
if let Some(id_hex) = arg {
match itsgoin_core::parse_node_id_hex(id_hex) {
Ok(nid) => {
match node.approve_audience(&nid).await {
Ok(()) => println!("Approved audience member"),
Err(e) => println!("Error: {}", e),
}
}
Err(e) => println!("Invalid node ID: {}", e),
}
} else {
println!("Usage: audience-approve <node_id_hex>");
}
}
"audience-remove" => {
if let Some(id_hex) = arg {
match itsgoin_core::parse_node_id_hex(id_hex) {
Ok(nid) => {
match node.remove_audience(&nid).await {
Ok(()) => println!("Removed from audience"),
Err(e) => println!("Error: {}", e),
}
}
Err(e) => println!("Invalid node ID: {}", e),
}
} else {
println!("Usage: audience-remove <node_id_hex>");
}
}
"worm" => { "worm" => {
if let Some(id_hex) = arg { if let Some(id_hex) = arg {
match itsgoin_core::parse_node_id_hex(id_hex) { match itsgoin_core::parse_node_id_hex(id_hex) {

View file

@ -1,6 +1,6 @@
[package] [package]
name = "itsgoin-core" name = "itsgoin-core"
version = "0.6.2" version = "0.6.1"
edition = "2021" edition = "2021"
[dependencies] [dependencies]

View file

@ -13,11 +13,12 @@ use crate::crypto;
use crate::protocol::{ use crate::protocol::{
read_message_type, read_payload, write_typed_message, AnchorReferral, read_message_type, read_payload, write_typed_message, AnchorReferral,
AnchorReferralRequestPayload, AnchorReferralResponsePayload, AnchorRegisterPayload, AnchorReferralRequestPayload, AnchorReferralResponsePayload, AnchorRegisterPayload,
BlobHeaderDiffPayload, AudienceRequestPayload, AudienceResponsePayload, BlobHeaderDiffPayload,
BlobHeaderRequestPayload, BlobHeaderResponsePayload, BlobRequestPayload, BlobResponsePayload, BlobHeaderRequestPayload, BlobHeaderResponsePayload, BlobRequestPayload, BlobResponsePayload,
CircleProfileUpdatePayload, GroupKeyRequestPayload, CircleProfileUpdatePayload, GroupKeyDistributePayload, GroupKeyRequestPayload,
GroupKeyResponsePayload, InitialExchangePayload, MeshPreferPayload, GroupKeyResponsePayload, InitialExchangePayload, MeshPreferPayload,
MessageType, NodeListUpdatePayload, PostDownstreamRegisterPayload, MessageType, NodeListUpdatePayload, PostDownstreamRegisterPayload,
PostNotificationPayload, PostPushPayload,
ProfileUpdatePayload, PullSyncRequestPayload, PullSyncResponsePayload, ProfileUpdatePayload, PullSyncRequestPayload, PullSyncResponsePayload,
RefuseRedirectPayload, RelayIntroducePayload, RelayIntroduceResultPayload, SessionRelayPayload, RefuseRedirectPayload, RelayIntroducePayload, RelayIntroduceResultPayload, SessionRelayPayload,
SocialAddressUpdatePayload, SocialCheckinPayload, SocialDisconnectNoticePayload, SocialAddressUpdatePayload, SocialCheckinPayload, SocialDisconnectNoticePayload,
@ -155,20 +156,6 @@ const SCAN_PUNCH_INTERVAL_SECS: u64 = 2;
/// Maximum scan duration (seconds) — accept the cost for otherwise-impossible connections /// Maximum scan duration (seconds) — accept the cost for otherwise-impossible connections
const SCAN_MAX_DURATION_SECS: u64 = 300; // 5 minutes const SCAN_MAX_DURATION_SECS: u64 = 300; // 5 minutes
/// Global cap on concurrent port-scan hole punches. Each scanner fires
/// ~100 QUIC ClientHellos/sec for up to `SCAN_MAX_DURATION_SECS`, which
/// is ~1 Mbps per active scanner. Without a cap, multiple parallel
/// referrals (growth loop, anchor referrals, replication) can spawn
/// several scanners at once and drive sustained multi-Mbps upload —
/// especially pathological on obfuscated VPNs where every probe stalls
/// at proxy timeouts. A permit is acquired before the scanning loop
/// starts and held until the scanner returns; extra callers fall back
/// to the cheaper `hole_punch_parallel`.
fn scanner_semaphore() -> &'static tokio::sync::Semaphore {
static SEM: std::sync::OnceLock<tokio::sync::Semaphore> = std::sync::OnceLock::new();
SEM.get_or_init(|| tokio::sync::Semaphore::new(1))
}
/// Advanced hole punch with port scanning fallback for EDM/port-restricted NAT. /// Advanced hole punch with port scanning fallback for EDM/port-restricted NAT.
/// ///
/// **Role-based behavior** (each side calls this independently): /// **Role-based behavior** (each side calls this independently):
@ -202,21 +189,6 @@ pub(crate) async fn hole_punch_with_scanning(
return hole_punch_parallel(endpoint, target, addresses).await; return hole_punch_parallel(endpoint, target, addresses).await;
} }
// v0.6.2: cap to one concurrent port scanner per node. Additional
// callers fall back to the cheaper `hole_punch_parallel` instead of
// spawning another 100-probes-per-second scanner. The permit is held
// for the lifetime of the scanner loop below (dropped on return).
let _scan_permit = match scanner_semaphore().try_acquire() {
Ok(p) => p,
Err(_) => {
tracing::debug!(
peer = hex::encode(target),
"another port scan already in progress — falling back to parallel punch"
);
return hole_punch_parallel(endpoint, target, addresses).await;
}
};
// Filter to reachable families, then use observed address (first in list, injected by relay) // Filter to reachable families, then use observed address (first in list, injected by relay)
let reachable = filter_reachable_families(endpoint, addresses); let reachable = filter_reachable_families(endpoint, addresses);
let observed_addr = reachable.first() let observed_addr = reachable.first()
@ -666,31 +638,6 @@ pub struct ConnectionManager {
/// Sticky N1 entries: NodeIds to report in N1 share until expiry (ms). /// Sticky N1 entries: NodeIds to report in N1 share until expiry (ms).
/// Used to advertise the bootstrap anchor for 24h after isolation recovery. /// Used to advertise the bootstrap anchor for 24h after isolation recovery.
sticky_n1: HashMap<NodeId, u64>, sticky_n1: HashMap<NodeId, u64>,
/// NodeIds with an outgoing connect attempt currently in flight.
/// Used by `try_begin_connect` to suppress duplicate concurrent outgoing
/// connects from racing paths (auto-reconnect, rebalance, relay
/// introduction target-side) against the same peer. Held in a sync
/// Mutex because every operation is a single O(1) hash insert/remove —
/// never held across an await.
pending_connects: Arc<std::sync::Mutex<HashSet<NodeId>>>,
}
/// RAII guard for a pending-outgoing-connect entry. Returned by
/// `ConnectionManager::try_begin_connect`. The NodeId is inserted into
/// `pending_connects` at construction and removed on drop, so a second
/// call to `try_begin_connect` for the same peer returns `None` for as
/// long as this guard is alive.
pub struct PendingConnectGuard {
peer_id: NodeId,
set: Arc<std::sync::Mutex<HashSet<NodeId>>>,
}
impl Drop for PendingConnectGuard {
fn drop(&mut self) {
if let Ok(mut s) = self.set.lock() {
s.remove(&self.peer_id);
}
}
} }
impl ConnectionManager { impl ConnectionManager {
@ -753,36 +700,9 @@ impl ConnectionManager {
http_capable: false, http_capable: false,
http_addr: None, http_addr: None,
sticky_n1: HashMap::new(), sticky_n1: HashMap::new(),
pending_connects: Arc::new(std::sync::Mutex::new(HashSet::new())),
} }
} }
/// Reserve an outgoing-connect slot for `peer`. Returns `Some(guard)`
/// if no other outgoing connect to this peer is already in flight and
/// we aren't already connected. The guard is held by the caller for
/// the duration of the connect attempt — subsequent calls for the
/// same peer return `None` until the guard drops.
///
/// Only gates outgoing duplicates; has no effect on incoming
/// connections from the peer, which are accepted normally.
pub fn try_begin_connect(&self, peer: NodeId) -> Option<PendingConnectGuard> {
if self.connections.contains_key(&peer) || self.sessions.contains_key(&peer) {
return None;
}
let mut set = match self.pending_connects.lock() {
Ok(g) => g,
Err(_) => return None, // Poisoned — fail closed rather than risk a racing connect.
};
if set.contains(&peer) {
return None;
}
set.insert(peer);
Some(PendingConnectGuard {
peer_id: peer,
set: self.pending_connects.clone(),
})
}
/// Our detected NAT type /// Our detected NAT type
pub fn nat_type(&self) -> crate::types::NatType { pub fn nat_type(&self) -> crate::types::NatType {
self.nat_type self.nat_type
@ -1426,25 +1346,18 @@ impl ConnectionManager {
conn: iroh::endpoint::Connection, conn: iroh::endpoint::Connection,
storage: &Arc<StoragePool>, storage: &Arc<StoragePool>,
peer_id: &NodeId, peer_id: &NodeId,
_our_node_id: NodeId, our_node_id: NodeId,
) -> anyhow::Result<PullSyncStats> { ) -> anyhow::Result<PullSyncStats> {
let (our_follows, follows_sync, our_personas) = { let (our_follows, follows_sync) = {
let s = storage.get().await; let s = storage.get().await;
( (s.list_follows()?, s.get_follows_with_last_sync().unwrap_or_default())
s.list_follows()?,
s.get_follows_with_last_sync().unwrap_or_default(),
s.list_posting_identities().unwrap_or_default(),
)
}; };
// Merged pull: include every posting identity we hold so DMs to any // Merged pull: include our own NodeId in the query so the peer returns
// of our personas match via wrapped_key.recipient. Network NodeId is // posts where we're either a followed author OR a recipient (DM).
// never an author or recipient and would never match.
let mut query_list = our_follows; let mut query_list = our_follows;
for pi in &our_personas { if !query_list.contains(&our_node_id) {
if !query_list.contains(&pi.node_id) { query_list.push(our_node_id);
query_list.push(pi.node_id);
}
} }
let request = PullSyncRequestPayload { let request = PullSyncRequestPayload {
@ -1475,19 +1388,11 @@ impl ConnectionManager {
for sp in &response.posts { for sp in &response.posts {
if s.is_deleted(&sp.id)? { continue; } if s.is_deleted(&sp.id)? { continue; }
if verify_post_id(&sp.id, &sp.post) { if verify_post_id(&sp.id, &sp.post) {
match crate::control::receive_post(&s, &sp.id, &sp.post, &sp.visibility, sp.intent.as_ref()) { if s.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility)? {
Ok(true) => { new_post_ids.push(sp.id);
new_post_ids.push(sp.id); posts_received += 1;
posts_received += 1;
synced_authors.insert(sp.post.author);
}
Ok(false) => {
synced_authors.insert(sp.post.author);
}
Err(e) => {
warn!(post_id = hex::encode(sp.id), error = %e, "rejecting post");
}
} }
synced_authors.insert(sp.post.author);
} }
} }
} }
@ -1974,6 +1879,129 @@ impl ConnectionManager {
sent sent
} }
/// Handle an incoming post notification: if we follow the author, pull the post.
/// `conn` is a fallback connection for ephemeral callers (not persistently connected).
pub async fn handle_post_notification(
&self,
from: &NodeId,
notification: PostNotificationPayload,
conn: Option<&iroh::endpoint::Connection>,
) -> anyhow::Result<bool> {
let dominated = {
let storage = self.storage.get().await;
// Already have this post?
if storage.get_post(&notification.post_id)?.is_some() {
return Ok(false);
}
// Do we follow the author?
let follows = storage.list_follows()?;
follows.contains(&notification.author)
};
if !dominated {
return Ok(false);
}
// We follow the author and don't have the post — pull it from the notifier
let pull_conn = match self.connections.get(from) {
Some(pc) => pc.connection.clone(),
None => match conn {
Some(c) => c.clone(),
None => return Ok(false),
},
};
let (our_follows, follows_sync) = {
let storage = self.storage.get().await;
(
storage.list_follows()?,
storage.get_follows_with_last_sync().unwrap_or_default(),
)
};
// Merged pull: include our own NodeId in the query list.
let mut query_list = our_follows;
if !query_list.contains(&self.our_node_id) {
query_list.push(self.our_node_id);
}
let (mut send, mut recv) = pull_conn.open_bi().await?;
let request = PullSyncRequestPayload {
follows: query_list,
have_post_ids: vec![], // v4: empty, using since_ms instead
since_ms: follows_sync,
};
write_typed_message(&mut send, MessageType::PullSyncRequest, &request).await?;
send.finish()?;
let _resp_type = read_message_type(&mut recv).await?;
let response: PullSyncResponsePayload =
read_payload(&mut recv, MAX_PAYLOAD).await?;
let now_ms = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_millis() as u64;
let mut stored = false;
let mut new_post_ids: Vec<PostId> = Vec::new();
let mut synced_authors: HashSet<NodeId> = HashSet::new();
// Brief lock 1: store posts
{
let storage = self.storage.get().await;
for sp in &response.posts {
if verify_post_id(&sp.id, &sp.post) && !storage.is_deleted(&sp.id)? {
let _ = storage.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility);
new_post_ids.push(sp.id);
synced_authors.insert(sp.post.author);
if sp.id == notification.post_id {
stored = true;
}
}
}
}
// Lock RELEASED
// Brief lock 2: upstream + last_sync + visibility updates
{
let storage = self.storage.get().await;
for pid in &new_post_ids {
let _ = storage.touch_file_holder(
pid,
from,
&[],
crate::storage::HolderDirection::Received,
);
}
for author in &synced_authors {
let _ = storage.update_follow_last_sync(author, now_ms);
}
for vu in &response.visibility_updates {
if let Some(post) = storage.get_post(&vu.post_id)? {
if post.author == vu.author {
let _ = storage.update_post_visibility(&vu.post_id, &vu.visibility);
}
}
}
}
// Register as downstream for new posts (cap at 50 to avoid flooding)
if !new_post_ids.is_empty() {
let reg_conn = pull_conn.clone();
tokio::spawn(async move {
for post_id in new_post_ids.into_iter().take(50) {
let payload = PostDownstreamRegisterPayload { post_id };
if let Ok(mut send) = reg_conn.open_uni().await {
let _ = write_typed_message(&mut send, MessageType::PostDownstreamRegister, &payload).await;
let _ = send.finish();
}
}
});
}
Ok(stored)
}
/// Pull posts from a connected peer. /// Pull posts from a connected peer.
pub async fn pull_from_peer(&self, peer_id: &NodeId) -> anyhow::Result<PullSyncStats> { pub async fn pull_from_peer(&self, peer_id: &NodeId) -> anyhow::Result<PullSyncStats> {
let pc = self let pc = self
@ -1981,21 +2009,18 @@ impl ConnectionManager {
.get(peer_id) .get(peer_id)
.ok_or_else(|| anyhow::anyhow!("not connected to {}", hex::encode(peer_id)))?; .ok_or_else(|| anyhow::anyhow!("not connected to {}", hex::encode(peer_id)))?;
let (our_follows, follows_sync, our_personas) = { let (our_follows, follows_sync) = {
let storage = self.storage.get().await; let storage = self.storage.get().await;
( (
storage.list_follows()?, storage.list_follows()?,
storage.get_follows_with_last_sync().unwrap_or_default(), storage.get_follows_with_last_sync().unwrap_or_default(),
storage.list_posting_identities().unwrap_or_default(),
) )
}; };
// Merged pull: include every posting identity so DMs match recipient. // Merged pull: include our own NodeId in the query list.
let mut query_list = our_follows; let mut query_list = our_follows;
for pi in &our_personas { if !query_list.contains(&self.our_node_id) {
if !query_list.contains(&pi.node_id) { query_list.push(self.our_node_id);
query_list.push(pi.node_id);
}
} }
let request = PullSyncRequestPayload { let request = PullSyncRequestPayload {
@ -2031,19 +2056,11 @@ impl ConnectionManager {
continue; continue;
} }
if verify_post_id(&sp.id, &sp.post) { if verify_post_id(&sp.id, &sp.post) {
match crate::control::receive_post(&storage, &sp.id, &sp.post, &sp.visibility, sp.intent.as_ref()) { if storage.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility)? {
Ok(true) => { new_post_ids.push(sp.id);
new_post_ids.push(sp.id); posts_received += 1;
posts_received += 1;
synced_authors.insert(sp.post.author);
}
Ok(false) => {
synced_authors.insert(sp.post.author);
}
Err(e) => {
warn!(post_id = hex::encode(sp.id), error = %e, "rejecting post");
}
} }
synced_authors.insert(sp.post.author);
} }
} }
} }
@ -2264,15 +2281,12 @@ impl ConnectionManager {
} }
} }
// Phase 3: Brief re-lock for is_deleted checks + intent fetch on filtered posts // Phase 3: Brief re-lock for is_deleted checks on filtered posts
let (posts, vis_updates) = { let (posts, vis_updates) = {
let s = storage.get().await; let s = storage.get().await;
let posts_to_send: Vec<SyncPost> = candidates_to_send.into_iter() let posts_to_send: Vec<SyncPost> = candidates_to_send.into_iter()
.filter(|(id, _, _)| !s.is_deleted(id).unwrap_or(false)) .filter(|(id, _, _)| !s.is_deleted(id).unwrap_or(false))
.map(|(id, post, visibility)| { .map(|(id, post, visibility)| SyncPost { id, post, visibility })
let intent = s.get_post_intent(&id).ok().flatten();
SyncPost { id, post, visibility, intent }
})
.collect(); .collect();
(posts_to_send, vis_updates_to_send) (posts_to_send, vis_updates_to_send)
}; };
@ -3943,16 +3957,6 @@ impl ConnectionManager {
s.get_peer_nat_profile(&requester) s.get_peer_nat_profile(&requester)
}; };
tokio::spawn(async move { tokio::spawn(async move {
// Reserve the outgoing-connect slot for this requester so
// rebalance / auto-reconnect can't fire a parallel connect
// to the same peer while our hole-punch is in flight.
let _connect_guard = {
let cm = conn_mgr_arc.lock().await;
match cm.try_begin_connect(requester) {
Some(g) => g,
None => return, // Already connected or connect in flight.
}
};
if let Some(conn) = hole_punch_with_scanning(&endpoint, &requester, &requester_addrs, our_nat_profile, peer_nat_profile).await { if let Some(conn) = hole_punch_with_scanning(&endpoint, &requester, &requester_addrs, our_nat_profile, peer_nat_profile).await {
// Register as session with the peer's address for relay introduction // Register as session with the peer's address for relay introduction
let remote_sock = requester_addrs.iter() let remote_sock = requester_addrs.iter()
@ -4502,16 +4506,13 @@ impl ConnectionManager {
tokio::spawn(async move { tokio::spawn(async move {
// Brief delay to let the disconnect settle and avoid reconnect storms // Brief delay to let the disconnect settle and avoid reconnect storms
tokio::time::sleep(std::time::Duration::from_secs(3)).await; tokio::time::sleep(std::time::Duration::from_secs(3)).await;
// Reserve the outgoing-connect slot for this peer. If // Check if already reconnected (by the other side or growth loop)
// another path (rebalance, relay-introduction) is {
// already connecting to them, skip.
let _connect_guard = {
let cm = cm_arc.lock().await; let cm = cm_arc.lock().await;
match cm.try_begin_connect(remote_node_id) { if cm.connections.contains_key(&remote_node_id) || cm.sessions.contains_key(&remote_node_id) {
Some(g) => g, return; // Already reconnected
None => return, // Already connected or connect in flight.
} }
}; }
if let Ok(eid) = iroh::EndpointId::from_bytes(&remote_node_id) { if let Ok(eid) = iroh::EndpointId::from_bytes(&remote_node_id) {
let ep_addr = iroh::EndpointAddr::from(eid).with_ip_addr(addr); let ep_addr = iroh::EndpointAddr::from(eid).with_ip_addr(addr);
let endpoint = { let endpoint = {
@ -4929,11 +4930,24 @@ impl ConnectionManager {
} }
} }
// Gather connections for CDN delete notices under lock, then send outside
let mut delete_notices: Vec<(iroh::endpoint::Connection, crate::protocol::BlobDeleteNoticePayload)> = Vec::new();
for (cid, holders) in &blob_cleanup {
let payload = crate::protocol::BlobDeleteNoticePayload { cid: *cid, upstream_node: None };
for (peer, _addrs) in holders {
if let Some(pc) = cm.connections_ref().get(peer) {
delete_notices.push((pc.connection.clone(), payload.clone()));
}
}
}
drop(cm); drop(cm);
// BlobDeleteNotice removed in v0.6.2: orphaned blobs on remote // Send outside lock
// holders are evicted naturally via LRU rather than by a for (conn, payload) in &delete_notices {
// persona-signed push. if let Ok(mut send) = conn.open_uni().await {
let _ = blob_cleanup; let _ = write_typed_message(&mut send, MessageType::BlobDeleteNotice, payload).await;
let _ = send.finish();
}
}
} }
MessageType::VisibilityUpdate => { MessageType::VisibilityUpdate => {
let payload: crate::protocol::VisibilityUpdatePayload = let payload: crate::protocol::VisibilityUpdatePayload =
@ -4948,6 +4962,102 @@ impl ConnectionManager {
} }
} }
} }
MessageType::PostNotification => {
let notification: PostNotificationPayload =
read_payload(recv, MAX_PAYLOAD).await?;
info!(
peer = hex::encode(remote_node_id),
post_id = hex::encode(notification.post_id),
author = hex::encode(notification.author),
"Received post notification"
);
let cm = conn_mgr.lock().await;
match cm.handle_post_notification(&remote_node_id, notification, None).await {
Ok(true) => {
info!(peer = hex::encode(remote_node_id), "Pulled post from notification");
}
Ok(false) => {
info!(peer = hex::encode(remote_node_id), "Post notification ignored (not following or already have)");
}
Err(e) => {
warn!(peer = hex::encode(remote_node_id), error = %e, "Post notification pull failed");
}
}
}
MessageType::PostPush => {
let push: PostPushPayload = read_payload(recv, MAX_PAYLOAD).await?;
// Encrypted posts are no longer accepted via direct push — they propagate
// via the CDN to eliminate the sender→recipient traffic signal.
if !matches!(push.post.visibility, crate::types::PostVisibility::Public) {
debug!(
peer = hex::encode(remote_node_id),
post_id = hex::encode(push.post.id),
"Ignoring non-public PostPush"
);
} else {
let cm = conn_mgr.lock().await;
let storage = cm.storage.get().await;
if !storage.is_deleted(&push.post.id)?
&& storage.get_post(&push.post.id)?.is_none()
&& crate::content::verify_post_id(&push.post.id, &push.post.post)
{
let _ = storage.store_post_with_visibility(
&push.post.id,
&push.post.post,
&push.post.visibility,
);
let _ = storage.touch_file_holder(
&push.post.id,
&remote_node_id,
&[],
crate::storage::HolderDirection::Received,
);
info!(
peer = hex::encode(remote_node_id),
post_id = hex::encode(push.post.id),
"Received direct post push"
);
}
}
}
MessageType::AudienceRequest => {
let req: AudienceRequestPayload = read_payload(recv, MAX_PAYLOAD).await?;
info!(
peer = hex::encode(remote_node_id),
requester = hex::encode(req.requester),
"Received audience request"
);
let cm = conn_mgr.lock().await;
let storage = cm.storage.get().await;
// Store as inbound pending request
let _ = storage.store_audience(
&req.requester,
crate::types::AudienceDirection::Inbound,
crate::types::AudienceStatus::Pending,
);
}
MessageType::AudienceResponse => {
let resp: AudienceResponsePayload = read_payload(recv, MAX_PAYLOAD).await?;
let status = if resp.approved { "approved" } else { "denied" };
info!(
peer = hex::encode(remote_node_id),
responder = hex::encode(resp.responder),
status,
"Received audience response"
);
let cm = conn_mgr.lock().await;
let storage = cm.storage.get().await;
let new_status = if resp.approved {
crate::types::AudienceStatus::Approved
} else {
crate::types::AudienceStatus::Denied
};
let _ = storage.store_audience(
&resp.responder,
crate::types::AudienceDirection::Outbound,
new_status,
);
}
MessageType::SocialAddressUpdate => { MessageType::SocialAddressUpdate => {
let payload: SocialAddressUpdatePayload = read_payload(recv, MAX_PAYLOAD).await?; let payload: SocialAddressUpdatePayload = read_payload(recv, MAX_PAYLOAD).await?;
let cm = conn_mgr.lock().await; let cm = conn_mgr.lock().await;
@ -5114,14 +5224,7 @@ impl ConnectionManager {
let stored = { let stored = {
let cm = cm_arc.lock().await; let cm = cm_arc.lock().await;
let storage = cm.storage.get().await; let storage = cm.storage.get().await;
let newly_stored = crate::control::receive_post( if storage.store_post_with_visibility(&sync_post.id, &sync_post.post, &sync_post.visibility).unwrap_or(false) {
&storage,
&sync_post.id,
&sync_post.post,
&sync_post.visibility,
sync_post.intent.as_ref(),
).unwrap_or(false);
if newly_stored {
let _ = storage.touch_file_holder( let _ = storage.touch_file_holder(
&sync_post.id, &sync_post.id,
&sender_id, &sender_id,
@ -5211,6 +5314,68 @@ impl ConnectionManager {
"Received social disconnect notice" "Received social disconnect notice"
); );
} }
MessageType::BlobDeleteNotice => {
let payload: crate::protocol::BlobDeleteNoticePayload =
read_payload(recv, MAX_PAYLOAD).await?;
let cm = conn_mgr.lock().await;
let storage = cm.storage.get().await;
let cid = payload.cid;
// Flat-holder model: drop the sender as a holder of this file.
// The author's DeleteRecord (separate signed message) is what
// triggers the actual blob removal for followers.
let _ = storage.remove_file_holder(&cid, &remote_node_id);
info!(
peer = hex::encode(remote_node_id),
cid = hex::encode(cid),
"Received blob delete notice"
);
}
MessageType::GroupKeyDistribute => {
let payload: GroupKeyDistributePayload = read_payload(recv, MAX_PAYLOAD).await?;
let cm = conn_mgr.lock().await;
// Verify the sender is the admin
if payload.admin != remote_node_id {
warn!(peer = hex::encode(remote_node_id), "GroupKeyDistribute from non-admin, ignoring");
} else {
let storage = cm.storage.get().await;
let record = crate::types::GroupKeyRecord {
group_id: payload.group_id,
circle_name: payload.circle_name.clone(),
epoch: payload.epoch,
group_public_key: payload.group_public_key,
admin: payload.admin,
created_at: now_ms(),
};
let _ = storage.create_group_key(&record, None);
// Find our wrapped key and unwrap the group seed
for mk in &payload.member_keys {
let _ = storage.store_group_member_key(&payload.group_id, mk);
if mk.member == cm.our_node_id {
match crypto::unwrap_group_key(
&cm.secret_seed,
&payload.admin,
&mk.wrapped_group_key,
) {
Ok(seed) => {
let _ = storage.store_group_seed(&payload.group_id, payload.epoch, &seed);
info!(
circle = %payload.circle_name,
epoch = payload.epoch,
"Received and unwrapped group key"
);
}
Err(e) => {
warn!(error = %e, "Failed to unwrap group key");
}
}
}
}
}
}
MessageType::CircleProfileUpdate => { MessageType::CircleProfileUpdate => {
let payload: CircleProfileUpdatePayload = read_payload(recv, MAX_PAYLOAD).await?; let payload: CircleProfileUpdatePayload = read_payload(recv, MAX_PAYLOAD).await?;
let cm = conn_mgr.lock().await; let cm = conn_mgr.lock().await;
@ -5497,13 +5662,11 @@ impl ConnectionManager {
}; };
let result = { let result = {
let store = storage.get().await; let store = storage.get().await;
let pv = store.get_post_with_visibility(&payload.post_id).ok().flatten(); store.get_post_with_visibility(&payload.post_id).ok().flatten()
let intent = store.get_post_intent(&payload.post_id).ok().flatten();
pv.map(|(p, v)| (p, v, intent))
}; };
let resp = if let Some((post, visibility, intent)) = result { let resp = if let Some((post, visibility)) = result {
if matches!(visibility, PostVisibility::Public) { if matches!(visibility, PostVisibility::Public) {
crate::protocol::PostFetchResponsePayload { post_id: payload.post_id, found: true, post: Some(SyncPost { id: payload.post_id, post, visibility, intent }) } crate::protocol::PostFetchResponsePayload { post_id: payload.post_id, found: true, post: Some(SyncPost { id: payload.post_id, post, visibility }) }
} else { } else {
crate::protocol::PostFetchResponsePayload { post_id: payload.post_id, found: false, post: None } crate::protocol::PostFetchResponsePayload { post_id: payload.post_id, found: false, post: None }
} }
@ -5791,17 +5954,6 @@ impl ConnectionManager {
.cloned().collect(); .cloned().collect();
let requester = payload.requester; let requester = payload.requester;
tokio::spawn(async move { tokio::spawn(async move {
// Reserve the outgoing-connect slot for this
// requester so we don't race with rebalance /
// auto-reconnect paths firing their own
// outgoing connect to the same peer.
let _connect_guard = {
let cm = cm_arc.lock().await;
match cm.try_begin_connect(requester) {
Some(g) => g,
None => return, // Already connected or connect in flight.
}
};
if let Some(conn) = hole_punch_with_scanning(&endpoint, &requester, &routable_addrs, our_nat_profile, peer_nat_profile).await { if let Some(conn) = hole_punch_with_scanning(&endpoint, &requester, &routable_addrs, our_nat_profile, peer_nat_profile).await {
let remote_sock = routable_addrs.iter().filter_map(|a| a.parse::<std::net::SocketAddr>().ok()).find(|s| crate::network::is_shareable_addr(s)); let remote_sock = routable_addrs.iter().filter_map(|a| a.parse::<std::net::SocketAddr>().ok()).find(|s| crate::network::is_shareable_addr(s));
let mut cm = cm_arc.lock().await; let mut cm = cm_arc.lock().await;
@ -6030,13 +6182,7 @@ impl ConnectionManager {
let post_author = sp.post.author; let post_author = sp.post.author;
let cm = cm_arc.lock().await; let cm = cm_arc.lock().await;
let storage = cm.storage.get().await; let storage = cm.storage.get().await;
let _ = crate::control::receive_post( let _ = storage.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility);
&storage,
&sp.id,
&sp.post,
&sp.visibility,
sp.intent.as_ref(),
);
let _ = storage.touch_file_holder( let _ = storage.touch_file_holder(
&sp.id, &sp.id,
&sender, &sender,
@ -6104,18 +6250,18 @@ impl ConnectionManager {
async fn handle_blob_header_diff(&self, payload: BlobHeaderDiffPayload, sender: NodeId) { async fn handle_blob_header_diff(&self, payload: BlobHeaderDiffPayload, sender: NodeId) {
use crate::types::BlobHeaderDiffOp; use crate::types::BlobHeaderDiffOp;
// Gather policy + followers set + holders, then drop lock immediately. // Gather policy + audience data + holders, then drop lock immediately.
// Remote peer clearly holds this post — record them as a holder. // Remote peer clearly holds this post — record them as a holder.
// v0.6.2: `AudienceOnly` → `FollowersOnly`; checked against our public let (policy, approved_audience, holders) = {
// follows list rather than a separate audience table.
let (policy, followers_set, holders) = {
let storage = self.storage.get().await; let storage = self.storage.get().await;
let policy = storage.get_comment_policy(&payload.post_id) let policy = storage.get_comment_policy(&payload.post_id)
.ok() .ok()
.flatten() .flatten()
.unwrap_or_default(); .unwrap_or_default();
let follows: std::collections::HashSet<NodeId> = let approved = storage.list_audience(
storage.list_public_follows().unwrap_or_default().into_iter().collect(); crate::types::AudienceDirection::Inbound,
Some(crate::types::AudienceStatus::Approved),
).unwrap_or_default();
let _ = storage.touch_file_holder( let _ = storage.touch_file_holder(
&payload.post_id, &payload.post_id,
&sender, &sender,
@ -6127,9 +6273,12 @@ impl ConnectionManager {
.into_iter() .into_iter()
.map(|(nid, _addrs)| nid) .map(|(nid, _addrs)| nid)
.collect(); .collect();
(policy, follows, holders) (policy, approved, holders)
}; };
// Filter ops using gathered data (no lock held)
let audience_set: std::collections::HashSet<NodeId> = approved_audience.iter().map(|a| a.node_id).collect();
// Apply ops in a short lock acquisition // Apply ops in a short lock acquisition
{ {
let storage = self.storage.get().await; let storage = self.storage.get().await;
@ -6165,8 +6314,8 @@ impl ConnectionManager {
} }
match policy.allow_comments { match policy.allow_comments {
crate::types::CommentPermission::None => continue, crate::types::CommentPermission::None => continue,
crate::types::CommentPermission::FollowersOnly => { crate::types::CommentPermission::AudienceOnly => {
if !followers_set.contains(&comment.author) { if !audience_set.contains(&comment.author) {
continue; continue;
} }
} }
@ -6178,7 +6327,6 @@ impl ConnectionManager {
&comment.content, &comment.content,
comment.timestamp_ms, comment.timestamp_ms,
&comment.signature, &comment.signature,
comment.ref_post_id.as_ref(),
) { ) {
continue; // Skip forged comments continue; // Skip forged comments
} }
@ -8071,21 +8219,8 @@ impl ConnectionActor {
let mut cm = self.cm.lock().await; let mut cm = self.cm.lock().await;
cm.rebalance_slots().await.unwrap_or_default() cm.rebalance_slots().await.unwrap_or_default()
}; };
// Connect outside the lock — no 15s hold. Reserve an // Connect outside the lock — no 15s hold
// outgoing-connect slot per peer so we don't race with
// auto-reconnect / relay-introduction paths for the same
// target; skip peers already mid-connect.
for (peer_id, addr, _addr_s, slot_kind) in pending_connects { for (peer_id, addr, _addr_s, slot_kind) in pending_connects {
let _connect_guard = {
let cm = self.cm.lock().await;
match cm.try_begin_connect(peer_id) {
Some(g) => g,
None => {
debug!(peer = hex::encode(peer_id), "rebalance: skipping — connect already in flight");
continue;
}
}
};
let addrs: Vec<std::net::SocketAddr> = addr.ip_addrs().copied().collect(); let addrs: Vec<std::net::SocketAddr> = addr.ip_addrs().copied().collect();
if !addrs.is_empty() { if !addrs.is_empty() {
let s = storage.get().await; let s = storage.get().await;
@ -8497,61 +8632,3 @@ fn now_ms() -> u64 {
.unwrap_or_default() .unwrap_or_default()
.as_millis() as u64 .as_millis() as u64
} }
#[cfg(test)]
mod tests {
    use super::{scanner_semaphore, PendingConnectGuard};
    use crate::types::NodeId;
    use std::collections::HashSet;
    use std::sync::{Arc, Mutex as StdMutex};

    /// The scanner semaphore must admit exactly one concurrent scan.
    #[test]
    fn scanner_semaphore_caps_concurrent_scans_at_one() {
        let sem = scanner_semaphore();
        // A fresh semaphore hands out its single permit…
        let first = sem.try_acquire().expect("first scan should acquire");
        // …and refuses a second acquirer while it is held.
        assert!(sem.try_acquire().is_err(), "second scan must not acquire while first holds permit");
        // Dropping the permit returns it to the pool.
        drop(first);
        let second = sem.try_acquire().expect("after release, next scan should acquire");
        drop(second);
    }

    /// Build a guard over a test-owned pending set, mirroring the
    /// ConnectionManager acquire path without a full CM: refuse when the
    /// peer is already pending, otherwise record it and hand back a guard
    /// that un-records it on drop.
    fn try_begin(set: &Arc<StdMutex<HashSet<NodeId>>>, peer: NodeId) -> Option<PendingConnectGuard> {
        let mut pending = set.lock().ok()?;
        if pending.contains(&peer) {
            return None;
        }
        pending.insert(peer);
        Some(PendingConnectGuard { peer_id: peer, set: Arc::clone(set) })
    }

    #[test]
    fn pending_connect_guard_gates_same_peer_and_releases_on_drop() {
        let set: Arc<StdMutex<HashSet<NodeId>>> = Arc::new(StdMutex::new(HashSet::new()));
        let peer_a: NodeId = [1u8; 32];
        let peer_b: NodeId = [2u8; 32];
        // First acquisition for A wins; a concurrent second one is refused.
        let guard_a = try_begin(&set, peer_a).expect("first guard should acquire for peer A");
        assert!(try_begin(&set, peer_a).is_none(), "second concurrent guard for A must be refused");
        // A different peer has its own independent slot.
        let guard_b = try_begin(&set, peer_b).expect("guard for peer B should acquire independently");
        // Dropping A's guard frees A's slot…
        drop(guard_a);
        assert!(!set.lock().unwrap().contains(&peer_a), "peer A should be removed from pending_connects on drop");
        // …so a fresh acquisition for A succeeds while B stays pending.
        let guard_a_again = try_begin(&set, peer_a).expect("after release, new guard for A should acquire");
        assert!(set.lock().unwrap().contains(&peer_b));
        drop(guard_a_again);
        drop(guard_b);
        assert!(set.lock().unwrap().is_empty(), "all guards dropped — set should be empty");
    }
}

View file

@ -1,254 +0,0 @@
//! Control posts: signed protocol operations carried as public posts that
//! receivers apply to local state (delete, update visibility) without
//! rendering in feeds.
//!
//! Wire flow:
//! 1. Author creates a `Post { author, content = ControlOp JSON, ... }` with
//! `VisibilityIntent::Control`.
//! 2. Post propagates via CDN like any other post (header-diffs on neighbor
//! posts ship the reference; receivers pull the control post).
//! 3. On receive, callers invoke `apply_control_post_if_applicable` to
//! decode, verify the ControlOp's signature against the post's author,
//! confirm the target post's author matches, and apply.
//!
//! Control posts themselves are stored with `VisibilityIntent::Control`; feed
//! queries exclude them. They remain in storage as tombstones so we can
//! re-propagate them to peers and so future arrivals of the target post are
//! rejected via the delete tombstone.
use crate::crypto;
use crate::storage::Storage;
use crate::types::{ControlOp, DeleteRecord, NodeId, Post, PostId, PostVisibility, VisibilityIntent};
/// Parse the post's content as a `ControlOp`, verify its signature against
/// the post's author, verify target ownership, and apply to local storage.
///
/// No-op (returns `Ok`) if the post is not a control post. Returns an error
/// on a control post with an invalid signature or mismatched target author.
/// Callers pass an existing storage guard so the apply happens under the
/// same lock as the post-store that triggered the call.
pub fn apply_control_post_if_applicable(
    s: &Storage,
    post: &Post,
    intent: Option<&VisibilityIntent>,
) -> anyhow::Result<()> {
    if !matches!(intent, Some(VisibilityIntent::Control)) {
        return Ok(());
    }
    let op: ControlOp = serde_json::from_str(&post.content)
        .map_err(|e| anyhow::anyhow!("control post content is not a valid ControlOp: {}", e))?;
    match op {
        ControlOp::DeletePost { post_id, timestamp_ms, signature } => {
            if !crypto::verify_control_delete(&post.author, &post_id, timestamp_ms, &signature) {
                anyhow::bail!("invalid control-delete signature");
            }
            // Ownership check: only the target post's author may delete it.
            // If the target isn't stored locally, the tombstone still applies.
            if let Some(target) = s.get_post(&post_id)? {
                if target.author != post.author {
                    anyhow::bail!("control-delete author does not match target post's author");
                }
            }
            // `signature` is owned here (matched out of the enum by value),
            // so move it into the record — the previous `.clone()` was a
            // redundant allocation.
            let record = DeleteRecord {
                post_id,
                author: post.author,
                timestamp_ms,
                signature,
            };
            // Best-effort: tombstone storage/apply failures must not abort
            // the caller's post-store under the same lock.
            let _ = s.store_delete(&record);
            let _ = s.apply_delete(&record);
            Ok(())
        }
        ControlOp::UpdateVisibility { post_id, new_visibility, timestamp_ms, signature } => {
            if !crypto::verify_control_visibility(&post.author, &post_id, &new_visibility, timestamp_ms, &signature) {
                anyhow::bail!("invalid control-visibility signature");
            }
            if let Some(target) = s.get_post(&post_id)? {
                if target.author != post.author {
                    anyhow::bail!("control-visibility author does not match target post's author");
                }
                let _ = s.update_post_visibility(&post_id, &new_visibility);
            }
            // (Removed the dead `let _ = (timestamp_ms, new_visibility);`
            // discard — both values are already used above.)
            Ok(())
        }
    }
}
/// Unified receive path: for every incoming post, call this instead of
/// `store_post_with_visibility` / `store_post_with_intent`. If the post is a
/// control post, the op is verified and applied atomically under the same
/// storage guard; if verification fails the post is NOT stored (so we don't
/// propagate bogus controls to other peers via neighbor-manifest diffs).
///
/// Returns Ok(true) if the post was newly stored, Ok(false) if already known,
/// and an error for control posts with invalid signatures or mismatched
/// target authors.
pub fn receive_post(
    s: &Storage,
    id: &PostId,
    post: &Post,
    visibility: &PostVisibility,
    intent: Option<&VisibilityIntent>,
) -> anyhow::Result<bool> {
    // Signed-intent posts are verified BEFORE storage: a bogus signed post
    // must never enter storage where neighbor-manifest diffs would
    // re-propagate it to other peers.
    match intent {
        Some(VisibilityIntent::Control) => {
            let op: ControlOp = serde_json::from_str(&post.content).map_err(|e| {
                anyhow::anyhow!("control post content is not a valid ControlOp: {}", e)
            })?;
            match &op {
                ControlOp::DeletePost { post_id, timestamp_ms, signature }
                    if !crypto::verify_control_delete(&post.author, post_id, *timestamp_ms, signature) =>
                {
                    anyhow::bail!("invalid control-delete signature");
                }
                ControlOp::UpdateVisibility { post_id, new_visibility, timestamp_ms, signature }
                    if !crypto::verify_control_visibility(&post.author, post_id, new_visibility, *timestamp_ms, signature) =>
                {
                    anyhow::bail!("invalid control-visibility signature");
                }
                _ => {} // Signature verified.
            }
        }
        Some(VisibilityIntent::Profile) => {
            crate::profile::verify_profile_post(post)?;
        }
        _ => {}
    }
    // Store with or without an explicit intent.
    let newly_stored = match intent {
        Some(i) => s.store_post_with_intent(id, post, visibility, i)?,
        None => s.store_post_with_visibility(id, post, visibility)?,
    };
    // Apply side effects only on first arrival — re-receipt of a known post
    // must not re-run control/profile application.
    if newly_stored {
        apply_control_post_if_applicable(s, post, intent)?;
        crate::profile::apply_profile_post_if_applicable(s, post, intent)?;
    }
    Ok(newly_stored)
}
/// Build a Post carrying a signed control-delete for `target_post_id`.
/// Caller is responsible for storing and propagating it.
pub fn build_delete_control_post(
    author: &NodeId,
    author_secret: &[u8; 32],
    target_post_id: &crate::types::PostId,
) -> Post {
    // Same timestamp is used inside the signed op and on the carrier post.
    let now_ms = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map_or(0, |d| d.as_millis() as u64);
    let op = ControlOp::DeletePost {
        post_id: *target_post_id,
        timestamp_ms: now_ms,
        signature: crypto::sign_control_delete(author_secret, target_post_id, now_ms),
    };
    Post {
        author: *author,
        content: serde_json::to_string(&op).unwrap_or_default(),
        attachments: vec![],
        timestamp_ms: now_ms,
    }
}
/// Build a Post carrying a signed control-update-visibility op for
/// `target_post_id`. Caller is responsible for storing and propagating it.
pub fn build_visibility_control_post(
    author: &NodeId,
    author_secret: &[u8; 32],
    target_post_id: &crate::types::PostId,
    new_visibility: &PostVisibility,
) -> Post {
    // Same timestamp is used inside the signed op and on the carrier post.
    let now_ms = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map_or(0, |d| d.as_millis() as u64);
    let op = ControlOp::UpdateVisibility {
        post_id: *target_post_id,
        new_visibility: new_visibility.clone(),
        timestamp_ms: now_ms,
        signature: crypto::sign_control_visibility(author_secret, target_post_id, new_visibility, now_ms),
    };
    Post {
        author: *author,
        content: serde_json::to_string(&op).unwrap_or_default(),
        attachments: vec![],
        timestamp_ms: now_ms,
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::storage::Storage;
    use ed25519_dalek::SigningKey;

    /// Fresh in-memory storage for each test.
    fn temp_storage() -> Storage {
        Storage::open(":memory:").unwrap()
    }

    /// Deterministic ed25519 keypair from a repeated seed byte; returns
    /// (secret seed, public NodeId).
    fn make_keypair(seed_byte: u8) -> ([u8; 32], NodeId) {
        let seed = [seed_byte; 32];
        let signing_key = SigningKey::from_bytes(&seed);
        let public = signing_key.verifying_key();
        (seed, *public.as_bytes())
    }

    // Happy path: a control-delete signed by the target post's own author
    // is accepted by receive_post and marks the target as deleted.
    #[test]
    fn control_delete_roundtrip_verifies_and_applies() {
        let s = temp_storage();
        let (author_sec, author_pub) = make_keypair(7);
        let post = Post {
            author: author_pub,
            content: "hello".to_string(),
            attachments: vec![],
            timestamp_ms: 1000,
        };
        let post_id = crate::content::compute_post_id(&post);
        s.store_post_with_visibility(&post_id, &post, &PostVisibility::Public).unwrap();
        let control = build_delete_control_post(&author_pub, &author_sec, &post_id);
        let control_id = crate::content::compute_post_id(&control);
        let stored = receive_post(
            &s,
            &control_id,
            &control,
            &PostVisibility::Public,
            Some(&VisibilityIntent::Control),
        ).unwrap();
        assert!(stored);
        assert!(s.is_deleted(&post_id).unwrap());
    }

    // Forgery path: a control-delete signed by the wrong key must be
    // rejected — neither stored (so it can't propagate) nor applied.
    #[test]
    fn control_delete_rejects_wrong_author() {
        let s = temp_storage();
        let (_author_sec, author_pub) = make_keypair(7);
        let (other_sec, _other_pub) = make_keypair(9);
        let post = Post {
            author: author_pub,
            content: "hello".to_string(),
            attachments: vec![],
            timestamp_ms: 1000,
        };
        let post_id = crate::content::compute_post_id(&post);
        s.store_post_with_visibility(&post_id, &post, &PostVisibility::Public).unwrap();
        // Sign with wrong secret → invalid signature for `author_pub`.
        let control = build_delete_control_post(&author_pub, &other_sec, &post_id);
        let control_id = crate::content::compute_post_id(&control);
        let res = receive_post(
            &s,
            &control_id,
            &control,
            &PostVisibility::Public,
            Some(&VisibilityIntent::Control),
        );
        assert!(res.is_err());
        assert!(s.get_post(&control_id).unwrap().is_none());
        assert!(!s.is_deleted(&post_id).unwrap());
    }
}

View file

@ -289,128 +289,6 @@ pub fn sign_delete(seed: &[u8; 32], post_id: &PostId) -> Vec<u8> {
sig.to_bytes().to_vec() sig.to_bytes().to_vec()
} }
/// Canonical bytes for a ControlOp::DeletePost signature.
/// Layout: 12-byte domain tag || 32-byte post id || 8-byte LE timestamp.
fn control_delete_bytes(post_id: &PostId, timestamp_ms: u64) -> Vec<u8> {
    let ts = timestamp_ms.to_le_bytes();
    [b"ctrl:delete:".as_slice(), post_id.as_slice(), ts.as_slice()].concat()
}
/// Sign a control-post DeletePost operation with the author's ed25519 seed.
pub fn sign_control_delete(seed: &[u8; 32], post_id: &PostId, timestamp_ms: u64) -> Vec<u8> {
    let msg = control_delete_bytes(post_id, timestamp_ms);
    SigningKey::from_bytes(seed).sign(&msg).to_bytes().to_vec()
}
/// Verify a control-delete signature against the claimed author's key.
/// Returns false for malformed signatures or invalid author keys.
pub fn verify_control_delete(
    author: &NodeId,
    post_id: &PostId,
    timestamp_ms: u64,
    signature: &[u8],
) -> bool {
    // ed25519 signatures are exactly 64 bytes; try_from enforces that.
    let Ok(sig_bytes) = <[u8; 64]>::try_from(signature) else { return false };
    let sig = ed25519_dalek::Signature::from_bytes(&sig_bytes);
    let Ok(vk) = VerifyingKey::from_bytes(author) else { return false };
    vk.verify_strict(&control_delete_bytes(post_id, timestamp_ms), &sig).is_ok()
}
/// Canonical bytes for a ControlOp::UpdateVisibility signature. Uses JSON
/// round-trip on the visibility payload because PostVisibility is an enum
/// with variable shape; callers must pass the exact same bytes when verifying.
///
/// Layout: 9-byte domain tag || 32-byte post id || canonical visibility
/// JSON || 8-byte LE timestamp.
fn control_visibility_bytes(
    post_id: &PostId,
    new_visibility_canonical: &[u8],
    timestamp_ms: u64,
) -> Vec<u8> {
    // Capacity hint fixed: `b"ctrl:vis:"` is 9 bytes, not 10 — the old hint
    // over-reserved by one byte (harmless, but wrong as documentation).
    let mut buf = Vec::with_capacity(9 + 32 + new_visibility_canonical.len() + 8);
    buf.extend_from_slice(b"ctrl:vis:");
    buf.extend_from_slice(post_id);
    buf.extend_from_slice(new_visibility_canonical);
    buf.extend_from_slice(&timestamp_ms.to_le_bytes());
    buf
}
/// Sign a control-post UpdateVisibility operation. The visibility enum is
/// canonicalized via serde_json; verification must produce the same bytes.
pub fn sign_control_visibility(
    seed: &[u8; 32],
    post_id: &PostId,
    new_visibility: &crate::types::PostVisibility,
    timestamp_ms: u64,
) -> Vec<u8> {
    let canon = serde_json::to_vec(new_visibility).unwrap_or_default();
    let msg = control_visibility_bytes(post_id, &canon, timestamp_ms);
    SigningKey::from_bytes(seed).sign(&msg).to_bytes().to_vec()
}
/// Verify a control-visibility signature against the claimed author's key.
/// Returns false on malformed signatures, bad keys, or a visibility value
/// that fails JSON canonicalization.
pub fn verify_control_visibility(
    author: &NodeId,
    post_id: &PostId,
    new_visibility: &crate::types::PostVisibility,
    timestamp_ms: u64,
    signature: &[u8],
) -> bool {
    // ed25519 signatures are exactly 64 bytes; try_from enforces that.
    let Ok(sig_bytes) = <[u8; 64]>::try_from(signature) else { return false };
    let sig = ed25519_dalek::Signature::from_bytes(&sig_bytes);
    let Ok(vk) = VerifyingKey::from_bytes(author) else { return false };
    let Ok(canon) = serde_json::to_vec(new_visibility) else { return false };
    vk.verify_strict(&control_visibility_bytes(post_id, &canon, timestamp_ms), &sig).is_ok()
}
/// Canonical bytes for a Profile-post signature: length-prefixed display_name
/// and bio, 32-byte avatar_cid (or zeros), then timestamp_ms. Length prefixes
/// prevent extension/reordering attacks.
fn profile_post_bytes(
    display_name: &str,
    bio: &str,
    avatar_cid: &Option<[u8; 32]>,
    timestamp_ms: u64,
) -> Vec<u8> {
    let name_bytes = display_name.as_bytes();
    let bio_b = bio.as_bytes();
    let mut out = Vec::with_capacity(5 + 8 + name_bytes.len() + 8 + bio_b.len() + 32 + 8);
    out.extend_from_slice(b"prof:");
    // Each variable-length field carries its LE u64 length so the
    // (name, bio) pair cannot be re-split ambiguously.
    out.extend_from_slice(&(name_bytes.len() as u64).to_le_bytes());
    out.extend_from_slice(name_bytes);
    out.extend_from_slice(&(bio_b.len() as u64).to_le_bytes());
    out.extend_from_slice(bio_b);
    // An absent avatar is encoded as 32 zero bytes.
    out.extend_from_slice(&avatar_cid.unwrap_or([0u8; 32]));
    out.extend_from_slice(&timestamp_ms.to_le_bytes());
    out
}
/// Sign a profile post's canonical bytes with the author's ed25519 seed.
pub fn sign_profile(
    seed: &[u8; 32],
    display_name: &str,
    bio: &str,
    avatar_cid: &Option<[u8; 32]>,
    timestamp_ms: u64,
) -> Vec<u8> {
    let msg = profile_post_bytes(display_name, bio, avatar_cid, timestamp_ms);
    SigningKey::from_bytes(seed).sign(&msg).to_bytes().to_vec()
}
/// Verify a profile-post signature against the claimed author's key.
/// Returns false for malformed signatures or invalid author keys.
pub fn verify_profile(
    author: &NodeId,
    display_name: &str,
    bio: &str,
    avatar_cid: &Option<[u8; 32]>,
    timestamp_ms: u64,
    signature: &[u8],
) -> bool {
    // ed25519 signatures are exactly 64 bytes; try_from enforces that.
    let Ok(sig_bytes) = <[u8; 64]>::try_from(signature) else { return false };
    let sig = ed25519_dalek::Signature::from_bytes(&sig_bytes);
    let Ok(vk) = VerifyingKey::from_bytes(author) else { return false };
    vk.verify_strict(&profile_post_bytes(display_name, bio, avatar_cid, timestamp_ms), &sig).is_ok()
}
/// Verify an ed25519 delete signature: the author's public key signed the post_id. /// Verify an ed25519 delete signature: the author's public key signed the post_id.
pub fn verify_delete_signature(author: &NodeId, post_id: &PostId, signature: &[u8]) -> bool { pub fn verify_delete_signature(author: &NodeId, post_id: &PostId, signature: &[u8]) -> bool {
if signature.len() != 64 { if signature.len() != 64 {
@ -728,37 +606,20 @@ pub fn decrypt_private_reaction(
} }
/// Sign a comment: ed25519 over BLAKE3(author || post_id || content || timestamp_ms). /// Sign a comment: ed25519 over BLAKE3(author || post_id || content || timestamp_ms).
/// Keyed BLAKE3 digest a comment signature covers:
/// author || post_id || content || timestamp, with an optional
/// domain-separated ref-post id appended.
fn comment_digest(
    author: &NodeId,
    post_id: &PostId,
    content: &str,
    timestamp_ms: u64,
    ref_post_id: Option<&PostId>,
) -> blake3::Hash {
    let mut h = blake3::Hasher::new_derive_key(COMMENT_SIGN_CONTEXT);
    h.update(author)
        .update(post_id)
        .update(content.as_bytes())
        .update(&timestamp_ms.to_le_bytes());
    // Domain-separated append: `None` yields the same digest as the v0.6.1
    // scheme, so plain comments keep verifying; `Some(ref)` adds the ref id.
    if let Some(rid) = ref_post_id {
        h.update(b"ref:").update(rid);
    }
    h.finalize()
}
pub fn sign_comment( pub fn sign_comment(
seed: &[u8; 32], seed: &[u8; 32],
author: &NodeId, author: &NodeId,
post_id: &PostId, post_id: &PostId,
content: &str, content: &str,
timestamp_ms: u64, timestamp_ms: u64,
ref_post_id: Option<&PostId>,
) -> Vec<u8> { ) -> Vec<u8> {
let signing_key = SigningKey::from_bytes(seed); let signing_key = SigningKey::from_bytes(seed);
let digest = comment_digest(author, post_id, content, timestamp_ms, ref_post_id); let mut hasher = blake3::Hasher::new_derive_key(COMMENT_SIGN_CONTEXT);
hasher.update(author);
hasher.update(post_id);
hasher.update(content.as_bytes());
hasher.update(&timestamp_ms.to_le_bytes());
let digest = hasher.finalize();
signing_key.sign(digest.as_bytes()).to_bytes().to_vec() signing_key.sign(digest.as_bytes()).to_bytes().to_vec()
} }
@ -769,7 +630,6 @@ pub fn verify_comment_signature(
content: &str, content: &str,
timestamp_ms: u64, timestamp_ms: u64,
signature: &[u8], signature: &[u8],
ref_post_id: Option<&PostId>,
) -> bool { ) -> bool {
let Ok(verifying_key) = VerifyingKey::from_bytes(author) else { let Ok(verifying_key) = VerifyingKey::from_bytes(author) else {
return false; return false;
@ -777,7 +637,12 @@ pub fn verify_comment_signature(
let Ok(sig) = ed25519_dalek::Signature::from_slice(signature) else { let Ok(sig) = ed25519_dalek::Signature::from_slice(signature) else {
return false; return false;
}; };
let digest = comment_digest(author, post_id, content, timestamp_ms, ref_post_id); let mut hasher = blake3::Hasher::new_derive_key(COMMENT_SIGN_CONTEXT);
hasher.update(author);
hasher.update(post_id);
hasher.update(content.as_bytes());
hasher.update(&timestamp_ms.to_le_bytes());
let digest = hasher.finalize();
verifying_key.verify(digest.as_bytes(), &sig).is_ok() verifying_key.verify(digest.as_bytes(), &sig).is_ok()
} }
@ -1012,29 +877,6 @@ mod tests {
} }
} }
/// The comment signature must cryptographically bind the optional
/// `ref_post_id`: a signature made with a ref must not verify without it
/// (or with a different ref), while plain signatures stay v0.6.1-compatible.
#[test]
fn comment_signature_binds_ref_post_id() {
    let (seed, nid) = make_keypair(7);
    let post_id = [1u8; 32];
    let ref_post = [2u8; 32];
    let content = "preview";
    let ts = 1000u64;
    // Signature including ref_post_id.
    let sig_with_ref = sign_comment(&seed, &nid, &post_id, content, ts, Some(&ref_post));
    // Verifies only when the ref is supplied.
    assert!(verify_comment_signature(&nid, &post_id, content, ts, &sig_with_ref, Some(&ref_post)));
    // Same signature must NOT verify when the ref is dropped (binding).
    assert!(!verify_comment_signature(&nid, &post_id, content, ts, &sig_with_ref, None));
    // Nor when the ref is swapped.
    let other_ref = [3u8; 32];
    assert!(!verify_comment_signature(&nid, &post_id, content, ts, &sig_with_ref, Some(&other_ref)));
    // Plain-comment signature still works (backward compat with v0.6.1).
    let sig_plain = sign_comment(&seed, &nid, &post_id, content, ts, None);
    assert!(verify_comment_signature(&nid, &post_id, content, ts, &sig_plain, None));
}
#[test] #[test]
fn test_sign_verify_manifest() { fn test_sign_verify_manifest() {
use crate::types::{AuthorManifest, ManifestEntry}; use crate::types::{AuthorManifest, ManifestEntry};

View file

@ -1,296 +0,0 @@
//! Group-key distribution as an encrypted post.
//!
//! v0.6.2 replaces the v0.6.1 `GroupKeyDistribute` wire push (admin →
//! member, uni-stream) with a standard public post that carries the group
//! seed inside `PostVisibility::Encrypted`. Each member is a recipient; the
//! post's CEK is wrapped per member using the admin's posting key. Members
//! receive the post via normal CDN / pull paths, decrypt with their posting
//! secret, and recover the seed + metadata.
//!
//! Removing the direct push eliminates the wire-level signal that a given
//! network endpoint is coordinating group membership with another specific
//! endpoint.
//!
//! Note: Members are identified by their **posting** NodeIds (the
//! author/recipient namespace since the v0.6.1 identity split), not network
//! NodeIds. The admin wraps the CEK using their default_posting_secret; the
//! receiver unwraps using one of their posting identity secrets.
use crate::content::compute_post_id;
use crate::crypto;
use crate::storage::Storage;
use crate::types::{
GroupKeyDistributionContent, GroupKeyRecord, GroupMemberKey, NodeId, Post, PostId,
PostVisibility, PostingIdentity, VisibilityIntent,
};
/// Build an encrypted key-distribution post. Authored by the admin's
/// posting identity; recipients are the member posting NodeIds. Returns
/// `(PostId, Post, PostVisibility)` — caller stores with intent=
/// `GroupKeyDistribute` and propagates via the normal neighbor-manifest CDN
/// path.
pub fn build_distribution_post(
    admin: &NodeId,
    admin_secret: &[u8; 32],
    record: &GroupKeyRecord,
    group_seed: &[u8; 32],
    members: &[NodeId],
) -> anyhow::Result<(PostId, Post, PostVisibility)> {
    // Plaintext payload: group metadata plus the seed itself.
    let plaintext = serde_json::to_string(&GroupKeyDistributionContent {
        group_id: record.group_id,
        circle_name: record.circle_name.clone(),
        epoch: record.epoch,
        group_public_key: record.group_public_key,
        admin: *admin,
        canonical_root_post_id: record.canonical_root_post_id,
        group_seed: *group_seed,
    })?;
    // Encrypt once; the CEK is wrapped separately for each member's
    // posting pubkey.
    let (ciphertext_b64, wrapped_keys) =
        crypto::encrypt_post(&plaintext, admin_secret, admin, members)?;
    let now_ms = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map_or(0, |d| d.as_millis() as u64);
    let post = Post {
        author: *admin,
        content: ciphertext_b64,
        attachments: vec![],
        timestamp_ms: now_ms,
    };
    let post_id = compute_post_id(&post);
    Ok((post_id, post, PostVisibility::Encrypted { recipients: wrapped_keys }))
}
/// Attempt to decrypt + apply a stored GroupKeyDistribute post using each
/// posting identity's secret in turn. Returns `Ok(true)` on successful
/// apply, `Ok(false)` if none of our personas were recipients (or content
/// was malformed, or the seed had already been stored), `Err` on hard
/// errors during storage.
pub fn try_apply_distribution_post(
    s: &Storage,
    post: &Post,
    visibility: &PostVisibility,
    our_personas: &[PostingIdentity],
) -> anyhow::Result<bool> {
    let wrapped_keys = match visibility {
        PostVisibility::Encrypted { recipients } => recipients,
        _ => return Ok(false), // Only Encrypted posts can carry seeds.
    };
    // Try each of our posting identities until one can unwrap the CEK.
    for persona in our_personas {
        match crypto::decrypt_post(
            &post.content,
            &persona.secret_seed,
            &persona.node_id,
            &post.author,
            wrapped_keys,
        ) {
            Ok(Some(plaintext)) => {
                let content: GroupKeyDistributionContent = match serde_json::from_str(&plaintext) {
                    Ok(c) => c,
                    Err(_) => continue, // Bad payload — try next persona.
                };
                // Critical: the `admin` claimed inside the decrypted
                // payload must match the post author. Without this, any
                // peer who knows a member's posting id and the group's
                // group_id could craft an encrypted post claiming to be
                // from the admin and overwrite the member's stored group
                // key (create_group_key uses INSERT OR REPLACE).
                if content.admin != post.author {
                    tracing::warn!(
                        post_author = hex::encode(post.author),
                        claimed_admin = hex::encode(content.admin),
                        group_id = hex::encode(content.group_id),
                        "rejecting group-key-distribution post: claimed admin != post author"
                    );
                    continue;
                }
                apply_content(s, &content)?;
                return Ok(true);
            }
            // Not a recipient under this persona (or decryption failed) —
            // keep trying the remaining identities.
            Ok(None) | Err(_) => continue,
        }
    }
    Ok(false)
}
/// Persist a decrypted distribution payload: upsert the group-key record
/// and store the seed under its epoch.
fn apply_content(s: &Storage, content: &GroupKeyDistributionContent) -> anyhow::Result<()> {
    // `created_at` reflects local receipt time, not the admin's clock.
    let created_at = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map_or(0, |d| d.as_millis() as u64);
    let record = GroupKeyRecord {
        group_id: content.group_id,
        circle_name: content.circle_name.clone(),
        epoch: content.epoch,
        group_public_key: content.group_public_key,
        admin: content.admin,
        created_at,
        canonical_root_post_id: content.canonical_root_post_id,
    };
    s.create_group_key(&record, Some(&content.group_seed))?;
    s.store_group_seed(&content.group_id, content.epoch, &content.group_seed)?;
    Ok(())
}
/// Scan stored posts with `VisibilityIntent::GroupKeyDistribute` and apply
/// any that one of our posting identities can decrypt. Intended to run
/// after a pull-sync so newly-received distribution posts take effect
/// immediately. Returns how many posts were applied.
pub fn process_pending(
    s: &Storage,
    our_personas: &[PostingIdentity],
) -> anyhow::Result<usize> {
    // Cheap scan: iterate all posts, filter by intent. The table is small
    // in practice (few groups × few epochs).
    let mut applied = 0usize;
    for (id, post, visibility) in s.list_posts_with_visibility()? {
        if !matches!(s.get_post_intent(&id)?, Some(VisibilityIntent::GroupKeyDistribute)) {
            continue;
        }
        if try_apply_distribution_post(s, &post, &visibility, our_personas)? {
            applied += 1;
        }
    }
    Ok(applied)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::storage::Storage;
    use ed25519_dalek::SigningKey;

    /// Fresh in-memory storage for each test.
    fn temp_storage() -> Storage {
        Storage::open(":memory:").unwrap()
    }

    /// Deterministic ed25519 keypair from a repeated seed byte; returns
    /// (secret seed, posting NodeId).
    fn make_keypair(seed_byte: u8) -> ([u8; 32], NodeId) {
        let seed = [seed_byte; 32];
        let signing_key = SigningKey::from_bytes(&seed);
        let public = signing_key.verifying_key();
        (seed, *public.as_bytes())
    }

    /// Wrap a raw keypair as a PostingIdentity with empty metadata.
    fn mk_persona(seed: [u8; 32], node_id: NodeId) -> PostingIdentity {
        PostingIdentity {
            node_id,
            secret_seed: seed,
            display_name: String::new(),
            created_at: 0,
        }
    }

    #[test]
    fn forged_admin_is_rejected() {
        // Scenario: an attacker knows the victim's posting pubkey and the
        // target group_id. They craft an encrypted distribution post
        // addressed to the victim, claiming themselves as the group admin.
        // Without the author-vs-admin check the victim would overwrite
        // their legitimate group key record.
        let s = temp_storage();
        let (real_admin_sec, real_admin_id) = make_keypair(1);
        let (attacker_sec, attacker_id) = make_keypair(9);
        let (victim_sec, victim_id) = make_keypair(2);
        // Seed the victim with a legitimate group record so we can
        // verify it isn't overwritten by the forgery.
        let group_id = [77u8; 32];
        let real_pubkey = [1u8; 32];
        let real_seed = [42u8; 32];
        let real_record = GroupKeyRecord {
            group_id,
            circle_name: "real".to_string(),
            epoch: 1,
            group_public_key: real_pubkey,
            admin: real_admin_id,
            created_at: 100,
            canonical_root_post_id: None,
        };
        let (_, real_post, real_vis) = build_distribution_post(
            &real_admin_id, &real_admin_sec, &real_record, &real_seed, &[victim_id],
        ).unwrap();
        let victim_personas = vec![mk_persona(victim_sec, victim_id)];
        // Legitimate distribution applies and records the real admin.
        assert!(try_apply_distribution_post(&s, &real_post, &real_vis, &victim_personas).unwrap());
        assert_eq!(s.get_group_key(&group_id).unwrap().unwrap().admin, real_admin_id);
        // Attacker authors a forgery: post.author is attacker, but the
        // inner `admin` field claims to be the real admin.
        let forged_content = GroupKeyDistributionContent {
            group_id,
            circle_name: "real".to_string(),
            epoch: 2,
            group_public_key: [255u8; 32],
            admin: real_admin_id, // lies inside the encrypted payload
            canonical_root_post_id: None,
            group_seed: [0xFFu8; 32],
        };
        let plaintext = serde_json::to_string(&forged_content).unwrap();
        // The forgery is genuinely decryptable by the victim — only the
        // admin-vs-author check can reject it.
        let (ciphertext, wrapped) = crate::crypto::encrypt_post(
            &plaintext, &attacker_sec, &attacker_id, &[victim_id],
        ).unwrap();
        let forged_post = Post {
            author: attacker_id, // real author — attacker, not admin
            content: ciphertext,
            attachments: vec![],
            timestamp_ms: 200,
        };
        let forged_vis = PostVisibility::Encrypted { recipients: wrapped };
        let applied = try_apply_distribution_post(&s, &forged_post, &forged_vis, &victim_personas).unwrap();
        assert!(!applied, "forged distribution post must not be applied");
        // Legitimate group key must be untouched.
        let stored = s.get_group_key(&group_id).unwrap().unwrap();
        assert_eq!(stored.admin, real_admin_id);
        assert_eq!(stored.group_public_key, real_pubkey);
    }

    #[test]
    fn member_decrypts_and_applies() {
        let s = temp_storage();
        let (admin_sec, admin_id) = make_keypair(1);
        let (member_sec, member_id) = make_keypair(2);
        let (nonmember_sec, nonmember_id) = make_keypair(3);
        let group_id = [42u8; 32];
        let group_pubkey = [7u8; 32];
        let group_seed = [9u8; 32];
        let record = GroupKeyRecord {
            group_id,
            circle_name: "fam".to_string(),
            epoch: 1,
            group_public_key: group_pubkey,
            admin: admin_id,
            created_at: 100,
            canonical_root_post_id: None,
        };
        let (_pid, post, visibility) = build_distribution_post(
            &admin_id, &admin_sec, &record, &group_seed, &[member_id],
        ).unwrap();
        // Member applies successfully: record and epoch-1 seed are stored.
        let member_personas = vec![mk_persona(member_sec, member_id)];
        let applied = try_apply_distribution_post(&s, &post, &visibility, &member_personas).unwrap();
        assert!(applied);
        let stored = s.get_group_key(&group_id).unwrap().unwrap();
        assert_eq!(stored.circle_name, "fam");
        let seed = s.get_group_seed(&group_id, 1).unwrap().unwrap();
        assert_eq!(seed, group_seed);
        // Non-member can't: nothing is applied or stored.
        let s2 = temp_storage();
        let nonmember_personas = vec![mk_persona(nonmember_sec, nonmember_id)];
        let applied2 = try_apply_distribution_post(&s2, &post, &visibility, &nonmember_personas).unwrap();
        assert!(!applied2);
        assert!(s2.get_group_key(&group_id).unwrap().is_none());
    }
}

View file

@ -2,16 +2,13 @@ pub mod activity;
pub mod blob; pub mod blob;
pub mod connection; pub mod connection;
pub mod content; pub mod content;
pub mod control;
pub mod crypto; pub mod crypto;
pub mod group_key_distribution;
pub mod http; pub mod http;
pub mod export; pub mod export;
pub mod identity; pub mod identity;
pub mod import; pub mod import;
pub mod network; pub mod network;
pub mod node; pub mod node;
pub mod profile;
pub mod protocol; pub mod protocol;
pub mod storage; pub mod storage;
pub mod stun; pub mod stun;

View file

@ -12,14 +12,15 @@ use crate::blob::BlobStore;
use crate::connection::{initial_exchange_accept, initial_exchange_connect, ConnHandle, ConnectionActor, ConnectionManager, ExchangeResult}; use crate::connection::{initial_exchange_accept, initial_exchange_connect, ConnHandle, ConnectionActor, ConnectionManager, ExchangeResult};
use crate::content::verify_post_id; use crate::content::verify_post_id;
use crate::protocol::{ use crate::protocol::{
read_message_type, read_payload, write_typed_message, BlobRequestPayload, BlobResponsePayload, read_message_type, read_payload, write_typed_message, AudienceRequestPayload,
MessageType, ProfileUpdatePayload, AudienceResponsePayload, BlobRequestPayload, BlobResponsePayload, DeleteRecordPayload,
MessageType, PostNotificationPayload, PostPushPayload, ProfileUpdatePayload,
PullSyncRequestPayload, PullSyncResponsePayload, RefuseRedirectPayload, PullSyncRequestPayload, PullSyncResponsePayload, RefuseRedirectPayload,
ALPN_V2, SocialAddressUpdatePayload, SocialDisconnectNoticePayload, SyncPost, ALPN_V2,
}; };
use crate::storage::StoragePool; use crate::storage::StoragePool;
use crate::types::{ use crate::types::{
DeviceProfile, DeviceRole, NodeId, PeerSlotKind, Post, PostId, DeleteRecord, DeviceProfile, DeviceRole, NodeId, PeerSlotKind, PeerWithAddress, Post, PostId,
PostVisibility, PublicProfile, SessionReachMethod, WormResult, PostVisibility, PublicProfile, SessionReachMethod, WormResult,
}; };
@ -892,7 +893,16 @@ impl Network {
Ok(sent) Ok(sent)
} }
/// Push a profile update to all audience members (ephemeral-capable). /// Send a post notification to all audience members (ephemeral-capable).
pub async fn notify_post(&self, post_id: &crate::types::PostId, author: &NodeId) -> usize {
let payload = PostNotificationPayload {
post_id: *post_id,
author: *author,
};
self.send_to_audience(MessageType::PostNotification, &payload).await
}
/// Push a profile update to all audience members (ephemeral-capable).
pub async fn push_profile(&self, profile: &PublicProfile) -> usize { pub async fn push_profile(&self, profile: &PublicProfile) -> usize {
// v0.6.1: profiles broadcast on the wire are keyed by the network // v0.6.1: profiles broadcast on the wire are keyed by the network
// NodeId. They carry ONLY routing metadata (anchors, recent_peers, // NodeId. They carry ONLY routing metadata (anchors, recent_peers,
@ -949,7 +959,38 @@ impl Network {
sent sent
} }
/// Push a visibility update to all connected peers. /// Push a delete record to all audience members (ephemeral-capable).
pub async fn push_delete(&self, record: &DeleteRecord) -> usize {
let payload = DeleteRecordPayload {
records: vec![record.clone()],
};
self.send_to_audience(MessageType::DeleteRecord, &payload).await
}
/// Push a disconnect notice to all audience members (ephemeral-capable).
pub async fn push_disconnect_to_audience(&self, disconnected_peer: &NodeId) -> usize {
let payload = SocialDisconnectNoticePayload {
node_id: *disconnected_peer,
};
self.send_to_audience(MessageType::SocialDisconnectNotice, &payload).await
}
/// Push a social address update to all audience members (ephemeral-capable).
pub async fn push_address_update_to_audience(
&self,
node_id: &NodeId,
addresses: &[String],
peer_addresses: &[PeerWithAddress],
) -> usize {
let payload = SocialAddressUpdatePayload {
node_id: *node_id,
addresses: addresses.to_vec(),
peer_addresses: peer_addresses.to_vec(),
};
self.send_to_audience(MessageType::SocialAddressUpdate, &payload).await
}
/// Push a visibility update to all connected peers.
/// Gets connections snapshot, sends I/O outside the lock. /// Gets connections snapshot, sends I/O outside the lock.
pub async fn push_visibility(&self, update: &crate::types::VisibilityUpdate) -> usize { pub async fn push_visibility(&self, update: &crate::types::VisibilityUpdate) -> usize {
use crate::protocol::{VisibilityUpdatePayload, write_typed_message, MessageType}; use crate::protocol::{VisibilityUpdatePayload, write_typed_message, MessageType};
@ -1008,7 +1049,29 @@ impl Network {
sent sent
} }
/// Request a manifest refresh from the upstream peer for a blob CID. /// Send blob delete notices to all known holders of a file.
/// Second argument kept as Option for signature stability; flat-holder
/// model doesn't need separate upstream handling.
pub async fn send_blob_delete_notices(
&self,
cid: &[u8; 32],
holders: &[(NodeId, Vec<String>)],
_legacy_upstream: Option<&(NodeId, Vec<String>)>,
) -> usize {
let payload = crate::protocol::BlobDeleteNoticePayload {
cid: *cid,
upstream_node: None,
};
let mut sent = 0;
for (peer, _addrs) in holders {
if self.send_to_peer_uni(peer, MessageType::BlobDeleteNotice, &payload).await.is_ok() {
sent += 1;
}
}
sent
}
/// Request a manifest refresh from the upstream peer for a blob CID.
/// Returns the updated manifest if the upstream has a newer version. /// Returns the updated manifest if the upstream has a newer version.
pub async fn request_manifest_refresh( pub async fn request_manifest_refresh(
&self, &self,
@ -1033,7 +1096,71 @@ impl Network {
} }
} }
/// Send a social checkin to a peer (persistent if available, ephemeral otherwise). /// Send an audience request to a peer (persistent if available, ephemeral otherwise).
pub async fn send_audience_request(&self, target: &NodeId) -> anyhow::Result<()> {
let payload = AudienceRequestPayload {
requester: self.our_node_id,
};
self.send_to_peer_uni(target, MessageType::AudienceRequest, &payload).await
}
/// Send an audience response to a peer (persistent if available, ephemeral otherwise).
pub async fn send_audience_response(&self, target: &NodeId, approved: bool) -> anyhow::Result<()> {
let payload = AudienceResponsePayload {
responder: self.our_node_id,
approved,
};
self.send_to_peer_uni(target, MessageType::AudienceResponse, &payload).await
}
/// Push a public post to audience members (persistent if available, ephemeral otherwise).
pub async fn push_to_audience(
&self,
post_id: &crate::types::PostId,
post: &Post,
visibility: &PostVisibility,
) -> usize {
if !matches!(visibility, PostVisibility::Public) {
return 0;
}
let audience_members: Vec<NodeId> = {
match self.storage.get().await.list_audience_members() {
Ok(m) => m,
Err(_) => return 0,
}
};
let payload = PostPushPayload {
post: SyncPost {
id: *post_id,
post: post.clone(),
visibility: visibility.clone(),
},
};
let mut pushed = 0;
for member in &audience_members {
if self.send_to_peer_uni(member, MessageType::PostPush, &payload).await.is_ok() {
pushed += 1;
}
}
pushed
}
/// Push a group key to a specific peer (uni-stream).
pub async fn push_group_key(
&self,
peer: &NodeId,
payload: &crate::protocol::GroupKeyDistributePayload,
) -> bool {
self.send_to_peer_uni(peer, MessageType::GroupKeyDistribute, payload)
.await
.is_ok()
}
/// Send a social checkin to a peer (persistent if available, ephemeral otherwise).
pub async fn send_social_checkin( pub async fn send_social_checkin(
&self, &self,
peer_id: &NodeId, peer_id: &NodeId,
@ -1566,26 +1693,37 @@ impl Network {
} }
} }
/// Pull posts from a peer (persistent if available, ephemeral otherwise). // ---- Audience-targeted + ephemeral helpers ----
/// Send a uni-stream message to all audience members (persistent if available, ephemeral otherwise).
async fn send_to_audience<T: Serialize>(&self, msg_type: MessageType, payload: &T) -> usize {
let audience: Vec<NodeId> = match self.storage.get().await.list_audience_members() {
Ok(m) => m,
Err(_) => return 0,
};
let mut sent = 0;
for member in &audience {
if self.send_to_peer_uni(member, msg_type, payload).await.is_ok() {
sent += 1;
}
}
sent
}
/// Pull posts from a peer (persistent if available, ephemeral otherwise).
pub async fn pull_from_peer(&self, peer_id: &NodeId) -> anyhow::Result<PullStats> { pub async fn pull_from_peer(&self, peer_id: &NodeId) -> anyhow::Result<PullStats> {
let conn = self.get_connection(peer_id).await?; let conn = self.get_connection(peer_id).await?;
let (our_follows, follows_sync, our_personas) = { let (our_follows, follows_sync) = {
let storage = self.storage.get().await; let storage = self.storage.get().await;
( (
storage.list_follows()?, storage.list_follows()?,
storage.get_follows_with_last_sync().unwrap_or_default(), storage.get_follows_with_last_sync().unwrap_or_default(),
storage.list_posting_identities().unwrap_or_default(),
) )
}; };
// Merged pull: include every posting identity we hold so DMs addressed // Merged pull: include our own NodeId so DMs addressed to us match.
// to any of our personas match on recipient. Our network NodeId is
// never an author nor a wrapped_key recipient — including it would
// never match and would leak the network↔posting boundary.
let mut query_list = our_follows; let mut query_list = our_follows;
for pi in &our_personas { if !query_list.contains(&self.our_node_id) {
if !query_list.contains(&pi.node_id) { query_list.push(self.our_node_id);
query_list.push(pi.node_id);
}
} }
let (mut send, mut recv) = conn.open_bi().await?; let (mut send, mut recv) = conn.open_bi().await?;
write_typed_message( write_typed_message(

File diff suppressed because it is too large Load diff

View file

@ -1,181 +0,0 @@
//! Profile posts: persona display metadata (display_name, bio, avatar_cid)
//! carried as a signed public post with `VisibilityIntent::Profile`.
//!
//! The post's `author` is the posting identity; the signature inside
//! `ProfilePostContent` is by that identity's secret. Profile posts propagate
//! via the normal CDN path (pull + header-diff). Receivers verify the
//! signature, then upsert a row in the `profiles` table keyed by the post's
//! author (= posting identity) with the new display fields.
//!
//! Profile posts are never rendered in feeds — the feed filter excludes
//! `VisibilityIntent::Profile` posts (see `Storage::get_feed*`).
use crate::crypto;
use crate::storage::Storage;
use crate::types::{NodeId, Post, PostId, PostVisibility, ProfilePostContent, PublicProfile, VisibilityIntent};
/// Verify a profile-post signature without any other side effects. Used by
/// receive paths before storing, so bogus profile posts with invalid
/// signatures never enter storage and can't be re-propagated.
pub fn verify_profile_post(post: &Post) -> anyhow::Result<ProfilePostContent> {
    // The post body must deserialize as profile metadata before we even
    // look at the signature.
    let content: ProfilePostContent = serde_json::from_str(&post.content)
        .map_err(|e| anyhow::anyhow!("profile post content is not a valid ProfilePostContent: {}", e))?;
    let signature_ok = crypto::verify_profile(
        &post.author,
        &content.display_name,
        &content.bio,
        &content.avatar_cid,
        content.timestamp_ms,
        &content.signature,
    );
    if signature_ok {
        Ok(content)
    } else {
        anyhow::bail!("invalid profile-post signature")
    }
}
/// If the post is a Profile post, verify + apply by upserting the
/// `profiles` row keyed by the post's author (= posting identity). Only
/// applied if newer than the existing row's `updated_at`.
pub fn apply_profile_post_if_applicable(
    s: &Storage,
    post: &Post,
    intent: Option<&VisibilityIntent>,
) -> anyhow::Result<()> {
    // Only Profile-intent posts are handled here; everything else is a no-op.
    match intent {
        Some(VisibilityIntent::Profile) => {}
        _ => return Ok(()),
    }
    let content = verify_profile_post(post)?;
    // Last-writer-wins by timestamp: skip anything not strictly newer than
    // the row already stored for this author.
    let stale = s
        .get_profile(&post.author)?
        .map_or(false, |existing| existing.updated_at >= content.timestamp_ms);
    if stale {
        return Ok(());
    }
    s.store_profile(&PublicProfile {
        node_id: post.author,
        display_name: content.display_name,
        bio: content.bio,
        updated_at: content.timestamp_ms,
        anchors: vec![],
        recent_peers: vec![],
        preferred_peers: vec![],
        public_visible: true,
        avatar_cid: content.avatar_cid,
    })?;
    Ok(())
}
/// Build a Profile post signed by the posting identity. Caller is
/// responsible for storing and propagating it.
pub fn build_profile_post(
    author: &NodeId,
    author_secret: &[u8; 32],
    display_name: &str,
    bio: &str,
    avatar_cid: Option<[u8; 32]>,
) -> Post {
    use std::time::{SystemTime, UNIX_EPOCH};
    // Wall-clock millis; falls back to 0 if the clock is before the epoch.
    let timestamp_ms = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_or(0, |d| d.as_millis() as u64);
    let content = ProfilePostContent {
        display_name: display_name.to_string(),
        bio: bio.to_string(),
        avatar_cid,
        timestamp_ms,
        // The signature binds name/bio/avatar/timestamp to the identity.
        signature: crypto::sign_profile(author_secret, display_name, bio, &avatar_cid, timestamp_ms),
    };
    Post {
        author: *author,
        content: serde_json::to_string(&content).unwrap_or_default(),
        attachments: vec![],
        timestamp_ms,
    }
}
/// Profile-post visibility is always Public on the wire: the signature binds
/// the content to the posting identity and no recipient targeting is needed.
/// Receivers authenticate via `verify_profile_post`, not via encryption.
pub fn profile_post_visibility() -> PostVisibility {
    PostVisibility::Public
}
/// Compute the `PostId` for a freshly-built profile post.
/// Thin wrapper over the canonical content-addressed id computation.
pub fn profile_post_id(post: &Post) -> PostId {
    crate::content::compute_post_id(post)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::storage::Storage;
    use ed25519_dalek::SigningKey;

    /// Fresh in-memory database for each test.
    fn temp_storage() -> Storage {
        Storage::open(":memory:").unwrap()
    }

    /// Deterministic keypair from a repeated seed byte.
    fn make_keypair(seed_byte: u8) -> ([u8; 32], NodeId) {
        let seed = [seed_byte; 32];
        let public = SigningKey::from_bytes(&seed).verifying_key();
        (seed, *public.as_bytes())
    }

    /// Force a profile post's timestamp to `ts` and re-sign its content so
    /// only the last-writer-wins ordering check can reject it.
    fn with_timestamp(post: &mut Post, sec: &[u8; 32], ts: u64) {
        let mut content: ProfilePostContent = serde_json::from_str(&post.content).unwrap();
        content.timestamp_ms = ts;
        content.signature = crypto::sign_profile(sec, &content.display_name, &content.bio, &content.avatar_cid, ts);
        post.content = serde_json::to_string(&content).unwrap();
        post.timestamp_ms = ts;
    }

    #[test]
    fn profile_roundtrip_verifies_and_stores() {
        let s = temp_storage();
        let (sec, id) = make_keypair(11);
        let post = build_profile_post(&id, &sec, "Alice", "hello world", None);
        apply_profile_post_if_applicable(&s, &post, Some(&VisibilityIntent::Profile)).unwrap();
        let row = s.get_profile(&id).unwrap().expect("profile stored");
        assert_eq!(row.display_name, "Alice");
        assert_eq!(row.bio, "hello world");
    }

    #[test]
    fn profile_rejects_wrong_author_signature() {
        let s = temp_storage();
        let (_sec_a, pub_a) = make_keypair(1);
        let (sec_b, _pub_b) = make_keypair(2);
        // Claim `pub_a` as author while actually signing with `sec_b`.
        let forged = build_profile_post(&pub_a, &sec_b, "Impostor", "", None);
        let res = apply_profile_post_if_applicable(&s, &forged, Some(&VisibilityIntent::Profile));
        assert!(res.is_err());
        assert!(s.get_profile(&pub_a).unwrap().is_none());
    }

    #[test]
    fn profile_ignores_older_timestamp() {
        let s = temp_storage();
        let (sec, id) = make_keypair(3);
        // Seed with a clearly-newer profile...
        let mut newer = build_profile_post(&id, &sec, "NewName", "", None);
        with_timestamp(&mut newer, &sec, 10_000);
        apply_profile_post_if_applicable(&s, &newer, Some(&VisibilityIntent::Profile)).unwrap();
        // ...then try to apply an older one; it must be ignored.
        let mut older = build_profile_post(&id, &sec, "OldName", "", None);
        with_timestamp(&mut older, &sec, 5_000);
        apply_profile_post_if_applicable(&s, &older, Some(&VisibilityIntent::Profile)).unwrap();
        assert_eq!(s.get_profile(&id).unwrap().unwrap().display_name, "NewName");
    }
}

View file

@ -14,11 +14,6 @@ pub struct SyncPost {
pub id: PostId, pub id: PostId,
pub post: Post, pub post: Post,
pub visibility: PostVisibility, pub visibility: PostVisibility,
/// Optional originator's intent, so receivers can filter control posts
/// out of the feed and process their ControlOp payload. Absent on
/// pre-v0.6.2 senders; receivers treat as "unknown"/regular post.
#[serde(default)]
pub intent: Option<crate::types::VisibilityIntent>,
} }
/// Message type byte for stream multiplexing /// Message type byte for stream multiplexing
@ -32,9 +27,10 @@ pub enum MessageType {
RefuseRedirect = 0x05, RefuseRedirect = 0x05,
PullSyncRequest = 0x40, PullSyncRequest = 0x40,
PullSyncResponse = 0x41, PullSyncResponse = 0x41,
// 0x42 (PostNotification), 0x43 (PostPush), 0x44 (AudienceRequest), PostNotification = 0x42,
// 0x45 (AudienceResponse) retired in v0.6.2: persona-signed direct pushes PostPush = 0x43,
// are gone. Public posts propagate via the CDN; encrypted posts via pull. AudienceRequest = 0x44,
AudienceResponse = 0x45,
ProfileUpdate = 0x50, ProfileUpdate = 0x50,
DeleteRecord = 0x51, DeleteRecord = 0x51,
VisibilityUpdate = 0x52, VisibilityUpdate = 0x52,
@ -49,9 +45,8 @@ pub enum MessageType {
ManifestRefreshRequest = 0x92, ManifestRefreshRequest = 0x92,
ManifestRefreshResponse = 0x93, ManifestRefreshResponse = 0x93,
ManifestPush = 0x94, ManifestPush = 0x94,
// 0x95 (BlobDeleteNotice) retired in v0.6.2 — remote holders evict via LRU. BlobDeleteNotice = 0x95,
// 0xA0 (GroupKeyDistribute) retired in v0.6.2 — group seeds now travel GroupKeyDistribute = 0xA0,
// as encrypted posts via the CDN. See `group_key_distribution` module.
GroupKeyRequest = 0xA1, GroupKeyRequest = 0xA1,
GroupKeyResponse = 0xA2, GroupKeyResponse = 0xA2,
RelayIntroduce = 0xB0, RelayIntroduce = 0xB0,
@ -90,6 +85,10 @@ impl MessageType {
0x05 => Some(Self::RefuseRedirect), 0x05 => Some(Self::RefuseRedirect),
0x40 => Some(Self::PullSyncRequest), 0x40 => Some(Self::PullSyncRequest),
0x41 => Some(Self::PullSyncResponse), 0x41 => Some(Self::PullSyncResponse),
0x42 => Some(Self::PostNotification),
0x43 => Some(Self::PostPush),
0x44 => Some(Self::AudienceRequest),
0x45 => Some(Self::AudienceResponse),
0x50 => Some(Self::ProfileUpdate), 0x50 => Some(Self::ProfileUpdate),
0x51 => Some(Self::DeleteRecord), 0x51 => Some(Self::DeleteRecord),
0x52 => Some(Self::VisibilityUpdate), 0x52 => Some(Self::VisibilityUpdate),
@ -103,6 +102,8 @@ impl MessageType {
0x92 => Some(Self::ManifestRefreshRequest), 0x92 => Some(Self::ManifestRefreshRequest),
0x93 => Some(Self::ManifestRefreshResponse), 0x93 => Some(Self::ManifestRefreshResponse),
0x94 => Some(Self::ManifestPush), 0x94 => Some(Self::ManifestPush),
0x95 => Some(Self::BlobDeleteNotice),
0xA0 => Some(Self::GroupKeyDistribute),
0xA1 => Some(Self::GroupKeyRequest), 0xA1 => Some(Self::GroupKeyRequest),
0xA2 => Some(Self::GroupKeyResponse), 0xA2 => Some(Self::GroupKeyResponse),
0xB0 => Some(Self::RelayIntroduce), 0xB0 => Some(Self::RelayIntroduce),
@ -236,6 +237,32 @@ pub struct VisibilityUpdatePayload {
pub updates: Vec<VisibilityUpdate>, pub updates: Vec<VisibilityUpdate>,
} }
/// Post notification: lightweight push when a new post is created
#[derive(Debug, Serialize, Deserialize)]
pub struct PostNotificationPayload {
pub post_id: PostId,
pub author: NodeId,
}
/// Audience request: ask a peer to join their audience
#[derive(Debug, Serialize, Deserialize)]
pub struct AudienceRequestPayload {
pub requester: NodeId,
}
/// Audience response: approve or deny an audience request
#[derive(Debug, Serialize, Deserialize)]
pub struct AudienceResponsePayload {
pub responder: NodeId,
pub approved: bool,
}
/// Post push: full post content pushed directly to a recipient
#[derive(Debug, Serialize, Deserialize)]
pub struct PostPushPayload {
pub post: SyncPost,
}
/// Address resolution request (bi-stream: ask reporter for a hop-2 peer's address) /// Address resolution request (bi-stream: ask reporter for a hop-2 peer's address)
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub struct AddressRequestPayload { pub struct AddressRequestPayload {
@ -384,11 +411,27 @@ pub struct ManifestPushEntry {
pub manifest: CdnManifest, pub manifest: CdnManifest,
} }
/// Notify upstream/downstream that a blob has been deleted (uni-stream)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BlobDeleteNoticePayload {
pub cid: [u8; 32],
/// If sender was upstream and is providing their own upstream for tree healing
#[serde(default)]
pub upstream_node: Option<PeerWithAddress>,
}
// --- Group key distribution payloads --- // --- Group key distribution payloads ---
// GroupKeyDistributePayload (v0.6.1) retired: group seeds now travel as /// Admin pushes wrapped group key to a member (uni-stream)
// encrypted posts (`VisibilityIntent::GroupKeyDistribute`). See #[derive(Debug, Serialize, Deserialize)]
// `crate::group_key_distribution` and `types::GroupKeyDistributionContent`. pub struct GroupKeyDistributePayload {
pub group_id: GroupId,
pub circle_name: String,
pub epoch: GroupEpoch,
pub group_public_key: [u8; 32],
pub admin: NodeId,
pub member_keys: Vec<GroupMemberKey>,
}
/// Member requests current group key (bi-stream request) /// Member requests current group key (bi-stream request)
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
@ -732,6 +775,10 @@ mod tests {
MessageType::RefuseRedirect, MessageType::RefuseRedirect,
MessageType::PullSyncRequest, MessageType::PullSyncRequest,
MessageType::PullSyncResponse, MessageType::PullSyncResponse,
MessageType::PostNotification,
MessageType::PostPush,
MessageType::AudienceRequest,
MessageType::AudienceResponse,
MessageType::ProfileUpdate, MessageType::ProfileUpdate,
MessageType::DeleteRecord, MessageType::DeleteRecord,
MessageType::VisibilityUpdate, MessageType::VisibilityUpdate,
@ -745,6 +792,8 @@ mod tests {
MessageType::ManifestRefreshRequest, MessageType::ManifestRefreshRequest,
MessageType::ManifestRefreshResponse, MessageType::ManifestRefreshResponse,
MessageType::ManifestPush, MessageType::ManifestPush,
MessageType::BlobDeleteNotice,
MessageType::GroupKeyDistribute,
MessageType::GroupKeyRequest, MessageType::GroupKeyRequest,
MessageType::GroupKeyResponse, MessageType::GroupKeyResponse,
MessageType::RelayIntroduce, MessageType::RelayIntroduce,
@ -786,6 +835,36 @@ mod tests {
assert!(MessageType::from_byte(0x06).is_none()); assert!(MessageType::from_byte(0x06).is_none());
} }
#[test]
fn blob_delete_notice_payload_roundtrip() {
use crate::types::PeerWithAddress;
// Without upstream
let payload = BlobDeleteNoticePayload {
cid: [42u8; 32],
upstream_node: None,
};
let json = serde_json::to_string(&payload).unwrap();
let decoded: BlobDeleteNoticePayload = serde_json::from_str(&json).unwrap();
assert_eq!(decoded.cid, [42u8; 32]);
assert!(decoded.upstream_node.is_none());
// With upstream
let payload_with_up = BlobDeleteNoticePayload {
cid: [99u8; 32],
upstream_node: Some(PeerWithAddress {
n: hex::encode([1u8; 32]),
a: vec!["10.0.0.1:4433".to_string()],
}),
};
let json2 = serde_json::to_string(&payload_with_up).unwrap();
let decoded2: BlobDeleteNoticePayload = serde_json::from_str(&json2).unwrap();
assert_eq!(decoded2.cid, [99u8; 32]);
assert!(decoded2.upstream_node.is_some());
let up = decoded2.upstream_node.unwrap();
assert_eq!(up.a, vec!["10.0.0.1:4433".to_string()]);
}
#[test] #[test]
fn relay_introduce_payload_roundtrip() { fn relay_introduce_payload_roundtrip() {
let payload = RelayIntroducePayload { let payload = RelayIntroducePayload {

View file

@ -4,7 +4,7 @@ use std::path::Path;
use rusqlite::{params, Connection}; use rusqlite::{params, Connection};
use crate::types::{ use crate::types::{
Attachment, Circle, CircleProfile, Attachment, AudienceDirection, AudienceRecord, AudienceStatus, Circle, CircleProfile,
CommentPolicy, DeleteRecord, FollowVisibility, GossipPeerInfo, GroupEpoch, GroupId, CommentPolicy, DeleteRecord, FollowVisibility, GossipPeerInfo, GroupEpoch, GroupId,
GroupKeyRecord, GroupMemberKey, InlineComment, ManifestEntry, NodeId, PeerRecord, GroupKeyRecord, GroupMemberKey, InlineComment, ManifestEntry, NodeId, PeerRecord,
PeerSlotKind, PeerWithAddress, Post, PostId, PostVisibility, PostingIdentity, PeerSlotKind, PeerWithAddress, Post, PostId, PostVisibility, PostingIdentity,
@ -212,8 +212,14 @@ impl Storage {
PRIMARY KEY (peer_id, neighbor_id) PRIMARY KEY (peer_id, neighbor_id)
); );
CREATE INDEX IF NOT EXISTS idx_peer_neighbors_neighbor ON peer_neighbors(neighbor_id); CREATE INDEX IF NOT EXISTS idx_peer_neighbors_neighbor ON peer_neighbors(neighbor_id);
-- v0.6.2: audience table removed. Upgraded DBs still have the CREATE TABLE IF NOT EXISTS audience (
-- orphan table; it's untouched by new code. New DBs don't get it. node_id BLOB NOT NULL,
direction TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'pending',
requested_at INTEGER NOT NULL,
approved_at INTEGER,
PRIMARY KEY (node_id, direction)
);
CREATE TABLE IF NOT EXISTS worm_cooldowns ( CREATE TABLE IF NOT EXISTS worm_cooldowns (
target_id BLOB PRIMARY KEY, target_id BLOB PRIMARY KEY,
failed_at INTEGER NOT NULL failed_at INTEGER NOT NULL
@ -283,11 +289,9 @@ impl Storage {
group_public_key BLOB NOT NULL, group_public_key BLOB NOT NULL,
group_seed BLOB, group_seed BLOB,
admin BLOB NOT NULL, admin BLOB NOT NULL,
created_at INTEGER NOT NULL, created_at INTEGER NOT NULL
canonical_root_post_id BLOB
); );
CREATE INDEX IF NOT EXISTS idx_group_keys_circle ON group_keys(circle_name); CREATE INDEX IF NOT EXISTS idx_group_keys_circle ON group_keys(circle_name);
CREATE INDEX IF NOT EXISTS idx_group_keys_root ON group_keys(canonical_root_post_id);
CREATE TABLE IF NOT EXISTS group_member_keys ( CREATE TABLE IF NOT EXISTS group_member_keys (
group_id BLOB NOT NULL, group_id BLOB NOT NULL,
member BLOB NOT NULL, member BLOB NOT NULL,
@ -349,7 +353,6 @@ impl Storage {
content TEXT NOT NULL, content TEXT NOT NULL,
timestamp_ms INTEGER NOT NULL, timestamp_ms INTEGER NOT NULL,
signature BLOB NOT NULL, signature BLOB NOT NULL,
ref_post_id BLOB,
PRIMARY KEY (author, post_id, timestamp_ms) PRIMARY KEY (author, post_id, timestamp_ms)
); );
CREATE INDEX IF NOT EXISTS idx_comments_post ON comments(post_id); CREATE INDEX IF NOT EXISTS idx_comments_post ON comments(post_id);
@ -639,30 +642,6 @@ impl Storage {
)?; )?;
} }
// v0.6.2: add ref_post_id for rich comments (preview-inline,
// full-body-in-referenced-post). NULL for plain comments.
let has_ref_post_id = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('comments') WHERE name='ref_post_id'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_ref_post_id == 0 {
self.conn.execute_batch(
"ALTER TABLE comments ADD COLUMN ref_post_id BLOB DEFAULT NULL;"
)?;
}
// v0.6.2: add canonical_root_post_id to group_keys. When set, the
// record is a group (many-way, anchored at a public root post);
// when NULL it's a traditional circle (one-way, admin-only).
let has_canonical_root = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('group_keys') WHERE name='canonical_root_post_id'"
)?.query_row([], |row| row.get::<_, i64>(0))?;
if has_canonical_root == 0 {
self.conn.execute_batch(
"ALTER TABLE group_keys ADD COLUMN canonical_root_post_id BLOB DEFAULT NULL;
CREATE INDEX IF NOT EXISTS idx_group_keys_root ON group_keys(canonical_root_post_id);"
)?;
}
// Add device_role column to peers if missing (Active CDN replication) // Add device_role column to peers if missing (Active CDN replication)
let has_device_role = self.conn.prepare( let has_device_role = self.conn.prepare(
"SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='device_role'" "SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='device_role'"
@ -854,9 +833,7 @@ impl Storage {
/// All posts, newest first (with visibility) /// All posts, newest first (with visibility)
pub fn list_posts_reverse_chron(&self) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> { pub fn list_posts_reverse_chron(&self) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
let mut stmt = self.conn.prepare( let mut stmt = self.conn.prepare(
"SELECT id, author, content, attachments, timestamp_ms, visibility FROM posts "SELECT id, author, content, attachments, timestamp_ms, visibility FROM posts ORDER BY timestamp_ms DESC",
WHERE (visibility_intent IS NULL OR (visibility_intent != '\"Control\"' AND visibility_intent != '\"Profile\"'))
ORDER BY timestamp_ms DESC",
)?; )?;
let rows = stmt.query_map([], |row| { let rows = stmt.query_map([], |row| {
let id_bytes: Vec<u8> = row.get(0)?; let id_bytes: Vec<u8> = row.get(0)?;
@ -892,7 +869,6 @@ impl Storage {
"SELECT p.id, p.author, p.content, p.attachments, p.timestamp_ms, p.visibility "SELECT p.id, p.author, p.content, p.attachments, p.timestamp_ms, p.visibility
FROM posts p FROM posts p
INNER JOIN follows f ON p.author = f.node_id INNER JOIN follows f ON p.author = f.node_id
WHERE (p.visibility_intent IS NULL OR (p.visibility_intent != '\"Control\"' AND p.visibility_intent != '\"Profile\"'))
ORDER BY p.timestamp_ms DESC", ORDER BY p.timestamp_ms DESC",
)?; )?;
let rows = stmt.query_map([], |row| { let rows = stmt.query_map([], |row| {
@ -929,12 +905,10 @@ impl Storage {
"SELECT p.id, p.author, p.content, p.attachments, p.timestamp_ms, p.visibility "SELECT p.id, p.author, p.content, p.attachments, p.timestamp_ms, p.visibility
FROM posts p INNER JOIN follows f ON p.author = f.node_id FROM posts p INNER JOIN follows f ON p.author = f.node_id
WHERE p.timestamp_ms < ?1 WHERE p.timestamp_ms < ?1
AND (p.visibility_intent IS NULL OR (p.visibility_intent != '\"Control\"' AND p.visibility_intent != '\"Profile\"'))
ORDER BY p.timestamp_ms DESC LIMIT ?2" ORDER BY p.timestamp_ms DESC LIMIT ?2"
} else { } else {
"SELECT p.id, p.author, p.content, p.attachments, p.timestamp_ms, p.visibility "SELECT p.id, p.author, p.content, p.attachments, p.timestamp_ms, p.visibility
FROM posts p INNER JOIN follows f ON p.author = f.node_id FROM posts p INNER JOIN follows f ON p.author = f.node_id
WHERE (p.visibility_intent IS NULL OR (p.visibility_intent != '\"Control\"' AND p.visibility_intent != '\"Profile\"'))
ORDER BY p.timestamp_ms DESC LIMIT ?2" ORDER BY p.timestamp_ms DESC LIMIT ?2"
}; };
let mut stmt = self.conn.prepare(sql)?; let mut stmt = self.conn.prepare(sql)?;
@ -950,15 +924,11 @@ impl Storage {
pub fn list_posts_page(&self, before_ms: Option<u64>, limit: usize) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> { pub fn list_posts_page(&self, before_ms: Option<u64>, limit: usize) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
let sql = if before_ms.is_some() { let sql = if before_ms.is_some() {
"SELECT id, author, content, attachments, timestamp_ms, visibility "SELECT id, author, content, attachments, timestamp_ms, visibility
FROM posts FROM posts WHERE timestamp_ms < ?1
WHERE timestamp_ms < ?1
AND (visibility_intent IS NULL OR (visibility_intent != '\"Control\"' AND visibility_intent != '\"Profile\"'))
ORDER BY timestamp_ms DESC LIMIT ?2" ORDER BY timestamp_ms DESC LIMIT ?2"
} else { } else {
"SELECT id, author, content, attachments, timestamp_ms, visibility "SELECT id, author, content, attachments, timestamp_ms, visibility
FROM posts FROM posts ORDER BY timestamp_ms DESC LIMIT ?2"
WHERE (visibility_intent IS NULL OR (visibility_intent != '\"Control\"' AND visibility_intent != '\"Profile\"'))
ORDER BY timestamp_ms DESC LIMIT ?2"
}; };
let mut stmt = self.conn.prepare(sql)?; let mut stmt = self.conn.prepare(sql)?;
let rows = if let Some(bms) = before_ms { let rows = if let Some(bms) = before_ms {
@ -1081,39 +1051,9 @@ impl Storage {
Ok(posts) Ok(posts)
} }
/// All posts with visibility (for sync protocol and export). /// All posts with visibility (for sync protocol)
/// Includes control/profile posts — they need to propagate through the
/// CDN like any other post.
pub fn list_posts_with_visibility(&self) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> { pub fn list_posts_with_visibility(&self) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
let mut stmt = self.conn.prepare( self.list_posts_reverse_chron()
"SELECT id, author, content, attachments, timestamp_ms, visibility FROM posts ORDER BY timestamp_ms DESC",
)?;
let rows = stmt.query_map([], |row| {
let id_bytes: Vec<u8> = row.get(0)?;
let author_bytes: Vec<u8> = row.get(1)?;
let content: String = row.get(2)?;
let attachments_json: String = row.get(3)?;
let timestamp_ms: i64 = row.get(4)?;
let vis_json: String = row.get(5)?;
Ok((id_bytes, author_bytes, content, attachments_json, timestamp_ms, vis_json))
})?;
let mut posts = Vec::new();
for row in rows {
let (id_bytes, author_bytes, content, attachments_json, timestamp_ms, vis_json) = row?;
let attachments: Vec<Attachment> = serde_json::from_str(&attachments_json).unwrap_or_default();
let visibility: PostVisibility = serde_json::from_str(&vis_json).unwrap_or_default();
posts.push((
blob_to_postid(id_bytes)?,
Post {
author: blob_to_nodeid(author_bytes)?,
content,
attachments,
timestamp_ms: timestamp_ms as u64,
},
visibility,
));
}
Ok(posts)
} }
// ---- Follows ---- // ---- Follows ----
@ -2164,7 +2104,7 @@ impl Storage {
pub fn create_group_key(&self, record: &GroupKeyRecord, group_seed: Option<&[u8; 32]>) -> anyhow::Result<()> { pub fn create_group_key(&self, record: &GroupKeyRecord, group_seed: Option<&[u8; 32]>) -> anyhow::Result<()> {
self.conn.execute( self.conn.execute(
"INSERT OR REPLACE INTO group_keys (group_id, circle_name, epoch, group_public_key, group_seed, admin, created_at, canonical_root_post_id) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", "INSERT OR REPLACE INTO group_keys (group_id, circle_name, epoch, group_public_key, group_seed, admin, created_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
params![ params![
record.group_id.as_slice(), record.group_id.as_slice(),
record.circle_name, record.circle_name,
@ -2173,40 +2113,14 @@ impl Storage {
group_seed.map(|s| s.as_slice()), group_seed.map(|s| s.as_slice()),
record.admin.as_slice(), record.admin.as_slice(),
record.created_at as i64, record.created_at as i64,
record.canonical_root_post_id.as_ref().map(|r| r.as_slice()),
], ],
)?; )?;
Ok(()) Ok(())
} }
fn row_to_group_key(
gid: Vec<u8>,
circle_name: String,
epoch: i64,
gpk: Vec<u8>,
admin: Vec<u8>,
created_at: i64,
canonical_root: Option<Vec<u8>>,
) -> anyhow::Result<GroupKeyRecord> {
let canonical_root_post_id = match canonical_root {
Some(b) => Some(blob_to_postid(b)?),
None => None,
};
Ok(GroupKeyRecord {
group_id: blob_to_nodeid(gid)?,
circle_name,
epoch: epoch as u64,
group_public_key: <[u8; 32]>::try_from(gpk.as_slice())
.map_err(|_| anyhow::anyhow!("invalid group public key"))?,
admin: blob_to_nodeid(admin)?,
created_at: created_at as u64,
canonical_root_post_id,
})
}
pub fn get_group_key(&self, group_id: &GroupId) -> anyhow::Result<Option<GroupKeyRecord>> { pub fn get_group_key(&self, group_id: &GroupId) -> anyhow::Result<Option<GroupKeyRecord>> {
let result = self.conn.query_row( let result = self.conn.query_row(
"SELECT group_id, circle_name, epoch, group_public_key, admin, created_at, canonical_root_post_id FROM group_keys WHERE group_id = ?1", "SELECT group_id, circle_name, epoch, group_public_key, admin, created_at FROM group_keys WHERE group_id = ?1",
params![group_id.as_slice()], params![group_id.as_slice()],
|row| { |row| {
let gid: Vec<u8> = row.get(0)?; let gid: Vec<u8> = row.get(0)?;
@ -2215,13 +2129,20 @@ impl Storage {
let gpk: Vec<u8> = row.get(3)?; let gpk: Vec<u8> = row.get(3)?;
let admin: Vec<u8> = row.get(4)?; let admin: Vec<u8> = row.get(4)?;
let created_at: i64 = row.get(5)?; let created_at: i64 = row.get(5)?;
let canonical_root: Option<Vec<u8>> = row.get(6)?; Ok((gid, circle_name, epoch, gpk, admin, created_at))
Ok((gid, circle_name, epoch, gpk, admin, created_at, canonical_root))
}, },
); );
match result { match result {
Ok((gid, circle_name, epoch, gpk, admin, created_at, canonical_root)) => { Ok((gid, circle_name, epoch, gpk, admin, created_at)) => {
Ok(Some(Self::row_to_group_key(gid, circle_name, epoch, gpk, admin, created_at, canonical_root)?)) Ok(Some(GroupKeyRecord {
group_id: blob_to_nodeid(gid)?,
circle_name,
epoch: epoch as u64,
group_public_key: <[u8; 32]>::try_from(gpk.as_slice())
.map_err(|_| anyhow::anyhow!("invalid group public key"))?,
admin: blob_to_nodeid(admin)?,
created_at: created_at as u64,
}))
} }
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
Err(e) => Err(e.into()), Err(e) => Err(e.into()),
@ -2230,7 +2151,7 @@ impl Storage {
pub fn get_group_key_by_circle(&self, circle_name: &str) -> anyhow::Result<Option<GroupKeyRecord>> { pub fn get_group_key_by_circle(&self, circle_name: &str) -> anyhow::Result<Option<GroupKeyRecord>> {
let result = self.conn.query_row( let result = self.conn.query_row(
"SELECT group_id, circle_name, epoch, group_public_key, admin, created_at, canonical_root_post_id FROM group_keys WHERE circle_name = ?1", "SELECT group_id, circle_name, epoch, group_public_key, admin, created_at FROM group_keys WHERE circle_name = ?1",
params![circle_name], params![circle_name],
|row| { |row| {
let gid: Vec<u8> = row.get(0)?; let gid: Vec<u8> = row.get(0)?;
@ -2239,39 +2160,20 @@ impl Storage {
let gpk: Vec<u8> = row.get(3)?; let gpk: Vec<u8> = row.get(3)?;
let admin: Vec<u8> = row.get(4)?; let admin: Vec<u8> = row.get(4)?;
let created_at: i64 = row.get(5)?; let created_at: i64 = row.get(5)?;
let canonical_root: Option<Vec<u8>> = row.get(6)?; Ok((gid, circle_name, epoch, gpk, admin, created_at))
Ok((gid, circle_name, epoch, gpk, admin, created_at, canonical_root))
}, },
); );
match result { match result {
Ok((gid, circle_name, epoch, gpk, admin, created_at, canonical_root)) => { Ok((gid, circle_name, epoch, gpk, admin, created_at)) => {
Ok(Some(Self::row_to_group_key(gid, circle_name, epoch, gpk, admin, created_at, canonical_root)?)) Ok(Some(GroupKeyRecord {
} group_id: blob_to_nodeid(gid)?,
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), circle_name,
Err(e) => Err(e.into()), epoch: epoch as u64,
} group_public_key: <[u8; 32]>::try_from(gpk.as_slice())
} .map_err(|_| anyhow::anyhow!("invalid group public key"))?,
admin: blob_to_nodeid(admin)?,
/// Look up a group by its canonical root post id. Returns None if the created_at: created_at as u64,
/// record has no canonical_root_post_id (i.e. it's a circle). }))
pub fn get_group_by_canonical_root(&self, root_post_id: &PostId) -> anyhow::Result<Option<GroupKeyRecord>> {
let result = self.conn.query_row(
"SELECT group_id, circle_name, epoch, group_public_key, admin, created_at, canonical_root_post_id FROM group_keys WHERE canonical_root_post_id = ?1",
params![root_post_id.as_slice()],
|row| {
let gid: Vec<u8> = row.get(0)?;
let circle_name: String = row.get(1)?;
let epoch: i64 = row.get(2)?;
let gpk: Vec<u8> = row.get(3)?;
let admin: Vec<u8> = row.get(4)?;
let created_at: i64 = row.get(5)?;
let canonical_root: Option<Vec<u8>> = row.get(6)?;
Ok((gid, circle_name, epoch, gpk, admin, created_at, canonical_root))
},
);
match result {
Ok((gid, circle_name, epoch, gpk, admin, created_at, canonical_root)) => {
Ok(Some(Self::row_to_group_key(gid, circle_name, epoch, gpk, admin, created_at, canonical_root)?))
} }
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
Err(e) => Err(e.into()), Err(e) => Err(e.into()),
@ -2915,6 +2817,111 @@ impl Storage {
Ok(count > 0) Ok(count > 0)
} }
// ---- Audience ----
/// Store an audience relationship.
pub fn store_audience(
&self,
node_id: &NodeId,
direction: AudienceDirection,
status: AudienceStatus,
) -> anyhow::Result<()> {
let now = now_ms();
let dir_str = match direction {
AudienceDirection::Inbound => "inbound",
AudienceDirection::Outbound => "outbound",
};
let status_str = match status {
AudienceStatus::Pending => "pending",
AudienceStatus::Approved => "approved",
AudienceStatus::Denied => "denied",
};
let approved_at = if status == AudienceStatus::Approved {
Some(now)
} else {
None
};
self.conn.execute(
"INSERT INTO audience (node_id, direction, status, requested_at, approved_at)
VALUES (?1, ?2, ?3, ?4, ?5)
ON CONFLICT(node_id, direction) DO UPDATE SET
status = ?3, approved_at = COALESCE(?5, audience.approved_at)",
params![node_id.as_slice(), dir_str, status_str, now, approved_at],
)?;
Ok(())
}
/// Get audience members by direction and status.
pub fn list_audience(
&self,
direction: AudienceDirection,
status: Option<AudienceStatus>,
) -> anyhow::Result<Vec<AudienceRecord>> {
let dir_str = match direction {
AudienceDirection::Inbound => "inbound",
AudienceDirection::Outbound => "outbound",
};
let (query, bind_status) = match status {
Some(s) => {
let s_str = match s {
AudienceStatus::Pending => "pending",
AudienceStatus::Approved => "approved",
AudienceStatus::Denied => "denied",
};
(
"SELECT node_id, direction, status, requested_at, approved_at FROM audience WHERE direction = ?1 AND status = ?2",
Some(s_str),
)
}
None => (
"SELECT node_id, direction, status, requested_at, approved_at FROM audience WHERE direction = ?1",
None,
),
};
let mut records = Vec::new();
if let Some(status_str) = bind_status {
let mut stmt = self.conn.prepare(query)?;
let mut rows = stmt.query(params![dir_str, status_str])?;
while let Some(row) = rows.next()? {
records.push(row_to_audience_record(row)?);
}
} else {
let mut stmt = self.conn.prepare(query)?;
let mut rows = stmt.query(params![dir_str])?;
while let Some(row) = rows.next()? {
records.push(row_to_audience_record(row)?);
}
}
Ok(records)
}
/// Get approved inbound audience members (nodes we push posts to).
pub fn list_audience_members(&self) -> anyhow::Result<Vec<NodeId>> {
let records = self.list_audience(
AudienceDirection::Inbound,
Some(AudienceStatus::Approved),
)?;
Ok(records.into_iter().map(|r| r.node_id).collect())
}
/// Remove an audience relationship.
pub fn remove_audience(
&self,
node_id: &NodeId,
direction: AudienceDirection,
) -> anyhow::Result<()> {
let dir_str = match direction {
AudienceDirection::Inbound => "inbound",
AudienceDirection::Outbound => "outbound",
};
self.conn.execute(
"DELETE FROM audience WHERE node_id = ?1 AND direction = ?2",
params![node_id.as_slice(), dir_str],
)?;
Ok(())
}
// ---- Reach: N2/N3 ---- // ---- Reach: N2/N3 ----
/// Replace a peer's entire N1 set in reachable_n2 (their N1 share → our N2). /// Replace a peer's entire N1 set in reachable_n2 (their N1 share → our N2).
@ -3561,18 +3568,32 @@ impl Storage {
Ok(count > 0) Ok(count > 0)
} }
/// Bulk-populate social_routes from follows + peers. /// Bulk-populate social_routes from follows + audience + peers.
/// Returns the number of routes created/updated. /// Returns the number of routes created/updated.
pub fn rebuild_social_routes(&self) -> anyhow::Result<usize> { pub fn rebuild_social_routes(&self) -> anyhow::Result<usize> {
let now = now_ms() as u64; let now = now_ms() as u64;
let mut count = 0; let mut count = 0;
// v0.6.2: audience removed; social routes are built purely from follows. // Collect follows
let follows: std::collections::HashSet<NodeId> = let follows: std::collections::HashSet<NodeId> =
self.list_follows()?.into_iter().collect(); self.list_follows()?.into_iter().collect();
for nid in follows { // Collect approved audience members (inbound = they are in our audience)
let relation = SocialRelation::Follow; let audience_members: std::collections::HashSet<NodeId> =
self.list_audience_members()?.into_iter().collect();
// Union of all social contacts
let mut all_contacts: std::collections::HashSet<NodeId> = std::collections::HashSet::new();
all_contacts.extend(&follows);
all_contacts.extend(&audience_members);
for nid in all_contacts {
let relation = match (follows.contains(&nid), audience_members.contains(&nid)) {
(true, true) => SocialRelation::Mutual,
(true, false) => SocialRelation::Follow,
(false, true) => SocialRelation::Audience,
(false, false) => continue,
};
// Look up addresses from peers table // Look up addresses from peers table
let addresses: Vec<std::net::SocketAddr> = self let addresses: Vec<std::net::SocketAddr> = self
@ -4606,12 +4627,11 @@ impl Storage {
/// deleted_at tombstone, store it so the tombstone propagates. /// deleted_at tombstone, store it so the tombstone propagates.
pub fn store_comment(&self, comment: &InlineComment) -> anyhow::Result<()> { pub fn store_comment(&self, comment: &InlineComment) -> anyhow::Result<()> {
self.conn.execute( self.conn.execute(
"INSERT INTO comments (author, post_id, content, timestamp_ms, signature, deleted_at, ref_post_id) "INSERT INTO comments (author, post_id, content, timestamp_ms, signature, deleted_at)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7) VALUES (?1, ?2, ?3, ?4, ?5, ?6)
ON CONFLICT(author, post_id, timestamp_ms) DO UPDATE SET ON CONFLICT(author, post_id, timestamp_ms) DO UPDATE SET
content = CASE WHEN excluded.deleted_at IS NOT NULL THEN content ELSE excluded.content END, content = CASE WHEN excluded.deleted_at IS NOT NULL THEN content ELSE excluded.content END,
deleted_at = CASE WHEN excluded.deleted_at IS NOT NULL THEN excluded.deleted_at ELSE deleted_at END, deleted_at = CASE WHEN excluded.deleted_at IS NOT NULL THEN excluded.deleted_at ELSE deleted_at END",
ref_post_id = COALESCE(excluded.ref_post_id, ref_post_id)",
params![ params![
comment.author.as_slice(), comment.author.as_slice(),
comment.post_id.as_slice(), comment.post_id.as_slice(),
@ -4619,7 +4639,6 @@ impl Storage {
comment.timestamp_ms as i64, comment.timestamp_ms as i64,
comment.signature, comment.signature,
comment.deleted_at.map(|v| v as i64), comment.deleted_at.map(|v| v as i64),
comment.ref_post_id.as_ref().map(|r| r.as_slice()),
], ],
)?; )?;
Ok(()) Ok(())
@ -4646,7 +4665,7 @@ impl Storage {
/// Get live (non-tombstoned) comments for a post. Used for UI display. /// Get live (non-tombstoned) comments for a post. Used for UI display.
pub fn get_comments(&self, post_id: &PostId) -> anyhow::Result<Vec<InlineComment>> { pub fn get_comments(&self, post_id: &PostId) -> anyhow::Result<Vec<InlineComment>> {
let mut stmt = self.conn.prepare( let mut stmt = self.conn.prepare(
"SELECT author, post_id, content, timestamp_ms, signature, ref_post_id "SELECT author, post_id, content, timestamp_ms, signature
FROM comments WHERE post_id = ?1 AND deleted_at IS NULL ORDER BY timestamp_ms ASC" FROM comments WHERE post_id = ?1 AND deleted_at IS NULL ORDER BY timestamp_ms ASC"
)?; )?;
let rows = stmt.query_map(params![post_id.as_slice()], |row| { let rows = stmt.query_map(params![post_id.as_slice()], |row| {
@ -4655,18 +4674,13 @@ impl Storage {
let content: String = row.get(2)?; let content: String = row.get(2)?;
let ts: i64 = row.get(3)?; let ts: i64 = row.get(3)?;
let sig: Vec<u8> = row.get(4)?; let sig: Vec<u8> = row.get(4)?;
let ref_post: Option<Vec<u8>> = row.get(5)?; Ok((author, pid, content, ts, sig))
Ok((author, pid, content, ts, sig, ref_post))
})?; })?;
let mut result = Vec::new(); let mut result = Vec::new();
for row in rows { for row in rows {
let (author_bytes, pid_bytes, content, ts, sig, ref_post) = row?; let (author_bytes, pid_bytes, content, ts, sig) = row?;
let author = blob_to_nodeid(author_bytes)?; let author = blob_to_nodeid(author_bytes)?;
let post_id = blob_to_postid(pid_bytes)?; let post_id = blob_to_postid(pid_bytes)?;
let ref_post_id = match ref_post {
Some(b) => Some(blob_to_postid(b)?),
None => None,
};
result.push(InlineComment { result.push(InlineComment {
author, author,
post_id, post_id,
@ -4674,7 +4688,6 @@ impl Storage {
timestamp_ms: ts as u64, timestamp_ms: ts as u64,
signature: sig, signature: sig,
deleted_at: None, deleted_at: None,
ref_post_id,
}); });
} }
Ok(result) Ok(result)
@ -4684,7 +4697,7 @@ impl Storage {
/// so tombstones propagate through pull-based sync. /// so tombstones propagate through pull-based sync.
pub fn get_comments_with_tombstones(&self, post_id: &PostId) -> anyhow::Result<Vec<InlineComment>> { pub fn get_comments_with_tombstones(&self, post_id: &PostId) -> anyhow::Result<Vec<InlineComment>> {
let mut stmt = self.conn.prepare( let mut stmt = self.conn.prepare(
"SELECT author, post_id, content, timestamp_ms, signature, deleted_at, ref_post_id "SELECT author, post_id, content, timestamp_ms, signature, deleted_at
FROM comments WHERE post_id = ?1 ORDER BY timestamp_ms ASC" FROM comments WHERE post_id = ?1 ORDER BY timestamp_ms ASC"
)?; )?;
let rows = stmt.query_map(params![post_id.as_slice()], |row| { let rows = stmt.query_map(params![post_id.as_slice()], |row| {
@ -4694,18 +4707,13 @@ impl Storage {
let ts: i64 = row.get(3)?; let ts: i64 = row.get(3)?;
let sig: Vec<u8> = row.get(4)?; let sig: Vec<u8> = row.get(4)?;
let del: Option<i64> = row.get(5)?; let del: Option<i64> = row.get(5)?;
let ref_post: Option<Vec<u8>> = row.get(6)?; Ok((author, pid, content, ts, sig, del))
Ok((author, pid, content, ts, sig, del, ref_post))
})?; })?;
let mut result = Vec::new(); let mut result = Vec::new();
for row in rows { for row in rows {
let (author_bytes, pid_bytes, content, ts, sig, del, ref_post) = row?; let (author_bytes, pid_bytes, content, ts, sig, del) = row?;
let author = blob_to_nodeid(author_bytes)?; let author = blob_to_nodeid(author_bytes)?;
let post_id = blob_to_postid(pid_bytes)?; let post_id = blob_to_postid(pid_bytes)?;
let ref_post_id = match ref_post {
Some(b) => Some(blob_to_postid(b)?),
None => None,
};
result.push(InlineComment { result.push(InlineComment {
author, author,
post_id, post_id,
@ -4713,7 +4721,6 @@ impl Storage {
timestamp_ms: ts as u64, timestamp_ms: ts as u64,
signature: sig, signature: sig,
deleted_at: del.map(|v| v as u64), deleted_at: del.map(|v| v as u64),
ref_post_id,
}); });
} }
Ok(result) Ok(result)
@ -4854,6 +4861,30 @@ fn now_ms() -> i64 {
.as_millis() as i64 .as_millis() as i64
} }
fn row_to_audience_record(row: &rusqlite::Row) -> anyhow::Result<AudienceRecord> {
let node_id = blob_to_nodeid(row.get(0)?)?;
let dir_str: String = row.get(1)?;
let status_str: String = row.get(2)?;
let requested_at = row.get::<_, i64>(3)? as u64;
let approved_at: Option<i64> = row.get(4)?;
let direction = match dir_str.as_str() {
"inbound" => AudienceDirection::Inbound,
_ => AudienceDirection::Outbound,
};
let status = match status_str.as_str() {
"approved" => AudienceStatus::Approved,
"denied" => AudienceStatus::Denied,
_ => AudienceStatus::Pending,
};
Ok(AudienceRecord {
node_id,
direction,
status,
requested_at,
approved_at: approved_at.map(|v| v as u64),
})
}
fn row_to_peer_record(row: &rusqlite::Row) -> anyhow::Result<PeerRecord> { fn row_to_peer_record(row: &rusqlite::Row) -> anyhow::Result<PeerRecord> {
let node_id = blob_to_nodeid(row.get(0)?)?; let node_id = blob_to_nodeid(row.get(0)?)?;
let addrs_json: String = row.get(1)?; let addrs_json: String = row.get(1)?;
@ -5212,7 +5243,30 @@ mod tests {
assert_eq!(s.count_mesh_peers_by_kind(PeerSlotKind::Local).unwrap(), 0); assert_eq!(s.count_mesh_peers_by_kind(PeerSlotKind::Local).unwrap(), 0);
} }
// ---- Social routes tests ---- #[test]
fn audience_crud() {
use crate::types::{AudienceDirection, AudienceStatus};
let s = temp_storage();
let nid = make_node_id(1);
s.store_audience(&nid, AudienceDirection::Inbound, AudienceStatus::Pending).unwrap();
let pending = s.list_audience(AudienceDirection::Inbound, Some(AudienceStatus::Pending)).unwrap();
assert_eq!(pending.len(), 1);
assert_eq!(pending[0].status, AudienceStatus::Pending);
// Approve
s.store_audience(&nid, AudienceDirection::Inbound, AudienceStatus::Approved).unwrap();
let members = s.list_audience_members().unwrap();
assert_eq!(members.len(), 1);
assert_eq!(members[0], nid);
// Remove
s.remove_audience(&nid, AudienceDirection::Inbound).unwrap();
let members = s.list_audience_members().unwrap();
assert!(members.is_empty());
}
// ---- Social routes tests ----
#[test] #[test]
fn social_route_crud() { fn social_route_crud() {
@ -5261,21 +5315,28 @@ mod tests {
#[test] #[test]
fn social_route_rebuild() { fn social_route_rebuild() {
use crate::types::SocialRelation; use crate::types::{AudienceDirection, AudienceStatus, SocialRelation};
let s = temp_storage(); let s = temp_storage();
let follow_a = make_node_id(1); let follow_nid = make_node_id(1);
let follow_b = make_node_id(2); let audience_nid = make_node_id(2);
let mutual_nid = make_node_id(3);
s.add_follow(&follow_a).unwrap(); s.add_follow(&follow_nid).unwrap();
s.add_follow(&follow_b).unwrap(); s.add_follow(&mutual_nid).unwrap();
s.store_audience(&audience_nid, AudienceDirection::Inbound, AudienceStatus::Approved).unwrap();
s.store_audience(&mutual_nid, AudienceDirection::Inbound, AudienceStatus::Approved).unwrap();
let count = s.rebuild_social_routes().unwrap(); let count = s.rebuild_social_routes().unwrap();
assert_eq!(count, 2); assert_eq!(count, 3);
let route_a = s.get_social_route(&follow_a).unwrap().unwrap(); let follow_route = s.get_social_route(&follow_nid).unwrap().unwrap();
assert_eq!(route_a.relation, SocialRelation::Follow); assert_eq!(follow_route.relation, SocialRelation::Follow);
let route_b = s.get_social_route(&follow_b).unwrap().unwrap();
assert_eq!(route_b.relation, SocialRelation::Follow); let audience_route = s.get_social_route(&audience_nid).unwrap().unwrap();
assert_eq!(audience_route.relation, SocialRelation::Audience);
let mutual_route = s.get_social_route(&mutual_nid).unwrap().unwrap();
assert_eq!(mutual_route.relation, SocialRelation::Mutual);
} }
#[test] #[test]
@ -5503,7 +5564,6 @@ mod tests {
group_public_key: pubkey, group_public_key: pubkey,
admin, admin,
created_at: 1000, created_at: 1000,
canonical_root_post_id: None,
}; };
s.create_group_key(&record, Some(&seed)).unwrap(); s.create_group_key(&record, Some(&seed)).unwrap();
@ -5561,49 +5621,6 @@ mod tests {
assert!(s.get_group_seed(&group_id, 1).unwrap().is_none()); assert!(s.get_group_seed(&group_id, 1).unwrap().is_none());
} }
#[test]
fn group_lookup_by_canonical_root() {
let s = temp_storage();
let admin = make_node_id(1);
let group_id = [43u8; 32];
let pubkey = [100u8; 32];
let root = make_post_id(99);
let record = crate::types::GroupKeyRecord {
group_id,
circle_name: "group:test".to_string(),
epoch: 1,
group_public_key: pubkey,
admin,
created_at: 1000,
canonical_root_post_id: Some(root),
};
s.create_group_key(&record, None).unwrap();
// Lookup by root returns the group.
let got = s.get_group_by_canonical_root(&root).unwrap().unwrap();
assert_eq!(got.group_id, group_id);
assert_eq!(got.canonical_root_post_id, Some(root));
// A different root returns None.
let other = make_post_id(7);
assert!(s.get_group_by_canonical_root(&other).unwrap().is_none());
// A circle (no canonical_root) is not returned when looking up by root.
let circle_record = crate::types::GroupKeyRecord {
group_id: [44u8; 32],
circle_name: "friends".to_string(),
epoch: 1,
group_public_key: [101u8; 32],
admin,
created_at: 1000,
canonical_root_post_id: None,
};
s.create_group_key(&circle_record, None).unwrap();
// The circle has no root, so it's invisible to the root lookup.
assert!(s.get_group_by_canonical_root(&make_post_id(0)).unwrap().is_none());
}
#[test] #[test]
fn group_seeds_map() { fn group_seeds_map() {
let s = temp_storage(); let s = temp_storage();
@ -5619,7 +5636,6 @@ mod tests {
group_public_key: pubkey, group_public_key: pubkey,
admin, admin,
created_at: 1000, created_at: 1000,
canonical_root_post_id: None,
}; };
s.create_group_key(&record, Some(&seed)).unwrap(); s.create_group_key(&record, Some(&seed)).unwrap();
s.store_group_seed(&group_id, 1, &seed).unwrap(); s.store_group_seed(&group_id, 1, &seed).unwrap();
@ -6169,7 +6185,6 @@ mod tests {
timestamp_ms: 1000, timestamp_ms: 1000,
signature: vec![0u8; 64], signature: vec![0u8; 64],
deleted_at: None, deleted_at: None,
ref_post_id: None,
}).unwrap(); }).unwrap();
s.store_comment(&InlineComment { s.store_comment(&InlineComment {
@ -6179,7 +6194,6 @@ mod tests {
timestamp_ms: 1001, timestamp_ms: 1001,
signature: vec![1u8; 64], signature: vec![1u8; 64],
deleted_at: None, deleted_at: None,
ref_post_id: None,
}).unwrap(); }).unwrap();
let comments = s.get_comments(&post_id).unwrap(); let comments = s.get_comments(&post_id).unwrap();
@ -6189,33 +6203,6 @@ mod tests {
assert_eq!(s.get_comment_count(&post_id).unwrap(), 2); assert_eq!(s.get_comment_count(&post_id).unwrap(), 2);
} }
#[test]
fn rich_comment_ref_post_id_roundtrip() {
use crate::types::InlineComment;
let s = temp_storage();
let post_id = make_post_id(1);
let author = make_node_id(5);
let ref_post = make_post_id(42);
s.store_comment(&InlineComment {
author,
post_id,
content: "(preview of a long body)".to_string(),
timestamp_ms: 2000,
signature: vec![9u8; 64],
deleted_at: None,
ref_post_id: Some(ref_post),
}).unwrap();
let live = s.get_comments(&post_id).unwrap();
assert_eq!(live.len(), 1);
assert_eq!(live[0].ref_post_id, Some(ref_post));
let all = s.get_comments_with_tombstones(&post_id).unwrap();
assert_eq!(all.len(), 1);
assert_eq!(all[0].ref_post_id, Some(ref_post));
}
#[test] #[test]
fn comment_policy_crud() { fn comment_policy_crud() {
use crate::types::{CommentPermission, CommentPolicy, ModerationMode, ReactPermission}; use crate::types::{CommentPermission, CommentPolicy, ModerationMode, ReactPermission};
@ -6226,7 +6213,7 @@ mod tests {
assert!(s.get_comment_policy(&post_id).unwrap().is_none()); assert!(s.get_comment_policy(&post_id).unwrap().is_none());
let policy = CommentPolicy { let policy = CommentPolicy {
allow_comments: CommentPermission::FollowersOnly, allow_comments: CommentPermission::AudienceOnly,
allow_reacts: ReactPermission::Public, allow_reacts: ReactPermission::Public,
moderation: ModerationMode::AuthorBlocklist, moderation: ModerationMode::AuthorBlocklist,
blocklist: vec![make_node_id(99)], blocklist: vec![make_node_id(99)],
@ -6234,7 +6221,7 @@ mod tests {
s.set_comment_policy(&post_id, &policy).unwrap(); s.set_comment_policy(&post_id, &policy).unwrap();
let loaded = s.get_comment_policy(&post_id).unwrap().unwrap(); let loaded = s.get_comment_policy(&post_id).unwrap().unwrap();
assert_eq!(loaded.allow_comments, CommentPermission::FollowersOnly); assert_eq!(loaded.allow_comments, CommentPermission::AudienceOnly);
assert_eq!(loaded.allow_reacts, ReactPermission::Public); assert_eq!(loaded.allow_reacts, ReactPermission::Public);
assert_eq!(loaded.blocklist.len(), 1); assert_eq!(loaded.blocklist.len(), 1);

View file

@ -157,6 +157,42 @@ pub struct WormResult {
pub blob_holder: Option<NodeId>, pub blob_holder: Option<NodeId>,
} }
/// Audience relationship direction
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum AudienceDirection {
/// They are in our audience (we push to them)
Inbound,
/// We are in their audience (they push to us)
Outbound,
}
/// Audience membership status
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum AudienceStatus {
Pending,
Approved,
Denied,
}
/// An audience membership record
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AudienceRecord {
pub node_id: NodeId,
pub direction: AudienceDirection,
pub status: AudienceStatus,
pub requested_at: u64,
pub approved_at: Option<u64>,
}
/// Audience approval mode setting
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum AudienceApprovalMode {
/// Auto-accept all audience join requests
PublicApprove,
/// Queue requests for manual review
ApprovalQueue,
}
// --- Encryption / Circles --- // --- Encryption / Circles ---
/// Circle name (unique per node) /// Circle name (unique per node)
@ -177,13 +213,7 @@ pub struct GroupMemberKey {
pub wrapped_group_key: Vec<u8>, pub wrapped_group_key: Vec<u8>,
} }
/// A group key record (circle ↔ group key binding). /// A group key record (circle ↔ group key binding)
///
/// v0.6.2: `canonical_root_post_id` distinguishes **groups** (many-way,
/// anchored at a public root post; any member can post) from **circles**
/// (one-way, admin-only, `None`). The encryption primitives are identical;
/// the flag is a UX + query hint so UIs can cluster group posts under
/// their root.
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GroupKeyRecord { pub struct GroupKeyRecord {
pub group_id: GroupId, pub group_id: GroupId,
@ -192,10 +222,6 @@ pub struct GroupKeyRecord {
pub group_public_key: [u8; 32], pub group_public_key: [u8; 32],
pub admin: NodeId, pub admin: NodeId,
pub created_at: u64, pub created_at: u64,
/// When set, this record represents a group rooted at the given public
/// post. When `None`, the record is a traditional circle.
#[serde(default)]
pub canonical_root_post_id: Option<PostId>,
} }
/// Visibility of a post — separate from Post struct so it doesn't affect PostId /// Visibility of a post — separate from Post struct so it doesn't affect PostId
@ -225,15 +251,7 @@ pub struct WrappedKey {
pub wrapped_cek: Vec<u8>, pub wrapped_cek: Vec<u8>,
} }
/// User-facing intent for post visibility (resolved to recipients before encryption). /// User-facing intent for post visibility (resolved to recipients before encryption)
///
/// A few variants exist for structural distinctions rather than visibility:
/// - `Control` — the post carries a signed operation (delete / visibility
/// update) that receivers apply. Wire visibility is Public; the post is
/// filtered out of feeds and rendered nowhere.
/// - `Profile` — the post carries persona display metadata (display_name,
/// bio, avatar). Wire visibility is Public; the post is not shown in the
/// feed but consulted when rendering the author's name on other posts.
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub enum VisibilityIntent { pub enum VisibilityIntent {
Public, Public,
@ -243,74 +261,6 @@ pub enum VisibilityIntent {
Circle(String), Circle(String),
/// Specific recipients /// Specific recipients
Direct(Vec<NodeId>), Direct(Vec<NodeId>),
/// Protocol-control post (delete / visibility change).
Control,
/// Persona profile post (display_name, bio, avatar).
Profile,
/// Encrypted distribution of a group/circle seed to that group's
/// members. Replaces the v0.6.1 `GroupKeyDistribute` wire push with a
/// standard encrypted post that propagates via the CDN. Members
/// decrypt with their posting secret to recover the seed.
GroupKeyDistribute,
}
/// Content payload of a `VisibilityIntent::Profile` post — persona display
/// metadata (display_name, bio, avatar_cid) signed by the posting identity.
/// The post's `author` IS the posting identity; `signature` is an ed25519
/// signature by that identity's secret over the fields (see `crypto::sign_profile`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProfilePostContent {
pub display_name: String,
#[serde(default)]
pub bio: String,
#[serde(default)]
pub avatar_cid: Option<[u8; 32]>,
pub timestamp_ms: u64,
/// 64-byte ed25519 signature. See `crypto::sign_profile` for the byte
/// layout signed by the posting identity.
pub signature: Vec<u8>,
}
/// Content payload of a `VisibilityIntent::GroupKeyDistribute` post.
/// Wrapped inside a standard `PostVisibility::Encrypted` envelope — members
/// decrypt via `crypto::decrypt_post` with their posting secret, then parse
/// this struct to recover the group seed and metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GroupKeyDistributionContent {
pub group_id: GroupId,
pub circle_name: String,
pub epoch: GroupEpoch,
pub group_public_key: [u8; 32],
pub admin: NodeId,
#[serde(default)]
pub canonical_root_post_id: Option<PostId>,
/// The raw group seed (32 bytes). This is the sensitive field — its
/// confidentiality is protected by the enclosing encrypted post, which
/// is wrapped to each member's posting public key.
pub group_seed: [u8; 32],
}
/// Content payload of a `VisibilityIntent::Control` post, serialized as JSON
/// into the post's content field.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "op", rename_all = "snake_case")]
pub enum ControlOp {
/// Delete post `post_id`. Signature is over `b"ctrl:delete:" || post_id
/// || timestamp_ms (LE)`, by the target post's author.
DeletePost {
post_id: PostId,
timestamp_ms: u64,
/// 64-byte ed25519 signature
signature: Vec<u8>,
},
/// Update post `post_id` visibility. Signature is over
/// `b"ctrl:vis:" || post_id || canonical(new_visibility) || timestamp_ms (LE)`.
UpdateVisibility {
post_id: PostId,
new_visibility: PostVisibility,
timestamp_ms: u64,
signature: Vec<u8>,
},
} }
/// A named group of recipients /// A named group of recipients
@ -662,17 +612,20 @@ impl std::str::FromStr for ReachMethod {
} }
} }
/// Social relationship type. v0.6.2: audience removed; only `Follow` remains. /// Social relationship type
/// Kept as an enum for forward compatibility (future persona-level relations).
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SocialRelation { pub enum SocialRelation {
Follow, Follow,
Audience,
Mutual,
} }
impl std::fmt::Display for SocialRelation { impl std::fmt::Display for SocialRelation {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self { match self {
SocialRelation::Follow => write!(f, "follow"), SocialRelation::Follow => write!(f, "follow"),
SocialRelation::Audience => write!(f, "audience"),
SocialRelation::Mutual => write!(f, "mutual"),
} }
} }
} }
@ -682,8 +635,8 @@ impl std::str::FromStr for SocialRelation {
fn from_str(s: &str) -> Result<Self, Self::Err> { fn from_str(s: &str) -> Result<Self, Self::Err> {
match s { match s {
"follow" => Ok(SocialRelation::Follow), "follow" => Ok(SocialRelation::Follow),
// Legacy DB values from v0.6.1 and earlier — map to Follow. "audience" => Ok(SocialRelation::Audience),
"audience" | "mutual" => Ok(SocialRelation::Follow), "mutual" => Ok(SocialRelation::Mutual),
_ => Err(anyhow::anyhow!("unknown social relation: {}", s)), _ => Err(anyhow::anyhow!("unknown social relation: {}", s)),
} }
} }
@ -811,36 +764,22 @@ pub struct Reaction {
pub signature: Vec<u8>, pub signature: Vec<u8>,
} }
/// An inline comment on a post. /// An inline comment on a post
///
/// v0.6.2 adds `ref_post_id`: when present, `content` is a short preview
/// string and the full comment body (long text, attachments, rich formatting)
/// lives in a separate referenced Post authored by the commenter. Clients
/// pull the referenced post lazily when rendering the expanded view.
/// When `ref_post_id` is `None`, `content` is the complete comment text
/// (the v0.6.1 shape).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct InlineComment { pub struct InlineComment {
/// Comment author /// Comment author
pub author: NodeId, pub author: NodeId,
/// Which post this comment is on /// Which post this comment is on
pub post_id: PostId, pub post_id: PostId,
/// Either the full comment text (short comments) or a short preview of /// Comment text
/// the referenced post (when `ref_post_id` is set).
pub content: String, pub content: String,
/// When the comment was created (ms) /// When the comment was created (ms)
pub timestamp_ms: u64, pub timestamp_ms: u64,
/// ed25519 signature. Binds author/post_id/content/timestamp_ms, plus /// ed25519 signature over BLAKE3(author || post_id || content || timestamp_ms)
/// `ref_post_id` when present. See `crypto::sign_comment`.
pub signature: Vec<u8>, pub signature: Vec<u8>,
/// Tombstone timestamp — if set, this comment has been soft-deleted /// Tombstone timestamp — if set, this comment has been soft-deleted
#[serde(default)] #[serde(default)]
pub deleted_at: Option<u64>, pub deleted_at: Option<u64>,
/// Optional reference to a full-content Post (long body + attachments).
/// When set, `content` is a preview; readers fetch the referenced post
/// for the expanded view.
#[serde(default)]
pub ref_post_id: Option<PostId>,
} }
/// Permission level for comments on a post /// Permission level for comments on a post
@ -848,9 +787,8 @@ pub struct InlineComment {
pub enum CommentPermission { pub enum CommentPermission {
/// Anyone can comment /// Anyone can comment
Public, Public,
/// Only people the author follows publicly can comment. /// Only people in author's audience can comment
/// Renamed from `AudienceOnly` in v0.6.2 when audience was removed. AudienceOnly,
FollowersOnly,
/// Comments disabled /// Comments disabled
None, None,
} }
@ -885,9 +823,8 @@ impl Default for ReactPermission {
pub enum ModerationMode { pub enum ModerationMode {
/// Author maintains a blocklist of users /// Author maintains a blocklist of users
AuthorBlocklist, AuthorBlocklist,
/// Only people the author follows publicly can engage. /// Only audience members can engage
/// Renamed from `AudienceOnly` in v0.6.2. AudienceOnly,
FollowersOnly,
} }
impl Default for ModerationMode { impl Default for ModerationMode {

View file

@ -1,6 +1,6 @@
[package] [package]
name = "itsgoin-desktop" name = "itsgoin-desktop"
version = "0.6.2" version = "0.6.1"
edition = "2021" edition = "2021"
[lib] [lib]

View file

@ -238,9 +238,6 @@ async fn post_to_dto(
VisibilityIntent::Friends => "friends".to_string(), VisibilityIntent::Friends => "friends".to_string(),
VisibilityIntent::Circle(_) => "circle".to_string(), VisibilityIntent::Circle(_) => "circle".to_string(),
VisibilityIntent::Direct(_) => "direct".to_string(), VisibilityIntent::Direct(_) => "direct".to_string(),
VisibilityIntent::Control => "control".to_string(),
VisibilityIntent::Profile => "profile".to_string(),
VisibilityIntent::GroupKeyDistribute => "group_key_distribute".to_string(),
}, },
_ => "unknown".to_string(), _ => "unknown".to_string(),
} }
@ -1357,6 +1354,111 @@ async fn list_known_anchors(state: State<'_, AppNode>) -> Result<Vec<KnownAnchor
Ok(dtos) Ok(dtos)
} }
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct AudienceDto {
node_id: String,
display_name: Option<String>,
direction: String,
status: String,
requested_at: u64,
approved_at: Option<u64>,
}
#[tauri::command]
async fn list_audience(state: State<'_, AppNode>) -> Result<Vec<AudienceDto>, String> {
let node = get_node(&state).await;
let records = node
.list_audience(
itsgoin_core::types::AudienceDirection::Inbound,
None,
)
.await
.map_err(|e| e.to_string())?;
let mut dtos = Vec::with_capacity(records.len());
for r in &records {
let display_name = node.get_display_name(&r.node_id).await.unwrap_or(None);
let direction = match r.direction {
itsgoin_core::types::AudienceDirection::Inbound => "inbound",
itsgoin_core::types::AudienceDirection::Outbound => "outbound",
};
let status = match r.status {
itsgoin_core::types::AudienceStatus::Pending => "pending",
itsgoin_core::types::AudienceStatus::Approved => "approved",
itsgoin_core::types::AudienceStatus::Denied => "denied",
};
dtos.push(AudienceDto {
node_id: hex::encode(r.node_id),
display_name,
direction: direction.to_string(),
status: status.to_string(),
requested_at: r.requested_at,
approved_at: r.approved_at,
});
}
Ok(dtos)
}
#[tauri::command]
async fn list_audience_outbound(state: State<'_, AppNode>) -> Result<Vec<AudienceDto>, String> {
let node = get_node(&state).await;
let records = node
.list_audience(
itsgoin_core::types::AudienceDirection::Outbound,
None,
)
.await
.map_err(|e| e.to_string())?;
let mut dtos = Vec::with_capacity(records.len());
for r in &records {
let display_name = node.get_display_name(&r.node_id).await.unwrap_or(None);
let status = match r.status {
itsgoin_core::types::AudienceStatus::Pending => "pending",
itsgoin_core::types::AudienceStatus::Approved => "approved",
itsgoin_core::types::AudienceStatus::Denied => "denied",
};
dtos.push(AudienceDto {
node_id: hex::encode(r.node_id),
display_name,
direction: "outbound".to_string(),
status: status.to_string(),
requested_at: r.requested_at,
approved_at: r.approved_at,
});
}
Ok(dtos)
}
#[tauri::command]
async fn request_audience(
state: State<'_, AppNode>,
node_id_hex: String,
) -> Result<(), String> {
let node = get_node(&state).await;
let nid = parse_node_id(&node_id_hex)?;
node.request_audience(&nid).await.map_err(|e| e.to_string())
}
#[tauri::command]
async fn approve_audience(
state: State<'_, AppNode>,
node_id_hex: String,
) -> Result<(), String> {
let node = get_node(&state).await;
let nid = parse_node_id(&node_id_hex)?;
node.approve_audience(&nid).await.map_err(|e| e.to_string())
}
#[tauri::command]
async fn remove_audience(
state: State<'_, AppNode>,
node_id_hex: String,
) -> Result<(), String> {
let node = get_node(&state).await;
let nid = parse_node_id(&node_id_hex)?;
node.remove_audience(&nid).await.map_err(|e| e.to_string())
}
#[derive(Serialize)] #[derive(Serialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct WormResultDto { struct WormResultDto {
@ -2297,7 +2399,7 @@ async fn set_comment_policy(
let node = get_node(&state).await; let node = get_node(&state).await;
let pid = hex_to_postid(&post_id)?; let pid = hex_to_postid(&post_id)?;
let comment_perm = match allow_comments.as_str() { let comment_perm = match allow_comments.as_str() {
"followers_only" | "audience_only" => itsgoin_core::types::CommentPermission::FollowersOnly, "audience_only" => itsgoin_core::types::CommentPermission::AudienceOnly,
"none" => itsgoin_core::types::CommentPermission::None, "none" => itsgoin_core::types::CommentPermission::None,
_ => itsgoin_core::types::CommentPermission::Public, _ => itsgoin_core::types::CommentPermission::Public,
}; };
@ -2907,6 +3009,11 @@ pub fn run() {
set_anchors, set_anchors,
list_anchor_peers, list_anchor_peers,
list_known_anchors, list_known_anchors,
list_audience,
list_audience_outbound,
request_audience,
approve_audience,
remove_audience,
list_connections, list_connections,
worm_lookup, worm_lookup,
list_social_routes, list_social_routes,

View file

@ -1,6 +1,6 @@
{ {
"productName": "itsgoin", "productName": "itsgoin",
"version": "0.6.2", "version": "0.6.1",
"identifier": "com.itsgoin.app", "identifier": "com.itsgoin.app",
"build": { "build": {
"frontendDist": "../../frontend", "frontendDist": "../../frontend",

View file

@ -1422,8 +1422,14 @@ async function loadPeerBios(container) {
async function loadFollows() { async function loadFollows() {
try { try {
// v0.6.2: audience removed. No more audience/mutual badges or request flow. const [follows, outbound, inbound] = await Promise.all([
const follows = await invoke('list_follows'); invoke('list_follows'),
invoke('list_audience_outbound'),
invoke('list_audience'),
]);
const outboundSet = new Set(outbound.map(r => r.nodeId));
const approvedSet = new Set(outbound.filter(r => r.status === 'approved').map(r => r.nodeId));
const inboundApprovedSet = new Set(inbound.filter(r => r.status === 'approved').map(r => r.nodeId));
// Filter out self before rendering // Filter out self before rendering
const others = follows.filter(f => f.nodeId !== myNodeId); const others = follows.filter(f => f.nodeId !== myNodeId);
@ -1437,21 +1443,34 @@ async function loadFollows() {
const label = escapeHtml(peerLabel(f.nodeId, f.displayName)); const label = escapeHtml(peerLabel(f.nodeId, f.displayName));
const isSelf = f.nodeId === myNodeId; const isSelf = f.nodeId === myNodeId;
let audienceBadge = '';
let mutualBadge = '';
let lastSeenHtml = ''; let lastSeenHtml = '';
let actions = ''; let actions = '';
if (isSelf) { if (isSelf) {
actions = '<span class="self-tag">(you)</span>'; actions = '<span class="self-tag">(you)</span>';
} else { } else {
if (inboundApprovedSet.has(f.nodeId)) {
mutualBadge = '<span class="mutual-badge">mutual</span>';
}
if (approvedSet.has(f.nodeId)) {
audienceBadge = '<span class="audience-badge">audience</span>';
} else if (outboundSet.has(f.nodeId)) {
audienceBadge = '<span class="audience-badge pending">requested</span>';
}
if (!f.isOnline && f.lastActivityMs > 0) { if (!f.isOnline && f.lastActivityMs > 0) {
lastSeenHtml = `<span class="last-seen">Last online: ${formatTimeAgo(f.lastActivityMs)}</span>`; lastSeenHtml = `<span class="last-seen">Last online: ${formatTimeAgo(f.lastActivityMs)}</span>`;
} }
const audienceBtn = !approvedSet.has(f.nodeId) && !outboundSet.has(f.nodeId)
? `<button class="btn btn-ghost btn-sm request-audience-btn" data-node-id="${f.nodeId}">Ask to join audience</button>`
: '';
const syncBtn = `<button class="btn btn-ghost btn-sm sync-peer-btn" data-node-id="${f.nodeId}" title="Sync posts from this peer">Sync</button>`; const syncBtn = `<button class="btn btn-ghost btn-sm sync-peer-btn" data-node-id="${f.nodeId}" title="Sync posts from this peer">Sync</button>`;
const msgBtn = `<button class="btn btn-ghost btn-sm msg-peer-btn" data-node-id="${f.nodeId}" title="Send message">msg</button>`; const msgBtn = `<button class="btn btn-ghost btn-sm msg-peer-btn" data-node-id="${f.nodeId}" title="Send message">msg</button>`;
const unfollowBtn = `<button class="btn btn-ghost btn-sm unfollow-btn" data-node-id="${f.nodeId}">Unfollow</button>`; const unfollowBtn = `<button class="btn btn-ghost btn-sm unfollow-btn" data-node-id="${f.nodeId}">Unfollow</button>`;
actions = `${syncBtn} ${msgBtn} ${unfollowBtn}`; actions = `${audienceBtn} ${syncBtn} ${msgBtn} ${unfollowBtn}`;
} }
return `<div class="peer-card" data-node-id="${f.nodeId}"> return `<div class="peer-card" data-node-id="${f.nodeId}">
<div class="peer-card-row">${icon} <a class="peer-name-link" data-node-id="${f.nodeId}">${label}</a></div> <div class="peer-card-row">${icon} <a class="peer-name-link" data-node-id="${f.nodeId}">${label}</a> ${mutualBadge} ${audienceBadge}</div>
${lastSeenHtml ? `<div class="peer-card-lastseen">${lastSeenHtml}</div>` : ''} ${lastSeenHtml ? `<div class="peer-card-lastseen">${lastSeenHtml}</div>` : ''}
<div class="peer-card-bio"></div> <div class="peer-card-bio"></div>
<div class="peer-card-actions">${actions}</div> <div class="peer-card-actions">${actions}</div>
@ -1543,6 +1562,22 @@ async function loadFollows() {
}); });
}); });
// Attach audience request handlers
followsList.querySelectorAll('.request-audience-btn').forEach(btn => {
btn.addEventListener('click', async () => {
btn.disabled = true;
try {
await invoke('request_audience', { nodeIdHex: btn.dataset.nodeId });
toast('Audience request sent!');
loadFollows();
} catch (e) {
toast('Error: ' + e);
} finally {
btn.disabled = false;
}
});
});
// Lazy-load bios // Lazy-load bios
loadPeerBios(followsList); loadPeerBios(followsList);
} }
@ -1667,13 +1702,81 @@ async function loadRedundancy() {
} }
} }
// v0.6.2: audience removed. loadAudience is a no-op kept so existing call // --- Audience management ---
// sites don't break; DOM panels (if still in markup) are hidden.
async function loadAudience() { async function loadAudience() {
if (audiencePendingList) audiencePendingList.style.display = 'none'; try {
if (audienceApprovedList) audienceApprovedList.style.display = 'none'; const records = await invoke('list_audience');
const headings = document.querySelectorAll('.audience-section, #audience-section'); const pending = records.filter(r => r.status === 'pending');
headings.forEach(el => { el.style.display = 'none'; }); const approved = records.filter(r => r.status === 'approved');
if (pending.length === 0) {
audiencePendingList.innerHTML = '<p class="empty-hint">No pending requests</p>';
} else {
audiencePendingList.innerHTML = pending.map(r => {
const label = escapeHtml(peerLabel(r.nodeId, r.displayName));
const icon = generateIdenticon(r.nodeId, 18);
return `<div class="peer-card">
<div class="peer-card-row">${icon} ${label}</div>
<div class="peer-card-meta"><span>${relativeTime(r.requestedAt)}</span></div>
<div class="peer-card-actions">
<button class="btn btn-primary btn-sm approve-audience-btn" data-node-id="${r.nodeId}">Approve</button>
<button class="btn btn-danger btn-sm deny-audience-btn" data-node-id="${r.nodeId}">Deny</button>
</div>
</div>`;
}).join('');
audiencePendingList.querySelectorAll('.approve-audience-btn').forEach(btn => {
btn.addEventListener('click', async () => {
btn.disabled = true;
try {
await invoke('approve_audience', { nodeIdHex: btn.dataset.nodeId });
toast('Audience approved');
loadAudience();
} catch (e) { toast('Error: ' + e); }
});
});
audiencePendingList.querySelectorAll('.deny-audience-btn').forEach(btn => {
btn.addEventListener('click', async () => {
btn.disabled = true;
try {
await invoke('remove_audience', { nodeIdHex: btn.dataset.nodeId });
toast('Audience denied');
loadAudience();
} catch (e) { toast('Error: ' + e); }
});
});
}
if (approved.length === 0) {
audienceApprovedList.innerHTML = '<p class="empty-hint">No approved audience members</p>';
} else {
audienceApprovedList.innerHTML = approved.map(r => {
const label = escapeHtml(peerLabel(r.nodeId, r.displayName));
const icon = generateIdenticon(r.nodeId, 18);
return `<div class="peer-card">
<div class="peer-card-row">${icon} ${label}</div>
<div class="peer-card-meta"><span>Approved ${r.approvedAt ? relativeTime(r.approvedAt) : ''}</span></div>
<div class="peer-card-actions">
<button class="btn btn-danger btn-sm remove-audience-btn" data-node-id="${r.nodeId}">Remove</button>
</div>
</div>`;
}).join('');
audienceApprovedList.querySelectorAll('.remove-audience-btn').forEach(btn => {
btn.addEventListener('click', async () => {
if (!confirm('Remove this audience member?')) return;
btn.disabled = true;
try {
await invoke('remove_audience', { nodeIdHex: btn.dataset.nodeId });
toast('Audience member removed');
loadAudience();
} catch (e) { toast('Error: ' + e); }
});
});
}
} catch (e) {
audiencePendingList.innerHTML = `<p class="status-err">Error: ${e}</p>`;
}
} }
// --- Network diagnostics --- // --- Network diagnostics ---

View file

@ -96,7 +96,7 @@
<select id="circle-select" class="hidden"></select> <select id="circle-select" class="hidden"></select>
<select id="comment-perm-select" title="Comment permission"> <select id="comment-perm-select" title="Comment permission">
<option value="public">Comments: All</option> <option value="public">Comments: All</option>
<option value="followers_only">Comments: Followers</option> <option value="audience_only">Comments: Audience</option>
<option value="none">Comments: Off</option> <option value="none">Comments: Off</option>
</select> </select>
<select id="react-perm-select" title="React permission"> <select id="react-perm-select" title="React permission">
@ -132,7 +132,16 @@
</div> </div>
</div> </div>
<div class="section-card" style="display:flex;gap:0.5rem;flex-wrap:wrap"> <div class="section-card">
<h3>Audience</h3>
<p class="empty-hint">People who receive your public posts.</p>
<h4 class="subsection-title">Pending Requests</h4>
<div id="audience-pending-list"></div>
<h4 class="subsection-title">Approved</h4>
<div id="audience-approved-list"></div>
</div>
<div class="section-card" style="display:flex;gap:0.5rem;flex-wrap:wrap">
<button id="share-details-btn" class="btn btn-ghost btn-sm">Share my details</button> <button id="share-details-btn" class="btn btn-ghost btn-sm">Share my details</button>
<button id="connect-toggle" class="btn btn-ghost btn-sm">Add peer manually</button> <button id="connect-toggle" class="btn btn-ghost btn-sm">Add peer manually</button>
<div id="connect-body" class="hidden"> <div id="connect-body" class="hidden">

View file

@ -46,38 +46,6 @@
<p style="margin: 0.5rem 0 0 0; font-size: 0.8rem; color: var(--text-muted);">v0.5.3 is kept online only as an upgrade bridge &mdash; it no longer connects to the live network.</p> <p style="margin: 0.5rem 0 0 0; font-size: 0.8rem; color: var(--text-muted);">v0.5.3 is kept online only as an upgrade bridge &mdash; it no longer connects to the live network.</p>
</div> </div>
<h2 style="margin-top: 2rem;">v0.6.2 &mdash; April 22, 2026</h2>
<p style="color: var(--text-muted); font-size: 0.85rem;">Every remaining persona-signed direct push is off the wire. Deletes, visibility changes, profile updates, and group-key distribution now travel as encrypted / signed posts through the CDN. Groups are a first-class primitive. Plus two pre-release fixes &mdash; an admin-forgery check on group keys and a cap on concurrent port-scan hole punches that explains the 10 Mbps upload storm some users saw on VPNs.</p>
<div class="downloads">
<a href="itsgoin-0.6.2.apk" class="download-btn btn-android">
Android APK
<span class="sub">v0.6.2</span>
</a>
<a href="itsgoin_0.6.2_amd64.AppImage" class="download-btn btn-linux">
Linux AppImage
<span class="sub">v0.6.2</span>
</a>
<a href="itsgoin-cli-0.6.2-linux-amd64" class="download-btn btn-linux">
Linux CLI / Anchor
<span class="sub">v0.6.2</span>
</a>
</div>
<ul style="color: var(--text-muted); font-size: 0.85rem; line-height: 1.6; margin-top: 1rem;">
<li><strong>Deletes + visibility changes travel as signed control posts</strong> through the CDN. The <code>DeleteRecord</code> / <code>VisibilityUpdate</code> direct pushes are gone.</li>
<li><strong>Profile display data (name, bio, avatar) travels as a persona-signed profile post.</strong> Peer-visible names are back &mdash; but bound to the posting identity, not the network endpoint.</li>
<li><strong>Rich comments</strong> &mdash; a comment can reference a separate post for long bodies or attachments; inline preview is signed alongside the reference.</li>
<li><strong>Groups as a primitive</strong> &mdash; many-way posting anchored at a public root post. Circles remain one-way (admin-only).</li>
<li><strong>Group keys distribute as encrypted posts</strong> &mdash; the <code>GroupKeyDistribute</code> wire message is gone.</li>
<li><strong>Audience removed.</strong> Simpler social graph; anyone-can-send model via follows.</li>
<li><strong>PostPush / PostNotification wire messages retired</strong> &mdash; all content propagates via CDN.</li>
<li><strong>Port-scan hole punches are now capped at 1 concurrent</strong> &mdash; fixes sustained multi-Mbps upload on obfuscated VPNs after anchor connect.</li>
<li><strong>Outgoing-connect dedup</strong> &mdash; auto-reconnect, rebalance, and relay-introduction no longer race to the same peer.</li>
<li><strong>Security fix: group-key distribution verifies the claimed admin matches the post author</strong>, preventing a pollution attack where a peer who knows your posting id could overwrite your stored group key.</li>
</ul>
<p style="color: var(--text-muted); font-size: 0.85rem;">v0.6.2 is a wire-breaking fork from v0.6.1 (the retired message types are not optional). Upgrade both ends.</p>
<h2 style="margin-top: 2rem;">v0.6.1 &mdash; April 22, 2026</h2> <h2 style="margin-top: 2rem;">v0.6.1 &mdash; April 22, 2026</h2>
<p style="color: var(--text-muted); font-size: 0.85rem;">Network identity is now fully separated from posting identity on every install. Plus: Android auto-backup disabled by default, Reset actually resets, import preserves your personas, and display name is optional.</p> <p style="color: var(--text-muted); font-size: 0.85rem;">Network identity is now fully separated from posting identity on every install. Plus: Android auto-backup disabled by default, Reset actually resets, import preserves your personas, and display name is optional.</p>
@ -160,22 +128,6 @@
<section> <section>
<h2>Changelog</h2> <h2>Changelog</h2>
<div class="changelog"> <div class="changelog">
<div class="changelog-date">v0.6.2 &mdash; April 22, 2026</div>
<ul>
<li><strong>Deletes and visibility changes travel as signed control posts.</strong> New <code>VisibilityIntent::Control</code> post type carries a signed <code>DeletePost</code> / <code>UpdateVisibility</code> operation. Receivers verify the ed25519 signature against the target post's author, then apply. <code>DeleteRecord</code> and <code>VisibilityUpdate</code> wire pushes are removed.</li>
<li><strong>Persona display data (name / bio / avatar) travels as a signed profile post.</strong> New <code>VisibilityIntent::Profile</code>; authored by the posting identity, propagates via the CDN. The <code>ProfileUpdate</code> wire message keeps only routing fields (anchors, recent_peers, preferred_peers).</li>
<li><strong>Rich comments.</strong> <code>InlineComment</code> gains an optional <code>ref_post_id</code>; when set, <code>content</code> is a short preview and the full body (long text, attachments) lives in the referenced post. Signature binds the reference so a peer can't strip or swap it.</li>
<li><strong>Groups vs circles.</strong> Groups are a new many-way primitive &mdash; anchored at a public root post, any member can post to the group once they have the seed. Circles stay one-way (admin-only). The distinction is a <code>canonical_root_post_id</code> field on the group-key record; groups reuse the same encryption machinery.</li>
<li><strong>Group-key distribution as an encrypted post.</strong> The <code>GroupKeyDistribute</code> (0xA0) wire push is retired. Admins publish an encrypted post carrying the seed; members decrypt with their posting secret. Removes the last persona-signed direct push.</li>
<li><strong>Audience primitive removed.</strong> No more audience tables, no more <code>AudienceRequest</code>/<code>AudienceResponse</code> wire messages, no more <code>SocialRelation::Audience</code>/<code>Mutual</code>. Comment permission <code>AudienceOnly</code> renamed to <code>FollowersOnly</code>.</li>
<li><strong>PostPush + PostNotification retired.</strong> All content propagates only via CDN (pull + header-diff neighbor propagation). <code>BlobDeleteNotice</code> also retired &mdash; orphan blobs on remote holders evict via LRU.</li>
<li><strong>Security: group-key admin-forgery rejection.</strong> Distribution posts whose inner <code>admin</code> field doesn't match the post's author are rejected before storage. Prevents an attacker who knows a victim's posting id and the target group_id from overwriting the victim's legitimate group-key record.</li>
<li><strong>Bandwidth: port-scan hole punch capped at one concurrent scanner.</strong> Each scanner fires ~100 QUIC ClientHellos/sec for up to 5 minutes. Without a cap, parallel referrals could drive sustained multi-Mbps upload &mdash; especially on obfuscated VPNs where every probe stalls at a proxy timeout. Extra callers fall back to the standard 2s-round hole punch.</li>
<li><strong>Outgoing-connect dedup.</strong> <code>PendingConnectGuard</code> prevents auto-reconnect, rebalance-slots, and relay-introduction from racing to connect to the same peer. Same-peer only &mdash; different peers connect independently; inbound connections are unaffected.</li>
<li><strong>Merged-pull bugfix.</strong> The pull query now includes every posting identity we hold (not just the network id), so DMs addressed to any of our personas are found via the recipient-match path.</li>
</ul>
<p style="color: var(--text-muted); font-size: 0.8rem; font-style: italic; margin-bottom: 1rem;">v0.6.2 is a wire-breaking fork from v0.6.1. Retired message types (0x42 PostNotification, 0x43 PostPush, 0x44 AudienceRequest, 0x45 AudienceResponse, 0x95 BlobDeleteNotice, 0xA0 GroupKeyDistribute) are not optional &mdash; upgrade both ends.</p>
<div class="changelog-date">v0.6.1 &mdash; April 22, 2026</div> <div class="changelog-date">v0.6.1 &mdash; April 22, 2026</div>
<ul> <ul>
<li><strong>Network ID and posting ID are now separate by default.</strong> Fresh installs generate two independent ed25519 keys. Upgraders rotate their network key on first launch; the old key stays as the default posting persona. Peers see the same author; only the QUIC endpoint changes.</li> <li><strong>Network ID and posting ID are now separate by default.</strong> Fresh installs generate two independent ed25519 keys. Upgraders rotate their network key on first launch; the old key stays as the default posting persona. Peers see the same author; only the QUIC endpoint changes.</li>