v0.3.6: Active CDN replication, device roles, budgets, tombstones, engagement fix, DOS hardening
Active CDN replication:
- All devices proactively replicate recent posts (<72h, <2 replicas) to peers
- Target priority: desktops (300) > anchors (200) > phones (100) + cache_pressure (see the scoring sketch below)
- ReplicationRequest/Response (0xE1/0xE2) wire messages
- 10-min cycle, 2-min initial delay, cap 20 posts per request
- Graceful with small networks (1 peer = 1 replica, 0 peers = silent skip)

Device roles & budgets:
- Intermittent (phone), Available (desktop), Persistent (anchor)
- Advertised in InitialExchange, stored per-peer
- Replication budget: phones 100MB/hr, desktops/anchors 200MB/hr
- Delivery budget: phones 1GB/hr, desktops 2GB/hr, anchors 1GB/hr
- Hourly auto-reset, enforcement on blob serving

Cache management:
- 1GB default cache limit, configurable in settings UI
- Eviction cycle activated (was implemented but never started)
- Share-link priority boost (+100 for 3+ downstream)
- Cache pressure score (0-255) for replication targeting

Engagement distribution fix:
- BlobHeader JSON rebuilt after BlobHeaderDiff ops
- Previously reactions/comments were stored in tables but the header stayed stale

Tombstone system:
- deleted_at column on reactions and comments
- Tombstones propagate through pull sync (additive merge respects timestamps)
- UI queries filter WHERE deleted_at IS NULL

Persistent notifications:
- seen_engagement and seen_messages tables replace in-memory Sets
- Only notify on genuinely unseen content; survives restarts

DOS hardening:
- BlobHeaderDiff fan-out: single batched task, max 10 concurrent via JoinSet (see the fan-out sketch below)
- Blob prefetch: cap 20 per cycle, newest first
- PostDownstreamRegister: cap 50 per sync
- Delivery budget enforcement on BlobRequest handler
- Pull preference: non-anchors first to preserve anchor delivery budget

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
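A minimal sketch of the replication-target scoring summarized above. The base scores and role names follow the changelog; combining cache_pressure by simple addition is an assumption read off the one-line summary:

/// Hypothetical replication-target scoring per the changelog priorities:
/// desktops (300) > anchors (200) > phones (100), plus the peer's advertised
/// cache_pressure (0-255). The additive combination is an assumption.
enum DeviceRole {
    Intermittent, // phone
    Available,    // desktop
    Persistent,   // anchor
}

fn replication_score(role: &DeviceRole, cache_pressure: u8) -> u32 {
    let base = match role {
        DeviceRole::Available => 300,
        DeviceRole::Persistent => 200,
        DeviceRole::Intermittent => 100,
    };
    base + cache_pressure as u32
}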
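For the BlobHeaderDiff fan-out cap, a bounded-concurrency pattern with tokio's JoinSet along these lines would match the "max 10 concurrent" description; Peer, HeaderDiff, and send_diff are hypothetical stand-ins for the real wire plumbing:

use tokio::task::JoinSet;

#[derive(Clone)]
struct HeaderDiff; // stand-in for the real diff payload
struct Peer;       // stand-in for a connected peer handle

async fn send_diff(_peer: Peer, _diff: HeaderDiff) { /* wire send elided */ }

/// One batched task fans out to all peers, never more than 10 in flight.
async fn fan_out_header_diff(peers: Vec<Peer>, diff: HeaderDiff) {
    let mut set = JoinSet::new();
    for peer in peers {
        if set.len() >= 10 {
            let _ = set.join_next().await; // free a slot before spawning more
        }
        let diff = diff.clone();
        set.spawn(async move { send_diff(peer, diff).await });
    }
    while set.join_next().await.is_some() {} // drain the tail
}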
commit a7e632de88 (parent b7f2d369fa)
16 changed files with 1254 additions and 158 deletions
@@ -22,6 +22,8 @@ pub struct EvictionCandidate {
     pub last_accessed_at: u64,
     pub pinned: bool,
     pub peer_copies: u32,
+    /// Number of downstream CDN peers — proxy for share-link popularity.
+    pub downstream_count: u32,
 }

 pub struct Storage {
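The struct gains downstream_count so eviction can apply the share-link boost from the changelog (+100 for 3 or more downstream peers). A sketch of how a retention score might consume it; only the downstream rule comes from the changelog, the other weights are invented for illustration:

/// Illustrative only: ranks candidates for retention (higher = keep longer).
/// The +100 share-link boost is from the changelog; the pinning and replica
/// weights here are made-up placeholders.
fn retention_priority(c: &EvictionCandidate) -> i64 {
    let mut score: i64 = 0;
    if c.pinned {
        score += 1_000; // pinned blobs are evicted last
    }
    score -= c.peer_copies as i64 * 10; // safer to evict if replicated elsewhere
    if c.downstream_count >= 3 {
        score += 100; // share-link priority boost
    }
    score
}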
@@ -338,6 +340,16 @@ impl Storage {
                 host TEXT NOT NULL,
                 last_seen_ms INTEGER NOT NULL,
                 PRIMARY KEY (post_id, host)
+            );
+            CREATE TABLE IF NOT EXISTS seen_engagement (
+                post_id BLOB PRIMARY KEY,
+                seen_react_count INTEGER NOT NULL DEFAULT 0,
+                seen_comment_count INTEGER NOT NULL DEFAULT 0,
+                updated_at INTEGER NOT NULL DEFAULT 0
+            );
+            CREATE TABLE IF NOT EXISTS seen_messages (
+                partner_id BLOB PRIMARY KEY,
+                last_read_ms INTEGER NOT NULL DEFAULT 0
             );",
         )?;
         Ok(())
@@ -563,6 +575,37 @@ impl Storage {
             )?;
         }

+        // Add deleted_at column to reactions if missing (tombstone support)
+        let has_deleted_at_react = self.conn.prepare(
+            "SELECT COUNT(*) FROM pragma_table_info('reactions') WHERE name='deleted_at'"
+        )?.query_row([], |row| row.get::<_, i64>(0))?;
+        if has_deleted_at_react == 0 {
+            self.conn.execute_batch(
+                "ALTER TABLE reactions ADD COLUMN deleted_at INTEGER DEFAULT NULL;"
+            )?;
+        }
+
+        // Add deleted_at column to comments if missing (tombstone support)
+        let has_deleted_at_comment = self.conn.prepare(
+            "SELECT COUNT(*) FROM pragma_table_info('comments') WHERE name='deleted_at'"
+        )?.query_row([], |row| row.get::<_, i64>(0))?;
+        if has_deleted_at_comment == 0 {
+            self.conn.execute_batch(
+                "ALTER TABLE comments ADD COLUMN deleted_at INTEGER DEFAULT NULL;"
+            )?;
+        }
+
+        // Add device_role column to peers if missing (Active CDN replication)
+        let has_device_role = self.conn.prepare(
+            "SELECT COUNT(*) FROM pragma_table_info('peers') WHERE name='device_role'"
+        )?.query_row([], |row| row.get::<_, i64>(0))?;
+        if has_device_role == 0 {
+            self.conn.execute_batch(
+                "ALTER TABLE peers ADD COLUMN device_role TEXT DEFAULT NULL;
+                 ALTER TABLE peers ADD COLUMN cache_pressure INTEGER DEFAULT NULL;"
+            )?;
+        }
+
         Ok(())
     }
@@ -661,6 +704,32 @@ impl Storage {
         }
     }

+    /// Get post IDs authored by `author` with timestamp_ms >= `since_ms`.
+    pub fn get_own_recent_post_ids(&self, author: &NodeId, since_ms: u64) -> anyhow::Result<Vec<PostId>> {
+        let mut stmt = self.conn.prepare(
+            "SELECT id FROM posts WHERE author = ?1 AND timestamp_ms >= ?2"
+        )?;
+        let rows = stmt.query_map(params![author.as_slice(), since_ms as i64], |row| {
+            let bytes: Vec<u8> = row.get(0)?;
+            Ok(bytes)
+        })?;
+        let mut ids = Vec::new();
+        for row in rows {
+            ids.push(blob_to_postid(row?)?);
+        }
+        Ok(ids)
+    }
+
+    /// Get a peer's cache_pressure score (0-255), or None if unknown.
+    pub fn get_peer_cache_pressure(&self, node_id: &NodeId) -> anyhow::Result<Option<u8>> {
+        let result: Option<i32> = self.conn.query_row(
+            "SELECT cache_pressure FROM peers WHERE node_id = ?1",
+            params![node_id.as_slice()],
+            |row| row.get(0),
+        ).ok().flatten();
+        Ok(result.map(|p| p as u8))
+    }
+
     pub fn list_post_ids(&self) -> anyhow::Result<Vec<PostId>> {
         let mut stmt = self.conn.prepare("SELECT id FROM posts")?;
         let rows = stmt.query_map([], |row| {
@@ -1141,6 +1210,69 @@ impl Storage {
         Ok(())
     }

+    // --- Seen engagement tracking ---
+
+    /// Get the seen engagement counts for a post (react_count, comment_count).
+    pub fn get_seen_engagement(&self, post_id: &PostId) -> anyhow::Result<(u32, u32)> {
+        let mut stmt = self.conn.prepare(
+            "SELECT seen_react_count, seen_comment_count FROM seen_engagement WHERE post_id = ?1"
+        )?;
+        let result = stmt.query_row(params![post_id.as_slice()], |row| {
+            let rc: i64 = row.get(0)?;
+            let cc: i64 = row.get(1)?;
+            Ok((rc as u32, cc as u32))
+        });
+        match result {
+            Ok(r) => Ok(r),
+            Err(rusqlite::Error::QueryReturnedNoRows) => Ok((0, 0)),
+            Err(e) => Err(e.into()),
+        }
+    }
+
+    /// Set the seen engagement counts for a post (upsert).
+    pub fn set_seen_engagement(&self, post_id: &PostId, react_count: u32, comment_count: u32) -> anyhow::Result<()> {
+        let now_ms = std::time::SystemTime::now()
+            .duration_since(std::time::UNIX_EPOCH)
+            .map(|d| d.as_millis() as i64)
+            .unwrap_or(0);
+        self.conn.execute(
+            "INSERT INTO seen_engagement (post_id, seen_react_count, seen_comment_count, updated_at)
+             VALUES (?1, ?2, ?3, ?4)
+             ON CONFLICT(post_id) DO UPDATE SET
+                 seen_react_count = excluded.seen_react_count,
+                 seen_comment_count = excluded.seen_comment_count,
+                 updated_at = excluded.updated_at",
+            params![post_id.as_slice(), react_count as i64, comment_count as i64, now_ms],
+        )?;
+        Ok(())
+    }
+
+    /// Get the last-read timestamp for a conversation partner.
+    pub fn get_last_read_message(&self, partner_id: &NodeId) -> anyhow::Result<u64> {
+        let mut stmt = self.conn.prepare(
+            "SELECT last_read_ms FROM seen_messages WHERE partner_id = ?1"
+        )?;
+        let result = stmt.query_row(params![partner_id.as_slice()], |row| {
+            let ts: i64 = row.get(0)?;
+            Ok(ts as u64)
+        });
+        match result {
+            Ok(r) => Ok(r),
+            Err(rusqlite::Error::QueryReturnedNoRows) => Ok(0),
+            Err(e) => Err(e.into()),
+        }
+    }
+
+    /// Set the last-read timestamp for a conversation partner (upsert).
+    pub fn set_last_read_message(&self, partner_id: &NodeId, timestamp_ms: u64) -> anyhow::Result<()> {
+        self.conn.execute(
+            "INSERT INTO seen_messages (partner_id, last_read_ms) VALUES (?1, ?2)
+             ON CONFLICT(partner_id) DO UPDATE SET last_read_ms = excluded.last_read_ms",
+            params![partner_id.as_slice(), timestamp_ms as i64],
+        )?;
+        Ok(())
+    }
+
     /// Initialize post_hosts table (called by web handler).
     pub fn init_post_hosts_table(&self) -> anyhow::Result<()> {
         // Already in init_tables, but safe to call again
|
|||
Ok(result.unwrap_or(0) != 0)
|
||||
}
|
||||
|
||||
/// Set a peer's CDN device role and cache pressure.
|
||||
pub fn set_peer_device_role(&self, node_id: &NodeId, role: Option<&str>, pressure: Option<u8>) -> anyhow::Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE peers SET device_role = ?2, cache_pressure = ?3 WHERE node_id = ?1",
|
||||
params![node_id.as_slice(), role, pressure.map(|p| p as i32)],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get a peer's CDN device role (from InitialExchange).
|
||||
pub fn get_peer_device_role(&self, node_id: &NodeId) -> anyhow::Result<Option<String>> {
|
||||
let result: Option<String> = self.conn.query_row(
|
||||
"SELECT device_role FROM peers WHERE node_id = ?1",
|
||||
params![node_id.as_slice()],
|
||||
|row| row.get(0),
|
||||
).ok().flatten();
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Get the display name for a node, or None if no profile exists
|
||||
pub fn get_display_name(&self, node_id: &NodeId) -> anyhow::Result<Option<String>> {
|
||||
let result: Option<String> = self.conn.query_row(
|
||||
|
|
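The device_role stored here feeds the hourly budgets from the changelog; a minimal sketch of how enforcement on blob serving might look. BudgetTracker is hypothetical and the role strings are assumptions; only the limits and the hourly reset come from the changelog:

/// Hypothetical hourly delivery-budget tracker keyed off the stored
/// device_role. Limits follow the changelog (phones 1GB/hr, desktops
/// 2GB/hr, anchors 1GB/hr).
struct BudgetTracker {
    used_bytes: u64,
    window_start_ms: u64,
}

impl BudgetTracker {
    fn delivery_limit(role: &str) -> u64 {
        match role {
            "available" => 2 * 1024 * 1024 * 1024, // desktop
            _ => 1024 * 1024 * 1024,               // phone or anchor
        }
    }

    /// Charge `len` bytes against the budget; false means the BlobRequest
    /// handler should refuse to serve.
    fn try_spend(&mut self, role: &str, len: u64, now_ms: u64) -> bool {
        if now_ms.saturating_sub(self.window_start_ms) >= 3_600_000 {
            self.used_bytes = 0; // hourly auto-reset
            self.window_start_ms = now_ms;
        }
        if self.used_bytes + len > Self::delivery_limit(role) {
            return false;
        }
        self.used_bytes += len;
        true
    }
}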
@@ -3390,14 +3541,20 @@ impl Storage {
         let mut stmt = self.conn.prepare(
             "SELECT b.cid, b.post_id, b.author, b.size_bytes, b.created_at,
                     b.last_accessed_at, b.pinned,
-                    COALESCE(r.copies, 0) as peer_copies
+                    COALESCE(r.copies, 0) as peer_copies,
+                    COALESCE(d.ds_count, 0) as downstream_count
              FROM blobs b
              LEFT JOIN (
                  SELECT post_id, COUNT(*) as copies
                  FROM post_replicas
                  WHERE last_confirmed_ms >= ?1
                  GROUP BY post_id
-             ) r ON b.post_id = r.post_id"
+             ) r ON b.post_id = r.post_id
+             LEFT JOIN (
+                 SELECT cid, COUNT(*) as ds_count
+                 FROM blob_downstream
+                 GROUP BY cid
+             ) d ON b.cid = d.cid"
         )?;
         let rows = stmt.query_map(params![cutoff], |row| {
             let cid_bytes: Vec<u8> = row.get(0)?;
@@ -3408,11 +3565,12 @@ impl Storage {
             let last_accessed_at = row.get::<_, i64>(5)? as u64;
             let pinned = row.get::<_, i64>(6)? != 0;
             let peer_copies = row.get::<_, i64>(7)? as u32;
-            Ok((cid_bytes, post_id_bytes, author_bytes, size_bytes, created_at, last_accessed_at, pinned, peer_copies))
+            let downstream_count = row.get::<_, i64>(8)? as u32;
+            Ok((cid_bytes, post_id_bytes, author_bytes, size_bytes, created_at, last_accessed_at, pinned, peer_copies, downstream_count))
         })?;
         let mut result = Vec::new();
         for row in rows {
-            let (cid_bytes, post_id_bytes, author_bytes, size_bytes, created_at, last_accessed_at, pinned, peer_copies) = row?;
+            let (cid_bytes, post_id_bytes, author_bytes, size_bytes, created_at, last_accessed_at, pinned, peer_copies, downstream_count) = row?;
             let cid: [u8; 32] = match cid_bytes.try_into() {
                 Ok(c) => c,
                 Err(_) => continue,
@@ -3434,11 +3592,22 @@ impl Storage {
                 last_accessed_at,
                 pinned,
                 peer_copies,
+                downstream_count,
             });
         }
         Ok(result)
     }

+    /// Count total number of blobs.
+    pub fn count_blobs(&self) -> anyhow::Result<u64> {
+        let count: i64 = self.conn.query_row(
+            "SELECT COUNT(*) FROM blobs",
+            [],
+            |row| row.get(0),
+        )?;
+        Ok(count as u64)
+    }
+
     /// Clean up all CDN metadata for a blob (manifests + upstream + downstream).
     pub fn cleanup_cdn_for_blob(&self, cid: &[u8; 32]) -> anyhow::Result<()> {
         self.conn.execute("DELETE FROM cdn_manifests WHERE cid = ?1", params![cid.as_slice()])?;
@@ -3730,19 +3899,24 @@ impl Storage {
     /// Get mesh peers and N2 peers known to have an author's posts (from post_replicas overlap).
     /// Used by the lateral fetch cascade step.
+    /// Results are sorted: non-anchor peers first (to save anchor delivery budget),
+    /// then by specificity (peers with this exact post first) and recency.
     pub fn get_lateral_blob_sources(&self, author: &NodeId, post_id: &PostId) -> anyhow::Result<Vec<NodeId>> {
         // Find peers who have replicas of any post by this author, prioritizing those
         // who have this specific post, then any other posts by the same author.
         // Cross-reference with mesh_peers and reachable_n2 for reachability.
+        // Sort: non-anchors first (COALESCE is_anchor default 0), then post match, then recency.
         let mut stmt = self.conn.prepare(
             "SELECT DISTINCT pr.node_id FROM post_replicas pr
              INNER JOIN posts p ON pr.post_id = p.id
+             LEFT JOIN peers pe ON pr.node_id = pe.node_id
              WHERE p.author = ?1
              AND (
                  pr.node_id IN (SELECT node_id FROM mesh_peers)
                  OR pr.node_id IN (SELECT reachable_node_id FROM reachable_n2)
              )
-             ORDER BY CASE WHEN pr.post_id = ?2 THEN 0 ELSE 1 END,
+             ORDER BY COALESCE(pe.is_anchor, 0) ASC,
+                 CASE WHEN pr.post_id = ?2 THEN 0 ELSE 1 END,
                  pr.last_confirmed_ms DESC
              LIMIT 10"
         )?;
@@ -3839,38 +4013,41 @@ impl Storage {
     // --- Engagement: reactions ---

     /// Store a reaction (upsert by reactor+post_id+emoji).
+    /// Tombstone-aware: incoming reaction wins only if its timestamp is newer.
     pub fn store_reaction(&self, reaction: &Reaction) -> anyhow::Result<()> {
         self.conn.execute(
-            "INSERT INTO reactions (reactor, post_id, emoji, timestamp_ms, encrypted_payload)
-             VALUES (?1, ?2, ?3, ?4, ?5)
+            "INSERT INTO reactions (reactor, post_id, emoji, timestamp_ms, encrypted_payload, deleted_at)
+             VALUES (?1, ?2, ?3, ?4, ?5, ?6)
              ON CONFLICT(reactor, post_id, emoji) DO UPDATE SET
-                 timestamp_ms = excluded.timestamp_ms,
-                 encrypted_payload = excluded.encrypted_payload",
+                 timestamp_ms = CASE WHEN excluded.timestamp_ms > timestamp_ms THEN excluded.timestamp_ms ELSE timestamp_ms END,
+                 deleted_at = CASE WHEN excluded.timestamp_ms > timestamp_ms THEN excluded.deleted_at ELSE deleted_at END,
+                 encrypted_payload = CASE WHEN excluded.timestamp_ms > timestamp_ms THEN excluded.encrypted_payload ELSE encrypted_payload END",
             params![
                 reaction.reactor.as_slice(),
                 reaction.post_id.as_slice(),
                 reaction.emoji,
                 reaction.timestamp_ms as i64,
                 reaction.encrypted_payload,
+                reaction.deleted_at.map(|v| v as i64),
             ],
         )?;
         Ok(())
     }

-    /// Remove a reaction.
+    /// Tombstone a reaction (soft-delete by setting deleted_at).
     pub fn remove_reaction(&self, reactor: &NodeId, post_id: &PostId, emoji: &str) -> anyhow::Result<()> {
         self.conn.execute(
-            "DELETE FROM reactions WHERE reactor = ?1 AND post_id = ?2 AND emoji = ?3",
-            params![reactor.as_slice(), post_id.as_slice(), emoji],
+            "UPDATE reactions SET deleted_at = ?4 WHERE reactor = ?1 AND post_id = ?2 AND emoji = ?3",
+            params![reactor.as_slice(), post_id.as_slice(), emoji, now_ms()],
         )?;
         Ok(())
     }

-    /// Get all reactions for a post.
+    /// Get live (non-tombstoned) reactions for a post. Used for UI display.
     pub fn get_reactions(&self, post_id: &PostId) -> anyhow::Result<Vec<Reaction>> {
         let mut stmt = self.conn.prepare(
             "SELECT reactor, post_id, emoji, timestamp_ms, encrypted_payload
-             FROM reactions WHERE post_id = ?1 ORDER BY timestamp_ms ASC"
+             FROM reactions WHERE post_id = ?1 AND deleted_at IS NULL ORDER BY timestamp_ms ASC"
         )?;
         let rows = stmt.query_map(params![post_id.as_slice()], |row| {
             let reactor: Vec<u8> = row.get(0)?;
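The CASE WHEN guards make the upsert a per-row last-write-wins merge, which is what lets tombstones ride the additive pull sync without being resurrected. An illustrative fragment (assumes Reaction: Clone, plus a Storage and IDs set up as in the tests further down):

fn tombstone_wins_demo(s: &Storage, reactor: NodeId, post_id: PostId) {
    let live = Reaction {
        reactor,
        post_id,
        emoji: "👍".to_string(),
        timestamp_ms: 1000,
        encrypted_payload: None,
        deleted_at: None,
    };
    s.store_reaction(&live).unwrap();

    // A newer tombstone (e.g., arriving via a pulled header rebuild) wins.
    let tomb = Reaction { timestamp_ms: 2000, deleted_at: Some(2000), ..live.clone() };
    s.store_reaction(&tomb).unwrap();

    // Replaying the stale live copy is a no-op: excluded.timestamp_ms (1000)
    // is not newer than the stored 2000, so deleted_at stays set.
    s.store_reaction(&live).unwrap();
    assert!(s.get_reactions(&tomb.post_id).unwrap().is_empty()); // UI filter hides it
}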
@@ -3891,17 +4068,51 @@ impl Storage {
                 post_id,
                 timestamp_ms: ts as u64,
                 encrypted_payload: enc,
+                deleted_at: None,
             });
         }
         Ok(result)
     }

-    /// Get reaction counts grouped by emoji for a post.
+    /// Get ALL reactions for a post, including tombstoned ones. Used for header rebuild
+    /// so tombstones propagate through pull-based sync.
+    pub fn get_reactions_with_tombstones(&self, post_id: &PostId) -> anyhow::Result<Vec<Reaction>> {
+        let mut stmt = self.conn.prepare(
+            "SELECT reactor, post_id, emoji, timestamp_ms, encrypted_payload, deleted_at
+             FROM reactions WHERE post_id = ?1 ORDER BY timestamp_ms ASC"
+        )?;
+        let rows = stmt.query_map(params![post_id.as_slice()], |row| {
+            let reactor: Vec<u8> = row.get(0)?;
+            let pid: Vec<u8> = row.get(1)?;
+            let emoji: String = row.get(2)?;
+            let ts: i64 = row.get(3)?;
+            let enc: Option<String> = row.get(4)?;
+            let del: Option<i64> = row.get(5)?;
+            Ok((reactor, pid, emoji, ts, enc, del))
+        })?;
+        let mut result = Vec::new();
+        for row in rows {
+            let (reactor_bytes, pid_bytes, emoji, ts, enc, del) = row?;
+            let reactor = blob_to_nodeid(reactor_bytes)?;
+            let post_id = blob_to_postid(pid_bytes)?;
+            result.push(Reaction {
+                reactor,
+                emoji,
+                post_id,
+                timestamp_ms: ts as u64,
+                encrypted_payload: enc,
+                deleted_at: del.map(|v| v as u64),
+            });
+        }
+        Ok(result)
+    }
+
+    /// Get reaction counts grouped by emoji for a post (excludes tombstoned reactions).
     pub fn get_reaction_counts(&self, post_id: &PostId, my_node_id: &NodeId) -> anyhow::Result<Vec<(String, u64, bool)>> {
         let mut stmt = self.conn.prepare(
             "SELECT emoji, COUNT(*) as cnt,
                     SUM(CASE WHEN reactor = ?2 THEN 1 ELSE 0 END) as my_count
-             FROM reactions WHERE post_id = ?1 GROUP BY emoji ORDER BY cnt DESC"
+             FROM reactions WHERE post_id = ?1 AND deleted_at IS NULL GROUP BY emoji ORDER BY cnt DESC"
         )?;
         let rows = stmt.query_map(params![post_id.as_slice(), my_node_id.as_slice()], |row| {
             let emoji: String = row.get(0)?;
@@ -3918,18 +4129,22 @@ impl Storage {
     // --- Engagement: comments ---

-    /// Store a comment.
+    /// Store a comment. Tombstone-aware upsert: if the incoming comment carries a
+    /// deleted_at tombstone, store it so the tombstone propagates.
     pub fn store_comment(&self, comment: &InlineComment) -> anyhow::Result<()> {
         self.conn.execute(
-            "INSERT INTO comments (author, post_id, content, timestamp_ms, signature)
-             VALUES (?1, ?2, ?3, ?4, ?5)
-             ON CONFLICT DO NOTHING",
+            "INSERT INTO comments (author, post_id, content, timestamp_ms, signature, deleted_at)
+             VALUES (?1, ?2, ?3, ?4, ?5, ?6)
+             ON CONFLICT(author, post_id, timestamp_ms) DO UPDATE SET
+                 content = CASE WHEN excluded.deleted_at IS NOT NULL THEN content ELSE excluded.content END,
+                 deleted_at = CASE WHEN excluded.deleted_at IS NOT NULL THEN excluded.deleted_at ELSE deleted_at END",
            params![
                 comment.author.as_slice(),
                 comment.post_id.as_slice(),
                 comment.content,
                 comment.timestamp_ms as i64,
                 comment.signature,
+                comment.deleted_at.map(|v| v as i64),
             ],
         )?;
         Ok(())
@@ -3944,20 +4159,20 @@ impl Storage {
         Ok(updated > 0)
     }

-    /// Delete a comment (must match author + post_id + timestamp_ms).
+    /// Tombstone a comment (soft-delete by setting deleted_at).
     pub fn delete_comment(&self, author: &NodeId, post_id: &PostId, timestamp_ms: u64) -> anyhow::Result<bool> {
-        let deleted = self.conn.execute(
-            "DELETE FROM comments WHERE author = ?1 AND post_id = ?2 AND timestamp_ms = ?3",
-            params![author.as_slice(), post_id.as_slice(), timestamp_ms as i64],
+        let updated = self.conn.execute(
+            "UPDATE comments SET deleted_at = ?4 WHERE author = ?1 AND post_id = ?2 AND timestamp_ms = ?3",
+            params![author.as_slice(), post_id.as_slice(), timestamp_ms as i64, now_ms()],
         )?;
-        Ok(deleted > 0)
+        Ok(updated > 0)
     }

-    /// Get all comments for a post, ordered by timestamp.
+    /// Get live (non-tombstoned) comments for a post. Used for UI display.
     pub fn get_comments(&self, post_id: &PostId) -> anyhow::Result<Vec<InlineComment>> {
         let mut stmt = self.conn.prepare(
             "SELECT author, post_id, content, timestamp_ms, signature
-             FROM comments WHERE post_id = ?1 ORDER BY timestamp_ms ASC"
+             FROM comments WHERE post_id = ?1 AND deleted_at IS NULL ORDER BY timestamp_ms ASC"
         )?;
         let rows = stmt.query_map(params![post_id.as_slice()], |row| {
             let author: Vec<u8> = row.get(0)?;
@@ -3978,15 +4193,49 @@ impl Storage {
                 content,
                 timestamp_ms: ts as u64,
                 signature: sig,
+                deleted_at: None,
             });
         }
         Ok(result)
     }

-    /// Get comment count for a post.
+    /// Get ALL comments for a post, including tombstoned ones. Used for header rebuild
+    /// so tombstones propagate through pull-based sync.
+    pub fn get_comments_with_tombstones(&self, post_id: &PostId) -> anyhow::Result<Vec<InlineComment>> {
+        let mut stmt = self.conn.prepare(
+            "SELECT author, post_id, content, timestamp_ms, signature, deleted_at
+             FROM comments WHERE post_id = ?1 ORDER BY timestamp_ms ASC"
+        )?;
+        let rows = stmt.query_map(params![post_id.as_slice()], |row| {
+            let author: Vec<u8> = row.get(0)?;
+            let pid: Vec<u8> = row.get(1)?;
+            let content: String = row.get(2)?;
+            let ts: i64 = row.get(3)?;
+            let sig: Vec<u8> = row.get(4)?;
+            let del: Option<i64> = row.get(5)?;
+            Ok((author, pid, content, ts, sig, del))
+        })?;
+        let mut result = Vec::new();
+        for row in rows {
+            let (author_bytes, pid_bytes, content, ts, sig, del) = row?;
+            let author = blob_to_nodeid(author_bytes)?;
+            let post_id = blob_to_postid(pid_bytes)?;
+            result.push(InlineComment {
+                author,
+                post_id,
+                content,
+                timestamp_ms: ts as u64,
+                signature: sig,
+                deleted_at: del.map(|v| v as u64),
+            });
+        }
+        Ok(result)
+    }
+
+    /// Get comment count for a post (excludes tombstoned comments).
     pub fn get_comment_count(&self, post_id: &PostId) -> anyhow::Result<u64> {
         let count: i64 = self.conn.prepare(
-            "SELECT COUNT(*) FROM comments WHERE post_id = ?1"
+            "SELECT COUNT(*) FROM comments WHERE post_id = ?1 AND deleted_at IS NULL"
        )?.query_row(params![post_id.as_slice()], |row| row.get(0))?;
         Ok(count as u64)
     }
@@ -5460,6 +5709,7 @@ mod tests {
             post_id,
             timestamp_ms: 1000,
             encrypted_payload: None,
+            deleted_at: None,
         }).unwrap();

         s.store_reaction(&Reaction {
@@ -5468,6 +5718,7 @@ mod tests {
             post_id,
             timestamp_ms: 1001,
             encrypted_payload: None,
+            deleted_at: None,
         }).unwrap();

         s.store_reaction(&Reaction {
@@ -5476,6 +5727,7 @@ mod tests {
             post_id,
             timestamp_ms: 1002,
             encrypted_payload: None,
+            deleted_at: None,
         }).unwrap();

         let reactions = s.get_reactions(&post_id).unwrap();
@@ -5508,6 +5760,7 @@ mod tests {
             content: "Nice post!".to_string(),
             timestamp_ms: 1000,
             signature: vec![0u8; 64],
+            deleted_at: None,
         }).unwrap();

         s.store_comment(&InlineComment {
@@ -5516,6 +5769,7 @@ mod tests {
             content: "I agree".to_string(),
             timestamp_ms: 1001,
             signature: vec![1u8; 64],
+            deleted_at: None,
         }).unwrap();

         let comments = s.get_comments(&post_id).unwrap();