v0.4.0: Protocol v4 — header-driven sync, tiered engagement, multi-upstream
Protocol v4 sync overhaul:

- Slim PullSyncRequest: per-author timestamps (since_ms) replace full post ID lists.
  Request size O(follows) instead of O(posts). Backward-compatible via serde default.
- Tiered pull frequency: 60s ticks, only syncs stale authors (4hr default).
  Full pull only on first tick (bootstrap). Most ticks skip — no stale authors.
- Tiered engagement checks: frequency scales with content age —
  5min (<72h), 1hr (3-14d), 4hr (14-30d), 24hr (>30d).
  Single SQL query filters posts due for check.
- Header-driven post discovery: ManifestPush triggers PostFetch for missing
  followed-author posts (capped 10 per manifest). CDN tree = notification system.
- Multi-upstream (3 max): composite PK, priority ordering, engagement diffs
  sent to all upstreams, promote/remove on failure.

DB schema:
- follows.last_sync_ms — Self Last Encounter per author
- posts.last_engagement_ms — last reaction/comment timestamp
- posts.last_check_ms — last engagement check timestamp
- post_upstream: single-row → 3-row with priority column

Lock contention fixes:
- get_blob_for_post: 3 locks → 1
- prefetch_blobs_from_peer: lock-free blob checks
- fetch_engagement_from_peer: explicit lock release before I/O
- serve_post: 4 locks → 2 (eliminated redundant queries)
- run_replication_check: 2 locks → 1
- Badge cycle: N+2 IPC calls → 1 (get_badge_counts)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
1df00eebf8
commit
bbaacf9b6c
10 changed files with 489 additions and 100 deletions
|
|
@@ -2501,31 +2501,53 @@ impl Node {
|
|||
tokio::spawn(async move { network.run_accept_loop().await })
|
||||
}
|
||||
|
||||
/// Start pull cycle: every interval_secs, pull from connected peers + prefetch blobs.
|
||||
pub fn start_pull_cycle(self: &Arc<Self>, interval_secs: u64) -> tokio::task::JoinHandle<()> {
|
||||
/// Start pull cycle: Protocol v4 tiered pull — 60s ticks, full pull on first tick,
|
||||
/// then only pull for stale authors (last_sync_ms > 4 hours old).
|
||||
pub fn start_pull_cycle(self: &Arc<Self>, _interval_secs: u64) -> tokio::task::JoinHandle<()> {
|
||||
let node = Arc::clone(self);
|
||||
tokio::spawn(async move {
|
||||
let mut interval =
|
||||
tokio::time::interval(std::time::Duration::from_secs(interval_secs));
|
||||
tokio::time::interval(std::time::Duration::from_secs(60));
|
||||
let mut is_first_tick = true;
|
||||
loop {
|
||||
interval.tick().await;
|
||||
match node.network.pull_from_all().await {
|
||||
Ok(stats) => {
|
||||
if stats.posts_received > 0 {
|
||||
tracing::debug!(
|
||||
posts = stats.posts_received,
|
||||
peers = stats.peers_pulled,
|
||||
"Pull cycle complete"
|
||||
);
|
||||
// Prefetch blobs for newly received posts
|
||||
let peers = node.network.conn_handle().connected_peers().await;
|
||||
for peer_id in peers {
|
||||
node.prefetch_blobs_from_peer(&peer_id).await;
|
||||
|
||||
if is_first_tick {
|
||||
// Full pull on startup
|
||||
let _ = node.network.pull_from_all().await;
|
||||
is_first_tick = false;
|
||||
// Prefetch after initial sync
|
||||
let peers = node.network.conn_handle().connected_peers().await;
|
||||
for peer_id in peers {
|
||||
node.prefetch_blobs_from_peer(&peer_id).await;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
// Tiered: only pull for stale authors (4-hour default)
|
||||
let stale_authors = {
|
||||
let storage = node.storage.lock().await;
|
||||
storage.get_stale_follows(4 * 3600 * 1000).unwrap_or_default()
|
||||
};
|
||||
|
||||
if stale_authors.is_empty() {
|
||||
continue; // Most ticks skip — no stale authors
|
||||
}
|
||||
|
||||
// Find a connected peer and pull
|
||||
let peers = node.network.conn_handle().connected_peers().await;
|
||||
if let Some(peer_id) = peers.first() {
|
||||
match node.network.conn_handle().pull_from_peer(peer_id).await {
|
||||
Ok(stats) => {
|
||||
if stats.posts_received > 0 {
|
||||
tracing::debug!(
|
||||
posts = stats.posts_received,
|
||||
"Tiered pull complete"
|
||||
);
|
||||
node.prefetch_blobs_from_peer(peer_id).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::debug!(error = %e, "Pull cycle failed");
|
||||
Err(e) => tracing::debug!(error = %e, "Tiered pull failed"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -3495,12 +3517,12 @@ impl Node {
|
|||
timestamp_ms: now,
|
||||
};
|
||||
network.propagate_engagement_diff(&post_id, &diff, &our_node_id).await;
|
||||
// Also send upstream (toward author)
|
||||
let upstream = {
|
||||
// Also send to all upstreams (toward author) — Phase 6 multi-upstream
|
||||
let upstreams = {
|
||||
let storage = self.storage.lock().await;
|
||||
storage.get_post_upstream(&post_id).ok().flatten()
|
||||
storage.get_post_upstreams(&post_id).unwrap_or_default()
|
||||
};
|
||||
if let Some(up) = upstream {
|
||||
for (up, _prio) in upstreams {
|
||||
let _ = network.send_to_peer_uni(&up, crate::protocol::MessageType::BlobHeaderDiff, &diff).await;
|
||||
}
|
||||
}
|
||||
|
|
@@ -3609,12 +3631,12 @@ impl Node {
|
|||
timestamp_ms: now,
|
||||
};
|
||||
network.propagate_engagement_diff(&post_id, &diff, &our_node_id).await;
|
||||
// Also send upstream (toward author)
|
||||
let upstream = {
|
||||
// Also send to all upstreams (toward author) — Phase 6 multi-upstream
|
||||
let upstreams = {
|
||||
let storage = self.storage.lock().await;
|
||||
storage.get_post_upstream(&post_id).ok().flatten()
|
||||
storage.get_post_upstreams(&post_id).unwrap_or_default()
|
||||
};
|
||||
if let Some(up) = upstream {
|
||||
for (up, _prio) in upstreams {
|
||||
let _ = network.send_to_peer_uni(&up, crate::protocol::MessageType::BlobHeaderDiff, &diff).await;
|
||||
}
|
||||
}
|
||||
|
|
@@ -3653,11 +3675,12 @@ impl Node {
|
|||
timestamp_ms: now,
|
||||
};
|
||||
network.propagate_engagement_diff(&post_id, &diff, &our_node_id).await;
|
||||
let upstream = {
|
||||
// Phase 6: send to all upstreams
|
||||
let upstreams = {
|
||||
let storage = self.storage.lock().await;
|
||||
storage.get_post_upstream(&post_id).ok().flatten()
|
||||
storage.get_post_upstreams(&post_id).unwrap_or_default()
|
||||
};
|
||||
if let Some(up) = upstream {
|
||||
for (up, _prio) in upstreams {
|
||||
let _ = network.send_to_peer_uni(&up, crate::protocol::MessageType::BlobHeaderDiff, &diff).await;
|
||||
}
|
||||
}
|
||||
|
|
@@ -3693,11 +3716,12 @@ impl Node {
|
|||
timestamp_ms: now,
|
||||
};
|
||||
network.propagate_engagement_diff(&post_id, &diff, &our_node_id).await;
|
||||
let upstream = {
|
||||
// Phase 6: send to all upstreams
|
||||
let upstreams = {
|
||||
let storage = self.storage.lock().await;
|
||||
storage.get_post_upstream(&post_id).ok().flatten()
|
||||
storage.get_post_upstreams(&post_id).unwrap_or_default()
|
||||
};
|
||||
if let Some(up) = upstream {
|
||||
for (up, _prio) in upstreams {
|
||||
let _ = network.send_to_peer_uni(&up, crate::protocol::MessageType::BlobHeaderDiff, &diff).await;
|
||||
}
|
||||
}
|
||||
|
|
@@ -3919,11 +3943,12 @@ impl Node {
|
|||
timestamp_ms: now,
|
||||
};
|
||||
self.network.propagate_engagement_diff(&post_id, &diff, &self.node_id).await;
|
||||
let upstream = {
|
||||
// Phase 6: send to all upstreams
|
||||
let upstreams = {
|
||||
let storage = self.storage.lock().await;
|
||||
storage.get_post_upstream(&post_id).ok().flatten()
|
||||
storage.get_post_upstreams(&post_id).unwrap_or_default()
|
||||
};
|
||||
if let Some(up) = upstream {
|
||||
for (up, _prio) in upstreams {
|
||||
let _ = self.network.send_to_peer_uni(&up, crate::protocol::MessageType::BlobHeaderDiff, &diff).await;
|
||||
}
|
||||
|
||||
|
|
@@ -4038,11 +4063,12 @@ impl Node {
|
|||
timestamp_ms: now,
|
||||
};
|
||||
self.network.propagate_engagement_diff(&post_id, &diff, &self.node_id).await;
|
||||
let upstream = {
|
||||
// Phase 6: send to all upstreams
|
||||
let upstreams = {
|
||||
let storage = self.storage.lock().await;
|
||||
storage.get_post_upstream(&post_id).ok().flatten()
|
||||
storage.get_post_upstreams(&post_id).unwrap_or_default()
|
||||
};
|
||||
if let Some(up) = upstream {
|
||||
for (up, _prio) in upstreams {
|
||||
let _ = self.network.send_to_peer_uni(&up, crate::protocol::MessageType::BlobHeaderDiff, &diff).await;
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue