Phase 2d (0.6.1-beta): route manifest + blob ops through file_holders
Switch ALL propagation-decision reads to the flat holder set.

push_manifest_to_downstream now targets file_holders instead of blob_downstream. The ManifestPush receive-side relay does likewise: known holders fan out to up to 5 most-recent peers instead of a directional tree.

Blob delete notices become a single flat fan-out to file_holders; the legacy upstream_node tree-healing field is emitted as None (wire-stable via serde default; see the sketch below the commit metadata) and ignored on receive, since the post-0.6 flat model doesn't need a sender-role distinction. send_blob_delete_notices keeps its Option<&Upstream> parameter as an unused placeholder for signature stability with the call sites in this commit.

Other reads migrated:
- blob fetch cascade: step 2 now tries "known holders" (up to 5) instead of a single upstream
- manifest refresh: downstream_count reported from file_holder_count
- web/http post holder enumeration
- Worm search post/blob holder fallback (both connection.rs paths)
- DeleteRecord fan-out rewired to file_holders
- under-replication check: < 2 holders

Storage additions:
- get_file_holder_count(file_id)
- remove_file_holder(file_id, peer_id)

Legacy upstream/downstream writes still happen from Phase 2b; those + the tables themselves go in 2e.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
parent 3a0d2e93ab
commit 60463d1817
6 changed files with 103 additions and 128 deletions
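A minimal sketch of the serde-default wire-stability trick the message describes, assuming serde-derived message types; BlobDeleteNotice and every field except upstream_node are hypothetical stand-ins:

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct BlobDeleteNotice {
        cid: [u8; 32],
        // Legacy tree-healing field (hypothetical type): always written as
        // None post-0.6 and ignored on receive. Keeping the Option in the
        // struct keeps the encoded shape stable for older peers, and
        // #[serde(default)] lets decoding tolerate peers that omit it.
        #[serde(default)]
        upstream_node: Option<Vec<u8>>,
    }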
@@ -1384,16 +1384,17 @@ impl Node {
         // Collect redirect peers from responses in case we need them later
         let mut redirect_peers: Vec<crate::types::PeerWithAddress> = Vec::new();

-        // 2. Try existing upstream (if we previously fetched this blob)
-        let upstream = {
+        // 2. Try known holders (up to 5 most-recent peers we've interacted
+        // with about this file).
+        let known_holders = {
             let storage = self.storage.get().await;
-            storage.get_blob_upstream(cid)?
+            storage.get_file_holders(cid).unwrap_or_default()
         };
-        if let Some((upstream_nid, _upstream_addrs)) = upstream {
-            match self.fetch_blob_from_peer(cid, &upstream_nid, post_id, author, mime_type, created_at).await {
+        for (holder_nid, _addrs) in &known_holders {
+            match self.fetch_blob_from_peer(cid, holder_nid, post_id, author, mime_type, created_at).await {
                 Ok(Some(data)) => return Ok(Some(data)),
                 Ok(None) => {}
-                Err(e) => warn!(error = %e, "blob fetch from upstream failed"),
+                Err(e) => warn!(error = %e, "blob fetch from known holder failed"),
             }
         }
 
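The "up to 5 most-recent" holder selection this hunk leans on could look like the sketch below; Holder and last_seen_ms are assumptions about what the file_holders table tracks, only the cap of 5 comes from the commit:

    // Keep only the holders we interacted with most recently.
    #[derive(Clone, Debug)]
    struct Holder {
        peer_id: [u8; 32],
        addrs: Vec<String>,
        last_seen_ms: u64,
    }

    fn most_recent_holders(mut holders: Vec<Holder>, cap: usize) -> Vec<Holder> {
        // Most recently seen first...
        holders.sort_by(|a, b| b.last_seen_ms.cmp(&a.last_seen_ms));
        // ...then truncate to the fan-out cap (5 in this commit).
        holders.truncate(cap);
        holders
    }

Trying holders most-recent-first means the first fetch attempt goes to the peer most likely to still be reachable.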
@@ -1992,14 +1993,13 @@ impl Node {
             signature,
         };

-        // Collect blob CIDs + CDN peers before cleanup
+        // Collect blob CIDs + known holders before cleanup (for delete notices)
         let blob_cdn_info: Vec<([u8; 32], Vec<(NodeId, Vec<String>)>, Option<(NodeId, Vec<String>)>)> = {
             let storage = self.storage.get().await;
             let cids = storage.get_blobs_for_post(post_id).unwrap_or_default();
             cids.into_iter().map(|cid| {
-                let downstream = storage.get_blob_downstream(&cid).unwrap_or_default();
-                let upstream = storage.get_blob_upstream(&cid).ok().flatten();
-                (cid, downstream, upstream)
+                let holders = storage.get_file_holders(&cid).unwrap_or_default();
+                (cid, holders, None::<(NodeId, Vec<String>)>)
             }).collect()
         };
 
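The ordering in this hunk matters: holder info is snapshotted before cleanup deletes the rows the notices would be built from. A std-only sketch of that shape, with Cid and HolderList as stand-ins for the real types:

    use std::collections::HashMap;

    type Cid = [u8; 32];
    type HolderList = Vec<String>; // stand-in for Vec<(NodeId, Vec<String>)>

    // Returns the (cid, holders) pairs the caller should notify, after
    // removing the local rows.
    fn cleanup_post_blobs(
        holders_by_cid: &mut HashMap<Cid, HolderList>,
        post_blobs: &[Cid],
    ) -> Vec<(Cid, HolderList)> {
        // 1. Snapshot holders while the rows still exist.
        let notices: Vec<(Cid, HolderList)> = post_blobs
            .iter()
            .filter_map(|cid| holders_by_cid.get(cid).map(|h| (*cid, h.clone())))
            .collect();
        // 2. Only then drop local state.
        for cid in post_blobs {
            holders_by_cid.remove(cid);
        }
        notices
    }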
@@ -3119,10 +3119,10 @@ impl Node {
                 &cdn_manifest.author_manifest.author,
                 cdn_manifest.author_manifest.updated_at,
             );
-            // Relay to our downstream
-            let downstream = s.get_blob_downstream(cid).unwrap_or_default();
+            // Relay to known holders (flat set)
+            let holders = s.get_file_holders(cid).unwrap_or_default();
             drop(s);
-            if !downstream.is_empty() {
+            if !holders.is_empty() {
                 network.push_manifest_to_downstream(cid, &cdn_manifest).await;
             }
             tracing::debug!(
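A flat fan-out has no tree roles to coordinate: the relay just pushes to each known holder and tolerates individual failures. A sketch under that assumption; push_manifest_to_peer is hypothetical, the commit only names the aggregate push_manifest_to_downstream:

    // Push the refreshed manifest to every known holder, best-effort.
    async fn relay_manifest(holders: &[[u8; 32]], cid: &[u8; 32]) {
        for peer in holders {
            // One slow or dead holder must not block the rest of the fan-out.
            if let Err(e) = push_manifest_to_peer(peer, cid).await {
                eprintln!("manifest push to holder failed: {e}");
            }
        }
    }

    async fn push_manifest_to_peer(_peer: &[u8; 32], _cid: &[u8; 32]) -> Result<(), String> {
        Ok(()) // network stub for the sketch
    }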
@@ -3286,18 +3286,16 @@ impl Node {
         compute_blob_priority_standalone(candidate, &self.node_id, follows, audience_members, now_ms)
     }

-    /// Delete a blob with CDN notifications to upstream/downstream.
+    /// Delete a blob with CDN notifications to known holders.
    pub async fn delete_blob_with_cdn_notify(&self, cid: &[u8; 32]) -> anyhow::Result<()> {
-        // Gather CDN peers before cleanup
-        let (downstream, upstream) = {
+        // Gather known holders before cleanup
+        let holders = {
             let storage = self.storage.get().await;
-            let ds = storage.get_blob_downstream(cid).unwrap_or_default();
-            let up = storage.get_blob_upstream(cid).ok().flatten();
-            (ds, up)
+            storage.get_file_holders(cid).unwrap_or_default()
         };

-        // Send CDN delete notices
-        self.network.send_blob_delete_notices(cid, &downstream, upstream.as_ref()).await;
+        // Send CDN delete notices to all holders
+        self.network.send_blob_delete_notices(cid, &holders, None).await;

         // Clean up local storage
         {
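The None passed above feeds the placeholder parameter the commit message mentions. A sketch of that signature-stability pattern, with Upstream and the holder tuple shape as stand-ins for the real network-layer types:

    struct Upstream; // stand-in for the real network-layer type

    // The third parameter is dead weight kept only so this commit's call
    // sites keep compiling; Phase 2e can delete it and touch every caller
    // in one pass.
    async fn send_blob_delete_notices(
        cid: &[u8; 32],
        holders: &[(Vec<u8>, Vec<String>)],
        _legacy_upstream: Option<&Upstream>,
    ) {
        for (_peer_id, _addrs) in holders {
            // Flat fan-out: every known holder gets the same delete notice.
            let _ = cid;
        }
    }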
@@ -4330,10 +4328,10 @@ impl Node {
             }
         };

-        // Filter to under-replicated (< 2 downstream)
+        // Filter to under-replicated (< 2 holders)
         let mut needs_replication = Vec::new();
         for pid in &recent_ids {
-            match storage.get_post_downstream_count(pid) {
+            match storage.get_file_holder_count(pid) {
                 Ok(count) if count < 2 => {
                     needs_replication.push(*pid);
                 }
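The new threshold reads directly off the flat holder count: fewer than 2 known holders means the file needs replication. A std-only sketch, with the HashMap standing in for the file_holders table behind get_file_holder_count:

    use std::collections::HashMap;

    /// True when fewer than 2 peers are known to hold the file.
    fn is_under_replicated(holder_counts: &HashMap<[u8; 32], usize>, file_id: &[u8; 32]) -> bool {
        holder_counts.get(file_id).copied().unwrap_or(0) < 2
    }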