Phase 2d (0.6.1-beta): route manifest + blob ops through file_holders
Switch ALL propagation-decision reads to the flat holder set.

push_manifest_to_downstream now targets file_holders instead of
blob_downstream. ManifestPush receive-side relay likewise — known holders
fan out to up to 5 most-recent peers instead of a directional tree.

Blob delete notices: single flat fan-out to file_holders. The legacy
upstream_node tree-healing field is emitted as None (wire-stable via serde
default) and ignored on receive — the post-0.6 flat model doesn't need a
sender-role distinction. send_blob_delete_notices keeps its
Option<&Upstream> parameter as an unused placeholder for signature
stability with the call sites in this commit.

Other reads migrated:
- blob fetch cascade: step 2 now tries "known holders" (up to 5) instead
  of a single upstream
- manifest refresh: downstream_count reported from file_holder_count
- web/http post holder enumeration
- Worm search post/blob holder fallback (both connection.rs paths)
- DeleteRecord fan-out rewired to file_holders
- under-replication check: a file counts as under-replicated when
  < 2 holders are known

Storage additions:
- get_file_holder_count(file_id)
- remove_file_holder(file_id, peer_id)

Legacy upstream/downstream writes still happen from Phase 2b; those, plus
the tables themselves, are removed in 2e.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
parent 3a0d2e93ab
commit 60463d1817
6 changed files with 103 additions and 128 deletions
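A note on the "wire-stable via serde default" point in the commit message: a minimal sketch of how the retained upstream_node field could be declared. The struct name and the cid, upstream_node, n, and a fields all appear in the hunks below; the derives and everything else are illustrative assumptions, not the project's actual definition.

use serde::{Deserialize, Serialize};

// Sketch of the delete-notice payload. `#[serde(default)]` means a message
// arriving without `upstream_node` decodes as `None`, so post-0.6 senders
// can stop populating the legacy tree-healing field without breaking the
// wire format for older peers, and receivers can simply ignore it.
#[derive(Clone, Serialize, Deserialize)]
pub struct BlobDeleteNoticePayload {
    pub cid: [u8; 32],
    #[serde(default)]
    pub upstream_node: Option<PeerWithAddress>,
}

#[derive(Clone, Serialize, Deserialize)]
pub struct PeerWithAddress {
    pub n: String,      // hex-encoded node id
    pub a: Vec<String>, // dialable addresses
}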
@@ -2828,13 +2828,9 @@ impl ConnectionManager {
             if store.get_post_with_visibility(post_id).ok().flatten().is_some() {
                 Some(self.our_node_id)
             } else {
-                // CDN tree: do any of our downstream hosts have it?
-                let downstream = store.get_post_downstream(post_id).unwrap_or_default();
-                if !downstream.is_empty() {
-                    Some(downstream[0])
-                } else {
-                    None
-                }
+                // Any known holder of this post?
+                let holders = store.get_file_holders(post_id).unwrap_or_default();
+                holders.first().map(|(nid, _)| *nid)
             }
         };
         post_holder = found;
@@ -2848,9 +2844,9 @@ impl ConnectionManager {
         // Check CDN: do we know who has it via blob post ownership?
         let store = self.storage.get().await;
         if let Ok(Some(pid)) = store.get_blob_post_id(blob_id) {
-            let downstream = store.get_post_downstream(&pid).unwrap_or_default();
-            if !downstream.is_empty() {
-                blob_holder = Some(downstream[0]);
+            let holders = store.get_file_holders(&pid).unwrap_or_default();
+            if let Some((nid, _)) = holders.first() {
+                blob_holder = Some(*nid);
             }
         }
     }
@@ -4889,7 +4885,7 @@ impl ConnectionManager {
     let cm = conn_mgr.lock().await;

     // Collect blob CIDs + CDN peers before async work
-    let mut blob_cleanup: Vec<([u8; 32], Vec<(NodeId, Vec<String>)>, Option<(NodeId, Vec<String>)>)> = Vec::new();
+    let mut blob_cleanup: Vec<([u8; 32], Vec<(NodeId, Vec<String>)>)> = Vec::new();
     {
         let storage = cm.storage.get().await;
         for dr in &payload.records {
@@ -4897,9 +4893,8 @@ impl ConnectionManager {
             // Collect blobs for CDN cleanup before deleting
             let blob_cids = storage.get_blobs_for_post(&dr.post_id).unwrap_or_default();
             for cid in blob_cids {
-                let downstream = storage.get_blob_downstream(&cid).unwrap_or_default();
-                let upstream = storage.get_blob_upstream(&cid).ok().flatten();
-                blob_cleanup.push((cid, downstream, upstream));
+                let holders = storage.get_file_holders(&cid).unwrap_or_default();
+                blob_cleanup.push((cid, holders));
             }
             let _ = storage.store_delete(dr);
             let _ = storage.apply_delete(dr);
@@ -4915,18 +4910,11 @@ impl ConnectionManager {

     // Gather connections for CDN delete notices under lock, then send outside
     let mut delete_notices: Vec<(iroh::endpoint::Connection, crate::protocol::BlobDeleteNoticePayload)> = Vec::new();
-    for (cid, downstream, upstream) in &blob_cleanup {
-        let upstream_info = upstream.as_ref().map(|(nid, addrs)| PeerWithAddress { n: hex::encode(nid), a: addrs.clone() });
-        let ds_payload = crate::protocol::BlobDeleteNoticePayload { cid: *cid, upstream_node: upstream_info };
-        for (ds_nid, _) in downstream {
-            if let Some(pc) = cm.connections_ref().get(ds_nid) {
-                delete_notices.push((pc.connection.clone(), ds_payload.clone()));
-            }
-        }
-        if let Some((up_nid, _)) = upstream {
-            let up_payload = crate::protocol::BlobDeleteNoticePayload { cid: *cid, upstream_node: None };
-            if let Some(pc) = cm.connections_ref().get(up_nid) {
-                delete_notices.push((pc.connection.clone(), up_payload));
+    for (cid, holders) in &blob_cleanup {
+        let payload = crate::protocol::BlobDeleteNoticePayload { cid: *cid, upstream_node: None };
+        for (peer, _addrs) in holders {
+            if let Some(pc) = cm.connections_ref().get(peer) {
+                delete_notices.push((pc.connection.clone(), payload.clone()));
             }
         }
     }
@@ -5106,15 +5094,15 @@ impl ConnectionManager {
             );
             stored_entries.push(entry.clone());
         }
-        // Gather downstream peers for relay before dropping locks
+        // Gather file holders for relay before dropping locks
         let mut relay_targets: Vec<(NodeId, crate::protocol::ManifestPushPayload)> = Vec::new();
         for entry in &stored_entries {
-            let downstream = storage.get_blob_downstream(&entry.cid).unwrap_or_default();
-            for (ds_nid, _) in downstream {
-                if ds_nid == remote_node_id {
+            let holders = storage.get_file_holders(&entry.cid).unwrap_or_default();
+            for (peer, _addrs) in holders {
+                if peer == remote_node_id {
                     continue;
                 }
-                relay_targets.push((ds_nid, crate::protocol::ManifestPushPayload {
+                relay_targets.push((peer, crate::protocol::ManifestPushPayload {
                     manifests: vec![entry.clone()],
                 }));
             }
@@ -5315,32 +5303,14 @@ impl ConnectionManager {
     let storage = cm.storage.get().await;
     let cid = payload.cid;

-    // Check if sender was our upstream for this blob
-    let was_upstream = storage.get_blob_upstream(&cid).ok().flatten()
-        .map(|(nid, _)| nid == remote_node_id)
-        .unwrap_or(false);
-
-    if was_upstream {
-        // Sender was our upstream — clear it
-        let _ = storage.remove_blob_upstream(&cid);
-
-        // If they provided their upstream, store it as our new upstream
-        if let Some(ref new_up) = payload.upstream_node {
-            if let Ok(nid_bytes) = hex::decode(&new_up.n) {
-                if let Ok(nid) = <[u8; 32]>::try_from(nid_bytes.as_slice()) {
-                    let _ = storage.store_blob_upstream(&cid, &nid, &new_up.a);
-                }
-            }
-        }
-    } else {
-        // Sender was our downstream — remove them
-        let _ = storage.remove_blob_downstream(&cid, &remote_node_id);
-    }
+    // Flat-holder model: drop the sender as a holder of this file.
+    // The author's DeleteRecord (separate signed message) is what
+    // triggers the actual blob removal for followers.
+    let _ = storage.remove_file_holder(&cid, &remote_node_id);

     info!(
         peer = hex::encode(remote_node_id),
         cid = hex::encode(cid),
-        was_upstream,
         "Received blob delete notice"
     );
 }
@@ -5745,21 +5715,28 @@ impl ConnectionManager {
     let storage = storage.get().await;
     let manifest: Option<crate::types::CdnManifest> = storage.get_cdn_manifest(&payload.cid).ok().flatten().and_then(|json| {
         if let Ok(am) = serde_json::from_str::<crate::types::AuthorManifest>(&json) {
-            let ds_count = storage.get_blob_downstream_count(&payload.cid).unwrap_or(0);
+            let ds_count = storage.get_file_holder_count(&payload.cid).unwrap_or(0);
             Some(crate::types::CdnManifest { author_manifest: am, host: our_node_id, host_addresses: vec![], source: our_node_id, source_addresses: vec![], downstream_count: ds_count })
         } else { serde_json::from_str(&json).ok() }
     });
     let (cdn_registered, cdn_redirect_peers) = if !payload.requester_addresses.is_empty() {
-        let ok = storage.add_blob_downstream(&payload.cid, &remote_node_id, &payload.requester_addresses).unwrap_or(false);
+        let prior_count = storage.get_file_holder_count(&payload.cid).unwrap_or(0);
+        let _ = storage.touch_file_holder(
+            &payload.cid,
+            &remote_node_id,
+            &payload.requester_addresses,
+            crate::storage::HolderDirection::Sent,
+        );
-        if ok { (true, vec![]) } else {
-            let downstream = storage.get_blob_downstream(&payload.cid).unwrap_or_default();
-            let redirects: Vec<PeerWithAddress> = downstream.into_iter().map(|(nid, addrs)| PeerWithAddress { n: hex::encode(nid), a: addrs }).collect();
+        // If we already had 5 holders before adding this one, the
+        // requester should consult them too for CDN lookups.
+        if prior_count < 5 {
+            (true, vec![])
+        } else {
+            let holders = storage.get_file_holders(&payload.cid).unwrap_or_default();
+            let redirects: Vec<PeerWithAddress> = holders.into_iter()
+                .filter(|(nid, _)| *nid != remote_node_id)
+                .map(|(nid, addrs)| PeerWithAddress { n: hex::encode(nid), a: addrs })
+                .collect();
             (false, redirects)
         }
     } else { (false, vec![]) };
@@ -5786,7 +5763,7 @@ impl ConnectionManager {
         Some(json) => {
             let manifest = if let Ok(am) = serde_json::from_str::<crate::types::AuthorManifest>(&json) {
                 if am.updated_at > payload.current_updated_at {
-                    let ds_count = store.get_blob_downstream_count(&payload.cid).unwrap_or(0);
+                    let ds_count = store.get_file_holder_count(&payload.cid).unwrap_or(0);
                     Some(crate::types::CdnManifest { author_manifest: am, host: our_node_id, host_addresses: vec![], source: our_node_id, source_addresses: vec![], downstream_count: ds_count })
                 } else { None }
             } else { None };
@@ -7758,8 +7735,8 @@ impl ConnectionActor {
         if s.get_post_with_visibility(post_id).ok().flatten().is_some() {
             post_holder = Some(ctx.our_node_id);
         } else {
-            let downstream = s.get_post_downstream(post_id).unwrap_or_default();
-            if !downstream.is_empty() { post_holder = Some(downstream[0]); }
+            let holders = s.get_file_holders(post_id).unwrap_or_default();
+            if let Some((nid, _)) = holders.first() { post_holder = Some(*nid); }
         }
     }

@@ -7769,8 +7746,8 @@ impl ConnectionActor {
     } else {
         let s = ctx.storage.get().await;
         if let Ok(Some(pid)) = s.get_blob_post_id(blob_id) {
-            let downstream = s.get_post_downstream(&pid).unwrap_or_default();
-            if !downstream.is_empty() { blob_holder = Some(downstream[0]); }
+            let holders = s.get_file_holders(&pid).unwrap_or_default();
+            if let Some((nid, _)) = holders.first() { blob_holder = Some(*nid); }
         }
     }
 }
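The two storage additions named in the commit message (get_file_holder_count, remove_file_holder) are not part of the visible hunks. A minimal sketch of plausible implementations, assuming a rusqlite-backed Storage and a file_holders(file_id, peer_id, ...) table; the schema, the conn field, and the return types are assumptions, not the project's actual layout.

use rusqlite::{params, Connection, Result};

pub struct Storage {
    conn: Connection, // assumed: the real Storage may wrap its DB handle differently
}

impl Storage {
    /// Count known holders of a file; this is what drives the < 2
    /// under-replication check and the < 5 fan-out cap described above.
    pub fn get_file_holder_count(&self, file_id: &[u8; 32]) -> Result<u64> {
        self.conn.query_row(
            "SELECT COUNT(*) FROM file_holders WHERE file_id = ?1",
            params![file_id.as_slice()],
            |row| row.get(0),
        )
    }

    /// Forget one holder of a file, e.g. after it sends a blob delete notice.
    pub fn remove_file_holder(&self, file_id: &[u8; 32], peer_id: &[u8; 32]) -> Result<usize> {
        self.conn.execute(
            "DELETE FROM file_holders WHERE file_id = ?1 AND peer_id = ?2",
            params![file_id.as_slice(), peer_id.as_slice()],
        )
    }
}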