Phase 2d (0.6.1-beta): route manifest + blob ops through file_holders
Switch ALL propagation-decision reads to the flat holder set.

push_manifest_to_downstream now targets file_holders instead of
blob_downstream. ManifestPush receive-side relay likewise — known holders
fan out to up to 5 most-recent peers instead of a directional tree.

Blob delete notices: single flat fan-out to file_holders; the legacy
upstream_node tree-healing field is emitted as None (wire-stable via serde
default) and ignored on receive — the post-0.6 flat model doesn't need
sender-role distinction. send_blob_delete_notices keeps its
Option<&Upstream> parameter as an unused placeholder for signature
stability with the call sites in this commit.

Other reads migrated:
- blob fetch cascade: step 2 now tries "known holders" (up to 5) instead of a single upstream
- manifest refresh: downstream_count reported from file_holder_count
- web/http post holder enumeration
- Worm search post/blob holder fallback (both connection.rs paths)
- DeleteRecord fan-out rewires to file_holders
- Under-replication replication check: < 2 holders

Storage additions:
- get_file_holder_count(file_id)
- remove_file_holder(file_id, peer_id)

Legacy upstream/downstream writes are still happening from Phase 2b;
those + the tables themselves go in 2e.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
3a0d2e93ab
commit
60463d1817
6 changed files with 103 additions and 128 deletions
|
|
@ -1015,15 +1015,16 @@ impl Network {
|
|||
sent
|
||||
}
|
||||
|
||||
/// Push updated manifests to all downstream peers for a given CID.
|
||||
/// Push an updated manifest to all known holders of the file (flat set,
|
||||
/// up to 5 most-recent). Replaces the legacy downstream-tree push.
|
||||
pub async fn push_manifest_to_downstream(
|
||||
&self,
|
||||
cid: &[u8; 32],
|
||||
manifest: &crate::types::CdnManifest,
|
||||
) -> usize {
|
||||
let downstream = {
|
||||
let holders = {
|
||||
let storage = self.storage.get().await;
|
||||
storage.get_blob_downstream(cid).unwrap_or_default()
|
||||
storage.get_file_holders(cid).unwrap_or_default()
|
||||
};
|
||||
let payload = crate::protocol::ManifestPushPayload {
|
||||
manifests: vec![crate::protocol::ManifestPushEntry {
|
||||
|
|
@ -1032,15 +1033,14 @@ impl Network {
|
|||
}],
|
||||
};
|
||||
let mut sent = 0;
|
||||
for (ds_nid, ds_addrs) in &downstream {
|
||||
if self.send_to_peer_uni(ds_nid, MessageType::ManifestPush, &payload).await.is_ok() {
|
||||
for (peer, peer_addrs) in &holders {
|
||||
if self.send_to_peer_uni(peer, MessageType::ManifestPush, &payload).await.is_ok() {
|
||||
sent += 1;
|
||||
// We pushed this file's manifest → downstream peer now holds it.
|
||||
let storage = self.storage.get().await;
|
||||
let _ = storage.touch_file_holder(
|
||||
cid,
|
||||
ds_nid,
|
||||
ds_addrs,
|
||||
peer,
|
||||
peer_addrs,
|
||||
crate::storage::HolderDirection::Sent,
|
||||
);
|
||||
}
|
||||
|
|
@ -1048,46 +1048,25 @@ impl Network {
|
|||
sent
|
||||
}
|
||||
|
||||
/// Send blob delete notices to downstream and upstream peers.
|
||||
/// Downstream peers receive our upstream info for tree healing.
|
||||
/// Upstream peers receive no upstream info (just "remove me as downstream").
|
||||
/// Send blob delete notices to all known holders of a file.
|
||||
/// Second argument kept as Option for signature stability; flat-holder
|
||||
/// model doesn't need separate upstream handling.
|
||||
pub async fn send_blob_delete_notices(
|
||||
&self,
|
||||
cid: &[u8; 32],
|
||||
downstream: &[(NodeId, Vec<String>)],
|
||||
upstream: Option<&(NodeId, Vec<String>)>,
|
||||
holders: &[(NodeId, Vec<String>)],
|
||||
_legacy_upstream: Option<&(NodeId, Vec<String>)>,
|
||||
) -> usize {
|
||||
let upstream_info = upstream.map(|(nid, addrs)| {
|
||||
crate::types::PeerWithAddress {
|
||||
n: hex::encode(nid),
|
||||
a: addrs.clone(),
|
||||
}
|
||||
});
|
||||
|
||||
let mut sent = 0;
|
||||
|
||||
// Notify downstream (with upstream info for tree healing)
|
||||
let ds_payload = crate::protocol::BlobDeleteNoticePayload {
|
||||
let payload = crate::protocol::BlobDeleteNoticePayload {
|
||||
cid: *cid,
|
||||
upstream_node: upstream_info,
|
||||
upstream_node: None,
|
||||
};
|
||||
for (ds_nid, _) in downstream {
|
||||
if self.send_to_peer_uni(ds_nid, MessageType::BlobDeleteNotice, &ds_payload).await.is_ok() {
|
||||
let mut sent = 0;
|
||||
for (peer, _addrs) in holders {
|
||||
if self.send_to_peer_uni(peer, MessageType::BlobDeleteNotice, &payload).await.is_ok() {
|
||||
sent += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Notify upstream (no upstream info)
|
||||
if let Some((up_nid, _)) = upstream {
|
||||
let up_payload = crate::protocol::BlobDeleteNoticePayload {
|
||||
cid: *cid,
|
||||
upstream_node: None,
|
||||
};
|
||||
if self.send_to_peer_uni(up_nid, MessageType::BlobDeleteNotice, &up_payload).await.is_ok() {
|
||||
sent += 1;
|
||||
}
|
||||
}
|
||||
|
||||
sent
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue