Phase 2e (0.6.1-beta): drop legacy upstream/downstream tables
The file_holders table is now the only tracker of per-file peer relationships. The post_upstream, post_downstream, blob_upstream, and blob_downstream tables are dropped at first launch, after the seed migration copies any existing entries into file_holders.

Schema:
- DROP TABLE IF EXISTS on all four legacy tables after seeding
- The seed migration guards with a sqlite_master table-exists check, so fresh installs don't crash trying to read non-existent source tables
- Remove the CREATE TABLE statements for the four tables from init
- Remove the Protocol v4 Phase 6 post_upstream priority migration (dead)
- Remove the blob_upstream preferred_tree column migration (dead)

Rust:
- Remove the add/get/remove methods for post_upstream, post_downstream, blob_upstream, and blob_downstream
- Remove get_blob_upstream_preferred_tree and its update variant
- Rewrite the downstream_count subquery in get_eviction_candidates to count file_holders entries
- Rewrite the cascade cleanup in apply_delete to clear file_holders instead of post_upstream/post_downstream
- cleanup_cdn_for_blob now clears file_holders entries for the CID

Callers:
- All dual-write sites in connection.rs and node.rs now call touch_file_holder only (legacy writes removed)
- get_stale_manifests is replaced with get_stale_manifest_cids; the caller in node.rs picks a refresh source from file_holders

Tests:
- Remove blob_upstream_crud, blob_downstream_crud_and_limit, blob_upstream_preferred_tree, remove_blob_upstream, and post_downstream_crud
- Add file_holders_lru_cap and file_holders_direction_promotion

All 110 core tests pass. The workspace compiles clean.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
60463d1817
commit
5d9ba22427
3 changed files with 112 additions and 504 deletions
|
|
@ -1393,8 +1393,6 @@ impl ConnectionManager {
|
|||
{
|
||||
let s = storage.get().await;
|
||||
for pid in &new_post_ids {
|
||||
let prio = s.get_post_upstreams(pid).map(|v| v.len() as u8).unwrap_or(0);
|
||||
let _ = s.add_post_upstream(pid, peer_id, prio);
|
||||
let _ = s.touch_file_holder(
|
||||
pid,
|
||||
peer_id,
|
||||
|
|
@ -1946,8 +1944,6 @@ impl ConnectionManager {
|
|||
{
|
||||
let storage = self.storage.get().await;
|
||||
for pid in &new_post_ids {
|
||||
let prio = storage.get_post_upstreams(pid).map(|v| v.len() as u8).unwrap_or(0);
|
||||
let _ = storage.add_post_upstream(pid, from, prio);
|
||||
let _ = storage.touch_file_holder(
|
||||
pid,
|
||||
from,
|
||||
|
|
@ -2046,8 +2042,6 @@ impl ConnectionManager {
|
|||
{
|
||||
let storage = self.storage.get().await;
|
||||
for pid in &new_post_ids {
|
||||
let prio = storage.get_post_upstreams(pid).map(|v| v.len() as u8).unwrap_or(0);
|
||||
let _ = storage.add_post_upstream(pid, peer_id, prio);
|
||||
let _ = storage.touch_file_holder(
|
||||
pid,
|
||||
peer_id,
|
||||
|
|
@ -4984,8 +4978,6 @@ impl ConnectionManager {
|
|||
&push.post.post,
|
||||
&push.post.visibility,
|
||||
);
|
||||
let prio = storage.get_post_upstreams(&push.post.id).map(|v| v.len() as u8).unwrap_or(0);
|
||||
let _ = storage.add_post_upstream(&push.post.id, &remote_node_id, prio);
|
||||
let _ = storage.touch_file_holder(
|
||||
&push.post.id,
|
||||
&remote_node_id,
|
||||
|
|
@ -5205,8 +5197,6 @@ impl ConnectionManager {
|
|||
let cm = cm_arc.lock().await;
|
||||
let storage = cm.storage.get().await;
|
||||
if storage.store_post_with_visibility(&sync_post.id, &sync_post.post, &sync_post.visibility).unwrap_or(false) {
|
||||
let prio = storage.get_post_upstreams(&sync_post.id).map(|v| v.len() as u8).unwrap_or(0);
|
||||
let _ = storage.add_post_upstream(&sync_post.id, &sender_id, prio);
|
||||
let _ = storage.touch_file_holder(
|
||||
&sync_post.id,
|
||||
&sender_id,
|
||||
|
|
@ -5454,7 +5444,6 @@ impl ConnectionManager {
|
|||
let payload: PostDownstreamRegisterPayload = read_payload(recv, MAX_PAYLOAD).await?;
|
||||
let cm = conn_mgr.lock().await;
|
||||
let storage = cm.storage.get().await;
|
||||
let _ = storage.add_post_downstream(&payload.post_id, &remote_node_id);
|
||||
let _ = storage.touch_file_holder(
|
||||
&payload.post_id,
|
||||
&remote_node_id,
|
||||
|
|
@ -6108,9 +6097,8 @@ impl ConnectionManager {
|
|||
to_pull.push(*pid);
|
||||
}
|
||||
|
||||
// Register as downstream for all accepted posts
|
||||
// Register as holder for all accepted posts
|
||||
for pid in &acc {
|
||||
let _ = storage.add_post_downstream(pid, &remote_node_id);
|
||||
let _ = storage.touch_file_holder(
|
||||
pid,
|
||||
&remote_node_id,
|
||||
|
|
@ -6167,8 +6155,6 @@ impl ConnectionManager {
|
|||
let cm = cm_arc.lock().await;
|
||||
let storage = cm.storage.get().await;
|
||||
let _ = storage.store_post_with_visibility(&sp.id, &sp.post, &sp.visibility);
|
||||
let prio = storage.get_post_upstreams(&sp.id).map(|v| v.len() as u8).unwrap_or(0);
|
||||
let _ = storage.add_post_upstream(&sp.id, &sender, prio);
|
||||
let _ = storage.touch_file_holder(
|
||||
&sp.id,
|
||||
&sender,
|
||||
|
|
@ -6201,7 +6187,6 @@ impl ConnectionManager {
|
|||
let cm = cm_arc.lock().await;
|
||||
let storage = cm.storage.get().await;
|
||||
let _ = storage.record_blob(&att.cid, post_id, &post_author, data.len() as u64, &att.mime_type, att.size_bytes);
|
||||
let _ = storage.add_post_upstream(&att.cid, &sender, 0);
|
||||
let _ = storage.touch_file_holder(
|
||||
&att.cid,
|
||||
&sender,
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue