Feed pagination, duplicate identity detection, pkarr leak fix, Android SAF
Feed pagination: - Cursor-based pagination: get_feed_page/get_all_posts_page (20 posts/page) - Batched engagement queries (3 bulk SQL queries instead of 4 per post) - IntersectionObserver for infinite scroll (sentinel at midpoint) - Viewport-based media loading (blobs only load when post enters view) - Pre-fetch next page immediately after current page renders Duplicate identity detection: - Anchor detects when a NodeId is already mesh-connected during initial exchange and sets duplicate_active flag in response - Client skips sync tasks when duplicate detected - Frontend shows red warning banner Privacy: - Fixed pkarr leak: clear_address_lookup() removes default dns.iroh.link publishing. Only mDNS (local network) discovery enabled. Android: - SAF integration via tauri-plugin-android-fs: exports open a native "Save As" dialog so users can save to Downloads/Drive/etc. - Download/export paths use app data dir on Android (writable) - File picker gated behind desktop cfg (blocking_pick not available on Android) Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
5e7eed9638
commit
288b53ffb1
12 changed files with 910 additions and 120 deletions
|
|
@ -74,7 +74,7 @@ pub type IntroId = [u8; 16];
|
|||
|
||||
/// Result of initial exchange: accepted or refused with optional redirect peer.
|
||||
pub enum ExchangeResult {
|
||||
Accepted,
|
||||
Accepted { duplicate_active: bool },
|
||||
Refused { redirect: Option<PeerWithAddress> },
|
||||
}
|
||||
|
||||
|
|
@ -1519,6 +1519,7 @@ impl ConnectionManager {
|
|||
http_addr: self.http_addr.clone(),
|
||||
device_role: None,
|
||||
cache_pressure: None,
|
||||
duplicate_active: None,
|
||||
}
|
||||
};
|
||||
|
||||
|
|
@ -1658,6 +1659,7 @@ impl ConnectionManager {
|
|||
http_addr: self.http_addr.clone(),
|
||||
device_role: None,
|
||||
cache_pressure: None,
|
||||
duplicate_active: None,
|
||||
}
|
||||
};
|
||||
|
||||
|
|
@ -3937,7 +3939,7 @@ impl ConnectionManager {
|
|||
let session_conn = session.connection.clone();
|
||||
drop(cm); // release lock before async work
|
||||
match initial_exchange_connect(&storage_clone, &our_node_id, &session_conn, requester, None, our_nat_type, our_http_capable, our_http_addr.clone(), None, None).await {
|
||||
Ok(ExchangeResult::Accepted) => {
|
||||
Ok(ExchangeResult::Accepted { .. }) => {
|
||||
tracing::info!(peer = hex::encode(requester), "Target-side: initial exchange after hole punch");
|
||||
}
|
||||
Ok(ExchangeResult::Refused { .. }) => {
|
||||
|
|
@ -5474,11 +5476,13 @@ impl ConnectionManager {
|
|||
ConnectionManager::handle_pull_request_unlocked(&storage, our_node_id, remote_node_id, recv, send).await?;
|
||||
}
|
||||
MessageType::InitialExchange => {
|
||||
let (storage, our_node_id, anchor_addr, our_nat_type, our_http_capable, our_http_addr) = {
|
||||
let (storage, our_node_id, anchor_addr, our_nat_type, our_http_capable, our_http_addr, is_duplicate) = {
|
||||
let cm = conn_mgr.lock().await;
|
||||
(cm.storage_ref(), *cm.our_node_id(), cm.build_anchor_advertised_addr(), cm.nat_type(), cm.http_capable, cm.http_addr.clone())
|
||||
// Duplicate identity detection: is this NodeId already mesh-connected?
|
||||
let dup = cm.connections.contains_key(&remote_node_id);
|
||||
(cm.storage_ref(), *cm.our_node_id(), cm.build_anchor_advertised_addr(), cm.nat_type(), cm.http_capable, cm.http_addr.clone(), dup)
|
||||
};
|
||||
initial_exchange_accept(&storage, &our_node_id, send, recv, remote_node_id, anchor_addr, None, our_nat_type, our_http_capable, our_http_addr, None, None)
|
||||
initial_exchange_accept(&storage, &our_node_id, send, recv, remote_node_id, anchor_addr, None, our_nat_type, our_http_capable, our_http_addr, None, None, is_duplicate)
|
||||
.await?;
|
||||
}
|
||||
MessageType::AddressRequest => {
|
||||
|
|
@ -8331,6 +8335,7 @@ pub async fn initial_exchange_connect(
|
|||
http_addr: our_http_addr,
|
||||
device_role: our_device_role.map(|r| r.as_str().to_string()),
|
||||
cache_pressure: our_cache_pressure,
|
||||
duplicate_active: None,
|
||||
}
|
||||
};
|
||||
|
||||
|
|
@ -8349,8 +8354,12 @@ pub async fn initial_exchange_connect(
|
|||
anyhow::bail!("expected InitialExchange, got {:?}", msg_type);
|
||||
}
|
||||
let their_payload: InitialExchangePayload = read_payload(&mut recv, MAX_PAYLOAD).await?;
|
||||
let dup = their_payload.duplicate_active.unwrap_or(false);
|
||||
if dup {
|
||||
tracing::warn!(peer = hex::encode(remote_node_id), "Anchor reports duplicate identity active on network");
|
||||
}
|
||||
process_exchange_payload(storage, our_node_id, &remote_node_id, &their_payload).await?;
|
||||
Ok(ExchangeResult::Accepted)
|
||||
Ok(ExchangeResult::Accepted { duplicate_active: dup })
|
||||
};
|
||||
|
||||
match tokio::time::timeout(std::time::Duration::from_secs(10), exchange_fut).await {
|
||||
|
|
@ -8377,6 +8386,7 @@ pub async fn initial_exchange_accept(
|
|||
our_http_addr: Option<String>,
|
||||
our_device_role: Option<crate::types::DeviceRole>,
|
||||
our_cache_pressure: Option<u8>,
|
||||
duplicate_detected: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
let their_payload: InitialExchangePayload = read_payload(&mut recv, MAX_PAYLOAD).await?;
|
||||
|
||||
|
|
@ -8404,9 +8414,14 @@ pub async fn initial_exchange_accept(
|
|||
http_addr: our_http_addr,
|
||||
device_role: our_device_role.map(|r| r.as_str().to_string()),
|
||||
cache_pressure: our_cache_pressure,
|
||||
duplicate_active: if duplicate_detected { Some(true) } else { None },
|
||||
}
|
||||
};
|
||||
|
||||
if duplicate_detected {
|
||||
tracing::warn!(peer = hex::encode(remote_node_id), "Duplicate identity detected — notifying connecting node");
|
||||
}
|
||||
|
||||
write_typed_message(&mut send, MessageType::InitialExchange, &our_payload).await?;
|
||||
send.finish()?;
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue