Feed pagination, duplicate identity detection, pkarr leak fix, Android SAF

Feed pagination:
- Cursor-based pagination: get_feed_page/get_all_posts_page (20 posts/page)
- Batched engagement queries (3 bulk SQL queries per page instead of 4 queries per post)
- IntersectionObserver for infinite scroll (sentinel at midpoint)
- Viewport-based media loading (blobs only load when post enters view)
- Pre-fetch next page immediately after current page renders

Duplicate identity detection:
- Anchor detects when a NodeId is already mesh-connected during initial
  exchange and sets duplicate_active flag in response
- Client skips sync tasks when duplicate detected
- Frontend shows red warning banner

Privacy:
- Fixed pkarr leak: clear_address_lookup() removes default dns.iroh.link
  publishing. Only mDNS (local network) discovery enabled.

Android:
- SAF integration via tauri-plugin-android-fs: exports open a native "Save As"
  dialog so users can save to Downloads/Drive/etc.
- Download/export paths use app data dir on Android (writable)
- File picker gated behind desktop cfg (blocking_pick not on Android)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Scott Reimers 2026-04-18 15:35:23 -04:00
parent 5e7eed9638
commit 288b53ffb1
12 changed files with 910 additions and 120 deletions

View file

@ -892,6 +892,158 @@ impl Storage {
Ok(posts)
}
/// Feed: paginated — posts from followed users, cursor-based by timestamp.
///
/// `before_ms` is an exclusive upper bound on `timestamp_ms` (the page
/// cursor); `None` means "start from the newest post". Returns at most
/// `limit` posts, newest first. The caller passes the oldest returned
/// timestamp as the next cursor to fetch the following page.
pub fn get_feed_page(&self, before_ms: Option<u64>, limit: usize) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
    // Single query shape for both first and subsequent pages: on the first
    // page the cursor defaults to i64::MAX, which no stored timestamp can
    // reach. This replaces the previous fragile pattern of binding a dummy
    // value to a `?1` that did not appear in the no-cursor SQL text.
    // Cursors above i64::MAX are clamped instead of being wrapped negative
    // by an `as` cast (which would have silently returned an empty page).
    let cursor = before_ms.map_or(i64::MAX, |b| i64::try_from(b).unwrap_or(i64::MAX));
    let sql = "SELECT p.id, p.author, p.content, p.attachments, p.timestamp_ms, p.visibility
         FROM posts p INNER JOIN follows f ON p.author = f.node_id
         WHERE p.timestamp_ms < ?1
         ORDER BY p.timestamp_ms DESC LIMIT ?2";
    let mut stmt = self.conn.prepare(sql)?;
    let rows = stmt.query_map(rusqlite::params![cursor, limit as i64], Self::parse_post_row)?;
    Self::collect_posts(rows)
}
/// All posts: paginated — cursor-based by timestamp.
///
/// `before_ms` is an exclusive upper bound on `timestamp_ms` (the page
/// cursor); `None` means "start from the newest post". Returns at most
/// `limit` posts, newest first.
pub fn list_posts_page(&self, before_ms: Option<u64>, limit: usize) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
    // Single query shape for both first and subsequent pages: on the first
    // page the cursor defaults to i64::MAX, which no stored timestamp can
    // reach. This replaces the previous fragile pattern of binding a dummy
    // value to a `?1` that did not appear in the no-cursor SQL text.
    // Cursors above i64::MAX are clamped instead of being wrapped negative
    // by an `as` cast (which would have silently returned an empty page).
    let cursor = before_ms.map_or(i64::MAX, |b| i64::try_from(b).unwrap_or(i64::MAX));
    let sql = "SELECT id, author, content, attachments, timestamp_ms, visibility
         FROM posts WHERE timestamp_ms < ?1
         ORDER BY timestamp_ms DESC LIMIT ?2";
    let mut stmt = self.conn.prepare(sql)?;
    let rows = stmt.query_map(rusqlite::params![cursor, limit as i64], Self::parse_post_row)?;
    Self::collect_posts(rows)
}
/// Batch: reaction counts for multiple posts at once.
///
/// Returns, for each post id, a list of (emoji, total count, reacted-by-us)
/// triples in descending-count order. Posts with no live reactions are
/// absent from the map.
pub fn get_reaction_counts_batch(&self, post_ids: &[PostId], our_node_id: &NodeId) -> anyhow::Result<std::collections::HashMap<PostId, Vec<(String, u64, bool)>>> {
    use std::collections::HashMap;
    let mut counts: HashMap<PostId, Vec<(String, u64, bool)>> = HashMap::new();
    if post_ids.is_empty() {
        return Ok(counts);
    }
    // ?1..?N cover the IN list; ?N+1 binds our own node id for the
    // "did we react" sub-count.
    let in_list = (1..=post_ids.len())
        .map(|n| format!("?{}", n))
        .collect::<Vec<_>>()
        .join(",");
    let sql = format!(
        "SELECT post_id, emoji, COUNT(*) as cnt, SUM(CASE WHEN reactor = ?{} THEN 1 ELSE 0 END) as my_count
         FROM reactions WHERE post_id IN ({}) AND deleted_at IS NULL
         GROUP BY post_id, emoji ORDER BY cnt DESC",
        post_ids.len() + 1,
        in_list
    );
    let mut stmt = self.conn.prepare(&sql)?;
    let mut bound: Vec<Box<dyn rusqlite::types::ToSql>> = Vec::with_capacity(post_ids.len() + 1);
    for id in post_ids {
        bound.push(Box::new(id.to_vec()));
    }
    bound.push(Box::new(our_node_id.to_vec()));
    let refs: Vec<&dyn rusqlite::types::ToSql> = bound.iter().map(|b| b.as_ref()).collect();
    let mapped = stmt.query_map(refs.as_slice(), |row| {
        Ok((
            row.get::<_, Vec<u8>>(0)?,
            row.get::<_, String>(1)?,
            row.get::<_, i64>(2)?,
            row.get::<_, i64>(3)?,
        ))
    })?;
    for entry in mapped {
        let (raw_id, emoji, total, mine) = entry?;
        // Rows whose post-id blob fails to parse are silently skipped.
        if let Ok(post_id) = blob_to_postid(raw_id) {
            counts
                .entry(post_id)
                .or_default()
                .push((emoji, total as u64, mine > 0));
        }
    }
    Ok(counts)
}
/// Batch: comment counts for multiple posts at once.
///
/// Returns a map from post id to the number of non-deleted comments; posts
/// with zero comments are absent from the map.
pub fn get_comment_counts_batch(&self, post_ids: &[PostId]) -> anyhow::Result<std::collections::HashMap<PostId, u64>> {
    use std::collections::HashMap;
    let mut counts: HashMap<PostId, u64> = HashMap::new();
    if post_ids.is_empty() {
        return Ok(counts);
    }
    // One positional placeholder (?1..?N) per post id.
    let in_list = (1..=post_ids.len())
        .map(|n| format!("?{}", n))
        .collect::<Vec<_>>()
        .join(",");
    let sql = format!(
        "SELECT post_id, COUNT(*) FROM comments WHERE post_id IN ({}) AND deleted_at IS NULL GROUP BY post_id",
        in_list
    );
    let mut stmt = self.conn.prepare(&sql)?;
    let bound: Vec<Box<dyn rusqlite::types::ToSql>> = post_ids
        .iter()
        .map(|id| Box::new(id.to_vec()) as Box<dyn rusqlite::types::ToSql>)
        .collect();
    let refs: Vec<&dyn rusqlite::types::ToSql> = bound.iter().map(|b| b.as_ref()).collect();
    let mapped = stmt.query_map(refs.as_slice(), |row| {
        Ok((row.get::<_, Vec<u8>>(0)?, row.get::<_, i64>(1)?))
    })?;
    for entry in mapped {
        let (raw_id, total) = entry?;
        // Rows whose post-id blob fails to parse are silently skipped.
        if let Ok(post_id) = blob_to_postid(raw_id) {
            counts.insert(post_id, total as u64);
        }
    }
    Ok(counts)
}
/// Batch: visibility intents for multiple posts at once.
///
/// Returns a map from post id to its `visibility_intent` column; a NULL
/// intent maps to the empty string. Unknown ids are simply absent.
pub fn get_post_intents_batch(&self, post_ids: &[PostId]) -> anyhow::Result<std::collections::HashMap<PostId, String>> {
    use std::collections::HashMap;
    let mut intents: HashMap<PostId, String> = HashMap::new();
    if post_ids.is_empty() {
        return Ok(intents);
    }
    // One positional placeholder (?1..?N) per post id.
    let in_list = (1..=post_ids.len())
        .map(|n| format!("?{}", n))
        .collect::<Vec<_>>()
        .join(",");
    let sql = format!(
        "SELECT id, visibility_intent FROM posts WHERE id IN ({})",
        in_list
    );
    let mut stmt = self.conn.prepare(&sql)?;
    let bound: Vec<Box<dyn rusqlite::types::ToSql>> = post_ids
        .iter()
        .map(|id| Box::new(id.to_vec()) as Box<dyn rusqlite::types::ToSql>)
        .collect();
    let refs: Vec<&dyn rusqlite::types::ToSql> = bound.iter().map(|b| b.as_ref()).collect();
    let mapped = stmt.query_map(refs.as_slice(), |row| {
        let raw_id: Vec<u8> = row.get(0)?;
        let intent: Option<String> = row.get(1)?;
        Ok((raw_id, intent.unwrap_or_default()))
    })?;
    for entry in mapped {
        let (raw_id, intent) = entry?;
        // Rows whose post-id blob fails to parse are silently skipped.
        if let Ok(post_id) = blob_to_postid(raw_id) {
            intents.insert(post_id, intent);
        }
    }
    Ok(intents)
}
/// Helper: parse one raw post row (id, author, content, attachments JSON,
/// timestamp, visibility JSON) from a query.
fn parse_post_row(row: &rusqlite::Row<'_>) -> rusqlite::Result<(Vec<u8>, Vec<u8>, String, String, i64, String)> {
    let id: Vec<u8> = row.get(0)?;
    let author: Vec<u8> = row.get(1)?;
    let content: String = row.get(2)?;
    let attachments_json: String = row.get(3)?;
    let timestamp_ms: i64 = row.get(4)?;
    let visibility_json: String = row.get(5)?;
    Ok((id, author, content, attachments_json, timestamp_ms, visibility_json))
}
/// Helper: collect parsed post rows into typed (PostId, Post, PostVisibility)
/// results. Propagates row and blob-decoding errors; malformed JSON columns
/// degrade to defaults instead of failing the whole page.
fn collect_posts(rows: rusqlite::MappedRows<'_, impl FnMut(&rusqlite::Row<'_>) -> rusqlite::Result<(Vec<u8>, Vec<u8>, String, String, i64, String)>>) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
    let mut out = Vec::new();
    for item in rows {
        let (id_raw, author_raw, content, attachments_json, ts, vis_json) = item?;
        // Bad JSON yields empty attachments / default visibility rather than
        // aborting the page.
        let attachments: Vec<Attachment> = serde_json::from_str(&attachments_json).unwrap_or_default();
        let visibility: PostVisibility = serde_json::from_str(&vis_json).unwrap_or_default();
        let post_id = blob_to_postid(id_raw)?;
        let post = Post {
            author: blob_to_nodeid(author_raw)?,
            content,
            attachments,
            timestamp_ms: ts as u64,
        };
        out.push((post_id, post, visibility));
    }
    Ok(out)
}
/// All posts with visibility (for sync protocol)
pub fn list_posts_with_visibility(&self) -> anyhow::Result<Vec<(PostId, Post, PostVisibility)>> {
self.list_posts_reverse_chron()