Design doc audit: update badges, fix outdated descriptions, add CDN/replication docs
Badge updates: - BlobHeader: Planned → Complete (has receipt/comment slots, reactions, policy) - LAN Discovery: Planned → Complete (iroh mDNS integration) - UPnP TCP: Planned → Complete (both UDP+TCP renewal cycles) - HTTP Post Delivery: added Complete badge Description fixes: - Share links: removed hostlist encoding, added tiered serving (redirect → punch → proxy) - Eviction formula: added share_boost factor (+100 for 3+ downstream) - Message types table: added ReplicationRequest/Response (0xE1/0xE2), count 41 → 49 - Engagement: added tombstone propagation description New sections: - Device roles & bandwidth budgets (Intermittent/Available/Persistent) - Active CDN replication (10-min cycle, target prioritization, graceful degradation) Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
075366e876
commit
3cc39590a7
4 changed files with 94 additions and 42 deletions
|
|
@ -9,6 +9,8 @@
|
|||
"notification:allow-request-permission",
|
||||
"notification:allow-notify",
|
||||
"notification:allow-check-permissions",
|
||||
"notification:allow-show"
|
||||
"notification:allow-show",
|
||||
"notification:allow-cancel",
|
||||
"notification:allow-remove-active"
|
||||
]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1 +1 @@
|
|||
{"default":{"identifier":"default","description":"Default capability for the main window","local":true,"windows":["main"],"permissions":["core:default","notification:default","notification:allow-is-permission-granted","notification:allow-request-permission","notification:allow-notify","notification:allow-check-permissions","notification:allow-show"]}}
|
||||
{"default":{"identifier":"default","description":"Default capability for the main window","local":true,"windows":["main"],"permissions":["core:default","notification:default","notification:allow-is-permission-granted","notification:allow-request-permission","notification:allow-notify","notification:allow-check-permissions","notification:allow-show","notification:allow-cancel","notification:allow-remove-active"]}}
|
||||
|
|
@ -357,6 +357,8 @@ function toast(msg) {
|
|||
|
||||
// --- Notifications (Tauri plugin) ---
|
||||
let _notifReady = false;
|
||||
let _activeNotificationIds = new Set();
|
||||
|
||||
async function maybeNotify(title, body, tag) {
|
||||
try {
|
||||
if (window.__TAURI__?.notification) {
|
||||
|
|
@ -367,16 +369,42 @@ async function maybeNotify(title, body, tag) {
|
|||
granted = perm === 'granted';
|
||||
}
|
||||
if (granted) {
|
||||
sendNotification({ title, body, channelId: 'default' });
|
||||
sendNotification({ title, body, channelId: 'default', id: tag ? hashCode(tag) : undefined });
|
||||
if (tag) _activeNotificationIds.add(tag);
|
||||
}
|
||||
} else if ('Notification' in window) {
|
||||
// Fallback for browsers
|
||||
if (Notification.permission === 'default') await Notification.requestPermission();
|
||||
if (Notification.permission === 'granted') new Notification(title, { body, tag, silent: false });
|
||||
}
|
||||
} catch (_) {}
|
||||
}
|
||||
|
||||
async function clearNotifications(tagPrefix) {
|
||||
try {
|
||||
if (window.__TAURI__?.notification) {
|
||||
const { removeActive } = window.__TAURI__.notification;
|
||||
if (!removeActive) return;
|
||||
const toRemove = [..._activeNotificationIds].filter(t => t.startsWith(tagPrefix));
|
||||
for (const tag of toRemove) {
|
||||
try {
|
||||
await removeActive({ notifications: [{ id: hashCode(tag) }] });
|
||||
} catch (_) {}
|
||||
_activeNotificationIds.delete(tag);
|
||||
}
|
||||
}
|
||||
} catch (_) {}
|
||||
}
|
||||
|
||||
/**
 * Deterministic non-negative 31-bit hash of a string (Java String.hashCode
 * style: h = h*31 + charCode, truncated to int32 each step).
 *
 * Used to derive stable numeric notification IDs from string tags, so the
 * result must fit a signed 32-bit integer (Tauri's notification `id` is i32).
 * `Math.abs(-2147483648)` is 2147483648, one past i32::MAX, so the absolute
 * value is masked to 31 bits; every other input's result is unchanged.
 *
 * @param {string} str - Tag to hash.
 * @returns {number} Non-negative integer in [0, 2^31 - 1].
 */
function hashCode(str) {
  let hash = 0;
  for (let i = 0; i < str.length; i++) {
    const ch = str.charCodeAt(i);
    hash = ((hash << 5) - hash) + ch; // hash * 31 + ch
    hash |= 0; // truncate to int32 each iteration
  }
  // Mask guards the single pathological case hash === -2147483648.
  return Math.abs(hash) & 0x7fffffff;
}
|
||||
|
||||
// --- Popover helpers ---
|
||||
let popoverOnClose = null;
|
||||
function openPopover(title, html, opts = {}) {
|
||||
|
|
@ -897,8 +925,9 @@ async function loadMessages(force) {
|
|||
const input = $('#popover-reply-input');
|
||||
if (input) setTimeout(() => input.focus(), 100);
|
||||
|
||||
// Mark conversation as read (DB-backed)
|
||||
// Mark conversation as read (DB-backed) and clear notifications
|
||||
invoke('mark_conversation_read', { partnerId }).catch(() => {});
|
||||
clearNotifications(`msg-`);
|
||||
|
||||
// Mark incoming encrypted messages as "seen"
|
||||
for (const p of threadPosts) {
|
||||
|
|
@ -952,6 +981,7 @@ async function loadMessages(force) {
|
|||
onClose() {
|
||||
// Mark conversation as read when closing the popover
|
||||
invoke('mark_conversation_read', { partnerId }).catch(() => {});
|
||||
clearNotifications(`msg-`);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
|
@ -2730,6 +2760,7 @@ document.querySelectorAll('.tab').forEach(tab => {
|
|||
if (target === 'messages') {
|
||||
if (!conversationsList.children.length) conversationsList.innerHTML = renderLoading();
|
||||
loadMessages(true); loadDmRecipientOptions();
|
||||
clearNotifications('msg-');
|
||||
}
|
||||
if (target === 'settings') { loadRedundancy(); loadPublicVisible(); loadCacheSizeSetting(); }
|
||||
});
|
||||
|
|
|
|||
|
|
@ -653,9 +653,9 @@ FAILURE: C → B → A: AnchorProbeResult { reachable: false }</code></pre
|
|||
<!-- 12. LAN Discovery -->
|
||||
<section id="lan">
|
||||
<h2>12. LAN Discovery</h2>
|
||||
<h3>Status: <span class="badge badge-planned">Planned</span></h3>
|
||||
<h3>Status: <span class="badge badge-complete">Complete</span></h3>
|
||||
|
||||
<p>iroh's mDNS address lookup broadcasts peer presence on the local network via multicast DNS (service name <code>"irohv1"</code>, backed by the <code>swarm-discovery</code> crate). Currently this is configured as a passive address resolver — if we already know a peer's NodeId, mDNS can resolve its LAN address. But mDNS also <strong>discovers</strong> unknown peers on the same network, and iroh exposes this via <code>MdnsAddressLookup::subscribe()</code>.</p>
|
||||
<p>mDNS-based LAN discovery is integrated via iroh's built-in <code>MdnsAddressLookupBuilder</code>. It works automatically — peers on the same local network are discovered and connected without manual configuration. iroh's mDNS address lookup broadcasts peer presence on the local network via multicast DNS (service name <code>"irohv1"</code>, backed by the <code>swarm-discovery</code> crate).</p>
|
||||
|
||||
<h3>Discovery flow</h3>
|
||||
<ol style="padding-left: 1.25rem; margin: 0.5rem 0; color: var(--text-muted);">
|
||||
|
|
@ -885,17 +885,18 @@ FAILURE: C → B → A: AnchorProbeResult { reachable: false }</code></pre
|
|||
<h3>Blob content immutability</h3>
|
||||
<p>Blob data is BLAKE3-addressed — the CID <em>is</em> the hash of the content. This means blob content is <strong>immutable by definition</strong>. Any mutable metadata (neighborhood, host lists, signatures) MUST be stored separately in a <strong>BlobHeader</strong>. Inline mutable headers are architecturally incompatible with content addressing.</p>
|
||||
|
||||
<h3>BlobHeader <span class="badge badge-planned">Planned</span></h3>
|
||||
<p>Formal mutable structure replacing/extending CdnManifest. Stored and transmitted separately from blob data.</p>
|
||||
<h3>BlobHeader <span class="badge badge-complete">Complete</span></h3>
|
||||
<p>Mutable structure stored and transmitted separately from blob data. Carries engagement state, CDN metadata, and encrypted slots for private posts.</p>
|
||||
<pre><code>BlobHeader {
|
||||
cid, // BLAKE3 hash of blob content
|
||||
author_nplus10, // Author's N+10 (NodeId + 10 preferred peers)
|
||||
author_recent_posts, // 25 previous + 25 following PostIds (neighborhood)
|
||||
upstream_nplus10, // Upstream file source's N+10 (if not author)
|
||||
downstream_hosts, // Up to min(100, floor(170MB / blob_size)) downstream hosts
|
||||
author_signature, // ed25519 signature over author fields
|
||||
host_signature, // ed25519 signature by current host
|
||||
post_id, // PostId this header belongs to
|
||||
author, // Author NodeId
|
||||
reactions, // Vec of public reactions (emoji + reactor NodeId + timestamp)
|
||||
comments, // Vec of comments (text + author + timestamp + signature)
|
||||
policy, // Author-controlled comment/react policy
|
||||
updated_at, // Timestamp of last header update
|
||||
thread_splits, // Linked thread posts when comments exceed 16KB
|
||||
receipt_slots, // Encrypted delivery/read/react receipt slots (private posts)
|
||||
comment_slots, // Encrypted comment slots (private posts)
|
||||
}</code></pre>
|
||||
<ul style="padding-left: 1.25rem; margin: 0.5rem 0; color: var(--text-muted);">
|
||||
<li><strong>Post neighborhood</strong>: 25 previous + 25 following PostIds. Forward slots are empty at publish time and populate via <code>BlobHeaderDiff</code> propagation as the author continues posting. Empty forward slots are not an error condition.</li>
|
||||
|
|
@ -927,7 +928,7 @@ FAILURE: C → B → A: AnchorProbeResult { reachable: false }</code></pre
|
|||
</ul>
|
||||
|
||||
<h3>Blob eviction <span class="badge badge-complete">Complete</span></h3>
|
||||
<pre><code>priority = pin_boost + (relationship * heart_recency * freshness / (peer_copies + 1))</code></pre>
|
||||
<pre><code>priority = pin_boost + share_boost + (relationship * heart_recency * freshness / (peer_copies + 1))</code></pre>
|
||||
<table>
|
||||
<tr><th>Factor</th><th>Calculation</th></tr>
|
||||
<tr><td><code>pin_boost</code></td><td>1000.0 if pinned, else 0.0. Own blobs auto-pinned.</td></tr>
|
||||
|
|
@ -935,6 +936,7 @@ FAILURE: C → B → A: AnchorProbeResult { reachable: false }</code></pre
|
|||
<tr><td><code>heart_recency</code></td><td>Linear decay over 30 days: <code>max(0, 1 - age/30d)</code></td></tr>
|
||||
<tr><td><code>freshness</code></td><td><code>1 / (1 + post_age_days)</code></td></tr>
|
||||
<tr><td><code>peer_copies</code></td><td>Known replica count (from <code>post_replicas</code>, only if < 1 hour old)</td></tr>
|
||||
<tr><td><code>share_boost</code></td><td>+100.0 if 3+ downstream peers (shared link with healthy distribution), scaled linearly for 1–2 downstream peers (33.3 per peer). Keeps shared content cached longer.</td></tr>
|
||||
</table>
|
||||
|
||||
<h3>Pin modes <span class="badge badge-planned">Planned</span></h3>
|
||||
|
|
@ -973,7 +975,7 @@ FAILURE: C → B → A: AnchorProbeResult { reachable: false }</code></pre
|
|||
</li>
|
||||
</ul>
|
||||
|
||||
<h3>Message types (41 total)</h3>
|
||||
<h3>Message types (49 total)</h3>
|
||||
<table>
|
||||
<tr><th>Hex</th><th>Name</th><th>Stream</th><th>Purpose</th></tr>
|
||||
<tr><td><code>0x01</code></td><td>NodeListUpdate</td><td>Uni</td><td>Incremental N1/N2 diff broadcast</td></tr>
|
||||
|
|
@ -1023,6 +1025,8 @@ FAILURE: C → B → A: AnchorProbeResult { reachable: false }</code></pre
|
|||
<tr><td><code>0xD6</code></td><td>TcpPunchRequest</td><td>Bi</td><td>Ask holder to punch TCP toward browser IP</td></tr>
|
||||
<tr><td><code>0xD7</code></td><td>TcpPunchResult</td><td>Bi</td><td>Punch result + HTTP address for redirect</td></tr>
|
||||
<tr><td><code>0xE0</code></td><td>MeshKeepalive</td><td>Uni</td><td>30s connection heartbeat</td></tr>
|
||||
<tr><td><code>0xE1</code></td><td>ReplicationRequest</td><td>Bi</td><td>Request peer to cache specific posts</td></tr>
|
||||
<tr><td><code>0xE2</code></td><td>ReplicationResponse</td><td>Bi</td><td>Accept/reject replication request</td></tr>
|
||||
</table>
|
||||
|
||||
<h3>Engagement propagation</h3>
|
||||
|
|
@ -1031,9 +1035,34 @@ FAILURE: C → B → A: AnchorProbeResult { reachable: false }</code></pre
|
|||
<li><strong>Push (real-time)</strong>: On react/comment, the diff is sent to both <strong>downstream</strong> peers (CDN tree children) and the <strong>upstream</strong> peer (who we got the post from). Each intermediate node re-propagates both directions, excluding sender. This flows the diff up to the author and down to all holders.</li>
|
||||
<li><strong>Auto downstream registration</strong>: Nodes that receive a post via pull sync or push notification automatically send <code>PostDownstreamRegister</code> (0xD3) to the sender, ensuring bidirectional diff flow.</li>
|
||||
<li><strong>Pull (safety net)</strong>: Every 5 minutes, the pull cycle requests <code>BlobHeaderRequest</code> (0xD1) with the local header timestamp. Peers respond with the full header only if theirs is newer. Additive merge — <code>store_reaction</code> upserts, <code>store_comment</code> inserts with ON CONFLICT DO NOTHING.</li>
|
||||
<li><strong>Tombstones</strong>: Deleted reactions and comments are not hard-deleted. Instead, a <code>deleted_at</code> timestamp is set on the record. Tombstones propagate via pull sync headers — when a peer receives a header with a tombstoned entry, it applies the deletion locally. This prevents deleted engagement from being re-introduced by peers that haven't yet received the deletion.</li>
|
||||
<li><strong>Planned</strong>: Pull engagement from both upstream and downstream peers to catch missed diffs from either direction.</li>
|
||||
</ul>
|
||||
|
||||
<h3>Device roles & bandwidth budgets <span class="badge badge-complete">Complete</span></h3>
|
||||
<p>Each node advertises its device role in <code>InitialExchange</code>, which determines its bandwidth budgets for replication (pulling posts to cache) and delivery (serving requests from peers):</p>
|
||||
<table>
|
||||
<tr><th>Role</th><th>Replication / hour</th><th>Delivery / hour</th></tr>
|
||||
<tr><td><strong>Intermittent</strong> (phones)</td><td>100 MB</td><td>1 GB</td></tr>
|
||||
<tr><td><strong>Available</strong> (desktops)</td><td>200 MB</td><td>2 GB</td></tr>
|
||||
<tr><td><strong>Persistent</strong> (anchors)</td><td>200 MB</td><td>1 GB</td></tr>
|
||||
</table>
|
||||
<ul style="padding-left: 1.25rem; margin: 0.5rem 0; color: var(--text-muted);">
|
||||
<li>Budgets auto-reset every hour</li>
|
||||
<li>Role is self-declared based on device type and advertised to peers in <code>InitialExchange</code></li>
|
||||
<li>Peers respect advertised budgets when selecting replication targets</li>
|
||||
</ul>
|
||||
|
||||
<h3>Active CDN replication <span class="badge badge-complete">Complete</span></h3>
|
||||
<p>All devices proactively replicate recent under-replicated posts to peers, not just passively serve on request:</p>
|
||||
<ul style="padding-left: 1.25rem; margin: 0.5rem 0; color: var(--text-muted);">
|
||||
<li><strong>10-minute cycle</strong>: All devices initiate replication checks every 10 minutes</li>
|
||||
<li><strong>Target prioritization</strong>: Desktops > anchors > phones, scored by available bandwidth budget and connection quality</li>
|
||||
<li><strong>Selection criteria</strong>: Posts less than 72 hours old with fewer than 2 downstream replicas are selected for replication</li>
|
||||
<li><strong>Protocol</strong>: <code>ReplicationRequest</code> (0xE1) asks a peer to cache specific posts; <code>ReplicationResponse</code> (0xE2) accepts or rejects based on available budget and storage</li>
|
||||
<li><strong>Graceful degradation</strong>: In small networks with few peers, the cycle runs but finds few or no viable targets — no wasted bandwidth. As the network grows, replication naturally increases.</li>
|
||||
</ul>
|
||||
|
||||
<h3>Connection rate limiting</h3>
|
||||
<p>Incoming QUIC connections that fail authentication are rate-limited per source IP to prevent CPU exhaustion from rogue or stale nodes:</p>
|
||||
<ul style="padding-left: 1.25rem; margin: 0.5rem 0; color: var(--text-muted);">
|
||||
|
|
@ -1258,6 +1287,8 @@ FAILURE: C → B → A: AnchorProbeResult { reachable: false }</code></pre
|
|||
<!-- 25. HTTP Post Delivery -->
|
||||
<section id="http-delivery">
|
||||
<h2>25. HTTP Post Delivery</h2>
|
||||
<h3>Status: <span class="badge badge-complete">Complete</span></h3>
|
||||
<p style="color: var(--text-muted); font-size: 0.9rem;">Direct peer-to-browser HTTP serving is implemented. For share link delivery, this is now part of the tiered web serving strategy (redirect → TCP punch → QUIC proxy) described in <a href="#share-links">Section 26</a>.</p>
|
||||
<h3>Intent</h3>
|
||||
<p>Every ItsGoin node that is publicly reachable can serve its cached public posts directly to browsers over HTTP — no extra infrastructure, no additional dependencies, no new binary. The same QUIC UDP port used for app traffic is accompanied by a TCP listener on the same port number. UDP goes to the QUIC stack as always. TCP goes to a minimal raw HTTP/1.1 handler baked into the binary.</p>
|
||||
<p>This makes every publicly-reachable node a browser-accessible content endpoint, enabling share links that deliver content peer-to-browser without routing any post bytes through itsgoin.net.</p>
|
||||
|
|
@ -1324,34 +1355,22 @@ FAILURE: C → B → A: AnchorProbeResult { reachable: false }</code></pre
|
|||
</ul>
|
||||
|
||||
<h3>URL format</h3>
|
||||
<pre><code>https://itsgoin.net/p/<postid_hex>/<encoded_hostlist></code></pre>
|
||||
<pre><code>https://itsgoin.net/p/<postid_hex>/<author_nodeid_hex></code></pre>
|
||||
<ul style="padding-left: 1.25rem; margin: 0.5rem 0; color: var(--text-muted);">
|
||||
<li><code>postid_hex</code>: 64 hex characters (BLAKE3 post hash)</li>
|
||||
<li><code>encoded_hostlist</code>: base64url-encoded binary list of up to 5 host entries (see encoding below)</li>
|
||||
<li><code>author_nodeid_hex</code>: 64 hex characters (author's ed25519 public key). Enables direct QUIC connection as fast path; worm search handles the case where author is unreachable.</li>
|
||||
</ul>
|
||||
<p>Example: <code>https://itsgoin.net/p/3a7f...c921/AAEC...Zg==</code></p>
|
||||
<p>Example: <code>https://itsgoin.net/p/3a7f...c921/b4e2...f817</code></p>
|
||||
<p>Simple, human-inspectable, no binary encoding needed. Author ID in the URL is sufficient for the tiered serving strategy below.</p>
|
||||
|
||||
<h3>Host list encoding</h3>
|
||||
<p>Compact binary encoding — optimized for QR code scanability:</p>
|
||||
<pre><code>Per IPv6 host: [0x06][16 bytes IP][2 bytes port] = 19 bytes
|
||||
Per IPv4 host: [0x04][4 bytes IP][2 bytes port] = 7 bytes
|
||||
|
||||
5× IPv6: 95 bytes → ~127 chars base64url (comfortably scannable QR)</code></pre>
|
||||
<p>All integers big-endian. base64url-encoded (URL-safe, no padding).</p>
|
||||
|
||||
<h3>Host list generation (at share time)</h3>
|
||||
<p>When a user taps “Share” on a post:</p>
|
||||
<h3>Tiered web serving</h3>
|
||||
<p>When a browser visits a share link, itsgoin.net attempts three tiers to deliver the post:</p>
|
||||
<ol style="padding-left: 1.25rem; margin: 0.5rem 0; color: var(--text-muted);">
|
||||
<li>Query <code>post_downstream</code> for this postid</li>
|
||||
<li>Filter to hosts with a known public address (IPv6 or UPnP-mapped IPv4)</li>
|
||||
<li>Select up to 5 — prefer IPv6 public over UPnP IPv4, prefer most recently seen over stale</li>
|
||||
<li>Include self if this node is publicly reachable</li>
|
||||
<li>Encode and embed in URL</li>
|
||||
<li><strong>302 redirect</strong>: If a publicly-reachable holder is known (IPv6, UPnP TCP), redirect the browser directly to that node's HTTP endpoint. Zero post bytes flow through itsgoin.net.</li>
|
||||
<li><strong>TCP punch</strong>: If a holder is connected but not publicly reachable, send <code>TcpPunchRequest</code> (0xD6) asking the holder to punch TCP toward the browser's IP. On success, <code>TcpPunchResult</code> (0xD7) returns the HTTP address for a redirect.</li>
|
||||
<li><strong>QUIC proxy</strong>: If neither redirect nor punch works, itsgoin.net fetches the post on-demand via PostFetch (0xD4/0xD5), renders HTML, and serves directly to the browser.</li>
|
||||
</ol>
|
||||
|
||||
<h3>Availability math</h3>
|
||||
<p>At 80% per-node uptime (conservative for a mix of home and mobile nodes), 5 independent hosts gives <strong>1 - (0.2<sup>5</sup>) = 99.97%</strong> link availability. Hosts are selected from nodes that have already demonstrated they cached this specific post — not random peers.</p>
|
||||
|
||||
<h3>itsgoin.net QUIC proxy handler</h3>
|
||||
<p>Route: <code>GET /p/<postid_hex>/<author_nodeid_hex></code></p>
|
||||
<pre><code>1. Check local storage (fast path — post already fetched recently)
|
||||
|
|
@ -1502,7 +1521,7 @@ Per IPv4 host: [0x04][4 bytes IP][2 bytes port] = 7 bytes
|
|||
<tr><td>UPnP port mapping (desktop)</td><td><span class="badge badge-complete">Complete</span></td></tr>
|
||||
<tr><td>NAT type detection (STUN) + hard+hard skip</td><td><span class="badge badge-complete">Complete</span></td></tr>
|
||||
<tr><td>Advanced NAT traversal (role-based scanning + filter probe)</td><td><span class="badge badge-complete">Complete</span></td></tr>
|
||||
<tr><td>LAN discovery (mDNS scan + auto-connect)</td><td><span class="badge badge-planned">Planned</span></td></tr>
|
||||
<tr><td>LAN discovery (mDNS scan + auto-connect)</td><td><span class="badge badge-complete">Complete</span></td></tr>
|
||||
<tr><td>Content propagation via attention</td><td><span class="badge badge-partial">Partial</span></td></tr>
|
||||
<tr><td>BlobHeader separation from blob content</td><td><span class="badge badge-complete">Complete</span></td></tr>
|
||||
<tr><td>25+25 neighborhood with HeaderDiff propagation</td><td><span class="badge badge-partial">Partial</span> (engagement diffs work, neighborhood diffs planned)</td></tr>
|
||||
|
|
@ -1519,13 +1538,13 @@ Per IPv4 host: [0x04][4 bytes IP][2 bytes port] = 7 bytes
|
|||
<tr><td><code>--max-mesh</code> flag (test affordance)</td><td><span class="badge badge-planned">Planned</span></td></tr>
|
||||
<tr><td>Audience sharding</td><td><span class="badge badge-planned">Planned</span></td></tr>
|
||||
<tr><td>Custom feeds</td><td><span class="badge badge-planned">Planned</span></td></tr>
|
||||
<tr><td>HTTP post delivery (TCP listener, single route, load shedding)</td><td><span class="badge badge-planned">Planned</span></td></tr>
|
||||
<tr><td>HTTP post delivery (TCP listener, single route, load shedding)</td><td><span class="badge badge-complete">Complete</span></td></tr>
|
||||
<tr><td>Share link generation (postid + author NodeId)</td><td><span class="badge badge-complete">Complete</span></td></tr>
|
||||
<tr><td>itsgoin.net QUIC proxy handler (on-demand fetch + render)</td><td><span class="badge badge-complete">Complete</span></td></tr>
|
||||
<tr><td>PostFetch (0xD4/0xD5) single-post retrieval</td><td><span class="badge badge-complete">Complete</span></td></tr>
|
||||
<tr><td>Universal Links / App Links (itsgoin.net/p/*)</td><td><span class="badge badge-planned">Planned</span></td></tr>
|
||||
<tr><td>itsgoin.net ItsGoin node (anchor + web handler)</td><td><span class="badge badge-complete">Complete</span></td></tr>
|
||||
<tr><td>UPnP TCP port mapping alongside UDP</td><td><span class="badge badge-planned">Planned</span></td></tr>
|
||||
<tr><td>UPnP TCP port mapping alongside UDP</td><td><span class="badge badge-complete">Complete</span></td></tr>
|
||||
</table>
|
||||
</section>
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue