db_migrations/0013_normalize_relay_urls.sql: 82 additions & 0 deletions
@@ -0,0 +1,82 @@
-- Normalize relay URLs to ensure consistent storage
-- This migration removes ALL trailing slashes from URLs to ensure consistency
-- Since most WebSocket/relay implementations treat URLs with and without trailing
-- slashes as equivalent, normalizing to no trailing slash provides the cleanest
-- and most predictable behavior
--
-- This migration is idempotent and safe to run multiple times.
-- It uses a multi-step approach to avoid UNIQUE constraint violations:
-- 1. Compute normalized URLs in a CTE
-- 2. Identify keeper rows (oldest/smallest ID per normalized URL)
-- 3. Update foreign key references to point to keepers
-- 4. Delete duplicate rows
-- 5. Update keeper rows to normalized URLs

-- Step 1: Create a temporary table with normalized URL mappings
-- This identifies which relays will be kept and which will be removed
-- Handles ALL duplicates correctly (3+ duplicates) by keeping MIN(id) per normalized URL
CREATE TEMPORARY TABLE relay_normalization_plan AS
WITH normalized_relays AS (
-- Compute normalized URL for every relay
SELECT
id,
url,
RTRIM(url, '/') as normalized_url
FROM relays
),
keeper_selection AS (
-- For each normalized URL, select the keeper (oldest/smallest ID)
SELECT
normalized_url,
MIN(id) as keeper_id
FROM normalized_relays
GROUP BY normalized_url
)
SELECT
nr.id as original_id,
nr.url as original_url,
nr.normalized_url,
ks.keeper_id
FROM normalized_relays nr
JOIN keeper_selection ks ON nr.normalized_url = ks.normalized_url;

-- Step 2: Update foreign key references in user_relays
-- Point all relay_id references to the keeper for that normalized URL
-- This handles ALL non-keeper IDs (not just MAX, but any ID != MIN)
UPDATE user_relays
SET relay_id = (
SELECT keeper_id
FROM relay_normalization_plan
WHERE original_id = user_relays.relay_id
)
WHERE relay_id IN (
SELECT original_id
FROM relay_normalization_plan
WHERE original_id != keeper_id
);

-- Step 3: Delete ALL duplicate relay rows (non-keepers)
-- This removes every relay except the keeper (MIN(id)) for each normalized URL
DELETE FROM relays
WHERE id IN (
SELECT original_id
FROM relay_normalization_plan
WHERE original_id != keeper_id
);

-- Step 4: Update keeper rows to have normalized URLs
-- Only update if the URL actually needs normalization (idempotent)
UPDATE relays
SET url = (
SELECT normalized_url
FROM relay_normalization_plan
WHERE original_id = relays.id
)
WHERE id IN (
SELECT original_id
FROM relay_normalization_plan
WHERE original_id = keeper_id AND original_url != normalized_url
);

-- Clean up temporary table
DROP TABLE relay_normalization_plan;
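
Note: the migration canonicalizes URLs with SQLite's RTRIM(url, '/'), which strips every trailing slash character, so the application should write relay URLs in the same canonical form before inserting them. Below is a minimal Rust sketch of that application-side normalization, for illustration only; normalize_relay_url is a hypothetical helper, not part of this PR, and the real codebase may normalize through its own relay URL type instead.

/// Strip all trailing slashes, mirroring RTRIM(url, '/') in the migration.
/// Hypothetical helper shown for illustration; not part of the PR.
fn normalize_relay_url(url: &str) -> String {
    url.trim_end_matches('/').to_string()
}

fn main() {
    assert_eq!(normalize_relay_url("wss://relay.example.com/"), "wss://relay.example.com");
    // Like RTRIM, every trailing slash is removed, not just one.
    assert_eq!(normalize_relay_url("wss://relay.example.com///"), "wss://relay.example.com");
    // Already-normalized input is unchanged, matching the migration's idempotency.
    assert_eq!(normalize_relay_url("wss://relay.example.com"), "wss://relay.example.com");
}
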
src/nostr_manager/query.rs: 37 additions & 16 deletions
@@ -1,6 +1,6 @@
//! This module contains functions for querying Nostr events from relays.

-use std::collections::HashSet;
+use std::{collections::HashSet, time::Duration};

use nostr_sdk::prelude::*;

@@ -10,20 +10,30 @@ use crate::{
RelayType,
};

+/// Maximum allowed skew for event timestamps in the future (1 hour)
+const MAX_FUTURE_SKEW: Duration = Duration::from_secs(60 * 60);
+
impl NostrManager {
pub(crate) async fn fetch_metadata_from(
&self,
nip65_relays: &[Relay], // TODO: Replace with &[RelayUrl]
pubkey: PublicKey,
) -> Result<Option<Metadata>> {
-let filter: Filter = Filter::new().author(pubkey).kind(Kind::Metadata).limit(1);
+let filter: Filter = Filter::new().author(pubkey).kind(Kind::Metadata);
let urls: Vec<RelayUrl> = nip65_relays.iter().map(|r| r.url.clone()).collect();
let events: Events = self
.client
.fetch_events_from(urls, filter, self.timeout)
.await?;
-match events.first() {
-    Some(event) => Ok(Some(Metadata::try_from(event)?)),
+
+// Filter out events with timestamps too far in the future
+let cutoff = Timestamp::now() + MAX_FUTURE_SKEW;
+let latest = events
+    .into_iter()
+    .filter(|e| e.created_at <= cutoff)
+    .max_by_key(|e| (e.created_at, e.id));
+match latest {
+    Some(event) => Ok(Some(Metadata::try_from(&event)?)),
None => Ok(None),
}
}
@@ -34,20 +44,28 @@ impl NostrManager {
relay_type: RelayType,
nip65_relays: &[Relay], // TODO: Replace with &[RelayUrl]
) -> Result<HashSet<RelayUrl>> {
-let filter = Filter::new()
-    .author(pubkey)
-    .kind(relay_type.into())
-    .limit(1);
+let filter = Filter::new().author(pubkey).kind(relay_type.into());
let urls: Vec<RelayUrl> = nip65_relays.iter().map(|r| r.url.clone()).collect();
let relay_events = self
.client
.fetch_events_from(urls, filter, self.timeout)
.await?;
-tracing::debug!("Fetched relay events {:?}", relay_events);

-match relay_events.first() {
+// Filter out events with timestamps too far in the future
+let cutoff = Timestamp::now() + MAX_FUTURE_SKEW;
+let latest = relay_events
+    .into_iter()
+    .filter(|e| e.created_at <= cutoff)
+    .max_by_key(|e| (e.created_at, e.id));
+
+tracing::debug!(
+    "Fetched relay events, using latest: {:?}",
+    latest.as_ref().map(|e| e.created_at)
+);
+
+match latest {
None => Ok(HashSet::new()),
-    Some(event) => Ok(Self::relay_urls_from_event(event.clone())),
+    Some(event) => Ok(Self::relay_urls_from_event(event)),
}
}

@@ -56,16 +74,19 @@ impl NostrManager {
pubkey: PublicKey,
relays: &[RelayUrl],
) -> Result<Option<Event>> {
-let filter = Filter::new()
-    .kind(Kind::MlsKeyPackage)
-    .author(pubkey)
-    .limit(1);
+let filter = Filter::new().kind(Kind::MlsKeyPackage).author(pubkey);
let events = self
.client
.fetch_events_from(relays, filter, self.timeout)
.await?;

-Ok(events.first_owned())
+// Filter out events with timestamps too far in the future
+let cutoff = Timestamp::now() + MAX_FUTURE_SKEW;
+let latest = events
+    .into_iter()
+    .filter(|e| e.created_at <= cutoff)
+    .max_by_key(|e| (e.created_at, e.id));
+Ok(latest)
}
}
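
All three query helpers now share the same selection step: compute a cutoff of Timestamp::now() + MAX_FUTURE_SKEW, discard events created after it, and keep the maximum by (created_at, id) so ties on timestamp resolve deterministically. The sketch below isolates that pattern so it can be read and run on its own; it uses a simplified stand-in Event struct with just the two fields the selection touches, rather than nostr_sdk's real Event and Timestamp types used in the diff above.

use std::time::{Duration, SystemTime, UNIX_EPOCH};

/// Simplified stand-in for an event: only the fields the selection logic needs.
#[derive(Debug, Clone, PartialEq)]
struct Event {
    id: [u8; 32],    // event id, used only as a deterministic tie-breaker
    created_at: u64, // unix seconds
}

/// Same bound as the PR: one hour of allowed clock skew into the future.
const MAX_FUTURE_SKEW: Duration = Duration::from_secs(60 * 60);

/// Newest event whose timestamp is at most MAX_FUTURE_SKEW in the future.
fn latest_plausible_event(events: Vec<Event>) -> Option<Event> {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is before the unix epoch")
        .as_secs();
    let cutoff = now + MAX_FUTURE_SKEW.as_secs();
    events
        .into_iter()
        .filter(|e| e.created_at <= cutoff)
        .max_by_key(|e| (e.created_at, e.id))
}

fn main() {
    let older = Event { id: [1; 32], created_at: 1_700_000_000 };
    let newer = Event { id: [2; 32], created_at: 1_700_000_100 };
    let future = Event { id: [3; 32], created_at: u64::MAX }; // implausibly far in the future
    let picked = latest_plausible_event(vec![older, newer.clone(), future]);
    assert_eq!(picked, Some(newer)); // the future-dated event is ignored
}

Picking the newest plausible event client-side, rather than relying on .limit(1), is presumably why the limit was dropped from all three filters in this PR.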
