From 6639c98e4d61f9f1b4e52af7d7301dcef16dce1a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 11 Feb 2025 15:29:25 +0000 Subject: [PATCH 01/14] refactor: [#1258] make things private or pub(crate) when possible. Limit the exposed funtionality for pacakges. Specially the new `tracker-core` package which has not been published yet. --- packages/tracker-core/src/announce_handler.rs | 8 +- .../src/authentication/handler.rs | 8 +- .../src/authentication/key/mod.rs | 8 +- .../key/repository/in_memory.rs | 9 +- .../key/repository/persisted.rs | 6 +- packages/tracker-core/src/core_tests.rs | 215 ----------------- .../tracker-core/src/databases/driver/mod.rs | 8 +- .../src/databases/driver/mysql.rs | 2 +- .../src/databases/driver/sqlite.rs | 2 +- packages/tracker-core/src/error.rs | 2 +- packages/tracker-core/src/lib.rs | 8 +- packages/tracker-core/src/test_helpers.rs | 219 ++++++++++++++++++ packages/tracker-core/src/torrent/manager.rs | 8 +- packages/tracker-core/src/torrent/mod.rs | 9 +- .../src/torrent/repository/in_memory.rs | 35 +-- .../src/torrent/repository/persisted.rs | 6 +- packages/tracker-core/src/torrent/services.rs | 2 +- .../src/whitelist/authorization.rs | 4 +- .../tracker-core/src/whitelist/manager.rs | 6 +- packages/tracker-core/src/whitelist/mod.rs | 6 +- .../src/whitelist/repository/in_memory.rs | 6 +- .../src/whitelist/repository/persisted.rs | 8 +- .../src/whitelist/test_helpers.rs | 32 +++ .../src/whitelist/whitelist_tests.rs | 27 --- src/bootstrap/app.rs | 1 + src/servers/http/mod.rs | 1 + src/servers/http/test_helpers.rs | 16 ++ src/servers/http/v1/handlers/announce.rs | 2 +- src/servers/http/v1/services/announce.rs | 2 +- src/servers/http/v1/services/scrape.rs | 11 +- 30 files changed, 357 insertions(+), 320 deletions(-) delete mode 100644 packages/tracker-core/src/core_tests.rs create mode 100644 packages/tracker-core/src/test_helpers.rs create mode 100644 packages/tracker-core/src/whitelist/test_helpers.rs delete mode 100644 packages/tracker-core/src/whitelist/whitelist_tests.rs create mode 100644 src/servers/http/test_helpers.rs diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 85dd354bf..cd0a9b861 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -182,8 +182,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::announce_handler::AnnounceHandler; - use crate::core_tests::initialize_handlers; use crate::scrape_handler::ScrapeHandler; + use crate::test_helpers::tests::initialize_handlers; fn public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); @@ -244,7 +244,7 @@ mod tests { peer_ip, public_tracker, sample_peer_1, sample_peer_2, sample_peer_3, }; use crate::announce_handler::PeersWanted; - use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; mod should_assign_the_ip_to_the_peer { @@ -411,7 +411,7 @@ mod tests { use crate::announce_handler::tests::the_announce_handler::{peer_ip, public_tracker}; use crate::announce_handler::PeersWanted; - use crate::core_tests::{completed_peer, leecher, sample_info_hash, seeder, started_peer}; + use crate::test_helpers::tests::{completed_peer, leecher, sample_info_hash, seeder, started_peer}; #[tokio::test] async fn when_the_peer_is_a_seeder() { @@ -474,8 +474,8 @@ mod tests { use crate::announce_handler::tests::the_announce_handler::peer_ip; use crate::announce_handler::{AnnounceHandler, 
PeersWanted}; - use crate::core_tests::{sample_info_hash, sample_peer}; use crate::databases::setup::initialize_database; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::manager::TorrentsManager; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; diff --git a/packages/tracker-core/src/authentication/handler.rs b/packages/tracker-core/src/authentication/handler.rs index f758830ac..136060916 100644 --- a/packages/tracker-core/src/authentication/handler.rs +++ b/packages/tracker-core/src/authentication/handler.rs @@ -132,7 +132,7 @@ impl KeysHandler { /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. - pub async fn generate_permanent_peer_key(&self) -> Result { + pub(crate) async fn generate_permanent_peer_key(&self) -> Result { self.generate_expiring_peer_key(None).await } @@ -170,7 +170,7 @@ impl KeysHandler { /// # Arguments /// /// * `key` - The pre-generated key. - pub async fn add_permanent_peer_key(&self, key: Key) -> Result { + pub(crate) async fn add_permanent_peer_key(&self, key: Key) -> Result { self.add_expiring_peer_key(key, None).await } @@ -188,7 +188,7 @@ impl KeysHandler { /// * `key` - The pre-generated key. /// * `lifetime` - The duration in seconds for the new key. The key will be /// no longer valid after `lifetime` seconds. - pub async fn add_expiring_peer_key( + pub(crate) async fn add_expiring_peer_key( &self, key: Key, valid_until: Option, @@ -219,7 +219,7 @@ impl KeysHandler { } /// It removes an authentication key from memory. - pub async fn remove_in_memory_auth_key(&self, key: &Key) { + pub(crate) async fn remove_in_memory_auth_key(&self, key: &Key) { self.in_memory_key_repository.remove(key).await; } diff --git a/packages/tracker-core/src/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs index 8ec368ebc..fce18c0dd 100644 --- a/packages/tracker-core/src/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -59,17 +59,19 @@ pub type ParseKeyError = peer_key::ParseKeyError; /// /// For more information see function [`generate_key`](crate::authentication::key::generate_key) to generate the /// [`PeerKey`](crate::authentication::PeerKey). -pub const AUTH_KEY_LENGTH: usize = 32; +pub(crate) const AUTH_KEY_LENGTH: usize = 32; /// It generates a new permanent random key [`PeerKey`]. +#[cfg(test)] #[must_use] -pub fn generate_permanent_key() -> PeerKey { +pub(crate) fn generate_permanent_key() -> PeerKey { generate_key(None) } /// It generates a new expiring random key [`PeerKey`]. +#[cfg(test)] #[must_use] -pub fn generate_expiring_key(lifetime: Duration) -> PeerKey { +pub(crate) fn generate_expiring_key(lifetime: Duration) -> PeerKey { generate_key(Some(lifetime)) } diff --git a/packages/tracker-core/src/authentication/key/repository/in_memory.rs b/packages/tracker-core/src/authentication/key/repository/in_memory.rs index 0a2fc50cd..13664e27c 100644 --- a/packages/tracker-core/src/authentication/key/repository/in_memory.rs +++ b/packages/tracker-core/src/authentication/key/repository/in_memory.rs @@ -9,21 +9,22 @@ pub struct InMemoryKeyRepository { impl InMemoryKeyRepository { /// It adds a new authentication key. 
- pub async fn insert(&self, auth_key: &PeerKey) { + pub(crate) async fn insert(&self, auth_key: &PeerKey) { self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); } /// It removes an authentication key. - pub async fn remove(&self, key: &Key) { + pub(crate) async fn remove(&self, key: &Key) { self.keys.write().await.remove(key); } - pub async fn get(&self, key: &Key) -> Option { + pub(crate) async fn get(&self, key: &Key) -> Option { self.keys.read().await.get(key).cloned() } /// It clears all the authentication keys. - pub async fn clear(&self) { + #[allow(dead_code)] + pub(crate) async fn clear(&self) { let mut keys = self.keys.write().await; keys.clear(); } diff --git a/packages/tracker-core/src/authentication/key/repository/persisted.rs b/packages/tracker-core/src/authentication/key/repository/persisted.rs index 7edee62c0..95a3b874c 100644 --- a/packages/tracker-core/src/authentication/key/repository/persisted.rs +++ b/packages/tracker-core/src/authentication/key/repository/persisted.rs @@ -21,7 +21,7 @@ impl DatabaseKeyRepository { /// # Errors /// /// Will return a `databases::error::Error` if unable to add the `auth_key` to the database. - pub fn add(&self, peer_key: &PeerKey) -> Result<(), databases::error::Error> { + pub(crate) fn add(&self, peer_key: &PeerKey) -> Result<(), databases::error::Error> { self.database.add_key_to_keys(peer_key)?; Ok(()) } @@ -31,7 +31,7 @@ impl DatabaseKeyRepository { /// # Errors /// /// Will return a `database::Error` if unable to remove the `key` from the database. - pub fn remove(&self, key: &Key) -> Result<(), databases::error::Error> { + pub(crate) fn remove(&self, key: &Key) -> Result<(), databases::error::Error> { self.database.remove_key_from_keys(key)?; Ok(()) } @@ -41,7 +41,7 @@ impl DatabaseKeyRepository { /// # Errors /// /// Will return a `database::Error` if unable to load the keys from the database. - pub fn load_keys(&self) -> Result, databases::error::Error> { + pub(crate) fn load_keys(&self) -> Result, databases::error::Error> { let keys = self.database.load_keys()?; Ok(keys) } diff --git a/packages/tracker-core/src/core_tests.rs b/packages/tracker-core/src/core_tests.rs deleted file mode 100644 index 165c8790e..000000000 --- a/packages/tracker-core/src/core_tests.rs +++ /dev/null @@ -1,215 +0,0 @@ -//! Some generic test helpers functions. -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::sync::Arc; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use bittorrent_primitives::info_hash::InfoHash; -use rand::Rng; -use torrust_tracker_configuration::Configuration; -#[cfg(test)] -use torrust_tracker_configuration::Core; -use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_primitives::DurationSinceUnixEpoch; -#[cfg(test)] -use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; - -use super::announce_handler::AnnounceHandler; -use super::databases::setup::initialize_database; -use super::scrape_handler::ScrapeHandler; -use super::torrent::repository::in_memory::InMemoryTorrentRepository; -use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use super::whitelist::repository::in_memory::InMemoryWhitelist; -use super::whitelist::{self}; - -/// Generates a random `InfoHash`. 
-#[must_use] -pub fn random_info_hash() -> InfoHash { - let mut rng = rand::rng(); - let mut random_bytes = [0u8; 20]; - rng.fill(&mut random_bytes); - - InfoHash::from_bytes(&random_bytes) -} - -/// # Panics -/// -/// Will panic if the string representation of the info hash is not a valid info hash. -#[must_use] -pub fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 - .parse::() - .expect("String should be a valid info hash") -} - -/// # Panics -/// -/// Will panic if the string representation of the info hash is not a valid info hash. -#[must_use] -pub fn sample_info_hash_one() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 - .parse::() - .expect("String should be a valid info hash") -} - -/// # Panics -/// -/// Will panic if the string representation of the info hash is not a valid info hash. -#[must_use] -pub fn sample_info_hash_two() -> InfoHash { - "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1" // DevSkim: ignore DS173237 - .parse::() - .expect("String should be a valid info hash") -} - -/// # Panics -/// -/// Will panic if the string representation of the info hash is not a valid info hash. -#[must_use] -pub fn sample_info_hash_alphabetically_ordered_after_sample_info_hash_one() -> InfoHash { - "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1" // DevSkim: ignore DS173237 - .parse::() - .expect("String should be a valid info hash") -} - -/// Sample peer whose state is not relevant for the tests. -#[must_use] -pub fn sample_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } -} - -#[must_use] -pub fn sample_peer_one() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000001"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } -} - -#[must_use] -pub fn sample_peer_two() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000002"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } -} - -#[must_use] -pub fn seeder() -> Peer { - complete_peer() -} - -#[must_use] -pub fn leecher() -> Peer { - incomplete_peer() -} - -#[must_use] -pub fn started_peer() -> Peer { - incomplete_peer() -} - -#[must_use] -pub fn completed_peer() -> Peer { - complete_peer() -} - -/// A peer that counts as `complete` is swarm metadata -/// IMPORTANT!: it only counts if the it has been announce at least once before -/// announcing the `AnnounceEvent::Completed` event. 
-#[must_use] -pub fn complete_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } -} - -/// A peer that counts as `incomplete` is swarm metadata -#[must_use] -pub fn incomplete_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(1000), // Still bytes to download - event: AnnounceEvent::Started, - } -} - -#[must_use] -pub fn initialize_handlers(config: &Configuration) -> (Arc, Arc) { - let database = initialize_database(&config.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( - &config.core, - &in_memory_whitelist.clone(), - )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - (announce_handler, scrape_handler) -} - -/// # Panics -/// -/// Will panic if the temporary database file path is not a valid UFT string. -#[cfg(test)] -#[must_use] -pub fn ephemeral_configuration() -> Core { - let mut config = Core::default(); - - let temp_file = ephemeral_sqlite_database(); - temp_file.to_str().unwrap().clone_into(&mut config.database.path); - - config -} - -/// # Panics -/// -/// Will panic if the temporary database file path is not a valid UFT string. -#[cfg(test)] -#[must_use] -pub fn ephemeral_configuration_for_listed_tracker() -> Core { - let mut config = Core { - listed: true, - ..Default::default() - }; - - let temp_file = ephemeral_sqlite_database(); - temp_file.to_str().unwrap().clone_into(&mut config.database.path); - - config -} diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index 1e42e4414..2bc6a1e3c 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -73,7 +73,7 @@ pub mod sqlite; /// # Errors /// /// Will return `Error` if unable to build the driver. 
-pub fn build(driver: &Driver, db_path: &str) -> Result, Error> { +pub(crate) fn build(driver: &Driver, db_path: &str) -> Result, Error> { let database: Box = match driver { Driver::Sqlite3 => Box::new(Sqlite::new(db_path)?), Driver::MySQL => Box::new(Mysql::new(db_path)?), @@ -85,7 +85,7 @@ pub fn build(driver: &Driver, db_path: &str) -> Result, Error> } #[cfg(test)] -mod tests { +pub(crate) mod tests { use std::sync::Arc; use std::time::Duration; @@ -152,8 +152,8 @@ mod tests { use std::sync::Arc; - use crate::core_tests::sample_info_hash; use crate::databases::Database; + use crate::test_helpers::tests::sample_info_hash; pub fn it_should_save_and_load_persistent_torrents(driver: &Arc>) { let infohash = sample_info_hash(); @@ -232,8 +232,8 @@ mod tests { use std::sync::Arc; - use crate::core_tests::random_info_hash; use crate::databases::Database; + use crate::test_helpers::tests::random_info_hash; pub fn it_should_load_the_whitelist(driver: &Arc>) { let infohash = random_info_hash(); diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index 1e1e29f36..365bd0ad9 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -15,7 +15,7 @@ use crate::authentication::{self, Key}; const DRIVER: Driver = Driver::MySQL; -pub struct Mysql { +pub(crate) struct Mysql { pool: Pool, } diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index 37f5254a5..36ca4eabe 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -14,7 +14,7 @@ use crate::authentication::{self, Key}; const DRIVER: Driver = Driver::Sqlite3; -pub struct Sqlite { +pub(crate) struct Sqlite { pool: Pool, } diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index 515510b85..dcdd89668 100644 --- a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -41,8 +41,8 @@ mod tests { mod whitelist_error { - use crate::core_tests::sample_info_hash; use crate::error::WhitelistError; + use crate::test_helpers::tests::sample_info_hash; #[test] fn torrent_not_whitelisted() { diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index 9334e4a02..ecbaef9c5 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -391,8 +391,8 @@ pub mod scrape_handler; pub mod torrent; pub mod whitelist; -pub mod core_tests; pub mod peer_tests; +pub mod test_helpers; use torrust_tracker_clock::clock; /// This code needs to be copied into each crate. 
@@ -416,8 +416,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::announce_handler::AnnounceHandler; - use crate::core_tests::initialize_handlers; use crate::scrape_handler::ScrapeHandler; + use crate::test_helpers::tests::initialize_handlers; fn initialize_handlers_for_public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); @@ -445,7 +445,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use crate::announce_handler::PeersWanted; - use crate::core_tests::{complete_peer, incomplete_peer}; + use crate::test_helpers::tests::{complete_peer, incomplete_peer}; use crate::tests::the_tracker::initialize_handlers_for_public_tracker; #[tokio::test] @@ -500,7 +500,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use crate::announce_handler::PeersWanted; - use crate::core_tests::{complete_peer, incomplete_peer}; + use crate::test_helpers::tests::{complete_peer, incomplete_peer}; use crate::tests::the_tracker::{initialize_handlers_for_listed_tracker, peer_ip}; #[tokio::test] diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs new file mode 100644 index 000000000..06f5ce384 --- /dev/null +++ b/packages/tracker-core/src/test_helpers.rs @@ -0,0 +1,219 @@ +//! Some generic test helpers functions. + +#[cfg(test)] +pub(crate) mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use bittorrent_primitives::info_hash::InfoHash; + use rand::Rng; + use torrust_tracker_configuration::Configuration; + #[cfg(test)] + use torrust_tracker_configuration::Core; + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + #[cfg(test)] + use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; + + use crate::announce_handler::AnnounceHandler; + use crate::databases::setup::initialize_database; + use crate::scrape_handler::ScrapeHandler; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use crate::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::whitelist::{self}; + + /// Generates a random `InfoHash`. + #[must_use] + pub fn random_info_hash() -> InfoHash { + let mut rng = rand::rng(); + let mut random_bytes = [0u8; 20]; + rng.fill(&mut random_bytes); + + InfoHash::from_bytes(&random_bytes) + } + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash_one() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. 
+ #[must_use] + pub fn sample_info_hash_two() -> InfoHash { + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash_alphabetically_ordered_after_sample_info_hash_one() -> InfoHash { + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } + + /// Sample peer whose state is not relevant for the tests. + #[must_use] + pub fn sample_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + #[must_use] + pub fn sample_peer_one() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000001"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + #[must_use] + pub fn sample_peer_two() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + #[must_use] + pub fn seeder() -> Peer { + complete_peer() + } + + #[must_use] + pub fn leecher() -> Peer { + incomplete_peer() + } + + #[must_use] + pub fn started_peer() -> Peer { + incomplete_peer() + } + + #[must_use] + pub fn completed_peer() -> Peer { + complete_peer() + } + + /// A peer that counts as `complete` is swarm metadata + /// IMPORTANT!: it only counts if the it has been announce at least once before + /// announcing the `AnnounceEvent::Completed` event. 
+ #[must_use] + pub fn complete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + /// A peer that counts as `incomplete` is swarm metadata + #[must_use] + pub fn incomplete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(1000), // Still bytes to download + event: AnnounceEvent::Started, + } + } + + #[must_use] + pub fn initialize_handlers(config: &Configuration) -> (Arc, Arc) { + let database = initialize_database(&config.core); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( + &config.core, + &in_memory_whitelist.clone(), + )); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (announce_handler, scrape_handler) + } + + /// # Panics + /// + /// Will panic if the temporary database file path is not a valid UFT string. + #[cfg(test)] + #[must_use] + pub fn ephemeral_configuration() -> Core { + let mut config = Core::default(); + + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + + config + } + + /// # Panics + /// + /// Will panic if the temporary database file path is not a valid UFT string. + #[cfg(test)] + #[must_use] + pub fn ephemeral_configuration_for_listed_tracker() -> Core { + let mut config = Core { + listed: true, + ..Default::default() + }; + + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + + config + } +} diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 778ac6d92..9dac35258 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -16,6 +16,7 @@ pub struct TorrentsManager { in_memory_torrent_repository: Arc, /// The persistent torrents repository. + #[allow(dead_code)] db_torrent_repository: Arc, } @@ -40,7 +41,8 @@ impl TorrentsManager { /// # Errors /// /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. 
- pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { + #[allow(dead_code)] + pub(crate) fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.db_torrent_repository.load_all()?; self.in_memory_torrent_repository.import_persistent(&persistent_torrents); @@ -71,8 +73,8 @@ mod tests { use torrust_tracker_torrent_repository::entry::EntrySync; use super::{DatabasePersistentTorrentRepository, TorrentsManager}; - use crate::core_tests::{ephemeral_configuration, sample_info_hash}; use crate::databases::setup::initialize_database; + use crate::test_helpers::tests::{ephemeral_configuration, sample_info_hash}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; struct TorrentsManagerDeps { @@ -138,7 +140,7 @@ mod tests { use torrust_tracker_clock::clock::{self}; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::core_tests::{ephemeral_configuration, sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{ephemeral_configuration, sample_info_hash, sample_peer}; use crate::torrent::manager::tests::{initialize_torrents_manager, initialize_torrents_manager_with}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; diff --git a/packages/tracker-core/src/torrent/mod.rs b/packages/tracker-core/src/torrent/mod.rs index 340f049d2..7ca9000f8 100644 --- a/packages/tracker-core/src/torrent/mod.rs +++ b/packages/tracker-core/src/torrent/mod.rs @@ -29,8 +29,11 @@ pub mod manager; pub mod repository; pub mod services; -use torrust_tracker_torrent_repository::{EntryMutexStd, TorrentsSkipMapMutexStd}; +#[cfg(test)] +use torrust_tracker_torrent_repository::EntryMutexStd; +use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; // Currently used types from the torrent repository crate. -pub type Torrents = TorrentsSkipMapMutexStd; -pub type TorrentEntry = EntryMutexStd; +pub(crate) type Torrents = TorrentsSkipMapMutexStd; +#[cfg(test)] +pub(crate) type TorrentEntry = EntryMutexStd; diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index baa0c4fdb..26302260b 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -32,33 +32,34 @@ impl InMemoryTorrentRepository { self.torrents.upsert_peer(info_hash, peer); } + #[cfg(test)] #[must_use] - pub fn remove(&self, key: &InfoHash) -> Option { + pub(crate) fn remove(&self, key: &InfoHash) -> Option { self.torrents.remove(key) } - pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + pub(crate) fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { self.torrents.remove_inactive_peers(current_cutoff); } - pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + pub(crate) fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { self.torrents.remove_peerless_torrents(policy); } #[must_use] - pub fn get(&self, key: &InfoHash) -> Option { + pub(crate) fn get(&self, key: &InfoHash) -> Option { self.torrents.get(key) } #[must_use] - pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { self.torrents.get_paginated(pagination) } /// It returns the data for a `scrape` response or empty if the torrent is /// not found. 
#[must_use] - pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { + pub(crate) fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { match self.torrents.get(info_hash) { Some(torrent_entry) => torrent_entry.get_swarm_metadata(), None => SwarmMetadata::zeroed(), @@ -69,7 +70,7 @@ impl InMemoryTorrentRepository { /// /// It filters out the client making the request. #[must_use] - pub fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { + pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { match self.torrents.get(info_hash) { None => vec![], Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(max(limit, TORRENT_PEERS_LIMIT))), @@ -135,7 +136,7 @@ mod tests { use std::sync::Arc; - use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; #[tokio::test] @@ -171,7 +172,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::numeric_peer_id; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -233,7 +234,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::numeric_peer_id; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -303,7 +304,7 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; #[tokio::test] @@ -374,7 +375,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_torrent_repository::entry::EntrySync; - use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::torrent::TorrentEntry; @@ -429,7 +430,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -467,7 +468,7 @@ mod tests { use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::core_tests::{ + use crate::test_helpers::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, sample_peer_one, sample_peer_two, }; @@ -577,7 +578,7 @@ mod tests { use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; - use 
crate::core_tests::{complete_peer, leecher, sample_info_hash, seeder}; + use crate::test_helpers::tests::{complete_peer, leecher, sample_info_hash, seeder}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; // todo: refactor to use test parametrization @@ -689,7 +690,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::core_tests::{leecher, sample_info_hash}; + use crate::test_helpers::tests::{leecher, sample_info_hash}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; #[tokio::test] @@ -728,7 +729,7 @@ mod tests { use torrust_tracker_primitives::PersistentTorrents; - use crate::core_tests::sample_info_hash; + use crate::test_helpers::tests::sample_info_hash; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; #[tokio::test] diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/persisted.rs index 224919d0e..0430f03bb 100644 --- a/packages/tracker-core/src/torrent/repository/persisted.rs +++ b/packages/tracker-core/src/torrent/repository/persisted.rs @@ -29,7 +29,7 @@ impl DatabasePersistentTorrentRepository { /// # Errors /// /// Will return a database `Err` if unable to load. - pub fn load_all(&self) -> Result { + pub(crate) fn load_all(&self) -> Result { self.database.load_persistent_torrents() } @@ -38,7 +38,7 @@ impl DatabasePersistentTorrentRepository { /// # Errors /// /// Will return a database `Err` if unable to save. - pub fn save(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> { + pub(crate) fn save(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> { self.database.save_persistent_torrent(info_hash, downloaded) } } @@ -49,8 +49,8 @@ mod tests { use torrust_tracker_primitives::PersistentTorrents; use super::DatabasePersistentTorrentRepository; - use crate::core_tests::{ephemeral_configuration, sample_info_hash, sample_info_hash_one, sample_info_hash_two}; use crate::databases::setup::initialize_database; + use crate::test_helpers::tests::{ephemeral_configuration, sample_info_hash, sample_info_hash_one, sample_info_hash_two}; fn initialize_db_persistent_torrent_repository() -> DatabasePersistentTorrentRepository { let config = ephemeral_configuration(); diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index c36190ed1..4c470bb74 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -302,7 +302,7 @@ mod tests { use std::sync::Arc; - use crate::core_tests::sample_info_hash; + use crate::test_helpers::tests::sample_info_hash; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::torrent::services::tests::sample_peer; use crate::torrent::services::{get_torrents, BasicInfo}; diff --git a/packages/tracker-core/src/whitelist/authorization.rs b/packages/tracker-core/src/whitelist/authorization.rs index 66f909226..3b7b8b4fb 100644 --- a/packages/tracker-core/src/whitelist/authorization.rs +++ b/packages/tracker-core/src/whitelist/authorization.rs @@ -88,8 +88,8 @@ mod tests { use torrust_tracker_configuration::Core; - use crate::core_tests::sample_info_hash; use crate::error::WhitelistError; + use crate::test_helpers::tests::sample_info_hash; use crate::whitelist::authorization::tests::the_whitelist_authorization_for_announce_and_scrape_actions::{ initialize_whitelist_authorization_and_dependencies_with, initialize_whitelist_authorization_with, }; @@ 
-129,7 +129,7 @@ mod tests { use torrust_tracker_configuration::Core; - use crate::core_tests::sample_info_hash; + use crate::test_helpers::tests::sample_info_hash; use crate::whitelist::authorization::tests::the_whitelist_authorization_for_announce_and_scrape_actions::{ initialize_whitelist_authorization_and_dependencies_with, initialize_whitelist_authorization_with, }; diff --git a/packages/tracker-core/src/whitelist/manager.rs b/packages/tracker-core/src/whitelist/manager.rs index e1cd2f89e..5ebd6db36 100644 --- a/packages/tracker-core/src/whitelist/manager.rs +++ b/packages/tracker-core/src/whitelist/manager.rs @@ -73,9 +73,9 @@ mod tests { use torrust_tracker_configuration::Core; - use crate::core_tests::ephemeral_configuration_for_listed_tracker; use crate::databases::setup::initialize_database; use crate::databases::Database; + use crate::test_helpers::tests::ephemeral_configuration_for_listed_tracker; use crate::whitelist::manager::WhitelistManager; use crate::whitelist::repository::in_memory::InMemoryWhitelist; use crate::whitelist::repository::persisted::DatabaseWhitelist; @@ -111,7 +111,7 @@ mod tests { mod configured_as_whitelisted { mod handling_the_torrent_whitelist { - use crate::core_tests::sample_info_hash; + use crate::test_helpers::tests::sample_info_hash; use crate::whitelist::manager::tests::initialize_whitelist_manager_for_whitelisted_tracker; #[tokio::test] @@ -141,7 +141,7 @@ mod tests { } mod persistence { - use crate::core_tests::sample_info_hash; + use crate::test_helpers::tests::sample_info_hash; use crate::whitelist::manager::tests::initialize_whitelist_manager_for_whitelisted_tracker; #[tokio::test] diff --git a/packages/tracker-core/src/whitelist/mod.rs b/packages/tracker-core/src/whitelist/mod.rs index 8521485f7..a39768e93 100644 --- a/packages/tracker-core/src/whitelist/mod.rs +++ b/packages/tracker-core/src/whitelist/mod.rs @@ -2,7 +2,7 @@ pub mod authorization; pub mod manager; pub mod repository; pub mod setup; -pub mod whitelist_tests; +pub mod test_helpers; #[cfg(test)] mod tests { @@ -10,8 +10,8 @@ mod tests { mod configured_as_whitelisted { mod handling_authorization { - use crate::core_tests::sample_info_hash; - use crate::whitelist::whitelist_tests::initialize_whitelist_services_for_listed_tracker; + use crate::test_helpers::tests::sample_info_hash; + use crate::whitelist::test_helpers::tests::initialize_whitelist_services_for_listed_tracker; #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { diff --git a/packages/tracker-core/src/whitelist/repository/in_memory.rs b/packages/tracker-core/src/whitelist/repository/in_memory.rs index befd6fed6..4faeda784 100644 --- a/packages/tracker-core/src/whitelist/repository/in_memory.rs +++ b/packages/tracker-core/src/whitelist/repository/in_memory.rs @@ -14,7 +14,7 @@ impl InMemoryWhitelist { } /// It removes a torrent from the whitelist in memory. - pub async fn remove(&self, info_hash: &InfoHash) -> bool { + pub(crate) async fn remove(&self, info_hash: &InfoHash) -> bool { self.whitelist.write().await.remove(info_hash) } @@ -24,7 +24,7 @@ impl InMemoryWhitelist { } /// It clears the whitelist. 
- pub async fn clear(&self) { + pub(crate) async fn clear(&self) { let mut whitelist = self.whitelist.write().await; whitelist.clear(); } @@ -33,7 +33,7 @@ impl InMemoryWhitelist { #[cfg(test)] mod tests { - use crate::core_tests::sample_info_hash; + use crate::test_helpers::tests::sample_info_hash; use crate::whitelist::repository::in_memory::InMemoryWhitelist; #[tokio::test] diff --git a/packages/tracker-core/src/whitelist/repository/persisted.rs b/packages/tracker-core/src/whitelist/repository/persisted.rs index 5101b5e35..4773cfbe6 100644 --- a/packages/tracker-core/src/whitelist/repository/persisted.rs +++ b/packages/tracker-core/src/whitelist/repository/persisted.rs @@ -22,7 +22,7 @@ impl DatabaseWhitelist { /// # Errors /// /// Will return a `database::Error` if unable to add the `info_hash` to the whitelist database. - pub fn add(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + pub(crate) fn add(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; if is_whitelisted { @@ -39,7 +39,7 @@ impl DatabaseWhitelist { /// # Errors /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. - pub fn remove(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + pub(crate) fn remove(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; if !is_whitelisted { @@ -56,7 +56,7 @@ impl DatabaseWhitelist { /// # Errors /// /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. - pub fn load_from_database(&self) -> Result, databases::error::Error> { + pub(crate) fn load_from_database(&self) -> Result, databases::error::Error> { self.database.load_whitelist() } } @@ -65,8 +65,8 @@ impl DatabaseWhitelist { mod tests { mod the_persisted_whitelist_repository { - use crate::core_tests::{ephemeral_configuration_for_listed_tracker, sample_info_hash}; use crate::databases::setup::initialize_database; + use crate::test_helpers::tests::{ephemeral_configuration_for_listed_tracker, sample_info_hash}; use crate::whitelist::repository::persisted::DatabaseWhitelist; fn initialize_database_whitelist() -> DatabaseWhitelist { diff --git a/packages/tracker-core/src/whitelist/test_helpers.rs b/packages/tracker-core/src/whitelist/test_helpers.rs new file mode 100644 index 000000000..cc30c4476 --- /dev/null +++ b/packages/tracker-core/src/whitelist/test_helpers.rs @@ -0,0 +1,32 @@ +//! Some generic test helpers functions. 
+ +#[cfg(test)] +pub(crate) mod tests { + + use std::sync::Arc; + + use torrust_tracker_configuration::Configuration; + + use crate::databases::setup::initialize_database; + use crate::whitelist::authorization::WhitelistAuthorization; + use crate::whitelist::manager::WhitelistManager; + use crate::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::whitelist::setup::initialize_whitelist_manager; + + #[must_use] + pub fn initialize_whitelist_services(config: &Configuration) -> (Arc, Arc) { + let database = initialize_database(&config.core); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + + (whitelist_authorization, whitelist_manager) + } + + #[must_use] + pub fn initialize_whitelist_services_for_listed_tracker() -> (Arc, Arc) { + use torrust_tracker_test_helpers::configuration; + + initialize_whitelist_services(&configuration::ephemeral_listed()) + } +} diff --git a/packages/tracker-core/src/whitelist/whitelist_tests.rs b/packages/tracker-core/src/whitelist/whitelist_tests.rs deleted file mode 100644 index d2fd275f2..000000000 --- a/packages/tracker-core/src/whitelist/whitelist_tests.rs +++ /dev/null @@ -1,27 +0,0 @@ -use std::sync::Arc; - -use torrust_tracker_configuration::Configuration; - -use super::authorization::WhitelistAuthorization; -use super::manager::WhitelistManager; -use super::repository::in_memory::InMemoryWhitelist; -use crate::databases::setup::initialize_database; -use crate::whitelist::setup::initialize_whitelist_manager; - -#[must_use] -pub fn initialize_whitelist_services(config: &Configuration) -> (Arc, Arc) { - let database = initialize_database(&config.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - - (whitelist_authorization, whitelist_manager) -} - -#[cfg(test)] -#[must_use] -pub fn initialize_whitelist_services_for_listed_tracker() -> (Arc, Arc) { - use torrust_tracker_test_helpers::configuration; - - initialize_whitelist_services(&configuration::ephemeral_listed()) -} diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 0236215f2..e0d77ab8a 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -120,6 +120,7 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { )); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let torrents_manager = Arc::new(TorrentsManager::new( &configuration.core, &in_memory_torrent_repository, diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index fa0ccc776..6bc93992f 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -306,6 +306,7 @@ use serde::{Deserialize, Serialize}; pub mod server; +pub mod test_helpers; pub mod v1; pub const HTTP_TRACKER_LOG_TARGET: &str = "HTTP TRACKER"; diff --git a/src/servers/http/test_helpers.rs b/src/servers/http/test_helpers.rs new file mode 100644 index 000000000..8c3020c52 --- /dev/null +++ b/src/servers/http/test_helpers.rs @@ -0,0 +1,16 @@ +//! Some generic test helpers functions. 
+ +#[cfg(test)] +pub(crate) mod tests { + use bittorrent_primitives::info_hash::InfoHash; + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } +} diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index f76aa7a07..64939ff48 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -254,7 +254,6 @@ mod tests { use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; - use bittorrent_tracker_core::core_tests::sample_info_hash; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; @@ -264,6 +263,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::packages::http_tracker_core; + use crate::servers::http::test_helpers::tests::sample_info_hash; struct CoreTrackerServices { pub core_config: Arc, diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 4de9296b3..e321ad01f 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -153,7 +153,6 @@ mod tests { use std::sync::Arc; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; - use bittorrent_tracker_core::core_tests::sample_info_hash; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; @@ -165,6 +164,7 @@ mod tests { use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::packages::http_tracker_core; + use crate::servers::http::test_helpers::tests::sample_info_hash; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{ initialize_core_tracker_services, sample_peer, MockHttpStatsEventSender, diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 3a2323693..e2eb4f87c 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -84,7 +84,6 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::AnnounceHandler; - use bittorrent_tracker_core::core_tests::sample_info_hash; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -98,6 +97,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::packages::http_tracker_core; + use crate::servers::http::test_helpers::tests::sample_info_hash; fn initialize_announce_and_scrape_handlers_for_public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); @@ -162,10 +162,11 @@ mod 
tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use crate::packages::{self, http_tracker_core}; + use crate::servers::http::test_helpers::tests::sample_info_hash; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ - initialize_announce_and_scrape_handlers_for_public_tracker, initialize_scrape_handler, sample_info_hash, - sample_info_hashes, sample_peer, MockHttpStatsEventSender, + initialize_announce_and_scrape_handlers_for_public_tracker, initialize_scrape_handler, sample_info_hashes, + sample_peer, MockHttpStatsEventSender, }; #[tokio::test] @@ -247,10 +248,10 @@ mod tests { use torrust_tracker_primitives::core::ScrapeData; use crate::packages::{self, http_tracker_core}; + use crate::servers::http::test_helpers::tests::sample_info_hash; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ - initialize_announce_and_scrape_handlers_for_public_tracker, sample_info_hash, sample_info_hashes, sample_peer, - MockHttpStatsEventSender, + initialize_announce_and_scrape_handlers_for_public_tracker, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; #[tokio::test] From 74d0d2851c97ef220052bef3f1d7bf6543b49bfd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 12 Feb 2025 09:59:27 +0000 Subject: [PATCH 02/14] docs: [#1261] fix doc errors in tracker-core --- packages/tracker-core/src/authentication/key/mod.rs | 4 ++-- packages/tracker-core/src/databases/driver/mod.rs | 3 --- packages/tracker-core/src/databases/error.rs | 2 +- packages/tracker-core/src/databases/mod.rs | 4 ++-- packages/tracker-core/src/lib.rs | 13 ------------- 5 files changed, 5 insertions(+), 21 deletions(-) diff --git a/packages/tracker-core/src/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs index fce18c0dd..ea9edb7d5 100644 --- a/packages/tracker-core/src/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -6,7 +6,7 @@ //! //! There are services to [`generate_key`] and [`verify_key_expiration`] authentication keys. //! -//! Authentication keys are used only by [`HTTP`](crate::servers::http) trackers. All keys have an expiration time, that means +//! Authentication keys are used only by HTTP trackers. All keys have an expiration time, that means //! they are only valid during a period of time. After that time the expiring key will no longer be valid. //! //! Keys are stored in this struct: @@ -112,7 +112,7 @@ pub fn generate_key(lifetime: Option) -> PeerKey { /// /// # Errors /// -/// Will return a verification error [`crate::authentication::key::Error`] if +/// Will return a verification error [`enum@crate::authentication::key::Error`] if /// it cannot verify the key. pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = CurrentClock::now(); diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index 2bc6a1e3c..06e912f7c 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -1,7 +1,4 @@ //! Database driver factory. -//! -//! See [`databases::driver::build`](crate::core::databases::driver::build) -//! function for more information. 
use mysql::Mysql; use serde::{Deserialize, Serialize}; use sqlite::Sqlite; diff --git a/packages/tracker-core/src/databases/error.rs b/packages/tracker-core/src/databases/error.rs index 0f3207587..6b340080e 100644 --- a/packages/tracker-core/src/databases/error.rs +++ b/packages/tracker-core/src/databases/error.rs @@ -1,6 +1,6 @@ //! Database errors. //! -//! This module contains the [Database errors](crate::core::databases::error::Error). +//! This module contains the [Database errors](crate::databases::error::Error). use std::panic::Location; use std::sync::Arc; diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index 010252139..1de13332f 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -4,8 +4,8 @@ //! //! There are two implementations of the trait (two drivers): //! -//! - [`Mysql`](crate::core::databases::mysql::Mysql) -//! - [`Sqlite`](crate::core::databases::sqlite::Sqlite) +//! - `Mysql` +//! - `Sqlite` //! //! > **NOTICE**: There are no database migrations. If there are any changes, //! > we will implemented them or provide a script to migrate to the new schema. diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index ecbaef9c5..ac6e4edac 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -25,7 +25,6 @@ //! - [Torrents](#torrents) //! - [Peers](#peers) //! - [Configuration](#configuration) -//! - [Services](#services) //! - [Authentication](#authentication) //! - [Statistics](#statistics) //! - [Persistence](#persistence) @@ -342,18 +341,6 @@ //! //! Refer to the [`configuration` module documentation](https://docs.rs/torrust-tracker-configuration) to get more information about all options. //! -//! # Services -//! -//! Services are domain services on top of the core tracker domain. Right now there are two types of service: -//! -//! - For statistics: [`crate::packages::statistics::services`] -//! - For torrents: [`crate::core::torrent::services`] -//! -//! Services usually format the data inside the tracker to make it easier to consume by other parts. -//! They also decouple the internal data structure, used by the tracker, from the way we deliver that data to the consumers. -//! The internal data structure is designed for performance or low memory consumption. And it should be changed -//! without affecting the external consumers. -//! //! Services can include extra features like pagination, for example. //! //! 
# Authentication From 181c27e749fe8da1f86c10960cb622bc1a5e082a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 12 Feb 2025 10:40:28 +0000 Subject: [PATCH 03/14] docs: [#1261] review docs for tracker-core package --- packages/http-protocol/src/v1/query.rs | 7 + packages/tracker-core/src/announce_handler.rs | 131 ++++++- .../src/authentication/handler.rs | 141 ++++--- .../src/authentication/key/mod.rs | 102 +++-- .../src/authentication/key/peer_key.rs | 103 ++++- .../key/repository/in_memory.rs | 53 ++- .../src/authentication/key/repository/mod.rs | 1 + .../key/repository/persisted.rs | 40 +- .../tracker-core/src/authentication/mod.rs | 15 + .../src/authentication/service.rs | 53 ++- .../src/databases/driver/mysql.rs | 11 + .../src/databases/driver/sqlite.rs | 23 +- packages/tracker-core/src/databases/error.rs | 38 +- packages/tracker-core/src/databases/mod.rs | 149 ++++---- packages/tracker-core/src/databases/setup.rs | 44 ++- packages/tracker-core/src/error.rs | 26 +- packages/tracker-core/src/lib.rs | 358 +++--------------- packages/tracker-core/src/scrape_handler.rs | 74 +++- packages/tracker-core/src/torrent/manager.rs | 50 ++- packages/tracker-core/src/torrent/mod.rs | 180 ++++++++- .../src/torrent/repository/in_memory.rs | 146 ++++++- .../src/torrent/repository/mod.rs | 1 + .../src/torrent/repository/persisted.rs | 52 ++- packages/tracker-core/src/torrent/services.rs | 109 +++++- .../src/whitelist/authorization.rs | 29 +- .../tracker-core/src/whitelist/manager.rs | 48 ++- packages/tracker-core/src/whitelist/mod.rs | 18 + .../src/whitelist/repository/in_memory.rs | 25 +- .../src/whitelist/repository/mod.rs | 1 + .../src/whitelist/repository/persisted.rs | 22 +- packages/tracker-core/src/whitelist/setup.rs | 26 ++ .../src/whitelist/test_helpers.rs | 7 +- 32 files changed, 1467 insertions(+), 616 deletions(-) diff --git a/packages/http-protocol/src/v1/query.rs b/packages/http-protocol/src/v1/query.rs index f77145cb6..66afddf65 100644 --- a/packages/http-protocol/src/v1/query.rs +++ b/packages/http-protocol/src/v1/query.rs @@ -249,6 +249,13 @@ mod tests { assert_eq!(query.get_param("param2"), Some("value2".to_string())); } + #[test] + fn should_ignore_duplicate_param_values_when_asked_to_return_only_one_value() { + let query = Query::from(vec![("param1", "value1"), ("param1", "value2")]); + + assert_eq!(query.get_param("param1"), Some("value1".to_string())); + } + #[test] fn should_fail_parsing_an_invalid_query_string() { let invalid_raw_query = "name=value=value"; diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index cd0a9b861..6707f1917 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -1,3 +1,95 @@ +//! Announce handler. +//! +//! Handling `announce` requests is the most important task for a `BitTorrent` +//! tracker. +//! +//! A `BitTorrent` swarm is a network of peers that are all trying to download +//! the same torrent. When a peer wants to find other peers it announces itself +//! to the swarm via the tracker. The peer sends its data to the tracker so that +//! the tracker can add it to the swarm. The tracker responds to the peer with +//! the list of other peers in the swarm so that the peer can contact them to +//! start downloading pieces of the file from them. +//! +//! Once you have instantiated the `AnnounceHandler` you can `announce` a new [`peer::Peer`](torrust_tracker_primitives) with: +//! +//! ```rust,no_run +//! use std::net::SocketAddr; +//! 
use std::net::IpAddr;
+//! use std::net::Ipv4Addr;
+//! use std::str::FromStr;
+//!
+//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId};
+//! use torrust_tracker_primitives::DurationSinceUnixEpoch;
+//! use torrust_tracker_primitives::peer;
+//! use bittorrent_primitives::info_hash::InfoHash;
+//!
+//! let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap();
+//!
+//! let peer = peer::Peer {
+//!     peer_id: PeerId(*b"-qB00000000000000001"),
+//!     peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081),
+//!     updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0),
+//!     uploaded: NumberOfBytes::new(0),
+//!     downloaded: NumberOfBytes::new(0),
+//!     left: NumberOfBytes::new(0),
+//!     event: AnnounceEvent::Completed,
+//! };
+//!
+//! let peer_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap());
+//! ```
+//!
+//! ```text
+//! let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip).await;
+//! ```
+//!
+//! The handler returns the list of peers for the torrent with the infohash
+//! `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, filtering out the peer that is
+//! making the `announce` request.
+//!
+//! > **NOTICE**: the peer argument is mutable because the handler can
+//! > change the peer IP if the peer is using a loopback IP.
+//!
+//! The `peer_ip` argument is the resolved peer IP. It's a common practice for
+//! trackers to ignore the peer IP sent in the `announce` request params and to
+//! resolve it from the IP of the client making the request. As the tracker is
+//! a domain service, the peer IP must be provided by the caller of the handler,
+//! which is usually a higher-level component with access to the request
+//! metadata, for example, connection data, proxy headers, etcetera.
+//!
+//! The returned struct is:
+//!
+//! ```rust,no_run
+//! use torrust_tracker_primitives::peer;
+//! use torrust_tracker_configuration::AnnouncePolicy;
+//!
+//! pub struct AnnounceData {
+//!     pub peers: Vec<peer::Peer>,
+//!     pub swarm_stats: SwarmMetadata,
+//!     pub policy: AnnouncePolicy, // the tracker announce policy.
+//! }
+//!
+//! pub struct SwarmMetadata {
+//!     pub completed: u32, // The number of peers that have ever completed downloading
+//!     pub seeders: u32, // The number of active peers that have completed downloading (seeders)
+//!     pub leechers: u32, // The number of active peers that have not completed downloading (leechers)
+//! }
+//!
+//! // Core tracker configuration
+//! pub struct AnnounceInterval {
+//!     // ...
+//!     pub interval: u32, // Interval in seconds that the client should wait between sending regular announce requests to the tracker
+//!     pub interval_min: u32, // Minimum announce interval. Clients must not reannounce more frequently than this
+//!     // ...
+//! }
+//! ```
+//!
+//! ## Related BEPs:
+//!
+//! Refer to `BitTorrent` BEPs and other sites for more information about the `announce` request:
+//!
+//! - [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html)
+//! - [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html)
+//! - [Vuze docs](https://wiki.vuze.com/w/Announce)
 use std::net::IpAddr;
 use std::sync::Arc;
 
@@ -10,18 +102,20 @@ use torrust_tracker_primitives::swarm_metadata::SwarmMetadata;
 use super::torrent::repository::in_memory::InMemoryTorrentRepository;
 use super::torrent::repository::persisted::DatabasePersistentTorrentRepository;
 
+/// Handles `announce` requests from `BitTorrent` clients.
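+///
+/// Note that the `announce` method defined below is synchronous and also takes
+/// a `PeersWanted` argument that limits how many peers are returned. A rough,
+/// non-compiled sketch of a call asking for at most 50 peers (argument passing
+/// is illustrative; see the method signature below for the exact types):
+///
+/// ```text
+/// let peers_wanted = PeersWanted::only(50);
+/// let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip, &peers_wanted);
+/// ```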
pub struct AnnounceHandler { /// The tracker configuration. config: Core, - /// The in-memory torrents repository. + /// Repository for in-memory torrent data. in_memory_torrent_repository: Arc, - /// The persistent torrents repository. + /// Repository for persistent torrent data (database). db_torrent_repository: Arc, } impl AnnounceHandler { + /// Creates a new `AnnounceHandler`. #[must_use] pub fn new( config: &Core, @@ -35,9 +129,20 @@ impl AnnounceHandler { } } - /// It handles an announce request. + /// Processes an announce request from a peer. /// /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). + /// + /// # Parameters + /// + /// - `info_hash`: The unique identifier of the torrent. + /// - `peer`: The peer announcing itself (may be updated if IP is adjusted). + /// - `remote_client_ip`: The IP address of the client making the request. + /// - `peers_wanted`: Specifies how many peers the client wants in the response. + /// + /// # Returns + /// + /// An `AnnounceData` struct containing the list of peers, swarm statistics, and tracker policy. pub fn announce( &self, info_hash: &InfoHash, @@ -77,9 +182,8 @@ impl AnnounceHandler { } } - /// It updates the torrent entry in memory, it also stores in the database - /// the torrent info data which is persistent, and finally return the data - /// needed for a `announce` request response. + /// Updates the torrent data in memory, persists statistics if needed, and + /// returns the updated swarm stats. #[must_use] fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { let swarm_metadata_before = self.in_memory_torrent_repository.get_swarm_metadata(info_hash); @@ -95,7 +199,7 @@ impl AnnounceHandler { swarm_metadata_after } - /// It stores the torrents stats into the database (if persistency is enabled). + /// Persists torrent statistics to the database if persistence is enabled. fn persist_stats(&self, info_hash: &InfoHash, swarm_metadata: &SwarmMetadata) { if self.config.tracker_policy.persistent_torrent_completed_stat { let completed = swarm_metadata.downloaded; @@ -106,22 +210,25 @@ impl AnnounceHandler { } } -/// How many peers the peer announcing wants in the announce response. +/// Specifies how many peers a client wants in the announce response. #[derive(Clone, Debug, PartialEq, Default)] pub enum PeersWanted { - /// The peer wants as many peers as possible in the announce response. + /// Request as many peers as possible (default behavior). #[default] AsManyAsPossible, - /// The peer only wants a certain amount of peers in the announce response. + + /// Request a specific number of peers. Only { amount: usize }, } impl PeersWanted { + /// Request a specific number of peers. #[must_use] pub fn only(limit: u32) -> Self { limit.into() } + /// Returns the maximum number of peers allowed based on the request and tracker limit. fn limit(&self) -> usize { match self { PeersWanted::AsManyAsPossible => TORRENT_PEERS_LIMIT, @@ -159,6 +266,10 @@ impl From for PeersWanted { } } +/// Assigns the correct IP address to a peer based on tracker settings. +/// +/// If the client IP is a loopback address and the tracker has an external IP +/// configured, the external IP will be assigned to the peer. 
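+///
+/// A rough illustration of the expected behavior (addresses are placeholder
+/// values):
+///
+/// ```text
+/// // Client connects from 127.0.0.1 and the tracker is configured with the
+/// // external IP 126.0.0.1: the peer is stored with the IP 126.0.0.1.
+/// //
+/// // Client connects from a non-loopback IP such as 2.137.87.41: the peer
+/// // keeps that IP, regardless of the tracker's external IP setting.
+/// ```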
#[must_use] fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option) -> IpAddr { if let Some(host_ip) = tracker_external_ip.filter(|_| remote_client_ip.is_loopback()) { diff --git a/packages/tracker-core/src/authentication/handler.rs b/packages/tracker-core/src/authentication/handler.rs index 136060916..178895b8d 100644 --- a/packages/tracker-core/src/authentication/handler.rs +++ b/packages/tracker-core/src/authentication/handler.rs @@ -1,3 +1,11 @@ +//! This module implements the `KeysHandler` service +//! +//! It's responsible for managing authentication keys for the `BitTorrent` tracker. +//! +//! The service handles both persistent and in-memory storage of peer keys, and +//! supports adding new keys (either pre-generated or randomly created), +//! removing keys, and loading keys from the database into memory. Keys can be +//! either permanent or expire after a configurable duration per key. use std::sync::Arc; use std::time::Duration; @@ -11,29 +19,44 @@ use super::{key, CurrentClock, Key, PeerKey}; use crate::databases; use crate::error::PeerKeyError; -/// This type contains the info needed to add a new tracker key. +/// Contains the information needed to add a new tracker key. /// -/// You can upload a pre-generated key or let the app to generate a new one. -/// You can also set an expiration date or leave it empty (`None`) if you want -/// to create a permanent key that does not expire. +/// A new key can either be a pre-generated key provided by the user or can be +/// randomly generated by the application. Additionally, the key may be set to +/// expire after a certain number of seconds, or be permanent (if no expiration +/// is specified). #[derive(Debug)] pub struct AddKeyRequest { - /// The pre-generated key. Use `None` to generate a random key. + /// The pre-generated key as a string. If `None` the service will generate a + /// random key. pub opt_key: Option, - /// How long the key will be valid in seconds. Use `None` for permanent keys. + /// The duration (in seconds) for which the key is valid. Use `None` for + /// permanent keys. pub opt_seconds_valid: Option, } +/// The `KeysHandler` service manages the creation, addition, removal, and loading +/// of authentication keys for the tracker. +/// +/// It uses both a persistent (database) repository and an in-memory repository +/// to manage keys. pub struct KeysHandler { - /// The database repository for the authentication keys. + /// The database repository for storing authentication keys persistently. db_key_repository: Arc, - /// In-memory implementation of the authentication key repository. + /// The in-memory repository for caching authentication keys. in_memory_key_repository: Arc, } impl KeysHandler { + /// Creates a new instance of the `KeysHandler` service. + /// + /// # Parameters + /// + /// - `db_key_repository`: A shared reference to the database key repository. + /// - `in_memory_key_repository`: A shared reference to the in-memory key + /// repository. #[must_use] pub fn new(db_key_repository: &Arc, in_memory_key_repository: &Arc) -> Self { Self { @@ -42,18 +65,24 @@ impl KeysHandler { } } - /// Adds new peer keys to the tracker. + /// Adds a new peer key to the tracker. + /// + /// The key may be pre-generated or generated on-the-fly. + /// + /// Depending on whether an expiration duration is specified, the key will + /// be either expiring or permanent. /// - /// Keys can be pre-generated or randomly created. They can also be - /// permanent or expire. 
+ /// # Parameters + /// + /// - `add_key_req`: The request containing options for key creation. /// /// # Errors /// - /// Will return an error if: + /// Returns an error if: /// - /// - The key duration overflows the duration type maximum value. + /// - The provided key duration exceeds the maximum allowed value. /// - The provided pre-generated key is invalid. - /// - The key could not been persisted due to database issues. + /// - There is an error persisting the key in the database. pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { if let Some(pre_existing_key) = add_key_req.opt_key { // Pre-generated key @@ -125,29 +154,31 @@ impl KeysHandler { } } - /// It generates a new permanent authentication key. + /// Generates a new permanent authentication key. /// - /// Authentication keys are used by HTTP trackers. + /// Permanent keys do not expire. /// /// # Errors /// - /// Will return a `database::Error` if unable to add the `auth_key` to the database. + /// Returns a `databases::error::Error` if the key cannot be persisted in + /// the database. pub(crate) async fn generate_permanent_peer_key(&self) -> Result { self.generate_expiring_peer_key(None).await } - /// It generates a new expiring authentication key. + /// Generates a new authentication key with an optional expiration lifetime. /// - /// Authentication keys are used by HTTP trackers. + /// If a `lifetime` is provided, the generated key will expire after that + /// duration. The new key is stored both in the database and in memory. /// - /// # Errors + /// # Parameters /// - /// Will return a `database::Error` if unable to add the `auth_key` to the database. + /// - `lifetime`: An optional duration specifying how long the key is valid. /// - /// # Arguments + /// # Errors /// - /// * `lifetime` - The duration in seconds for the new key. The key will be - /// no longer valid after `lifetime` seconds. + /// Returns a `databases::error::Error` if there is an issue adding the key + /// to the database. pub async fn generate_expiring_peer_key(&self, lifetime: Option) -> Result { let peer_key = key::generate_key(lifetime); @@ -158,36 +189,36 @@ impl KeysHandler { Ok(peer_key) } - /// It adds a pre-generated permanent authentication key. + /// Adds a pre-generated permanent authentication key. /// - /// Authentication keys are used by HTTP trackers. + /// Internally, this calls `add_expiring_peer_key` with no expiration. /// - /// # Errors + /// # Parameters /// - /// Will return a `database::Error` if unable to add the `auth_key` to the - /// database. For example, if the key already exist. + /// - `key`: The pre-generated key. /// - /// # Arguments + /// # Errors /// - /// * `key` - The pre-generated key. + /// Returns a `databases::error::Error` if there is an issue persisting the + /// key. pub(crate) async fn add_permanent_peer_key(&self, key: Key) -> Result { self.add_expiring_peer_key(key, None).await } - /// It adds a pre-generated authentication key. + /// Adds a pre-generated authentication key with an optional expiration. /// - /// Authentication keys are used by HTTP trackers. + /// The key is stored in both the database and the in-memory repository. /// - /// # Errors + /// # Parameters /// - /// Will return a `database::Error` if unable to add the `auth_key` to the - /// database. For example, if the key already exist. + /// - `key`: The pre-generated key. + /// - `valid_until`: An optional timestamp (as a duration since the Unix + /// epoch) after which the key expires. 
/// - /// # Arguments + /// # Errors /// - /// * `key` - The pre-generated key. - /// * `lifetime` - The duration in seconds for the new key. The key will be - /// no longer valid after `lifetime` seconds. + /// Returns a `databases::error::Error` if there is an issue adding the key + /// to the database. pub(crate) async fn add_expiring_peer_key( &self, key: Key, @@ -205,11 +236,18 @@ impl KeysHandler { Ok(peer_key) } - /// It removes an authentication key. + /// Removes an authentication key. + /// + /// The key is removed from both the database and the in-memory repository. + /// + /// # Parameters + /// + /// - `key`: A reference to the key to be removed. /// /// # Errors /// - /// Will return a `database::Error` if unable to remove the `key` to the database. + /// Returns a `databases::error::Error` if the key cannot be removed from + /// the database. pub async fn remove_peer_key(&self, key: &Key) -> Result<(), databases::error::Error> { self.db_key_repository.remove(key)?; @@ -218,19 +256,26 @@ impl KeysHandler { Ok(()) } - /// It removes an authentication key from memory. + /// Removes an authentication key from the in-memory repository. + /// + /// This function does not interact with the database. + /// + /// # Parameters + /// + /// - `key`: A reference to the key to be removed. pub(crate) async fn remove_in_memory_auth_key(&self, key: &Key) { self.in_memory_key_repository.remove(key).await; } - /// The `Tracker` stores the authentication keys in memory and in the - /// database. In case you need to restart the `Tracker` you can load the - /// keys from the database into memory with this function. Keys are - /// automatically stored in the database when they are generated. + /// Loads all authentication keys from the database into the in-memory + /// repository. + /// + /// This is useful during tracker startup to ensure that all persisted keys + /// are available in memory. /// /// # Errors /// - /// Will return a `database::Error` if unable to `load_keys` from the database. + /// Returns a `databases::error::Error` if there is an issue loading the keys from the database. pub async fn load_peer_keys_from_database(&self) -> Result<(), databases::error::Error> { let keys_from_database = self.db_key_repository.load_keys()?; diff --git a/packages/tracker-core/src/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs index ea9edb7d5..648143928 100644 --- a/packages/tracker-core/src/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -1,42 +1,45 @@ -//! Tracker authentication services and structs. +//! Tracker authentication services and types. //! -//! This module contains functions to handle tracker keys. -//! Tracker keys are tokens used to authenticate the tracker clients when the tracker runs -//! in `private` or `private_listed` modes. +//! This module provides functions and data structures for handling tracker keys. +//! Tracker keys are tokens used to authenticate tracker clients when the +//! tracker is running in `private` mode. //! -//! There are services to [`generate_key`] and [`verify_key_expiration`] authentication keys. +//! Authentication keys are used exclusively by HTTP trackers. Every key has an +//! expiration time, meaning that it is only valid for a predetermined period. +//! Once the expiration time is reached, an expiring key will be rejected. //! -//! Authentication keys are used only by HTTP trackers. All keys have an expiration time, that means -//! they are only valid during a period of time. 
After that time the expiring key will no longer be valid. +//! The primary key structure is [`PeerKey`], which couples a randomly generated +//! [`Key`] (a 32-character alphanumeric string) with an optional expiration +//! timestamp. //! -//! Keys are stored in this struct: +//! # Examples //! -//! ```rust,no_run +//! Generating a new key valid for `9999` seconds: +//! +//! ```rust +//! use bittorrent_tracker_core::authentication; +//! use std::time::Duration; +//! +//! let expiring_key = authentication::key::generate_key(Some(Duration::new(9999, 0))); +//! +//! // Later, verify that the key is still valid. +//! assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); +//! ``` +//! +//! The core key types are defined as follows: +//! +//! ```rust //! use bittorrent_tracker_core::authentication::Key; //! use torrust_tracker_primitives::DurationSinceUnixEpoch; //! //! pub struct PeerKey { -//! /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` +//! /// A random 32-character authentication token (e.g., `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ`) //! pub key: Key, //! -//! /// Timestamp, the key will be no longer valid after this timestamp. -//! /// If `None` the keys will not expire (permanent key). +//! /// The timestamp after which the key expires. If `None`, the key is permanent. //! pub valid_until: Option, //! } //! ``` -//! -//! You can generate a new key valid for `9999` seconds and `0` nanoseconds from the current time with the following: -//! -//! ```rust,no_run -//! use bittorrent_tracker_core::authentication; -//! use std::time::Duration; -//! -//! let expiring_key = authentication::key::generate_key(Some(Duration::new(9999, 0))); -//! -//! // And you can later verify it with: -//! -//! assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); -//! ``` pub mod peer_key; pub mod repository; @@ -75,17 +78,33 @@ pub(crate) fn generate_expiring_key(lifetime: Duration) -> PeerKey { generate_key(Some(lifetime)) } -/// It generates a new random 32-char authentication [`PeerKey`]. +/// Generates a new random 32-character authentication key (`PeerKey`). /// -/// It can be an expiring or permanent key. +/// If a lifetime is provided, the generated key will expire after the specified +/// duration; otherwise, the key is permanent (i.e., it never expires). /// /// # Panics /// -/// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. +/// Panics if the addition of the lifetime to the current time overflows +/// (an extremely unlikely event). /// /// # Arguments /// -/// * `lifetime`: if `None` the key will be permanent. +/// * `lifetime`: An optional duration specifying how long the key is valid. +/// If `None`, the key is permanent. +/// +/// # Examples +/// +/// ```rust +/// use bittorrent_tracker_core::authentication::key; +/// use std::time::Duration; +/// +/// // Generate an expiring key valid for 3600 seconds. +/// let expiring_key = key::generate_key(Some(Duration::from_secs(3600))); +/// +/// // Generate a permanent key. +/// let permanent_key = key::generate_key(None); +/// ``` #[must_use] pub fn generate_key(lifetime: Option) -> PeerKey { let random_key = Key::random(); @@ -107,13 +126,27 @@ pub fn generate_key(lifetime: Option) -> PeerKey { } } -/// It verifies an [`PeerKey`]. It checks if the expiration date has passed. -/// Permanent keys without duration (`None`) do not expire. +/// Verifies whether a given authentication key (`PeerKey`) is still valid. 
+/// +/// For expiring keys, this function compares the key's expiration timestamp +/// against the current time. Permanent keys (with `None` as their expiration) +/// are always valid. /// /// # Errors /// -/// Will return a verification error [`enum@crate::authentication::key::Error`] if -/// it cannot verify the key. +/// Returns a verification error of type [`enum@Error`] if the key has expired. +/// +/// # Examples +/// +/// ```rust +/// use bittorrent_tracker_core::authentication::key; +/// use std::time::Duration; +/// +/// let expiring_key = key::generate_key(Some(Duration::from_secs(100))); +/// +/// // If the key's expiration time has passed, the verification will fail. +/// assert!(key::verify_key_expiration(&expiring_key).is_ok()); +/// ``` pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = CurrentClock::now(); @@ -136,17 +169,20 @@ pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { #[derive(Debug, Error)] #[allow(dead_code)] pub enum Error { + /// Wraps an underlying error encountered during key verification. #[error("Key could not be verified: {source}")] KeyVerificationError { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, + /// Indicates that the key could not be read or found. #[error("Failed to read key: {key}, {location}")] UnableToReadKey { location: &'static Location<'static>, key: Box, }, + /// Indicates that the key has expired. #[error("Key has expired, {location}")] KeyExpired { location: &'static Location<'static> }, } diff --git a/packages/tracker-core/src/authentication/key/peer_key.rs b/packages/tracker-core/src/authentication/key/peer_key.rs index 1d2b1fadc..41aba950b 100644 --- a/packages/tracker-core/src/authentication/key/peer_key.rs +++ b/packages/tracker-core/src/authentication/key/peer_key.rs @@ -1,3 +1,13 @@ +//! Authentication keys for private trackers. +//! +//! This module defines the types and functionality for managing authentication +//! keys used by the tracker. These keys, represented by the `Key` and `PeerKey` +//! types, are essential for authenticating peers in private tracker +//! environments. +//! +//! A `Key` is a 32-character alphanumeric token, while a `PeerKey` couples a +//! `Key` with an optional expiration timestamp. If the expiration is set (via +//! `valid_until`), the key will become invalid after that time. use std::str::FromStr; use std::time::Duration; @@ -11,22 +21,42 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::AUTH_KEY_LENGTH; -/// An authentication key which can potentially have an expiration time. -/// After that time is will automatically become invalid. +/// A peer authentication key with an optional expiration time. +/// +/// A `PeerKey` associates a generated `Key` (a 32-character alphanumeric string) +/// with an optional expiration timestamp (`valid_until`). If `valid_until` is +/// `None`, the key is considered permanent. +/// +/// # Example +/// +/// ```rust +/// use std::time::Duration; +/// use bittorrent_tracker_core::authentication::key::peer_key::{Key, PeerKey}; +/// +/// let expiring_key = PeerKey { +/// key: Key::random(), +/// valid_until: Some(Duration::from_secs(3600)), // Expires in 1 hour +/// }; +/// +/// let permanent_key = PeerKey { +/// key: Key::random(), +/// valid_until: None, +/// }; +/// ``` #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PeerKey { - /// Random 32-char string. 
For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` + /// A 32-character authentication key. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` pub key: Key, - /// Timestamp, the key will be no longer valid after this timestamp. - /// If `None` the keys will not expire (permanent key). + /// An optional expiration timestamp. If set, the key becomes invalid after + /// this time. A value of `None` indicates a permanent key. pub valid_until: Option, } impl PartialEq for PeerKey { fn eq(&self, other: &Self) -> bool { - // We ignore the fractions of seconds when comparing the timestamps - // because we only store the seconds in the database. + // When comparing two PeerKeys, ignore fractions of seconds since only + // whole seconds are stored in the database. self.key == other.key && match (&self.valid_until, &other.valid_until) { (Some(a), Some(b)) => a.as_secs() == b.as_secs(), @@ -53,14 +83,17 @@ impl PeerKey { self.key.clone() } - /// It returns the expiry time. For example, for the starting time for Unix Epoch - /// (timestamp 0) it will return a `DateTime` whose string representation is - /// `1970-01-01 00:00:00 UTC`. + /// Computes and returns the expiration time as a UTC `DateTime`, if one + /// exists. + /// + /// The returned time is derived from the stored seconds since the Unix + /// epoch. Note that any fractional seconds are discarded since only whole + /// seconds are stored in the database. /// /// # Panics /// - /// Will panic when the key timestamp overflows the internal i64 type. - /// (this will naturally happen in 292.5 billion years) + /// Panics if the key's timestamp overflows the internal `i64` type (this is + /// extremely unlikely, happening roughly 292.5 billion years from now). #[must_use] pub fn expiry_time(&self) -> Option> { // We remove the fractions of seconds because we only store the seconds @@ -72,17 +105,37 @@ impl PeerKey { /// A token used for authentication. /// -/// - It contains only ascii alphanumeric chars: lower and uppercase letters and -/// numbers. -/// - It's a 32-char string. +/// The `Key` type encapsulates a 32-character string that must consist solely +/// of ASCII alphanumeric characters (0-9, a-z, A-Z). This key is used by the +/// tracker to authenticate peers. +/// +/// # Examples +/// +/// Creating a key from a valid string: +/// +/// ``` +/// use bittorrent_tracker_core::authentication::key::peer_key::Key; +/// let key = Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); +/// ``` +/// +/// Generating a random key: +/// +/// ``` +/// use bittorrent_tracker_core::authentication::key::peer_key::Key; +/// let random_key = Key::random(); +/// ``` #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] pub struct Key(String); impl Key { + /// Constructs a new `Key` from the given string. + /// /// # Errors /// - /// Will return an error is the string represents an invalid key. - /// Valid keys can only contain 32 chars including 0-9, a-z and A-Z. + /// Returns a `ParseKeyError` if: + /// + /// - The input string does not have exactly 32 characters. + /// - The input string contains characters that are not ASCII alphanumeric. pub fn new(value: &str) -> Result { if value.len() != AUTH_KEY_LENGTH { return Err(ParseKeyError::InvalidKeyLength); @@ -95,11 +148,14 @@ impl Key { Ok(Self(value.to_owned())) } - /// It generates a random key. + /// Generates a new random authentication key. + /// + /// The random key is generated by sampling 32 ASCII alphanumeric characters. 
/// /// # Panics /// - /// Will panic if the random number generator fails to generate a valid key. + /// Panics if the random number generator fails to produce a valid key + /// (extremely unlikely). pub fn random() -> Self { let random_id: String = rng() .sample_iter(&Alphanumeric) @@ -115,9 +171,11 @@ impl Key { } } -/// Error returned when a key cannot be parsed from a string. +/// Errors that can occur when parsing a string into a `Key`. +/// +/// # Examples /// -/// ```text +/// ```rust /// use bittorrent_tracker_core::authentication::Key; /// use std::str::FromStr; /// @@ -132,9 +190,12 @@ impl Key { /// this error. #[derive(Debug, Error)] pub enum ParseKeyError { + /// The provided key does not have exactly 32 characters. #[error("Invalid key length. Key must be have 32 chars")] InvalidKeyLength, + /// The provided key contains invalid characters. Only ASCII alphanumeric + /// characters are allowed. #[error("Invalid chars for key. Key can only alphanumeric chars (0-9, a-z, A-Z)")] InvalidChars, } diff --git a/packages/tracker-core/src/authentication/key/repository/in_memory.rs b/packages/tracker-core/src/authentication/key/repository/in_memory.rs index 13664e27c..5911771d4 100644 --- a/packages/tracker-core/src/authentication/key/repository/in_memory.rs +++ b/packages/tracker-core/src/authentication/key/repository/in_memory.rs @@ -1,6 +1,11 @@ +//! In-memory implementation of the authentication key repository. use crate::authentication::key::{Key, PeerKey}; -/// In-memory implementation of the authentication key repository. +/// An in-memory repository for storing authentication keys. +/// +/// This repository maintains a mapping between a peer's [`Key`] and its +/// corresponding [`PeerKey`]. It is designed for use in private tracker +/// environments where keys are maintained in memory. #[derive(Debug, Default)] pub struct InMemoryKeyRepository { /// Tracker users' keys. Only for private trackers. @@ -8,28 +13,66 @@ pub struct InMemoryKeyRepository { } impl InMemoryKeyRepository { - /// It adds a new authentication key. + /// Inserts a new authentication key into the repository. + /// + /// This function acquires a write lock on the internal storage and inserts + /// the provided [`PeerKey`], using its inner [`Key`] as the map key. + /// + /// # Arguments + /// + /// * `auth_key` - A reference to the [`PeerKey`] to be inserted. pub(crate) async fn insert(&self, auth_key: &PeerKey) { self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); } - /// It removes an authentication key. + /// Removes an authentication key from the repository. + /// + /// This function acquires a write lock on the internal storage and removes + /// the key that matches the provided [`Key`]. + /// + /// # Arguments + /// + /// * `key` - A reference to the [`Key`] corresponding to the key to be removed. pub(crate) async fn remove(&self, key: &Key) { self.keys.write().await.remove(key); } + /// Retrieves an authentication key from the repository. + /// + /// This function acquires a read lock on the internal storage and returns a + /// cloned [`PeerKey`] if the provided [`Key`] exists. + /// + /// # Arguments + /// + /// * `key` - A reference to the [`Key`] to look up. + /// + /// # Returns + /// + /// An `Option` containing the matching key if found, or `None` + /// otherwise. pub(crate) async fn get(&self, key: &Key) -> Option { self.keys.read().await.get(key).cloned() } - /// It clears all the authentication keys. + /// Clears all authentication keys from the repository. 
+ /// + /// This function acquires a write lock on the internal storage and removes + /// all entries. #[allow(dead_code)] pub(crate) async fn clear(&self) { let mut keys = self.keys.write().await; keys.clear(); } - /// It resets the authentication keys with a new list of keys. + /// Resets the repository with a new list of authentication keys. + /// + /// This function clears all existing keys and then inserts each key from + /// the provided vector. + /// + /// # Arguments + /// + /// * `peer_keys` - A vector of [`PeerKey`] instances that will replace the + /// current set of keys. pub async fn reset_with(&self, peer_keys: Vec) { let mut keys_lock = self.keys.write().await; diff --git a/packages/tracker-core/src/authentication/key/repository/mod.rs b/packages/tracker-core/src/authentication/key/repository/mod.rs index 51723b68d..3df783622 100644 --- a/packages/tracker-core/src/authentication/key/repository/mod.rs +++ b/packages/tracker-core/src/authentication/key/repository/mod.rs @@ -1,2 +1,3 @@ +//! Key repository implementations. pub mod in_memory; pub mod persisted; diff --git a/packages/tracker-core/src/authentication/key/repository/persisted.rs b/packages/tracker-core/src/authentication/key/repository/persisted.rs index 95a3b874c..e84a23c9b 100644 --- a/packages/tracker-core/src/authentication/key/repository/persisted.rs +++ b/packages/tracker-core/src/authentication/key/repository/persisted.rs @@ -1,14 +1,28 @@ +//! The database repository for the authentication keys. use std::sync::Arc; use crate::authentication::key::{Key, PeerKey}; use crate::databases::{self, Database}; -/// The database repository for the authentication keys. +/// A repository for storing authentication keys in a persistent database. +/// +/// This repository provides methods to add, remove, and load authentication +/// keys from the underlying database. It wraps an instance of a type +/// implementing the [`Database`] trait. pub struct DatabaseKeyRepository { database: Arc>, } impl DatabaseKeyRepository { + /// Creates a new `DatabaseKeyRepository` instance. + /// + /// # Arguments + /// + /// * `database` - A shared reference to a boxed database implementation. + /// + /// # Returns + /// + /// A new instance of `DatabaseKeyRepository` #[must_use] pub fn new(database: &Arc>) -> Self { Self { @@ -16,31 +30,43 @@ impl DatabaseKeyRepository { } } - /// It adds a new key to the database. + /// Adds a new authentication key to the database. + /// + /// # Arguments + /// + /// * `peer_key` - A reference to the [`PeerKey`] to be persisted. /// /// # Errors /// - /// Will return a `databases::error::Error` if unable to add the `auth_key` to the database. + /// Returns a [`databases::error::Error`] if the key cannot be added. pub(crate) fn add(&self, peer_key: &PeerKey) -> Result<(), databases::error::Error> { self.database.add_key_to_keys(peer_key)?; Ok(()) } - /// It removes an key from the database. + /// Removes an authentication key from the database. + /// + /// # Arguments + /// + /// * `key` - A reference to the [`Key`] corresponding to the key to remove. /// /// # Errors /// - /// Will return a `database::Error` if unable to remove the `key` from the database. + /// Returns a [`databases::error::Error`] if the key cannot be removed. pub(crate) fn remove(&self, key: &Key) -> Result<(), databases::error::Error> { self.database.remove_key_from_keys(key)?; Ok(()) } - /// It loads all keys from the database. + /// Loads all authentication keys from the database. 
/// /// # Errors /// - /// Will return a `database::Error` if unable to load the keys from the database. + /// Returns a [`databases::error::Error`] if the keys cannot be loaded. + /// + /// # Returns + /// + /// A vector containing all persisted [`PeerKey`] entries. pub(crate) fn load_keys(&self) -> Result, databases::error::Error> { let keys = self.database.load_keys()?; Ok(keys) diff --git a/packages/tracker-core/src/authentication/mod.rs b/packages/tracker-core/src/authentication/mod.rs index 52138d26b..12b742b8b 100644 --- a/packages/tracker-core/src/authentication/mod.rs +++ b/packages/tracker-core/src/authentication/mod.rs @@ -1,3 +1,18 @@ +//! Tracker authentication services and structs. +//! +//! One of the crate responsibilities is to create and keep authentication keys. +//! Auth keys are used by HTTP trackers when the tracker is running in `private` +//! mode. +//! +//! HTTP tracker's clients need to obtain an authentication key before starting +//! requesting the tracker. Once they get one they have to include a `PATH` +//! param with the key in all the HTTP requests. For example, when a peer wants +//! to `announce` itself it has to use the HTTP tracker endpoint: +//! +//! `GET /announce/:key` +//! +//! The common way to obtain the keys is by using the tracker API directly or +//! via other applications like the [Torrust Index](https://github.com/torrust/torrust-index). use crate::CurrentClock; pub mod handler; diff --git a/packages/tracker-core/src/authentication/service.rs b/packages/tracker-core/src/authentication/service.rs index 5ca0a09ec..75b28944f 100644 --- a/packages/tracker-core/src/authentication/service.rs +++ b/packages/tracker-core/src/authentication/service.rs @@ -1,3 +1,4 @@ +//! Authentication service. use std::panic::Location; use std::sync::Arc; @@ -6,6 +7,11 @@ use torrust_tracker_configuration::Core; use super::key::repository::in_memory::InMemoryKeyRepository; use super::{key, Error, Key}; +/// The authentication service responsible for validating peer keys. +/// +/// The service uses an in-memory key repository along with the tracker +/// configuration to determine whether a given peer key is valid. In a private +/// tracker, only registered keys (and optionally unexpired keys) are allowed. #[derive(Debug)] pub struct AuthenticationService { /// The tracker configuration. @@ -16,6 +22,18 @@ pub struct AuthenticationService { } impl AuthenticationService { + /// Creates a new instance of the `AuthenticationService`. + /// + /// # Parameters + /// + /// - `config`: A reference to the tracker core configuration. + /// - `in_memory_key_repository`: A shared reference to an in-memory key + /// repository. + /// + /// # Returns + /// + /// An `AuthenticationService` instance initialized with the given + /// configuration and repository. #[must_use] pub fn new(config: &Core, in_memory_key_repository: &Arc) -> Self { Self { @@ -24,12 +42,23 @@ impl AuthenticationService { } } - /// It authenticates the peer `key` against the `Tracker` authentication - /// key list. + /// Authenticates a peer key against the tracker's authentication key list. + /// + /// For private trackers, the key must be registered (and optionally not + /// expired) to be considered valid. For public trackers, authentication + /// always succeeds. + /// + /// # Parameters + /// + /// - `key`: A reference to the peer key that needs to be authenticated. /// /// # Errors /// - /// Will return an error if the the authentication key cannot be verified. 
+ /// Returns an error if: + /// + /// - The tracker is in private mode and the key cannot be found in the + /// repository. + /// - The key is found but fails the expiration check (if expiration is enforced). pub async fn authenticate(&self, key: &Key) -> Result<(), Error> { if self.tracker_is_private() { self.verify_auth_key(key).await @@ -44,11 +73,25 @@ impl AuthenticationService { self.config.private } - /// It verifies an authentication key. + /// Verifies the authentication key against the in-memory repository. + /// + /// This function retrieves the key from the repository. If the key is not + /// found, it returns an error with the caller's location. If the key is + /// found, the function then checks the key's expiration based on the + /// tracker configuration. The behavior differs depending on whether a + /// `private` configuration is provided and whether key expiration checking + /// is enabled. + /// + /// # Parameters + /// + /// - `key`: A reference to the peer key that needs to be verified. /// /// # Errors /// - /// Will return a `key::Error` if unable to get any `auth_key`. + /// Returns an error if: + /// + /// - The key is not found in the repository. + /// - The key fails the expiration check when such verification is required. async fn verify_auth_key(&self, key: &Key) -> Result<(), Error> { match self.in_memory_key_repository.get(key).await { None => Err(Error::UnableToReadKey { diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index 365bd0ad9..624e34c9b 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -1,4 +1,10 @@ //! The `MySQL` database driver. +//! +//! This module provides an implementation of the [`Database`] trait for `MySQL` +//! using the `r2d2_mysql` connection pool. It configures the MySQL connection +//! based on a URL, creates the necessary tables (for torrent metrics, torrent +//! whitelist, and authentication keys), and implements all CRUD operations +//! required by the persistence layer. use std::str::FromStr; use std::time::Duration; @@ -15,6 +21,11 @@ use crate::authentication::{self, Key}; const DRIVER: Driver = Driver::MySQL; +/// `MySQL` driver implementation. +/// +/// This struct encapsulates a connection pool for `MySQL`, built using the +/// `r2d2_mysql` connection manager. It implements the [`Database`] trait to +/// provide persistence operations. pub(crate) struct Mysql { pool: Pool, } diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index 36ca4eabe..bab2fb6a7 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -1,4 +1,10 @@ //! The `SQLite3` database driver. +//! +//! This module provides an implementation of the [`Database`] trait for +//! `SQLite3` using the `r2d2_sqlite` connection pool. It defines the schema for +//! whitelist, torrent metrics, and authentication keys, and provides methods +//! to create and drop tables as well as perform CRUD operations on these +//! persistent objects. use std::panic::Location; use std::str::FromStr; @@ -14,18 +20,29 @@ use crate::authentication::{self, Key}; const DRIVER: Driver = Driver::Sqlite3; +/// `SQLite` driver implementation. +/// +/// This struct encapsulates a connection pool for `SQLite` using the `r2d2_sqlite` +/// connection manager. 
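+///
+/// For example, roughly (the path below is only a placeholder):
+///
+/// ```text
+/// let database = Sqlite::new("./storage/sqlite3.db")?;
+/// ```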
pub(crate) struct Sqlite { pool: Pool, } impl Sqlite { - /// It instantiates a new `SQLite3` database driver. + /// Instantiates a new `SQLite3` database driver. /// - /// Refer to [`databases::Database::new`](crate::core::databases::Database::new). + /// This function creates a connection manager for the `SQLite` database + /// located at `db_path` and then builds a connection pool using `r2d2`. If + /// the pool cannot be created, an error is returned (wrapped with the + /// appropriate driver information). + /// + /// # Arguments + /// + /// * `db_path` - A string slice representing the file path to the `SQLite` database. /// /// # Errors /// - /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database. + /// Returns an [`Error`] if the connection pool cannot be built. pub fn new(db_path: &str) -> Result { let manager = SqliteConnectionManager::file(db_path); let pool = r2d2::Pool::builder().build(manager).map_err(|e| (e, DRIVER))?; diff --git a/packages/tracker-core/src/databases/error.rs b/packages/tracker-core/src/databases/error.rs index 6b340080e..fd9adfc22 100644 --- a/packages/tracker-core/src/databases/error.rs +++ b/packages/tracker-core/src/databases/error.rs @@ -1,6 +1,13 @@ //! Database errors. //! -//! This module contains the [Database errors](crate::databases::error::Error). +//! This module defines the [`Error`] enum used to represent errors that occur +//! during database operations. These errors encapsulate issues such as missing +//! query results, malformed queries, connection failures, and connection pool +//! creation errors. Each error variant includes contextual information such as +//! the associated database driver and, when applicable, the source error. +//! +//! External errors from database libraries (e.g., `rusqlite`, `mysql`) are +//! converted into this error type using the provided `From` implementations. use std::panic::Location; use std::sync::Arc; @@ -9,30 +16,43 @@ use torrust_tracker_located_error::{DynError, Located, LocatedError}; use super::driver::Driver; +/// Database error type that encapsulates various failures encountered during +/// database operations. #[derive(thiserror::Error, Debug, Clone)] pub enum Error { - /// The query unexpectedly returned nothing. + /// Indicates that a query unexpectedly returned no rows. + /// + /// This error variant is used when a query that is expected to return a + /// result does not. #[error("The {driver} query unexpectedly returned nothing: {source}")] QueryReturnedNoRows { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, driver: Driver, }, - /// The query was malformed. + /// Indicates that the query was malformed. + /// + /// This error variant is used when the SQL query itself is invalid or + /// improperly formatted. #[error("The {driver} query was malformed: {source}")] InvalidQuery { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, driver: Driver, }, - /// Unable to insert a record into the database + /// Indicates a failure to insert a record into the database. + /// + /// This error is raised when an insertion operation fails. #[error("Unable to insert record into {driver} database, {location}")] InsertFailed { location: &'static Location<'static>, driver: Driver, }, - /// Unable to delete a record into the database + /// Indicates a failure to delete a record from the database. + /// + /// This error includes an error code that may be returned by the database + /// driver. 
#[error("Failed to remove record from {driver} database, error-code: {error_code}, {location}")] DeleteFailed { location: &'static Location<'static>, @@ -40,14 +60,18 @@ pub enum Error { driver: Driver, }, - /// Unable to connect to the database + /// Indicates a failure to connect to the database. + /// + /// This error variant wraps connection-related errors, such as those caused by an invalid URL. #[error("Failed to connect to {driver} database: {source}")] ConnectionError { source: LocatedError<'static, UrlError>, driver: Driver, }, - /// Unable to create a connection pool + /// Indicates a failure to create a connection pool. + /// + /// This error variant is used when the connection pool creation (using r2d2) fails. #[error("Failed to create r2d2 {driver} connection pool: {source}")] ConnectionPool { source: LocatedError<'static, r2d2::Error>, diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index 1de13332f..33a7e3c69 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -1,48 +1,51 @@ //! The persistence module. //! -//! Persistence is currently implemented with one [`Database`] trait. +//! Persistence is currently implemented using a single [`Database`] trait. //! //! There are two implementations of the trait (two drivers): //! -//! - `Mysql` -//! - `Sqlite` +//! - **`MySQL`** +//! - **`Sqlite`** //! -//! > **NOTICE**: There are no database migrations. If there are any changes, -//! > we will implemented them or provide a script to migrate to the new schema. +//! > **NOTICE**: There are no database migrations at this time. If schema +//! > changes occur, either migration functionality will be implemented or a +//! > script will be provided to migrate to the new schema. //! -//! The persistent objects are: +//! The persistent objects handled by this module include: //! -//! - [Torrent metrics](#torrent-metrics) -//! - [Torrent whitelist](torrent-whitelist) -//! - [Authentication keys](authentication-keys) +//! - **Torrent metrics**: Metrics such as the number of completed downloads for +//! each torrent. +//! - **Torrent whitelist**: A list of torrents (by infohash) that are allowed. +//! - **Authentication keys**: Expiring authentication keys used to secure +//! access to private trackers. //! -//! # Torrent metrics +//! # Torrent Metrics //! -//! Field | Sample data | Description -//! ---|---|--- -//! `id` | 1 | Autoincrement id -//! `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 -//! `completed` | 20 | The number of peers that have ever completed downloading the torrent associated to this entry. See [`Entry`](torrust_tracker_torrent_repository::entry::Entry) for more information. +//! | Field | Sample data | Description | +//! |-------------|--------------------------------------------|-----------------------------------------------------------------------------| +//! | `id` | 1 | Auto-increment id | +//! | `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 | +//! | `completed` | 20 | The number of peers that have completed downloading the associated torrent. | //! -//! > **NOTICE**: The peer list for a torrent is not persisted. Since peer have to re-announce themselves on intervals, the data is be -//! > regenerated again after some minutes. +//! > **NOTICE**: The peer list for a torrent is not persisted. Because peers re-announce at +//! > intervals, the peer list is regenerated periodically. //! -//! 
# Torrent whitelist +//! # Torrent Whitelist //! -//! Field | Sample data | Description -//! ---|---|--- -//! `id` | 1 | Autoincrement id -//! `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 +//! | Field | Sample data | Description | +//! |-------------|--------------------------------------------|--------------------------------| +//! | `id` | 1 | Auto-increment id | +//! | `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 | //! -//! # Authentication keys +//! # Authentication Keys //! -//! Field | Sample data | Description -//! ---|---|--- -//! `id` | 1 | Autoincrement id -//! `key` | `IrweYtVuQPGbG9Jzx1DihcPmJGGpVy82` | Token -//! `valid_until` | 1672419840 | Timestamp for the expiring date +//! | Field | Sample data | Description | +//! |---------------|------------------------------------|--------------------------------------| +//! | `id` | 1 | Auto-increment id | +//! | `key` | `IrweYtVuQPGbG9Jzx1DihcPmJGGpVy82` | Authentication token (32 chars) | +//! | `valid_until` | 1672419840 | Timestamp indicating expiration time | //! -//! > **NOTICE**: All keys must have an expiration date. +//! > **NOTICE**: All authentication keys must have an expiration date. pub mod driver; pub mod error; pub mod setup; @@ -54,143 +57,159 @@ use torrust_tracker_primitives::PersistentTorrents; use self::error::Error; use crate::authentication::{self, Key}; -/// The persistence trait. It contains all the methods to interact with the database. +/// The persistence trait. +/// +/// This trait defines all the methods required to interact with the database, +/// including creating and dropping schema tables, and CRUD operations for +/// torrent metrics, whitelists, and authentication keys. Implementations of +/// this trait must ensure that operations are safe, consistent, and report +/// errors using the [`Error`] type. #[automock] pub trait Database: Sync + Send { - /// It generates the database tables. SQL queries are hardcoded in the trait - /// implementation. + /// Creates the necessary database tables. + /// + /// The SQL queries for table creation are hardcoded in the trait implementation. /// /// # Context: Schema /// /// # Errors /// - /// Will return `Error` if unable to create own tables. + /// Returns an [`Error`] if the tables cannot be created. fn create_database_tables(&self) -> Result<(), Error>; - /// It drops the database tables. + /// Drops the database tables. + /// + /// This operation removes the persistent schema. /// /// # Context: Schema /// /// # Errors /// - /// Will return `Err` if unable to drop tables. + /// Returns an [`Error`] if the tables cannot be dropped. fn drop_database_tables(&self) -> Result<(), Error>; // Torrent Metrics - /// It loads the torrent metrics data from the database. + /// Loads torrent metrics data from the database. /// - /// It returns an array of tuples with the torrent - /// [`InfoHash`] and the - /// [`downloaded`](torrust_tracker_torrent_repository::entry::Torrent::downloaded) counter - /// which is the number of times the torrent has been downloaded. - /// See [`Entry::downloaded`](torrust_tracker_torrent_repository::entry::Torrent::downloaded). + /// This function returns the persistent torrent metrics as a collection of + /// tuples, where each tuple contains an [`InfoHash`] and the `downloaded` + /// counter (i.e. the number of times the torrent has been downloaded). /// /// # Context: Torrent Metrics /// /// # Errors /// - /// Will return `Err` if unable to load. 
+ /// Returns an [`Error`] if the metrics cannot be loaded. fn load_persistent_torrents(&self) -> Result; - /// It saves the torrent metrics data into the database. + /// Saves torrent metrics data into the database. + /// + /// # Arguments + /// + /// * `info_hash` - A reference to the torrent's info hash. + /// * `downloaded` - The number of times the torrent has been downloaded. /// /// # Context: Torrent Metrics /// /// # Errors /// - /// Will return `Err` if unable to save. + /// Returns an [`Error`] if the metrics cannot be saved. fn save_persistent_torrent(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error>; // Whitelist - /// It loads the whitelisted torrents from the database. + /// Loads the whitelisted torrents from the database. /// /// # Context: Whitelist /// /// # Errors /// - /// Will return `Err` if unable to load. + /// Returns an [`Error`] if the whitelist cannot be loaded. fn load_whitelist(&self) -> Result, Error>; - /// It checks if the torrent is whitelisted. + /// Retrieves a whitelisted torrent from the database. /// - /// It returns `Some(InfoHash)` if the torrent is whitelisted, `None` otherwise. + /// Returns `Some(InfoHash)` if the torrent is in the whitelist, or `None` + /// otherwise. /// /// # Context: Whitelist /// /// # Errors /// - /// Will return `Err` if unable to load. + /// Returns an [`Error`] if the whitelist cannot be queried. fn get_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result, Error>; - /// It adds the torrent to the whitelist. + /// Adds a torrent to the whitelist. /// /// # Context: Whitelist /// /// # Errors /// - /// Will return `Err` if unable to save. + /// Returns an [`Error`] if the torrent cannot be added to the whitelist. fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; - /// It checks if the torrent is whitelisted. + /// Checks whether a torrent is whitelisted. + /// + /// This default implementation returns `true` if the infohash is included + /// in the whitelist, or `false` otherwise. /// /// # Context: Whitelist /// /// # Errors /// - /// Will return `Err` if unable to load. + /// Returns an [`Error`] if the whitelist cannot be queried. fn is_info_hash_whitelisted(&self, info_hash: InfoHash) -> Result { Ok(self.get_info_hash_from_whitelist(info_hash)?.is_some()) } - /// It removes the torrent from the whitelist. + /// Removes a torrent from the whitelist. /// /// # Context: Whitelist /// /// # Errors /// - /// Will return `Err` if unable to save. + /// Returns an [`Error`] if the torrent cannot be removed from the whitelist. fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; // Authentication keys - /// It loads the expiring authentication keys from the database. + /// Loads all authentication keys from the database. /// /// # Context: Authentication Keys /// /// # Errors /// - /// Will return `Err` if unable to load. + /// Returns an [`Error`] if the keys cannot be loaded. fn load_keys(&self) -> Result, Error>; - /// It gets an expiring authentication key from the database. + /// Retrieves a specific authentication key from the database. /// - /// It returns `Some(PeerKey)` if a [`PeerKey`](crate::authentication::PeerKey) - /// with the input [`Key`] exists, `None` otherwise. + /// Returns `Some(PeerKey)` if a key corresponding to the provided [`Key`] + /// exists, or `None` otherwise. /// /// # Context: Authentication Keys /// /// # Errors /// - /// Will return `Err` if unable to load. + /// Returns an [`Error`] if the key cannot be queried. 
fn get_key_from_keys(&self, key: &Key) -> Result, Error>; - /// It adds an expiring authentication key to the database. + /// Adds an authentication key to the database. /// /// # Context: Authentication Keys /// /// # Errors /// - /// Will return `Err` if unable to save. + /// Returns an [`Error`] if the key cannot be saved. fn add_key_to_keys(&self, auth_key: &authentication::PeerKey) -> Result; - /// It removes an expiring authentication key from the database. + /// Removes an authentication key from the database. /// /// # Context: Authentication Keys /// /// # Errors /// - /// Will return `Err` if unable to load. + /// Returns an [`Error`] if the key cannot be removed. fn remove_key_from_keys(&self, key: &Key) -> Result; } diff --git a/packages/tracker-core/src/databases/setup.rs b/packages/tracker-core/src/databases/setup.rs index 73ff23feb..6ba9f2a64 100644 --- a/packages/tracker-core/src/databases/setup.rs +++ b/packages/tracker-core/src/databases/setup.rs @@ -1,3 +1,4 @@ +//! This module provides functionality for setting up databases. use std::sync::Arc; use torrust_tracker_configuration::Core; @@ -5,9 +6,38 @@ use torrust_tracker_configuration::Core; use super::driver::{self, Driver}; use super::Database; +/// Initializes and returns a database instance based on the provided configuration. +/// +/// This function creates a new database instance according to the settings +/// defined in the [`Core`] configuration. It selects the appropriate driver +/// (either `Sqlite3` or `MySQL`) as specified in `config.database.driver` and +/// attempts to build the database connection using the path defined in +/// `config.database.path`. +/// +/// The resulting database instance is wrapped in a shared pointer (`Arc`) to a +/// boxed trait object, allowing safe sharing of the database connection across +/// multiple threads. +/// /// # Panics /// -/// Will panic if database cannot be initialized. +/// This function will panic if the database cannot be initialized (i.e., if the +/// driver fails to build the connection). This is enforced by the use of +/// [`expect`](std::result::Result::expect) in the implementation. +/// +/// # Example +/// +/// ```rust,no_run +/// use torrust_tracker_configuration::Core; +/// use bittorrent_tracker_core::databases::setup::initialize_database; +/// +/// // Create a default configuration (ensure it is properly set up for your environment) +/// let config = Core::default(); +/// +/// // Initialize the database; this will panic if initialization fails. +/// let database = initialize_database(&config); +/// +/// // The returned database instance can now be used for persistence operations. +/// ``` #[must_use] pub fn initialize_database(config: &Core) -> Arc> { let driver = match config.database.driver { @@ -17,3 +47,15 @@ pub fn initialize_database(config: &Core) -> Arc> { Arc::new(driver::build(&driver, &config.database.path).expect("Database driver build failed.")) } + +#[cfg(test)] +mod tests { + use super::initialize_database; + use crate::test_helpers::tests::ephemeral_configuration; + + #[test] + fn it_should_initialize_the_sqlite_database() { + let config = ephemeral_configuration(); + let _database = initialize_database(&config); + } +} diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index dcdd89668..99ac48ed3 100644 --- a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -1,4 +1,12 @@ -//! Errors returned by the core tracker. +//! Core tracker errors. +//! +//! 
This module defines the error types used internally by the `BitTorrent` +//! tracker core. +//! +//! These errors encapsulate issues such as whitelisting violations, invalid +//! peer key data, and database persistence failures. Each error variant +//! includes contextual information (such as source code location) to facilitate +//! debugging. use std::panic::Location; use bittorrent_primitives::info_hash::InfoHash; @@ -7,9 +15,13 @@ use torrust_tracker_located_error::LocatedError; use super::authentication::key::ParseKeyError; use super::databases; -/// Whitelist errors returned by the core tracker. +/// Errors related to torrent whitelisting. +/// +/// This error is returned when an operation involves a torrent that is not +/// present in the whitelist. #[derive(thiserror::Error, Debug, Clone)] pub enum WhitelistError { + /// Indicates that the torrent identified by `info_hash` is not whitelisted. #[error("The torrent: {info_hash}, is not whitelisted, {location}")] TorrentNotWhitelisted { info_hash: InfoHash, @@ -17,19 +29,27 @@ pub enum WhitelistError { }, } -/// Peers keys errors returned by the core tracker. +/// Errors related to peer key operations. +/// +/// This error type covers issues encountered during the handling of peer keys, +/// including validation of key durations, parsing errors, and database +/// persistence problems. #[allow(clippy::module_name_repetitions)] #[derive(thiserror::Error, Debug, Clone)] pub enum PeerKeyError { + /// Returned when the duration specified for the peer key exceeds the + /// maximum. #[error("Invalid peer key duration: {seconds_valid:?}, is not valid")] DurationOverflow { seconds_valid: u64 }, + /// Returned when the provided peer key is invalid. #[error("Invalid key: {key}")] InvalidKey { key: String, source: LocatedError<'static, ParseKeyError>, }, + /// Returned when persisting the peer key to the database fails. #[error("Can't persist key: {source}")] DatabaseError { source: LocatedError<'static, databases::error::Error>, diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index ac6e4edac..843817deb 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -1,315 +1,57 @@ -//! The core `tracker` module contains the generic `BitTorrent` tracker logic which is independent of the delivery layer. +//! The core `bittorrent-tracker-core` crate contains the generic `BitTorrent` +//! tracker logic which is independent of the delivery layer. //! -//! It contains the tracker services and their dependencies. It's a domain layer which does not -//! specify how the end user should connect to the `Tracker`. +//! It contains the tracker services and their dependencies. It's a domain layer +//! which does not specify how the end user should connect to the `Tracker`. //! -//! Typically this module is intended to be used by higher modules like: +//! Typically this crate is intended to be used by higher components like: //! //! - A UDP tracker //! - A HTTP tracker //! - A tracker REST API //! //! ```text -//! Delivery layer Domain layer -//! -//! HTTP tracker | -//! UDP tracker |> Core tracker -//! Tracker REST API | +//! Delivery layer | Domain layer +//! ----------------------------------- +//! HTTP tracker | +//! UDP tracker |-> Core tracker +//! Tracker REST API | //! ``` //! //! # Table of contents //! -//! - [Tracker](#tracker) -//! - [Announce request](#announce-request) -//! - [Scrape request](#scrape-request) -//! - [Torrents](#torrents) -//! - [Peers](#peers) +//! 
- [Introduction](#introduction) //! - [Configuration](#configuration) +//! - [Announce handler](#announce-handler) +//! - [Scrape handler](#scrape-handler) //! - [Authentication](#authentication) -//! - [Statistics](#statistics) -//! - [Persistence](#persistence) -//! -//! # Tracker -//! -//! The `Tracker` is the main struct in this module. `The` tracker has some groups of responsibilities: -//! -//! - **Core tracker**: it handles the information about torrents and peers. -//! - **Authentication**: it handles authentication keys which are used by HTTP trackers. -//! - **Authorization**: it handles the permission to perform requests. -//! - **Whitelist**: when the tracker runs in `listed` or `private_listed` mode all operations are restricted to whitelisted torrents. -//! - **Statistics**: it keeps and serves the tracker statistics. -//! -//! Refer to [torrust-tracker-configuration](https://docs.rs/torrust-tracker-configuration) crate docs to get more information about the tracker settings. -//! -//! ## Announce request -//! -//! Handling `announce` requests is the most important task for a `BitTorrent` tracker. -//! -//! A `BitTorrent` swarm is a network of peers that are all trying to download the same torrent. -//! When a peer wants to find other peers it announces itself to the swarm via the tracker. -//! The peer sends its data to the tracker so that the tracker can add it to the swarm. -//! The tracker responds to the peer with the list of other peers in the swarm so that -//! the peer can contact them to start downloading pieces of the file from them. +//! - [Databases](#databases) +//! - [Torrent](#torrent) +//! - [Whitelist](#whitelist) //! -//! Once you have instantiated the `AnnounceHandler` you can `announce` a new [`peer::Peer`](torrust_tracker_primitives::peer::Peer) with: +//! # Introduction //! -//! ```rust,no_run -//! use std::net::SocketAddr; -//! use std::net::IpAddr; -//! use std::net::Ipv4Addr; -//! use std::str::FromStr; +//! The main purpose of this crate is to provide a generic `BitTorrent` tracker. //! -//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -//! use torrust_tracker_primitives::DurationSinceUnixEpoch; -//! use torrust_tracker_primitives::peer; -//! use bittorrent_primitives::info_hash::InfoHash; +//! It has two main responsibilities: //! -//! let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); +//! - To handle **announce** requests. +//! - To handle **scrape** requests. //! -//! let peer = peer::Peer { -//! peer_id: PeerId(*b"-qB00000000000000001"), -//! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), -//! updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), -//! uploaded: NumberOfBytes::new(0), -//! downloaded: NumberOfBytes::new(0), -//! left: NumberOfBytes::new(0), -//! event: AnnounceEvent::Completed, -//! }; -//! -//! let peer_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); -//! ``` -//! -//! ```text -//! let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip).await; -//! ``` -//! -//! The `Tracker` returns the list of peers for the torrent with the infohash `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, -//! filtering out the peer that is making the `announce` request. -//! -//! > **NOTICE**: that the peer argument is mutable because the `Tracker` can change the peer IP if the peer is using a loopback IP. -//! -//! The `peer_ip` argument is the resolved peer ip. 
It's a common practice that trackers ignore the peer ip in the `announce` request params, -//! and resolve the peer ip using the IP of the client making the request. As the tracker is a domain service, the peer IP must be provided -//! for the `Tracker` user, which is usually a higher component with access the the request metadata, for example, connection data, proxy headers, -//! etcetera. -//! -//! The returned struct is: -//! -//! ```rust,no_run -//! use torrust_tracker_primitives::peer; -//! use torrust_tracker_configuration::AnnouncePolicy; -//! -//! pub struct AnnounceData { -//! pub peers: Vec, -//! pub swarm_stats: SwarmMetadata, -//! pub policy: AnnouncePolicy, // the tracker announce policy. -//! } -//! -//! pub struct SwarmMetadata { -//! pub completed: u32, // The number of peers that have ever completed downloading -//! pub seeders: u32, // The number of active peers that have completed downloading (seeders) -//! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) -//! } -//! -//! // Core tracker configuration -//! pub struct AnnounceInterval { -//! // ... -//! pub interval: u32, // Interval in seconds that the client should wait between sending regular announce requests to the tracker -//! pub interval_min: u32, // Minimum announce interval. Clients must not reannounce more frequently than this -//! // ... -//! } -//! ``` +//! The crate has also other features: //! -//! Refer to `BitTorrent` BEPs and other sites for more information about the `announce` request: +//! - **Authentication**: It handles authentication keys which are used by HTTP trackers. +//! - **Persistence**: It handles persistence of data into a database. +//! - **Torrent**: It handles the torrent data. +//! - **Whitelist**: When the tracker runs in [`listed`](https://docs.rs/torrust-tracker-configuration/latest/torrust_tracker_configuration/type.Core.html) mode +//! all operations are restricted to whitelisted torrents. //! -//! - [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -//! - [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) -//! - [Vuze docs](https://wiki.vuze.com/w/Announce) -//! -//! ## Scrape request -//! -//! The `scrape` request allows clients to query metadata about the swarm in bulk. -//! -//! An `scrape` request includes a list of infohashes whose swarm metadata you want to collect. -//! -//! The returned struct is: -//! -//! ```rust,no_run -//! use bittorrent_primitives::info_hash::InfoHash; -//! use std::collections::HashMap; -//! -//! pub struct ScrapeData { -//! pub files: HashMap, -//! } -//! -//! pub struct SwarmMetadata { -//! pub complete: u32, // The number of active peers that have completed downloading (seeders) -//! pub downloaded: u32, // The number of peers that have ever completed downloading -//! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) -//! } -//! ``` -//! -//! The JSON representation of a sample `scrape` response would be like the following: -//! -//! ```json -//! { -//! 'files': { -//! 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, -//! 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} -//! } -//! } -//! ``` -//! -//! `xxxxxxxxxxxxxxxxxxxx` and `yyyyyyyyyyyyyyyyyyyy` are 20-byte infohash arrays. -//! There are two data structures for infohashes: byte arrays and hex strings: -//! -//! ```rust,no_run -//! 
use bittorrent_primitives::info_hash::InfoHash; -//! use std::str::FromStr; -//! -//! let info_hash: InfoHash = [255u8; 20].into(); -//! -//! assert_eq!( -//! info_hash, -//! InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() -//! ); -//! ``` -//! Refer to `BitTorrent` BEPs and other sites for more information about the `scrape` request: -//! -//! - [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) -//! - [BEP 15. UDP Tracker Protocol for `BitTorrent`. Scrape section](https://www.bittorrent.org/beps/bep_0015.html) -//! - [Vuze docs](https://wiki.vuze.com/w/Scrape) -//! -//! ## Torrents -//! -//! The [`torrent`] module contains all the data structures stored by the `Tracker` except for peers. -//! -//! We can represent the data stored in memory internally by the `Tracker` with this JSON object: -//! -//! ```json -//! { -//! "c1277613db1d28709b034a017ab2cae4be07ae10": { -//! "completed": 0, -//! "peers": { -//! "-qB00000000000000001": { -//! "peer_id": "-qB00000000000000001", -//! "peer_addr": "2.137.87.41:1754", -//! "updated": 1672419840, -//! "uploaded": 120, -//! "downloaded": 60, -//! "left": 60, -//! "event": "started" -//! }, -//! "-qB00000000000000002": { -//! "peer_id": "-qB00000000000000002", -//! "peer_addr": "23.17.287.141:2345", -//! "updated": 1679415984, -//! "uploaded": 80, -//! "downloaded": 20, -//! "left": 40, -//! "event": "started" -//! } -//! } -//! } -//! } -//! ``` -//! -//! The `Tracker` maintains an indexed-by-info-hash list of torrents. For each torrent, it stores a torrent `Entry`. -//! The torrent entry has two attributes: -//! -//! - `completed`: which is hte number of peers that have completed downloading the torrent file/s. As they have completed downloading, -//! they have a full version of the torrent data, and they can provide the full data to other peers. That's why they are also known as "seeders". -//! - `peers`: an indexed and orderer list of peer for the torrent. Each peer contains the data received from the peer in the `announce` request. -//! -//! The [`torrent`] module not only contains the original data obtained from peer via `announce` requests, it also contains -//! aggregate data that can be derived from the original data. For example: -//! -//! ```rust,no_run -//! pub struct SwarmMetadata { -//! pub complete: u32, // The number of active peers that have completed downloading (seeders) -//! pub downloaded: u32, // The number of peers that have ever completed downloading -//! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) -//! } -//! -//! ``` -//! -//! > **NOTICE**: that `complete` or `completed` peers are the peers that have completed downloading, but only the active ones are considered "seeders". -//! -//! `SwarmMetadata` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmMetadata` -//! is used for the rest of cases. -//! -//! Refer to [`torrent`] module for more details about these data structures. -//! -//! ## Peers -//! -//! A `Peer` is the struct used by the `Tracker` to keep peers data: -//! -//! ```rust,no_run -//! use std::net::SocketAddr; - -//! use aquatic_udp_protocol::PeerId; -//! use torrust_tracker_primitives::DurationSinceUnixEpoch; -//! use aquatic_udp_protocol::NumberOfBytes; -//! use aquatic_udp_protocol::AnnounceEvent; -//! -//! pub struct Peer { -//! pub peer_id: PeerId, // The peer ID -//! 
pub peer_addr: SocketAddr, // Peer socket address -//! pub updated: DurationSinceUnixEpoch, // Last time (timestamp) when the peer was updated -//! pub uploaded: NumberOfBytes, // Number of bytes the peer has uploaded so far -//! pub downloaded: NumberOfBytes, // Number of bytes the peer has downloaded so far -//! pub left: NumberOfBytes, // The number of bytes this peer still has to download -//! pub event: AnnounceEvent, // The event the peer has announced: `started`, `completed`, `stopped` -//! } -//! ``` -//! -//! Notice that most of the attributes are obtained from the `announce` request. -//! For example, an HTTP announce request would contain the following `GET` parameters: -//! -//! -//! -//! The `Tracker` keeps an in-memory ordered data structure with all the torrents and a list of peers for each torrent, together with some swarm metrics. -//! -//! We can represent the data stored in memory with this JSON object: -//! -//! ```json -//! { -//! "c1277613db1d28709b034a017ab2cae4be07ae10": { -//! "completed": 0, -//! "peers": { -//! "-qB00000000000000001": { -//! "peer_id": "-qB00000000000000001", -//! "peer_addr": "2.137.87.41:1754", -//! "updated": 1672419840, -//! "uploaded": 120, -//! "downloaded": 60, -//! "left": 60, -//! "event": "started" -//! }, -//! "-qB00000000000000002": { -//! "peer_id": "-qB00000000000000002", -//! "peer_addr": "23.17.287.141:2345", -//! "updated": 1679415984, -//! "uploaded": 80, -//! "downloaded": 20, -//! "left": 40, -//! "event": "started" -//! } -//! } -//! } -//! } -//! ``` -//! -//! That JSON object does not exist, it's only a representation of the `Tracker` torrents data. -//! -//! `c1277613db1d28709b034a017ab2cae4be07ae10` is the torrent infohash and `completed` contains the number of peers -//! that have a full version of the torrent data, also known as seeders. -//! -//! Refer to [`peer`](torrust_tracker_primitives::peer) for more information about peers. +//! Refer to [torrust-tracker-configuration](https://docs.rs/torrust-tracker-configuration) +//! crate docs to get more information about the tracker settings. //! //! # Configuration //! -//! You can control the behavior of this module with the module settings: +//! You can control the behavior of this crate with the `Core` settings: //! //! ```toml //! [logging] @@ -341,35 +83,41 @@ //! //! Refer to the [`configuration` module documentation](https://docs.rs/torrust-tracker-configuration) to get more information about all options. //! -//! Services can include extra features like pagination, for example. +//! # Announce handler +//! +//! The `AnnounceHandler` is responsible for handling announce requests. +//! +//! Please refer to the [`announce_handler`] documentation. +//! +//! # Scrape handler +//! +//! The `ScrapeHandler` is responsible for handling scrape requests. +//! +//! Please refer to the [`scrape_handler`] documentation. //! //! # Authentication //! -//! One of the core `Tracker` responsibilities is to create and keep authentication keys. Auth keys are used by HTTP trackers -//! when the tracker is running in `private` or `private_listed` mode. +//! The `Authentication` module is responsible for handling authentication keys which are used by HTTP trackers. +//! +//! Please refer to the [`authentication`] documentation. //! -//! HTTP tracker's clients need to obtain an auth key before starting requesting the tracker. Once the get one they have to include -//! a `PATH` param with the key in all the HTTP requests. 
For example, when a peer wants to `announce` itself it has to use the -//! HTTP tracker endpoint `GET /announce/:key`. +//! # Databases //! -//! The common way to obtain the keys is by using the tracker API directly or via other applications like the [Torrust Index](https://github.com/torrust/torrust-index). +//! The `Databases` module is responsible for handling persistence of data into a database. //! -//! To learn more about tracker authentication, refer to the following modules : +//! Please refer to the [`databases`] documentation. //! -//! - [`authentication`] module. +//! # Torrent //! -//! # Persistence +//! The `Torrent` module is responsible for handling the torrent data. //! -//! Right now the `Tracker` is responsible for storing and load data into and -//! from the database, when persistence is enabled. +//! Please refer to the [`torrent`] documentation. //! -//! There are three types of persistent object: +//! # Whitelist //! -//! - Authentication keys (only expiring keys) -//! - Torrent whitelist -//! - Torrent metrics +//! The `Whitelist` module is responsible for handling the whitelist. //! -//! Refer to [`databases`] module for more information about persistence. +//! Please refer to the [`whitelist`] documentation. pub mod announce_handler; pub mod authentication; pub mod databases; diff --git a/packages/tracker-core/src/scrape_handler.rs b/packages/tracker-core/src/scrape_handler.rs index 60d15de71..1e75580ab 100644 --- a/packages/tracker-core/src/scrape_handler.rs +++ b/packages/tracker-core/src/scrape_handler.rs @@ -1,3 +1,64 @@ +//! Scrape handler. +//! +//! The `scrape` request allows clients to query metadata about the swarm in bulk. +//! +//! An `scrape` request includes a list of infohashes whose swarm metadata you +//! want to collect. +//! +//! ## Scrape Response Format +//! +//! The returned struct is: +//! +//! ```rust,no_run +//! use bittorrent_primitives::info_hash::InfoHash; +//! use std::collections::HashMap; +//! +//! pub struct ScrapeData { +//! pub files: HashMap, +//! } +//! +//! pub struct SwarmMetadata { +//! pub complete: u32, // The number of active peers that have completed downloading (seeders) +//! pub downloaded: u32, // The number of peers that have ever completed downloading +//! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) +//! } +//! ``` +//! +//! ## Example JSON Response +//! +//! The JSON representation of a sample `scrape` response would be like the following: +//! +//! ```json +//! { +//! 'files': { +//! 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, +//! 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} +//! } +//! } +//! ``` +//! +//! `xxxxxxxxxxxxxxxxxxxx` and `yyyyyyyyyyyyyyyyyyyy` are 20-byte infohash arrays. +//! There are two data structures for infohashes: byte arrays and hex strings: +//! +//! ```rust,no_run +//! use bittorrent_primitives::info_hash::InfoHash; +//! use std::str::FromStr; +//! +//! let info_hash: InfoHash = [255u8; 20].into(); +//! +//! assert_eq!( +//! info_hash, +//! InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() +//! ); +//! ``` +//! +//! ## References: +//! +//! Refer to `BitTorrent` BEPs and other sites for more information about the `scrape` request: +//! +//! - [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +//! - [BEP 15. UDP Tracker Protocol for `BitTorrent`. 
Scrape section](https://www.bittorrent.org/beps/bep_0015.html) +//! - [Vuze docs](https://wiki.vuze.com/w/Scrape) use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; @@ -7,8 +68,9 @@ use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use super::torrent::repository::in_memory::InMemoryTorrentRepository; use super::whitelist; +/// Handles scrape requests, providing torrent swarm metadata. pub struct ScrapeHandler { - /// The service to check is a torrent is whitelisted. + /// Service for authorizing access to whitelisted torrents. whitelist_authorization: Arc, /// The in-memory torrents repository. @@ -16,6 +78,7 @@ pub struct ScrapeHandler { } impl ScrapeHandler { + /// Creates a new `ScrapeHandler` instance. #[must_use] pub fn new( whitelist_authorization: &Arc, @@ -27,9 +90,14 @@ impl ScrapeHandler { } } - /// It handles a scrape request. + /// Handles a scrape request for multiple torrents. /// - /// BEP 48: [Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). + /// - Returns metadata for each requested torrent. + /// - If a torrent isn't whitelisted or doesn't exist, returns zeroed stats. + /// + /// # BEP Reference: + /// + /// [BEP 48: Scrape Protocol](https://www.bittorrent.org/beps/bep_0048.html) pub async fn scrape(&self, info_hashes: &Vec) -> ScrapeData { let mut scrape_data = ScrapeData::empty(); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 9dac35258..51df97fb5 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -1,3 +1,4 @@ +//! Torrents manager. use std::sync::Arc; use std::time::Duration; @@ -8,6 +9,18 @@ use super::repository::in_memory::InMemoryTorrentRepository; use super::repository::persisted::DatabasePersistentTorrentRepository; use crate::{databases, CurrentClock}; +/// The `TorrentsManager` is responsible for managing torrent entries by +/// integrating persistent storage and in-memory state. It provides methods to +/// load torrent data from the database into memory, and to periodically clean +/// up stale torrent entries by removing inactive peers or entire torrent +/// entries that no longer have active peers. +/// +/// This manager relies on two repositories: +/// +/// - An **in-memory repository** to provide fast access to the current torrent +/// state. +/// - A **persistent repository** that stores aggregate torrent metrics (e.g., +/// seeders count) across tracker restarts. pub struct TorrentsManager { /// The tracker configuration. config: Core, @@ -21,6 +34,19 @@ pub struct TorrentsManager { } impl TorrentsManager { + /// Creates a new instance of `TorrentsManager`. + /// + /// # Arguments + /// + /// * `config` - A reference to the tracker configuration. + /// * `in_memory_torrent_repository` - A shared reference to the in-memory + /// repository of torrents. + /// * `db_torrent_repository` - A shared reference to the persistent + /// repository for torrent metrics. + /// + /// # Returns + /// + /// A new `TorrentsManager` instance with cloned references of the provided dependencies. #[must_use] pub fn new( config: &Core, @@ -34,13 +60,16 @@ impl TorrentsManager { } } - /// It loads the torrents from database into memory. It only loads the - /// torrent entry list with the number of seeders for each torrent. Peers - /// data is not persisted. + /// Loads torrents from the persistent database into the in-memory repository. 
+ /// + /// This function retrieves the list of persistent torrent entries (which + /// include only the aggregate metrics, not the detailed peer lists) from + /// the database, and then imports that data into the in-memory repository. /// /// # Errors /// - /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. + /// Returns a `databases::error::Error` if unable to load the persistent + /// torrent data. #[allow(dead_code)] pub(crate) fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.db_torrent_repository.load_all()?; @@ -50,7 +79,18 @@ impl TorrentsManager { Ok(()) } - /// Remove inactive peers and (optionally) peerless torrents. + /// Cleans up torrent entries by removing inactive peers and, optionally, + /// torrents with no active peers. + /// + /// This function performs two cleanup tasks: + /// + /// 1. It removes peers from torrent entries that have not been updated + /// within a cutoff time. The cutoff time is calculated as the current + /// time minus the maximum allowed peer timeout, as specified in the + /// tracker policy. + /// 2. If the tracker is configured to remove peerless torrents + /// (`remove_peerless_torrents` is set), it removes entire torrent + /// entries that have no active peers. pub fn cleanup_torrents(&self) { let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) .unwrap_or_default(); diff --git a/packages/tracker-core/src/torrent/mod.rs b/packages/tracker-core/src/torrent/mod.rs index 7ca9000f8..8ee8fa6d3 100644 --- a/packages/tracker-core/src/torrent/mod.rs +++ b/packages/tracker-core/src/torrent/mod.rs @@ -1,30 +1,168 @@ -//! Structs to store the swarm data. +//! Swarm Data Structures. //! -//! There are to main data structures: +//! This module defines the primary data structures used to store and manage +//! swarm data within the tracker. In `BitTorrent` terminology, a "swarm" is +//! the collection of peers that are sharing or downloading a given torrent. //! -//! - A torrent [`Entry`](torrust_tracker_torrent_repository::entry::Entry): it contains all the information stored by the tracker for one torrent. -//! - The [`SwarmMetadata`](torrust_tracker_primitives::swarm_metadata::SwarmMetadata): it contains aggregate information that can me derived from the torrent entries. +//! There are two main types of data stored: //! -//! A "swarm" is a network of peers that are trying to download the same torrent. +//! - **Torrent Entry** (`Entry`): Contains all the information the tracker +//! stores for a single torrent, including the list of peers currently in the +//! swarm. This data is crucial for peers to locate each other and initiate +//! downloads. //! -//! The torrent entry contains the "swarm" data, which is basically the list of peers in the swarm. -//! That's the most valuable information the peer want to get from the tracker, because it allows them to -//! start downloading torrent from those peers. +//! - **Swarm Metadata** (`SwarmMetadata`): Contains aggregate data derived from +//! all torrent entries. This metadata is split into: +//! - **Active Peers Data:** Metrics related to the peers that are currently +//! active in the swarm. +//! - **Historical Data:** Metrics collected since the tracker started, such +//! as the total number of completed downloads. //! -//! The "swarm metadata" contains aggregate data derived from the torrent entries. 
There two types of data: +//! ## Metrics Collected //! -//! - For **active peers**: metrics related to the current active peers in the swarm. -//! - **Historical data**: since the tracker started running. +//! The tracker collects and aggregates the following metrics: //! -//! The tracker collects metrics for: +//! - The total number of peers that have completed downloading the torrent +//! since the tracker began collecting metrics. +//! - The number of completed downloads from peers that remain active (i.e., seeders). +//! - The number of active peers that have not completed downloading the torrent (i.e., leechers). //! -//! - The number of peers that have completed downloading the torrent since the tracker started collecting metrics. -//! - The number of peers that have completed downloading the torrent and are still active, that means they are actively participating in the network, -//! by announcing themselves periodically to the tracker. Since they have completed downloading they have a full copy of the torrent data. Peers with a -//! full copy of the data are called "seeders". -//! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. -//! Peer that don not have a full copy of the torrent data are called "leechers". +//! This information is used both to inform peers about available connections +//! and to provide overall swarm statistics. //! +//! This module re-exports core types from the torrent repository crate to +//! simplify integration. +//! +//! ## Internal Data Structures +//! +//! The [`torrent`](crate::torrent) module contains all the data structures +//! stored by the tracker except for peers. +//! +//! We can represent the data stored in memory internally by the tracker with +//! this JSON object: +//! +//! ```json +//! { +//! "c1277613db1d28709b034a017ab2cae4be07ae10": { +//! "completed": 0, +//! "peers": { +//! "-qB00000000000000001": { +//! "peer_id": "-qB00000000000000001", +//! "peer_addr": "2.137.87.41:1754", +//! "updated": 1672419840, +//! "uploaded": 120, +//! "downloaded": 60, +//! "left": 60, +//! "event": "started" +//! }, +//! "-qB00000000000000002": { +//! "peer_id": "-qB00000000000000002", +//! "peer_addr": "23.17.287.141:2345", +//! "updated": 1679415984, +//! "uploaded": 80, +//! "downloaded": 20, +//! "left": 40, +//! "event": "started" +//! } +//! } +//! } +//! } +//! ``` +//! +//! The tracker maintains an indexed-by-info-hash list of torrents. For each +//! torrent, it stores a torrent `Entry`. The torrent entry has two attributes: +//! +//! - `completed`: which is hte number of peers that have completed downloading +//! the torrent file/s. As they have completed downloading, they have a full +//! version of the torrent data, and they can provide the full data to other +//! peers. That's why they are also known as "seeders". +//! - `peers`: an indexed and orderer list of peer for the torrent. Each peer +//! contains the data received from the peer in the `announce` request. +//! +//! The [`crate::torrent`] module not only contains the original data obtained +//! from peer via `announce` requests, it also contains aggregate data that can +//! be derived from the original data. For example: +//! +//! ```rust,no_run +//! pub struct SwarmMetadata { +//! pub complete: u32, // The number of active peers that have completed downloading (seeders) +//! pub downloaded: u32, // The number of peers that have ever completed downloading +//! 
pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) +//! } +//! ``` +//! +//! > **NOTICE**: that `complete` or `completed` peers are the peers that have +//! > completed downloading, but only the active ones are considered "seeders". +//! +//! `SwarmMetadata` struct follows name conventions for `scrape` responses. See +//! [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmMetadata` +//! is used for the rest of cases. +//! +//! ## Peers +//! +//! A `Peer` is the struct used by the tracker to keep peers data: +//! +//! ```rust,no_run +//! use std::net::SocketAddr; +//! use aquatic_udp_protocol::PeerId; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; +//! use aquatic_udp_protocol::NumberOfBytes; +//! use aquatic_udp_protocol::AnnounceEvent; +//! +//! pub struct Peer { +//! pub peer_id: PeerId, // The peer ID +//! pub peer_addr: SocketAddr, // Peer socket address +//! pub updated: DurationSinceUnixEpoch, // Last time (timestamp) when the peer was updated +//! pub uploaded: NumberOfBytes, // Number of bytes the peer has uploaded so far +//! pub downloaded: NumberOfBytes, // Number of bytes the peer has downloaded so far +//! pub left: NumberOfBytes, // The number of bytes this peer still has to download +//! pub event: AnnounceEvent, // The event the peer has announced: `started`, `completed`, `stopped` +//! } +//! ``` +//! +//! Notice that most of the attributes are obtained from the `announce` request. +//! For example, an HTTP announce request would contain the following `GET` parameters: +//! +//! +//! +//! The `Tracker` keeps an in-memory ordered data structure with all the torrents and a list of peers for each torrent, together with some swarm metrics. +//! +//! We can represent the data stored in memory with this JSON object: +//! +//! ```json +//! { +//! "c1277613db1d28709b034a017ab2cae4be07ae10": { +//! "completed": 0, +//! "peers": { +//! "-qB00000000000000001": { +//! "peer_id": "-qB00000000000000001", +//! "peer_addr": "2.137.87.41:1754", +//! "updated": 1672419840, +//! "uploaded": 120, +//! "downloaded": 60, +//! "left": 60, +//! "event": "started" +//! }, +//! "-qB00000000000000002": { +//! "peer_id": "-qB00000000000000002", +//! "peer_addr": "23.17.287.141:2345", +//! "updated": 1679415984, +//! "uploaded": 80, +//! "downloaded": 20, +//! "left": 40, +//! "event": "started" +//! } +//! } +//! } +//! } +//! ``` +//! +//! That JSON object does not exist, it's only a representation of the `Tracker` torrents data. +//! +//! `c1277613db1d28709b034a017ab2cae4be07ae10` is the torrent infohash and `completed` contains the number of peers +//! that have a full version of the torrent data, also known as seeders. +//! +//! Refer to [`peer`](torrust_tracker_primitives::peer) for more information about peers. pub mod manager; pub mod repository; pub mod services; @@ -33,7 +171,11 @@ pub mod services; use torrust_tracker_torrent_repository::EntryMutexStd; use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; -// Currently used types from the torrent repository crate. +/// Alias for the primary torrent collection type, implemented as a skip map +/// wrapped in a mutex. This type is used internally by the tracker to manage +/// and access torrent entries. pub(crate) type Torrents = TorrentsSkipMapMutexStd; + +/// Alias for a single torrent entry. 
#[cfg(test)] pub(crate) type TorrentEntry = EntryMutexStd; diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 26302260b..584feabc9 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -1,3 +1,4 @@ +//! In-memory torrents repository. use std::cmp::max; use std::sync::Arc; @@ -13,51 +14,126 @@ use torrust_tracker_torrent_repository::EntryMutexStd; use crate::torrent::Torrents; -/// The in-memory torrents repository. +/// In-memory repository for torrent entries. /// -/// There are many implementations of the repository trait. We tried with -/// different types of data structures, but the best performance was with -/// the one we use for production. We kept the other implementations for -/// reference. +/// This repository manages the torrent entries and their associated peer lists +/// in memory. It is built on top of a high-performance data structure (the +/// production implementation) and provides methods to update, query, and remove +/// torrent entries as well as to import persisted data. +/// +/// Multiple implementations were considered, and the chosen implementation is +/// used in production. Other implementations are kept for reference. #[derive(Debug, Default)] pub struct InMemoryTorrentRepository { - /// The in-memory torrents repository implementation. + /// The underlying in-memory data structure that stores torrent entries. torrents: Arc, } impl InMemoryTorrentRepository { - /// It inserts (or updates if it's already in the list) the peer in the - /// torrent entry. + /// Inserts or updates a peer in the torrent entry corresponding to the + /// given infohash. + /// + /// If the torrent entry already exists, the peer is added to its peer list; + /// otherwise, a new torrent entry is created. + /// + /// # Arguments + /// + /// * `info_hash` - The unique identifier of the torrent. + /// * `peer` - The peer to insert or update in the torrent entry. pub fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { self.torrents.upsert_peer(info_hash, peer); } + /// Removes a torrent entry from the repository. + /// + /// This method is only available in tests. It removes the torrent entry + /// associated with the given info hash and returns the removed entry if it + /// existed. + /// + /// # Arguments + /// + /// * `key` - The info hash of the torrent to remove. + /// + /// # Returns + /// + /// An `Option` containing the removed torrent entry if it existed. #[cfg(test)] #[must_use] pub(crate) fn remove(&self, key: &InfoHash) -> Option { self.torrents.remove(key) } + /// Removes inactive peers from all torrent entries. + /// + /// A peer is considered inactive if its last update timestamp is older than + /// the provided cutoff time. + /// + /// # Arguments + /// + /// * `current_cutoff` - The cutoff timestamp; peers not updated since this + /// time will be removed. pub(crate) fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { self.torrents.remove_inactive_peers(current_cutoff); } + /// Removes torrent entries that have no active peers. + /// + /// Depending on the tracker policy, torrents without any peers may be + /// removed to conserve memory. + /// + /// # Arguments + /// + /// * `policy` - The tracker policy containing the configuration for + /// removing peerless torrents. 
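+    ///
+    /// Illustrative sketch (assumes a repository instance and a
+    /// `TrackerPolicy` value are already available):
+    ///
+    /// ```rust,ignore
+    /// // Drop every torrent entry that no longer has any peers.
+    /// in_memory_torrent_repository.remove_peerless_torrents(&policy);
+    /// ```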
pub(crate) fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { self.torrents.remove_peerless_torrents(policy); } + /// Retrieves a torrent entry by its infohash. + /// + /// # Arguments + /// + /// * `key` - The info hash of the torrent. + /// + /// # Returns + /// + /// An `Option` containing the torrent entry if found. #[must_use] pub(crate) fn get(&self, key: &InfoHash) -> Option { self.torrents.get(key) } + /// Retrieves a paginated list of torrent entries. + /// + /// This method returns a vector of tuples, each containing an infohash and + /// its associated torrent entry. The pagination parameters (offset and limit) + /// can be used to control the size of the result set. + /// + /// # Arguments + /// + /// * `pagination` - An optional reference to a `Pagination` object. + /// + /// # Returns + /// + /// A vector of `(InfoHash, EntryMutexStd)` tuples. #[must_use] pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { self.torrents.get_paginated(pagination) } - /// It returns the data for a `scrape` response or empty if the torrent is - /// not found. + /// Retrieves swarm metadata for a given torrent. + /// + /// This method returns the swarm metadata (aggregate information such as + /// peer counts) for the torrent specified by the infohash. If the torrent + /// entry is not found, a zeroed metadata struct is returned. + /// + /// # Arguments + /// + /// * `info_hash` - The info hash of the torrent. + /// + /// # Returns + /// + /// A `SwarmMetadata` struct containing the aggregated torrent data. #[must_use] pub(crate) fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { match self.torrents.get(info_hash) { @@ -66,9 +142,23 @@ impl InMemoryTorrentRepository { } } - /// Get torrent peers for a given torrent and client. + /// Retrieves torrent peers for a given torrent and client, excluding the + /// requesting client. + /// + /// This method filters out the client making the request (based on its + /// network address) and returns up to a maximum number of peers, defined by + /// the greater of the provided limit or the global `TORRENT_PEERS_LIMIT`. + /// + /// # Arguments + /// + /// * `info_hash` - The info hash of the torrent. + /// * `peer` - The client peer that should be excluded from the returned list. + /// * `limit` - The maximum number of peers to return. + /// + /// # Returns /// - /// It filters out the client making the request. + /// A vector of peers (wrapped in `Arc`) representing the active peers for + /// the torrent, excluding the requesting client. #[must_use] pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { match self.torrents.get(info_hash) { @@ -77,7 +167,19 @@ impl InMemoryTorrentRepository { } } - /// Get torrent peers for a given torrent. + /// Retrieves the list of peers for a given torrent. + /// + /// This method returns up to `TORRENT_PEERS_LIMIT` peers for the torrent + /// specified by the info-hash. + /// + /// # Arguments + /// + /// * `info_hash` - The info hash of the torrent. + /// + /// # Returns + /// + /// A vector of peers (wrapped in `Arc`) representing the active peers for + /// the torrent. #[must_use] pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { match self.torrents.get(info_hash) { @@ -86,12 +188,28 @@ impl InMemoryTorrentRepository { } } - /// It calculates and returns the general [`TorrentsMetrics`]. + /// Calculates and returns overall torrent metrics. 
+    ///
+    /// The returned [`TorrentsMetrics`] contains aggregate data such as the
+    /// total number of torrents, total complete (seeders), incomplete (leechers),
+    /// and downloaded counts.
+    ///
+    /// # Returns
+    ///
+    /// A [`TorrentsMetrics`] struct with the aggregated metrics.
     #[must_use]
     pub fn get_torrents_metrics(&self) -> TorrentsMetrics {
         self.torrents.get_metrics()
     }
 
+    /// Imports persistent torrent data into the in-memory repository.
+    ///
+    /// This method takes a set of persisted torrent entries (e.g., from a database)
+    /// and imports them into the in-memory repository for immediate access.
+    ///
+    /// # Arguments
+    ///
+    /// * `persistent_torrents` - A reference to the persisted torrent data.
     pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) {
         self.torrents.import_persistent(persistent_torrents);
     }
diff --git a/packages/tracker-core/src/torrent/repository/mod.rs b/packages/tracker-core/src/torrent/repository/mod.rs
index 51723b68d..ae789e5e9 100644
--- a/packages/tracker-core/src/torrent/repository/mod.rs
+++ b/packages/tracker-core/src/torrent/repository/mod.rs
@@ -1,2 +1,3 @@
+//! Torrent repository implementations.
 pub mod in_memory;
 pub mod persisted;
diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/persisted.rs
index 0430f03bb..694a2fe7c 100644
--- a/packages/tracker-core/src/torrent/repository/persisted.rs
+++ b/packages/tracker-core/src/torrent/repository/persisted.rs
@@ -1,3 +1,4 @@
+//! The repository that stores persistent torrent data in the database.
 use std::sync::Arc;
 
 use bittorrent_primitives::info_hash::InfoHash;
@@ -6,17 +7,39 @@ use torrust_tracker_primitives::PersistentTorrents;
 use crate::databases::error::Error;
 use crate::databases::Database;
 
-/// Torrent repository implementation that persists the torrents in a database.
+/// Torrent repository implementation that persists torrent metrics in a database.
 ///
-/// Not all the torrent in-memory data is persisted. For now only some of the
-/// torrent metrics are persisted.
+/// This repository persists only a subset of the torrent data: the torrent
+/// metrics, specifically the number of downloads (or completed counts) for each
+/// torrent. It relies on a database driver (either `SQLite3` or `MySQL`) that
+/// implements the [`Database`] trait to perform the actual persistence
+/// operations.
+///
+/// # Note
+///
+/// Not all in-memory torrent data is persisted; only the aggregate metrics are
+/// stored.
 pub struct DatabasePersistentTorrentRepository {
-    /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite)
-    /// or [`MySQL`](crate::core::databases::mysql)
+    /// A shared reference to the database driver implementation.
+    ///
+    /// The driver must implement the [`Database`] trait. This allows for
+    /// different underlying implementations (e.g., `SQLite3` or `MySQL`) to be
+    /// used interchangeably.
     database: Arc<Box<dyn Database>>,
 }
 
 impl DatabasePersistentTorrentRepository {
+    /// Creates a new instance of `DatabasePersistentTorrentRepository`.
+    ///
+    /// # Arguments
+    ///
+    /// * `database` - A shared reference to a boxed database driver
+    ///   implementing the [`Database`] trait.
+    ///
+    /// # Returns
+    ///
+    /// A new `DatabasePersistentTorrentRepository` instance with a cloned
+    /// reference to the provided database.
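+    ///
+    /// # Example
+    ///
+    /// A minimal construction sketch (illustrative; assumes a database handle
+    /// created elsewhere, for example with `initialize_database`):
+    ///
+    /// ```rust,ignore
+    /// let repository = DatabasePersistentTorrentRepository::new(&database);
+    /// ```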
#[must_use] pub fn new(database: &Arc>) -> DatabasePersistentTorrentRepository { Self { @@ -24,20 +47,31 @@ impl DatabasePersistentTorrentRepository { } } - /// It loads the persistent torrents from the database. + /// Loads all persistent torrent metrics from the database. + /// + /// This function retrieves the torrent metrics (e.g., download counts) from the persistent store + /// and returns them as a [`PersistentTorrents`] map. /// /// # Errors /// - /// Will return a database `Err` if unable to load. + /// Returns an [`Error`] if the underlying database query fails. pub(crate) fn load_all(&self) -> Result { self.database.load_persistent_torrents() } - /// It saves the persistent torrent into the database. + /// Saves the persistent torrent metric into the database. + /// + /// This function stores or updates the download count for the torrent + /// identified by the provided infohash. + /// + /// # Arguments + /// + /// * `info_hash` - The info hash of the torrent. + /// * `downloaded` - The number of times the torrent has been downloaded. /// /// # Errors /// - /// Will return a database `Err` if unable to save. + /// Returns an [`Error`] if the database operation fails. pub(crate) fn save(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> { self.database.save_persistent_torrent(info_hash, downloaded) } diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 4c470bb74..98d25ba47 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -1,9 +1,17 @@ //! Core tracker domain services. //! -//! There are two services: +//! This module defines the primary services for retrieving torrent-related data +//! from the tracker. There are two main services: //! -//! - [`get_torrent_info`]: it returns all the data about one torrent. -//! - [`get_torrents`]: it returns data about some torrent in bulk excluding the peer list. +//! - [`get_torrent_info`]: Returns all available data (including the list of +//! peers) about a single torrent. +//! - [`get_torrents_page`] and [`get_torrents`]: Return summarized data about +//! multiple torrents, excluding the peer list. +//! +//! The full torrent info is represented by the [`Info`] struct, which includes +//! swarm data (peer list) and aggregate metrics. The [`BasicInfo`] struct +//! provides similar data but without the list of peers, making it suitable for +//! bulk queries. use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; @@ -13,37 +21,74 @@ use torrust_tracker_torrent_repository::entry::EntrySync; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; -/// It contains all the information the tracker has about a torrent +/// Full torrent information, including swarm (peer) details. +/// +/// This struct contains all the information that the tracker holds about a +/// torrent, including the infohash, aggregate swarm metrics (seeders, leechers, +/// completed downloads), and the complete list of peers in the swarm. #[derive(Debug, PartialEq)] pub struct Info { /// The infohash of the torrent this data is related to pub info_hash: InfoHash, - /// The total number of seeders for this torrent. Peer that actively serving a full copy of the torrent data + + /// The total number of seeders for this torrent. 
Peer that actively serving + /// a full copy of the torrent data pub seeders: u64, - /// The total number of peers that have ever complete downloading this torrent + + /// The total number of peers that have ever complete downloading this + /// torrent pub completed: u64, - /// The total number of leechers for this torrent. Peers that actively downloading this torrent + + /// The total number of leechers for this torrent. Peers that actively + /// downloading this torrent pub leechers: u64, - /// The swarm: the list of peers that are actively trying to download or serving this torrent + + /// The swarm: the list of peers that are actively trying to download or + /// serving this torrent pub peers: Option>, } -/// It contains only part of the information the tracker has about a torrent +/// Basic torrent information, excluding the list of peers. /// -/// It contains the same data as [Info] but without the list of peers in the swarm. +/// This struct contains the same aggregate metrics as [`Info`] (infohash, +/// seeders, completed, leechers) but omits the peer list. It is used when only +/// summary information is needed. #[derive(Debug, PartialEq, Clone)] pub struct BasicInfo { /// The infohash of the torrent this data is related to pub info_hash: InfoHash, - /// The total number of seeders for this torrent. Peer that actively serving a full copy of the torrent data + + /// The total number of seeders for this torrent. Peer that actively serving + /// a full copy of the torrent data pub seeders: u64, - /// The total number of peers that have ever complete downloading this torrent + + /// The total number of peers that have ever complete downloading this + /// torrent pub completed: u64, - /// The total number of leechers for this torrent. Peers that actively downloading this torrent + + /// The total number of leechers for this torrent. Peers that actively + /// downloading this torrent pub leechers: u64, } -/// It returns all the information the tracker has about one torrent in a [Info] struct. +/// Retrieves complete torrent information for a given torrent. +/// +/// This function queries the in-memory torrent repository for a torrent entry +/// matching the provided infohash. If found, it extracts the swarm metadata +/// (aggregate metrics) and the current list of peers, and returns an [`Info`] +/// struct. +/// +/// # Arguments +/// +/// * `in_memory_torrent_repository` - A shared reference to the in-memory +/// torrent repository. +/// * `info_hash` - A reference to the torrent's infohash. +/// +/// # Returns +/// +/// An [`Option`] which is: +/// - `Some(Info)` if the torrent exists in the repository. +/// - `None` if the torrent is not found. #[must_use] pub fn get_torrent_info(in_memory_torrent_repository: &Arc, info_hash: &InfoHash) -> Option { let torrent_entry_option = in_memory_torrent_repository.get(info_hash); @@ -65,7 +110,23 @@ pub fn get_torrent_info(in_memory_torrent_repository: &Arc, @@ -87,7 +148,23 @@ pub fn get_torrents_page( basic_infos } -/// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. +/// Retrieves summarized torrent information for a specified list of torrents. +/// +/// This function iterates over a slice of infohashes, fetches the corresponding +/// swarm metadata from the in-memory repository (if available), and returns a +/// vector of [`BasicInfo`] structs. This function is useful for bulk queries +/// where detailed peer information is not required. 
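+///
+/// A usage sketch (illustrative; assumes a populated repository and a list of
+/// info-hashes):
+///
+/// ```rust,ignore
+/// let summaries = get_torrents(&in_memory_torrent_repository, &info_hashes);
+///
+/// for torrent in &summaries {
+///     println!("{}: {} seeders, {} leechers", torrent.info_hash, torrent.seeders, torrent.leechers);
+/// }
+/// ```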
+/// +/// # Arguments +/// +/// * `in_memory_torrent_repository` - A shared reference to the in-memory +/// torrent repository. +/// * `info_hashes` - A slice of infohashes for which to retrieve the torrent +/// information. +/// +/// # Returns +/// +/// A vector of [`BasicInfo`] structs for the requested torrents. #[must_use] pub fn get_torrents(in_memory_torrent_repository: &Arc, info_hashes: &[InfoHash]) -> Vec { let mut basic_infos: Vec = vec![]; diff --git a/packages/tracker-core/src/whitelist/authorization.rs b/packages/tracker-core/src/whitelist/authorization.rs index 3b7b8b4fb..a8323457b 100644 --- a/packages/tracker-core/src/whitelist/authorization.rs +++ b/packages/tracker-core/src/whitelist/authorization.rs @@ -1,3 +1,4 @@ +//! Whitelist authorization. use std::panic::Location; use std::sync::Arc; @@ -8,6 +9,10 @@ use tracing::instrument; use super::repository::in_memory::InMemoryWhitelist; use crate::error::WhitelistError; +/// Manages the authorization of torrents based on the whitelist. +/// +/// Used to determine whether a given torrent (`infohash`) is allowed +/// to be announced or scraped from the tracker. pub struct WhitelistAuthorization { /// Core tracker configuration. config: Core, @@ -17,7 +22,14 @@ pub struct WhitelistAuthorization { } impl WhitelistAuthorization { - /// Creates a new authorization instance. + /// Creates a new `WhitelistAuthorization` instance. + /// + /// # Arguments + /// - `config`: Tracker configuration. + /// - `in_memory_whitelist`: The in-memory whitelist instance. + /// + /// # Returns + /// A new `WhitelistAuthorization` instance. pub fn new(config: &Core, in_memory_whitelist: &Arc) -> Self { Self { config: config.clone(), @@ -25,12 +37,15 @@ impl WhitelistAuthorization { } } - /// It returns true if the torrent is authorized. + /// Checks whether a torrent is authorized. /// - /// # Errors + /// - If the tracker is **public**, all torrents are authorized. + /// - If the tracker is **private** (listed mode), only whitelisted torrents + /// are authorized. /// - /// Will return an error if the tracker is running in `listed` mode - /// and the infohash is not whitelisted. + /// # Errors + /// Returns `WhitelistError::TorrentNotWhitelisted` if the tracker is in `listed` mode + /// and the `info_hash` is not in the whitelist. #[instrument(skip(self, info_hash), err)] pub async fn authorize(&self, info_hash: &InfoHash) -> Result<(), WhitelistError> { if !self.is_listed() { @@ -47,12 +62,12 @@ impl WhitelistAuthorization { }) } - /// Returns `true` is the tracker is in listed mode. + /// Checks if the tracker is running in "listed" mode. fn is_listed(&self) -> bool { self.config.listed } - /// It checks if a torrent is whitelisted. + /// Checks if a torrent is present in the whitelist. async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { self.in_memory_whitelist.contains(info_hash).await } diff --git a/packages/tracker-core/src/whitelist/manager.rs b/packages/tracker-core/src/whitelist/manager.rs index 5ebd6db36..452fcb6c5 100644 --- a/packages/tracker-core/src/whitelist/manager.rs +++ b/packages/tracker-core/src/whitelist/manager.rs @@ -1,3 +1,7 @@ +//! Whitelist manager. +//! +//! This module provides the `WhitelistManager` struct, which is responsible for +//! managing the whitelist of torrents. 
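+//!
+//! A typical flow, shown as an illustrative sketch (repository construction is
+//! omitted):
+//!
+//! ```rust,ignore
+//! let whitelist_manager = WhitelistManager::new(database_whitelist, in_memory_whitelist);
+//!
+//! // Synchronize the in-memory whitelist with the database on start-up.
+//! whitelist_manager.load_whitelist_from_database().await?;
+//!
+//! // Allow a torrent to be announced and scraped on a listed tracker.
+//! whitelist_manager.add_torrent_to_whitelist(&info_hash).await?;
+//! ```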
use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; @@ -5,8 +9,11 @@ use bittorrent_primitives::info_hash::InfoHash; use super::repository::in_memory::InMemoryWhitelist; use super::repository::persisted::DatabaseWhitelist; use crate::databases; - -/// It handles the list of allowed torrents. Only for listed trackers. +/// Manages the whitelist of allowed torrents. +/// +/// This structure handles both the in-memory and persistent representations of +/// the whitelist. It is primarily relevant for private trackers that restrict +/// access to specific torrents. pub struct WhitelistManager { /// The in-memory list of allowed torrents. in_memory_whitelist: Arc, @@ -16,6 +23,17 @@ pub struct WhitelistManager { } impl WhitelistManager { + /// Creates a new `WhitelistManager` instance. + /// + /// # Arguments + /// + /// - `database_whitelist`: Persistent database-backed whitelist repository. + /// - `in_memory_whitelist`: In-memory whitelist repository for fast runtime + /// access. + /// + /// # Returns + /// + /// A new `WhitelistManager` instance. #[must_use] pub fn new(database_whitelist: Arc, in_memory_whitelist: Arc) -> Self { Self { @@ -24,35 +42,39 @@ impl WhitelistManager { } } - /// It adds a torrent to the whitelist. - /// Adding torrents is not relevant to public trackers. + /// Adds a torrent to the whitelist. /// - /// # Errors + /// This operation is relevant for private trackers to control which + /// torrents are allowed. /// - /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. + /// # Errors + /// Returns a `database::Error` if the operation fails in the database. pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { self.database_whitelist.add(info_hash)?; self.in_memory_whitelist.add(info_hash).await; Ok(()) } - /// It removes a torrent from the whitelist. - /// Removing torrents is not relevant to public trackers. + /// Removes a torrent from the whitelist. /// - /// # Errors + /// This operation is relevant for private trackers to revoke access to + /// specific torrents. /// - /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + /// # Errors + /// Returns a `database::Error` if the operation fails in the database. pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { self.database_whitelist.remove(info_hash)?; self.in_memory_whitelist.remove(info_hash).await; Ok(()) } - /// It loads the whitelist from the database. + /// Loads the whitelist from the database into memory. /// - /// # Errors + /// This is useful when restarting the tracker to ensure the in-memory + /// whitelist is synchronized with the database. /// - /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. + /// # Errors + /// Returns a `database::Error` if the operation fails to load from the database. pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { let whitelisted_torrents_from_database = self.database_whitelist.load_from_database()?; diff --git a/packages/tracker-core/src/whitelist/mod.rs b/packages/tracker-core/src/whitelist/mod.rs index a39768e93..d9ad18311 100644 --- a/packages/tracker-core/src/whitelist/mod.rs +++ b/packages/tracker-core/src/whitelist/mod.rs @@ -1,3 +1,21 @@ +//! This module contains the logic to manage the torrent whitelist. +//! +//! 
In tracker configurations where the tracker operates in "listed" mode, only +//! torrents that have been explicitly added to the whitelist are allowed to +//! perform announce and scrape actions. This module provides all the +//! functionality required to manage such a whitelist. +//! +//! The module is organized into the following submodules: +//! +//! - **`authorization`**: Contains the logic to authorize torrents based on their +//! whitelist status. +//! - **`manager`**: Provides high-level management functions for the whitelist, +//! such as adding or removing torrents. +//! - **`repository`**: Implements persistence for whitelist data. +//! - **`setup`**: Provides initialization routines for setting up the whitelist +//! system. +//! - **`test_helpers`**: Contains helper functions and fixtures for testing +//! whitelist functionality. pub mod authorization; pub mod manager; pub mod repository; diff --git a/packages/tracker-core/src/whitelist/repository/in_memory.rs b/packages/tracker-core/src/whitelist/repository/in_memory.rs index 4faeda784..0cee3a94b 100644 --- a/packages/tracker-core/src/whitelist/repository/in_memory.rs +++ b/packages/tracker-core/src/whitelist/repository/in_memory.rs @@ -1,29 +1,42 @@ +//! The in-memory list of allowed torrents. use bittorrent_primitives::info_hash::InfoHash; -/// The in-memory list of allowed torrents. +/// In-memory whitelist to manage allowed torrents. +/// +/// Stores `InfoHash` values for quick lookup and modification. #[derive(Debug, Default)] pub struct InMemoryWhitelist { - /// The list of allowed torrents. + /// A thread-safe set of whitelisted `InfoHash` values. whitelist: tokio::sync::RwLock>, } impl InMemoryWhitelist { - /// It adds a torrent from the whitelist in memory. + /// Adds a torrent to the in-memory whitelist. + /// + /// # Returns + /// + /// - `true` if the torrent was newly added. + /// - `false` if the torrent was already in the whitelist. pub async fn add(&self, info_hash: &InfoHash) -> bool { self.whitelist.write().await.insert(*info_hash) } - /// It removes a torrent from the whitelist in memory. + /// Removes a torrent from the in-memory whitelist. + /// + /// # Returns + /// + /// - `true` if the torrent was present and removed. + /// - `false` if the torrent was not found. pub(crate) async fn remove(&self, info_hash: &InfoHash) -> bool { self.whitelist.write().await.remove(info_hash) } - /// It checks if it contains an info-hash. + /// Checks if a torrent is in the whitelist. pub async fn contains(&self, info_hash: &InfoHash) -> bool { self.whitelist.read().await.contains(info_hash) } - /// It clears the whitelist. + /// Clears all torrents from the whitelist. pub(crate) async fn clear(&self) { let mut whitelist = self.whitelist.write().await; whitelist.clear(); diff --git a/packages/tracker-core/src/whitelist/repository/mod.rs b/packages/tracker-core/src/whitelist/repository/mod.rs index 51723b68d..d900a8c29 100644 --- a/packages/tracker-core/src/whitelist/repository/mod.rs +++ b/packages/tracker-core/src/whitelist/repository/mod.rs @@ -1,2 +1,3 @@ +//! Repository implementations for the whitelist. pub mod in_memory; pub mod persisted; diff --git a/packages/tracker-core/src/whitelist/repository/persisted.rs b/packages/tracker-core/src/whitelist/repository/persisted.rs index 4773cfbe6..eec6704d6 100644 --- a/packages/tracker-core/src/whitelist/repository/persisted.rs +++ b/packages/tracker-core/src/whitelist/repository/persisted.rs @@ -1,3 +1,4 @@ +//! The repository that persists the whitelist. 
use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; @@ -5,6 +6,9 @@ use crate::databases::{self, Database}; /// The persisted list of allowed torrents. +/// +/// This repository handles adding, removing, and loading torrents +/// from a persistent database like `SQLite` or `MySQL`. pub struct DatabaseWhitelist { /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) /// or [`MySQL`](crate::core::databases::mysql) @@ -12,16 +16,17 @@ impl DatabaseWhitelist { + /// Creates a new `DatabaseWhitelist`. #[must_use] pub fn new(database: Arc>) -> Self { Self { database } } - /// It adds a torrent to the whitelist if it has not been whitelisted previously + /// Adds a torrent to the whitelist if not already present. /// /// # Errors - /// - /// Will return a `database::Error` if unable to add the `info_hash` to the whitelist database. + /// Returns a `database::Error` if unable to add the `info_hash` to the + /// whitelist. pub(crate) fn add(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; @@ -34,11 +39,10 @@ impl DatabaseWhitelist { Ok(()) } - /// It removes a torrent from the whitelist in the database. + /// Removes a torrent from the whitelist if it exists. /// /// # Errors - /// - /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + /// Returns a `database::Error` if unable to remove the `info_hash`. pub(crate) fn remove(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; @@ -51,11 +55,11 @@ impl DatabaseWhitelist { Ok(()) } - /// It loads the whitelist from the database. + /// Loads the entire whitelist from the database. /// /// # Errors - /// - /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. + /// Returns a `database::Error` if unable to load whitelisted `info_hash` + /// values. pub(crate) fn load_from_database(&self) -> Result, databases::error::Error> { self.database.load_whitelist() } diff --git a/packages/tracker-core/src/whitelist/setup.rs b/packages/tracker-core/src/whitelist/setup.rs index 5b2a5de40..cb18c1478 100644 --- a/packages/tracker-core/src/whitelist/setup.rs +++ b/packages/tracker-core/src/whitelist/setup.rs @@ -1,3 +1,7 @@ +//! Initializes the whitelist manager. +//! +//! This module provides functions to set up the `WhitelistManager`, which is responsible +//! for managing whitelisted torrents in both the in-memory and persistent database repositories. use std::sync::Arc; use super::manager::WhitelistManager; @@ -5,6 +9,28 @@ use super::repository::in_memory::InMemoryWhitelist; use super::repository::persisted::DatabaseWhitelist; use crate::databases::Database; +/// Initializes the `WhitelistManager` by combining in-memory and database +/// repositories. +/// +/// The `WhitelistManager` handles the operations related to whitelisted +/// torrents, such as adding, removing, and verifying torrents in the whitelist. +/// It operates with: +/// +/// 1. **In-Memory Whitelist:** Provides fast, runtime-based access to +/// whitelisted torrents. +/// 2. **Database Whitelist:** Ensures persistent storage of the whitelist data. +/// +/// # Arguments +/// +/// * `database` - An `Arc>` representing the database connection, +/// used for persistent whitelist storage.
+/// * `in_memory_whitelist` - An `Arc` representing the in-memory +/// whitelist repository for fast access. +/// +/// # Returns +/// +/// An `Arc` instance that manages both the in-memory and database +/// whitelist repositories. #[must_use] pub fn initialize_whitelist_manager( database: Arc>, diff --git a/packages/tracker-core/src/whitelist/test_helpers.rs b/packages/tracker-core/src/whitelist/test_helpers.rs index cc30c4476..cf1699be4 100644 --- a/packages/tracker-core/src/whitelist/test_helpers.rs +++ b/packages/tracker-core/src/whitelist/test_helpers.rs @@ -1,5 +1,8 @@ -//! Some generic test helpers functions. - +//! Generic test helper functions for the whitelist module. +//! +//! This module provides utility functions to initialize the whitelist services required for testing. +//! In particular, it sets up the `WhitelistAuthorization` and `WhitelistManager` services using a +//! configured database and an in-memory whitelist repository. #[cfg(test)] pub(crate) mod tests { From 35ca4280affaae18aecf84f01f952ee173cd7943 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 13 Feb 2025 12:47:19 +0000 Subject: [PATCH 04/14] test: [#1266] add integration test for bittorrent_tracker_core lib --- packages/tracker-core/tests/integration.rs | 132 +++++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100644 packages/tracker-core/tests/integration.rs diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs new file mode 100644 index 000000000..4dbd60b9e --- /dev/null +++ b/packages/tracker-core/tests/integration.rs @@ -0,0 +1,132 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::str::FromStr; +use std::sync::Arc; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::databases::setup::initialize_database; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use bittorrent_tracker_core::whitelist; +use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use torrust_tracker_configuration::Core; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; + +/// # Panics +/// +/// Will panic if the temporary file path is not a valid UTF-8 string. +#[must_use] +pub fn ephemeral_configuration() -> Core { + let mut config = Core::default(); + + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + + config +} + +/// # Panics +/// +/// Will panic if the string representation of the info hash is not a valid infohash. +#[must_use] +pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") +} + +/// Sample peer whose state is not relevant for the tests.
+#[must_use] +pub fn sample_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(remote_client_ip(), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } +} + +// The client peer IP. +#[must_use] +fn remote_client_ip() -> IpAddr { + IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) +} + +struct Container { + pub announce_handler: Arc, + pub scrape_handler: Arc, +} + +impl Container { + pub fn initialize(config: &Core) -> Self { + let database = initialize_database(config); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( + config, + &in_memory_whitelist.clone(), + )); + let announce_handler = Arc::new(AnnounceHandler::new( + config, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + Self { + announce_handler, + scrape_handler, + } + } +} + +#[tokio::test] +async fn test_announce_and_scrape_requests() { + let config = ephemeral_configuration(); + + let container = Container::initialize(&config); + + let info_hash = sample_info_hash(); + + let mut peer = sample_peer(); + + // Announce + + // First announce: download started + peer.event = AnnounceEvent::Started; + let announce_data = + container + .announce_handler + .announce(&info_hash, &mut peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible); + + // NOTICE: you don't get back the peer making the request. + assert_eq!(announce_data.peers.len(), 0); + assert_eq!(announce_data.stats.downloaded, 0); + + // Second announce: download completed + peer.event = AnnounceEvent::Completed; + let announce_data = + container + .announce_handler + .announce(&info_hash, &mut peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible); + + assert_eq!(announce_data.peers.len(), 0); + assert_eq!(announce_data.stats.downloaded, 1); + + // Scrape + + let scrape_data = container.scrape_handler.scrape(&vec![info_hash]).await; + + assert!(scrape_data.files.contains_key(&info_hash)); +} + +#[test] +fn test_scrape_request() {} From 81825c9a5b1546bda00f7ddfa70bf176937bf1a6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 12:32:59 +0000 Subject: [PATCH 05/14] refactor: [#1268] separate UDP handlers into different modules Following HTTP structure.
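The handlers are split per request type, mirroring the layout of the HTTP server handlers. The sketch below is illustrative only; the exact declarations and re-exports in the new `src/servers/udp/handlers/mod.rs` may differ:

    // src/servers/udp/handlers/mod.rs -- illustrative sketch, not the literal file contents
    pub mod announce; // handle_announce() and its tests
    pub mod connect;  // handle_connect() and its tests
    pub mod error;    // handle_error()
    pub mod scrape;   // handle_scrape() and its tests
    // Shared request parsing/dispatch (e.g. handle_packet()) is assumed to stay in mod.rs.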
--- src/servers/udp/handlers.rs | 1877 -------------------------- src/servers/udp/handlers/announce.rs | 875 ++++++++++++ src/servers/udp/handlers/connect.rs | 199 +++ src/servers/udp/handlers/error.rs | 80 ++ src/servers/udp/handlers/mod.rs | 366 +++++ src/servers/udp/handlers/scrape.rs | 429 ++++++ 6 files changed, 1949 insertions(+), 1877 deletions(-) delete mode 100644 src/servers/udp/handlers.rs create mode 100644 src/servers/udp/handlers/announce.rs create mode 100644 src/servers/udp/handlers/connect.rs create mode 100644 src/servers/udp/handlers/error.rs create mode 100644 src/servers/udp/handlers/mod.rs create mode 100644 src/servers/udp/handlers/scrape.rs diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs deleted file mode 100644 index 4f98f52d9..000000000 --- a/src/servers/udp/handlers.rs +++ /dev/null @@ -1,1877 +0,0 @@ -//! Handlers for the UDP server. -use std::hash::{DefaultHasher, Hash, Hasher as _}; -use std::net::{IpAddr, SocketAddr}; -use std::ops::Range; -use std::sync::Arc; -use std::time::Instant; - -use aquatic_udp_protocol::{ - AnnounceInterval, AnnounceRequest, AnnounceResponse, AnnounceResponseFixedData, ConnectRequest, ConnectResponse, - ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfDownloads, NumberOfPeers, Port, Request, RequestParseError, Response, - ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, -}; -use bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::whitelist; -use torrust_tracker_clock::clock::Time as _; -use torrust_tracker_configuration::Core; -use tracing::{instrument, Level}; -use uuid::Uuid; -use zerocopy::network_endian::I32; - -use super::connection_cookie::{check, make}; -use super::RawRequest; -use crate::container::UdpTrackerContainer; -use crate::packages::udp_tracker_core; -use crate::servers::udp::error::Error; -use crate::servers::udp::{peer_builder, UDP_TRACKER_LOG_TARGET}; -use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; -use crate::CurrentClock; - -#[derive(Debug, Clone, PartialEq)] -pub(super) struct CookieTimeValues { - pub(super) issue_time: f64, - pub(super) valid_range: Range, -} - -impl CookieTimeValues { - pub(super) fn new(cookie_lifetime: f64) -> Self { - let issue_time = CurrentClock::now().as_secs_f64(); - let expiry_time = issue_time - cookie_lifetime - 1.0; - let tolerance_max_time = issue_time + 1.0; - - Self { - issue_time, - valid_range: expiry_time..tolerance_max_time, - } - } -} - -/// It handles the incoming UDP packets. -/// -/// It's responsible for: -/// -/// - Parsing the incoming packet. -/// - Delegating the request to the correct handler depending on the request type. -/// -/// It will return an `Error` response if the request is invalid. 
-#[instrument(fields(request_id), skip(udp_request, udp_tracker_container, cookie_time_values), ret(level = Level::TRACE))] -pub(crate) async fn handle_packet( - udp_request: RawRequest, - udp_tracker_container: Arc, - local_addr: SocketAddr, - cookie_time_values: CookieTimeValues, -) -> Response { - let request_id = Uuid::new_v4(); - - tracing::Span::current().record("request_id", request_id.to_string()); - tracing::debug!("Handling Packets: {udp_request:?}"); - - let start_time = Instant::now(); - - let response = - match Request::parse_bytes(&udp_request.payload[..udp_request.payload.len()], MAX_SCRAPE_TORRENTS).map_err(Error::from) { - Ok(request) => match handle_request( - request, - udp_request.from, - udp_tracker_container.clone(), - cookie_time_values.clone(), - ) - .await - { - Ok(response) => return response, - Err((e, transaction_id)) => { - match &e { - Error::CookieValueNotNormal { .. } - | Error::CookieValueExpired { .. } - | Error::CookieValueFromFuture { .. } => { - // code-review: should we include `RequestParseError` and `BadRequest`? - let mut ban_service = udp_tracker_container.ban_service.write().await; - ban_service.increase_counter(&udp_request.from.ip()); - } - _ => {} - } - - handle_error( - udp_request.from, - local_addr, - request_id, - &udp_tracker_container.udp_stats_event_sender, - cookie_time_values.valid_range.clone(), - &e, - Some(transaction_id), - ) - .await - } - }, - Err(e) => { - handle_error( - udp_request.from, - local_addr, - request_id, - &udp_tracker_container.udp_stats_event_sender, - cookie_time_values.valid_range.clone(), - &e, - None, - ) - .await - } - }; - - let latency = start_time.elapsed(); - tracing::trace!(?latency, "responded"); - - response -} - -/// It dispatches the request to the correct handler. -/// -/// # Errors -/// -/// If a error happens in the `handle_request` function, it will just return the `ServerError`. -#[instrument(skip(request, remote_addr, udp_tracker_container, cookie_time_values))] -pub async fn handle_request( - request: Request, - remote_addr: SocketAddr, - udp_tracker_container: Arc, - cookie_time_values: CookieTimeValues, -) -> Result { - tracing::trace!("handle request"); - - match request { - Request::Connect(connect_request) => Ok(handle_connect( - remote_addr, - &connect_request, - &udp_tracker_container.udp_stats_event_sender, - cookie_time_values.issue_time, - ) - .await), - Request::Announce(announce_request) => { - handle_announce( - remote_addr, - &announce_request, - &udp_tracker_container.core_config, - &udp_tracker_container.announce_handler, - &udp_tracker_container.whitelist_authorization, - &udp_tracker_container.udp_stats_event_sender, - cookie_time_values.valid_range, - ) - .await - } - Request::Scrape(scrape_request) => { - handle_scrape( - remote_addr, - &scrape_request, - &udp_tracker_container.scrape_handler, - &udp_tracker_container.udp_stats_event_sender, - cookie_time_values.valid_range, - ) - .await - } - } -} - -/// It handles the `Connect` request. Refer to [`Connect`](crate::servers::udp#connect) -/// request for more information. -/// -/// # Errors -/// -/// This function does not ever return an error. 
-#[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] -pub async fn handle_connect( - remote_addr: SocketAddr, - request: &ConnectRequest, - opt_udp_stats_event_sender: &Arc>>, - cookie_issue_time: f64, -) -> Response { - tracing::Span::current().record("transaction_id", request.transaction_id.0.to_string()); - - tracing::trace!("handle connect"); - - let connection_id = make(gen_remote_fingerprint(&remote_addr), cookie_issue_time).expect("it should be a normal value"); - - let response = ConnectResponse { - transaction_id: request.transaction_id, - connection_id, - }; - - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Connect) - .await; - } - SocketAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Connect) - .await; - } - } - } - - Response::from(response) -} - -/// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) -/// request for more information. -/// -/// # Errors -/// -/// If a error happens in the `handle_announce` function, it will just return the `ServerError`. -#[allow(clippy::too_many_arguments)] -#[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_handler, whitelist_authorization, opt_udp_stats_event_sender), ret(level = Level::TRACE))] -pub async fn handle_announce( - remote_addr: SocketAddr, - request: &AnnounceRequest, - core_config: &Arc, - announce_handler: &Arc, - whitelist_authorization: &Arc, - opt_udp_stats_event_sender: &Arc>>, - cookie_valid_range: Range, -) -> Result { - tracing::Span::current() - .record("transaction_id", request.transaction_id.0.to_string()) - .record("connection_id", request.connection_id.0.to_string()) - .record("info_hash", InfoHash::from_bytes(&request.info_hash.0).to_hex_string()); - - tracing::trace!("handle announce"); - - check( - &request.connection_id, - gen_remote_fingerprint(&remote_addr), - cookie_valid_range, - ) - .map_err(|e| (e, request.transaction_id))?; - - let info_hash = request.info_hash.into(); - let remote_client_ip = remote_addr.ip(); - - // Authorization - whitelist_authorization - .authorize(&info_hash) - .await - .map_err(|e| Error::TrackerError { - source: (Arc::new(e) as Arc).into(), - }) - .map_err(|e| (e, request.transaction_id))?; - - let mut peer = peer_builder::from_request(request, &remote_client_ip); - let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); - - let response = announce_handler.announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted); - - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_client_ip { - IpAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Announce) - .await; - } - IpAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Announce) - .await; - } - } - } - - #[allow(clippy::cast_possible_truncation)] - if remote_addr.is_ipv4() { - let announce_response = AnnounceResponse { - fixed: AnnounceResponseFixedData { - transaction_id: request.transaction_id, - announce_interval: AnnounceInterval(I32::new(i64::from(core_config.announce_policy.interval) as i32)), - leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), - seeders: 
NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), - }, - peers: response - .peers - .iter() - .filter_map(|peer| { - if let IpAddr::V4(ip) = peer.peer_addr.ip() { - Some(ResponsePeer:: { - ip_address: ip.into(), - port: Port(peer.peer_addr.port().into()), - }) - } else { - None - } - }) - .collect(), - }; - - Ok(Response::from(announce_response)) - } else { - let announce_response = AnnounceResponse { - fixed: AnnounceResponseFixedData { - transaction_id: request.transaction_id, - announce_interval: AnnounceInterval(I32::new(i64::from(core_config.announce_policy.interval) as i32)), - leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), - seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), - }, - peers: response - .peers - .iter() - .filter_map(|peer| { - if let IpAddr::V6(ip) = peer.peer_addr.ip() { - Some(ResponsePeer:: { - ip_address: ip.into(), - port: Port(peer.peer_addr.port().into()), - }) - } else { - None - } - }) - .collect(), - }; - - Ok(Response::from(announce_response)) - } -} - -/// It handles the `Scrape` request. Refer to [`Scrape`](crate::servers::udp#scrape) -/// request for more information. -/// -/// # Errors -/// -/// This function does not ever return an error. -#[instrument(fields(transaction_id, connection_id), skip(scrape_handler, opt_udp_stats_event_sender), ret(level = Level::TRACE))] -pub async fn handle_scrape( - remote_addr: SocketAddr, - request: &ScrapeRequest, - scrape_handler: &Arc, - opt_udp_stats_event_sender: &Arc>>, - cookie_valid_range: Range, -) -> Result { - tracing::Span::current() - .record("transaction_id", request.transaction_id.0.to_string()) - .record("connection_id", request.connection_id.0.to_string()); - - tracing::trace!("handle scrape"); - - check( - &request.connection_id, - gen_remote_fingerprint(&remote_addr), - cookie_valid_range, - ) - .map_err(|e| (e, request.transaction_id))?; - - // Convert from aquatic infohashes - let mut info_hashes: Vec = vec![]; - for info_hash in &request.info_hashes { - info_hashes.push((*info_hash).into()); - } - - let scrape_data = scrape_handler.scrape(&info_hashes).await; - - let mut torrent_stats: Vec = Vec::new(); - - for file in &scrape_data.files { - let swarm_metadata = file.1; - - #[allow(clippy::cast_possible_truncation)] - let scrape_entry = { - TorrentScrapeStatistics { - seeders: NumberOfPeers(I32::new(i64::from(swarm_metadata.complete) as i32)), - completed: NumberOfDownloads(I32::new(i64::from(swarm_metadata.downloaded) as i32)), - leechers: NumberOfPeers(I32::new(i64::from(swarm_metadata.incomplete) as i32)), - } - }; - - torrent_stats.push(scrape_entry); - } - - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Scrape) - .await; - } - SocketAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Scrape) - .await; - } - } - } - - let response = ScrapeResponse { - transaction_id: request.transaction_id, - torrent_stats, - }; - - Ok(Response::from(response)) -} - -#[allow(clippy::too_many_arguments)] -#[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] -async fn handle_error( - remote_addr: SocketAddr, - local_addr: SocketAddr, - request_id: Uuid, - opt_udp_stats_event_sender: &Arc>>, - cookie_valid_range: Range, - e: &Error, - transaction_id: Option, -) -> Response { - 
tracing::trace!("handle error"); - - match transaction_id { - Some(transaction_id) => { - let transaction_id = transaction_id.0.to_string(); - tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %remote_addr, %local_addr, %request_id, %transaction_id, "response error"); - } - None => { - tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %remote_addr, %local_addr, %request_id, "response error"); - } - } - - let e = if let Error::RequestParseError { request_parse_error } = e { - match request_parse_error { - RequestParseError::Sendable { - connection_id, - transaction_id, - err, - } => { - if let Err(e) = check(connection_id, gen_remote_fingerprint(&remote_addr), cookie_valid_range) { - (e.to_string(), Some(*transaction_id)) - } else { - ((*err).to_string(), Some(*transaction_id)) - } - } - RequestParseError::Unsendable { err } => (err.to_string(), transaction_id), - } - } else { - (e.to_string(), transaction_id) - }; - - if e.1.is_some() { - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Error) - .await; - } - SocketAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Error) - .await; - } - } - } - } - - Response::from(ErrorResponse { - transaction_id: e.1.unwrap_or(TransactionId(I32::new(0))), - message: e.0.into(), - }) -} - -fn gen_remote_fingerprint(remote_addr: &SocketAddr) -> u64 { - let mut state = DefaultHasher::new(); - remote_addr.hash(&mut state); - state.finish() -} - -#[cfg(test)] -mod tests { - - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::ops::Range; - use std::sync::Arc; - - use aquatic_udp_protocol::{NumberOfBytes, PeerId}; - use bittorrent_tracker_core::announce_handler::AnnounceHandler; - use bittorrent_tracker_core::databases::setup::initialize_database; - use bittorrent_tracker_core::scrape_handler::ScrapeHandler; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use bittorrent_tracker_core::whitelist; - use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; - use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; - use futures::future::BoxFuture; - use mockall::mock; - use tokio::sync::mpsc::error::SendError; - use torrust_tracker_clock::clock::Time; - use torrust_tracker_configuration::{Configuration, Core}; - use torrust_tracker_primitives::peer; - use torrust_tracker_test_helpers::configuration; - - use super::gen_remote_fingerprint; - use crate::packages::udp_tracker_core; - use crate::{packages, CurrentClock}; - - struct CoreTrackerServices { - pub core_config: Arc, - pub announce_handler: Arc, - pub scrape_handler: Arc, - pub in_memory_torrent_repository: Arc, - pub in_memory_whitelist: Arc, - pub whitelist_authorization: Arc, - } - - struct CoreUdpTrackerServices { - pub udp_stats_event_sender: Arc>>, - } - - fn default_testing_tracker_configuration() -> Configuration { - configuration::ephemeral() - } - - fn initialize_core_tracker_services_for_default_tracker_configuration() -> (CoreTrackerServices, CoreUdpTrackerServices) { - initialize_core_tracker_services(&default_testing_tracker_configuration()) - } - - fn initialize_core_tracker_services_for_public_tracker() -> (CoreTrackerServices, CoreUdpTrackerServices) { - 
initialize_core_tracker_services(&configuration::ephemeral_public()) - } - - fn initialize_core_tracker_services_for_listed_tracker() -> (CoreTrackerServices, CoreUdpTrackerServices) { - initialize_core_tracker_services(&configuration::ephemeral_listed()) - } - - fn initialize_core_tracker_services(config: &Configuration) -> (CoreTrackerServices, CoreUdpTrackerServices) { - let core_config = Arc::new(config.core.clone()); - let database = initialize_database(&config.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - ( - CoreTrackerServices { - core_config, - announce_handler, - scrape_handler, - in_memory_torrent_repository, - in_memory_whitelist, - whitelist_authorization, - }, - CoreUdpTrackerServices { udp_stats_event_sender }, - ) - } - - fn sample_ipv4_remote_addr() -> SocketAddr { - sample_ipv4_socket_address() - } - - fn sample_ipv4_remote_addr_fingerprint() -> u64 { - gen_remote_fingerprint(&sample_ipv4_socket_address()) - } - - fn sample_ipv6_remote_addr() -> SocketAddr { - sample_ipv6_socket_address() - } - - fn sample_ipv6_remote_addr_fingerprint() -> u64 { - gen_remote_fingerprint(&sample_ipv6_socket_address()) - } - - fn sample_ipv4_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) - } - - fn sample_ipv6_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) - } - - fn sample_issue_time() -> f64 { - 1_000_000_000_f64 - } - - fn sample_cookie_valid_range() -> Range { - sample_issue_time() - 10.0..sample_issue_time() + 10.0 - } - - #[derive(Debug, Default)] - pub struct TorrentPeerBuilder { - peer: peer::Peer, - } - - impl TorrentPeerBuilder { - #[must_use] - pub fn new() -> Self { - Self { - peer: peer::Peer { - updated: CurrentClock::now(), - ..Default::default() - }, - } - } - - #[must_use] - pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - #[must_use] - pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { - self.peer.peer_id = peer_id; - self - } - - #[must_use] - pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes::new(left); - self - } - - #[must_use] - pub fn into(self) -> peer::Peer { - self.peer - } - } - - struct TrackerConfigurationBuilder { - configuration: Configuration, - } - - impl TrackerConfigurationBuilder { - pub fn default() -> TrackerConfigurationBuilder { - let default_configuration = default_testing_tracker_configuration(); - TrackerConfigurationBuilder { - configuration: default_configuration, - } - } - - pub fn with_external_ip(mut self, external_ip: &str) -> Self { - self.configuration.core.net.external_ip = Some(external_ip.to_owned().parse().expect("valid IP address")); - self - } - - pub fn 
into(self) -> Configuration { - self.configuration - } - } - - mock! { - UdpStatsEventSender {} - impl udp_tracker_core::statistics::event::sender::Sender for UdpStatsEventSender { - fn send_event(&self, event: udp_tracker_core::statistics::event::Event) -> BoxFuture<'static,Option > > > ; - } - } - - mod connect_request { - - use std::future; - use std::sync::Arc; - - use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; - use mockall::predicate::eq; - - use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr}; - use crate::packages::{self, udp_tracker_core}; - use crate::servers::udp::connection_cookie::make; - use crate::servers::udp::handlers::handle_connect; - use crate::servers::udp::handlers::tests::{ - sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv6_remote_addr_fingerprint, sample_issue_time, - MockUdpStatsEventSender, - }; - - fn sample_connect_request() -> ConnectRequest { - ConnectRequest { - transaction_id: TransactionId(0i32.into()), - } - } - - #[tokio::test] - async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let request = ConnectRequest { - transaction_id: TransactionId(0i32.into()), - }; - - let response = handle_connect( - sample_ipv4_remote_addr(), - &request, - &udp_stats_event_sender, - sample_issue_time(), - ) - .await; - - assert_eq!( - response, - Response::Connect(ConnectResponse { - connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), - transaction_id: request.transaction_id - }) - ); - } - - #[tokio::test] - async fn a_connect_response_should_contain_a_new_connection_id() { - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let request = ConnectRequest { - transaction_id: TransactionId(0i32.into()), - }; - - let response = handle_connect( - sample_ipv4_remote_addr(), - &request, - &udp_stats_event_sender, - sample_issue_time(), - ) - .await; - - assert_eq!( - response, - Response::Connect(ConnectResponse { - connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), - transaction_id: request.transaction_id - }) - ); - } - - #[tokio::test] - async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let request = ConnectRequest { - transaction_id: TransactionId(0i32.into()), - }; - - let response = handle_connect( - sample_ipv6_remote_addr(), - &request, - &udp_stats_event_sender, - sample_issue_time(), - ) - .await; - - assert_eq!( - response, - Response::Connect(ConnectResponse { - connection_id: make(sample_ipv6_remote_addr_fingerprint(), sample_issue_time()).unwrap(), - transaction_id: request.transaction_id - }) - ); - } - - #[tokio::test] - async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock - .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp4Connect)) - .times(1) - .returning(|_| 
Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - let client_socket_address = sample_ipv4_socket_address(); - - handle_connect( - client_socket_address, - &sample_connect_request(), - &udp_stats_event_sender, - sample_issue_time(), - ) - .await; - } - - #[tokio::test] - async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock - .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp6Connect)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - handle_connect( - sample_ipv6_remote_addr(), - &sample_connect_request(), - &udp_stats_event_sender, - sample_issue_time(), - ) - .await; - } - } - - mod announce_request { - - use std::net::Ipv4Addr; - use std::num::NonZeroU16; - - use aquatic_udp_protocol::{ - AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectionId, NumberOfBytes, NumberOfPeers, - PeerId as AquaticPeerId, PeerKey, Port, TransactionId, - }; - - use super::{sample_ipv4_remote_addr_fingerprint, sample_issue_time}; - use crate::servers::udp::connection_cookie::make; - - struct AnnounceRequestBuilder { - request: AnnounceRequest, - } - - impl AnnounceRequestBuilder { - pub fn default() -> AnnounceRequestBuilder { - let client_ip = Ipv4Addr::new(126, 0, 0, 1); - let client_port = 8080; - let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); - - let default_request = AnnounceRequest { - connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), - action_placeholder: AnnounceActionPlaceholder::default(), - transaction_id: TransactionId(0i32.into()), - info_hash: info_hash_aquatic, - peer_id: AquaticPeerId([255u8; 20]), - bytes_downloaded: NumberOfBytes(0i64.into()), - bytes_uploaded: NumberOfBytes(0i64.into()), - bytes_left: NumberOfBytes(0i64.into()), - event: AnnounceEvent::Started.into(), - ip_address: client_ip.into(), - key: PeerKey::new(0i32), - peers_wanted: NumberOfPeers::new(1i32), - port: Port::new(NonZeroU16::new(client_port).expect("a non-zero client port")), - }; - AnnounceRequestBuilder { - request: default_request, - } - } - - pub fn with_connection_id(mut self, connection_id: ConnectionId) -> Self { - self.request.connection_id = connection_id; - self - } - - pub fn with_info_hash(mut self, info_hash: aquatic_udp_protocol::InfoHash) -> Self { - self.request.info_hash = info_hash; - self - } - - pub fn with_peer_id(mut self, peer_id: AquaticPeerId) -> Self { - self.request.peer_id = peer_id; - self - } - - pub fn with_ip_address(mut self, ip_address: Ipv4Addr) -> Self { - self.request.ip_address = ip_address.into(); - self - } - - pub fn with_port(mut self, port: u16) -> Self { - self.request.port = Port(port.into()); - self - } - - pub fn into(self) -> AnnounceRequest { - self.request - } - } - - mod using_ipv4 { - - use std::future; - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - - use aquatic_udp_protocol::{ - AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, - PeerId as AquaticPeerId, Response, ResponsePeer, - }; - use bittorrent_tracker_core::announce_handler::AnnounceHandler; - use 
bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::whitelist; - use mockall::predicate::eq; - use torrust_tracker_configuration::Core; - - use crate::packages::{self, udp_tracker_core}; - use crate::servers::udp::connection_cookie::make; - use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::servers::udp::handlers::tests::{ - gen_remote_fingerprint, initialize_core_tracker_services_for_default_tracker_configuration, - initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, - sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, - }; - use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; - - #[tokio::test] - async fn an_announced_peer_should_be_added_to_the_tracker() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let client_ip = Ipv4Addr::new(126, 0, 0, 1); - let client_port = 8080; - let info_hash = AquaticInfoHash([0u8; 20]); - let peer_id = AquaticPeerId([255u8; 20]); - - let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(client_ip) - .with_port(client_port) - .into(); - - handle_announce( - remote_addr, - &request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let peers = core_tracker_services - .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); - - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) - .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip), client_port)) - .into(); - - assert_eq!(peers[0], Arc::new(expected_peer)); - } - - #[tokio::test] - async fn the_announced_peer_should_not_be_included_in_the_response() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .into(); - - let response = handle_announce( - remote_addr, - &request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let empty_peer_vector: Vec> = vec![]; - assert_eq!( - response, - Response::from(AnnounceResponse { - fixed: AnnounceResponseFixedData { - transaction_id: request.transaction_id, - announce_interval: AnnounceInterval(120i32.into()), - leechers: NumberOfPeers(0i32.into()), - seeders: NumberOfPeers(1i32.into()), - }, - peers: empty_peer_vector - }) - ); - } - - #[tokio::test] - async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( - ) { - // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): - // "Do note that most trackers will only honor the IP address field 
under limited circumstances." - - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let info_hash = AquaticInfoHash([0u8; 20]); - let peer_id = AquaticPeerId([255u8; 20]); - let client_port = 8080; - - let remote_client_ip = Ipv4Addr::new(126, 0, 0, 1); - let remote_client_port = 8081; - let peer_address = Ipv4Addr::new(126, 0, 0, 2); - - let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(peer_address) - .with_port(client_port) - .into(); - - handle_announce( - remote_addr, - &request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let peers = core_tracker_services - .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); - - assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); - } - - fn add_a_torrent_peer_using_ipv6(in_memory_torrent_repository: &Arc) { - let info_hash = AquaticInfoHash([0u8; 20]); - - let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); - let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); - let client_port = 8080; - let peer_id = AquaticPeerId([255u8; 20]); - - let peer_using_ipv6 = TorrentPeerBuilder::new() - .with_peer_id(peer_id) - .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) - .into(); - - let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6); - } - - async fn announce_a_new_peer_using_ipv4( - core_config: Arc, - announce_handler: Arc, - whitelist_authorization: Arc, - ) -> Response { - let (udp_stats_event_sender, _udp_stats_repository) = - packages::udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .into(); - - handle_announce( - remote_addr, - &request, - &core_config, - &announce_handler, - &whitelist_authorization, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap() - } - - #[tokio::test] - async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { - let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository); - - let response = announce_a_new_peer_using_ipv4( - core_tracker_services.core_config.clone(), - core_tracker_services.announce_handler.clone(), - core_tracker_services.whitelist_authorization, - ) - .await; - - // The response should not contain the peer using IPV6 - let peers: Option>> = match response { - Response::AnnounceIpv6(announce_response) => Some(announce_response.peers), - _ => None, - }; - let no_ipv6_peers = peers.is_none(); - assert!(no_ipv6_peers); - } - - #[tokio::test] - async fn should_send_the_upd4_announce_event() { - let mut udp_stats_event_sender_mock = 
MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock - .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp4Announce)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - let (core_tracker_services, _core_udp_tracker_services) = - initialize_core_tracker_services_for_default_tracker_configuration(); - - handle_announce( - sample_ipv4_socket_address(), - &AnnounceRequestBuilder::default().into(), - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - } - - mod from_a_loopback_ip { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - - use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - - use crate::servers::udp::connection_cookie::make; - use crate::servers::udp::handlers::handle_announce; - use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::servers::udp::handlers::tests::{ - gen_remote_fingerprint, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, - sample_issue_time, TorrentPeerBuilder, - }; - - #[tokio::test] - async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { - let (core_tracker_services, core_udp_tracker_services) = - initialize_core_tracker_services_for_public_tracker(); - - let client_ip = Ipv4Addr::new(127, 0, 0, 1); - let client_port = 8080; - let info_hash = AquaticInfoHash([0u8; 20]); - let peer_id = AquaticPeerId([255u8; 20]); - - let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(client_ip) - .with_port(client_port) - .into(); - - handle_announce( - remote_addr, - &request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let peers = core_tracker_services - .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); - - let external_ip_in_tracker_configuration = core_tracker_services.core_config.net.external_ip.unwrap(); - - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) - .with_peer_address(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) - .into(); - - assert_eq!(peers[0], Arc::new(expected_peer)); - } - } - } - - mod using_ipv6 { - - use std::future; - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - - use aquatic_udp_protocol::{ - AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, - PeerId as AquaticPeerId, Response, ResponsePeer, - }; - use bittorrent_tracker_core::announce_handler::AnnounceHandler; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::whitelist; - use mockall::predicate::eq; - use torrust_tracker_configuration::Core; - - use crate::packages::{self, udp_tracker_core}; - use crate::servers::udp::connection_cookie::make; - use 
crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::servers::udp::handlers::tests::{ - gen_remote_fingerprint, initialize_core_tracker_services_for_default_tracker_configuration, - initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, - sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, - }; - use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; - - #[tokio::test] - async fn an_announced_peer_should_be_added_to_the_tracker() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); - let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); - let client_port = 8080; - let info_hash = AquaticInfoHash([0u8; 20]); - let peer_id = AquaticPeerId([255u8; 20]); - - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(client_ip_v4) - .with_port(client_port) - .into(); - - handle_announce( - remote_addr, - &request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let peers = core_tracker_services - .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); - - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) - .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) - .into(); - - assert_eq!(peers[0], Arc::new(expected_peer)); - } - - #[tokio::test] - async fn the_announced_peer_should_not_be_included_in_the_response() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); - let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); - - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .into(); - - let response = handle_announce( - remote_addr, - &request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let empty_peer_vector: Vec> = vec![]; - assert_eq!( - response, - Response::from(AnnounceResponse { - fixed: AnnounceResponseFixedData { - transaction_id: request.transaction_id, - announce_interval: AnnounceInterval(120i32.into()), - leechers: NumberOfPeers(0i32.into()), - seeders: NumberOfPeers(1i32.into()), - }, - peers: empty_peer_vector - }) - ); - } - - #[tokio::test] - async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( - ) { - // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): - // "Do note that most trackers will only honor the IP address field under limited circumstances." 
- - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let info_hash = AquaticInfoHash([0u8; 20]); - let peer_id = AquaticPeerId([255u8; 20]); - let client_port = 8080; - - let remote_client_ip = "::100".parse().unwrap(); // IPV4 ::0.0.1.0 -> IPV6 = ::100 = ::ffff:0:100 = 0:0:0:0:0:ffff:0:0100 - let remote_client_port = 8081; - let peer_address = "126.0.0.1".parse().unwrap(); - - let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(peer_address) - .with_port(client_port) - .into(); - - handle_announce( - remote_addr, - &request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let peers = core_tracker_services - .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); - - // When using IPv6 the tracker converts the remote client ip into a IPv4 address - assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); - } - - fn add_a_torrent_peer_using_ipv4(in_memory_torrent_repository: &Arc) { - let info_hash = AquaticInfoHash([0u8; 20]); - - let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); - let client_port = 8080; - let peer_id = AquaticPeerId([255u8; 20]); - - let peer_using_ipv4 = TorrentPeerBuilder::new() - .with_peer_id(peer_id) - .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) - .into(); - - let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4); - } - - async fn announce_a_new_peer_using_ipv6( - core_config: Arc, - announce_handler: Arc, - whitelist_authorization: Arc, - ) -> Response { - let (udp_stats_event_sender, _udp_stats_repository) = - packages::udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); - let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); - let client_port = 8080; - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .into(); - - handle_announce( - remote_addr, - &request, - &core_config, - &announce_handler, - &whitelist_authorization, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap() - } - - #[tokio::test] - async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { - let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository); - - let response = announce_a_new_peer_using_ipv6( - core_tracker_services.core_config.clone(), - core_tracker_services.announce_handler.clone(), - core_tracker_services.whitelist_authorization, - ) - .await; - - // The response should not contain the peer using IPV4 - let peers: Option>> = match response { - Response::AnnounceIpv4(announce_response) => Some(announce_response.peers), - _ => None, - }; - let 
no_ipv4_peers = peers.is_none(); - assert!(no_ipv4_peers); - } - - #[tokio::test] - async fn should_send_the_upd6_announce_event() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock - .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp6Announce)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - let (core_tracker_services, _core_udp_tracker_services) = - initialize_core_tracker_services_for_default_tracker_configuration(); - - let remote_addr = sample_ipv6_remote_addr(); - - let announce_request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .into(); - - handle_announce( - remote_addr, - &announce_request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - } - - mod from_a_loopback_ip { - use std::future; - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::sync::Arc; - - use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use bittorrent_tracker_core::announce_handler::AnnounceHandler; - use bittorrent_tracker_core::databases::setup::initialize_database; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; - use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; - use mockall::predicate::eq; - - use crate::packages::udp_tracker_core; - use crate::servers::udp::connection_cookie::make; - use crate::servers::udp::handlers::handle_announce; - use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::servers::udp::handlers::tests::{ - gen_remote_fingerprint, sample_cookie_valid_range, sample_issue_time, MockUdpStatsEventSender, - TrackerConfigurationBuilder, - }; - - #[tokio::test] - async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { - let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - - let database = initialize_database(&config.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = - Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock - .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp6Announce)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); - let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); - - let client_ip_v4 = loopback_ipv4; - 
let client_ip_v6 = loopback_ipv6; - let client_port = 8080; - - let info_hash = AquaticInfoHash([0u8; 20]); - let peer_id = AquaticPeerId([255u8; 20]); - - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(client_ip_v4) - .with_port(client_port) - .into(); - - let core_config = Arc::new(config.core.clone()); - - handle_announce( - remote_addr, - &request, - &core_config, - &announce_handler, - &whitelist_authorization, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); - - let external_ip_in_tracker_configuration = core_config.net.external_ip.unwrap(); - - assert!(external_ip_in_tracker_configuration.is_ipv6()); - - // There's a special type of IPv6 addresses that provide compatibility with IPv4. - // The last 32 bits of these addresses represent an IPv4, and are represented like this: - // 1111:2222:3333:4444:5555:6666:1.2.3.4 - // - // ::127.0.0.1 is the IPV6 representation for the IPV4 address 127.0.0.1. - assert_eq!(Ok(peers[0].peer_addr.ip()), "::126.0.0.1".parse()); - } - } - } - } - - mod scrape_request { - use std::net::SocketAddr; - use std::sync::Arc; - - use aquatic_udp_protocol::{ - InfoHash, NumberOfDownloads, NumberOfPeers, PeerId, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, - TransactionId, - }; - use bittorrent_tracker_core::scrape_handler::ScrapeHandler; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - - use super::{gen_remote_fingerprint, TorrentPeerBuilder}; - use crate::packages; - use crate::servers::udp::connection_cookie::make; - use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::{ - initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, - sample_issue_time, - }; - - fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { - TorrentScrapeStatistics { - seeders: NumberOfPeers(0.into()), - completed: NumberOfDownloads(0.into()), - leechers: NumberOfPeers(0.into()), - } - } - - #[tokio::test] - async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let remote_addr = sample_ipv4_remote_addr(); - - let info_hash = InfoHash([0u8; 20]); - let info_hashes = vec![info_hash]; - - let request = ScrapeRequest { - connection_id: make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap(), - transaction_id: TransactionId(0i32.into()), - info_hashes, - }; - - let response = handle_scrape( - remote_addr, - &request, - &core_tracker_services.scrape_handler, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let expected_torrent_stats = vec![zeroed_torrent_statistics()]; - - assert_eq!( - response, - Response::from(ScrapeResponse { - transaction_id: request.transaction_id, - torrent_stats: expected_torrent_stats - }) - ); - } - - async fn add_a_seeder( - in_memory_torrent_repository: Arc, - remote_addr: &SocketAddr, - info_hash: &InfoHash, - ) { - let peer_id = PeerId([255u8; 20]); - - let peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) - 
.with_peer_address(*remote_addr) - .with_number_of_bytes_left(0) - .into(); - - let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer); - } - - fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { - let info_hashes = vec![*info_hash]; - - ScrapeRequest { - connection_id: make(gen_remote_fingerprint(remote_addr), sample_issue_time()).unwrap(), - transaction_id: TransactionId::new(0i32), - info_hashes, - } - } - - async fn add_a_sample_seeder_and_scrape( - in_memory_torrent_repository: Arc, - scrape_handler: Arc, - ) -> Response { - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let remote_addr = sample_ipv4_remote_addr(); - let info_hash = InfoHash([0u8; 20]); - - add_a_seeder(in_memory_torrent_repository.clone(), &remote_addr, &info_hash).await; - - let request = build_scrape_request(&remote_addr, &info_hash); - - handle_scrape( - remote_addr, - &request, - &scrape_handler, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap() - } - - fn match_scrape_response(response: Response) -> Option { - match response { - Response::Scrape(scrape_response) => Some(scrape_response), - _ => None, - } - } - - mod with_a_public_tracker { - use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - - use crate::servers::udp::handlers::tests::initialize_core_tracker_services_for_public_tracker; - use crate::servers::udp::handlers::tests::scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}; - - #[tokio::test] - async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { - let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let torrent_stats = match_scrape_response( - add_a_sample_seeder_and_scrape( - core_tracker_services.in_memory_torrent_repository.clone(), - core_tracker_services.scrape_handler.clone(), - ) - .await, - ); - - let expected_torrent_stats = vec![TorrentScrapeStatistics { - seeders: NumberOfPeers(1.into()), - completed: NumberOfDownloads(0.into()), - leechers: NumberOfPeers(0.into()), - }]; - - assert_eq!(torrent_stats.unwrap().torrent_stats, expected_torrent_stats); - } - } - - mod with_a_whitelisted_tracker { - use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - - use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::scrape_request::{ - add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, - }; - use crate::servers::udp::handlers::tests::{ - initialize_core_tracker_services_for_listed_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, - }; - - #[tokio::test] - async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); - - let remote_addr = sample_ipv4_remote_addr(); - let info_hash = InfoHash([0u8; 20]); - - add_a_seeder( - core_tracker_services.in_memory_torrent_repository.clone(), - &remote_addr, - &info_hash, - ) - .await; - - core_tracker_services.in_memory_whitelist.add(&info_hash.0.into()).await; - - let request = build_scrape_request(&remote_addr, &info_hash); - - let torrent_stats = match_scrape_response( - handle_scrape( - remote_addr, - 
&request, - &core_tracker_services.scrape_handler, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(), - ) - .unwrap(); - - let expected_torrent_stats = vec![TorrentScrapeStatistics { - seeders: NumberOfPeers(1.into()), - completed: NumberOfDownloads(0.into()), - leechers: NumberOfPeers(0.into()), - }]; - - assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); - } - - #[tokio::test] - async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); - - let remote_addr = sample_ipv4_remote_addr(); - let info_hash = InfoHash([0u8; 20]); - - add_a_seeder( - core_tracker_services.in_memory_torrent_repository.clone(), - &remote_addr, - &info_hash, - ) - .await; - - let request = build_scrape_request(&remote_addr, &info_hash); - - let torrent_stats = match_scrape_response( - handle_scrape( - remote_addr, - &request, - &core_tracker_services.scrape_handler, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(), - ) - .unwrap(); - - let expected_torrent_stats = vec![zeroed_torrent_statistics()]; - - assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); - } - } - - fn sample_scrape_request(remote_addr: &SocketAddr) -> ScrapeRequest { - let info_hash = InfoHash([0u8; 20]); - let info_hashes = vec![info_hash]; - - ScrapeRequest { - connection_id: make(gen_remote_fingerprint(remote_addr), sample_issue_time()).unwrap(), - transaction_id: TransactionId(0i32.into()), - info_hashes, - } - } - - mod using_ipv4 { - use std::future; - use std::sync::Arc; - - use mockall::predicate::eq; - - use super::sample_scrape_request; - use crate::packages::udp_tracker_core; - use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::{ - initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, - sample_ipv4_remote_addr, MockUdpStatsEventSender, - }; - - #[tokio::test] - async fn should_send_the_upd4_scrape_event() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock - .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp4Scrape)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - let remote_addr = sample_ipv4_remote_addr(); - - let (core_tracker_services, _core_udp_tracker_services) = - initialize_core_tracker_services_for_default_tracker_configuration(); - - handle_scrape( - remote_addr, - &sample_scrape_request(&remote_addr), - &core_tracker_services.scrape_handler, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - } - } - - mod using_ipv6 { - use std::future; - use std::sync::Arc; - - use mockall::predicate::eq; - - use super::sample_scrape_request; - use crate::packages::udp_tracker_core; - use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::{ - initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, - sample_ipv6_remote_addr, MockUdpStatsEventSender, - }; - - #[tokio::test] - async fn should_send_the_upd6_scrape_event() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock - .expect_send_event() - 
.with(eq(udp_tracker_core::statistics::event::Event::Udp6Scrape)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - let remote_addr = sample_ipv6_remote_addr(); - - let (core_tracker_services, _core_udp_tracker_services) = - initialize_core_tracker_services_for_default_tracker_configuration(); - - handle_scrape( - remote_addr, - &sample_scrape_request(&remote_addr), - &core_tracker_services.scrape_handler, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - } - } - } -} diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs new file mode 100644 index 000000000..79fb91f49 --- /dev/null +++ b/src/servers/udp/handlers/announce.rs @@ -0,0 +1,875 @@ +//! UDP tracker announce handler. +use std::net::{IpAddr, SocketAddr}; +use std::ops::Range; +use std::sync::Arc; + +use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceRequest, AnnounceResponse, AnnounceResponseFixedData, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, + Port, Response, ResponsePeer, TransactionId, +}; +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::whitelist; +use torrust_tracker_configuration::Core; +use tracing::{instrument, Level}; +use zerocopy::network_endian::I32; + +use crate::packages::udp_tracker_core; +use crate::servers::udp::connection_cookie::check; +use crate::servers::udp::error::Error; +use crate::servers::udp::handlers::gen_remote_fingerprint; +use crate::servers::udp::peer_builder; + +/// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) +/// request for more information. +/// +/// # Errors +/// +/// If a error happens in the `handle_announce` function, it will just return the `ServerError`. 
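For orientation before the implementation, an illustrative sketch (not from the original patch) of the IPv4 response shape this handler builds, using only constructors already imported above; the interval, the swarm counters and the empty peer list are placeholder values:

// Illustrative sketch only: the IPv4 announce response is a fixed part
// (transaction id, interval, swarm counters) plus a list of IPv4 peers.
fn ipv4_announce_response_sketch() -> Response {
    Response::from(AnnounceResponse {
        fixed: AnnounceResponseFixedData {
            transaction_id: TransactionId(0i32.into()),
            announce_interval: AnnounceInterval(120i32.into()),
            leechers: NumberOfPeers(0i32.into()),
            seeders: NumberOfPeers(1i32.into()),
        },
        peers: Vec::<ResponsePeer<Ipv4AddrBytes>>::new(),
    })
}

The IPv6 branch of the handler below has the same shape, except that it collects `ResponsePeer<Ipv6AddrBytes>` entries.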
+#[allow(clippy::too_many_arguments)] +#[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_handler, whitelist_authorization, opt_udp_stats_event_sender), ret(level = Level::TRACE))] +pub async fn handle_announce( + remote_addr: SocketAddr, + request: &AnnounceRequest, + core_config: &Arc, + announce_handler: &Arc, + whitelist_authorization: &Arc, + opt_udp_stats_event_sender: &Arc>>, + cookie_valid_range: Range, +) -> Result { + tracing::Span::current() + .record("transaction_id", request.transaction_id.0.to_string()) + .record("connection_id", request.connection_id.0.to_string()) + .record("info_hash", InfoHash::from_bytes(&request.info_hash.0).to_hex_string()); + + tracing::trace!("handle announce"); + + check( + &request.connection_id, + gen_remote_fingerprint(&remote_addr), + cookie_valid_range, + ) + .map_err(|e| (e, request.transaction_id))?; + + let info_hash = request.info_hash.into(); + let remote_client_ip = remote_addr.ip(); + + // Authorization + whitelist_authorization + .authorize(&info_hash) + .await + .map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + }) + .map_err(|e| (e, request.transaction_id))?; + + let mut peer = peer_builder::from_request(request, &remote_client_ip); + let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); + + let response = announce_handler.announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted); + + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_client_ip { + IpAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Announce) + .await; + } + IpAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Announce) + .await; + } + } + } + + #[allow(clippy::cast_possible_truncation)] + if remote_addr.is_ipv4() { + let announce_response = AnnounceResponse { + fixed: AnnounceResponseFixedData { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(I32::new(i64::from(core_config.announce_policy.interval) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), + seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), + }, + peers: response + .peers + .iter() + .filter_map(|peer| { + if let IpAddr::V4(ip) = peer.peer_addr.ip() { + Some(ResponsePeer:: { + ip_address: ip.into(), + port: Port(peer.peer_addr.port().into()), + }) + } else { + None + } + }) + .collect(), + }; + + Ok(Response::from(announce_response)) + } else { + let announce_response = AnnounceResponse { + fixed: AnnounceResponseFixedData { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(I32::new(i64::from(core_config.announce_policy.interval) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), + seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), + }, + peers: response + .peers + .iter() + .filter_map(|peer| { + if let IpAddr::V6(ip) = peer.peer_addr.ip() { + Some(ResponsePeer:: { + ip_address: ip.into(), + port: Port(peer.peer_addr.port().into()), + }) + } else { + None + } + }) + .collect(), + }; + + Ok(Response::from(announce_response)) + } +} + +#[cfg(test)] +mod tests { + + mod announce_request { + + use std::net::Ipv4Addr; + use std::num::NonZeroU16; + + use aquatic_udp_protocol::{ + AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectionId, NumberOfBytes, NumberOfPeers, + PeerId as 
AquaticPeerId, PeerKey, Port, TransactionId, + }; + + use crate::servers::udp::connection_cookie::make; + use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr_fingerprint, sample_issue_time}; + + struct AnnounceRequestBuilder { + request: AnnounceRequest, + } + + impl AnnounceRequestBuilder { + pub fn default() -> AnnounceRequestBuilder { + let client_ip = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); + + let default_request = AnnounceRequest { + connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), + action_placeholder: AnnounceActionPlaceholder::default(), + transaction_id: TransactionId(0i32.into()), + info_hash: info_hash_aquatic, + peer_id: AquaticPeerId([255u8; 20]), + bytes_downloaded: NumberOfBytes(0i64.into()), + bytes_uploaded: NumberOfBytes(0i64.into()), + bytes_left: NumberOfBytes(0i64.into()), + event: AnnounceEvent::Started.into(), + ip_address: client_ip.into(), + key: PeerKey::new(0i32), + peers_wanted: NumberOfPeers::new(1i32), + port: Port::new(NonZeroU16::new(client_port).expect("a non-zero client port")), + }; + AnnounceRequestBuilder { + request: default_request, + } + } + + pub fn with_connection_id(mut self, connection_id: ConnectionId) -> Self { + self.request.connection_id = connection_id; + self + } + + pub fn with_info_hash(mut self, info_hash: aquatic_udp_protocol::InfoHash) -> Self { + self.request.info_hash = info_hash; + self + } + + pub fn with_peer_id(mut self, peer_id: AquaticPeerId) -> Self { + self.request.peer_id = peer_id; + self + } + + pub fn with_ip_address(mut self, ip_address: Ipv4Addr) -> Self { + self.request.ip_address = ip_address.into(); + self + } + + pub fn with_port(mut self, port: u16) -> Self { + self.request.port = Port(port.into()); + self + } + + pub fn into(self) -> AnnounceRequest { + self.request + } + } + + mod using_ipv4 { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceResponse, AnnounceResponseFixedData, InfoHash as AquaticInfoHash, Ipv4AddrBytes, + Ipv6AddrBytes, NumberOfPeers, PeerId as AquaticPeerId, Response, ResponsePeer, + }; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::whitelist; + use mockall::predicate::eq; + use torrust_tracker_configuration::Core; + + use crate::packages::{self, udp_tracker_core}; + use crate::servers::udp::connection_cookie::make; + use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_default_tracker_configuration, + initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, + sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, + }; + use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_announce}; + + #[tokio::test] + async fn an_announced_peer_should_be_added_to_the_tracker() { + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let client_ip = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + + let 
request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip) + .with_port(client_port) + .into(); + + handle_announce( + remote_addr, + &request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let peers = core_tracker_services + .in_memory_torrent_repository + .get_torrent_peers(&info_hash.0.into()); + + let expected_peer = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip), client_port)) + .into(); + + assert_eq!(peers[0], Arc::new(expected_peer)); + } + + #[tokio::test] + async fn the_announced_peer_should_not_be_included_in_the_response() { + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .into(); + + let response = handle_announce( + remote_addr, + &request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let empty_peer_vector: Vec> = vec![]; + assert_eq!( + response, + Response::from(AnnounceResponse { + fixed: AnnounceResponseFixedData { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32.into()), + leechers: NumberOfPeers(0i32.into()), + seeders: NumberOfPeers(1i32.into()), + }, + peers: empty_peer_vector + }) + ); + } + + #[tokio::test] + async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( + ) { + // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): + // "Do note that most trackers will only honor the IP address field under limited circumstances." 
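// Editor's note (descriptive aside, not from the original patch): as shown in
// the handler above, the stored peer address takes its IP from the UDP
// packet's source address (`remote_addr.ip()`) via `peer_builder::from_request`,
// while the port comes from the announce request. That is why the assertion
// below expects `remote_client_ip` paired with `client_port` rather than
// `peer_address` or `remote_client_port`.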
+ + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + let client_port = 8080; + + let remote_client_ip = Ipv4Addr::new(126, 0, 0, 1); + let remote_client_port = 8081; + let peer_address = Ipv4Addr::new(126, 0, 0, 2); + + let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(peer_address) + .with_port(client_port) + .into(); + + handle_announce( + remote_addr, + &request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let peers = core_tracker_services + .in_memory_torrent_repository + .get_torrent_peers(&info_hash.0.into()); + + assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); + } + + fn add_a_torrent_peer_using_ipv6(in_memory_torrent_repository: &Arc) { + let info_hash = AquaticInfoHash([0u8; 20]); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let peer_id = AquaticPeerId([255u8; 20]); + + let peer_using_ipv6 = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .into(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6); + } + + async fn announce_a_new_peer_using_ipv4( + core_config: Arc, + announce_handler: Arc, + whitelist_authorization: Arc, + ) -> Response { + let (udp_stats_event_sender, _udp_stats_repository) = + packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .into(); + + handle_announce( + remote_addr, + &request, + &core_config, + &announce_handler, + &whitelist_authorization, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap() + } + + #[tokio::test] + async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { + let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository); + + let response = announce_a_new_peer_using_ipv4( + core_tracker_services.core_config.clone(), + core_tracker_services.announce_handler.clone(), + core_tracker_services.whitelist_authorization, + ) + .await; + + // The response should not contain the peer using IPV6 + let peers: Option>> = match response { + Response::AnnounceIpv6(announce_response) => Some(announce_response.peers), + _ => None, + }; + let no_ipv6_peers = peers.is_none(); + assert!(no_ipv6_peers); + } + + #[tokio::test] + async fn should_send_the_upd4_announce_event() { + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + 
udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let (core_tracker_services, _core_udp_tracker_services) = + initialize_core_tracker_services_for_default_tracker_configuration(); + + handle_announce( + sample_ipv4_socket_address(), + &AnnounceRequestBuilder::default().into(), + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + } + + mod from_a_loopback_ip { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + + use crate::servers::udp::connection_cookie::make; + use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_issue_time, + TorrentPeerBuilder, + }; + use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_announce}; + + #[tokio::test] + async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { + let (core_tracker_services, core_udp_tracker_services) = + initialize_core_tracker_services_for_public_tracker(); + + let client_ip = Ipv4Addr::new(127, 0, 0, 1); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip) + .with_port(client_port) + .into(); + + handle_announce( + remote_addr, + &request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let peers = core_tracker_services + .in_memory_torrent_repository + .get_torrent_peers(&info_hash.0.into()); + + let external_ip_in_tracker_configuration = core_tracker_services.core_config.net.external_ip.unwrap(); + + let expected_peer = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) + .into(); + + assert_eq!(peers[0], Arc::new(expected_peer)); + } + } + } + + mod using_ipv6 { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceResponse, AnnounceResponseFixedData, InfoHash as AquaticInfoHash, Ipv4AddrBytes, + Ipv6AddrBytes, NumberOfPeers, PeerId as AquaticPeerId, Response, ResponsePeer, + }; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::whitelist; + use mockall::predicate::eq; + use torrust_tracker_configuration::Core; + + use crate::packages::{self, udp_tracker_core}; + use crate::servers::udp::connection_cookie::make; + use 
crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_default_tracker_configuration, + initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, + sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, + }; + use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_announce}; + + #[tokio::test] + async fn an_announced_peer_should_be_added_to_the_tracker() { + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); + + handle_announce( + remote_addr, + &request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let peers = core_tracker_services + .in_memory_torrent_repository + .get_torrent_peers(&info_hash.0.into()); + + let expected_peer = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .into(); + + assert_eq!(peers[0], Arc::new(expected_peer)); + } + + #[tokio::test] + async fn the_announced_peer_should_not_be_included_in_the_response() { + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .into(); + + let response = handle_announce( + remote_addr, + &request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let empty_peer_vector: Vec> = vec![]; + assert_eq!( + response, + Response::from(AnnounceResponse { + fixed: AnnounceResponseFixedData { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32.into()), + leechers: NumberOfPeers(0i32.into()), + seeders: NumberOfPeers(1i32.into()), + }, + peers: empty_peer_vector + }) + ); + } + + #[tokio::test] + async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( + ) { + // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): + // "Do note that most trackers will only honor the IP address field under limited circumstances." 
+ + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + let client_port = 8080; + + let remote_client_ip = "::100".parse().unwrap(); // IPV4 ::0.0.1.0 -> IPV6 = ::100 = ::ffff:0:100 = 0:0:0:0:0:ffff:0:0100 + let remote_client_port = 8081; + let peer_address = "126.0.0.1".parse().unwrap(); + + let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(peer_address) + .with_port(client_port) + .into(); + + handle_announce( + remote_addr, + &request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let peers = core_tracker_services + .in_memory_torrent_repository + .get_torrent_peers(&info_hash.0.into()); + + // When using IPv6 the tracker converts the remote client ip into a IPv4 address + assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); + } + + fn add_a_torrent_peer_using_ipv4(in_memory_torrent_repository: &Arc) { + let info_hash = AquaticInfoHash([0u8; 20]); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let peer_id = AquaticPeerId([255u8; 20]); + + let peer_using_ipv4 = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) + .into(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4); + } + + async fn announce_a_new_peer_using_ipv6( + core_config: Arc, + announce_handler: Arc, + whitelist_authorization: Arc, + ) -> Response { + let (udp_stats_event_sender, _udp_stats_repository) = + packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .into(); + + handle_announce( + remote_addr, + &request, + &core_config, + &announce_handler, + &whitelist_authorization, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap() + } + + #[tokio::test] + async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { + let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository); + + let response = announce_a_new_peer_using_ipv6( + core_tracker_services.core_config.clone(), + core_tracker_services.announce_handler.clone(), + core_tracker_services.whitelist_authorization, + ) + .await; + + // The response should not contain the peer using IPV4 + let peers: Option>> = match response { + Response::AnnounceIpv4(announce_response) => Some(announce_response.peers), + _ => None, + }; + let 
no_ipv4_peers = peers.is_none(); + assert!(no_ipv4_peers); + } + + #[tokio::test] + async fn should_send_the_upd6_announce_event() { + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let (core_tracker_services, _core_udp_tracker_services) = + initialize_core_tracker_services_for_default_tracker_configuration(); + + let remote_addr = sample_ipv6_remote_addr(); + + let announce_request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .into(); + + handle_announce( + remote_addr, + &announce_request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + } + + mod from_a_loopback_ip { + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; + use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use mockall::predicate::eq; + + use crate::packages::udp_tracker_core; + use crate::servers::udp::connection_cookie::make; + use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ + sample_cookie_valid_range, sample_issue_time, MockUdpStatsEventSender, TrackerConfigurationBuilder, + }; + use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_announce}; + + #[tokio::test] + async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { + let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); + + let database = initialize_database(&config.core); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = + Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); + let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + + let client_ip_v4 = 
loopback_ipv4; + let client_ip_v6 = loopback_ipv6; + let client_port = 8080; + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); + + let core_config = Arc::new(config.core.clone()); + + handle_announce( + remote_addr, + &request, + &core_config, + &announce_handler, + &whitelist_authorization, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + + let external_ip_in_tracker_configuration = core_config.net.external_ip.unwrap(); + + assert!(external_ip_in_tracker_configuration.is_ipv6()); + + // There's a special type of IPv6 addresses that provide compatibility with IPv4. + // The last 32 bits of these addresses represent an IPv4, and are represented like this: + // 1111:2222:3333:4444:5555:6666:1.2.3.4 + // + // ::127.0.0.1 is the IPV6 representation for the IPV4 address 127.0.0.1. + assert_eq!(Ok(peers[0].peer_addr.ip()), "::126.0.0.1".parse()); + } + } + } + } +} diff --git a/src/servers/udp/handlers/connect.rs b/src/servers/udp/handlers/connect.rs new file mode 100644 index 000000000..431c3bb4d --- /dev/null +++ b/src/servers/udp/handlers/connect.rs @@ -0,0 +1,199 @@ +//! UDP tracker connect handler. +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response}; +use tracing::{instrument, Level}; + +use crate::packages::udp_tracker_core; +use crate::servers::udp::connection_cookie::make; +use crate::servers::udp::handlers::gen_remote_fingerprint; + +/// It handles the `Connect` request. Refer to [`Connect`](crate::servers::udp#connect) +/// request for more information. +/// +/// # Errors +/// +/// This function does not ever return an error. 
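The cookie minted here with `make` is what the announce and scrape handlers later validate with `check`. A self-contained illustrative sketch (not from the original patch) of that round trip, with assumed issue-time and lifetime values; in the running server those values come from `CookieTimeValues` in the handlers module:

// Illustrative sketch only: mint a connection cookie for a client fingerprint
// and verify it against the same validity-window shape that
// `CookieTimeValues::new` produces for announce/scrape.
fn cookie_round_trip_sketch() {
    use crate::servers::udp::connection_cookie::{check, make};
    use crate::servers::udp::handlers::gen_remote_fingerprint;

    let remote_addr: std::net::SocketAddr = "126.0.0.1:8080".parse().unwrap();
    let fingerprint = gen_remote_fingerprint(&remote_addr);

    let issue_time = 1_000_000.0_f64; // assumed issue time, in seconds
    let lifetime = 120.0_f64;         // assumed cookie lifetime, in seconds

    let connection_id = make(fingerprint, issue_time).expect("cookie should be created");

    // Window shape used by `CookieTimeValues::new`: (now - lifetime - 1.0)..(now + 1.0).
    let valid_range = (issue_time - lifetime - 1.0)..(issue_time + 1.0);
    assert!(check(&connection_id, fingerprint, valid_range).is_ok());
}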
+#[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] +pub async fn handle_connect( + remote_addr: SocketAddr, + request: &ConnectRequest, + opt_udp_stats_event_sender: &Arc>>, + cookie_issue_time: f64, +) -> Response { + tracing::Span::current().record("transaction_id", request.transaction_id.0.to_string()); + + tracing::trace!("handle connect"); + + let connection_id = make(gen_remote_fingerprint(&remote_addr), cookie_issue_time).expect("it should be a normal value"); + + let response = ConnectResponse { + transaction_id: request.transaction_id, + connection_id, + }; + + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Connect) + .await; + } + SocketAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Connect) + .await; + } + } + } + + Response::from(response) +} + +#[cfg(test)] +mod tests { + + mod connect_request { + + use std::future; + use std::sync::Arc; + + use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; + use mockall::predicate::eq; + + use crate::packages::{self, udp_tracker_core}; + use crate::servers::udp::connection_cookie::make; + use crate::servers::udp::handlers::handle_connect; + use crate::servers::udp::handlers::tests::{ + sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, + sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpStatsEventSender, + }; + + fn sample_connect_request() -> ConnectRequest { + ConnectRequest { + transaction_id: TransactionId(0i32.into()), + } + } + + #[tokio::test] + async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let request = ConnectRequest { + transaction_id: TransactionId(0i32.into()), + }; + + let response = handle_connect( + sample_ipv4_remote_addr(), + &request, + &udp_stats_event_sender, + sample_issue_time(), + ) + .await; + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), + transaction_id: request.transaction_id + }) + ); + } + + #[tokio::test] + async fn a_connect_response_should_contain_a_new_connection_id() { + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let request = ConnectRequest { + transaction_id: TransactionId(0i32.into()), + }; + + let response = handle_connect( + sample_ipv4_remote_addr(), + &request, + &udp_stats_event_sender, + sample_issue_time(), + ) + .await; + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), + transaction_id: request.transaction_id + }) + ); + } + + #[tokio::test] + async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let request = ConnectRequest { + 
transaction_id: TransactionId(0i32.into()), + }; + + let response = handle_connect( + sample_ipv6_remote_addr(), + &request, + &udp_stats_event_sender, + sample_issue_time(), + ) + .await; + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: make(sample_ipv6_remote_addr_fingerprint(), sample_issue_time()).unwrap(), + transaction_id: request.transaction_id + }) + ); + } + + #[tokio::test] + async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp4Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let client_socket_address = sample_ipv4_socket_address(); + + handle_connect( + client_socket_address, + &sample_connect_request(), + &udp_stats_event_sender, + sample_issue_time(), + ) + .await; + } + + #[tokio::test] + async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp6Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + handle_connect( + sample_ipv6_remote_addr(), + &sample_connect_request(), + &udp_stats_event_sender, + sample_issue_time(), + ) + .await; + } + } +} diff --git a/src/servers/udp/handlers/error.rs b/src/servers/udp/handlers/error.rs new file mode 100644 index 000000000..36095eeed --- /dev/null +++ b/src/servers/udp/handlers/error.rs @@ -0,0 +1,80 @@ +//! UDP tracker error handling. 
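A brief illustrative sketch (not from the original patch) of the contract implemented by the handler that follows: every failure is reduced to a message plus an optional `TransactionId` and answered with an `ErrorResponse`, falling back to transaction id 0 when the original id could not be recovered. The message argument is a placeholder:

// Illustrative sketch only: the response shape produced when no transaction
// id could be recovered from the failed request.
use aquatic_udp_protocol::{ErrorResponse, Response, TransactionId};
use zerocopy::network_endian::I32;

fn fallback_error_response_sketch(message: String, transaction_id: Option<TransactionId>) -> Response {
    Response::from(ErrorResponse {
        transaction_id: transaction_id.unwrap_or(TransactionId(I32::new(0))),
        message: message.into(),
    })
}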
+use std::net::SocketAddr; +use std::ops::Range; +use std::sync::Arc; + +use aquatic_udp_protocol::{ErrorResponse, RequestParseError, Response, TransactionId}; +use tracing::{instrument, Level}; +use uuid::Uuid; +use zerocopy::network_endian::I32; + +use crate::packages::udp_tracker_core; +use crate::servers::udp::connection_cookie::check; +use crate::servers::udp::error::Error; +use crate::servers::udp::handlers::gen_remote_fingerprint; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +#[allow(clippy::too_many_arguments)] +#[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] +pub async fn handle_error( + remote_addr: SocketAddr, + local_addr: SocketAddr, + request_id: Uuid, + opt_udp_stats_event_sender: &Arc>>, + cookie_valid_range: Range, + e: &Error, + transaction_id: Option, +) -> Response { + tracing::trace!("handle error"); + + match transaction_id { + Some(transaction_id) => { + let transaction_id = transaction_id.0.to_string(); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %remote_addr, %local_addr, %request_id, %transaction_id, "response error"); + } + None => { + tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %remote_addr, %local_addr, %request_id, "response error"); + } + } + + let e = if let Error::RequestParseError { request_parse_error } = e { + match request_parse_error { + RequestParseError::Sendable { + connection_id, + transaction_id, + err, + } => { + if let Err(e) = check(connection_id, gen_remote_fingerprint(&remote_addr), cookie_valid_range) { + (e.to_string(), Some(*transaction_id)) + } else { + ((*err).to_string(), Some(*transaction_id)) + } + } + RequestParseError::Unsendable { err } => (err.to_string(), transaction_id), + } + } else { + (e.to_string(), transaction_id) + }; + + if e.1.is_some() { + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Error) + .await; + } + SocketAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Error) + .await; + } + } + } + } + + Response::from(ErrorResponse { + transaction_id: e.1.unwrap_or(TransactionId(I32::new(0))), + message: e.0.into(), + }) +} diff --git a/src/servers/udp/handlers/mod.rs b/src/servers/udp/handlers/mod.rs new file mode 100644 index 000000000..252a5be02 --- /dev/null +++ b/src/servers/udp/handlers/mod.rs @@ -0,0 +1,366 @@ +//! Handlers for the UDP server. 
+pub mod announce; +pub mod connect; +pub mod error; +pub mod scrape; + +use std::hash::{DefaultHasher, Hash, Hasher as _}; +use std::net::SocketAddr; +use std::ops::Range; +use std::sync::Arc; +use std::time::Instant; + +use announce::handle_announce; +use aquatic_udp_protocol::{Request, Response, TransactionId}; +use connect::handle_connect; +use error::handle_error; +use scrape::handle_scrape; +use torrust_tracker_clock::clock::Time as _; +use tracing::{instrument, Level}; +use uuid::Uuid; + +use super::RawRequest; +use crate::container::UdpTrackerContainer; +use crate::servers::udp::error::Error; +use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; +use crate::CurrentClock; + +#[derive(Debug, Clone, PartialEq)] +pub(super) struct CookieTimeValues { + pub(super) issue_time: f64, + pub(super) valid_range: Range, +} + +impl CookieTimeValues { + pub(super) fn new(cookie_lifetime: f64) -> Self { + let issue_time = CurrentClock::now().as_secs_f64(); + let expiry_time = issue_time - cookie_lifetime - 1.0; + let tolerance_max_time = issue_time + 1.0; + + Self { + issue_time, + valid_range: expiry_time..tolerance_max_time, + } + } +} + +/// It handles the incoming UDP packets. +/// +/// It's responsible for: +/// +/// - Parsing the incoming packet. +/// - Delegating the request to the correct handler depending on the request type. +/// +/// It will return an `Error` response if the request is invalid. +#[instrument(fields(request_id), skip(udp_request, udp_tracker_container, cookie_time_values), ret(level = Level::TRACE))] +pub(crate) async fn handle_packet( + udp_request: RawRequest, + udp_tracker_container: Arc, + local_addr: SocketAddr, + cookie_time_values: CookieTimeValues, +) -> Response { + let request_id = Uuid::new_v4(); + + tracing::Span::current().record("request_id", request_id.to_string()); + tracing::debug!("Handling Packets: {udp_request:?}"); + + let start_time = Instant::now(); + + let response = + match Request::parse_bytes(&udp_request.payload[..udp_request.payload.len()], MAX_SCRAPE_TORRENTS).map_err(Error::from) { + Ok(request) => match handle_request( + request, + udp_request.from, + udp_tracker_container.clone(), + cookie_time_values.clone(), + ) + .await + { + Ok(response) => return response, + Err((e, transaction_id)) => { + match &e { + Error::CookieValueNotNormal { .. } + | Error::CookieValueExpired { .. } + | Error::CookieValueFromFuture { .. } => { + // code-review: should we include `RequestParseError` and `BadRequest`? + let mut ban_service = udp_tracker_container.ban_service.write().await; + ban_service.increase_counter(&udp_request.from.ip()); + } + _ => {} + } + + handle_error( + udp_request.from, + local_addr, + request_id, + &udp_tracker_container.udp_stats_event_sender, + cookie_time_values.valid_range.clone(), + &e, + Some(transaction_id), + ) + .await + } + }, + Err(e) => { + handle_error( + udp_request.from, + local_addr, + request_id, + &udp_tracker_container.udp_stats_event_sender, + cookie_time_values.valid_range.clone(), + &e, + None, + ) + .await + } + }; + + let latency = start_time.elapsed(); + tracing::trace!(?latency, "responded"); + + response +} + +/// It dispatches the request to the correct handler. +/// +/// # Errors +/// +/// If a error happens in the `handle_request` function, it will just return the `ServerError`. 
+#[instrument(skip(request, remote_addr, udp_tracker_container, cookie_time_values))] +pub async fn handle_request( + request: Request, + remote_addr: SocketAddr, + udp_tracker_container: Arc, + cookie_time_values: CookieTimeValues, +) -> Result { + tracing::trace!("handle request"); + + match request { + Request::Connect(connect_request) => Ok(handle_connect( + remote_addr, + &connect_request, + &udp_tracker_container.udp_stats_event_sender, + cookie_time_values.issue_time, + ) + .await), + Request::Announce(announce_request) => { + handle_announce( + remote_addr, + &announce_request, + &udp_tracker_container.core_config, + &udp_tracker_container.announce_handler, + &udp_tracker_container.whitelist_authorization, + &udp_tracker_container.udp_stats_event_sender, + cookie_time_values.valid_range, + ) + .await + } + Request::Scrape(scrape_request) => { + handle_scrape( + remote_addr, + &scrape_request, + &udp_tracker_container.scrape_handler, + &udp_tracker_container.udp_stats_event_sender, + cookie_time_values.valid_range, + ) + .await + } + } +} + +#[must_use] +pub(crate) fn gen_remote_fingerprint(remote_addr: &SocketAddr) -> u64 { + let mut state = DefaultHasher::new(); + remote_addr.hash(&mut state); + state.finish() +} + +#[cfg(test)] +pub(crate) mod tests { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::ops::Range; + use std::sync::Arc; + + use aquatic_udp_protocol::{NumberOfBytes, PeerId}; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::whitelist; + use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; + use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use futures::future::BoxFuture; + use mockall::mock; + use tokio::sync::mpsc::error::SendError; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_configuration::{Configuration, Core}; + use torrust_tracker_primitives::peer; + use torrust_tracker_test_helpers::configuration; + + use super::gen_remote_fingerprint; + use crate::packages::udp_tracker_core; + use crate::{packages, CurrentClock}; + + pub(crate) struct CoreTrackerServices { + pub core_config: Arc, + pub announce_handler: Arc, + pub scrape_handler: Arc, + pub in_memory_torrent_repository: Arc, + pub in_memory_whitelist: Arc, + pub whitelist_authorization: Arc, + } + + pub(crate) struct CoreUdpTrackerServices { + pub udp_stats_event_sender: Arc>>, + } + + fn default_testing_tracker_configuration() -> Configuration { + configuration::ephemeral() + } + + pub(crate) fn initialize_core_tracker_services_for_default_tracker_configuration( + ) -> (CoreTrackerServices, CoreUdpTrackerServices) { + initialize_core_tracker_services(&default_testing_tracker_configuration()) + } + + pub(crate) fn initialize_core_tracker_services_for_public_tracker() -> (CoreTrackerServices, CoreUdpTrackerServices) { + initialize_core_tracker_services(&configuration::ephemeral_public()) + } + + pub(crate) fn initialize_core_tracker_services_for_listed_tracker() -> (CoreTrackerServices, CoreUdpTrackerServices) { + initialize_core_tracker_services(&configuration::ephemeral_listed()) + } + + fn initialize_core_tracker_services(config: &Configuration) 
-> (CoreTrackerServices, CoreUdpTrackerServices) { + let core_config = Arc::new(config.core.clone()); + let database = initialize_database(&config.core); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + ( + CoreTrackerServices { + core_config, + announce_handler, + scrape_handler, + in_memory_torrent_repository, + in_memory_whitelist, + whitelist_authorization, + }, + CoreUdpTrackerServices { udp_stats_event_sender }, + ) + } + + pub(crate) fn sample_ipv4_remote_addr() -> SocketAddr { + sample_ipv4_socket_address() + } + + pub(crate) fn sample_ipv4_remote_addr_fingerprint() -> u64 { + gen_remote_fingerprint(&sample_ipv4_socket_address()) + } + + pub(crate) fn sample_ipv6_remote_addr() -> SocketAddr { + sample_ipv6_socket_address() + } + + pub(crate) fn sample_ipv6_remote_addr_fingerprint() -> u64 { + gen_remote_fingerprint(&sample_ipv6_socket_address()) + } + + pub(crate) fn sample_ipv4_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + } + + fn sample_ipv6_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + } + + pub(crate) fn sample_issue_time() -> f64 { + 1_000_000_000_f64 + } + + pub(crate) fn sample_cookie_valid_range() -> Range { + sample_issue_time() - 10.0..sample_issue_time() + 10.0 + } + + #[derive(Debug, Default)] + pub(crate) struct TorrentPeerBuilder { + peer: peer::Peer, + } + + impl TorrentPeerBuilder { + #[must_use] + pub fn new() -> Self { + Self { + peer: peer::Peer { + updated: CurrentClock::now(), + ..Default::default() + }, + } + } + + #[must_use] + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + #[must_use] + pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { + self.peer.peer_id = peer_id; + self + } + + #[must_use] + pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes::new(left); + self + } + + #[must_use] + pub fn into(self) -> peer::Peer { + self.peer + } + } + + pub(crate) struct TrackerConfigurationBuilder { + configuration: Configuration, + } + + impl TrackerConfigurationBuilder { + pub fn default() -> TrackerConfigurationBuilder { + let default_configuration = default_testing_tracker_configuration(); + TrackerConfigurationBuilder { + configuration: default_configuration, + } + } + + pub fn with_external_ip(mut self, external_ip: &str) -> Self { + self.configuration.core.net.external_ip = Some(external_ip.to_owned().parse().expect("valid IP address")); + self + } + + pub fn into(self) -> Configuration { + self.configuration + } + } + + mock! 
{ + pub(crate) UdpStatsEventSender {} + impl udp_tracker_core::statistics::event::sender::Sender for UdpStatsEventSender { + fn send_event(&self, event: udp_tracker_core::statistics::event::Event) -> BoxFuture<'static,Option > > > ; + } + } +} diff --git a/src/servers/udp/handlers/scrape.rs b/src/servers/udp/handlers/scrape.rs new file mode 100644 index 000000000..2c8ca335a --- /dev/null +++ b/src/servers/udp/handlers/scrape.rs @@ -0,0 +1,429 @@ +//! UDP tracker scrape handler. +use std::net::SocketAddr; +use std::ops::Range; +use std::sync::Arc; + +use aquatic_udp_protocol::{ + NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, +}; +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use tracing::{instrument, Level}; +use zerocopy::network_endian::I32; + +use crate::packages::udp_tracker_core; +use crate::servers::udp::connection_cookie::check; +use crate::servers::udp::error::Error; +use crate::servers::udp::handlers::gen_remote_fingerprint; + +/// It handles the `Scrape` request. Refer to [`Scrape`](crate::servers::udp#scrape) +/// request for more information. +/// +/// # Errors +/// +/// This function does not ever return an error. +#[instrument(fields(transaction_id, connection_id), skip(scrape_handler, opt_udp_stats_event_sender), ret(level = Level::TRACE))] +pub async fn handle_scrape( + remote_addr: SocketAddr, + request: &ScrapeRequest, + scrape_handler: &Arc, + opt_udp_stats_event_sender: &Arc>>, + cookie_valid_range: Range, +) -> Result { + tracing::Span::current() + .record("transaction_id", request.transaction_id.0.to_string()) + .record("connection_id", request.connection_id.0.to_string()); + + tracing::trace!("handle scrape"); + + check( + &request.connection_id, + gen_remote_fingerprint(&remote_addr), + cookie_valid_range, + ) + .map_err(|e| (e, request.transaction_id))?; + + // Convert from aquatic infohashes + let mut info_hashes: Vec = vec![]; + for info_hash in &request.info_hashes { + info_hashes.push((*info_hash).into()); + } + + let scrape_data = scrape_handler.scrape(&info_hashes).await; + + let mut torrent_stats: Vec = Vec::new(); + + for file in &scrape_data.files { + let swarm_metadata = file.1; + + #[allow(clippy::cast_possible_truncation)] + let scrape_entry = { + TorrentScrapeStatistics { + seeders: NumberOfPeers(I32::new(i64::from(swarm_metadata.complete) as i32)), + completed: NumberOfDownloads(I32::new(i64::from(swarm_metadata.downloaded) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(swarm_metadata.incomplete) as i32)), + } + }; + + torrent_stats.push(scrape_entry); + } + + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Scrape) + .await; + } + SocketAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Scrape) + .await; + } + } + } + + let response = ScrapeResponse { + transaction_id: request.transaction_id, + torrent_stats, + }; + + Ok(Response::from(response)) +} + +#[cfg(test)] +mod tests { + + mod scrape_request { + use std::net::SocketAddr; + use std::sync::Arc; + + use aquatic_udp_protocol::{ + InfoHash, NumberOfDownloads, NumberOfPeers, PeerId, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, + TransactionId, + }; + use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use 
bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + + use crate::packages; + use crate::servers::udp::connection_cookie::make; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, + sample_issue_time, TorrentPeerBuilder, + }; + use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_scrape}; + + fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { + TorrentScrapeStatistics { + seeders: NumberOfPeers(0.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), + } + } + + #[tokio::test] + async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + + let info_hash = InfoHash([0u8; 20]); + let info_hashes = vec![info_hash]; + + let request = ScrapeRequest { + connection_id: make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap(), + transaction_id: TransactionId(0i32.into()), + info_hashes, + }; + + let response = handle_scrape( + remote_addr, + &request, + &core_tracker_services.scrape_handler, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!( + response, + Response::from(ScrapeResponse { + transaction_id: request.transaction_id, + torrent_stats: expected_torrent_stats + }) + ); + } + + async fn add_a_seeder( + in_memory_torrent_repository: Arc, + remote_addr: &SocketAddr, + info_hash: &InfoHash, + ) { + let peer_id = PeerId([255u8; 20]); + + let peer = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(*remote_addr) + .with_number_of_bytes_left(0) + .into(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer); + } + + fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { + let info_hashes = vec![*info_hash]; + + ScrapeRequest { + connection_id: make(gen_remote_fingerprint(remote_addr), sample_issue_time()).unwrap(), + transaction_id: TransactionId::new(0i32), + info_hashes, + } + } + + async fn add_a_sample_seeder_and_scrape( + in_memory_torrent_repository: Arc, + scrape_handler: Arc, + ) -> Response { + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder(in_memory_torrent_repository.clone(), &remote_addr, &info_hash).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + handle_scrape( + remote_addr, + &request, + &scrape_handler, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap() + } + + fn match_scrape_response(response: Response) -> Option { + match response { + Response::Scrape(scrape_response) => Some(scrape_response), + _ => None, + } + } + + mod with_a_public_tracker { + use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; + + use crate::servers::udp::handlers::scrape::tests::scrape_request::{ + add_a_sample_seeder_and_scrape, match_scrape_response, + }; + use crate::servers::udp::handlers::tests::initialize_core_tracker_services_for_public_tracker; + + 
#[tokio::test] + async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { + let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let torrent_stats = match_scrape_response( + add_a_sample_seeder_and_scrape( + core_tracker_services.in_memory_torrent_repository.clone(), + core_tracker_services.scrape_handler.clone(), + ) + .await, + ); + + let expected_torrent_stats = vec![TorrentScrapeStatistics { + seeders: NumberOfPeers(1.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), + }]; + + assert_eq!(torrent_stats.unwrap().torrent_stats, expected_torrent_stats); + } + } + + mod with_a_whitelisted_tracker { + use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; + + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::scrape::tests::scrape_request::{ + add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, + }; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_listed_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, + }; + + #[tokio::test] + async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder( + core_tracker_services.in_memory_torrent_repository.clone(), + &remote_addr, + &info_hash, + ) + .await; + + core_tracker_services.in_memory_whitelist.add(&info_hash.0.into()).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + let torrent_stats = match_scrape_response( + handle_scrape( + remote_addr, + &request, + &core_tracker_services.scrape_handler, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(), + ) + .unwrap(); + + let expected_torrent_stats = vec![TorrentScrapeStatistics { + seeders: NumberOfPeers(1.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), + }]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + + #[tokio::test] + async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder( + core_tracker_services.in_memory_torrent_repository.clone(), + &remote_addr, + &info_hash, + ) + .await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + let torrent_stats = match_scrape_response( + handle_scrape( + remote_addr, + &request, + &core_tracker_services.scrape_handler, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(), + ) + .unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + } + + fn sample_scrape_request(remote_addr: &SocketAddr) -> ScrapeRequest { + let info_hash = InfoHash([0u8; 20]); + let info_hashes = vec![info_hash]; + + ScrapeRequest { + connection_id: make(gen_remote_fingerprint(remote_addr), sample_issue_time()).unwrap(), + transaction_id: TransactionId(0i32.into()), + info_hashes, + } + } + + 
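The scrape fixtures above, like the connect tests earlier in this patch, rely on the connection-cookie round trip: `make` derives a `ConnectionId` from the client fingerprint plus an issue time, and `check` later accepts that id only while the supplied validity range still covers the issue time, which is why `sample_cookie_valid_range()` spans `sample_issue_time()` plus or minus 10 seconds. A minimal sketch of that round trip, using only the `make`, `check` and fixture signatures already visible in this patch (illustrative only, not part of the diff; it assumes the cookie functions operate purely on the supplied times):

    #[test]
    fn connection_cookie_round_trip_sketch() {
        use crate::servers::udp::connection_cookie::{check, make};
        use crate::servers::udp::handlers::gen_remote_fingerprint;
        use crate::servers::udp::handlers::tests::{
            sample_cookie_valid_range, sample_ipv4_remote_addr, sample_issue_time,
        };

        // The fingerprint binds the cookie to the client socket address.
        let fingerprint = gen_remote_fingerprint(&sample_ipv4_remote_addr());

        // Issue a cookie at the fixture issue time.
        let connection_id = make(fingerprint, sample_issue_time()).unwrap();

        // The same fingerprint plus a validity range containing the issue time passes the check.
        assert!(check(&connection_id, fingerprint, sample_cookie_valid_range()).is_ok());
    }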
mod using_ipv4 { + use std::future; + use std::sync::Arc; + + use mockall::predicate::eq; + + use super::sample_scrape_request; + use crate::packages::udp_tracker_core; + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, + sample_ipv4_remote_addr, MockUdpStatsEventSender, + }; + + #[tokio::test] + async fn should_send_the_upd4_scrape_event() { + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let remote_addr = sample_ipv4_remote_addr(); + + let (core_tracker_services, _core_udp_tracker_services) = + initialize_core_tracker_services_for_default_tracker_configuration(); + + handle_scrape( + remote_addr, + &sample_scrape_request(&remote_addr), + &core_tracker_services.scrape_handler, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + } + } + + mod using_ipv6 { + use std::future; + use std::sync::Arc; + + use mockall::predicate::eq; + + use super::sample_scrape_request; + use crate::packages::udp_tracker_core; + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, + sample_ipv6_remote_addr, MockUdpStatsEventSender, + }; + + #[tokio::test] + async fn should_send_the_upd6_scrape_event() { + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let remote_addr = sample_ipv6_remote_addr(); + + let (core_tracker_services, _core_udp_tracker_services) = + initialize_core_tracker_services_for_default_tracker_configuration(); + + handle_scrape( + remote_addr, + &sample_scrape_request(&remote_addr), + &core_tracker_services.scrape_handler, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + } + } + } +} From 3c07b260313bb088682e8378e7f4393340c85fc5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 13:44:19 +0000 Subject: [PATCH 06/14] refactor: [#1268] extract servers::udp::services::announce service --- src/servers/udp/handlers/announce.rs | 26 ++++++--------- src/servers/udp/mod.rs | 1 + src/servers/udp/services/announce.rs | 48 ++++++++++++++++++++++++++++ src/servers/udp/services/mod.rs | 2 ++ src/servers/udp/services/scrape.rs | 0 5 files changed, 60 insertions(+), 17 deletions(-) create mode 100644 src/servers/udp/services/announce.rs create mode 100644 src/servers/udp/services/mod.rs create mode 100644 src/servers/udp/services/scrape.rs diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs index 79fb91f49..ecc4ba88f 100644 --- a/src/servers/udp/handlers/announce.rs +++ b/src/servers/udp/handlers/announce.rs @@ -18,7 +18,7 @@ use crate::packages::udp_tracker_core; use crate::servers::udp::connection_cookie::check; use crate::servers::udp::error::Error; use 
crate::servers::udp::handlers::gen_remote_fingerprint; -use crate::servers::udp::peer_builder; +use crate::servers::udp::{peer_builder, services}; /// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) /// request for more information. @@ -66,22 +66,14 @@ pub async fn handle_announce( let mut peer = peer_builder::from_request(request, &remote_client_ip); let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); - let response = announce_handler.announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted); - - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_client_ip { - IpAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Announce) - .await; - } - IpAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Announce) - .await; - } - } - } + let response = services::announce::invoke( + announce_handler.clone(), + opt_udp_stats_event_sender.clone(), + info_hash, + &mut peer, + &peers_wanted, + ) + .await; #[allow(clippy::cast_possible_truncation)] if remote_addr.is_ipv4() { diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index b141cc322..604fee8fe 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -642,6 +642,7 @@ pub mod error; pub mod handlers; pub mod peer_builder; pub mod server; +pub mod services; pub const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; diff --git a/src/servers/udp/services/announce.rs b/src/servers/udp/services/announce.rs new file mode 100644 index 000000000..317b1afef --- /dev/null +++ b/src/servers/udp/services/announce.rs @@ -0,0 +1,48 @@ +//! The `announce` service. +//! +//! The service is responsible for handling the `announce` requests. +//! +//! It delegates the `announce` logic to the [`AnnounceHandler`] and it returns +//! the [`AnnounceData`]. +//! +//! It also sends an [`http_tracker_core::statistics::event::Event`] +//! because events are specific for the HTTP tracker. 
+use std::net::IpAddr; +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use torrust_tracker_primitives::core::AnnounceData; +use torrust_tracker_primitives::peer; + +use crate::packages::udp_tracker_core; + +pub async fn invoke( + announce_handler: Arc, + opt_udp_stats_event_sender: Arc>>, + info_hash: InfoHash, + peer: &mut peer::Peer, + peers_wanted: &PeersWanted, +) -> AnnounceData { + let original_peer_ip = peer.peer_addr.ip(); + + // The tracker could change the original peer ip + let announce_data = announce_handler.announce(&info_hash, peer, &original_peer_ip, peers_wanted); + + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match original_peer_ip { + IpAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Announce) + .await; + } + IpAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Announce) + .await; + } + } + } + + announce_data +} diff --git a/src/servers/udp/services/mod.rs b/src/servers/udp/services/mod.rs new file mode 100644 index 000000000..776d2dfbf --- /dev/null +++ b/src/servers/udp/services/mod.rs @@ -0,0 +1,2 @@ +pub mod announce; +pub mod scrape; diff --git a/src/servers/udp/services/scrape.rs b/src/servers/udp/services/scrape.rs new file mode 100644 index 000000000..e69de29bb From dec742e2326a9e9861a1b26907c1cbfe0c127838 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 13:52:46 +0000 Subject: [PATCH 07/14] refactor: [#1268] extract servers::udp::services::scrape service --- src/servers/udp/handlers/scrape.rs | 18 ++---------- src/servers/udp/services/announce.rs | 2 +- src/servers/udp/services/scrape.rs | 43 ++++++++++++++++++++++++++++ 3 files changed, 46 insertions(+), 17 deletions(-) diff --git a/src/servers/udp/handlers/scrape.rs b/src/servers/udp/handlers/scrape.rs index 2c8ca335a..d68ca07dd 100644 --- a/src/servers/udp/handlers/scrape.rs +++ b/src/servers/udp/handlers/scrape.rs @@ -15,6 +15,7 @@ use crate::packages::udp_tracker_core; use crate::servers::udp::connection_cookie::check; use crate::servers::udp::error::Error; use crate::servers::udp::handlers::gen_remote_fingerprint; +use crate::servers::udp::services; /// It handles the `Scrape` request. Refer to [`Scrape`](crate::servers::udp#scrape) /// request for more information. @@ -49,7 +50,7 @@ pub async fn handle_scrape( info_hashes.push((*info_hash).into()); } - let scrape_data = scrape_handler.scrape(&info_hashes).await; + let scrape_data = services::scrape::invoke(scrape_handler, opt_udp_stats_event_sender, &info_hashes, remote_addr).await; let mut torrent_stats: Vec = Vec::new(); @@ -68,21 +69,6 @@ pub async fn handle_scrape( torrent_stats.push(scrape_entry); } - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Scrape) - .await; - } - SocketAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Scrape) - .await; - } - } - } - let response = ScrapeResponse { transaction_id: request.transaction_id, torrent_stats, diff --git a/src/servers/udp/services/announce.rs b/src/servers/udp/services/announce.rs index 317b1afef..8a046a625 100644 --- a/src/servers/udp/services/announce.rs +++ b/src/servers/udp/services/announce.rs @@ -5,7 +5,7 @@ //! 
It delegates the `announce` logic to the [`AnnounceHandler`] and it returns //! the [`AnnounceData`]. //! -//! It also sends an [`http_tracker_core::statistics::event::Event`] +//! It also sends an [`udp_tracker_core::statistics::event::Event`] //! because events are specific for the HTTP tracker. use std::net::IpAddr; use std::sync::Arc; diff --git a/src/servers/udp/services/scrape.rs b/src/servers/udp/services/scrape.rs index e69de29bb..7d4897564 100644 --- a/src/servers/udp/services/scrape.rs +++ b/src/servers/udp/services/scrape.rs @@ -0,0 +1,43 @@ +//! The `scrape` service. +//! +//! The service is responsible for handling the `scrape` requests. +//! +//! It delegates the `scrape` logic to the [`ScrapeHandler`] and it returns the +//! [`ScrapeData`]. +//! +//! It also sends an [`udp_tracker_core::statistics::event::Event`] +//! because events are specific for the UDP tracker. +use std::net::SocketAddr; +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use torrust_tracker_primitives::core::ScrapeData; + +use crate::packages::udp_tracker_core; + +pub async fn invoke( + scrape_handler: &Arc, + opt_udp_stats_event_sender: &Arc>>, + info_hashes: &Vec, + remote_addr: SocketAddr, +) -> ScrapeData { + let scrape_data = scrape_handler.scrape(info_hashes).await; + + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Scrape) + .await; + } + SocketAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Scrape) + .await; + } + } + } + + scrape_data +} From 73753e31f2626ff694bb0ea8994cce2877e2a637 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 15:41:25 +0000 Subject: [PATCH 08/14] [#1268] move http services to http_tracker_core package --- src/packages/http_tracker_core/mod.rs | 1 + .../http_tracker_core}/services/announce.rs | 6 +++--- .../http_tracker_core}/services/mod.rs | 0 .../http_tracker_core}/services/scrape.rs | 16 ++++++++-------- src/servers/http/v1/handlers/announce.rs | 2 +- src/servers/http/v1/handlers/scrape.rs | 2 +- src/servers/http/v1/mod.rs | 1 - 7 files changed, 14 insertions(+), 14 deletions(-) rename src/{servers/http/v1 => packages/http_tracker_core}/services/announce.rs (98%) rename src/{servers/http/v1 => packages/http_tracker_core}/services/mod.rs (100%) rename src/{servers/http/v1 => packages/http_tracker_core}/services/scrape.rs (97%) diff --git a/src/packages/http_tracker_core/mod.rs b/src/packages/http_tracker_core/mod.rs index 3449ec7b4..4f3e54857 100644 --- a/src/packages/http_tracker_core/mod.rs +++ b/src/packages/http_tracker_core/mod.rs @@ -1 +1,2 @@ +pub mod services; pub mod statistics; diff --git a/src/servers/http/v1/services/announce.rs b/src/packages/http_tracker_core/services/announce.rs similarity index 98% rename from src/servers/http/v1/services/announce.rs rename to src/packages/http_tracker_core/services/announce.rs index e321ad01f..67b5997b3 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/packages/http_tracker_core/services/announce.rs @@ -164,11 +164,11 @@ mod tests { use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::packages::http_tracker_core; - use crate::servers::http::test_helpers::tests::sample_info_hash; - use crate::servers::http::v1::services::announce::invoke; - use 
crate::servers::http::v1::services::announce::tests::{ + use crate::packages::http_tracker_core::services::announce::invoke; + use crate::packages::http_tracker_core::services::announce::tests::{ initialize_core_tracker_services, sample_peer, MockHttpStatsEventSender, }; + use crate::servers::http::test_helpers::tests::sample_info_hash; fn initialize_announce_handler() -> Arc { let config = configuration::ephemeral(); diff --git a/src/servers/http/v1/services/mod.rs b/src/packages/http_tracker_core/services/mod.rs similarity index 100% rename from src/servers/http/v1/services/mod.rs rename to src/packages/http_tracker_core/services/mod.rs diff --git a/src/servers/http/v1/services/scrape.rs b/src/packages/http_tracker_core/services/scrape.rs similarity index 97% rename from src/servers/http/v1/services/scrape.rs rename to src/packages/http_tracker_core/services/scrape.rs index e2eb4f87c..8ce83212e 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/packages/http_tracker_core/services/scrape.rs @@ -161,13 +161,13 @@ mod tests { use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::packages::{self, http_tracker_core}; - use crate::servers::http::test_helpers::tests::sample_info_hash; - use crate::servers::http::v1::services::scrape::invoke; - use crate::servers::http::v1::services::scrape::tests::{ + use crate::packages::http_tracker_core::services::scrape::invoke; + use crate::packages::http_tracker_core::services::scrape::tests::{ initialize_announce_and_scrape_handlers_for_public_tracker, initialize_scrape_handler, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; + use crate::packages::{self, http_tracker_core}; + use crate::servers::http::test_helpers::tests::sample_info_hash; #[tokio::test] async fn it_should_return_the_scrape_data_for_a_torrent() { @@ -247,12 +247,12 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; - use crate::packages::{self, http_tracker_core}; - use crate::servers::http::test_helpers::tests::sample_info_hash; - use crate::servers::http::v1::services::scrape::fake; - use crate::servers::http::v1::services::scrape::tests::{ + use crate::packages::http_tracker_core::services::scrape::fake; + use crate::packages::http_tracker_core::services::scrape::tests::{ initialize_announce_and_scrape_handlers_for_public_tracker, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; + use crate::packages::{self, http_tracker_core}; + use crate::servers::http::test_helpers::tests::sample_info_hash; #[tokio::test] async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 64939ff48..ffc6a7b0a 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -28,11 +28,11 @@ use torrust_tracker_primitives::peer; use super::common::auth::map_auth_error_to_error_response; use crate::packages::http_tracker_core; +use crate::packages::http_tracker_core::services::{self}; use crate::servers::http::v1::extractors::announce_request::ExtractRequest; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::handlers::common::auth; -use crate::servers::http::v1::services::{self}; use crate::CurrentClock; /// It handles the `announce` request when 
the HTTP tracker does not require diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 09af385fb..d2f4f9e0f 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -20,10 +20,10 @@ use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; use crate::packages::http_tracker_core; +use crate::packages::http_tracker_core::services; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; -use crate::servers::http::v1::services; /// It handles the `scrape` request when the HTTP tracker is configured /// to run in `public` mode. diff --git a/src/servers/http/v1/mod.rs b/src/servers/http/v1/mod.rs index 48dac5663..6e9530cb0 100644 --- a/src/servers/http/v1/mod.rs +++ b/src/servers/http/v1/mod.rs @@ -5,4 +5,3 @@ pub mod extractors; pub mod handlers; pub mod routes; -pub mod services; From e48aaf51db7d7b84899c66149ecde2c6facb0615 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 15:47:28 +0000 Subject: [PATCH 09/14] [#1268] move udp services to udp_tracker_core package --- src/packages/udp_tracker_core/mod.rs | 1 + .../udp => packages/udp_tracker_core}/services/announce.rs | 0 .../udp => packages/udp_tracker_core}/services/mod.rs | 0 .../udp => packages/udp_tracker_core}/services/scrape.rs | 0 src/servers/udp/handlers/announce.rs | 4 ++-- src/servers/udp/handlers/scrape.rs | 2 +- src/servers/udp/mod.rs | 1 - 7 files changed, 4 insertions(+), 4 deletions(-) rename src/{servers/udp => packages/udp_tracker_core}/services/announce.rs (100%) rename src/{servers/udp => packages/udp_tracker_core}/services/mod.rs (100%) rename src/{servers/udp => packages/udp_tracker_core}/services/scrape.rs (100%) diff --git a/src/packages/udp_tracker_core/mod.rs b/src/packages/udp_tracker_core/mod.rs index 3449ec7b4..4f3e54857 100644 --- a/src/packages/udp_tracker_core/mod.rs +++ b/src/packages/udp_tracker_core/mod.rs @@ -1 +1,2 @@ +pub mod services; pub mod statistics; diff --git a/src/servers/udp/services/announce.rs b/src/packages/udp_tracker_core/services/announce.rs similarity index 100% rename from src/servers/udp/services/announce.rs rename to src/packages/udp_tracker_core/services/announce.rs diff --git a/src/servers/udp/services/mod.rs b/src/packages/udp_tracker_core/services/mod.rs similarity index 100% rename from src/servers/udp/services/mod.rs rename to src/packages/udp_tracker_core/services/mod.rs diff --git a/src/servers/udp/services/scrape.rs b/src/packages/udp_tracker_core/services/scrape.rs similarity index 100% rename from src/servers/udp/services/scrape.rs rename to src/packages/udp_tracker_core/services/scrape.rs diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs index ecc4ba88f..26a1a2116 100644 --- a/src/servers/udp/handlers/announce.rs +++ b/src/servers/udp/handlers/announce.rs @@ -14,11 +14,11 @@ use torrust_tracker_configuration::Core; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; -use crate::packages::udp_tracker_core; +use crate::packages::udp_tracker_core::{self, services}; use crate::servers::udp::connection_cookie::check; use crate::servers::udp::error::Error; use crate::servers::udp::handlers::gen_remote_fingerprint; -use crate::servers::udp::{peer_builder, services}; +use 
crate::servers::udp::peer_builder; /// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) /// request for more information. diff --git a/src/servers/udp/handlers/scrape.rs b/src/servers/udp/handlers/scrape.rs index d68ca07dd..3b5ccf50d 100644 --- a/src/servers/udp/handlers/scrape.rs +++ b/src/servers/udp/handlers/scrape.rs @@ -12,10 +12,10 @@ use tracing::{instrument, Level}; use zerocopy::network_endian::I32; use crate::packages::udp_tracker_core; +use crate::packages::udp_tracker_core::services; use crate::servers::udp::connection_cookie::check; use crate::servers::udp::error::Error; use crate::servers::udp::handlers::gen_remote_fingerprint; -use crate::servers::udp::services; /// It handles the `Scrape` request. Refer to [`Scrape`](crate::servers::udp#scrape) /// request for more information. diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 604fee8fe..b141cc322 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -642,7 +642,6 @@ pub mod error; pub mod handlers; pub mod peer_builder; pub mod server; -pub mod services; pub const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; From 74815abeb78198e0cc234e47a4af0633247232cf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 16:31:15 +0000 Subject: [PATCH 10/14] refactor: [#1268] move announce logic from axum to http_tracker_core package --- Cargo.lock | 1 + packages/http-protocol/Cargo.toml | 1 + packages/http-protocol/src/lib.rs | 13 ++++ .../http-protocol/src/v1/requests/announce.rs | 33 ++++++++- .../http_tracker_core/services/announce.rs | 53 ++++++++++++++ src/servers/http/v1/handlers/announce.rs | 69 ++++--------------- 6 files changed, 113 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f99db113..408471efc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -560,6 +560,7 @@ dependencies = [ "serde", "serde_bencode", "thiserror 2.0.11", + "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-contrib-bencode", "torrust-tracker-located-error", diff --git a/packages/http-protocol/Cargo.toml b/packages/http-protocol/Cargo.toml index 2d0cabf51..e76094c1a 100644 --- a/packages/http-protocol/Cargo.toml +++ b/packages/http-protocol/Cargo.toml @@ -24,6 +24,7 @@ percent-encoding = "2" serde = { version = "1", features = ["derive"] } serde_bencode = "0" thiserror = "2" +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-contrib-bencode = { version = "3.0.0-develop", path = "../../contrib/bencode" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } diff --git a/packages/http-protocol/src/lib.rs b/packages/http-protocol/src/lib.rs index 6525a6dca..326a5b182 100644 --- a/packages/http-protocol/src/lib.rs +++ b/packages/http-protocol/src/lib.rs @@ -1,3 +1,16 @@ //! Primitive types and function for `BitTorrent` HTTP trackers. pub mod percent_encoding; pub mod v1; + +use torrust_tracker_clock::clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/packages/http-protocol/src/v1/requests/announce.rs b/packages/http-protocol/src/v1/requests/announce.rs index 9bde7ec13..f293b9cf5 100644 --- a/packages/http-protocol/src/v1/requests/announce.rs +++ b/packages/http-protocol/src/v1/requests/announce.rs @@ -2,18 +2,21 @@ //! //! Data structures and logic for parsing the `announce` request. use std::fmt; +use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use std::str::FromStr; -use aquatic_udp_protocol::{NumberOfBytes, PeerId}; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::{self, InfoHash}; use thiserror::Error; +use torrust_tracker_clock::clock::Time; use torrust_tracker_located_error::{Located, LocatedError}; use torrust_tracker_primitives::peer; use crate::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::v1::query::{ParseQueryError, Query}; use crate::v1::responses; +use crate::CurrentClock; // Query param names const INFO_HASH: &str = "info_hash"; @@ -373,6 +376,34 @@ fn extract_numwant(query: &Query) -> Result, ParseAnnounceQueryError } } +/// It builds a `Peer` from the announce request. +/// +/// It ignores the peer address in the announce request params. +#[must_use] +pub fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> peer::Peer { + peer::Peer { + peer_id: announce_request.peer_id, + peer_addr: SocketAddr::new(*peer_ip, announce_request.port), + updated: CurrentClock::now(), + uploaded: announce_request.uploaded.unwrap_or(NumberOfBytes::new(0)), + downloaded: announce_request.downloaded.unwrap_or(NumberOfBytes::new(0)), + left: announce_request.left.unwrap_or(NumberOfBytes::new(0)), + event: map_to_torrust_event(&announce_request.event), + } +} + +#[must_use] +pub fn map_to_torrust_event(event: &Option) -> AnnounceEvent { + match event { + Some(event) => match &event { + Event::Started => AnnounceEvent::Started, + Event::Stopped => AnnounceEvent::Stopped, + Event::Completed => AnnounceEvent::Completed, + }, + None => AnnounceEvent::None, + } +} + #[cfg(test)] mod tests { diff --git a/src/packages/http_tracker_core/services/announce.rs b/src/packages/http_tracker_core/services/announce.rs index 67b5997b3..049d0d228 100644 --- a/src/packages/http_tracker_core/services/announce.rs +++ b/src/packages/http_tracker_core/services/announce.rs @@ -10,8 +10,14 @@ use std::net::IpAddr; use std::sync::Arc; +use bittorrent_http_protocol::v1::requests::announce::{peer_from_request, Announce}; +use bittorrent_http_protocol::v1::responses; +use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::whitelist; +use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; @@ -27,6 +33,53 @@ use crate::packages::http_tracker_core; /// > **NOTICE**: as the HTTP tracker does not requires a connection request /// > like the UDP tracker, the number of TCP connections is incremented for /// > each `announce` request. +/// +/// # Errors +/// +/// This function will return an error if: +/// +/// - The tracker is running in `listed` mode and the torrent is not whitelisted. 
+/// - There is an error when resolving the client IP address. +#[allow(clippy::too_many_arguments)] +pub async fn handle_announce( + core_config: &Arc, + announce_handler: &Arc, + _authentication_service: &Arc, + whitelist_authorization: &Arc, + opt_http_stats_event_sender: &Arc>>, + announce_request: &Announce, + client_ip_sources: &ClientIpSources, +) -> Result { + // Authorization + match whitelist_authorization.authorize(&announce_request.info_hash).await { + Ok(()) => (), + Err(error) => return Err(responses::error::Error::from(error)), + } + + let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { + Ok(peer_ip) => peer_ip, + Err(error) => return Err(responses::error::Error::from(error)), + }; + + let mut peer = peer_from_request(announce_request, &peer_ip); + + let peers_wanted = match announce_request.numwant { + Some(numwant) => PeersWanted::only(numwant), + None => PeersWanted::AsManyAsPossible, + }; + + let announce_data = invoke( + announce_handler.clone(), + opt_http_stats_event_sender.clone(), + announce_request.info_hash, + &mut peer, + &peers_wanted, + ) + .await; + + Ok(announce_data) +} + pub async fn invoke( announce_handler: Arc, opt_http_stats_event_sender: Arc>>, diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index ffc6a7b0a..977e7dd6a 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -5,35 +5,29 @@ //! //! The handlers perform the authentication and authorization of the request, //! and resolve the client IP address. -use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use std::sync::Arc; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use aquatic_udp_protocol::AnnounceEvent; use axum::extract::State; use axum::response::{IntoResponse, Response}; use bittorrent_http_protocol::v1::requests::announce::{Announce, Compact, Event}; use bittorrent_http_protocol::v1::responses::{self}; -use bittorrent_http_protocol::v1::services::peer_ip_resolver; use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; -use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::Key; use bittorrent_tracker_core::whitelist; use hyper::StatusCode; -use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; -use torrust_tracker_primitives::peer; use super::common::auth::map_auth_error_to_error_response; use crate::packages::http_tracker_core; -use crate::packages::http_tracker_core::services::{self}; use crate::servers::http::v1::extractors::announce_request::ExtractRequest; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::handlers::common::auth; -use crate::CurrentClock; /// It handles the `announce` request when the HTTP tracker does not require /// authentication (no PATH `key` parameter required). @@ -129,12 +123,6 @@ async fn handle( build_response(announce_request, announce_data) } -/* code-review: authentication, authorization and peer IP resolution could be moved - from the handler (Axum) layer into the app layer `services::announce::invoke`. 
- That would make the handler even simpler and the code more reusable and decoupled from Axum. - See https://github.com/torrust/torrust-tracker/discussions/240. -*/ - #[allow(clippy::too_many_arguments)] async fn handle_announce( core_config: &Arc, @@ -146,6 +134,8 @@ async fn handle_announce( client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Result { + // todo: move authentication inside `http_tracker_core::services::announce::handle_announce` + // Authentication if core_config.private { match maybe_key { @@ -161,33 +151,16 @@ async fn handle_announce( } } - // Authorization - match whitelist_authorization.authorize(&announce_request.info_hash).await { - Ok(()) => (), - Err(error) => return Err(responses::error::Error::from(error)), - } - - let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { - Ok(peer_ip) => peer_ip, - Err(error) => return Err(responses::error::Error::from(error)), - }; - - let mut peer = peer_from_request(announce_request, &peer_ip); - let peers_wanted = match announce_request.numwant { - Some(numwant) => PeersWanted::only(numwant), - None => PeersWanted::AsManyAsPossible, - }; - - let announce_data = services::announce::invoke( - announce_handler.clone(), - opt_http_stats_event_sender.clone(), - announce_request.info_hash, - &mut peer, - &peers_wanted, + http_tracker_core::services::announce::handle_announce( + &core_config.clone(), + &announce_handler.clone(), + &authentication_service.clone(), + &whitelist_authorization.clone(), + &opt_http_stats_event_sender.clone(), + announce_request, + client_ip_sources, ) - .await; - - Ok(announce_data) + .await } fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> Response { @@ -202,22 +175,6 @@ fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> R } } -/// It builds a `Peer` from the announce request. -/// -/// It ignores the peer address in the announce request params. 
-#[must_use] -fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> peer::Peer { - peer::Peer { - peer_id: announce_request.peer_id, - peer_addr: SocketAddr::new(*peer_ip, announce_request.port), - updated: CurrentClock::now(), - uploaded: announce_request.uploaded.unwrap_or(NumberOfBytes::new(0)), - downloaded: announce_request.downloaded.unwrap_or(NumberOfBytes::new(0)), - left: announce_request.left.unwrap_or(NumberOfBytes::new(0)), - event: map_to_torrust_event(&announce_request.event), - } -} - #[must_use] pub fn map_to_aquatic_event(event: &Option) -> aquatic_udp_protocol::AnnounceEvent { match event { From 37a142efcea0d1c85a7a16ec67d0847414855171 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 16:45:35 +0000 Subject: [PATCH 11/14] refactor: [#1268] move scrape logic from axum to http_tracker_core package --- .../http_tracker_core/services/scrape.rs | 42 +++++++++++++++++++ src/servers/http/v1/handlers/scrape.rs | 40 ++++++------------ 2 files changed, 55 insertions(+), 27 deletions(-) diff --git a/src/packages/http_tracker_core/services/scrape.rs b/src/packages/http_tracker_core/services/scrape.rs index 8ce83212e..62f5fdf62 100644 --- a/src/packages/http_tracker_core/services/scrape.rs +++ b/src/packages/http_tracker_core/services/scrape.rs @@ -10,8 +10,13 @@ use std::net::IpAddr; use std::sync::Arc; +use bittorrent_http_protocol::v1::requests::scrape::Scrape; +use bittorrent_http_protocol::v1::responses; +use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; use crate::packages::http_tracker_core; @@ -26,6 +31,43 @@ use crate::packages::http_tracker_core; /// > **NOTICE**: as the HTTP tracker does not requires a connection request /// > like the UDP tracker, the number of TCP connections is incremented for /// > each `scrape` request. +/// +/// # Errors +/// +/// This function will return an error if: +/// +/// - There is an error when resolving the client IP address. +#[allow(clippy::too_many_arguments)] +pub async fn handle_scrape( + core_config: &Arc, + scrape_handler: &Arc, + _authentication_service: &Arc, + opt_http_stats_event_sender: &Arc>>, + scrape_request: &Scrape, + client_ip_sources: &ClientIpSources, + return_real_scrape_data: bool, +) -> Result { + // Authorization for scrape requests is handled at the `http_tracker_core` + // level for each torrent. 
+ + let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { + Ok(peer_ip) => peer_ip, + Err(error) => return Err(responses::error::Error::from(error)), + }; + + if return_real_scrape_data { + Ok(invoke( + scrape_handler, + opt_http_stats_event_sender, + &scrape_request.info_hashes, + &peer_ip, + ) + .await) + } else { + Ok(http_tracker_core::services::scrape::fake(opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await) + } +} + pub async fn invoke( scrape_handler: &Arc, opt_http_stats_event_sender: &Arc>>, diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index d2f4f9e0f..39bebe18e 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -11,7 +11,7 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use bittorrent_http_protocol::v1::requests::scrape::Scrape; use bittorrent_http_protocol::v1::responses; -use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; +use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::Key; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; @@ -20,7 +20,6 @@ use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; use crate::packages::http_tracker_core; -use crate::packages::http_tracker_core::services; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; @@ -111,12 +110,6 @@ async fn handle( build_response(scrape_data) } -/* code-review: authentication, authorization and peer IP resolution could be moved - from the handler (Axum) layer into the app layer `services::announce::invoke`. - That would make the handler even simpler and the code more reusable and decoupled from Axum. - See https://github.com/torrust/torrust-tracker/discussions/240. -*/ - #[allow(clippy::too_many_arguments)] async fn handle_scrape( core_config: &Arc, @@ -127,6 +120,8 @@ async fn handle_scrape( client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Result { + // todo: move authentication inside `http_tracker_core::services::scrape::handle_scrape` + // Authentication let return_real_scrape_data = if core_config.private { match maybe_key { @@ -140,25 +135,16 @@ async fn handle_scrape( true }; - // Authorization for scrape requests is handled at the `Tracker` level - // for each torrent. 
- - let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { - Ok(peer_ip) => peer_ip, - Err(error) => return Err(responses::error::Error::from(error)), - }; - - if return_real_scrape_data { - Ok(services::scrape::invoke( - scrape_handler, - opt_http_stats_event_sender, - &scrape_request.info_hashes, - &peer_ip, - ) - .await) - } else { - Ok(services::scrape::fake(opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await) - } + http_tracker_core::services::scrape::handle_scrape( + core_config, + scrape_handler, + authentication_service, + opt_http_stats_event_sender, + scrape_request, + client_ip_sources, + return_real_scrape_data, + ) + .await } fn build_response(scrape_data: ScrapeData) -> Response { From c0fc390409949ac11bde35095b58f81266d66e85 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 17:20:19 +0000 Subject: [PATCH 12/14] refactor: [#1268] move announce logic from udp server to udp_tracker_core package --- src/packages/udp_tracker_core/mod.rs | 1 + .../udp_tracker_core}/peer_builder.rs | 0 .../udp_tracker_core/services/announce.rs | 44 ++++++++++++++++++- src/servers/udp/handlers/announce.rs | 42 +++++++----------- src/servers/udp/mod.rs | 1 - 5 files changed, 60 insertions(+), 28 deletions(-) rename src/{servers/udp => packages/udp_tracker_core}/peer_builder.rs (100%) diff --git a/src/packages/udp_tracker_core/mod.rs b/src/packages/udp_tracker_core/mod.rs index 4f3e54857..3ab1d83dd 100644 --- a/src/packages/udp_tracker_core/mod.rs +++ b/src/packages/udp_tracker_core/mod.rs @@ -1,2 +1,3 @@ +pub mod peer_builder; pub mod services; pub mod statistics; diff --git a/src/servers/udp/peer_builder.rs b/src/packages/udp_tracker_core/peer_builder.rs similarity index 100% rename from src/servers/udp/peer_builder.rs rename to src/packages/udp_tracker_core/peer_builder.rs diff --git a/src/packages/udp_tracker_core/services/announce.rs b/src/packages/udp_tracker_core/services/announce.rs index 8a046a625..dec506aec 100644 --- a/src/packages/udp_tracker_core/services/announce.rs +++ b/src/packages/udp_tracker_core/services/announce.rs @@ -7,15 +7,55 @@ //! //! It also sends an [`udp_tracker_core::statistics::event::Event`] //! because events are specific for the HTTP tracker. -use std::net::IpAddr; +use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; +use aquatic_udp_protocol::AnnounceRequest; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::error::WhitelistError; +use bittorrent_tracker_core::whitelist; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; -use crate::packages::udp_tracker_core; +use crate::packages::udp_tracker_core::{self, peer_builder}; + +/// It handles the `Announce` request. +/// +/// # Errors +/// +/// It will return an error if: +/// +/// - The tracker is running in listed mode and the torrent is not in the +/// whitelist. 
+#[allow(clippy::too_many_arguments)] +pub async fn handle_announce( + remote_addr: SocketAddr, + request: &AnnounceRequest, + announce_handler: &Arc, + whitelist_authorization: &Arc, + opt_udp_stats_event_sender: &Arc>>, +) -> Result { + let info_hash = request.info_hash.into(); + let remote_client_ip = remote_addr.ip(); + + // Authorization + whitelist_authorization.authorize(&info_hash).await?; + + let mut peer = peer_builder::from_request(request, &remote_client_ip); + let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); + + let announce_data = invoke( + announce_handler.clone(), + opt_udp_stats_event_sender.clone(), + info_hash, + &mut peer, + &peers_wanted, + ) + .await; + + Ok(announce_data) +} pub async fn invoke( announce_handler: Arc, diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs index 26a1a2116..2254ea979 100644 --- a/src/servers/udp/handlers/announce.rs +++ b/src/servers/udp/handlers/announce.rs @@ -8,17 +8,16 @@ use aquatic_udp_protocol::{ Port, Response, ResponsePeer, TransactionId, }; use bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::Core; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; -use crate::packages::udp_tracker_core::{self, services}; +use crate::packages::udp_tracker_core::{self}; use crate::servers::udp::connection_cookie::check; use crate::servers::udp::error::Error; use crate::servers::udp::handlers::gen_remote_fingerprint; -use crate::servers::udp::peer_builder; /// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) /// request for more information. @@ -44,6 +43,8 @@ pub async fn handle_announce( tracing::trace!("handle announce"); + // todo: move authentication to `udp_tracker_core::services::announce::handle_announce` + check( &request.connection_id, gen_remote_fingerprint(&remote_addr), @@ -51,29 +52,20 @@ pub async fn handle_announce( ) .map_err(|e| (e, request.transaction_id))?; - let info_hash = request.info_hash.into(); - let remote_client_ip = remote_addr.ip(); - - // Authorization - whitelist_authorization - .authorize(&info_hash) - .await - .map_err(|e| Error::TrackerError { - source: (Arc::new(e) as Arc).into(), - }) - .map_err(|e| (e, request.transaction_id))?; - - let mut peer = peer_builder::from_request(request, &remote_client_ip); - let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); - - let response = services::announce::invoke( - announce_handler.clone(), - opt_udp_stats_event_sender.clone(), - info_hash, - &mut peer, - &peers_wanted, + let response = udp_tracker_core::services::announce::handle_announce( + remote_addr, + request, + announce_handler, + whitelist_authorization, + opt_udp_stats_event_sender, ) - .await; + .await + .map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + }) + .map_err(|e| (e, request.transaction_id))?; + + // todo: extract `build_response` function. 
#[allow(clippy::cast_possible_truncation)] if remote_addr.is_ipv4() { diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index b141cc322..e8410e5f0 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -640,7 +640,6 @@ use std::net::SocketAddr; pub mod connection_cookie; pub mod error; pub mod handlers; -pub mod peer_builder; pub mod server; pub const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; From eca5c597a7624d2a5edfe52e042b84a8e76998ec Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 17:31:51 +0000 Subject: [PATCH 13/14] refactor: [#1268] move scrape logic from udp server to udp_tracker_core package --- .../udp_tracker_core/services/scrape.rs | 17 +++++++++++++++++ src/servers/udp/handlers/scrape.rs | 13 +++++-------- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/src/packages/udp_tracker_core/services/scrape.rs b/src/packages/udp_tracker_core/services/scrape.rs index 7d4897564..e47dd35b3 100644 --- a/src/packages/udp_tracker_core/services/scrape.rs +++ b/src/packages/udp_tracker_core/services/scrape.rs @@ -10,12 +10,29 @@ use std::net::SocketAddr; use std::sync::Arc; +use aquatic_udp_protocol::ScrapeRequest; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_primitives::core::ScrapeData; use crate::packages::udp_tracker_core; +/// It handles the `Scrape` request. +pub async fn handle_scrape( + remote_addr: SocketAddr, + request: &ScrapeRequest, + scrape_handler: &Arc, + opt_udp_stats_event_sender: &Arc>>, +) -> ScrapeData { + // Convert from aquatic infohashes + let mut info_hashes: Vec = vec![]; + for info_hash in &request.info_hashes { + info_hashes.push((*info_hash).into()); + } + + invoke(scrape_handler, opt_udp_stats_event_sender, &info_hashes, remote_addr).await +} + pub async fn invoke( scrape_handler: &Arc, opt_udp_stats_event_sender: &Arc>>, diff --git a/src/servers/udp/handlers/scrape.rs b/src/servers/udp/handlers/scrape.rs index 3b5ccf50d..d41563add 100644 --- a/src/servers/udp/handlers/scrape.rs +++ b/src/servers/udp/handlers/scrape.rs @@ -6,13 +6,11 @@ use std::sync::Arc; use aquatic_udp_protocol::{ NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; -use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; use crate::packages::udp_tracker_core; -use crate::packages::udp_tracker_core::services; use crate::servers::udp::connection_cookie::check; use crate::servers::udp::error::Error; use crate::servers::udp::handlers::gen_remote_fingerprint; @@ -37,6 +35,8 @@ pub async fn handle_scrape( tracing::trace!("handle scrape"); + // todo: move authentication to `udp_tracker_core::services::scrape::handle_scrape` + check( &request.connection_id, gen_remote_fingerprint(&remote_addr), @@ -44,13 +44,10 @@ pub async fn handle_scrape( ) .map_err(|e| (e, request.transaction_id))?; - // Convert from aquatic infohashes - let mut info_hashes: Vec = vec![]; - for info_hash in &request.info_hashes { - info_hashes.push((*info_hash).into()); - } + let scrape_data = + udp_tracker_core::services::scrape::handle_scrape(remote_addr, request, scrape_handler, opt_udp_stats_event_sender).await; - let scrape_data = services::scrape::invoke(scrape_handler, opt_udp_stats_event_sender, &info_hashes, remote_addr).await; + // todo: extract `build_response` function. 
let mut torrent_stats: Vec = Vec::new(); From 3b6f1bb6a5ce386f14d8c26a1350339c0b4efdf5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 14 Feb 2025 18:04:37 +0000 Subject: [PATCH 14/14] chore(deps): bump derive_more from 1.0.0 to 2.0.1 Bumps [derive_more](https://github.com/JelteF/derive_more) from 1.0.0 to 2.0.1. - [Release notes](https://github.com/JelteF/derive_more/releases) - [Changelog](https://github.com/JelteF/derive_more/blob/master/CHANGELOG.md) - [Commits](https://github.com/JelteF/derive_more/compare/v1.0.0...v2.0.1) --- updated-dependencies: - dependency-name: derive_more dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- Cargo.lock | 34 ++++++++++++++++++++++++------ Cargo.toml | 2 +- packages/configuration/Cargo.toml | 2 +- packages/primitives/Cargo.toml | 2 +- packages/tracker-client/Cargo.toml | 2 +- packages/tracker-core/Cargo.toml | 2 +- 6 files changed, 32 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 408471efc..cd4a6fb97 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -554,7 +554,7 @@ dependencies = [ "aquatic_udp_protocol", "bittorrent-primitives", "bittorrent-tracker-core", - "derive_more", + "derive_more 1.0.0", "multimap", "percent-encoding", "serde", @@ -587,7 +587,7 @@ version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", "bittorrent-primitives", - "derive_more", + "derive_more 2.0.1", "hyper", "percent-encoding", "reqwest", @@ -611,7 +611,7 @@ dependencies = [ "aquatic_udp_protocol", "bittorrent-primitives", "chrono", - "derive_more", + "derive_more 2.0.1", "local-ip-address", "mockall", "r2d2", @@ -1255,7 +1255,16 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" dependencies = [ - "derive_more-impl", + "derive_more-impl 1.0.0", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl 2.0.1", ] [[package]] @@ -1263,6 +1272,17 @@ name = "derive_more-impl" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", @@ -4316,7 +4336,7 @@ dependencies = [ "clap", "crossbeam-skiplist", "dashmap", - "derive_more", + "derive_more 2.0.1", "figment", "futures", "futures-util", @@ -4411,7 +4431,7 @@ name = "torrust-tracker-configuration" version = "3.0.0-develop" dependencies = [ "camino", - "derive_more", + "derive_more 2.0.1", "figment", "serde", "serde_json", @@ -4446,7 +4466,7 @@ dependencies = [ "aquatic_udp_protocol", "binascii", "bittorrent-primitives", - "derive_more", + "derive_more 2.0.1", "serde", "tdyne-peer-id", "tdyne-peer-id-registry", diff --git a/Cargo.toml b/Cargo.toml index 6c9f7f22d..082b266b3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,7 +51,7 @@ cipher = "0" clap = { version = "4", features = ["derive", "env"] } crossbeam-skiplist = "0" dashmap = "6" -derive_more = { version = "1", 
features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } figment = "0" futures = "0" futures-util = "0" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 05789b882..da04f29cd 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -16,7 +16,7 @@ version.workspace = true [dependencies] camino = { version = "1", features = ["serde", "serde1"] } -derive_more = { version = "1", features = ["constructor", "display"] } +derive_more = { version = "2", features = ["constructor", "display"] } figment = { version = "0", features = ["env", "test", "toml"] } serde = { version = "1", features = ["derive"] } serde_json = { version = "1", features = ["preserve_order"] } diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index b83886385..1396d8bc8 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -18,7 +18,7 @@ version.workspace = true aquatic_udp_protocol = "0" binascii = "0" bittorrent-primitives = "0.1.0" -derive_more = { version = "1", features = ["constructor"] } +derive_more = { version = "2", features = ["constructor"] } serde = { version = "1", features = ["derive"] } tdyne-peer-id = "1" tdyne-peer-id-registry = "0" diff --git a/packages/tracker-client/Cargo.toml b/packages/tracker-client/Cargo.toml index 67a4c767a..ef5cccaa2 100644 --- a/packages/tracker-client/Cargo.toml +++ b/packages/tracker-client/Cargo.toml @@ -17,7 +17,7 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" -derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } hyper = "1" percent-encoding = "2" reqwest = { version = "0", features = ["json"] } diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index 46807a534..4fa0c0132 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -17,7 +17,7 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" chrono = { version = "0", default-features = false, features = ["clock"] } -derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } mockall = "0" r2d2 = "0" r2d2_mysql = "25"