From 17276e2ad5f6577a7061af26a4500c9e43dbbe1f Mon Sep 17 00:00:00 2001
From: Mike Dallas
Date: Mon, 7 Nov 2022 21:05:26 +0000
Subject: [PATCH] support filtering by upvotes

---
 README.md       |  1 +
 src/download.rs | 14 +++++++-------
 src/main.rs     | 29 +++++++++++++++++++++++++----
 3 files changed, 33 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index f4baa98..5083d01 100644
--- a/README.md
+++ b/README.md
@@ -70,6 +70,7 @@ OPTIONS:
     -o, --output <output>              Directory to save the media to [default: .]
     -p, --period <period>              Time period to download from [default: day]  [possible values: now, hour, day, week, month, year, all]
     -s, --subreddit <subreddit>...     Download media from these subreddit
+    -u, --upvotes <NUM>                Minimum number of upvotes to download [default: 0]
 ```

diff --git a/src/download.rs b/src/download.rs
index 51063f5..721bc47 100644
--- a/src/download.rs
+++ b/src/download.rs
@@ -10,8 +10,8 @@ use reqwest::StatusCode;
 use url::{Position, Url};
 
 use crate::errors::GertError;
-use crate::structures::{GfyData, StreamableApiResponse};
 use crate::structures::Post;
+use crate::structures::{GfyData, StreamableApiResponse};
 use crate::utils::{check_path_present, check_url_has_mime_type};
 
 pub static JPG_EXTENSION: &str = "jpg";
@@ -43,8 +43,8 @@ static GIPHY_MEDIA_SUBDOMAIN_2: &str = "media2.giphy.com";
 static GIPHY_MEDIA_SUBDOMAIN_3: &str = "media3.giphy.com";
 static GIPHY_MEDIA_SUBDOMAIN_4: &str = "media4.giphy.com";
 
-pub static STREAMABLE_DOMAIN : &str = "streamable.com";
-static STREAMABLE_API : &str = "https://api.streamable.com/videos";
+pub static STREAMABLE_DOMAIN: &str = "streamable.com";
+static STREAMABLE_API: &str = "https://api.streamable.com/videos";
 
 /// Media Types Supported
 #[derive(Debug, PartialEq, Eq)]
@@ -515,15 +515,15 @@ impl<'a> Downloader<'a> {
         };
 
         if parsed.files.contains_key(MP4_EXTENSION) {
-            let video_url = parsed.files.get(MP4_EXTENSION).unwrap().url.borrow().to_owned().unwrap();
+            let video_url =
+                parsed.files.get(MP4_EXTENSION).unwrap().url.borrow().to_owned().unwrap();
             let ext = MP4_EXTENSION.to_owned();
-            
+
-            let task = DownloadTask::from_post(post, video_url, ext , None);
+            let task = DownloadTask::from_post(post, video_url, ext, None);
             self.schedule_task(task).await;
         } else {
             self.fail("No mp4 file found in streamable API response");
         }
-        
     }
 
     fn fail(&self, msg: &str) {
diff --git a/src/main.rs b/src/main.rs
index cd81119..7686bf0 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -39,7 +39,7 @@ async fn main() -> Result<(), GertError> {
                 .help("URL of a single post to download")
                 .takes_value(true)
                 .required_unless("subreddit")
-                .conflicts_with_all(&["subreddit", "period", "feed", "limit", "match"]),
+                .conflicts_with_all(&["subreddit", "period", "feed", "limit", "match", "upvotes"]),
         )
         .arg(
             Arg::with_name("environment")
@@ -127,6 +127,15 @@ async fn main() -> Result<(), GertError> {
                 .possible_values(&["hot", "new", "top", "rising"])
                 .default_value("hot"),
         )
+        .arg(
+            Arg::with_name("upvotes")
+                .short("u")
+                .long("upvotes")
+                .value_name("NUM")
+                .help("Minimum number of upvotes to download")
+                .takes_value(true)
+                .default_value("0"),
+        )
         .get_matches();
 
     let env_file = matches.value_of("environment");
@@ -138,6 +147,11 @@ async fn main() -> Result<(), GertError> {
     // generate human readable file names instead of MD5 Hashed file names
     let use_human_readable = matches.is_present("human_readable");
     // restrict downloads to these subreddits
+    let upvotes = matches
+        .value_of("upvotes")
+        .unwrap()
+        .parse::<i64>()
+        .unwrap_or_else(|_| exit("Upvotes must be a number"));
     let subreddits: Vec<&str> = match matches.is_present("subreddits") {
         true => matches.values_of("subreddits").unwrap().collect(),
@@ -280,9 +294,16 @@ async fn main() -> Result<(), GertError> {
         for subreddit in &subreddits {
             let listing = Subreddit::new(subreddit).get_feed(feed, limit, period).await?;
             posts.extend(
-                listing.data.children.into_iter().filter(|post| post.data.url.is_some() && !post.data.is_self).filter(
-                    |post| pattern.is_match(post.data.title.as_ref().unwrap_or(&"".to_string())),
-                ),
+                listing
+                    .data
+                    .children
+                    .into_iter()
+                    .filter(|post| {
+                        post.data.url.is_some() && !post.data.is_self && post.data.score > upvotes
+                    })
+                    .filter(|post| {
+                        pattern.is_match(post.data.title.as_ref().unwrap_or(&"".to_string()))
+                    }),
             );
         }
     }
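
The core of the change is one extra predicate in the subreddit feed loop: a post is only scheduled for download if `post.data.score > upvotes`. The sketch below is not part of the patch; it restates that predicate against a simplified stand-in struct (only the `url`, `is_self`, and `score` fields from the diff are modelled, the field types and the `i64` threshold are assumptions, and the existing regex title filter is omitted) so the filtering step can be run in isolation.

```rust
// Standalone sketch of the filter this patch adds in src/main.rs.
// `PostData` is a simplified stand-in for the project's own post structure;
// only the fields the new predicate touches are modelled here.
struct PostData {
    url: Option<String>,
    is_self: bool,
    score: i64,
}

fn main() {
    // In the patch this threshold comes from the new `--upvotes` argument.
    let upvotes: i64 = 50;

    let posts = vec![
        PostData { url: Some("https://example.com/a.jpg".into()), is_self: false, score: 120 },
        PostData { url: None, is_self: true, score: 999 },
        PostData { url: Some("https://example.com/b.png".into()), is_self: false, score: 10 },
    ];

    // Same shape as the patched filter: keep link posts whose score clears the threshold.
    let kept: Vec<PostData> = posts
        .into_iter()
        .filter(|post| post.url.is_some() && !post.is_self && post.score > upvotes)
        .collect();

    assert_eq!(kept.len(), 1);
    println!("{} post(s) pass the filter", kept.len());
}
```

Because the comparison is strict, a run with `--upvotes 100` skips a post sitting at exactly 100 upvotes, and the default of `0` still drops zero- and negative-score posts.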