diff --git a/Backend/main.py b/Backend/main.py
index 5647842c..9bed6c55 100644
--- a/Backend/main.py
+++ b/Backend/main.py
@@ -51,6 +51,7 @@ def generate():
         data = request.get_json()
         paragraph_number = int(data.get('paragraphNumber', 1)) # Default to 1 if not provided
         ai_model = data.get('aiModel') # Get the AI model selected by the user
+        n_threads = data.get('threads') # Amount of threads to use for video generation
 
         # Get 'useMusic' from the request data and default to False if not provided
         use_music = data.get('useMusic', False)
@@ -211,12 +212,11 @@ def generate():
 
         # Concatenate videos
         temp_audio = AudioFileClip(tts_path)
-        print(video_paths)
-        combined_video_path = combine_videos(video_paths, temp_audio.duration, 5)
+        combined_video_path = combine_videos(video_paths, temp_audio.duration, 5, n_threads or 2)
 
         # Put everything together
         try:
-            final_video_path = generate_video(combined_video_path, tts_path, subtitles_path)
+            final_video_path = generate_video(combined_video_path, tts_path, subtitles_path, n_threads or 2)
         except Exception as e:
             print(colored(f"[-] Error generating final video: {e}", "red"))
             final_video_path = None
@@ -289,7 +289,7 @@ def generate():
             video_clip = video_clip.set_audio(comp_audio)
             video_clip = video_clip.set_fps(30)
             video_clip = video_clip.set_duration(original_duration)
-            video_clip.write_videofile(f"../{final_video_path}", threads=2)
+            video_clip.write_videofile(f"../{final_video_path}", threads=n_threads or 1)
 
 
         # Let user know
diff --git a/Backend/utils.py b/Backend/utils.py
index 13f83101..f2d51371 100644
--- a/Backend/utils.py
+++ b/Backend/utils.py
@@ -53,6 +53,9 @@ def fetch_songs(zip_url: str) -> None:
         if not os.path.exists(files_dir):
             os.mkdir(files_dir)
             logger.info(colored(f"Created directory: {files_dir}", "green"))
+        else:
+            # Skip if songs are already downloaded
+            return
 
         # Download songs
         response = requests.get(zip_url)
diff --git a/Backend/video.py b/Backend/video.py
index 65a2ec11..d8014c73 100644
--- a/Backend/video.py
+++ b/Backend/video.py
@@ -130,7 +130,7 @@ def equalize_subtitles(srt_path: str, max_chars: int = 10) -> None:
 
     return subtitles_path
 
-def combine_videos(video_paths: List[str], max_duration: int, max_clip_duration: int) -> str:
+def combine_videos(video_paths: List[str], max_duration: int, max_clip_duration: int, threads: int) -> str:
     """
     Combines a list of videos into one video and returns the path to the combined video.
 
@@ -138,6 +138,7 @@ def combine_videos(video_paths: List[str], max_duration: int, max_clip_duration:
         video_paths (List): A list of paths to the videos to combine.
         max_duration (int): The maximum duration of the combined video.
         max_clip_duration (int): The maximum duration of each clip.
+        threads (int): The number of threads to use for the video processing.
 
     Returns:
         str: The path to the combined video.
@@ -188,12 +189,12 @@ def combine_videos(video_paths: List[str], max_duration: int, max_clip_duration:
 
     final_clip = concatenate_videoclips(clips)
     final_clip = final_clip.set_fps(30)
-    final_clip.write_videofile(combined_video_path, threads=2)
+    final_clip.write_videofile(combined_video_path, threads=threads)
 
     return combined_video_path
 
 
-def generate_video(combined_video_path: str, tts_path: str, subtitles_path: str) -> str:
+def generate_video(combined_video_path: str, tts_path: str, subtitles_path: str, threads: int) -> str:
     """
     This function creates the final video, with subtitles and audio.
 
@@ -201,6 +202,7 @@ def generate_video(combined_video_path: str, tts_path: str, subtitles_path: str)
         combined_video_path (str): The path to the combined video.
         tts_path (str): The path to the text-to-speech audio.
         subtitles_path (str): The path to the subtitles.
+        threads (int): The number of threads to use for the video processing.
 
     Returns:
         str: The path to the final video.
diff --git a/Frontend/app.js b/Frontend/app.js
index 7c878700..ece1dda8 100644
--- a/Frontend/app.js
+++ b/Frontend/app.js
@@ -8,6 +8,18 @@ const useMusicToggle = document.querySelector("#useMusicToggle");
 const generateButton = document.querySelector("#generateButton");
 const cancelButton = document.querySelector("#cancelButton");
 
+const advancedOptionsToggle = document.querySelector("#advancedOptionsToggle");
+
+advancedOptionsToggle.addEventListener("click", () => {
+  // Change Emoji, from ▼ to ▲ and vice versa
+  const emoji = advancedOptionsToggle.textContent;
+  advancedOptionsToggle.textContent = emoji.includes("▼")
+    ? "Show less Options ▲"
+    : "Show Advanced Options ▼";
+  const advancedOptions = document.querySelector("#advancedOptions");
+  advancedOptions.classList.toggle("hidden");
+});
+
 const cancelGeneration = () => {
   console.log("Canceling generation...");
   // Send request to /cancel
@@ -52,6 +64,7 @@ const generateVideo = () => {
   const paragraphNumberValue = paragraphNumber.value;
   const youtubeUpload = youtubeToggle.checked;
   const useMusicToggleState = useMusicToggle.checked;
+  const threads = document.querySelector("#threads").value;
   const zipUrlValue = zipUrl.value;
 
   const url = "http://localhost:8080/api/generate";
@@ -65,6 +78,7 @@ const generateVideo = () => {
     automateYoutubeUpload: youtubeUpload,
     useMusic: useMusicToggleState,
     zipUrl: zipUrlValue,
+    threads: threads,
   };
 
   // Send the actual request to the server
diff --git a/Frontend/index.html b/Frontend/index.html
index 09a5ae72..eb21863a 100644
--- a/Frontend/index.html
+++ b/Frontend/index.html
@@ -24,73 +24,6 @@