diff --git a/Backend/main.py b/Backend/main.py
index 9bed6c55..c078441e 100644
--- a/Backend/main.py
+++ b/Backend/main.py
@@ -52,6 +52,7 @@ def generate():
         paragraph_number = int(data.get('paragraphNumber', 1))  # Default to 1 if not provided
         ai_model = data.get('aiModel')  # Get the AI model selected by the user
         n_threads = data.get('threads')  # Amount of threads to use for video generation
+        subtitles_position = data.get('subtitlesPosition')  # Position of the subtitles in the video
 
         # Get 'useMusic' from the request data and default to False if not provided
         use_music = data.get('useMusic', False)
@@ -216,7 +217,7 @@ def generate():
 
         # Put everything together
         try:
-            final_video_path = generate_video(combined_video_path, tts_path, subtitles_path, n_threads or 2)
+            final_video_path = generate_video(combined_video_path, tts_path, subtitles_path, n_threads or 2, subtitles_position)
         except Exception as e:
             print(colored(f"[-] Error generating final video: {e}", "red"))
             final_video_path = None
diff --git a/Backend/video.py b/Backend/video.py
index 5b82f97f..eed0839b 100644
--- a/Backend/video.py
+++ b/Backend/video.py
@@ -194,7 +194,7 @@ def combine_videos(video_paths: List[str], max_duration: int, max_clip_duration:
 
     return combined_video_path
 
-def generate_video(combined_video_path: str, tts_path: str, subtitles_path: str, threads: int) -> str:
+def generate_video(combined_video_path: str, tts_path: str, subtitles_path: str, threads: int, subtitles_position: str) -> str:
     """
     This function creates the final video, with subtitles and audio.
 
@@ -203,6 +203,7 @@ def generate_video(combined_video_path: str, tts_path: str, subtitles_path: str,
         tts_path (str): The path to the text-to-speech audio.
         subtitles_path (str): The path to the subtitles.
         threads (int): The number of threads to use for the video processing.
+        subtitles_position (str): The position of the subtitles.
 
     Returns:
         str: The path to the final video.
@@ -217,11 +218,14 @@ def generate_video(combined_video_path: str, tts_path: str, subtitles_path: str,
         stroke_width=5,
     )
 
+    # Split the subtitles position into horizontal and vertical
+    horizontal_subtitles_position, vertical_subtitles_position = subtitles_position.split(",")
+
     # Burn the subtitles into the video
     subtitles = SubtitlesClip(subtitles_path, generator)
     result = CompositeVideoClip([
         VideoFileClip(combined_video_path),
-        subtitles.set_pos(("center", "center"))
+        subtitles.set_pos((horizontal_subtitles_position, vertical_subtitles_position))
     ])
 
     # Add the audio
diff --git a/Frontend/app.js b/Frontend/app.js
index ece1dda8..ba99ecb1 100644
--- a/Frontend/app.js
+++ b/Frontend/app.js
@@ -66,6 +66,7 @@ const generateVideo = () => {
   const useMusicToggleState = useMusicToggle.checked;
   const threads = document.querySelector("#threads").value;
   const zipUrlValue = zipUrl.value;
+  const subtitlesPosition = document.querySelector("#subtitlesPosition").value;
 
   const url = "http://localhost:8080/api/generate";
 
@@ -79,6 +80,7 @@ const generateVideo = () => {
     useMusic: useMusicToggleState,
     zipUrl: zipUrlValue,
     threads: threads,
+    subtitlesPosition: subtitlesPosition,
   };
 
   // Send the actual request to the server
diff --git a/Frontend/index.html b/Frontend/index.html
index eb21863a..5827ac04 100644
--- a/Frontend/index.html
+++ b/Frontend/index.html
@@ -97,6 +97,22 @@
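
Below is a minimal usage sketch of the updated generate_video() signature, not part of the patch itself. Only the parameter order and the "horizontal,vertical" format of the position string (split on a comma inside generate_video() before being passed to MoviePy's set_pos()) come from the diff above; the file paths and the "center,bottom" value are illustrative assumptions.

# Usage sketch (assumed paths and position value; run from the Backend/ directory).
from video import generate_video

final_video_path = generate_video(
    combined_video_path="../temp/combined_video.mp4",  # hypothetical path
    tts_path="../temp/tts.mp3",                        # hypothetical path
    subtitles_path="../subtitles/video.srt",           # hypothetical path
    threads=2,
    subtitles_position="center,bottom",                # "horizontal,vertical"
)
print(final_video_path)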