From 73f19d793c32c9d0a5eeddcfceaee385ec7e6e72 Mon Sep 17 00:00:00 2001 From: Bohui WU Date: Thu, 16 Feb 2023 10:51:49 +0800 Subject: [PATCH 01/25] Fix audio out of sync --- auto_subtitle/cli.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/auto_subtitle/cli.py b/auto_subtitle/cli.py index a58d14f..4f3e888 100644 --- a/auto_subtitle/cli.py +++ b/auto_subtitle/cli.py @@ -4,6 +4,7 @@ import argparse import warnings import tempfile +import subprocess from .utils import filename, str2bool, write_srt @@ -71,10 +72,9 @@ def get_audio(paths): print(f"Extracting audio from {filename(path)}...") output_path = os.path.join(temp_dir, f"{filename(path)}.wav") - ffmpeg.input(path).output( - output_path, - acodec="pcm_s16le", ac=1, ar="16k" - ).run(quiet=True, overwrite_output=True) + # Use subprocess instead of the ffmpeg module due to conflicting argument name "async" + if subprocess.run(['ffmpeg', '-y', '-i', path, '-acodec', 'pcm_s16le', '-ac', '1', '-async', '1', output_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode > 0: + raise Exception(f'Error occurred while extracting audio from {filename(path)}') audio_paths[path] = output_path From bf8121a17e7fe36d543eeb019d4d90b03b533dfe Mon Sep 17 00:00:00 2001 From: RapDoodle Date: Fri, 17 Feb 2023 01:06:08 +0800 Subject: [PATCH 02/25] Add wildcard support --- auto_subtitle/cli.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/auto_subtitle/cli.py b/auto_subtitle/cli.py index 4f3e888..ec2bb13 100644 --- a/auto_subtitle/cli.py +++ b/auto_subtitle/cli.py @@ -1,4 +1,5 @@ import os +import glob import ffmpeg import whisper import argparse @@ -11,8 +12,8 @@ def main(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("video", nargs="+", type=str, - help="paths to video files to transcribe") + parser.add_argument("videos", nargs="+", type=str, + help="paths/wildcards to video files 
to transcribe") parser.add_argument("--model", default="small", choices=whisper.available_models(), help="name of the Whisper model to use") parser.add_argument("--output_dir", "-o", type=str, @@ -34,13 +35,29 @@ def main(): srt_only: bool = args.pop("srt_only") os.makedirs(output_dir, exist_ok=True) + # Process wildcards + videos = [] + for video in args['videos']: + videos += list(glob.glob(video)) + n = len(videos) + if n == 0: + print('Video file not found.') + return + elif n > 1: + print('List of videos:') + for i, file in enumerate(videos): + print(f' {i+1}. {file}') + args.pop('videos') + + # Load models if model_name.endswith(".en"): warnings.warn( f"{model_name} is an English-only model, forcing English detection.") args["language"] = "en" model = whisper.load_model(model_name) - audios = get_audio(args.pop("video")) + + audios = get_audio(videos) subtitles = get_subtitles( audios, output_srt or srt_only, output_dir, lambda audio_path: model.transcribe(audio_path, **args) ) From 3000bf3aff8860b1321826f22740840a7253c115 Mon Sep 17 00:00:00 2001 From: RapDoodle Date: Fri, 17 Feb 2023 09:08:27 +0800 Subject: [PATCH 03/25] Add lang option and default audio format to mp3 --- auto_subtitle/cli.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/auto_subtitle/cli.py b/auto_subtitle/cli.py index ec2bb13..2d5fc0f 100644 --- a/auto_subtitle/cli.py +++ b/auto_subtitle/cli.py @@ -27,6 +27,9 @@ def main(): parser.add_argument("--task", type=str, default="transcribe", choices=[ "transcribe", "translate"], help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')") + parser.add_argument("--language", type=str, default=None, + choices=sorted(whisper.tokenizer.LANGUAGES.keys()) + sorted([k.title() for k in whisper.tokenizer.TO_LANGUAGE_CODE.keys()]), + help="language spoken in the audio, specify None to perform language detection") args = parser.parse_args().__dict__ model_name: str = args.pop("model") 
@@ -87,10 +90,10 @@ def get_audio(paths): for path in paths: print(f"Extracting audio from {filename(path)}...") - output_path = os.path.join(temp_dir, f"{filename(path)}.wav") + output_path = os.path.join(temp_dir, f"{filename(path)}.mp3") # Use subprocess instead of the ffmpeg module due to conflicting argument name "async" - if subprocess.run(['ffmpeg', '-y', '-i', path, '-acodec', 'pcm_s16le', '-ac', '1', '-async', '1', output_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode > 0: + if subprocess.run(['ffmpeg', '-y', '-i', path, '-ac', '1', '-async', '1', output_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode > 0: raise Exception(f'Error occurred while extracting audio from {filename(path)}') audio_paths[path] = output_path From 88e20a7b5f1fdbdd5bd950ddaf27ea452aa77895 Mon Sep 17 00:00:00 2001 From: Bohui WU Date: Sat, 18 Feb 2023 01:11:15 +0800 Subject: [PATCH 04/25] Add: Lang option, wildcard, parallel audio extract --- README.md | 14 ++++---- auto_subtitle/cli.py | 74 +++++++++++++++++++++++++++--------------- auto_subtitle/utils.py | 11 +++++++ setup.py | 6 ++-- 4 files changed, 69 insertions(+), 36 deletions(-) diff --git a/README.md b/README.md index 1d21530..9560ad1 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Automatic subtitles in your videos +# Automatic subtitles your videos This repository uses `ffmpeg` and [OpenAI's Whisper](https://openai.com/blog/whisper) to automatically generate and overlay subtitles on any video. @@ -6,7 +6,9 @@ This repository uses `ffmpeg` and [OpenAI's Whisper](https://openai.com/blog/whi To get started, you'll need Python 3.7 or newer. 
Install the binary by running the following command: - pip install git+https://github.com/m1guelpf/auto-subtitle.git +```bash +pip install git+https://github.com/RapDoodle/auto-subtitle.git +``` You'll also need to install [`ffmpeg`](https://ffmpeg.org/), which is available from most package managers: @@ -25,19 +27,19 @@ choco install ffmpeg The following command will generate a `subtitled/video.mp4` file contained the input video with overlayed subtitles. - auto_subtitle /path/to/video.mp4 -o subtitled/ + subtitle /path/to/video.mp4 -o subtitled/ The default setting (which selects the `small` model) works well for transcribing English. You can optionally use a bigger model for better results (especially with other languages). The available models are `tiny`, `tiny.en`, `base`, `base.en`, `small`, `small.en`, `medium`, `medium.en`, `large`. - auto_subtitle /path/to/video.mp4 --model medium + subtitle /path/to/video.mp4 --model medium Adding `--task translate` will translate the subtitles into English: - auto_subtitle /path/to/video.mp4 --task translate + subtitle /path/to/video.mp4 --task translate Run the following to view all available options: - auto_subtitle --help + subtitle --help ## License diff --git a/auto_subtitle/cli.py b/auto_subtitle/cli.py index 2d5fc0f..b466ddf 100644 --- a/auto_subtitle/cli.py +++ b/auto_subtitle/cli.py @@ -1,56 +1,64 @@ import os import glob +import psutil import ffmpeg import whisper import argparse import warnings import tempfile import subprocess -from .utils import filename, str2bool, write_srt +import multiprocessing +from torch.cuda import is_available +from .utils import filename, write_srt, is_audio, ffmpeg_extract_audio def main(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("videos", nargs="+", type=str, + parser.add_argument("paths", nargs="+", type=str, help="paths/wildcards to video files to transcribe") parser.add_argument("--model", default="small", 
choices=whisper.available_models(), help="name of the Whisper model to use") - parser.add_argument("--output_dir", "-o", type=str, + parser.add_argument("--output-dir", "-o", type=str, default=".", help="directory to save the outputs") - parser.add_argument("--output_srt", type=str2bool, default=False, + parser.add_argument("--output-srt", action='store_true', default=False, help="whether to output the .srt file along with the video files") - parser.add_argument("--srt_only", type=str2bool, default=False, + parser.add_argument("--srt-only", action='store_true', default=False, help="only generate the .srt file and not create overlayed video") - parser.add_argument("--verbose", type=str2bool, default=False, + parser.add_argument("--extract-workers", type=int, default=psutil.cpu_count(logical=False), + help="number of workers to extract audio (only useful when there are multiple videos)") + parser.add_argument("--verbose", action='store_true', default=False, help="whether to print out the progress and debug messages") parser.add_argument("--task", type=str, default="transcribe", choices=[ "transcribe", "translate"], help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')") parser.add_argument("--language", type=str, default=None, - choices=sorted(whisper.tokenizer.LANGUAGES.keys()) + sorted([k.title() for k in whisper.tokenizer.TO_LANGUAGE_CODE.keys()]), - help="language spoken in the audio, specify None to perform language detection") + choices=sorted(whisper.tokenizer.LANGUAGES.keys()) + sorted([k.title() for k in whisper.tokenizer.TO_LANGUAGE_CODE.keys()]), + help="language spoken in the audio, specify None to perform language detection") + parser.add_argument("--device", default="cuda" if is_available() else "cpu", help="device to use for PyTorch inference") args = parser.parse_args().__dict__ model_name: str = args.pop("model") output_dir: str = args.pop("output_dir") output_srt: bool = args.pop("output_srt") 
srt_only: bool = args.pop("srt_only") + device: str = args.pop("device") + extract_wokers: str = args.pop('extract_workers') os.makedirs(output_dir, exist_ok=True) # Process wildcards - videos = [] - for video in args['videos']: - videos += list(glob.glob(video)) - n = len(videos) + paths = [] + for path in args['paths']: + paths += list(glob.glob(path)) + n = len(paths) if n == 0: print('Video file not found.') return elif n > 1: print('List of videos:') - for i, file in enumerate(videos): - print(f' {i+1}. {file}') - args.pop('videos') + for i, path in enumerate(paths): + print(f' {i+1}. {path}') + args.pop('paths') # Load models if model_name.endswith(".en"): @@ -58,17 +66,25 @@ def main(): f"{model_name} is an English-only model, forcing English detection.") args["language"] = "en" - model = whisper.load_model(model_name) + model = whisper.load_model(model_name, device=device) - audios = get_audio(videos) + # Extract audio from video. Skip if it is already an audio file + audios = get_audio(paths, extract_wokers) + + # Generate subtitles with whisper subtitles = get_subtitles( - audios, output_srt or srt_only, output_dir, lambda audio_path: model.transcribe(audio_path, **args) + audios, output_srt or srt_only, output_dir, + lambda audio_path: model.transcribe(audio_path, condition_on_previous_text=False, **args) ) if srt_only: return for path, srt_path in subtitles.items(): + # Skip audio files + if is_audio(path): + continue + out_path = os.path.join(output_dir, f"{filename(path)}.mp4") print(f"Adding subtitles to {filename(path)}...") @@ -83,20 +99,24 @@ def main(): print(f"Saved subtitled video to {os.path.abspath(out_path)}.") -def get_audio(paths): +def get_audio(paths, num_workers=1): temp_dir = tempfile.gettempdir() - audio_paths = {} + func_args = [] for path in paths: - print(f"Extracting audio from {filename(path)}...") - output_path = os.path.join(temp_dir, f"{filename(path)}.mp3") - - # Use subprocess instead of the ffmpeg module due to conflicting 
argument name "async" - if subprocess.run(['ffmpeg', '-y', '-i', path, '-ac', '1', '-async', '1', output_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode > 0: - raise Exception(f'Error occurred while extracting audio from {filename(path)}') - + if is_audio(path): + # Skip audio files + output_path = path + else: + output_path = os.path.join(temp_dir, f"{filename(path)}.mp3") + func_args.append((path, output_path)) + audio_paths[path] = output_path + + # Execute on multiple processes + pool = multiprocessing.Pool(num_workers) + pool.starmap(ffmpeg_extract_audio, func_args) return audio_paths diff --git a/auto_subtitle/utils.py b/auto_subtitle/utils.py index ee5515b..7f4af16 100644 --- a/auto_subtitle/utils.py +++ b/auto_subtitle/utils.py @@ -1,4 +1,5 @@ import os +import subprocess from typing import Iterator, TextIO @@ -44,3 +45,13 @@ def write_srt(transcript: Iterator[dict], file: TextIO): def filename(path): return os.path.splitext(os.path.basename(path))[0] + + +def is_audio(path): + return True if path.endswith(('.mp3', '.wav', '.flac', '.m4a', '.wma', '.aac')) else False + + +def ffmpeg_extract_audio(input_path, output_path): + print(f"Extracting audio from {filename(input_path)}...") + if subprocess.run(('ffmpeg', '-y', '-i', input_path, '-ac', '1', '-async', '1', output_path), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode > 0: + raise Exception(f'Error occurred while extracting audio from {filename(input_path)}') diff --git a/setup.py b/setup.py index ca2ed5b..2b78b3b 100644 --- a/setup.py +++ b/setup.py @@ -5,13 +5,13 @@ name="auto_subtitle", packages=find_packages(), py_modules=["auto_subtitle"], - author="Miguel Piedrafita", + author="Miguel Piedrafita, Bohui WU", install_requires=[ 'openai-whisper', ], - description="Automatically generate and embed subtitles into your videos", + description="Automatically generate and/or embed subtitles into your videos", entry_points={ - 'console_scripts': 
['auto_subtitle=auto_subtitle.cli:main'], + 'console_scripts': ['subtitle=auto_subtitle.cli:main'], }, include_package_data=True, ) From e79e26f6a8efef7bcdeb869af8c64887efe926fc Mon Sep 17 00:00:00 2001 From: Bohui WU Date: Sat, 18 Feb 2023 15:02:03 +0800 Subject: [PATCH 05/25] Update command options --- README.md | 24 +++++++++++++++--- auto_subtitle/cli.py | 56 ++++++++++++++++++++++++++---------------- auto_subtitle/utils.py | 6 ++--- requirements.txt | 1 + setup.py | 1 + 5 files changed, 61 insertions(+), 27 deletions(-) diff --git a/README.md b/README.md index 9560ad1..05f951a 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,20 @@ This repository uses `ffmpeg` and [OpenAI's Whisper](https://openai.com/blog/whisper) to automatically generate and overlay subtitles on any video. +## About the fork + +This repository is a fork of [m1guelpf's auto-subtitle](https://github.com/m1guelpf/auto-subtitle) with additional features added. I am trying to push some of the new features into the m1guelpf's repository. + +The list of newly added features: + +- Fix audio out of sync issue +- Wildcard support for filenames +- Convert audio to subtitles (output `.srt` files) +- Option to pick a language instead of using language auto detection +- Extract audio from videos in parallel +- Disable `condition_on_previous_text` by default to avoid stucking in failure loop (especially for videos with long intervals between talks), with option `--enhance-consistency` to enable it. +- Many more new command options + ## Installation To get started, you'll need Python 3.7 or newer. Install the binary by running the following command: @@ -27,15 +41,19 @@ choco install ffmpeg The following command will generate a `subtitled/video.mp4` file contained the input video with overlayed subtitles. 
- subtitle /path/to/video.mp4 -o subtitled/ + subtitle /path/to/video.mp4 --output-video -o subtitled/ + +Convert all `mp4` videos in the current directory to `.srt` subtitles and store it in the current directory + + subtitle *.mp4 --output-srt The default setting (which selects the `small` model) works well for transcribing English. You can optionally use a bigger model for better results (especially with other languages). The available models are `tiny`, `tiny.en`, `base`, `base.en`, `small`, `small.en`, `medium`, `medium.en`, `large`. - subtitle /path/to/video.mp4 --model medium + subtitle /path/to/video.mp4 --output-srt --model medium Adding `--task translate` will translate the subtitles into English: - subtitle /path/to/video.mp4 --task translate + subtitle /path/to/video.mp4 --output-srt --task translate Run the following to view all available options: diff --git a/auto_subtitle/cli.py b/auto_subtitle/cli.py index b466ddf..7d338f0 100644 --- a/auto_subtitle/cli.py +++ b/auto_subtitle/cli.py @@ -9,7 +9,7 @@ import subprocess import multiprocessing from torch.cuda import is_available -from .utils import filename, write_srt, is_audio, ffmpeg_extract_audio +from .utils import get_filename, write_srt, is_audio, ffmpeg_extract_audio def main(): @@ -21,14 +21,18 @@ def main(): choices=whisper.available_models(), help="name of the Whisper model to use") parser.add_argument("--output-dir", "-o", type=str, default=".", help="directory to save the outputs") - parser.add_argument("--output-srt", action='store_true', default=False, - help="whether to output the .srt file along with the video files") - parser.add_argument("--srt-only", action='store_true', default=False, - help="only generate the .srt file and not create overlayed video") - parser.add_argument("--extract-workers", type=int, default=psutil.cpu_count(logical=False), + parser.add_argument("--output-srt", "-s", action='store_true', default=False, + help="output the .srt file in the output directory") + 
parser.add_argument("--output-audio", "-a", action='store_true', default=False, + help="output the audio extracted") + parser.add_argument("--output-video", "-v", action='store_true', default=False, + help="generate video with embedded subtitles") + parser.add_argument("--enhance-consistency", action='store_true', default=False, + help="use the previous output as input to the next window to improve consistency (may stuck in a failure loop)") + parser.add_argument("--extract-workers", type=int, default=max(1, psutil.cpu_count(logical=False) // 2), help="number of workers to extract audio (only useful when there are multiple videos)") parser.add_argument("--verbose", action='store_true', default=False, - help="whether to print out the progress and debug messages") + help="print out the progress and debug messages") parser.add_argument("--task", type=str, default="transcribe", choices=[ "transcribe", "translate"], help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')") @@ -41,11 +45,17 @@ def main(): model_name: str = args.pop("model") output_dir: str = args.pop("output_dir") output_srt: bool = args.pop("output_srt") - srt_only: bool = args.pop("srt_only") + output_video: bool = args.pop("output_video") + output_audio: bool = args.pop("output_audio") device: str = args.pop("device") - extract_wokers: str = args.pop('extract_workers') + extract_wokers: str = args.pop("extract_workers") + enhace_consistency: bool = args.pop("enhance_consistency") os.makedirs(output_dir, exist_ok=True) + # Default output_srt to True if output_video is False + if not output_video and not output_srt: + output_srt = True + # Process wildcards paths = [] for path in args['paths']: @@ -69,15 +79,15 @@ def main(): model = whisper.load_model(model_name, device=device) # Extract audio from video. 
Skip if it is already an audio file - audios = get_audio(paths, extract_wokers) + audios = get_audio(paths, output_audio, output_dir, extract_wokers) # Generate subtitles with whisper subtitles = get_subtitles( - audios, output_srt or srt_only, output_dir, - lambda audio_path: model.transcribe(audio_path, condition_on_previous_text=False, **args) + audios, output_srt, output_dir, + lambda audio_path: model.transcribe(audio_path, condition_on_previous_text=enhace_consistency, **args) ) - if srt_only: + if not output_video: return for path, srt_path in subtitles.items(): @@ -85,21 +95,24 @@ def main(): if is_audio(path): continue - out_path = os.path.join(output_dir, f"{filename(path)}.mp4") - - print(f"Adding subtitles to {filename(path)}...") + print(f"Adding subtitles to {path}...") + + out_path = os.path.join(output_dir, f"{get_filename(path)}.mp4") + if os.path.exists(out_path) and os.path.samefile(path, out_path): + out_path = os.path.join(output_dir, f"{get_filename(path)}-subtitled.mp4") + warnings.warn(f"{path} will overwrite the original file. 
Renaming the output file to {out_path}") video = ffmpeg.input(path) audio = video.audio ffmpeg.concat( video.filter('subtitles', srt_path, force_style="OutlineColour=&H40000000,BorderStyle=3"), audio, v=1, a=1 - ).output(out_path).run(quiet=True, overwrite_output=True) + ).output(out_path).run(quiet=False, overwrite_output=True) print(f"Saved subtitled video to {os.path.abspath(out_path)}.") -def get_audio(paths, num_workers=1): +def get_audio(paths, output_audio, output_dir, num_workers=1): temp_dir = tempfile.gettempdir() audio_paths = {} func_args = [] @@ -109,7 +122,8 @@ def get_audio(paths, num_workers=1): # Skip audio files output_path = path else: - output_path = os.path.join(temp_dir, f"{filename(path)}.mp3") + output_path = output_dir if output_audio else tempfile.gettempdir() + output_path = os.path.join(output_path, f"{get_filename(path)}.mp3") func_args.append((path, output_path)) audio_paths[path] = output_path @@ -126,10 +140,10 @@ def get_subtitles(audio_paths: list, output_srt: bool, output_dir: str, transcri for path, audio_path in audio_paths.items(): srt_path = output_dir if output_srt else tempfile.gettempdir() - srt_path = os.path.join(srt_path, f"{filename(path)}.srt") + srt_path = os.path.join(srt_path, f"{get_filename(path)}.srt") print( - f"Generating subtitles for {filename(path)}... This might take a while." + f"Generating subtitles for {path}... This might take a while." 
) warnings.filterwarnings("ignore") diff --git a/auto_subtitle/utils.py b/auto_subtitle/utils.py index 7f4af16..a354ff2 100644 --- a/auto_subtitle/utils.py +++ b/auto_subtitle/utils.py @@ -43,7 +43,7 @@ def write_srt(transcript: Iterator[dict], file: TextIO): ) -def filename(path): +def get_filename(path): return os.path.splitext(os.path.basename(path))[0] @@ -52,6 +52,6 @@ def is_audio(path): def ffmpeg_extract_audio(input_path, output_path): - print(f"Extracting audio from {filename(input_path)}...") + print(f"Extracting audio from {input_path}...") if subprocess.run(('ffmpeg', '-y', '-i', input_path, '-ac', '1', '-async', '1', output_path), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode > 0: - raise Exception(f'Error occurred while extracting audio from {filename(input_path)}') + raise Exception(f'Error occurred while extracting audio from {input_path}') diff --git a/requirements.txt b/requirements.txt index 73bca28..6fb0566 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1,2 @@ openai-whisper +psutil diff --git a/setup.py b/setup.py index 2b78b3b..e50a8aa 100644 --- a/setup.py +++ b/setup.py @@ -8,6 +8,7 @@ author="Miguel Piedrafita, Bohui WU", install_requires=[ 'openai-whisper', + 'psutil' ], description="Automatically generate and/or embed subtitles into your videos", entry_points={ From 78a9b58d6d8f9c9051976a0b4bcb70d8380c44df Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 26 Feb 2023 15:16:43 +0100 Subject: [PATCH 06/25] First edit - add spanish --- .../__init__.py | 0 {auto_subtitle => auto_subtitle_plus}/cli.py | 6 +- .../utils.py | 0 build/lib/auto_subtitle_plus/__init__.py | 0 build/lib/auto_subtitle_plus/cli.py | 110 ++++++++++++++++++ build/lib/auto_subtitle_plus/utils.py | 46 ++++++++ setup.py | 13 ++- 7 files changed, 167 insertions(+), 8 deletions(-) rename {auto_subtitle => auto_subtitle_plus}/__init__.py (100%) rename {auto_subtitle => auto_subtitle_plus}/cli.py (91%) rename {auto_subtitle => 
auto_subtitle_plus}/utils.py (100%) create mode 100644 build/lib/auto_subtitle_plus/__init__.py create mode 100644 build/lib/auto_subtitle_plus/cli.py create mode 100644 build/lib/auto_subtitle_plus/utils.py diff --git a/auto_subtitle/__init__.py b/auto_subtitle_plus/__init__.py similarity index 100% rename from auto_subtitle/__init__.py rename to auto_subtitle_plus/__init__.py diff --git a/auto_subtitle/cli.py b/auto_subtitle_plus/cli.py similarity index 91% rename from auto_subtitle/cli.py rename to auto_subtitle_plus/cli.py index a58d14f..65b2bc1 100644 --- a/auto_subtitle/cli.py +++ b/auto_subtitle_plus/cli.py @@ -14,6 +14,8 @@ def main(): help="paths to video files to transcribe") parser.add_argument("--model", default="small", choices=whisper.available_models(), help="name of the Whisper model to use") + parser.add_argument("--language", choices=["en", "es", "auto"], type=str, default=["auto"], + help="force language to the chosen one: en or es, else autodetect") parser.add_argument("--output_dir", "-o", type=str, default=".", help="directory to save the outputs") parser.add_argument("--output_srt", type=str2bool, default=False, @@ -33,9 +35,9 @@ def main(): srt_only: bool = args.pop("srt_only") os.makedirs(output_dir, exist_ok=True) - if model_name.endswith(".en"): + if args["language"] == "auto" or args["language"] == "" or args["language"] == None or model_name.endswith(".en"): warnings.warn( - f"{model_name} is an English-only model, forcing English detection.") + "forcing English detection") args["language"] = "en" model = whisper.load_model(model_name) diff --git a/auto_subtitle/utils.py b/auto_subtitle_plus/utils.py similarity index 100% rename from auto_subtitle/utils.py rename to auto_subtitle_plus/utils.py diff --git a/build/lib/auto_subtitle_plus/__init__.py b/build/lib/auto_subtitle_plus/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/build/lib/auto_subtitle_plus/cli.py b/build/lib/auto_subtitle_plus/cli.py new file mode 
100644 index 0000000..65b2bc1 --- /dev/null +++ b/build/lib/auto_subtitle_plus/cli.py @@ -0,0 +1,110 @@ +import os +import ffmpeg +import whisper +import argparse +import warnings +import tempfile +from .utils import filename, str2bool, write_srt + + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument("video", nargs="+", type=str, + help="paths to video files to transcribe") + parser.add_argument("--model", default="small", + choices=whisper.available_models(), help="name of the Whisper model to use") + parser.add_argument("--language", choices=["en", "es", "auto"], type=str, default=["auto"], + help="force language to the chosen one: en or es, else autodetect") + parser.add_argument("--output_dir", "-o", type=str, + default=".", help="directory to save the outputs") + parser.add_argument("--output_srt", type=str2bool, default=False, + help="whether to output the .srt file along with the video files") + parser.add_argument("--srt_only", type=str2bool, default=False, + help="only generate the .srt file and not create overlayed video") + parser.add_argument("--verbose", type=str2bool, default=False, + help="whether to print out the progress and debug messages") + + parser.add_argument("--task", type=str, default="transcribe", choices=[ + "transcribe", "translate"], help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')") + + args = parser.parse_args().__dict__ + model_name: str = args.pop("model") + output_dir: str = args.pop("output_dir") + output_srt: bool = args.pop("output_srt") + srt_only: bool = args.pop("srt_only") + os.makedirs(output_dir, exist_ok=True) + + if args["language"] == "auto" or args["language"] == "" or args["language"] == None or model_name.endswith(".en"): + warnings.warn( + "forcing English detection") + args["language"] = "en" + + model = whisper.load_model(model_name) + audios = get_audio(args.pop("video")) + 
subtitles = get_subtitles( + audios, output_srt or srt_only, output_dir, lambda audio_path: model.transcribe(audio_path, **args) + ) + + if srt_only: + return + + for path, srt_path in subtitles.items(): + out_path = os.path.join(output_dir, f"{filename(path)}.mp4") + + print(f"Adding subtitles to {filename(path)}...") + + video = ffmpeg.input(path) + audio = video.audio + + ffmpeg.concat( + video.filter('subtitles', srt_path, force_style="OutlineColour=&H40000000,BorderStyle=3"), audio, v=1, a=1 + ).output(out_path).run(quiet=True, overwrite_output=True) + + print(f"Saved subtitled video to {os.path.abspath(out_path)}.") + + +def get_audio(paths): + temp_dir = tempfile.gettempdir() + + audio_paths = {} + + for path in paths: + print(f"Extracting audio from {filename(path)}...") + output_path = os.path.join(temp_dir, f"{filename(path)}.wav") + + ffmpeg.input(path).output( + output_path, + acodec="pcm_s16le", ac=1, ar="16k" + ).run(quiet=True, overwrite_output=True) + + audio_paths[path] = output_path + + return audio_paths + + +def get_subtitles(audio_paths: list, output_srt: bool, output_dir: str, transcribe: callable): + subtitles_path = {} + + for path, audio_path in audio_paths.items(): + srt_path = output_dir if output_srt else tempfile.gettempdir() + srt_path = os.path.join(srt_path, f"{filename(path)}.srt") + + print( + f"Generating subtitles for {filename(path)}... This might take a while." 
+ ) + + warnings.filterwarnings("ignore") + result = transcribe(audio_path) + warnings.filterwarnings("default") + + with open(srt_path, "w", encoding="utf-8") as srt: + write_srt(result["segments"], file=srt) + + subtitles_path[path] = srt_path + + return subtitles_path + + +if __name__ == '__main__': + main() diff --git a/build/lib/auto_subtitle_plus/utils.py b/build/lib/auto_subtitle_plus/utils.py new file mode 100644 index 0000000..ee5515b --- /dev/null +++ b/build/lib/auto_subtitle_plus/utils.py @@ -0,0 +1,46 @@ +import os +from typing import Iterator, TextIO + + +def str2bool(string): + string = string.lower() + str2val = {"true": True, "false": False} + + if string in str2val: + return str2val[string] + else: + raise ValueError( + f"Expected one of {set(str2val.keys())}, got {string}") + + +def format_timestamp(seconds: float, always_include_hours: bool = False): + assert seconds >= 0, "non-negative timestamp expected" + milliseconds = round(seconds * 1000.0) + + hours = milliseconds // 3_600_000 + milliseconds -= hours * 3_600_000 + + minutes = milliseconds // 60_000 + milliseconds -= minutes * 60_000 + + seconds = milliseconds // 1_000 + milliseconds -= seconds * 1_000 + + hours_marker = f"{hours}:" if always_include_hours or hours > 0 else "" + return f"{hours_marker}{minutes:02d}:{seconds:02d}.{milliseconds:03d}" + + +def write_srt(transcript: Iterator[dict], file: TextIO): + for i, segment in enumerate(transcript, start=1): + print( + f"{i}\n" + f"{format_timestamp(segment['start'], always_include_hours=True)} --> " + f"{format_timestamp(segment['end'], always_include_hours=True)}\n" + f"{segment['text'].strip().replace('-->', '->')}\n", + file=file, + flush=True, + ) + + +def filename(path): + return os.path.splitext(os.path.basename(path))[0] diff --git a/setup.py b/setup.py index ca2ed5b..ac6ff90 100644 --- a/setup.py +++ b/setup.py @@ -1,17 +1,18 @@ from setuptools import setup, find_packages setup( - version="1.0", - name="auto_subtitle", + 
version="0.1", + name="auto_subtitle_plus", packages=find_packages(), - py_modules=["auto_subtitle"], - author="Miguel Piedrafita", + py_modules=["auto_subtitle_plus"], + author="Sectux - based on the work of Miguel Piedrafita", install_requires=[ - 'openai-whisper', + 'youtube-dl', + 'openai-whisper @ git+https://github.com/openai/whisper.git@main#egg=whisper' ], description="Automatically generate and embed subtitles into your videos", entry_points={ - 'console_scripts': ['auto_subtitle=auto_subtitle.cli:main'], + 'console_scripts': ['auto_subtitle_plus=auto_subtitle_plus.cli:main'], }, include_package_data=True, ) From 97548f3adc92b54c7c068e19cf045e0fe8fbc077 Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 26 Feb 2023 15:30:09 +0100 Subject: [PATCH 07/25] update Readme --- auto_subtitle_plus/cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auto_subtitle_plus/cli.py b/auto_subtitle_plus/cli.py index 65b2bc1..9d28627 100644 --- a/auto_subtitle_plus/cli.py +++ b/auto_subtitle_plus/cli.py @@ -15,7 +15,7 @@ def main(): parser.add_argument("--model", default="small", choices=whisper.available_models(), help="name of the Whisper model to use") parser.add_argument("--language", choices=["en", "es", "auto"], type=str, default=["auto"], - help="force language to the chosen one: en or es, else autodetect") + help="force language to the chosen one: en or es, default: auto") parser.add_argument("--output_dir", "-o", type=str, default=".", help="directory to save the outputs") parser.add_argument("--output_srt", type=str2bool, default=False, From 2d11997dd8e7d25102b68d10333534b60af35a48 Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 26 Feb 2023 15:32:46 +0100 Subject: [PATCH 08/25] update Readme --- README.md | 74 +++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 67 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 1d21530..691686b 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,20 @@ -# 
Automatic subtitles in your videos +## Forked from https://github.com/m1guelpf/auto-subtitle + + Bigup to m1guelpf for releasing this tool + +## Why forking it + + Because it needed some fixes, installer, dependencies.... and I wanted also to make it more flexible + +## Advantages of this version (so far) + + - Can force subtitles to be generated in spanish + + - Updated dependencies + + + +## Automatic subtitles in your videos This repository uses `ffmpeg` and [OpenAI's Whisper](https://openai.com/blog/whisper) to automatically generate and overlay subtitles on any video. @@ -6,7 +22,7 @@ This repository uses `ffmpeg` and [OpenAI's Whisper](https://openai.com/blog/whi To get started, you'll need Python 3.7 or newer. Install the binary by running the following command: - pip install git+https://github.com/m1guelpf/auto-subtitle.git + pip install git+https://github.com/Sectumsempra82/auto-subtitle-plus.git You'll also need to install [`ffmpeg`](https://ffmpeg.org/), which is available from most package managers: @@ -21,23 +37,67 @@ brew install ffmpeg choco install ffmpeg ``` +## How to make it use your GPU for 3x faster generations + +Follow thsese instructions only if your gpu is powerful enough to be worth switching to torch-cuda + + - pip uninstall torch + + - pip cache purge + + - pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116 + +## Options + +--model - name of the Whisper model to use, the larger the better and slower - OPTIONS: `tiny`, `tiny.en`, `base`, `base.en`, `small`, `small.en`, `medium`, `medium.en`, `large` + +--language - used to force the subtitle output language - OPTIONS: `en`, `es`, `auto` - DEFAULT: `auto` + +--output_dir - directory to save the outputs + +--output_srt - whether to output the .srt file along with the video files - OPTIONS: True, False - DEFAULT: False + +--srt_only - only generate the .srt file and not create overlayed video - OPTIONS: True, False - DEFAULT: `False` + 
+--verbose - whether to print out the progress and debug messages - OPTIONS: True, False - DEFAULT: `False` + +--task - whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate') - OPTIONS: `transcribe`, `translate` - DEFAULT: `transcribe` + + + ## Usage The following command will generate a `subtitled/video.mp4` file contained the input video with overlayed subtitles. - auto_subtitle /path/to/video.mp4 -o subtitled/ + auto_subtitle_plus.exe /path/to/video.mp4 -o subtitled/ + +---------------------- Recommended---------------- + +The following command will only generate an `.srt` file next to your video + + auto_subtitle.exe '..\The Big Bang Theory 16.avi' --model medium --output_srt True --srt_only True + +-------------------------------------------------- + +The default setting (which selects the `small` model) works well for transcribing English and Spanish to a certain extent. + +--------------- NEW ------------------------------------------------------ + +You can use the --language parameter to force english or spanish output + +-------------------------------------------------------------------------- -The default setting (which selects the `small` model) works well for transcribing English. You can optionally use a bigger model for better results (especially with other languages). The available models are `tiny`, `tiny.en`, `base`, `base.en`, `small`, `small.en`, `medium`, `medium.en`, `large`. +You can optionally use a bigger model for better results (especially with other languages). The available models are `tiny`, `tiny.en`, `base`, `base.en`, `small`, `small.en`, `medium`, `medium.en`, `large`. 
- auto_subtitle /path/to/video.mp4 --model medium + auto_subtitle_plus.exe /path/to/video.mp4 --model medium Adding `--task translate` will translate the subtitles into English: - auto_subtitle /path/to/video.mp4 --task translate + auto_subtitle_plus.exe /path/to/video.mp4 --task translate Run the following to view all available options: - auto_subtitle --help + auto_subtitle_plus.exe --help ## License From 45297554b7ac7512cef17bbd507762f6e6ebeb98 Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 26 Feb 2023 15:33:54 +0100 Subject: [PATCH 09/25] update readme bis --- README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 691686b..d9b7c47 100644 --- a/README.md +++ b/README.md @@ -49,19 +49,19 @@ Follow thsese instructions only if your gpu is powerful enough to be worth switc ## Options ---model - name of the Whisper model to use, the larger the better and slower - OPTIONS: `tiny`, `tiny.en`, `base`, `base.en`, `small`, `small.en`, `medium`, `medium.en`, `large` +`--model` - name of the Whisper model to use, the larger the better and slower - OPTIONS: `tiny`, `tiny.en`, `base`, `base.en`, `small`, `small.en`, `medium`, `medium.en`, `large` ---language - used to force the subtitle output language - OPTIONS: `en`, `es`, `auto` - DEFAULT: `auto` +`--language` - used to force the subtitle output language - OPTIONS: `en`, `es`, `auto` - DEFAULT: `auto` ---output_dir - directory to save the outputs +`--output_dir` - directory to save the outputs ---output_srt - whether to output the .srt file along with the video files - OPTIONS: True, False - DEFAULT: False +`--output_srt` - whether to output the .srt file along with the video files - OPTIONS: `True`, `False` - DEFAULT: `False` ---srt_only - only generate the .srt file and not create overlayed video - OPTIONS: True, False - DEFAULT: `False` +`--srt_only` - only generate the .srt file and not create overlayed video - OPTIONS: `True`, `False` - DEFAULT: 
`False` ---verbose - whether to print out the progress and debug messages - OPTIONS: True, False - DEFAULT: `False` +`--verbose` - whether to print out the progress and debug messages - OPTIONS: `True`, `False` - DEFAULT: `False` ---task - whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate') - OPTIONS: `transcribe`, `translate` - DEFAULT: `transcribe` +`--task` - whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate') - OPTIONS: `transcribe`, `translate` - DEFAULT: `transcribe` From 0b870d224a99db9c646f9f9ddca7fe8a37453167 Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 26 Feb 2023 15:40:53 +0100 Subject: [PATCH 10/25] update readme tris --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index d9b7c47..0d8a1f9 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,10 @@ Because it needed some fixes, installer, dependencies.... and I wanted also to make it more flexible + The first iteration of this tool consistently failed generating spanish subtitles in movies that start with english songs, + + even though all the movie is in spanish... this should ifx it by manually forcing the language in the parameters. 
+ ## Advantages of this version (so far) - Can force subtitles to be generated in spanish From bb267efd211212da69bd809285cdf749161452c3 Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 26 Feb 2023 16:34:48 +0100 Subject: [PATCH 11/25] fix merge --- auto_subtitle_plus/cli.py | 120 ++++++++++++++++++++++++++---------- auto_subtitle_plus/utils.py | 13 +++- 2 files changed, 98 insertions(+), 35 deletions(-) diff --git a/auto_subtitle_plus/cli.py b/auto_subtitle_plus/cli.py index 9d28627..6f3c5a9 100644 --- a/auto_subtitle_plus/cli.py +++ b/auto_subtitle_plus/cli.py @@ -1,84 +1,136 @@ import os +import glob +import psutil import ffmpeg import whisper import argparse import warnings import tempfile -from .utils import filename, str2bool, write_srt +import subprocess +import multiprocessing +from torch.cuda import is_available +from .utils import get_filename, write_srt, is_audio, ffmpeg_extract_audio def main(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("video", nargs="+", type=str, - help="paths to video files to transcribe") + parser.add_argument("paths", nargs="+", type=str, + help="paths/wildcards to video files to transcribe") parser.add_argument("--model", default="small", choices=whisper.available_models(), help="name of the Whisper model to use") - parser.add_argument("--language", choices=["en", "es", "auto"], type=str, default=["auto"], - help="force language to the chosen one: en or es, default: auto") - parser.add_argument("--output_dir", "-o", type=str, + parser.add_argument("--output-dir", "-o", type=str, default=".", help="directory to save the outputs") - parser.add_argument("--output_srt", type=str2bool, default=False, - help="whether to output the .srt file along with the video files") - parser.add_argument("--srt_only", type=str2bool, default=False, - help="only generate the .srt file and not create overlayed video") - parser.add_argument("--verbose", type=str2bool, 
default=False, - help="whether to print out the progress and debug messages") + parser.add_argument("--output-srt", "-s", action='store_true', default=False, + help="output the .srt file in the output directory") + parser.add_argument("--output-audio", "-a", action='store_true', default=False, + help="output the audio extracted") + parser.add_argument("--output-video", "-v", action='store_true', default=False, + help="generate video with embedded subtitles") + parser.add_argument("--enhance-consistency", action='store_true', default=False, + help="use the previous output as input to the next window to improve consistency (may stuck in a failure loop)") + parser.add_argument("--extract-workers", type=int, default=max(1, psutil.cpu_count(logical=False) // 2), + help="number of workers to extract audio (only useful when there are multiple videos)") + parser.add_argument("--verbose", action='store_true', default=False, + help="print out the progress and debug messages") parser.add_argument("--task", type=str, default="transcribe", choices=[ "transcribe", "translate"], help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')") + parser.add_argument("--language", type=str, default=None, + choices=sorted(whisper.tokenizer.LANGUAGES.keys()) + sorted([k.title() for k in whisper.tokenizer.TO_LANGUAGE_CODE.keys()]), + help="language spoken in the audio, specify None to perform language detection") + parser.add_argument("--device", default="cuda" if is_available() else "cpu", help="device to use for PyTorch inference") args = parser.parse_args().__dict__ model_name: str = args.pop("model") output_dir: str = args.pop("output_dir") output_srt: bool = args.pop("output_srt") - srt_only: bool = args.pop("srt_only") + output_video: bool = args.pop("output_video") + output_audio: bool = args.pop("output_audio") + device: str = args.pop("device") + extract_wokers: str = args.pop("extract_workers") + enhace_consistency: bool = 
args.pop("enhance_consistency") os.makedirs(output_dir, exist_ok=True) - if args["language"] == "auto" or args["language"] == "" or args["language"] == None or model_name.endswith(".en"): + # Default output_srt to True if output_video is False + if not output_video and not output_srt: + output_srt = True + + # Process wildcards + paths = [] + for path in args['paths']: + paths += list(glob.glob(path)) + n = len(paths) + if n == 0: + print('Video file not found.') + return + elif n > 1: + print('List of videos:') + for i, path in enumerate(paths): + print(f' {i+1}. {path}') + args.pop('paths') + + # Load models + if model_name.endswith(".en"): warnings.warn( "forcing English detection") args["language"] = "en" - model = whisper.load_model(model_name) - audios = get_audio(args.pop("video")) + model = whisper.load_model(model_name, device=device) + + # Extract audio from video. Skip if it is already an audio file + audios = get_audio(paths, output_audio, output_dir, extract_wokers) + + # Generate subtitles with whisper subtitles = get_subtitles( - audios, output_srt or srt_only, output_dir, lambda audio_path: model.transcribe(audio_path, **args) + audios, output_srt, output_dir, + lambda audio_path: model.transcribe(audio_path, condition_on_previous_text=enhace_consistency, **args) ) - if srt_only: + if not output_video: return for path, srt_path in subtitles.items(): - out_path = os.path.join(output_dir, f"{filename(path)}.mp4") - - print(f"Adding subtitles to {filename(path)}...") + # Skip audio files + if is_audio(path): + continue + + print(f"Adding subtitles to {path}...") + + out_path = os.path.join(output_dir, f"{get_filename(path)}.mp4") + if os.path.exists(out_path) and os.path.samefile(path, out_path): + out_path = os.path.join(output_dir, f"{get_filename(path)}-subtitled.mp4") + warnings.warn(f"{path} will overwrite the original file. 
Renaming the output file to {out_path}") video = ffmpeg.input(path) audio = video.audio ffmpeg.concat( video.filter('subtitles', srt_path, force_style="OutlineColour=&H40000000,BorderStyle=3"), audio, v=1, a=1 - ).output(out_path).run(quiet=True, overwrite_output=True) + ).output(out_path).run(quiet=False, overwrite_output=True) print(f"Saved subtitled video to {os.path.abspath(out_path)}.") -def get_audio(paths): +def get_audio(paths, output_audio, output_dir, num_workers=1): temp_dir = tempfile.gettempdir() - audio_paths = {} + func_args = [] for path in paths: - print(f"Extracting audio from {filename(path)}...") - output_path = os.path.join(temp_dir, f"{filename(path)}.wav") - - ffmpeg.input(path).output( - output_path, - acodec="pcm_s16le", ac=1, ar="16k" - ).run(quiet=True, overwrite_output=True) - + if is_audio(path): + # Skip audio files + output_path = path + else: + output_path = output_dir if output_audio else tempfile.gettempdir() + output_path = os.path.join(output_path, f"{get_filename(path)}.mp3") + func_args.append((path, output_path)) + audio_paths[path] = output_path + + # Execute on multiple processes + pool = multiprocessing.Pool(num_workers) + pool.starmap(ffmpeg_extract_audio, func_args) return audio_paths @@ -88,10 +140,10 @@ def get_subtitles(audio_paths: list, output_srt: bool, output_dir: str, transcri for path, audio_path in audio_paths.items(): srt_path = output_dir if output_srt else tempfile.gettempdir() - srt_path = os.path.join(srt_path, f"{filename(path)}.srt") + srt_path = os.path.join(srt_path, f"{get_filename(path)}.srt") print( - f"Generating subtitles for {filename(path)}... This might take a while." + f"Generating subtitles for {path}... This might take a while." 
) warnings.filterwarnings("ignore") diff --git a/auto_subtitle_plus/utils.py b/auto_subtitle_plus/utils.py index ee5515b..a354ff2 100644 --- a/auto_subtitle_plus/utils.py +++ b/auto_subtitle_plus/utils.py @@ -1,4 +1,5 @@ import os +import subprocess from typing import Iterator, TextIO @@ -42,5 +43,15 @@ def write_srt(transcript: Iterator[dict], file: TextIO): ) -def filename(path): +def get_filename(path): return os.path.splitext(os.path.basename(path))[0] + + +def is_audio(path): + return True if path.endswith(('.mp3', '.wav', '.flac', '.m4a', '.wma', '.aac')) else False + + +def ffmpeg_extract_audio(input_path, output_path): + print(f"Extracting audio from {input_path}...") + if subprocess.run(('ffmpeg', '-y', '-i', input_path, '-ac', '1', '-async', '1', output_path), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode > 0: + raise Exception(f'Error occurred while extracting audio from {input_path}') From fa8b83361bbb99641b6da9042ae86b0f29f3aea7 Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 26 Feb 2023 16:41:54 +0100 Subject: [PATCH 12/25] update readme --- README.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 7815d99..92f49c7 100644 --- a/README.md +++ b/README.md @@ -89,7 +89,7 @@ Convert all `mp4` videos in the current directory to `.srt` subtitles and store The following command will only generate an `.srt` file next to your video - auto_subtitle.exe '..\The Big Bang Theory 16.avi' --model medium --output_srt True --srt_only True + auto_subtitle_plus.exe 'video.avi' --model medium --output-srt -------------------------------------------------- @@ -97,13 +97,15 @@ The default setting (which selects the `small` model) works well for transcribin --------------- NEW ------------------------------------------------------ -You can use the --language parameter to force english or spanish output +You can use the --language parameter to force the output for the following languages: + 
+Afrikaans,Albanian,Amharic,Arabic,Armenian,Assamese,Azerbaijani,Bashkir,Basque,Belarusian,Bengali,Bosnian,Breton,Bulgarian,Burmese,Castilian,Catalan,Chinese,Croatian,Czech,Danish,Dutch,English,Estonian,Faroese,Finnish,Flemish,French,Galician,Georgian,German,Greek,Gujarati,Haitian,Haitian Creole,Hausa,Hawaiian,Hebrew,Hindi,Hungarian,Icelandic,Indonesian,Italian,Japanese,Javanese,Kannada,Kazakh,Khmer,Korean,Lao,Latin,Latvian,Letzeburgesch,Lingala,Lithuanian,Luxembourgish,Macedonian,Malagasy,Malay,Malayalam,Maltese,Maori,Marathi,Moldavian,Moldovan,Mongolian,Myanmar,Nepali,Norwegian,Nynorsk,Occitan,Panjabi,Pashto,Persian,Polish,Portuguese,Punjabi,Pushto,Romanian,Russian,Sanskrit,Serbian,Shona,Sindhi,Sinhala,Sinhalese,Slovak,Slovenian,Somali,Spanish,Sundanese,Swahili,Swedish,Tagalog,Tajik,Tamil,Tatar,Telugu,Thai,Tibetan,Turkish,Turkmen,Ukrainian,Urdu,Uzbek,Valencian,Vietnamese,Welsh,Yiddish,Yoruba -------------------------------------------------------------------------- You can optionally use a bigger model for better results (especially with other languages). The available models are `tiny`, `tiny.en`, `base`, `base.en`, `small`, `small.en`, `medium`, `medium.en`, `large`. 
- auto_subtitle_plus.exe /path/to/video.mp4 --model medium + auto_subtitle_plus.exe /path/to/video.mp4 --model medium Adding `--task translate` will translate the subtitles into English: From 9dab1bd24e0bf7b2033258667f086454129771fe Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 26 Feb 2023 16:43:17 +0100 Subject: [PATCH 13/25] fix readme --- README.md | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 92f49c7..d1cc7aa 100644 --- a/README.md +++ b/README.md @@ -58,21 +58,26 @@ Follow thsese instructions only if your gpu is powerful enough to be worth switc ## Options -`--model` - name of the Whisper model to use, the larger the better and slower - OPTIONS: `tiny`, `tiny.en`, `base`, `base.en`, `small`, `small.en`, `medium`, `medium.en`, `large` - -`--language` - used to force the subtitle output language - OPTIONS: `en`, `es`, `auto` - DEFAULT: `auto` - -`--output_dir` - directory to save the outputs - -`--output_srt` - whether to output the .srt file along with the video files - OPTIONS: `True`, `False` - DEFAULT: `False` - -`--srt_only` - only generate the .srt file and not create overlayed video - OPTIONS: `True`, `False` - DEFAULT: `False` - -`--verbose` - whether to print out the progress and debug messages - OPTIONS: `True`, `False` - DEFAULT: `False` - -`--task` - whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate') - OPTIONS: `transcribe`, `translate` - DEFAULT: `transcribe` - -... and many more + -h, --help show this help message and exit + --model {tiny.en,tiny,base.en,base,small.en,small,medium.en,medium,large-v1,large-v2,large} + name of the Whisper model to use (default: small) + --output-dir OUTPUT_DIR, -o OUTPUT_DIR + directory to save the outputs (default: .) 
+ --output-srt, -s output the .srt file in the output directory (default: False) + --output-audio, -a output the audio extracted (default: False) + --output-video, -v generate video with embedded subtitles (default: False) + --enhance-consistency + use the previous output as input to the next window to improve consistency (may stuck in a + failure loop) (default: False) + --extract-workers EXTRACT_WORKERS + number of workers to extract audio (only useful when there are multiple videos) (default: 3) + --verbose print out the progress and debug messages (default: False) + --task {transcribe,translate} + whether to perform X->X speech recognition ('transcribe') or X->English translation + ('translate') (default: transcribe) + --language {af,am,ar,as,az,ba,be,bg,bn,bo,br,bs,ca,cs,cy,da,de,el,en,es,et,eu,fa,fi,fo,fr,gl,gu,ha,haw,he,hi,hr,ht,hu,hy,id,is,it,ja,jw,ka,kk,km,kn,ko,la,lb,ln,lo,lt,lv,mg,mi,mk,ml,mn,mr,ms,mt,my,ne,nl,nn,no,oc,pa,pl,ps,pt,ro,ru,sa,sd,si,sk,sl,sn,so,sq,sr,su,sv,sw,ta,te,tg,th,tk,tl,tr,tt,uk,ur,uz,vi,yi,yo,zh,Afrikaans,Albanian,Amharic,Arabic,Armenian,Assamese,Azerbaijani,Bashkir,Basque,Belarusian,Bengali,Bosnian,Breton,Bulgarian,Burmese,Castilian,Catalan,Chinese,Croatian,Czech,Danish,Dutch,English,Estonian,Faroese,Finnish,Flemish,French,Galician,Georgian,German,Greek,Gujarati,Haitian,Haitian Creole,Hausa,Hawaiian,Hebrew,Hindi,Hungarian,Icelandic,Indonesian,Italian,Japanese,Javanese,Kannada,Kazakh,Khmer,Korean,Lao,Latin,Latvian,Letzeburgesch,Lingala,Lithuanian,Luxembourgish,Macedonian,Malagasy,Malay,Malayalam,Maltese,Maori,Marathi,Moldavian,Moldovan,Mongolian,Myanmar,Nepali,Norwegian,Nynorsk,Occitan,Panjabi,Pashto,Persian,Polish,Portuguese,Punjabi,Pushto,Romanian,Russian,Sanskrit,Serbian,Shona,Sindhi,Sinhala,Sinhalese,Slovak,Slovenian,Somali,Spanish,Sundanese,Swahili,Swedish,Tagalog,Tajik,Tamil,Tatar,Telugu,Thai,Tibetan,Turkish,Turkmen,Ukrainian,Urdu,Uzbek,Valencian,Vietnamese,Welsh,Yiddish,Yoruba} + language spoken in the audio, specify None to 
perform language detection (default: None) + --device DEVICE device to use for PyTorch inference (default: cuda) ## Usage From 81c8a834992152e8e9cf0c8098b32c14ed53ad46 Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 26 Feb 2023 16:44:25 +0100 Subject: [PATCH 14/25] fix readme bis --- README.md | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index d1cc7aa..0e20f0e 100644 --- a/README.md +++ b/README.md @@ -58,26 +58,26 @@ Follow thsese instructions only if your gpu is powerful enough to be worth switc ## Options - -h, --help show this help message and exit - --model {tiny.en,tiny,base.en,base,small.en,small,medium.en,medium,large-v1,large-v2,large} - name of the Whisper model to use (default: small) - --output-dir OUTPUT_DIR, -o OUTPUT_DIR - directory to save the outputs (default: .) - --output-srt, -s output the .srt file in the output directory (default: False) - --output-audio, -a output the audio extracted (default: False) - --output-video, -v generate video with embedded subtitles (default: False) - --enhance-consistency - use the previous output as input to the next window to improve consistency (may stuck in a - failure loop) (default: False) - --extract-workers EXTRACT_WORKERS - number of workers to extract audio (only useful when there are multiple videos) (default: 3) - --verbose print out the progress and debug messages (default: False) - --task {transcribe,translate} - whether to perform X->X speech recognition ('transcribe') or X->English translation - ('translate') (default: transcribe) - --language 
{af,am,ar,as,az,ba,be,bg,bn,bo,br,bs,ca,cs,cy,da,de,el,en,es,et,eu,fa,fi,fo,fr,gl,gu,ha,haw,he,hi,hr,ht,hu,hy,id,is,it,ja,jw,ka,kk,km,kn,ko,la,lb,ln,lo,lt,lv,mg,mi,mk,ml,mn,mr,ms,mt,my,ne,nl,nn,no,oc,pa,pl,ps,pt,ro,ru,sa,sd,si,sk,sl,sn,so,sq,sr,su,sv,sw,ta,te,tg,th,tk,tl,tr,tt,uk,ur,uz,vi,yi,yo,zh,Afrikaans,Albanian,Amharic,Arabic,Armenian,Assamese,Azerbaijani,Bashkir,Basque,Belarusian,Bengali,Bosnian,Breton,Bulgarian,Burmese,Castilian,Catalan,Chinese,Croatian,Czech,Danish,Dutch,English,Estonian,Faroese,Finnish,Flemish,French,Galician,Georgian,German,Greek,Gujarati,Haitian,Haitian Creole,Hausa,Hawaiian,Hebrew,Hindi,Hungarian,Icelandic,Indonesian,Italian,Japanese,Javanese,Kannada,Kazakh,Khmer,Korean,Lao,Latin,Latvian,Letzeburgesch,Lingala,Lithuanian,Luxembourgish,Macedonian,Malagasy,Malay,Malayalam,Maltese,Maori,Marathi,Moldavian,Moldovan,Mongolian,Myanmar,Nepali,Norwegian,Nynorsk,Occitan,Panjabi,Pashto,Persian,Polish,Portuguese,Punjabi,Pushto,Romanian,Russian,Sanskrit,Serbian,Shona,Sindhi,Sinhala,Sinhalese,Slovak,Slovenian,Somali,Spanish,Sundanese,Swahili,Swedish,Tagalog,Tajik,Tamil,Tatar,Telugu,Thai,Tibetan,Turkish,Turkmen,Ukrainian,Urdu,Uzbek,Valencian,Vietnamese,Welsh,Yiddish,Yoruba} - language spoken in the audio, specify None to perform language detection (default: None) - --device DEVICE device to use for PyTorch inference (default: cuda) + -h, --help show this help message and exit + --model {tiny.en,tiny,base.en,base,small.en,small,medium.en,medium,large-v1,large-v2,large} + name of the Whisper model to use (default: small) + --output-dir OUTPUT_DIR, -o OUTPUT_DIR + directory to save the outputs (default: .) 
+ --output-srt, -s output the .srt file in the output directory (default: False) + --output-audio, -a output the audio extracted (default: False) + --output-video, -v generate video with embedded subtitles (default: False) + --enhance-consistency + use the previous output as input to the next window to improve consistency (may stuck in a + failure loop) (default: False) + --extract-workers EXTRACT_WORKERS + number of workers to extract audio (only useful when there are multiple videos) (default: 3) + --verbose print out the progress and debug messages (default: False) + --task {transcribe,translate} + whether to perform X->X speech recognition ('transcribe') or X->English translation + ('translate') (default: transcribe) + --language {af,am,ar,as,az,ba,be,bg,bn,bo,br,bs,ca,cs,cy,da,de,el,en,es,et,eu,fa,fi,fo,fr,gl,gu,ha,haw,he,hi,hr,ht,hu,hy,id,is,it,ja,jw,ka,kk,km,kn,ko,la,lb,ln,lo,lt,lv,mg,mi,mk,ml,mn,mr,ms,mt,my,ne,nl,nn,no,oc,pa,pl, ps,pt,ro,ru,sa,sd,si,sk,sl,sn,so,sq,sr,su,sv,sw,ta,te,tg,th,tk,tl,tr,tt,uk,ur,uz,vi,yi,yo,zh,Afrikaans,Albanian,Amharic,Arabic,Armenian,Assamese,Azerbaijani,Bashkir,Basque,Belarusian,Bengali,Bosnian,Breton,Bulgarian, Burmese,Castilian,Catalan,Chinese,Croatian,Czech,Danish,Dutch,English,Estonian,Faroese,Finnish,Flemish,French,Galician,Georgian,German,Greek,Gujarati,Haitian,Haitian Creole,Hausa,Hawaiian,Hebrew,Hindi,Hungarian, Icelandic,Indonesian,Italian,Japanese,Javanese,Kannada,Kazakh,Khmer,Korean,Lao,Latin,Latvian,Letzeburgesch,Lingala,Lithuanian,Luxembourgish,Macedonian,Malagasy,Malay,Malayalam,Maltese,Maori,Marathi,Moldavian,Moldovan, Mongolian,Myanmar,Nepali,Norwegian,Nynorsk,Occitan,Panjabi,Pashto,Persian,Polish,Portuguese,Punjabi,Pushto,Romanian,Russian,Sanskrit,Serbian,Shona,Sindhi,Sinhala,Sinhalese,Slovak,Slovenian,Somali,Spanish,Sundanese, Swahili,Swedish,Tagalog,Tajik,Tamil,Tatar,Telugu,Thai,Tibetan,Turkish,Turkmen,Ukrainian,Urdu,Uzbek,Valencian,Vietnamese,Welsh,Yiddish,Yoruba} + language spoken in the audio, specify None 
to perform language detection (default: None) + --device DEVICE device to use for PyTorch inference (default: cuda) ## Usage From a3621fa72903b6260ff1f4cbf3549661adfc1c4e Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 26 Feb 2023 16:49:06 +0100 Subject: [PATCH 15/25] language list --- README.md | 111 +++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 110 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 0e20f0e..2819493 100644 --- a/README.md +++ b/README.md @@ -104,7 +104,116 @@ The default setting (which selects the `small` model) works well for transcribin You can use the --language parameter to force the output for the following languages: -Afrikaans,Albanian,Amharic,Arabic,Armenian,Assamese,Azerbaijani,Bashkir,Basque,Belarusian,Bengali,Bosnian,Breton,Bulgarian,Burmese,Castilian,Catalan,Chinese,Croatian,Czech,Danish,Dutch,English,Estonian,Faroese,Finnish,Flemish,French,Galician,Georgian,German,Greek,Gujarati,Haitian,Haitian Creole,Hausa,Hawaiian,Hebrew,Hindi,Hungarian,Icelandic,Indonesian,Italian,Japanese,Javanese,Kannada,Kazakh,Khmer,Korean,Lao,Latin,Latvian,Letzeburgesch,Lingala,Lithuanian,Luxembourgish,Macedonian,Malagasy,Malay,Malayalam,Maltese,Maori,Marathi,Moldavian,Moldovan,Mongolian,Myanmar,Nepali,Norwegian,Nynorsk,Occitan,Panjabi,Pashto,Persian,Polish,Portuguese,Punjabi,Pushto,Romanian,Russian,Sanskrit,Serbian,Shona,Sindhi,Sinhala,Sinhalese,Slovak,Slovenian,Somali,Spanish,Sundanese,Swahili,Swedish,Tagalog,Tajik,Tamil,Tatar,Telugu,Thai,Tibetan,Turkish,Turkmen,Ukrainian,Urdu,Uzbek,Valencian,Vietnamese,Welsh,Yiddish,Yoruba +Afrikaans + - Albanian + - Amharic + - Arabic + - Armenian + - Assamese + - Azerbaijani + - Bashkir + - Basque + - Belarusian + - Bengali + - Bosnian + - Breton + - Bulgarian + - Burmese + - Castilian + - Catalan + - Chinese + - Croatian + - Czech + - Danish + - Dutch + - English + - Estonian + - Faroese + - Finnish + - Flemish + - French + - Galician + - Georgian + - German + - Greek + 
- Gujarati + - Haitian + - Haitian Creole + - Hausa + - Hawaiian + - Hebrew + - Hindi + - Hungarian + - Icelandic + - Indonesian + - Italian + - Japanese + - Javanese + - Kannada + - Kazakh + - Khmer + - Korean + - Lao + - Latin + - Latvian + - Letzeburgesch + - Lingala + - Lithuanian + - Luxembourgish + - Macedonian + - Malagasy + - Malay + - Malayalam + - Maltese + - Maori + - Marathi + - Moldavian + - Moldovan + - Mongolian + - Myanmar + - Nepali + - Norwegian + - Nynorsk + - Occitan + - Panjabi + - Pashto + - Persian + - Polish + - Portuguese + - Punjabi + - Pushto + - Romanian + - Russian + - Sanskrit + - Serbian + - Shona + - Sindhi + - Sinhala + - Sinhalese + - Slovak + - Slovenian + - Somali + - Spanish + - Sundanese + - Swahili + - Swedish + - Tagalog + - Tajik + - Tamil + - Tatar + - Telugu + - Thai + - Tibetan + - Turkish + - Turkmen + - Ukrainian + - Urdu + - Uzbek + - Valencian + - Vietnamese + - Welsh + - Yiddish + - Yorubaa -------------------------------------------------------------------------- From dc9924cc1e0c1bcf5b66401825eb1d24485428be Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 26 Feb 2023 16:53:22 +0100 Subject: [PATCH 16/25] add references --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 2819493..ae1af9d 100644 --- a/README.md +++ b/README.md @@ -215,6 +215,8 @@ Afrikaans - Yiddish - Yorubaa + +Further details on accuracy and models can be obtained here: https://github.com/openai/whisper#available-models-and-languages -------------------------------------------------------------------------- You can optionally use a bigger model for better results (especially with other languages). The available models are `tiny`, `tiny.en`, `base`, `base.en`, `small`, `small.en`, `medium`, `medium.en`, `large`. 
From 0f42c5fe9f735946c8b06b22612c6d06ec4ca02b Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 26 Feb 2023 16:54:47 +0100 Subject: [PATCH 17/25] Update README.md --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index ae1af9d..9605300 100644 --- a/README.md +++ b/README.md @@ -55,6 +55,11 @@ Follow thsese instructions only if your gpu is powerful enough to be worth switc - pip cache purge - pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116 + + Having a decent gpu can drammatically increase the performance + + ![image](https://user-images.githubusercontent.com/19196549/221421292-fc09b38e-c3aa-46e3-8684-e46c1e4cc691.png) + ## Options From a3233b2d59045172dcd800030e54385547a6460b Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 26 Feb 2023 16:57:26 +0100 Subject: [PATCH 18/25] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9605300..f160949 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,7 @@ Follow thsese instructions only if your gpu is powerful enough to be worth switc Having a decent gpu can drammatically increase the performance - ![image](https://user-images.githubusercontent.com/19196549/221421292-fc09b38e-c3aa-46e3-8684-e46c1e4cc691.png) +![image](https://user-images.githubusercontent.com/19196549/221421292-fc09b38e-c3aa-46e3-8684-e46c1e4cc691.png) ## Options From a79312eb76270eb6b07b2ea81895e5f9bd829883 Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Mon, 30 Oct 2023 01:39:56 +0100 Subject: [PATCH 19/25] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f160949..d796fd6 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ To get started, you'll need Python 3.7 or newer. 
Install the binary by running the following command: ```bash - pip install git+https://github.com/Sectumsempra82/auto-subtitle-plus.gi + pip install git+https://github.com/Sectumsempra82/auto-subtitle-plus.git ``` You'll also need to install [`ffmpeg`](https://ffmpeg.org/), which is available from most package managers: From 7f27cd467067b03a2e7b24d37f15b56f4d0dbb0f Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Mon, 30 Oct 2023 02:12:49 +0100 Subject: [PATCH 20/25] Update README.md --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index d796fd6..dab3806 100644 --- a/README.md +++ b/README.md @@ -55,6 +55,10 @@ Follow thsese instructions only if your gpu is powerful enough to be worth switc - pip cache purge - pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116 + + or for python 3.11 + + - pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 Having a decent gpu can drammatically increase the performance From 0e72ade8d4b5a76f0283b7f166bb20d76a2058cb Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 5 Nov 2023 00:37:43 +0100 Subject: [PATCH 21/25] Update README.md --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index dab3806..399eaa7 100644 --- a/README.md +++ b/README.md @@ -34,6 +34,13 @@ To get started, you'll need Python 3.7 or newer. 
Install the binary by running t ``` You'll also need to install [`ffmpeg`](https://ffmpeg.org/), which is available from most package managers: +After careful testing, it is preferred to install ffpmpeg-python instead of the regular one in order to be able to use the -v parameter to embed subtitles in the video + ```bash + +pip install ffmpeg-python + +``` +if you don't need to use the -v option just go for the regular choco/brew/apt installers ```bash # on Ubuntu or Debian From 274f92f83d54cbd1f9c0009edefbc6e57d2b5289 Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 5 Nov 2023 00:38:17 +0100 Subject: [PATCH 22/25] Update README.md --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 399eaa7..454319e 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,9 @@ To get started, you'll need Python 3.7 or newer. Install the binary by running t ``` You'll also need to install [`ffmpeg`](https://ffmpeg.org/), which is available from most package managers: -After careful testing, it is preferred to install ffpmpeg-python instead of the regular one in order to be able to use the -v parameter to embed subtitles in the video + +#After careful testing, it is preferred to install ffpmpeg-python instead of the regular one in order to be able to use the -v parameter to embed subtitles in the video + ```bash pip install ffmpeg-python From 97e6d7e73e4e5c4b5b30b0c8965c46c0f8c3e2a6 Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Sun, 5 Nov 2023 00:58:00 +0100 Subject: [PATCH 23/25] Update README.md --- README.md | 9 --------- 1 file changed, 9 deletions(-) diff --git a/README.md b/README.md index 454319e..dab3806 100644 --- a/README.md +++ b/README.md @@ -35,15 +35,6 @@ To get started, you'll need Python 3.7 or newer. 
Install the binary by running t You'll also need to install [`ffmpeg`](https://ffmpeg.org/), which is available from most package managers: -#After careful testing, it is preferred to install ffpmpeg-python instead of the regular one in order to be able to use the -v parameter to embed subtitles in the video - - ```bash - -pip install ffmpeg-python - -``` -if you don't need to use the -v option just go for the regular choco/brew/apt installers - ```bash # on Ubuntu or Debian sudo apt update && sudo apt install ffmpeg From e6481dff4ae2e34c8f18867609e8df3350d2209e Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Wed, 16 Oct 2024 02:41:25 +0200 Subject: [PATCH 24/25] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index ce7855f..15797fa 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ install_requires=[ 'youtube-dl', 'psutil', - 'openai-whisper @ git+https://github.com/openai/whisper.git@main#egg=whisper' + 'openai-whisper' ], description="Automatically generate and/or embed subtitles into your videos", entry_points={ From 8efd0b784270e1db6c34d3aa2f2bcf254b41a40b Mon Sep 17 00:00:00 2001 From: Sectumsempra82 Date: Thu, 17 Oct 2024 18:43:23 +0200 Subject: [PATCH 25/25] Update README.md --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index dab3806..bca019c 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ ## Installation -To get started, you'll need Python 3.7 or newer. Install the binary by running the following command: +To get started, you'll need Python >= 3.7 && <= 3.11.9. 
Install the binary by running the following command: ```bash pip install git+https://github.com/Sectumsempra82/auto-subtitle-plus.git ``` You'll also need to install [`ffmpeg`](https://ffmpeg.org/), which is available from most package managers: ```bash # on Ubuntu or Debian sudo apt update && sudo apt install ffmpeg @@ -44,6 +44,10 @@ brew install ffmpeg # on Windows using Chocolatey (https://chocolatey.org/) choco install ffmpeg + +# You might also need python-ffmpeg + +pip3 install python-ffmpeg ``` ## How to make it use your GPU for 3x faster generations