mirror of https://github.com/vondas-network/videobeaux.git
synced 2025-12-17 05:10:02 +01:00

refactored programs and arg structure
.gitignore (vendored): 3 lines changed
@@ -14,7 +14,6 @@ dist/
 downloads/
 eggs/
 .eggs/
-env/
 lib/
 lib64/
 parts/
@@ -161,3 +160,5 @@ cython_debug/
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 #.idea/
+
+vosk-models/
config.yaml: 91 lines changed
@@ -1,88 +1,103 @@
-#V1
+define: &input_dir 'inputs'
+define: &output_dir 'outputs'
+
+proj_mgmt:
+  input_dir: *input_dir
+  output_dir: *output_dir
+  stt_model: 'vosk-models\vosk-model-en-us-0.42-gigaspeech'
+  default_video_file_ext: '.mp4'
+  default_audio_file_ext: '.wav'
+
+download_yt:
+  yt_url: 'https://www.youtube.com/watch?v=jNQXAC9IVRw'
+  output_file: 'outputs\oliphant'
+  format: 'mp4'
+
+transcraibe:
+  input_file: 'inputs\input.mp4'
+  stt_model: 'vosk-models\vosk-model-en-us-0.42-gigaspeech'
+
 resize:
-  input_file: 'input.mp4'
-  output_file: "output_resized_again.mp4"
+  input_file: 'inputs\input.mp4'
+  output_file: 'outputs\resize.mp4'
   width: 1200
   height: 200
 
 convert:
-  input_file: "input.mp4"
-  output_file: "output.avi"
+  input_file: 'inputs\input.mp4'
+  output_file: 'outputs\output.mkv'
   format: "avi"
 
 extract_frames:
-  input_file: "input.mp4"
-  output_folder: "frames"
+  input_file: 'inputs\input.mp4'
+  output_folder: 'outputs\extract_frames.mp4'
   frame_rate: 24
 
 silence_x:
   min_d: 0
   max_d: 1000
   adj: 0
-  input_file: ''
-  output_file: "silencex"
+  input_file: 'inputs\input.mp4'
+  output_file: 'outputs\silence.mp4'
 
-# V2
 extract_sound:
-  input_file: "input.mp4"
-  output_file: "output.wav"
+  input_file: 'inputs\input.mp4'
+  output_file: 'outputs\extract_sound.wav'
 
 reverse:
-  input_file: "input.mp4"
-  output_file: "output.mp4"
+  input_file: 'inputs\input.mp4'
+  output_file: 'outputs\reverse.mp4'
 
 nostalgic_stutter:
-  input_file: "input.mp4"
-  output_file: "output.mp4"
+  input_file: 'inputs\input.mp4'
+  output_file: 'outputs\nostalgic.mp4'
 
 stutter_pro:
-  input_file: "input.mp4"
+  input_file: 'inputs\input.mp4'
   stutter: "31.4"
-  output_file: "output.mp4"
+  output_file: 'outputs\stutter_pro.mp4'
 
 stack_2x:
-  input_file1: "input.mp4"
+  input_file1: 'inputs\input.mp4'
   input_file2: "input.mp4"
-  output_file: "output.mp4"
+  output_file: 'outputs\stack_2x.mp4'
 
 lsd_feedback:
-  input_file: "input.mp4"
+  input_file: 'inputs\input.mp4'
   frame_weights: 1 1 -3 2 1 1 -3 1
-  output_file: "output.mp4"
+  output_file: 'outputs\lsd_feedback.mp4'
 
 frame_delay_pro1:
-  input_file: "input.mp4"
+  input_file: 'inputs\input.mp4'
   num_of_frames: "8"
   frame_weights: 2 1 -3 1
-  output_file: "output.mp4"
+  output_file: 'outputs\frame_delay_pro1.mp4'
 
 frame_delay_pro2:
-  input_file: "input.mp4"
+  input_file: 'inputs\input.mp4'
   decay: "0.97"
   planes: "1"
-  output_file: "output.mp4"
+  output_file: 'outputs\frame_delay_pro2.mp4'
 
 mirror_delay:
-  input_file: "input.mp4"
-  output_file: "output.mp4"
+  input_file: 'inputs\input.mp4'
+  output_file: 'outputs\mirror_delay.mp4'
 
 blur_pix:
-  input_file: "input.mp4"
-  output_file: "output.mp4"
+  input_file: 'inputs\input.mp4'
+  output_file: 'outputs\blur_pix.mp4'
 
 scrolling:
-  input_file: "input.mp4"
-  output_file: "output.mp4"
+  input_file: 'inputs\input.mp4'
+  output_file: 'outputs\scrolling.mp4'
 
 scrolling_pro:
-  input_file: "input.mp4"
+  input_file: 'inputs\input.mp4'
   horizontal: "0.003"
   vertical: "0.05"
-  output_file: "output.mp4"
+  output_file: 'outputs\scrolling_pro.mp4'
 
 overexposed_stutter:
-  input_file: "input.mp4"
-  output_file: "output.mp4"
+  input_file: 'inputs\input.mp4'
+  output_file: 'outputs\overexposed_stutter.mp4'
output.mp4: BIN (binary file not shown)
outputs/extract_sound.wav: BIN, new file (binary file not shown)
programs/blur_pix.py
@@ -1,6 +1,6 @@
 from utils.ffmpeg_operations import run_ffmpeg_command
 
-def blur_pix_video(input_file, output_file):
+def blur_pix(input_file, output_file):
     command = [
         "ffmpeg",
         "-y",
programs/convert.py
@@ -1,6 +1,6 @@
 from utils.ffmpeg_operations import run_ffmpeg_command
 
-def convert_video(input_file, output_file, format):
+def convert(input_file, output_file, format):
     command = [
         "ffmpeg",
         "-i", input_file,
programs/download_yt.py: new file, 23 lines
@@ -0,0 +1,23 @@
+import yt_dlp
+# yt_dlp is dope, but the docs for the python module suck.
+# they say "oh, just read the official yt-dlp docs and it should just work"
+# but, while the logic is the same, the param format is not exact
+# for example - to include a subtitle file in the download via the yt-dlp cli tool, yt-dlp docs say to use --write-auto-subs
+# but, the same option here is writeautomaticsub for no discernable reason
+# dm me if you know why
+# in the meantime, you need to check the source code AND the docs to use certain options
+
+# https://pypi.org/project/yt-dlp/
+# https://github.com/yt-dlp/yt-dlp/blob/master/yt_dlp/YoutubeDL.py#L181
+
+def download_yt(yt_url, output_file, format):
+    ydl_opts = {
+        'format': format,
+        'outtmpl': f"{output_file}.{format}", #https://github.com/yt-dlp/yt-dlp?tab=readme-ov-file#output-template
+        "writeautomaticsub":True # will include a vtt file. note: this won't work for some videobeaux functions that require an srt from vosk
+    }
+
+    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+        ydl.download([yt_url])
+
+    return True
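The comments in this new module are the only place the CLI-to-Python option mismatch is documented, so here is a small, hedged usage sketch. It only uses names that appear in this commit (the download_yt() helper and the download_yt defaults in config.yaml); the forward-slash output path is illustrative.

    # Hypothetical invocation of the new helper, using the defaults this
    # commit adds under the download_yt section of config.yaml.
    from programs.download_yt import download_yt

    download_yt(
        yt_url="https://www.youtube.com/watch?v=jNQXAC9IVRw",
        output_file="outputs/oliphant",  # config.yaml stores this as 'outputs\oliphant'
        format="mp4",
    )
    # Inside download_yt(), the CLI flag --write-auto-subs is spelled
    # "writeautomaticsub" in ydl_opts, which is the mismatch the comments describe.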
programs/extract_sound.py: new file, 18 lines
@@ -0,0 +1,18 @@
+from utils.ffmpeg_operations import run_ffmpeg_command
+from utils import load_config
+
+config = load_config.load_config()
+a_ext = config['proj_mgmt']['default_audio_file_ext']
+
+def extract_sound(input_file, output_file):
+    command = [
+        "ffmpeg",
+        "-i", input_file,
+        "-vn",
+        "-acodec", "pcm_s16le",
+        "-ar", "44100",
+        "-ac", "2",
+        output_file
+    ]
+    run_ffmpeg_command(command)
+    print(f"Audio extracted from video using sound and file is {output_file}")
programs/frame_delay_pro1.py
@@ -1,6 +1,6 @@
 from utils.ffmpeg_operations import run_ffmpeg_command
 
-def frame_delay_pro1_video(input_file, num_of_frames, frame_weights, output_file):
+def frame_delay_pro1(input_file, num_of_frames, frame_weights, output_file):
     command = [
         "ffmpeg",
         "-y",
@@ -16,4 +16,3 @@ def frame_delay_pro1_video(input_file, num_of_frames, frame_weights, output_file
     ]
     run_ffmpeg_command(command)
     print(f"Video processed with frame_delay_pro1 and file is {output_file}")
-
programs/frame_delay_pro2.py
@@ -1,6 +1,6 @@
 from utils.ffmpeg_operations import run_ffmpeg_command
 
-def frame_delay_pro2_video(input_file, decay, planes, output_file):
+def frame_delay_pro2(input_file, decay, planes, output_file):
     command = [
         "ffmpeg",
         "-y",
@@ -11,4 +11,3 @@ def frame_delay_pro2_video(input_file, decay, planes, output_file):
 
     run_ffmpeg_command(command)
     print(f"Video processed with frame_delay_pro2 and file is {output_file}")
-
programs/lsd_feedback.py
@@ -1,6 +1,6 @@
 from utils.ffmpeg_operations import run_ffmpeg_command
 
-def lsd_feedback_video(input_file, output_file):
+def lsd_feedback(input_file, output_file):
     command = [
         "ffmpeg",
         "-y",
@@ -16,4 +16,3 @@ def lsd_feedback_video(input_file, output_file):
     ]
     run_ffmpeg_command(command)
     print(f"Video processed with LSD and the file is {output_file} from {input_file}")
-
programs/mirror-delay.py: new file, 14 lines
@@ -0,0 +1,14 @@
+from utils.ffmpeg_operations import run_ffmpeg_command
+
+def mirror_delay(input_file, output_file):
+    command = [
+        "ffmpeg",
+        "-y",
+        "-i", input_file,
+        "-filter_complex", "[0:v]copy[1];[0:v]tmix=frames=8:weights=1 1 -3 2 1 1 -3 1[3];[1]hflip[2];[0:v][2][3]mix=inputs=3[out_v]",
+        "-map", "[out_v]",
+        "-map", "0:a",
+        output_file
+    ]
+    run_ffmpeg_command(command)
+    print(f"Video processed with mirror_delay and file is {output_file}")
programs/mirror_delay.py
@@ -1,6 +1,6 @@
 from utils.ffmpeg_operations import run_ffmpeg_command
 
-def mirror_delay_video(input_file1, output_file):
+def mirror_delay(input_file1, output_file):
     command = [
         "ffmpeg",
         "-y",
programs/nostalgic_stutter.py
@@ -1,6 +1,6 @@
 from utils.ffmpeg_operations import run_ffmpeg_command
 
-def nostalgic_stutter_video(input_file, output_file):
+def nostalgic_stutter(input_file, output_file):
     command = [
         "ffmpeg",
         "-y",
programs/overexposed_stutter.py
@@ -1,6 +1,6 @@
 from utils.ffmpeg_operations import run_ffmpeg_command
 
-def overexposed_stutter_video(input_file, output_file):
+def overexposed_stutter(input_file, output_file):
     command = [
         "ffmpeg",
         "-y",
programs/resize.py
@@ -1,6 +1,6 @@
 from utils.ffmpeg_operations import run_ffmpeg_command
 
-def resize_video(input_file, output_file, width, height):
+def resize(input_file, output_file, width, height):
     command = [
         "ffmpeg",
         "-i", input_file,
programs/reverse.py
@@ -1,6 +1,6 @@
 from utils.ffmpeg_operations import run_ffmpeg_command
 
-def reverse_video(input_file, output_file):
+def reverse(input_file, output_file):
     command = [
         "ffmpeg",
         "-y",
programs/scrolling_pro.py
@@ -1,6 +1,6 @@
 from utils.ffmpeg_operations import run_ffmpeg_command
 
-def scrolling_pro_video(input_file, horizontal, vertical, output_file):
+def scrolling_pro(input_file, horizontal, vertical, output_file):
     command = [
         "ffmpeg",
         "-y",
programs/silence_xtraction.py: new file, 39 lines
@@ -0,0 +1,39 @@
+import sys
+from videogrep import parse_transcript, create_supercut
+
+
+def silence_extraction(min_d, max_d, adj, input_file, output_file):
+    print('DID WE MAKE IT^&(#(*&#(*&#(*&W#(*#&(*#&(*#&(*#&#*(&#))')
+    print(input_file)
+    # the min and max duration of silences to extract
+    min_duration = min_d #0.1
+    max_duration = max_d #1000.0
+
+    # value to to trim off the end of each clip
+    adjuster = adj #0.0
+
+    filenames = input_file
+
+    silences = []
+
+    try:
+        for filename in filenames:
+            timestamps = parse_transcript(filename)
+
+            words = []
+            print(timestamps)
+            for sentence in timestamps:
+                words += sentence['words']
+
+            for word1, word2 in zip(words[:-2], words[1:]):
+                start = word1['end']
+                end = word2['start'] - adjuster
+                duration = end - start
+                if duration > min_duration and duration < max_duration:
+                    silences.append({'start': start, 'end': end, 'file': filename})
+
+        create_supercut(silences, f"{output_file}.mp4")
+        return "ok"
+    except Exception as e:
+        print(e)
+        return e
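For orientation, the loop above treats the gap between the end of one word and the start of the next as a candidate silence, then keeps gaps whose length falls between min_d and max_d. A minimal worked example with made-up timestamps (only the field names and config defaults come from this commit; the numbers are hypothetical):

    # Hypothetical word entries shaped like videogrep's parse_transcript output.
    word1 = {"word": "hello", "start": 0.0, "end": 0.4}
    word2 = {"word": "world", "start": 2.1, "end": 2.5}
    adjuster = 0                         # adj default in config.yaml
    start = word1["end"]                 # 0.4
    end = word2["start"] - adjuster      # 2.1
    duration = end - start               # 1.7 second gap
    # With the config.yaml defaults min_d=0 and max_d=1000, this gap is kept
    # and later stitched into the output by create_supercut().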
programs/stack_2x.py
@@ -1,6 +1,6 @@
 from utils.ffmpeg_operations import run_ffmpeg_command
 
-def stack_2x_video(input_file1, input_file2, output_file):
+def stack_2x(input_file1, input_file2, output_file):
     command = [
         "ffmpeg",
         "-y",
programs/stutter_pro.py
@@ -1,6 +1,6 @@
 from utils.ffmpeg_operations import run_ffmpeg_command
 
-def stutter_pro_video(input_file, stutter, output_file):
+def stutter_pro(input_file, stutter, output_file):
     command = [
         "ffmpeg",
         "-y",
programs/transcraibe.py: new file, 118 lines
@@ -0,0 +1,118 @@
+import os
+import json
+import time
+import subprocess
+import imageio_ffmpeg
+from glob import glob
+from subprocess import run
+from vosk import Model, KaldiRecognizer, SetLogLevel
+
+import yaml
+from pathlib import Path
+
+config_file = Path(__file__).parent.parent / "config.yaml"
+def load_config():
+    with open(config_file, 'r') as file:
+        return yaml.safe_load(file)
+config = load_config()
+proj_mgmt_config = config['proj_mgmt']
+
+# this piece is pulled almost verbatim from videogrep
+# maybe we could have just used subprocess and run videogrep --transcribe since it is already a dependancy
+# but it is here for archival purposes
+# big up to Sam Levigne aka antiboredom
+# https://github.com/antiboredom/videogrep
+def vosk_stt(input_file, stt_model):
+    if stt_model is None:
+        stt_model = proj_mgmt_config['stt_model']
+    MAX_CHARS = 36
+
+    start_time = time.time()
+
+    transcript_file = os.path.splitext(input_file)[0] + ".json"
+
+    if os.path.exists(transcript_file):
+        with open(transcript_file, "r") as infile:
+            data = json.load(infile)
+            return data
+
+    if not os.path.exists(input_file):
+        print("Could not find file", input_file)
+        return []
+
+    _model_path: str = 'defaultmodel'
+
+    if stt_model is not None:
+        _model_path = stt_model
+
+    if not os.path.exists(_model_path):
+        print("Could not find model folder")
+        exit(1)
+
+    print("Transcribing", input_file)
+    SetLogLevel(-1)
+
+    sample_rate = 16000
+    model = Model(_model_path)
+    rec = KaldiRecognizer(model, sample_rate)
+    rec.SetWords(True)
+
+    process = subprocess.Popen(
+        [
+            imageio_ffmpeg.get_ffmpeg_exe(),
+            "-nostdin",
+            "-loglevel",
+            "quiet",
+            "-i",
+            input_file,
+            "-ar",
+            str(sample_rate),
+            "-ac",
+            "1",
+            "-f",
+            "s16le",
+            "-",
+        ],
+        stdout=subprocess.PIPE,
+    )
+
+    tot_samples = 0
+    result = []
+    while True:
+        data = process.stdout.read(4000)
+        if len(data) == 0:
+            break
+        if rec.AcceptWaveform(data):
+            tot_samples += len(data)
+            result.append(json.loads(rec.Result()))
+    result.append(json.loads(rec.FinalResult()))
+
+    out = []
+    for r in result:
+        if "result" not in r:
+            continue
+        words = [w for w in r["result"]]
+        item = {"content": "", "start": None, "end": None, "words": []}
+        for w in words:
+            item["content"] += w["word"] + " "
+            item["words"].append(w)
+            if len(item["content"]) > MAX_CHARS or w == words[-1]:
+                item["content"] = item["content"].strip()
+                item["start"] = item["words"][0]["start"]
+                item["end"] = item["words"][-1]["end"]
+                out.append(item)
+                item = {"content": "", "start": None, "end": None, "words": []}
+
+    if len(out) == 0:
+        print("No words found in", i)
+        return []
+
+    with open(transcript_file, "w", encoding="utf-8") as outfile:
+        json.dump(out, outfile)
+
+    end_time = time.time()
+    execution_time = end_time - start_time
+    print(f"Transcription took: {execution_time} seconds")
+
+    #return out
+    return []
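Two practical points follow from the code above: vosk_stt() caches its transcript as a .json file next to the input video and reuses it on later runs, and the function currently returns an empty list because the final "return out" is commented out. A hedged usage sketch (the function, module, and config section names are the ones added in this commit; the paths are illustrative):

    # Hypothetical call to the new transcraibe module.
    from programs import transcraibe

    transcraibe.vosk_stt(
        input_file="inputs/input.mp4",
        stt_model="vosk-models/vosk-model-en-us-0.42-gigaspeech",
    )
    # Side effect: writes inputs/input.json, a list of segments shaped like
    # [{"content": "...", "start": 0.5, "end": 1.8, "words": [...]}, ...]
    # Return value: [] (see the commented-out "return out" above).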
requirements.txt
@@ -1,57 +1,28 @@
-annotated-types==0.6.0
 beautifulsoup4==4.12.3
-blis==0.7.11
-Brotli==1.1.0
-catalogue==2.0.10
-certifi==2024.2.2
+certifi==2024.6.2
 cffi==1.16.0
 charset-normalizer==3.3.2
-click==8.1.7
-cloudpathlib==0.16.0
 colorama==0.4.6
-confection==0.1.4
-cupy-cuda11x==12.3.0
-cymem==2.0.8
 decorator==4.4.2
-en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1-py3-none-any.whl#sha256=86cc141f63942d4b2c5fcee06630fd6f904788d2f0ab005cce45aadb8fb73889
-fastrlock==0.8.2
+distlib==0.3.7
+ffmpeg-python==0.2.0
+filelock==3.13.1
+future==1.0.0
 idna==3.7
 imageio==2.34.1
-imageio-ffmpeg==0.4.9
-Jinja2==3.1.4
-langcodes==3.4.0
-language_data==1.2.0
-marisa-trie==1.1.1
-MarkupSafe==2.1.5
+imageio-ffmpeg==0.5.1
 moviepy==1.0.3
-murmurhash==1.0.10
-mutagen==1.47.0
-numpy==1.26.4
-packaging==24.0
+numpy==2.0.0
 pillow==10.3.0
-preshed==3.0.9
+platformdirs==4.1.0
 proglog==0.1.10
 pycparser==2.22
-pycryptodomex==3.20.0
-pydantic==2.7.1
-pydantic_core==2.18.2
-pydub==0.25.1
-requests==2.31.0
-smart-open==6.4.0
+requests==2.32.3
 soupsieve==2.5
-spacy==3.7.4
-spacy-legacy==3.0.12
-spacy-loggers==1.0.5
-srsly==2.4.8
 srt==3.5.3
-thinc==8.2.3
 tqdm==4.66.4
-typer==0.9.4
-typing_extensions==4.11.0
-urllib3==2.2.1
+urllib3==2.2.2
 videogrep==2.3.0
+virtualenv==20.25.0
 vosk==0.3.45
-wasabi==1.1.2
-weasel==0.3.4
 websockets==12.0
-yt-dlp==2024.4.9
utils/load_config.py: new file, 8 lines
@@ -0,0 +1,8 @@
+import yaml
+from pathlib import Path
+
+config_file = Path(__file__).parent.parent / "config.yaml"
+
+def load_config():
+    with open(config_file, 'r') as file:
+        return yaml.safe_load(file)
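This small helper is what the refactor standardizes on: the CLI entry point and the new program modules both resolve config.yaml relative to the repo root through it. A minimal sketch of the pattern, mirroring how extract_sound.py and videobeaux.py use it in this commit:

    from utils import load_config

    config = load_config.load_config()
    proj_mgmt = config['proj_mgmt']
    print(proj_mgmt['default_video_file_ext'])  # '.mp4' with this commit's config.yaml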
videobeaux.py: 537 lines changed
@@ -1,25 +1,97 @@
 import typer
-import yaml
+from typing_extensions import Annotated
 
 from pathlib import Path
 
-from programs import silence_extraction, resize, convert, extract_frames, sound, reverse, stack_2x, lsd_feedback, frame_delay_pro1, frame_delay_pro2, mirror_delay, blur_pix, overexposed_stutter, scrolling, scrolling_pro, nostalgic_stutter, stutter_pro
-
-config_file = Path(__file__).parent / "config.yaml"
-
-def load_config():
-    with open(config_file, 'r') as file:
-        return yaml.safe_load(file)
-
-config = load_config()
+from programs import (
+    silence_xtraction,
+    resize,
+    convert,
+    extract_frames,
+    download_yt,
+    transcraibe,
+    extract_sound,
+    reverse,
+    stack_2x,
+    lsd_feedback,
+    nostalgic_stutter,
+    frame_delay_pro1,
+    frame_delay_pro2,
+    mirror_delay,
+    overexposed_stutter,
+    stutter_pro,
+    scrolling,
+    scrolling_pro,
+    blur_pix)
+
+from utils import load_config
+from datetime import datetime
+
+from pyfiglet import Figlet
+a = Figlet(font='ogre')
+print(a.renderText("videobeaux"))
+print("Your friendly multilateral video toolkit built for artists by artists.")
+print("It's your best friend.")
+print('-' * 50)
+
+config = load_config.load_config()
+print(config)
+
+proj_mgmt_config = config['proj_mgmt']
+v_ext = proj_mgmt_config['default_video_file_ext']
+a_ext = proj_mgmt_config['default_audio_file_ext']
+
+now = datetime.now()
+ct = now.strftime("%Y-%m-%d_%H-%M-%S")
 
 app = typer.Typer()
 
-# V1
-@app.command()
-def silence_xtraction(
-    min_d: int = typer.Option(None, help="Width of the output video"),
-    max_d: int = typer.Option(None, help="Height of the output video"),
-    adj: int = typer.Option(None, help="Height of the output video"),
+##########
+# yt-dlp
+##########
+@app.command('download-yt')
+def yt_dlp_vb(
+    yt_url: str = typer.Argument(None, help="URL of the YT video"),
+    output_file: str = typer.Argument(None, help="Width of the output video"),
+    format: str = typer.Argument(v_ext, help="Width of the output video"),
+):
+    params = {
+        "yt_url": yt_url,
+        "output_file": output_file,
+        "format": format
+    }
+    defaults = config['download_yt']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    download_yt.download_yt(**params)
+
+
+###############
+# transcraibe
+###############
+@app.command('transcraibe')
+def transcraibe_vb(
+    input_file: str = typer.Argument(None, help='Video file you would like to transcribe.'),
+    stt_model: str = typer.Argument(None, help="URL of the YT video")
+):
+    params = {
+        "input_file": input_file,
+        "stt_model": stt_model
+    }
+    defaults = config['transcraibe']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    transcraibe.vosk_stt(**params)
+
+
+#####################
+# silence-xtraction
+#####################
+@app.command('silence-xtraction', help="Stitches togehter video chunks that have no discernable words." +
+    "This does NOT use audio analysis, but instead identifes the presence of a 'word' using the .srt transcription file")
+def silence_xtraction_vb(
+    min_d: int = typer.Option(None, help="Minimum duration of a chunk of silence."),
+    max_d: int = typer.Option(None, help="Maximum duration of a chunk of silence."),
+    adj: int = typer.Option(None, help="Adjustment value"),
     input_file: str = typer.Option(None, help="Input video file"),
     output_file: str = typer.Option(None, help="Output video file")
 ):
@@ -29,23 +101,22 @@ def silence_xtraction(
         "adj": adj,
         "input_file": input_file,
         "output_file": output_file,
-
     }
     defaults = config['silence_x']
     params = {key: params.get(key) or defaults[key] for key in defaults}
+    silence_xtraction.silence_xtraction(**params)
 
-    silence_extraction.slncx_main(**params)
 
-@app.command()
-def resize_video(
+##########
+# resize
+##########
+@app.command('resize', help='Resize a video to the given width and height.')
+def resize_vb(
     input_file: str = typer.Option(None, help="Input video file"),
     output_file: str = typer.Option(None, help="Output video file"),
     width: int = typer.Option(None, help="Width of the output video"),
     height: int = typer.Option(None, help="Height of the output video")
 ):
-    """
-    Resize a video to the given width and height.
-    """
     params = {
         "input_file": input_file,
         "output_file": output_file,
@@ -54,318 +125,284 @@ def resize_video(
     }
     defaults = config['resize']
    params = {key: params.get(key) or defaults[key] for key in defaults}
+    resize.resize(**params)
 
-    resize.resize_video(**params)
 
-@app.command()
-def convert_video(
+###########
+# convert
+###########
+@app.command('convert', help='Convert a video to a different format.')
+def convert_vb(
     input_file: str = typer.Option(None, help="Input video file"),
     output_file: str = typer.Option(None, help="Output video file"),
     format: str = typer.Option(None, help="Format of the output video")
 ):
-    """
-    Convert a video to a different format.
-    """
-    if not input_file:
-        input_file = config['convert']['input_file']
-    if not output_file:
-        output_file = config['convert']['output_file']
-    if not format:
-        format = config['convert']['format']
-
-    convert.convert_video(input_file, output_file, format)
+    params = {
+        "input_file": input_file,
+        "output_file": output_file,
+        "format": format
+    }
+    defaults = config['convert']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    convert.convert(**params)
 
 
-@app.command()
-def extract_frames(
+##################
+# extract-frames
+##################
+@app.command('extract-frames', help='Extract frames from a video at the specified frame rate.')
+def extract_frames_vb(
     input_file: str = typer.Option(None, help="Input video file"),
-    output_folder: str = typer.Option(None, help="Output folder for frames"),
+    output_file: str = typer.Option(None, help="Output folder for frames"),
     frame_rate: int = typer.Option(None, help="Frame rate for extracting frames")
 ):
-    """
-    Extract frames from a video at the specified frame rate.
-    """
-    if not input_file:
-        input_file = config['extract_frames']['input_file']
-    if not output_folder:
-        output_folder = config['extract_frames']['output_folder']
-    if not frame_rate:
-        frame_rate = config['extract_frames']['frame_rate']
-
-    extract_frames.extract_frames(input_file, output_folder, frame_rate)
+    params = {
+        "input_file": input_file,
+        "output_file": output_file,
+        "frame_rate": frame_rate
+    }
+    defaults = config['extract_frames']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    extract_frames.extract_frames(**params)
 
 
-# V2
-@app.command()
-def extract_sound(
-    input_file: str = typer.Option(None, help="Input video file"),
-    output_file: str = typer.Option(None, help="Output audio file")
+#################
+# extract-sound
+#################
+@app.command('extract-sound', help='Extract audio from video file.')
+def extract_sound_vb(
+    input_file: str = typer.Argument(None, help="Input video file"),
+    output_file: str = typer.Argument(None, help="Output audio file")
 ):
-    """
-    Extract audio from video file.
-    """
-    if not input_file:
-        input_file = config['extract_sound']['input_file']
-    if not output_file:
-        output_file = config['extract_sound']['output_file']
-
-    sound.extract_sound(input_file, output_file)
+    params = {
+        "input_file": input_file,
+        "output_file": output_file
+    }
+    defaults = config['extract_sound']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    print(params)
+    extract_sound.extract_sound(**params)
 
 
-@app.command()
-def reverse_video(
+###########
+# reverse
+###########
+@app.command('reverse', help='Reverse video file.')
+def reverse_vb(
     input_file: str = typer.Option(None, help="Input video file"),
     output_file: str = typer.Option(None, help="Output video file")
 ):
-    """
-    Reverse video file.
-    """
-    if not input_file:
-        input_file = config['reverse']['input_file']
-    if not output_file:
-        output_file = config['reverse']['output_file']
-
-    reverse.reverse_video(input_file, output_file)
+    params = {
+        "input_file": input_file,
+        "output_file": output_file
+    }
+    defaults = config['reverse']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    reverse.reverse(**params)
 
 
-@app.command()
-def stack_2x_video(
+############
+# stack-2x
+############
+@app.command('stack-2x', help='Stack 2 videos on top of each other keeping the original orientation.')
+def stack_2x_vb(
     input_file1: str = typer.Option(None, help="Input video file 1"),
     input_file2: str = typer.Option(None, help="Input video file 2"),
     output_file: str = typer.Option(None, help="Output video file")
 ):
-    """
-    Stack 2 videos on top of each other keeping the original orientation.
-    """
-    if not input_file1:
-        input_file1= config['stack_2x']['input_file1']
-    if not input_file2:
-        input_file2 = config['stack_2x']['input_file2']
-    if not output_file:
-        output_file = config['stack_2x']['output_file']
-
-    stack_2x.stack_2x_video(input_file1, input_file2, output_file)
+    params = {
+        "input_file1": input_file1,
+        "input_file2": input_file2,
+        "output_file": output_file
+    }
+    defaults = config['reverse']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    stack_2x.stack_2x(**params)
 
 
-@app.command()
-def lsd_feedback_video(
+################
+# lsd-feedback
+################
+@app.command('lsd-feedback', help='Apply LSD feedback effect to video file.')
+def lsd_feedback_vb(
     input_file: str = typer.Option(None, help="Input video file "),
     output_file: str = typer.Option(None, help="Output video file")
 ):
-    """
-    Apply LSD feedback effect to video file.
-    """
-    if not input_file:
-        input_file= config['lsd_feedback']['input_file']
-    if not output_file:
-        output_file = config['lsd_feedback']['output_file']
-
-    lsd_feedback.lsd_feedback_video(input_file, output_file)
+    params = {
+        "input_file": input_file,
+        "output_file": output_file
+    }
+    defaults = config['lsd-feedback']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    lsd_feedback.lsd_feedback(**params)
 
 
-@app.command()
-def nostalgic_stutter_video(
+#####################
+# nostalgic-stutter
+#####################
+@app.command('nostalgic-stutter', help='Apply nostaglic stutter effect to video file.')
+def nostalgic_stutter_vb(
     input_file: str = typer.Option(None, help="Input video file"),
     output_file: str = typer.Option(None, help="Output video file")
 ):
-    """
-    Apply nostaglic stutter effect to video file.
-    """
-    if not input_file:
-        input_file= config['lsd_feedback']['input_file']
-    if not output_file:
-        output_file = config['lsd_feedback']['output_file']
-
-    nostalgic_stutter.nostalgic_stutter_video(input_file, output_file)
+    params = {
+        "input_file": input_file,
+        "output_file": output_file
+    }
+    defaults = config['nostalgic-stutter']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    nostalgic_stutter.nostalgic_stutter(**params)
 
 
-@app.command()
-def frame_delay_pro1_video(
+####################
+# frame-delay-pro1
+####################
+@app.command('frame-delay-pro1', help='Apply the pro1 frame delay to video file.')
+def frame_delay_pro1_vb(
     input_file: str = typer.Option(None, help="Input video file "),
     num_of_frames: int = typer.Option(None, help="Input weight for frame delay"),
     frame_weights: str = typer.Option(None, help="Input weight for frame delay"),
     output_file: str = typer.Option(None, help="Output video file")
 ):
-    """
-    Apply the pro1 frame delay to video file.
-    """
-    if not input_file:
-        input_file= config['frame_delay_pro1']['input_file']
-    if not num_of_frames:
-        num_of_frames= config['frame_delay_pro1']['num_of_frames']
-    if not frame_weights:
-        frame_weights= config['frame_delay_pro1']['frame_weights']
-    if not output_file:
-        output_file = config['frame_delay_pro1']['output_file']
-
-    frame_delay_pro1.frame_delay_pro1_video(input_file, num_of_frames, frame_weights, output_file)
+    params = {
+        "input_file": input_file,
+        "num_of_frames": num_of_frames,
+        "frame_weights": frame_weights,
+        "output_file": output_file
+    }
+    defaults = config['frame_delay_pro1']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    frame_delay_pro1.frame_delay_pro1(**params)
 
 
-@app.command()
-def frame_delay_pro2_video(
+####################
+# frame-delay-pro2
+####################
+@app.command('frame-delay-pro2', help='Apply the pro2 frame delay to video file.')
+def frame_delay_pro2_vb(
     input_file: str = typer.Option(None, help="Input video file "),
     decay: int = typer.Option(None, help=""),
     planes: str = typer.Option(None, help="Input weight for frame delay"),
     output_file: str = typer.Option(None, help="Output video file")
 ):
-    """
-    Apply the pro2 frame delay to video file.
-    """
-    if not input_file:
-        input_file= config['frame_delay_pro2']['input_file']
-    if not decay:
-        decay= config['frame_delay_pro2']['decay']
-    if not planes:
-        planes= config['frame_delay_pro2']['planes']
-    if not output_file:
-        output_file = config['frame_delay_pro2']['output_file']
-
-    frame_delay_pro2.frame_delay_pro2_video(input_file, decay, planes, output_file)
+    params = {
+        "input_file": input_file,
+        "decay": decay,
+        "planes": planes,
+        "output_file": output_file
+    }
+    defaults = config['frame_delay_pro2']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    frame_delay_pro2.frame_delay_pro2(**params)
 
 
-@app.command()
-def mirror_delay_video(
+################
+# mirror-delay
+################
+@app.command('mirror-delay', help='Apply mirrored delay effect to video file.')
+def mirror_delay_vb(
     input_file: str = typer.Option(None, help="Input video file"),
     output_file: str = typer.Option(None, help="Output video file")
 ):
-    """
-    Apply mirrored delay effect to video file.
-    """
-    if not input_file:
-        input_file= config['frame_lag']['input_file']
-    if not output_file:
-        output_file = config['frame_lag']['output_file']
-
-    mirror_delay.mirror_delay_video(input_file, output_file)
+    params = {
+        "input_file": input_file,
+        "output_file": output_file
+    }
+    defaults = config['mirror_delay']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    print(params)
+    mirror_delay.mirror_delay(**params)
 
 
-@app.command()
-def overexposed_stutter_video(
+######################
+# overexposed-stutter
+######################
+@app.command('overexposed-stutter', help='Apply overexposed stutter effect to video file.')
+def overexposed_stutter_vb(
     input_file: str = typer.Option(None, help="Input video file"),
     output_file: str = typer.Option(None, help="Output video file")
 ):
-    """
-    Apply overexposed stutter effect to video file.
-    """
-    if not input_file:
-        input_file= config['overexposed_stutter']['input_file']
-    if not output_file:
-        output_file = config['overexposed_stutter']['output_file']
-
-    overexposed_stutter.overexposed_stutter_video(input_file, output_file)
+    params = {
+        "input_file": input_file,
+        "output_file": output_file
+    }
+    defaults = config['overexposed-stutter']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    overexposed_stutter.overexposed_stutter(**params)
 
 
-@app.command()
-def stutter_pro_video(
+###############
+# stutter-pro
+###############
+@app.command('stutter-pro', help='Apply stutter pro effect to video file.')
+def stutter_pro_vb(
     input_file: str = typer.Option(None, help="Input video file"),
     stutter: str = typer.Option(None, help="Frame stutter parameter"),
     output_file: str = typer.Option(None, help="Output video file")
 ):
-    """
-    Apply stutter pro effect to video file.
-    """
-    if not input_file:
-        input_file= config['stutter_pro']['input_file']
-    if not stutter:
-        stutter= config['stutter_pro']['stutter']
-    if not output_file:
-        output_file = config['stutter_pro']['output_file']
-
-    stutter_pro.stutter_pro_video(input_file, stutter, output_file)
+    params = {
+        "input_file": input_file,
+        "stutter": stutter,
+        "output_file": output_file
+    }
+    defaults = config['stutter-pro']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    stutter_pro.stutter_pro(**params)
 
 
-@app.command()
-def scrolling_video(
+#############
+# scrolling
+#############
+@app.command('scrolling', help='Apply scrolling effect to video file.')
+def scrolling_vb(
     input_file: str = typer.Option(None, help="Input video file"),
     output_file: str = typer.Option(None, help="Output video file")
 ):
-    """
-    Apply scrolling effect to video file.
-    """
-    if not input_file:
-        input_file= config['scrolling']['input_file']
-    if not output_file:
-        output_file = config['scrolling']['output_file']
-
-    scrolling.scrolling_video(input_file, output_file)
+    params = {
+        "input_file": input_file,
+        "output_file": output_file
+    }
+    defaults = config['scrolling']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    scrolling.scrolling(input_file, output_file)
 
 
-@app.command()
+#################
+# scrolling-pro
+#################
+@app.command('scrolling-pro', help='Apply scrolling pro effect to video file.')
 def scrolling_pro_video(
     input_file: str = typer.Option(None, help="Input video file"),
     horizontal: str = typer.Option(None, help="Horizontal scroll parameter"),
     vertical: str = typer.Option(None, help="Vertical scroll parameter"),
     output_file: str = typer.Option(None, help="Output video file")
 ):
-    """
-    Apply scrolling pro effect to video file.
-    """
-    if not input_file:
-        input_file= config['scrolling_pro']['input_file']
-    if not horizontal:
-        horizontal= config['scrolling_pro']['horizontal']
-    if not vertical:
-        vertical= config['scrolling_pro']['vertical']
-    if not output_file:
-        output_file = config['scrolling_pro']['output_file']
-
-    scrolling_pro.scrolling_pro_video(input_file, horizontal, vertical, output_file)
+    params = {
+        "input_file": input_file,
+        "horizontal": horizontal,
+        "verticla": vertical,
+        "output_file": output_file
+    }
+    defaults = config['scrolling-pro']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    scrolling_pro.scrolling_pro(**params)
 
 
-@app.command()
+############
+# blur-pix
+############
+@app.command('blur-pix', help='Apply blur pix effect to video file.')
 def blur_pix_video(
-    input_file: str = typer.Option(None, help="Input video file"),
-    output_file: str = typer.Option(None, help="Output video file")
+    input_file: str = typer.Argument(None, help="Input video file"),
+    output_file: str = typer.Argument(None, help="Output video file")
 ):
-    """
-    Apply blur pix effect to video file.
-    """
-    if not input_file:
-        input_file= config['blur_pix']['input_file']
-    if not output_file:
-        output_file = config['blur_pix']['output_file']
-
-    blur_pix.blur_pix_video(input_file, output_file)
+    params = {
+        "input_file": input_file,
+        "output_file": output_file
+    }
+    defaults = config['blur-pix']
+    params = {key: params.get(key) or defaults[key] for key in defaults}
+    blur_pix.blur_pix(**params)
 
 
 if __name__ == "__main__":
     app()
-
-
-'''def main():
-
-    parser = argparse.ArgumentParser(description="VideoBeaux - It's You're Best Friend")
-    subparsers = parser.add_subparsers(title='Subcommands', dest='command', help='Sub-command help')
-
-
-    # Program selection
-    #add_parser = subparsers.add_parser('program', help='Add a new task')
-    #add_parser.add_argument('task', type=str, help='The task to add')
-
-    # Project Management
-    #prjmgmt_parser = subparsers.add_parser('project', help='Add a new task')
-    #prjmgmt_parser.add_argument('--input_file', dest='infile', type=str, help='Full path to input file') # todo - use a path defined in config
-    #prjmgmt_parser.add_argument('--output_file', dest='outfile', type=str, help='filename of output file that will be save in videobeaux root dir') # todo - use a path defined in config
-
-
-
-
-    # Silence Xtraction
-    silencextraction_parser = subparsers.add_parser('silence-xtraction', help='extracts silence from a given video')
-    silencextraction_parser.add_argument('--min_d', dest='mind', type=int, help='Minimum duration of a silent chunk')
-    silencextraction_parser.add_argument('--max_d', dest='maxd', type=int, help='Maximum duration of a silent chunk')
-    silencextraction_parser.add_argument('--adj', dest='adj', type=int, help='Maximum duration of a silent chunk')
-    silencextraction_parser.add_argument('--input_file', dest='infile', type=str, help='Full path to input file') # todo - use a path defined in config
-    silencextraction_parser.add_argument('--output_file', dest='outfile', type=str, help='filename of output file that will be save in videobeaux root dir') # todo - use a path defined in config
-
-    args = parser.parse_args()
-
-    if args.command == 'silence-xtraction':
-        silence_extraction.slncx_main(args.mind, args.maxd, args.adj, args.infile, args.outfile)
-
-    else:
-        parser.print_help()
-
-'''
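The recurring line "params = {key: params.get(key) or defaults[key] for key in defaults}" is the heart of the new arg structure: most CLI options default to None, and any missing or falsy value is filled in from the matching section of config.yaml before the program function is called. A small illustration using the resize defaults this commit defines (the defaults shown are the config.yaml values; the params dict below is hypothetical CLI input):

    defaults = {              # config['resize'] as defined in config.yaml
        "input_file": "inputs\\input.mp4",
        "output_file": "outputs\\resize.mp4",
        "width": 1200,
        "height": 200,
    }
    params = {"input_file": "my.mp4", "output_file": None, "width": None, "height": 480}
    params = {key: params.get(key) or defaults[key] for key in defaults}
    # -> {'input_file': 'my.mp4', 'output_file': 'outputs\\resize.mp4',
    #     'width': 1200, 'height': 480}
    # Because the merge tests truthiness, an explicit 0 on the command line
    # would also fall back to the config default.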