# L4GM-demo / app.py
# (Hugging Face Space file-viewer metadata, kept as a comment so the file parses:
#  uploaded by fffiloni — "Update app.py" — commit c34034e (verified) — 3.05 kB)
import torch

# Log the torch / CUDA environment at startup for Space debugging.
print(torch.__version__)
print(torch.version.cuda)
print(torch.cuda.is_available())

import os, subprocess
import uuid, tempfile
from glob import glob

# Expose the CUDA toolchain on PATH and pin the GPU arch for any
# extensions compiled at runtime.
os.environ['PATH'] = ':'.join(os.environ['PATH'].split(':') + ['/usr/local/cuda/bin'])
os.environ['TORCH_CUDA_ARCH_LIST'] = '8.6'

import gradio as gr
from huggingface_hub import snapshot_download

# Download the pretrained L4GM checkpoints into ./pretrained.
os.makedirs("pretrained", exist_ok=True)
snapshot_download(
    repo_id = "jiawei011/L4GM",
    local_dir = "./pretrained"
)

# Folder containing example videos
examples_folder = "data_test"

# Every regular file in the examples folder becomes a selectable example.
video_examples = [
    os.path.join(examples_folder, entry)
    for entry in os.listdir(examples_folder)
    if os.path.isfile(os.path.join(examples_folder, entry))
]
def generate(input_video):
    """Run the two-stage L4GM pipeline (3D reconstruction, then 4D
    interpolation) on an uploaded video and return the rendered results.

    Parameters
    ----------
    input_video : str
        Path to the input video supplied by the Gradio component.

    Returns
    -------
    tuple of 5 str
        Paths to the five result .mp4 files produced in this run's
        workspace, in sorted (deterministic) order.

    Raises
    ------
    gr.Error
        If either inference subprocess exits non-zero, or fewer than
        five output videos were produced.
    """
    unique_id = str(uuid.uuid4())
    workdir = f"results_{unique_id}"  # per-request workspace avoids collisions
    recon_model = "pretrained/recon.safetensors"
    interp_model = "pretrained/interp.safetensors"
    num_frames = 16
    test_path = input_video
    try:
        # Stage 1: single-frame 3D reconstruction.
        subprocess.run(
            [
                "python", "infer_3d.py", "big",
                "--workspace", workdir,
                "--resume", recon_model,
                "--num_frames", "1",
                "--test_path", test_path,
            ],
            check=True
        )
        # Stage 2: 4D interpolation over the full clip.
        subprocess.run(
            [
                "python", "infer_4d.py", "big",
                "--workspace", workdir,
                "--resume", recon_model,
                "--interpresume", interp_model,
                "--num_frames", str(num_frames),
                "--test_path", test_path,
            ],
            check=True
        )
    except subprocess.CalledProcessError as e:
        raise gr.Error(f"Error during inference: {str(e)}")
    # Sort so the five videos map to the UI slots deterministically
    # (raw glob order is filesystem-dependent).
    output_videos = sorted(glob(os.path.join(workdir, "*.mp4")))
    print(output_videos)
    # Guard against a raw IndexError when the pipeline produced fewer
    # files than the UI expects.
    if len(output_videos) < 5:
        raise gr.Error(
            f"Expected 5 output videos but found {len(output_videos)} in {workdir}"
        )
    return tuple(output_videos[:5])
# Build the Gradio interface: an input video + submit button on the left,
# five result-video slots on the right, plus clickable examples.
with gr.Blocks() as demo:
    with gr.Column():
        with gr.Row():
            with gr.Column():
                input_video = gr.Video(label="Input Video")
                submit_btn = gr.Button("Submit")
            with gr.Column():
                # Five identical slots for the five rendered results.
                result_slots = [gr.Video(label="Result") for _ in range(5)]
        gr.Examples(
            examples=video_examples,
            inputs=[input_video]
        )

    # Wire the button to the inference function; one output per slot.
    submit_btn.click(
        fn=generate,
        inputs=[input_video],
        outputs=result_slots
    )

demo.queue().launch(show_api=False, show_error=True)