import logging
import os
import urllib.request
import uuid

import gradio as gr
import moviepy.editor as mp
from gtts import gTTS
from hercai import Hercai
# Configure logging
log_dir = os.getenv('LOG_DIRECTORY', './')
LOGGER_FILE_PATH = os.path.join(str(log_dir), 'utils.log')

logging.basicConfig(
    filename=LOGGER_FILE_PATH,
    filemode='a',
    format='[%(asctime)s] [%(levelname)s] [%(filename)s] [%(lineno)s:%(funcName)s()] %(message)s',
    datefmt='%Y-%b-%d %H:%M:%S'
)
LOGGER = logging.getLogger(__name__)

log_level_env = os.getenv('LOG_LEVEL', 'INFO')
log_level_dict = {
    'DEBUG': logging.DEBUG,
    'INFO': logging.INFO,
    'WARNING': logging.WARNING,
    'ERROR': logging.ERROR,
    'CRITICAL': logging.CRITICAL
}
log_level = log_level_dict.get(log_level_env, logging.INFO)
LOGGER.setLevel(log_level)
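
# The log destination and verbosity come from environment variables; for example, a
# hypothetical shell invocation (not part of this file) such as
#   LOG_DIRECTORY=/tmp LOG_LEVEL=DEBUG python app.py
# would write debug-level entries to /tmp/utils.log.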


class Text2Video:
    """A class to generate videos from text prompts."""

    def __init__(self) -> None:
        """Initialize the Text2Video class."""
        self.herc = Hercai("")  # Replace with your Hercai API key if you have one

    def get_image(self, img_prompt: str) -> str:
        """
        Generate an image from the provided text prompt, placing the text in speech bubbles.

        Args:
            img_prompt (str): Text prompt for generating the image.

        Returns:
            str: URL of the generated image, or an empty string on failure.
        """
        try:
            LOGGER.info(f"Generating image with prompt: {img_prompt}")
            # Generate the image with Hercai, using a prompt modified to include comic-style elements
            modified_prompt = (
                f"A comic book style image with speech bubbles containing the following text: "
                f"'{img_prompt}'. Include vibrant colors and onomatopoeia where appropriate."
            )
            image_result = self.herc.draw_image(
                model="simurg",
                prompt=modified_prompt,
                negative_prompt="Dark and gloomy"
            )
            image_url = image_result["url"]
            LOGGER.info(f"Image generated successfully: {image_url}")
            return image_url
        except Exception as e:
            # Log any errors encountered during image generation
            LOGGER.error(f"Error generating image: {e}")
            return ""
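
    # Sketch of expected usage (hypothetical prompt; assumes the Hercai endpoint is reachable):
    #   url = Text2Video().get_image("The hero shouts 'Look out!'")
    # On success, `url` is the hosted image URL returned by Hercai; on failure it is "".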

    def download_img_from_url(self, image_url: str, image_path: str) -> str:
        """
        Download an image from a URL.

        Args:
            image_url (str): URL of the image to download.
            image_path (str): Path to save the downloaded image.

        Returns:
            str: Path of the downloaded image.
        """
        try:
            LOGGER.info(f"Downloading image from: {image_url}")
            # Download the image from the provided URL and save it to the specified path
            urllib.request.urlretrieve(image_url, image_path)
            LOGGER.info(f"Image downloaded and saved to: {image_path}")
            return image_path
        except Exception as e:
            # Log any errors encountered during image download
            LOGGER.error(f"Error downloading image from URL: {e}")
            return ""

    def text_to_audio(self, img_prompt: str, audio_path: str) -> str:
        """
        Convert text to speech and save it as an audio file.

        Args:
            img_prompt (str): Text to convert to speech.
            audio_path (str): Path to save the audio file.

        Returns:
            str: Path of the saved audio file, or an empty string on failure.
        """
        try:
            LOGGER.info(f"Converting text to audio: {img_prompt}")
            language = 'en'
            # Create a gTTS object to convert the text to speech
            tts = gTTS(text=img_prompt, lang=language, slow=False)
            # Save the audio file at the specified path
            tts.save(audio_path)
            LOGGER.info(f"Audio saved to: {audio_path}")
            return audio_path
        except Exception as e:
            # Log any errors encountered during text-to-audio conversion
            LOGGER.error(f"Error converting text to audio: {e}")
            return ""
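
    # Sketch of expected usage (hypothetical text and path):
    #   path = Text2Video().text_to_audio("Hello from panel one", "panel1.mp3")
    # gTTS synthesizes English speech and writes it to panel1.mp3; "" is returned on failure.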

    def get_images_and_audio(self, list_prompts: list) -> tuple:
        """
        Generate images and corresponding audio files from a list of prompts.

        Args:
            list_prompts (list): List of text prompts.

        Returns:
            tuple: A tuple containing lists of image paths and audio paths.
        """
        img_list = []
        audio_paths = []
        for img_prompt in list_prompts:
            try:
                LOGGER.info(f"Processing prompt: {img_prompt}")
                unique_id = uuid.uuid4().hex
                image_path = f"{img_prompt[:9]}_{unique_id}.png"
                img_url = self.get_image(img_prompt)
                image = self.download_img_from_url(img_url, image_path)
                img_list.append(image)
                audio_path = f"{img_prompt[:9]}_{unique_id}.mp3"
                audio = self.text_to_audio(img_prompt, audio_path)
                audio_paths.append(audio)
            except Exception as e:
                LOGGER.error(f"Error processing prompt: {img_prompt}, {e}")
        return img_list, audio_paths
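
    # The generated files are named from the first nine characters of each prompt plus a UUID
    # hex suffix, e.g. for the prompt "Once upon a time ..." (hypothetical names):
    #   "Once upon_<uuid>.png" and "Once upon_<uuid>.mp3"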

    def create_video_from_images_and_audio(self, image_files: list, audio_files: list, output_path: str) -> None:
        """
        Create a video from images and corresponding audio files.

        Args:
            image_files (list): List of image files.
            audio_files (list): List of audio files.
            output_path (str): Path to save the output video file.
        """
        try:
            LOGGER.info(f"Creating video from images and audio, output path: {output_path}")
            if len(image_files) != len(audio_files):
                LOGGER.error("Error: Number of images doesn't match the number of audio files.")
                return
            video_clips = []
            for image_file, audio_file in zip(image_files, audio_files):
                LOGGER.info(f"Processing image: {image_file} and audio: {audio_file}")
                # Turn each image into a still clip that lasts as long as its narration
                audio_clip = mp.AudioFileClip(audio_file)
                video_clip = mp.ImageClip(image_file).set_duration(audio_clip.duration)
                video_clip = video_clip.set_audio(audio_clip)
                video_clips.append(video_clip)
            final_clip = mp.concatenate_videoclips(video_clips)
            final_clip.write_videofile(output_path, codec='libx264', fps=24)
            LOGGER.info("Video created successfully.")
        except Exception as e:
            # Log any errors encountered during video creation
            LOGGER.error(f"Error creating video: {e}")

    def generate_video(self, text: str) -> str:
        """
        Generate a video from a series of text prompts separated by double commas.

        Args:
            text (str): Text prompts separated by double commas (",,").

        Returns:
            str: Path to the generated video, or an empty string on failure.
        """
        try:
            LOGGER.info(f"Generating video from text: {text}")
            list_prompts = [sentence.strip() for sentence in text.split(",,") if sentence.strip()]
            output_path = "output_video.mp4"
            img_list, audio_paths = self.get_images_and_audio(list_prompts)
            self.create_video_from_images_and_audio(img_list, audio_paths, output_path)
            LOGGER.info(f"Video generated successfully: {output_path}")
            return output_path
        except Exception as e:
            LOGGER.error(f"Error generating video: {e}")
            return ""
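
    # Worked example of the double-comma prompt format (hypothetical input):
    #   generate_video("A quiet village,,A stranger arrives")
    # splits into ["A quiet village", "A stranger arrives"] and renders output_video.mp4.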

    def gradio_interface(self):
        """Build and launch the Gradio UI for the comic video generator."""
        with gr.Blocks(css="style.css", theme='abidlabs/dracula_revamped') as demo:
            example_txt = """Once upon a time there was a village. It was a nice place to live, except for one thing: people did not like to share.,, One day a visitor came to town.
'Hello. Does anybody have food to share?' he asked. 'No', said everyone.,,
'That's okay', said the visitor. 'I will make stone soup for everyone.' Then he took a stone and dropped it into a giant pot,,"""

            gr.HTML("""<center><h1 style="color:#fff">Comics Video Generator</h1></center>""")
            with gr.Row(elem_id="col-container"):
                input_text = gr.Textbox(label="Comics Text", placeholder="Enter the comic panels, separated by double commas (,,)")
            with gr.Row(elem_id="col-container"):
                button = gr.Button("Generate Video")
            with gr.Row(elem_id="col-container"):
                output = gr.PlayableVideo()
            with gr.Row(elem_id="col-container"):
                gr.Examples([example_txt], input_text)
            button.click(self.generate_video, [input_text], output)

        demo.launch(debug=True)


if __name__ == "__main__":
    text2video = Text2Video()
    text2video.gradio_interface()
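
# Programmatic usage without the Gradio UI (a sketch; assumes Hercai and gTTS are reachable):
#   t2v = Text2Video()
#   video_path = t2v.generate_video("Panel one text,,Panel two text")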