# RAR / app.py
# yucornetto — Update app.py (commit e76aa24, verified)
# Reference: https://huggingface.co/spaces/FoundationVision/LlamaGen/blob/main/app.py
from PIL import Image
import gradio as gr
from imagenet_classes import imagenet_idx2classname
import torch
# Allow TF32 matmul/cuDNN kernels (faster on Ampere+ GPUs, slightly
# lower precision) for inference.
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import time
import demo_util
from utils.train_utils import create_pretrained_tokenizer
import os
import spaces
from huggingface_hub import hf_hub_download
# NOTE(review): upgrading numpy at runtime via os.system is fragile on a
# Space — prefer pinning the version in requirements.txt.
os.system("pip3 install -U numpy")
# Fetch pretrained weights into the working directory:
# the MaskGIT VQGAN tokenizer (f16, ImageNet 256) and the RAR-XL checkpoint.
hf_hub_download(repo_id="fun-research/TiTok", filename="maskgit-vqgan-imagenet-f16-256.bin", local_dir="./")
hf_hub_download(repo_id="yucornetto/RAR", filename="rar_xl.bin", local_dir="./")
# @spaces.GPU
def load_model():
    """Build the RAR-XL generator and its VQGAN tokenizer on the CUDA device.

    Loads the generator config, overrides the architecture hyperparameters
    for the chosen model size, instantiates the pretrained tokenizer and
    generator, and moves both to GPU.

    Returns:
        tuple: ``(tokenizer, generator)``, both on the CUDA device.
    """
    device = "cuda"  # if torch.cuda.is_available() else "cpu"
    model_size = "rar_xl"
    # (hidden_size, num_hidden_layers, intermediate_size) per RAR variant.
    hidden, layers, intermediate = {
        "rar_b": (768, 24, 3072),
        "rar_l": (1024, 24, 4096),
        "rar_xl": (1280, 32, 5120),
        "rar_xxl": (1408, 40, 6144),
    }[model_size]
    config = demo_util.get_config("configs/training/generator/rar.yaml")
    config.experiment.generator_checkpoint = f"{model_size}.bin"
    config.model.generator.hidden_size = hidden
    config.model.generator.num_hidden_layers = layers
    config.model.generator.num_attention_heads = 16
    config.model.generator.intermediate_size = intermediate
    print(config)
    tokenizer = create_pretrained_tokenizer(config)
    print(tokenizer)
    generator = demo_util.get_rar_generator(config)
    print(generator)
    return tokenizer.to(device), generator.to(device)
# Load the models once at import time so every request reuses them.
tokenizer, generator = load_model()
@spaces.GPU
def demo_infer(
        guidance_scale, randomize_temperature, guidance_scale_pow,
        class_label, seed, num_images=4):
    """Sample a batch of images for one ImageNet class with RAR-XL.

    Args:
        guidance_scale: classifier-free guidance strength passed to the
            sampler.
        randomize_temperature: sampling temperature passed to the sampler.
        guidance_scale_pow: exponent shaping the guidance schedule.
        class_label: ImageNet-1K class index (the dropdown uses
            ``type="index"``) used to condition every sample in the batch.
        seed: RNG seed for reproducible sampling.
        num_images: number of images to generate (default 4, matching the
            gallery layout; appended with a default so existing callers
            are unaffected).

    Returns:
        list[PIL.Image.Image]: the generated images.
    """
    device = "cuda"  # if torch.cuda.is_available() else "cpu"
    # Replicate the class label so the whole batch shares one condition.
    class_labels = [class_label] * num_images
    # Seed both the CPU and CUDA RNGs so a given seed reproduces the batch.
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    t1 = time.time()
    generated_image = demo_util.sample_fn(
        generator=generator,
        tokenizer=tokenizer,
        labels=class_labels,
        guidance_scale=guidance_scale,
        randomize_temperature=randomize_temperature,
        guidance_scale_pow=guidance_scale_pow,
        device=device
    )
    sampling_time = time.time() - t1
    print(f"generation takes about {sampling_time:.2f} seconds.")
    # sample_fn appears to return a batch of HWC uint8 arrays suitable for
    # Image.fromarray — TODO confirm dtype/layout against demo_util.
    return [Image.fromarray(sample) for sample in generated_image]
# Build the Gradio UI: class dropdown + sampling hyperparameter sliders on
# the left, a 4-image gallery on the right.
with gr.Blocks() as demo:
    gr.Markdown("<h1 style='text-align: center'>Randomized Autoregressive Visual Generation (This demo runs with RAR-XL)</h1>")
    with gr.Tabs():
        with gr.TabItem('Generate'):
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        # type="index": the component emits the integer
                        # position in the class list, which demo_infer
                        # receives as class_label.
                        i1k_class = gr.Dropdown(
                            list(imagenet_idx2classname.values()),
                            value='Eskimo dog, husky',
                            type="index", label='ImageNet-1K Class'
                        )
                    guidance_scale = gr.Slider(minimum=1, maximum=25, step=0.1, value=4.0, label='Classifier-free Guidance Scale')
                    randomize_temperature = gr.Slider(minimum=0.8, maximum=1.2, step=0.01, value=1.0, label='randomize_temperature')
                    guidance_scale_pow = gr.Slider(minimum=0.0, maximum=4.0, step=0.25, value=0.0, label='guidance_scale_pow')
                    seed = gr.Slider(minimum=0, maximum=1000, step=1, value=42, label='Seed')
                    button = gr.Button("Generate", variant="primary")
                with gr.Column():
                    # Gallery shows the 4 generated samples in one row.
                    output = gr.Gallery(label='Generated Images',
                                        columns=4,
                                        rows=1,
                                        height=256, object_fit="scale-down")
            # Wire the Generate button to inference; input order must match
            # demo_infer's positional parameters.
            button.click(demo_infer, inputs=[
                guidance_scale, randomize_temperature, guidance_scale_pow,
                i1k_class, seed],
                outputs=[output])
# Enable request queueing (required for ZeroGPU Spaces) and launch.
demo.queue()
demo.launch(debug=True)