# Reference: https://huggingface.co/spaces/FoundationVision/LlamaGen/blob/main/app.py
from PIL import Image
import gradio as gr
from imagenet_classes import imagenet_idx2classname
import torch

# Allow TF32 matmuls for faster inference on Ampere and newer GPUs.
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True

import time
import demo_util
from utils.train_utils import create_pretrained_tokenizer
import os
import spaces
from huggingface_hub import hf_hub_download

# Make sure numpy is up to date in the runtime environment.
os.system("pip3 install -U numpy")

# Download the MaskGIT-VQGAN tokenizer and the RAR-XL generator checkpoints.
hf_hub_download(repo_id="fun-research/TiTok", filename="maskgit-vqgan-imagenet-f16-256.bin", local_dir="./")
hf_hub_download(repo_id="yucornetto/RAR", filename="rar_xl.bin", local_dir="./")


# @spaces.GPU
def load_model():
    device = "cuda"  # if torch.cuda.is_available() else "cpu"
    # Load the generator config and patch in the size-dependent hyperparameters.
    rar_model_size = "rar_xl"
    config = demo_util.get_config("configs/training/generator/rar.yaml")
    config.experiment.generator_checkpoint = f"{rar_model_size}.bin"
    config.model.generator.hidden_size = {"rar_b": 768, "rar_l": 1024, "rar_xl": 1280, "rar_xxl": 1408}[rar_model_size]
    config.model.generator.num_hidden_layers = {"rar_b": 24, "rar_l": 24, "rar_xl": 32, "rar_xxl": 40}[rar_model_size]
    config.model.generator.num_attention_heads = 16
    config.model.generator.intermediate_size = {"rar_b": 3072, "rar_l": 4096, "rar_xl": 5120, "rar_xxl": 6144}[rar_model_size]
    print(config)

    # Build the pretrained tokenizer and the RAR generator, then move both to the GPU.
    tokenizer = create_pretrained_tokenizer(config)
    print(tokenizer)
    generator = demo_util.get_rar_generator(config)
    print(generator)
    tokenizer = tokenizer.to(device)
    generator = generator.to(device)
    return tokenizer, generator


# Load the models once at startup so every request reuses them.
tokenizer, generator = load_model()


@spaces.GPU
def demo_infer(guidance_scale, randomize_temperature, guidance_scale_pow, class_label, seed):
    device = "cuda"  # if torch.cuda.is_available() else "cpu"
    # Generate a batch of 4 samples for the selected ImageNet class, seeded for reproducibility.
    n = 4
    class_labels = [class_label for _ in range(n)]
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    t1 = time.time()
    generated_image = demo_util.sample_fn(
        generator=generator,
        tokenizer=tokenizer,
        labels=class_labels,
        guidance_scale=guidance_scale,
        randomize_temperature=randomize_temperature,
        guidance_scale_pow=guidance_scale_pow,
        device=device,
    )
    sampling_time = time.time() - t1
    print(f"generation takes about {sampling_time:.2f} seconds.")
    # Convert the generated arrays to PIL images for the Gradio gallery.
    samples = [Image.fromarray(sample) for sample in generated_image]
    return samples


with gr.Blocks() as demo:
    gr.Markdown("