from datasets import load_dataset
import gradio as gr
from gradio_client import Client
import json
import torch
from diffusers import FluxPipeline, AutoencoderKL
from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images
import spaces

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# FLUX.1-dev pipeline, plus a separately loaded VAE that is handed to the live-preview helper
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to(device)
good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16).to(device)
# pipe.enable_sequential_cpu_offload()
# pipe.vae.enable_slicing()
# pipe.vae.enable_tiling()
# pipe.to(torch.float16)

# Bind the live-preview helper to the pipeline instance so it can be called like a method
pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)

# Gradio client for the hosted Qwen2.5-72B-Instruct Space (used for all text generation)
llm_client = Client("Qwen/Qwen2.5-72B-Instruct")
# t2i_client = Client("black-forest-labs/FLUX.1-dev")
# t2i_client = Client("black-forest-labs/FLUX.1-schnell")

# Persona descriptions are sampled from this dataset
ds = load_dataset("MohamedRashad/FinePersonas-Lite", split="train")

prompt_template = """Generate a character with this persona description:
{persona_description}

In a world with this description:
{world_description}

Write the character in json format with the following fields:
- name: The name of the character
- background: The background of the character
- appearance: The appearance of the character
- personality: The personality of the character
- skills_and_abilities: The skills and abilities of the character
- goals: The goals of the character
- conflicts: The conflicts of the character
- backstory: The backstory of the character
- current_situation: The current situation of the character
- spoken_lines: The spoken lines of the character (list of strings)

Don't write anything else except the character description in json format and don't include '```'.
"""

world_description_prompt = "Generate a unique and random world description (Don't write anything else except the world description)."


def get_random_world_description():
    """Ask the LLM for a fresh, random world description."""
    result = llm_client.predict(
        query=world_description_prompt,
        history=[],
        system="You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
        api_name="/model_chat",
    )
    return result[1][0][-1]


def get_random_persona_description():
    """Shuffle the dataset and pick an arbitrary row to get a random persona."""
    return ds.shuffle().select([100])[0]["persona"]


@spaces.GPU(duration=75)
def infer_flux(character_json):
    """Stream intermediate images while FLUX renders the character's appearance."""
    for image in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
        prompt=character_json["appearance"],
        guidance_scale=3.5,
        num_inference_steps=28,
        width=1024,
        height=1024,
        generator=torch.Generator("cpu").manual_seed(0),
        output_type="pil",
        good_vae=good_vae,
    ):
        yield image


def generate_character(world_description, persona_description, progress=gr.Progress(track_tqdm=True)):
    """Ask the LLM for a character JSON that fits the given world and persona."""
    result = llm_client.predict(
        query=prompt_template.format(
            persona_description=persona_description, world_description=world_description
        ),
        history=[],
        system="You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
        api_name="/model_chat",
    )
    output = json.loads(result[1][0][-1])
    return output


app_description = """
- This app generates a character in JSON format based on a persona description and a world description.
- The character's appearance is generated using [FLUX-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) and the character description is generated using [Qwen2.5-72B-Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct).
- The persona description is randomly selected from the [FinePersonas-Lite](https://huggingface.co/datasets/MohamedRashad/FinePersonas-Lite) dataset.

**Note:** I recommend starting with the world description (you can write one or loop over randomly generated ones) and then trying different persona descriptions to generate interesting characters for the world you created.
"""

with gr.Blocks(title="Character Generator") as app:
    with gr.Column():
        gr.HTML("