import os
import subprocess

# Install flash-attn at startup. FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE tells the
# flash-attn build to skip compiling CUDA kernels, which is needed on Spaces
# where no CUDA toolchain is available at install time. os.environ is merged in
# so pip still sees PATH and the rest of the environment.
subprocess.run(
    'pip install flash-attn --no-build-isolation',
    env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"},
    shell=True,
)

from threading import Thread

import torch
import gradio as gr
import spaces
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    StoppingCriteria,
    StoppingCriteriaList,
    TextIteratorStreamer,
)


HF_TOKEN = os.environ.get("HF_TOKEN", None)
MODEL_ID = "THUDM/LongWriter-glm4-9b"

TITLE = "<h1><center>GLM SPACE</center></h1>"

PLACEHOLDER = '<h3><center>LongWriter-glm4-9b is trained based on glm-4-9b and is capable of generating 10,000+ words at once.</center></h3>'

CSS = """
.duplicate-button {
  margin: auto !important;
  color: white !important;
  background: black !important;
  border-radius: 100vh !important;
}
"""

# Load the model in bfloat16, sharded across the available devices; use_fast=False
# because the custom GLM tokenizer does not ship a fast implementation.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
).eval()

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True, use_fast=False)

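# GLM chat models end a turn with role-switch tokens rather than a single EOS
# id, so stop as soon as the last generated token is EOS, <|user|>, or
# <|observation|>.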
class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [tokenizer.eos_token_id, tokenizer.get_command("<|user|>"),
                    tokenizer.get_command("<|observation|>")]
        return input_ids[0][-1].item() in stop_ids

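# Entry point for gr.ChatInterface: receives the new message, the accumulated
# history, and the two slider values, and yields the growing response string.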
@spaces.GPU()
def stream_chat(message: str, history: list, temperature: float, max_new_tokens: int):
    print(f'message is - {message}')
    print(f'history is - {history}')
    # Convert Gradio's [user, assistant] history pairs into role-tagged turns;
    # the new message itself is appended by build_chat_input() below.
    conversation = []
    for prompt, answer in history:
        conversation.extend([{"role": "user", "content": prompt}, {"role": "assistant", "content": answer}])

    print(f"Conversation is -\n{conversation}")
    stop = StopOnTokens()

    # build_chat_input() is a custom method on the GLM tokenizer (hence
    # trust_remote_code): it formats the history plus the new user message
    # into model-ready input ids.
    input_ids = tokenizer.build_chat_input(message, history=conversation, role='user').input_ids.to(next(model.parameters()).device)
    streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
    eos_token_id = [tokenizer.eos_token_id, tokenizer.get_command("<|user|>"),
                    tokenizer.get_command("<|observation|>")]

    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        # Note: top_k=1 would collapse sampling to greedy decoding and turn the
        # temperature slider into a no-op, so it is deliberately not set.
        do_sample=True,
        temperature=temperature,
        repetition_penalty=1.0,
        stopping_criteria=StoppingCriteriaList([stop]),
        eos_token_id=eos_token_id,
    )

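    # generate() blocks until completion, so run it in a background thread and
    # consume the streamer from this generator to push partial text to the UI.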
    thread = Thread(target=model.generate, kwargs=generate_kwargs)
    thread.start()
    buffer = ""
    for new_token in streamer:
        # Skip any leaked role marker; everything else is appended to the reply.
        if new_token and '<|user|>' not in new_token:
            buffer += new_token
        yield buffer

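# Gradio UI: a chat pane wired to stream_chat, with the sampling controls
# tucked into a collapsible "Parameters" accordion.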
chatbot = gr.Chatbot(height=600, placeholder=PLACEHOLDER)

with gr.Blocks(css=CSS) as demo:
    gr.HTML(TITLE)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button")
    gr.ChatInterface(
        fn=stream_chat,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(
                minimum=0.1,  # must be strictly positive when do_sample=True
                maximum=1,
                step=0.1,
                value=0.5,
                label="Temperature",
                render=False,
            ),
            gr.Slider(
                minimum=1024,
                maximum=32768,
                step=1,
                value=4096,
                label="Max New Tokens",
                render=False,
            ),
        ],
        examples=[
            ["Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."],
            ["What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter."],
            ["Tell me a random fun fact about the Roman Empire."],
            ["Show me a code snippet of a website's sticky header in CSS and JavaScript."],
        ],
        cache_examples=False,
    )

if __name__ == "__main__":
    demo.launch()