# Ref: https://huggingface.co/spaces/ysharma/Chat_with_Meta_llama3_8b

import gradio as gr
import os
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread

DESCRIPTION = '''
<div>
<h1 style="text-align: center;">Sarashina2-7B Instruct</h1>
<p>This is Sarashina2-7B Instruct: <a href="https://huggingface.co/alfredplpl/sarashina2-7b-it"><b>alfredplpl/sarashina2-7b-it</b></a>.</p>
</div>
'''

LICENSE = """
<p/>

---
Built with Sarashina2-7B
"""

PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Unofficial Sarashina2-7B Instruct Test</h1>
   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything</p>
</div>
"""


css = """
h1 {
  text-align: center;
  display: block;
}

#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}
"""

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("alfredplpl/sarashina2-7b-it", use_fast=False)
model = AutoModelForCausalLM.from_pretrained("alfredplpl/sarashina2-7b-it", torch_dtype=torch.bfloat16)
model = model.to("cuda:0")

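# On ZeroGPU Spaces, @spaces.GPU() allocates a GPU for the duration of each call;
# outside HF Spaces the decorator is effectively a no-op.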
@spaces.GPU()
def chat_sarashina2_7b(message: str,
                       history: list,
                       temperature: float,
                       max_new_tokens: int
                       ) -> str:
    """
    Generate a streaming response using the sarashina2-7b-it model.
    Args:
        message (str): The input message.
        history (list): The conversation history used by ChatInterface.
        temperature (float): The temperature for generating the response.
        max_new_tokens (int): The maximum number of new tokens to generate.
    Yields:
        str: The generated response, accumulated incrementally.
    """
    conversation = []
    # System prompt, kept in Japanese for this Japanese model: "You are an excellent assistant."
    conversation.append({"role": "system", "content": "あなたは優秀なアシスタントです。"})
    for user, assistant in history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")

    # Stream tokens as they are generated: model.generate runs in a background
    # thread while TextIteratorStreamer yields the decoded text incrementally.
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)

    generate_kwargs = dict(
        input_ids=input_ids.to(model.device),
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=0.95,
        repetition_penalty=1.1,
    )
    # Fall back to greedy decoding (do_sample=False) when temperature is 0,
    # since sampling with temperature 0 would crash.
    if temperature == 0:
        generate_kwargs['do_sample'] = False
        
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
        

# Gradio block
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')

with gr.Blocks(fill_height=True, css=css) as demo:
    
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    gr.ChatInterface(
        fn=chat_sarashina2_7b,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="โš™๏ธ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(minimum=0,
                      maximum=1, 
                      step=0.1,
                      value=0.5, 
                      label="Temperature", 
                      render=False),
            gr.Slider(minimum=128, 
                      maximum=4096,
                      step=1,
                      value=512, 
                      label="Max new tokens", 
                      render=False),
            ],
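        # Example prompts (in Japanese): explain relativity for a grade-schooler;
        # step-by-step ways to study the origin of the universe; a Python script
        # for primes from 1 to 100; a birthday-present idea for a classmate; and
        # arguing that penguins are the kings of the jungle.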
        examples=[
            ['ๅฐๅญฆ็”Ÿใซใ‚‚ใ‚ใ‹ใ‚‹ใ‚ˆใ†ใซ็›ธๅฏพๆ€ง็†่ซ–ใ‚’ๆ•™ใˆใฆใใ ใ•ใ„ใ€‚'],
            ['ๅฎ‡ๅฎ™ใฎ่ตทๆบใ‚’็Ÿฅใ‚‹ใŸใ‚ใฎๆ–นๆณ•ใ‚’ใ‚นใƒ†ใƒƒใƒ—ใƒปใƒใ‚คใƒปใ‚นใƒ†ใƒƒใƒ—ใงๆ•™ใˆใฆใใ ใ•ใ„ใ€‚'],
            ['1ใ‹ใ‚‰100ใพใงใฎ็ด ๆ•ฐใ‚’ๆฑ‚ใ‚ใ‚‹ใ‚นใ‚ฏใƒชใƒ—ใƒˆใ‚’Pythonใงๆ›ธใ„ใฆใใ ใ•ใ„ใ€‚'],
            ['ๅ‹้”ใฎ้™ฝ่‘ตใซใ‚ใ’ใ‚‹่ช•็”Ÿๆ—ฅใƒ—ใƒฌใ‚ผใƒณใƒˆใ‚’่€ƒใˆใฆใใ ใ•ใ„ใ€‚ใŸใ ใ—ใ€้™ฝ่‘ตใฏไธญๅญฆ็”Ÿใงใ€็งใฏๅŒใ˜ใ‚ฏใƒฉใ‚นใฎ็”ทๆ€งใงใ‚ใ‚‹ใ“ใจใ‚’่€ƒๆ…ฎใ—ใฆใใ ใ•ใ„ใ€‚'],
            ['ใƒšใƒณใ‚ฎใƒณใŒใ‚ธใƒฃใƒณใ‚ฐใƒซใฎ็Ž‹ๆง˜ใงใ‚ใ‚‹ใ“ใจใ‚’ๆญฃๅฝ“ๅŒ–ใ™ใ‚‹ใ‚ˆใ†ใซ่ชฌๆ˜Žใ—ใฆใใ ใ•ใ„ใ€‚']
            ],
        cache_examples=False,
    )
    
    gr.Markdown(LICENSE)
    
if __name__ == "__main__":
    demo.launch()
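
# A minimal sketch for running this locally (assumes a CUDA GPU and
# `pip install gradio spaces torch transformers accelerate`; outside HF Spaces
# the @spaces.GPU() decorator does nothing):
#   $ python app.py
# then open the local URL that Gradio prints.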