import gradio as gr
import os

# Install model dependencies at startup (a common workaround in hosted demos;
# listing them in requirements.txt is the usual alternative).
os.system("pip install transformers sentencepiece torch")

from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and the 8-bit quantized OpenCALM-7B model.
tokenizer = AutoTokenizer.from_pretrained("kyo-takano/open-calm-7b-8bit")
model = AutoModelForCausalLM.from_pretrained("kyo-takano/open-calm-7b-8bit")


def generate_text(input_text, temperature=0.8, max_length=20):
    """Generate a continuation of input_text by sampling from the model."""
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    # do_sample=True is required for the temperature setting to take effect;
    # without it, generate() falls back to greedy decoding.
    output = model.generate(input_ids, max_length=max_length, do_sample=True, temperature=temperature)
    return tokenizer.decode(output[0], skip_special_tokens=True)

# UI components. The gr.inputs / gr.outputs namespaces were removed in recent
# Gradio releases; the top-level components below work in Gradio 3.x and later.
inputs = gr.Textbox(lines=2, label="Input Text")
temperature = gr.Slider(minimum=0.2, maximum=1.0, value=0.8, step=0.1, label="Temperature")
max_length = gr.Slider(minimum=10, maximum=50, value=20, step=5, label="Max Length")
output_text = gr.Textbox(label="Generated Text")

interface = gr.Interface(
    fn=generate_text,
    inputs=[inputs, temperature, max_length],
    outputs=output_text,
    title="Text Generation Interface",
)
interface.launch()