|
from transformers import PretrainedConfig, AutoModelForCausalLM, AutoTokenizer |
|
import streamlit as st |
|
|
|
|
|
class MyCustomConfig(PretrainedConfig):
    """Configuration for a custom GPT-style causal language model.

    Holds the architecture hyperparameters consumed by the model
    (vocabulary size, embedding width, depth, attention heads, and
    maximum context length).

    Args:
        vocab_size: Size of the token vocabulary.
        n_embd: Embedding / hidden dimension.
        n_layer: Number of transformer blocks.
        n_head: Number of attention heads per block.
        block_size: Maximum sequence (context) length.
        **kwargs: Forwarded to ``PretrainedConfig`` (e.g. ``bos_token_id``).
    """

    model_type = "gpt"

    # NOTE: every argument needs a default — the Hugging Face config
    # machinery instantiates config classes with no positional arguments
    # (e.g. during `from_pretrained` fallbacks and config composition),
    # which would raise TypeError with required positionals. The defaults
    # below mirror GPT-2 small; saved values from config.json override them.
    def __init__(
        self,
        vocab_size: int = 50257,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        block_size: int = 1024,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.block_size = block_size
|
|
|
|
|
@st.cache_resource(show_spinner="Loading model and tokenizer...")
def _load_model_and_tokenizer():
    """Load the config, model, and tokenizer exactly once per server process.

    Streamlit reruns the entire script on every widget interaction;
    without caching, the model would be reloaded from disk/network on
    each button click. ``st.cache_resource`` keeps a single shared
    instance alive across reruns and sessions.

    Returns:
        Tuple of (config, model, tokenizer).
    """
    config = MyCustomConfig.from_json_file("config.json")
    model = AutoModelForCausalLM.from_pretrained("praty7717/Odeyssey", config=config)
    tokenizer = AutoTokenizer.from_pretrained("praty7717/Odeyssey")
    return config, model, tokenizer


# Module-level names preserved for any code that references them directly.
config, model, tokenizer = _load_model_and_tokenizer()
|
|
|
|
|
st.title("Text Generation App")

start_prompt = st.text_input("Enter your prompt:")

if st.button("Generate"):
    # Guard against an empty/whitespace-only prompt: generating from an
    # empty encoding is wasteful and produces confusing output.
    if not start_prompt.strip():
        st.warning("Please enter a prompt before generating.")
    else:
        # Encode the prompt to token ids as a PyTorch tensor.
        input_ids = tokenizer.encode(start_prompt, return_tensors="pt")
        # Generation can take seconds on CPU; show progress to the user.
        with st.spinner("Generating..."):
            # max_length bounds prompt + generated tokens combined.
            output = model.generate(input_ids, max_length=100)
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
        st.write(generated_text)
|
|