# -- source: Streamlit text-generation app (scrape metadata removed) --
from transformers import PretrainedConfig, AutoModelForCausalLM, AutoTokenizer
import streamlit as st
# Define your custom config class
class MyCustomConfig(PretrainedConfig):
    """Configuration for a small GPT-style causal language model.

    Stores the architectural hyperparameters the model needs. All
    parameters carry defaults because the `transformers` machinery
    (e.g. `from_dict` round-trips, serialization diff checks) expects a
    `PretrainedConfig` subclass to be constructible with no arguments.
    """

    model_type = "gpt"  # Registered model-type identifier

    def __init__(
        self,
        vocab_size: int = 50257,   # tokenizer vocabulary size
        n_embd: int = 768,         # embedding / hidden dimension
        n_layer: int = 12,         # number of transformer blocks
        n_head: int = 12,          # attention heads per block
        block_size: int = 1024,    # maximum context length
        **kwargs,
    ):
        # Forward remaining kwargs (pad/bos/eos ids, etc.) to the base class.
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.block_size = block_size
# --- Model assets -----------------------------------------------------------
# Path to the local JSON config and the Hub repository holding the weights.
_CONFIG_PATH = "config.json"  # adjust if the config lives elsewhere
_MODEL_REPO = "praty7717/Odeyssey"

# Build the config from disk, then pull model weights and tokenizer from the Hub.
config = MyCustomConfig.from_json_file(_CONFIG_PATH)
model = AutoModelForCausalLM.from_pretrained(_MODEL_REPO, config=config)
tokenizer = AutoTokenizer.from_pretrained(_MODEL_REPO)
# --- Streamlit interface ----------------------------------------------------
st.title("Text Generation App")
start_prompt = st.text_input("Enter your prompt:")

if st.button("Generate"):
    # Guard: an empty prompt would make the model generate from nothing.
    if not start_prompt.strip():
        st.warning("Please enter a prompt before generating.")
    else:
        input_ids = tokenizer.encode(start_prompt, return_tensors="pt")
        # Use max_new_tokens rather than max_length: max_length counts the
        # prompt tokens too, so long prompts would yield little or no output.
        output = model.generate(input_ids, max_new_tokens=100)
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
        st.write(generated_text)