|
import streamlit as st |
|
import torch |
|
from transformers import AutoModelForCausalLM |
|
|
|
|
|
class MyCustomConfig(PretrainedConfig):
    """Configuration for the custom GPT model hosted at ``praty7717/Odeyssey``.

    All hyperparameters now carry defaults (mirroring GPT-2 small) so that
    ``MyCustomConfig()`` is constructible with no arguments — Hugging Face's
    ``from_pretrained``/serialization machinery may instantiate a config
    without supplying every field, which previously raised ``TypeError``.
    Values loaded from the hub's ``config.json`` override these defaults.
    """

    model_type = "gpt"

    def __init__(
        self,
        vocab_size=50257,
        n_embd=768,
        n_layer=12,
        n_head=12,
        block_size=1024,
        **kwargs,
    ):
        # Forward every hyperparameter to PretrainedConfig so they are stored
        # as attributes and round-trip through save_pretrained()/to_dict().
        super().__init__(
            vocab_size=vocab_size,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            block_size=block_size,
            **kwargs,
        )
|
|
|
|
|
@st.cache_resource
def _load_model_assets():
    """Download and cache the config, tokenizer, and model exactly once.

    Streamlit reruns this whole script on every widget interaction;
    ``st.cache_resource`` keeps the heavyweight model in memory across
    reruns instead of reloading it each time.

    Returns:
        tuple: (config, tokenizer, model) ready for inference.
    """
    config = MyCustomConfig.from_pretrained("praty7717/Odeyssey")
    # The tokenizer was never created in the original script, so the first
    # "Generate" click crashed with NameError inside generate_text().
    tokenizer = AutoTokenizer.from_pretrained("praty7717/Odeyssey")
    model = AutoModelForCausalLM.from_pretrained("praty7717/Odeyssey", config=config)
    return config, tokenizer, model


config, tokenizer, model = _load_model_assets()
|
|
|
|
|
def generate_text(prompt, max_length=100):
    """Generate a text continuation of *prompt* using the loaded model.

    Args:
        prompt: Seed text fed to the tokenizer.
        max_length: Upper bound on the total token count of the output
            sequence (prompt tokens included).

    Returns:
        The decoded generated text with special tokens stripped.
    """
    encoded = tokenizer(prompt, return_tensors="pt")
    # Inference only — disable autograd bookkeeping to save memory/time.
    with torch.no_grad():
        generated = model.generate(**encoded, max_length=max_length)
    first_sequence = generated[0]
    return tokenizer.decode(first_sequence, skip_special_tokens=True)
|
|
|
|
|
# --- Streamlit UI ---------------------------------------------------------
st.title("Text Generation with Custom GPT Model")

start_prompt = st.text_area("Enter your prompt here:", "Once upon a time")

if st.button("Generate"):
    # Guard against an empty / whitespace-only prompt instead of handing it
    # to the model, and show progress since generation can be slow on CPU.
    if not start_prompt.strip():
        st.warning("Please enter a prompt before generating.")
    else:
        with st.spinner("Generating..."):
            generated_text = generate_text(start_prompt, max_length=100)
        st.write(generated_text)
|
|