"""Streamlit app that generates text with a custom GPT-style causal LM.

Loads a custom-configured model from the Hugging Face Hub once per server
process, then serves a simple prompt -> generated-text UI.
"""

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, PretrainedConfig


class MyCustomConfig(PretrainedConfig):
    """Configuration for the custom GPT-style model.

    Hugging Face requires config classes to be constructible with default
    values (they are instantiated internally during ``from_pretrained``),
    so every architecture parameter carries a default. The defaults below
    are GPT-2-small-like placeholders — NOTE(review): confirm they match
    the checkpoint's actual architecture; ``config.json`` overrides them
    at load time anyway.

    Args:
        vocab_size: Size of the token vocabulary.
        n_embd: Embedding / hidden dimension.
        n_layer: Number of transformer blocks.
        n_head: Number of attention heads.
        block_size: Maximum context length.
        **kwargs: Forwarded to ``PretrainedConfig`` (e.g. special token ids).
    """

    model_type = "gpt"  # registers this config under the "gpt" model type

    def __init__(
        self,
        vocab_size=50257,
        n_embd=768,
        n_layer=12,
        n_head=12,
        block_size=1024,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.block_size = block_size


@st.cache_resource
def load_model_and_tokenizer():
    """Load model + tokenizer once per server process.

    Streamlit re-executes this whole script on every widget interaction;
    without ``st.cache_resource`` the model would be re-downloaded and
    re-instantiated on every button click.

    Returns:
        (model, tokenizer) tuple ready for inference.
    """
    # Path is relative to the working directory the app is launched from.
    config = MyCustomConfig.from_json_file("config.json")
    model = AutoModelForCausalLM.from_pretrained("praty7717/Odeyssey", config=config)
    model.eval()  # disable dropout etc. — this app only does inference
    tokenizer = AutoTokenizer.from_pretrained("praty7717/Odeyssey")
    return model, tokenizer


model, tokenizer = load_model_and_tokenizer()

# --- Streamlit UI ---
st.title("Text Generation App")
start_prompt = st.text_input("Enter your prompt:")

if st.button("Generate"):
    if not start_prompt:
        st.warning("Please enter a prompt first.")
    else:
        input_ids = tokenizer.encode(start_prompt, return_tensors="pt")
        # generate() runs under no_grad internally; max_length caps
        # prompt + generated tokens at 100.
        output = model.generate(input_ids, max_length=100)
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
        st.write(generated_text)