Pratyush Chaudhary committed
Commit 3de3355
1 Parent(s): b4fb369

Add updated file

Files changed (1)
  1. app.py +17 -16
app.py CHANGED
@@ -1,27 +1,28 @@
 import streamlit as st
-from transformers import GPT2LMHeadModel, AutoTokenizer, AutoConfig
+import torch
+from transformers import AutoModelForCausalLM
 
-# Load the configuration
-config = AutoConfig.from_pretrained("praty7717/Odeyssey")
+# Define your custom config class
+class MyCustomConfig(PretrainedConfig):
+    model_type = "gpt"
 
-# Load the model
-model = GPT2LMHeadModel.from_pretrained("praty7717/Odeyssey", config=config)
+    def __init__(self, vocab_size, n_embd, n_layer, n_head, block_size, **kwargs):
+        super().__init__(vocab_size=vocab_size, n_embd=n_embd, n_layer=n_layer, n_head=n_head, block_size=block_size, **kwargs)
 
-# Load the tokenizer
-tokenizer = AutoTokenizer.from_pretrained("praty7717/Odeyssey")
+# Load the model and configuration
+config = MyCustomConfig.from_pretrained("praty7717/Odeyssey")
+model = AutoModelForCausalLM.from_pretrained("praty7717/Odeyssey", config=config)
 
-# Define the text generation function
+# Function to generate text
 def generate_text(prompt, max_length=100):
-    input_ids = tokenizer.encode(prompt, return_tensors='pt')
-    output = model.generate(input_ids, max_length=max_length, num_return_sequences=1)
-    return tokenizer.decode(output[0], skip_special_tokens=True)
+    inputs = tokenizer(prompt, return_tensors="pt")
+    with torch.no_grad():
+        outputs = model.generate(**inputs, max_length=max_length)
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 # Streamlit UI
 st.title("Text Generation with Custom GPT Model")
-
-# User input for the prompt
-user_input = st.text_input("Enter a prompt:", "Once upon a time")
-
+start_prompt = st.text_area("Enter your prompt here:", "Once upon a time")
 if st.button("Generate"):
-    generated_text = generate_text(user_input, max_length=100)
+    generated_text = generate_text(start_prompt, max_length=100)
     st.write(generated_text)
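
Note that, as committed, the new app.py will not run: it subclasses PretrainedConfig without importing it from transformers, and generate_text calls tokenizer, which is never created (the tokenizer-loading line from the previous version was removed). Whether AutoModelForCausalLM can even instantiate a model from an unregistered custom config class also depends on how the praty7717/Odeyssey repo is set up. A minimal sketch that sidesteps those issues, assuming the Hub repo ships a standard config.json and tokenizer files that the stock Auto classes can resolve (the st.cache_resource wrapper and model.eval() call are additions for illustration, not part of the commit):

import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

@st.cache_resource  # load weights once per session instead of on every Streamlit rerun
def load_model():
    # Assumes praty7717/Odeyssey provides a standard config.json and tokenizer files.
    tokenizer = AutoTokenizer.from_pretrained("praty7717/Odeyssey")
    model = AutoModelForCausalLM.from_pretrained("praty7717/Odeyssey")
    model.eval()
    return tokenizer, model

tokenizer, model = load_model()

def generate_text(prompt, max_length=100):
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(**inputs, max_length=max_length)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Streamlit UI
st.title("Text Generation with Custom GPT Model")
start_prompt = st.text_area("Enter your prompt here:", "Once upon a time")
if st.button("Generate"):
    st.write(generate_text(start_prompt, max_length=100))

If the checkpoint really does require the custom MyCustomConfig class, the pieces missing from the commit are a from transformers import PretrainedConfig line, an explicit tokenizer load (for example AutoTokenizer.from_pretrained("praty7717/Odeyssey")), and registration of the custom config/model classes so the Auto factories can find them.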