Pratyush Chaudhary
committed on
Commit
·
6dd871b
1
Parent(s):
0337671
Add updated app file
Browse files
app.py
CHANGED
@@ -1,11 +1,22 @@
|
|
|
|
1 |
import streamlit as st
|
2 |
-
import torch
|
3 |
-
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
|
4 |
|
5 |
-
#
|
6 |
-
|
|
|
7 |
|
8 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
model = AutoModelForCausalLM.from_pretrained("praty7717/Odeyssey", config=config)
|
10 |
tokenizer = AutoTokenizer.from_pretrained("praty7717/Odeyssey")
|
11 |
|
|
|
1 |
+
from transformers import PretrainedConfig, AutoModelForCausalLM, AutoTokenizer
|
2 |
import streamlit as st
|
|
|
|
|
3 |
|
# Define your custom config class
class MyCustomConfig(PretrainedConfig):
    """Configuration for the custom GPT-style "Odeyssey" model.

    Holds the architecture hyperparameters so the checkpoint can be loaded
    through the standard Hugging Face config machinery.

    Args:
        vocab_size: Size of the token vocabulary.
        n_embd: Embedding (hidden) dimension.
        n_layer: Number of transformer blocks.
        n_head: Number of attention heads per block.
        block_size: Maximum sequence length (context window).
        **kwargs: Forwarded to ``PretrainedConfig`` (e.g. ``bos_token_id``).
    """

    model_type = "gpt"  # Specify the model type (used by Auto* class lookups)

    def __init__(
        self,
        vocab_size=50257,
        n_embd=768,
        n_layer=12,
        n_head=12,
        block_size=1024,
        **kwargs,
    ):
        # NOTE: defaults are required by the PretrainedConfig contract —
        # transformers instantiates configs with no positional arguments
        # during from_dict()/from_pretrained()/serialization round-trips.
        # GPT-2-style values are used here as placeholders; the real values
        # come from config.json at load time — TODO confirm they match the
        # trained checkpoint.
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.block_size = block_size
15 |
+
|
# Load the configuration, model, and tokenizer exactly once.
# Streamlit re-executes this whole script on every user interaction, so an
# uncached top-level load would reload the checkpoint on each rerun;
# st.cache_resource keeps one shared copy across reruns and sessions.
@st.cache_resource
def _load_resources():
    """Return (config, model, tokenizer) for the Odeyssey checkpoint."""
    # Load your model configuration — config.json must sit next to this
    # script and contain the trained hyperparameters.
    cfg = MyCustomConfig.from_json_file("config.json")
    # Load the model and tokenizer from the Hugging Face Hub.
    mdl = AutoModelForCausalLM.from_pretrained("praty7717/Odeyssey", config=cfg)
    tok = AutoTokenizer.from_pretrained("praty7717/Odeyssey")
    return cfg, mdl, tok


config, model, tokenizer = _load_resources()
22 |
|