Havi999 committed on
Commit 5931d56 · Parent: 4649c4d

Upload folder using huggingface_hub

Files changed (4)
  1. web_demo.py +5 -2
  2. web_demo2.py +3 -1
  3. web_demo_old.py +4 -1
  4. web_demo_vision.py +4 -1
web_demo.py CHANGED
@@ -2,8 +2,11 @@ from transformers import AutoModel, AutoTokenizer
 import gradio as gr
 import mdtex2html
 
-tokenizer = AutoTokenizer.from_pretrained("../chatglm", trust_remote_code=True)
-model = AutoModel.from_pretrained("../chatglm", trust_remote_code=True).float()
+# tokenizer = AutoTokenizer.from_pretrained("../chatglm", trust_remote_code=True)
+# model = AutoModel.from_pretrained("../chatglm", trust_remote_code=True).float()
+tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
+model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
+
 model = model.eval()
 
 """Override Chatbot.postprocess"""
web_demo2.py CHANGED
@@ -11,8 +11,10 @@ st.set_page_config(
 
 @st.cache_resource
 def get_model():
+    # tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
+    # model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
     tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-    model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
+    model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
     model = model.eval()
     return tokenizer, model
 
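
In web_demo2.py (the Streamlit front end), @st.cache_resource makes get_model() load the tokenizer and model once per process instead of on every script rerun. A rough usage sketch, assuming the loader above; the prompt widgets and history handling here are illustrative, not the demo's exact code:

import streamlit as st

tokenizer, model = get_model()  # cached: loaded once, reused on reruns

prompt = st.text_area("Prompt", "Hello")
if st.button("Send"):
    # chat() is supplied by the ChatGLM-6B remote code; history carries prior turns
    response, history = model.chat(tokenizer, prompt, history=[])
    st.write(response)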
web_demo_old.py CHANGED
@@ -1,8 +1,11 @@
 from transformers import AutoModel, AutoTokenizer
 import gradio as gr
 
+# tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
+# model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
 tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
+model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
+
 model = model.eval()
 
 MAX_TURNS = 20
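
With this edit, web_demo_old.py also runs the model in FP32 on CPU. A quick smoke test of the loaded pair outside the Gradio UI, assuming tokenizer and model from the diff above (the queries are arbitrary examples; chat() comes from the ChatGLM-6B remote code):

# Single turn
response, history = model.chat(tokenizer, "Hello", history=[])
print(response)

# Follow-up turn, reusing the returned history for multi-turn context
response, history = model.chat(tokenizer, "Summarize that in one sentence.", history=history)
print(response)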
web_demo_vision.py CHANGED
@@ -2,8 +2,11 @@ from transformers import AutoModel, AutoTokenizer
 import gradio as gr
 import mdtex2html
 
+# tokenizer = AutoTokenizer.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True)
+# model = AutoModel.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True).half().cuda()
 tokenizer = AutoTokenizer.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True)
-model = AutoModel.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True).half().cuda()
+model = AutoModel.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True).float()
+
 model = model.eval()
 
 """Override Chatbot.postprocess"""