unknown committed on
Commit d1ca7e4 · 1 Parent(s): d5b5b3a

Removed GPU

Files changed (1)
  1. app.py +4 -6
app.py CHANGED
@@ -1,5 +1,4 @@
 import streamlit as st
-import torch
 from PIL import Image
 from transformers import AutoProcessor, AutoModelForCausalLM, AutoConfig
 import json
@@ -10,12 +9,11 @@ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENT
 # Function to load the model and processor
 @st.cache_resource
 def load_model_and_processor():
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     config = AutoConfig.from_pretrained("microsoft/Florence-2-base-ft", trust_remote_code=True)
     config.vision_config.model_type = "davit"
-    model = AutoModelForCausalLM.from_pretrained("sujet-ai/Lutece-Vision-Base", config=config, trust_remote_code=True).to(device).eval()
+    model = AutoModelForCausalLM.from_pretrained("sujet-ai/Lutece-Vision-Base", config=config, trust_remote_code=True).eval()
     processor = AutoProcessor.from_pretrained("sujet-ai/Lutece-Vision-Base", config=config, trust_remote_code=True)
-    return model, processor, device
+    return model, processor
 
 # Function to generate answer
 def generate_answer(model, processor, device, image, prompt):
@@ -55,7 +53,7 @@ def main():
     st.sidebar.markdown("Our website : [sujet.ai](https://sujet.ai)")
 
     # Load model and processor
-    model, processor, device = load_model_and_processor()
+    model, processor = load_model_and_processor()
 
     # File uploader for document
     uploaded_file = st.file_uploader("📄 Upload a financial document", type=["png", "jpg", "jpeg"])
@@ -69,7 +67,7 @@ def main():
 
     if st.button("🔍 Generate Answer"):
         with st.spinner("Generating answer..."):
-            answer = generate_answer(model, processor, image, question)
+            answer = generate_answer(model, processor, image, question)
             st.success(f"## 💡 {answer}")
 
     # # Model configuration viewer
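
For context, below is a minimal sketch of what the CPU-only load-and-answer path looks like after this commit. The loader mirrors the new load_model_and_processor exactly; the body of generate_answer is not shown in the diff, so its internals here (processor call, generate arguments, decoding) are illustrative assumptions based on the usual Florence-2-style API, and its signature follows the updated call site generate_answer(model, processor, image, question).

import streamlit as st
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM, AutoConfig

@st.cache_resource
def load_model_and_processor():
    config = AutoConfig.from_pretrained("microsoft/Florence-2-base-ft", trust_remote_code=True)
    config.vision_config.model_type = "davit"
    # No .to(device) call: weights stay on CPU, matching this commit.
    model = AutoModelForCausalLM.from_pretrained(
        "sujet-ai/Lutece-Vision-Base", config=config, trust_remote_code=True
    ).eval()
    processor = AutoProcessor.from_pretrained(
        "sujet-ai/Lutece-Vision-Base", config=config, trust_remote_code=True
    )
    return model, processor

def generate_answer(model, processor, image, prompt):
    # Hypothetical body (not from the diff): encode the prompt and image,
    # generate on CPU, then decode the answer text.
    inputs = processor(text=prompt, images=image, return_tensors="pt")
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=256,
    )
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]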