unknown committed on
Commit · e4994c1
1 Parent(s): d1ca7e4

Added dependencies
Files changed:
- app.py (+6 -5)
- requirements.txt (+0 -0)
app.py
CHANGED

@@ -1,7 +1,7 @@
 import streamlit as st
+import torch
 from PIL import Image
 from transformers import AutoProcessor, AutoModelForCausalLM, AutoConfig
-import json
 
 import subprocess
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
@@ -9,11 +9,12 @@ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 # Function to load the model and processor
 @st.cache_resource
 def load_model_and_processor():
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     config = AutoConfig.from_pretrained("microsoft/Florence-2-base-ft", trust_remote_code=True)
     config.vision_config.model_type = "davit"
-    model = AutoModelForCausalLM.from_pretrained("sujet-ai/Lutece-Vision-Base", config=config, trust_remote_code=True).eval()
+    model = AutoModelForCausalLM.from_pretrained("sujet-ai/Lutece-Vision-Base", config=config, trust_remote_code=True).to(device).eval()
     processor = AutoProcessor.from_pretrained("sujet-ai/Lutece-Vision-Base", config=config, trust_remote_code=True)
-    return model, processor
+    return model, processor, device
 
 # Function to generate answer
 def generate_answer(model, processor, device, image, prompt):
@@ -53,7 +54,7 @@ def main():
     st.sidebar.markdown("Our website : [sujet.ai](https://sujet.ai)")
 
     # Load model and processor
-    model, processor = load_model_and_processor()
+    model, processor, device = load_model_and_processor()
 
     # File uploader for document
     uploaded_file = st.file_uploader("π Upload a financial document", type=["png", "jpg", "jpeg"])
@@ -67,7 +68,7 @@ def main():
 
     if st.button("π Generate Answer"):
         with st.spinner("Generating answer..."):
-            answer = generate_answer(model, processor, image, question)
+            answer = generate_answer(model, processor, device, image, question)
         st.success(f"## π‘ {answer}")
 
     # # Model configuration viewer
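The hunks above change how generate_answer is called (it now receives the device returned by load_model_and_processor) but do not show its body. Below is a minimal sketch of how that device argument is typically consumed with a Florence-2-style model and processor; the generation settings and decoding shown here are assumptions for illustration, not part of this commit.

```python
import torch

def generate_answer(model, processor, device, image, prompt):
    # Sketch only: the real body of generate_answer is not shown in this diff.
    # Assumes the Florence-2-style model/processor returned by load_model_and_processor.
    inputs = processor(text=prompt, images=image, return_tensors="pt").to(device)
    with torch.no_grad():
        generated_ids = model.generate(
            input_ids=inputs["input_ids"],
            pixel_values=inputs["pixel_values"],
            max_new_tokens=256,  # illustrative limit, not taken from the commit
            num_beams=3,         # illustrative beam setting
        )
    # Decode to plain text; the actual app may post-process the output differently.
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```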
requirements.txt
ADDED
File without changes
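requirements.txt is added in this commit but no contents are shown. Based only on the imports visible in app.py, an illustrative, unpinned version might look like the listing below; this is an assumption, and flash-attn is installed at runtime by the subprocess call rather than listed here.

```text
streamlit
torch
transformers
Pillow
```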