unknown committed on
Commit dd4319f · 1 Parent(s): e4994c1

Added requirements and removed Device for CPU only

Browse files
- app.py +6 -8
- requirements.txt +5 -0
app.py
CHANGED

@@ -1,5 +1,4 @@
 import streamlit as st
-import torch
 from PIL import Image
 from transformers import AutoProcessor, AutoModelForCausalLM, AutoConfig
 
@@ -9,17 +8,16 @@ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENT
 # Function to load the model and processor
 @st.cache_resource
 def load_model_and_processor():
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     config = AutoConfig.from_pretrained("microsoft/Florence-2-base-ft", trust_remote_code=True)
     config.vision_config.model_type = "davit"
-    model = AutoModelForCausalLM.from_pretrained("sujet-ai/Lutece-Vision-Base", config=config, trust_remote_code=True).to(device).eval()
+    model = AutoModelForCausalLM.from_pretrained("sujet-ai/Lutece-Vision-Base", config=config, trust_remote_code=True).eval()
     processor = AutoProcessor.from_pretrained("sujet-ai/Lutece-Vision-Base", config=config, trust_remote_code=True)
-    return model, processor, device
+    return model, processor
 
 # Function to generate answer
-def generate_answer(model, processor, device, image, prompt):
+def generate_answer(model, processor, image, prompt):
     task = "<FinanceQA>"
-    inputs = processor(text=prompt, images=image, return_tensors="pt").to(device)
+    inputs = processor(text=prompt, images=image, return_tensors="pt")
     generated_ids = model.generate(
         input_ids=inputs["input_ids"],
         pixel_values=inputs["pixel_values"],
@@ -54,7 +52,7 @@ def main():
     st.sidebar.markdown("Our website : [sujet.ai](https://sujet.ai)")
 
     # Load model and processor
-    model, processor, device = load_model_and_processor()
+    model, processor = load_model_and_processor()
 
     # File uploader for document
     uploaded_file = st.file_uploader("📄 Upload a financial document", type=["png", "jpg", "jpeg"])
@@ -68,7 +66,7 @@ def main():
 
     if st.button("🔍 Generate Answer"):
         with st.spinner("Generating answer..."):
-            answer = generate_answer(model, processor, device, image, question)
+            answer = generate_answer(model, processor, image, question)
             st.success(f"## 💡 {answer}")
 
     # # Model configuration viewer
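Note: with the torch import and explicit device placement removed, the app now loads and runs the model on CPU by default. Below is a minimal sketch of that CPU-only path outside Streamlit, following the updated load_model_and_processor() and generate_answer() code in the diff; the image path, question text, max_new_tokens value, and the batch_decode step are illustrative assumptions, since those parts are truncated or not shown in the commit.

from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM, AutoConfig

# Load config and model as in the updated load_model_and_processor();
# with no .to(device) call, weights and tensors stay on CPU.
config = AutoConfig.from_pretrained("microsoft/Florence-2-base-ft", trust_remote_code=True)
config.vision_config.model_type = "davit"
model = AutoModelForCausalLM.from_pretrained("sujet-ai/Lutece-Vision-Base", config=config, trust_remote_code=True).eval()
processor = AutoProcessor.from_pretrained("sujet-ai/Lutece-Vision-Base", config=config, trust_remote_code=True)

# Illustrative inputs (filename and question are hypothetical).
image = Image.open("document.png").convert("RGB")
prompt = "<FinanceQA> What was the total revenue?"

# CPU tensors go straight into generate(); generation kwargs beyond the two
# arguments visible in the diff are assumptions.
inputs = processor(text=prompt, images=image, return_tensors="pt")
generated_ids = model.generate(
    input_ids=inputs["input_ids"],
    pixel_values=inputs["pixel_values"],
    max_new_tokens=128,
)
answer = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(answer)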
requirements.txt
CHANGED

@@ -0,0 +1,5 @@
+timm
+transformers
+spaces
+pillow
+torch