Divyansh12
committed on
Update app.py
app.py CHANGED
@@ -2,80 +2,80 @@ import os
 import streamlit as st
 from transformers import AutoModel, AutoTokenizer
 from PIL import Image
-import base64
 import uuid
-import time
-from pathlib import Path
 
-#
+# Cache the model loading function
+@st.cache_resource
 def load_model(model_name):
-    if model_name == "
+    if model_name == "OCR for english or hindi (runs on CPU)":
         tokenizer = AutoTokenizer.from_pretrained('srimanth-d/GOT_CPU', trust_remote_code=True)
         model = AutoModel.from_pretrained('srimanth-d/GOT_CPU', trust_remote_code=True, use_safetensors=True, pad_token_id=tokenizer.eos_token_id)
-        model
-    elif model_name == "
+        model.eval()  # Load model on CPU
+    elif model_name == "OCR for english (runs on GPU)":
         tokenizer = AutoTokenizer.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True)
         model = AutoModel.from_pretrained('ucaslcl/GOT-OCR2_0', trust_remote_code=True, low_cpu_mem_usage=True, device_map='cuda', use_safetensors=True, pad_token_id=tokenizer.eos_token_id)
-        model
+        model.eval().cuda()  # Load model on GPU
     return tokenizer, model
 
-#
-UPLOAD_FOLDER = "./uploads"
-RESULTS_FOLDER = "./results"
-
-for folder in [UPLOAD_FOLDER, RESULTS_FOLDER]:
-    if not os.path.exists(folder):
-        os.makedirs(folder)
-
-# Function to run the GOT model for plain text OCR
+# Function to run the GOT model for multilingual OCR
 def run_GOT(image, tokenizer, model):
     unique_id = str(uuid.uuid4())
-    image_path =
 
     image.save(image_path)
 
     try:
-
+        # Use the model to extract text
+        res = model.chat(tokenizer, image_path, ocr_type='ocr')  # Extract plain text
         return res
     except Exception as e:
         return f"Error: {str(e)}"
     finally:
+        # Clean up the saved image
         if os.path.exists(image_path):
             os.remove(image_path)
 
-# Function to
-def
-
-
-
-
-        file_path.unlink()
+# Function to highlight keyword in text
+def highlight_keyword(text, keyword):
+    if keyword:
+        highlighted_text = text.replace(keyword, f"<mark>{keyword}</mark>")
+        return highlighted_text
+    return text
 
 # Streamlit App
-st.set_page_config(page_title="GOT-OCR
+st.set_page_config(page_title="GOT-OCR Multilingual Demo", layout="wide")
 
-
+# Creating two columns
+left_col, right_col = st.columns(2)
 
-
-
+with left_col:
+    uploaded_image = st.file_uploader("Upload your image", type=["png", "jpg", "jpeg"])
 
-
-
+with right_col:
+    # Model selection in the right column
+    model_option = st.selectbox("Select Model", ["OCR for english or hindi (runs on CPU)", "OCR for english (runs on GPU)"])
 
 if uploaded_image:
     image = Image.open(uploaded_image)
 
-    with
+    with left_col:
         st.image(image, caption='Uploaded Image', use_column_width=True)
-
-    with
-        if st.button("Run
+
+    with right_col:
+        if st.button("Run OCR"):
             with st.spinner("Processing..."):
-                # Load the selected model
+                # Load the selected model (cached using @st.cache_resource)
                 tokenizer, model = load_model(model_option)
                 result_text = run_GOT(image, tokenizer, model)
-
-
-                #
-
-
+
+                if "Error" not in result_text:
+                    # Keyword input for search
+                    keyword = st.text_input("Enter a keyword to highlight")
+
+                    # Highlight keyword in the extracted text
+                    highlighted_text = highlight_keyword(result_text, keyword)
+
+                    # Display the extracted text
+                    st.markdown(highlighted_text, unsafe_allow_html=True)
+                else:
+                    st.error(result_text)
-    image_path =
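The OCR path added here can also be checked outside the Streamlit UI. The sketch below reuses only calls that appear in this commit (the 'srimanth-d/GOT_CPU' checkpoint and model.chat with ocr_type='ocr'); the file name "sample.png" is a placeholder for any local test image.

# Minimal standalone check of the CPU OCR path (sketch; "sample.png" is a placeholder).
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('srimanth-d/GOT_CPU', trust_remote_code=True)
model = AutoModel.from_pretrained('srimanth-d/GOT_CPU', trust_remote_code=True,
                                  use_safetensors=True, pad_token_id=tokenizer.eos_token_id)
model.eval()

# Same call the app issues inside run_GOT(); returns the extracted plain text.
text = model.chat(tokenizer, "sample.png", ocr_type='ocr')
print(text)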