Spaces:
Running
Running
Shak33l-UiRev
committed on
added debug to model workflow
Browse files
Adds a parallel debug column that shows:
Real-time Process Information:
Timestamped debug messages
Color-coded by message type (info, warning, error, success)
Model loading details
Image processing information
Resource Usage:
GPU memory usage (if available)
Model device information
Memory allocation tracking
Technical Details:
Image properties
Model configuration
Processing steps
Result type information
Error Handling:
Detailed error messages
Error types
Stack trace references
app.py
CHANGED
@@ -196,47 +196,117 @@ with col2:
|
|
196 |
for use_case in model_info[selected_model]['best_for']:
|
197 |
st.markdown(f"- {use_case}")
|
198 |
|
199 |
-
#
|
200 |
if uploaded_file is not None and selected_model:
|
201 |
if st.button("Analyze Document", help="Click to start document analysis"):
|
202 |
-
|
|
|
|
|
|
|
203 |
try:
|
204 |
-
# Create a progress bar
|
205 |
-
|
|
|
|
|
206 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
207 |
# Load model with progress update
|
208 |
-
|
209 |
-
|
|
|
|
|
|
|
210 |
model, processor = load_model(selected_model)
|
211 |
|
212 |
if model is None or processor is None:
|
213 |
-
|
|
|
|
|
214 |
else:
|
|
|
|
|
|
|
|
|
215 |
# Update progress
|
216 |
-
|
217 |
-
|
|
|
|
|
|
|
|
|
|
|
218 |
|
219 |
# Analyze document
|
|
|
220 |
results = analyze_document(image, selected_model, model, processor)
|
|
|
221 |
|
222 |
# Update progress
|
223 |
-
|
224 |
-
|
225 |
-
|
226 |
-
|
227 |
-
|
228 |
-
|
229 |
-
|
230 |
-
|
231 |
-
|
232 |
-
|
233 |
-
|
234 |
-
|
235 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
236 |
|
237 |
except Exception as e:
|
238 |
-
|
239 |
-
|
|
|
|
|
|
|
|
|
240 |
|
241 |
# Add improved information about usage and limitations
|
242 |
st.markdown("""
|
|
|
196 |
for use_case in model_info[selected_model]['best_for']:
|
197 |
st.markdown(f"- {use_case}")
|
198 |
|
199 |
+
# Inside the analysis section, replace the existing if-block with:
|
200 |
if uploaded_file is not None and selected_model:
|
201 |
if st.button("Analyze Document", help="Click to start document analysis"):
|
202 |
+
# Create two columns for results and debug info
|
203 |
+
result_col, debug_col = st.columns([1, 1])
|
204 |
+
|
205 |
+
with st.spinner('Processing...'):
|
206 |
try:
|
207 |
+
# Create a progress bar in results column
|
208 |
+
with result_col:
|
209 |
+
st.markdown("### Analysis Progress")
|
210 |
+
progress_bar = st.progress(0)
|
211 |
|
212 |
+
# Initialize debug column
|
213 |
+
with debug_col:
|
214 |
+
st.markdown("### Debug Information")
|
215 |
+
debug_container = st.empty()
|
216 |
+
|
217 |
+
def update_debug(message, level="info"):
|
218 |
+
"""Update debug information with timestamp"""
|
219 |
+
timestamp = datetime.now().strftime("%H:%M:%S.%f")[:-3]
|
220 |
+
color = {
|
221 |
+
"info": "blue",
|
222 |
+
"warning": "orange",
|
223 |
+
"error": "red",
|
224 |
+
"success": "green"
|
225 |
+
}.get(level, "black")
|
226 |
+
|
227 |
+
return f"<div style='color: {color};'>[{timestamp}] {message}</div>"
|
228 |
+
|
229 |
+
debug_messages = []
|
230 |
+
|
231 |
+
def add_debug(message, level="info"):
|
232 |
+
debug_messages.append(update_debug(message, level))
|
233 |
+
debug_container.markdown(
|
234 |
+
"\n".join(debug_messages),
|
235 |
+
unsafe_allow_html=True
|
236 |
+
)
|
237 |
+
|
238 |
# Load model with progress update
|
239 |
+
with result_col:
|
240 |
+
progress_bar.progress(25)
|
241 |
+
st.info("Loading model...")
|
242 |
+
|
243 |
+
add_debug(f"Loading {selected_model} model and processor...")
|
244 |
model, processor = load_model(selected_model)
|
245 |
|
246 |
if model is None or processor is None:
|
247 |
+
with result_col:
|
248 |
+
st.error("Failed to load model. Please try again.")
|
249 |
+
add_debug("Model loading failed!", "error")
|
250 |
else:
|
251 |
+
add_debug("Model loaded successfully", "success")
|
252 |
+
add_debug(f"Model device: {next(model.parameters()).device}")
|
253 |
+
add_debug(f"Model memory usage: {torch.cuda.max_memory_allocated()/1024**2:.2f}MB") if torch.cuda.is_available() else None
|
254 |
+
|
255 |
# Update progress
|
256 |
+
with result_col:
|
257 |
+
progress_bar.progress(50)
|
258 |
+
st.info("Analyzing document...")
|
259 |
+
|
260 |
+
# Log image details
|
261 |
+
add_debug(f"Image size: {image.size}")
|
262 |
+
add_debug(f"Image mode: {image.mode}")
|
263 |
|
264 |
# Analyze document
|
265 |
+
add_debug("Starting document analysis...")
|
266 |
results = analyze_document(image, selected_model, model, processor)
|
267 |
+
add_debug("Analysis completed", "success")
|
268 |
|
269 |
# Update progress
|
270 |
+
with result_col:
|
271 |
+
progress_bar.progress(75)
|
272 |
+
st.markdown("### Analysis Results")
|
273 |
+
|
274 |
+
if isinstance(results, dict) and "error" in results:
|
275 |
+
st.error(f"Analysis Error: {results['error']}")
|
276 |
+
add_debug(f"Analysis error: {results['error']}", "error")
|
277 |
+
else:
|
278 |
+
# Pretty print the results in results column
|
279 |
+
st.json(results)
|
280 |
+
|
281 |
+
# Show detailed results breakdown in debug column
|
282 |
+
add_debug("Results breakdown:", "info")
|
283 |
+
if isinstance(results, dict):
|
284 |
+
for key, value in results.items():
|
285 |
+
add_debug(f"- {key}: {type(value)}")
|
286 |
+
else:
|
287 |
+
add_debug(f"Result type: {type(results)}")
|
288 |
+
|
289 |
+
# Complete progress
|
290 |
+
progress_bar.progress(100)
|
291 |
+
st.success("Analysis completed!")
|
292 |
+
|
293 |
+
# Final debug info
|
294 |
+
add_debug("Process completed successfully", "success")
|
295 |
+
with debug_col:
|
296 |
+
if torch.cuda.is_available():
|
297 |
+
st.markdown("### Resource Usage")
|
298 |
+
st.markdown(f"""
|
299 |
+
- GPU Memory: {torch.cuda.max_memory_allocated()/1024**2:.2f}MB
|
300 |
+
- GPU Utilization: {torch.cuda.utilization()}%
|
301 |
+
""")
|
302 |
|
303 |
except Exception as e:
|
304 |
+
with result_col:
|
305 |
+
st.error(f"Error during analysis: {str(e)}")
|
306 |
+
add_debug(f"Error: {str(e)}", "error")
|
307 |
+
add_debug(f"Error type: {type(e)}", "error")
|
308 |
+
if hasattr(e, '__traceback__'):
|
309 |
+
add_debug("Traceback available in logs", "warning")
|
310 |
|
311 |
# Add improved information about usage and limitations
|
312 |
st.markdown("""
|