Kuberwastaken committed on
Commit
8ea6312
·
1 Parent(s): 474b075

something broke, trying model without the loading bar.

Browse files
Files changed (2) hide show
  1. gradio_app.py +28 -31
  2. model/analyzer.py +13 -26
gradio_app.py CHANGED
@@ -8,14 +8,12 @@ custom_css = """
8
  * {
9
  font-family: 'Roboto', sans-serif;
10
  }
11
-
12
  .gradio-container {
13
  background: #121212 !important;
14
  color: #fff !important;
15
  overflow: hidden;
16
  transition: background 0.5s ease;
17
  }
18
-
19
  .treat-title {
20
  text-align: center;
21
  padding: 40px;
@@ -25,7 +23,6 @@ custom_css = """
25
  box-shadow: 0 4px 8px rgba(0, 0, 0, 0.3);
26
  animation: slideInFromTop 1s ease-out;
27
  }
28
-
29
  .treat-title h1 {
30
  font-size: 5em;
31
  color: #4f46e5;
@@ -33,18 +30,15 @@ custom_css = """
33
  font-weight: bold;
34
  animation: fadeInText 1.5s ease-out;
35
  }
36
-
37
  .treat-title p {
38
  font-size: 1.3em;
39
  color: #4f46e5;
40
  animation: fadeInText 1.5s ease-out 0.5s;
41
  }
42
-
43
  .highlight {
44
  color: #4f46e5;
45
  font-weight: bold;
46
  }
47
-
48
  .content-area, .results-area {
49
  background: rgba(33, 33, 33, 0.9) !important;
50
  border-radius: 15px !important;
@@ -54,7 +48,6 @@ custom_css = """
54
  opacity: 0;
55
  animation: fadeInUp 1s forwards;
56
  }
57
-
58
  .gradio-textbox textarea {
59
  background-color: #333 !important;
60
  color: #fff !important;
@@ -64,11 +57,9 @@ custom_css = """
64
  font-size: 1.1em !important;
65
  transition: border-color 0.3s ease;
66
  }
67
-
68
  .gradio-textbox textarea:focus {
69
  border-color: #4f46e5 !important;
70
  }
71
-
72
  .gradio-button {
73
  background-color: #4f46e5 !important;
74
  color: white !important;
@@ -79,29 +70,24 @@ custom_css = """
79
  transition: transform 0.3s ease, background-color 0.3s ease;
80
  margin: 20px 0 !important;
81
  }
82
-
83
  .gradio-button:hover {
84
  transform: scale(1.1) !important;
85
  background-color: #5749d3 !important;
86
  }
87
-
88
  .gradio-button:active {
89
  transform: scale(0.98) !important;
90
  background-color: #4b40bb !important;
91
  }
92
-
93
  label {
94
  color: #ccc !important;
95
  font-weight: 500 !important;
96
  margin-bottom: 10px !important;
97
  }
98
-
99
  .center-row {
100
  display: flex;
101
  justify-content: center;
102
  align-items: center;
103
  }
104
-
105
  .footer {
106
  text-align: center;
107
  margin-top: 40px;
@@ -110,45 +96,55 @@ label {
110
  opacity: 0;
111
  animation: fadeInUp 1s forwards 1.5s;
112
  }
113
-
114
  .footer p {
115
  color: #4f46e5;
116
  }
117
-
118
  @keyframes slideInFromTop {
119
  0% { transform: translateY(-50px); opacity: 0; }
120
  100% { transform: translateY(0); opacity: 1; }
121
  }
122
-
123
  @keyframes fadeInText {
124
  0% { opacity: 0; }
125
  100% { opacity: 1; }
126
  }
127
-
128
  @keyframes fadeInUp {
129
  0% { opacity: 0; transform: translateY(30px); }
130
  100% { opacity: 1; transform: translateY(0); }
131
  }
132
  """
133
 
134
- async def analyze_with_loading(text, progress=gr.Progress()):
135
  """
136
- Asynchronous wrapper for analyze_content that properly tracks progress
137
  """
 
 
 
 
 
 
 
 
 
 
138
  try:
139
- # Call analyze_content directly with the progress object
140
- result = await analyze_content(text, progress)
141
-
142
- # Format the results
143
- triggers = result["detected_triggers"]
144
- if triggers == ["None"]:
145
- return "✓ No concerns detected in the content."
146
- else:
147
- trigger_list = "\n".join([f"• {trigger}" for trigger in triggers])
148
- return f"⚠ Triggers Detected:\n{trigger_list}"
149
-
150
  except Exception as e:
151
  return f"Error during analysis: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
152
 
153
  # Create the Gradio interface
154
  with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as iface:
@@ -206,6 +202,7 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as iface:
206
  """)
207
 
208
  if __name__ == "__main__":
 
209
  iface.launch(
210
  share=False,
211
  debug=True,
 
8
  * {
9
  font-family: 'Roboto', sans-serif;
10
  }
 
11
  .gradio-container {
12
  background: #121212 !important;
13
  color: #fff !important;
14
  overflow: hidden;
15
  transition: background 0.5s ease;
16
  }
 
17
  .treat-title {
18
  text-align: center;
19
  padding: 40px;
 
23
  box-shadow: 0 4px 8px rgba(0, 0, 0, 0.3);
24
  animation: slideInFromTop 1s ease-out;
25
  }
 
26
  .treat-title h1 {
27
  font-size: 5em;
28
  color: #4f46e5;
 
30
  font-weight: bold;
31
  animation: fadeInText 1.5s ease-out;
32
  }
 
33
  .treat-title p {
34
  font-size: 1.3em;
35
  color: #4f46e5;
36
  animation: fadeInText 1.5s ease-out 0.5s;
37
  }
 
38
  .highlight {
39
  color: #4f46e5;
40
  font-weight: bold;
41
  }
 
42
  .content-area, .results-area {
43
  background: rgba(33, 33, 33, 0.9) !important;
44
  border-radius: 15px !important;
 
48
  opacity: 0;
49
  animation: fadeInUp 1s forwards;
50
  }
 
51
  .gradio-textbox textarea {
52
  background-color: #333 !important;
53
  color: #fff !important;
 
57
  font-size: 1.1em !important;
58
  transition: border-color 0.3s ease;
59
  }
 
60
  .gradio-textbox textarea:focus {
61
  border-color: #4f46e5 !important;
62
  }
 
63
  .gradio-button {
64
  background-color: #4f46e5 !important;
65
  color: white !important;
 
70
  transition: transform 0.3s ease, background-color 0.3s ease;
71
  margin: 20px 0 !important;
72
  }
 
73
  .gradio-button:hover {
74
  transform: scale(1.1) !important;
75
  background-color: #5749d3 !important;
76
  }
 
77
  .gradio-button:active {
78
  transform: scale(0.98) !important;
79
  background-color: #4b40bb !important;
80
  }
 
81
  label {
82
  color: #ccc !important;
83
  font-weight: 500 !important;
84
  margin-bottom: 10px !important;
85
  }
 
86
  .center-row {
87
  display: flex;
88
  justify-content: center;
89
  align-items: center;
90
  }
 
91
  .footer {
92
  text-align: center;
93
  margin-top: 40px;
 
96
  opacity: 0;
97
  animation: fadeInUp 1s forwards 1.5s;
98
  }
 
99
  .footer p {
100
  color: #4f46e5;
101
  }
 
102
  @keyframes slideInFromTop {
103
  0% { transform: translateY(-50px); opacity: 0; }
104
  100% { transform: translateY(0); opacity: 1; }
105
  }
 
106
  @keyframes fadeInText {
107
  0% { opacity: 0; }
108
  100% { opacity: 1; }
109
  }
 
110
  @keyframes fadeInUp {
111
  0% { opacity: 0; transform: translateY(30px); }
112
  100% { opacity: 1; transform: translateY(0); }
113
  }
114
  """
115
 
116
+ def analyze_with_loading(text, progress=gr.Progress()):
117
  """
118
+ Synchronous wrapper for the async analyze_content function
119
  """
120
+ # Initialize progress
121
+ progress(0, desc="Starting analysis...")
122
+
123
+ # Initial setup phase
124
+ for i in range(30):
125
+ time.sleep(0.02) # Reduced sleep time
126
+ progress((i + 1) / 100)
127
+
128
+ # Perform analysis
129
+ progress(0.3, desc="Processing text...")
130
  try:
131
+ # Use asyncio.run to handle the async function call
132
+ result = asyncio.run(analyze_content(text))
 
 
 
 
 
 
 
 
 
133
  except Exception as e:
134
  return f"Error during analysis: {str(e)}"
135
+
136
+ # Final processing
137
+ for i in range(70, 100):
138
+ time.sleep(0.02) # Reduced sleep time
139
+ progress((i + 1) / 100)
140
+
141
+ # Format the results
142
+ triggers = result["detected_triggers"]
143
+ if triggers == ["None"]:
144
+ return "✓ No triggers detected in the content."
145
+ else:
146
+ trigger_list = "\n".join([f"• {trigger}" for trigger in triggers])
147
+ return f"⚠ Triggers Detected:\n{trigger_list}"
148
 
149
  # Create the Gradio interface
150
  with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as iface:
 
202
  """)
203
 
204
  if __name__ == "__main__":
205
+ # Launch without the 'ssr' argument
206
  iface.launch(
207
  share=False,
208
  debug=True,
model/analyzer.py CHANGED
@@ -122,7 +122,7 @@ class ContentAnalyzer:
122
  )
123
 
124
  if progress:
125
- progress(0.15, "Loading model...")
126
 
127
  self.model = AutoModelForCausalLM.from_pretrained(
128
  "meta-llama/Llama-3.2-1B",
@@ -132,7 +132,7 @@ class ContentAnalyzer:
132
  )
133
 
134
  if progress:
135
- progress(0.2, "Model loaded successfully")
136
 
137
  logger.info(f"Model loaded successfully on {self.device}")
138
  except Exception as e:
@@ -147,6 +147,7 @@ class ContentAnalyzer:
147
 
148
  while start < text_len:
149
  end = min(start + chunk_size, text_len)
 
150
  if end < text_len:
151
  last_period = max(
152
  text.rfind('. ', start, end),
@@ -164,6 +165,7 @@ class ContentAnalyzer:
164
  """Process model response and return a confidence score."""
165
  response = response_text.strip().upper()
166
 
 
167
  if "YES" in response:
168
  evidence_words = ["CLEAR", "DEFINITELY", "EXPLICIT", "STRONG"]
169
  return 1.0 if any(word in response for word in evidence_words) else 0.8
@@ -172,6 +174,7 @@ class ContentAnalyzer:
172
  elif "NO" in response:
173
  return 0.0
174
 
 
175
  positive_indicators = ["PRESENT", "FOUND", "CONTAINS", "SHOWS", "INDICATES"]
176
  negative_indicators = ["ABSENT", "NONE", "NOTHING", "LACKS"]
177
 
@@ -191,7 +194,6 @@ class ContentAnalyzer:
191
  ) -> Dict[str, float]:
192
  """Analyze a single chunk of text for triggers."""
193
  chunk_triggers = {}
194
- progress_increment = progress_step / len(self.trigger_categories)
195
 
196
  for category, info in self.trigger_categories.items():
197
  mapped_name = info["mapped_name"]
@@ -236,8 +238,8 @@ class ContentAnalyzer:
236
  chunk_triggers[mapped_name] = chunk_triggers.get(mapped_name, 0) + confidence
237
 
238
  if progress:
239
- current_progress += progress_increment
240
- progress(min(current_progress, 0.9), f"Analyzing for {mapped_name}...")
241
 
242
  except Exception as e:
243
  logger.error(f"Error analyzing chunk for {mapped_name}: {str(e)}")
@@ -251,40 +253,28 @@ class ContentAnalyzer:
251
 
252
  chunks = self._chunk_text(script)
253
  trigger_scores = {}
254
-
255
- # Calculate progress allocation
256
- analysis_progress = 0.7 # 70% of progress for analysis
257
- progress_per_chunk = analysis_progress / len(chunks)
258
- current_progress = 0.2 # Starting after model loading
259
-
260
- if progress:
261
- progress(current_progress, "Beginning content analysis...")
262
 
263
- for i, chunk in enumerate(chunks):
264
  chunk_triggers = await self.analyze_chunk(
265
  chunk,
266
  progress,
267
  current_progress,
268
- progress_per_chunk
269
  )
270
 
271
  for trigger, score in chunk_triggers.items():
272
  trigger_scores[trigger] = trigger_scores.get(trigger, 0) + score
273
-
274
- current_progress += progress_per_chunk
275
- if progress:
276
- chunk_number = i + 1
277
- progress(min(0.9, current_progress),
278
- f"Processing chunk {chunk_number}/{len(chunks)}...")
279
 
280
  if progress:
281
- progress(0.95, "Finalizing analysis...")
282
 
283
  # Normalize scores by number of chunks and apply threshold
284
  chunk_count = len(chunks)
285
  final_triggers = [
286
  trigger for trigger, score in trigger_scores.items()
287
- if score / chunk_count > 0.3
288
  ]
289
 
290
  return final_triggers if final_triggers else ["None"]
@@ -297,9 +287,6 @@ async def analyze_content(
297
  analyzer = ContentAnalyzer()
298
 
299
  try:
300
- if progress:
301
- progress(0.0, "Initializing analyzer...")
302
-
303
  triggers = await analyzer.analyze_script(script, progress)
304
 
305
  if progress:
 
122
  )
123
 
124
  if progress:
125
+ progress(0.3, "Loading model...")
126
 
127
  self.model = AutoModelForCausalLM.from_pretrained(
128
  "meta-llama/Llama-3.2-1B",
 
132
  )
133
 
134
  if progress:
135
+ progress(0.5, "Model loaded successfully")
136
 
137
  logger.info(f"Model loaded successfully on {self.device}")
138
  except Exception as e:
 
147
 
148
  while start < text_len:
149
  end = min(start + chunk_size, text_len)
150
+ # Find the last period or newline in the chunk to avoid cutting sentences
151
  if end < text_len:
152
  last_period = max(
153
  text.rfind('. ', start, end),
 
165
  """Process model response and return a confidence score."""
166
  response = response_text.strip().upper()
167
 
168
+ # Check for explicit YES/NO/MAYBE
169
  if "YES" in response:
170
  evidence_words = ["CLEAR", "DEFINITELY", "EXPLICIT", "STRONG"]
171
  return 1.0 if any(word in response for word in evidence_words) else 0.8
 
174
  elif "NO" in response:
175
  return 0.0
176
 
177
+ # Fallback analysis for unclear responses
178
  positive_indicators = ["PRESENT", "FOUND", "CONTAINS", "SHOWS", "INDICATES"]
179
  negative_indicators = ["ABSENT", "NONE", "NOTHING", "LACKS"]
180
 
 
194
  ) -> Dict[str, float]:
195
  """Analyze a single chunk of text for triggers."""
196
  chunk_triggers = {}
 
197
 
198
  for category, info in self.trigger_categories.items():
199
  mapped_name = info["mapped_name"]
 
238
  chunk_triggers[mapped_name] = chunk_triggers.get(mapped_name, 0) + confidence
239
 
240
  if progress:
241
+ current_progress += progress_step
242
+ progress(min(current_progress, 0.9), f"Analyzing {mapped_name}...")
243
 
244
  except Exception as e:
245
  logger.error(f"Error analyzing chunk for {mapped_name}: {str(e)}")
 
253
 
254
  chunks = self._chunk_text(script)
255
  trigger_scores = {}
256
+ progress_step = 0.4 / (len(chunks) * len(self.trigger_categories))
257
+ current_progress = 0.5
 
 
 
 
 
 
258
 
259
+ for chunk in chunks:
260
  chunk_triggers = await self.analyze_chunk(
261
  chunk,
262
  progress,
263
  current_progress,
264
+ progress_step
265
  )
266
 
267
  for trigger, score in chunk_triggers.items():
268
  trigger_scores[trigger] = trigger_scores.get(trigger, 0) + score
 
 
 
 
 
 
269
 
270
  if progress:
271
+ progress(0.95, "Finalizing results...")
272
 
273
  # Normalize scores by number of chunks and apply threshold
274
  chunk_count = len(chunks)
275
  final_triggers = [
276
  trigger for trigger, score in trigger_scores.items()
277
+ if score / chunk_count > 0.3 # Adjusted threshold for better balance
278
  ]
279
 
280
  return final_triggers if final_triggers else ["None"]
 
287
  analyzer = ContentAnalyzer()
288
 
289
  try:
 
 
 
290
  triggers = await analyzer.analyze_script(script, progress)
291
 
292
  if progress: