AliArshad committed on
Commit
d7a8a79
·
verified ·
1 Parent(s): 91d77cb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -45
app.py CHANGED
@@ -43,6 +43,10 @@ class MedicalImageAnalysisSystem:
43
  model_kwargs={"low_cpu_mem_usage": True}
44
  )
45
 
 
 
 
 
46
  print("System initialization complete!")
47
 
48
  def generate_synthetic_metadata(self):
@@ -64,46 +68,51 @@ class MedicalImageAnalysisSystem:
64
  image = image.convert('RGB')
65
  return image.resize((224, 224))
66
 
67
- @torch.no_grad() # Disable gradient computation for inference
68
  def predict_tumor_presence(self, processed_image):
69
  inputs = self.tumor_classifier_processor(processed_image, return_tensors="pt")
70
- inputs = {k: v.to(self.device) for k, v in inputs.items()} # Move inputs to correct device
71
  outputs = self.tumor_classifier_model(**inputs)
72
  predictions = torch.softmax(outputs.logits, dim=-1)
73
- probs = predictions[0].cpu().tolist() # Move back to CPU for numpy operations
74
- return {
75
- "non-tumor": float(probs[0]),
76
- "tumor": float(probs[1])
77
- }
78
 
79
- @torch.no_grad() # Disable gradient computation for inference
80
  def predict_tumor_radius(self, processed_image):
81
  inputs = self.radius_processor(processed_image, return_tensors="pt")
82
- inputs = {k: v.to(self.device) for k, v in inputs.items()} # Move inputs to correct device
83
  outputs = self.radius_model(**inputs)
84
  predictions = outputs.logits.softmax(dim=-1)
85
  predicted_label = predictions.argmax().item()
86
- confidence = predictions[0][predicted_label].cpu().item() # Move back to CPU
87
-
88
  class_names = ["no-tumor", "0.5", "1.0", "1.5"]
89
- return {
90
- "radius": class_names[predicted_label],
91
- "confidence": float(confidence)
92
- }
93
 
94
  def generate_llm_interpretation(self, tumor_presence, tumor_radius, metadata):
95
- prompt = f"""<|system|>You are a medical AI assistant. Be concise but thorough.</s>
96
- <|user|>Analyze these results:
97
- Tumor Detection: {json.dumps(tumor_presence)}
98
- Tumor Radius: {json.dumps(tumor_radius)}
99
- Patient: {metadata['age']}y/o {metadata['gender']}, {metadata['smoking_status']}, {metadata['drinking_status']}
100
- Medications: {', '.join(metadata['medications']) if metadata['medications'] else 'None'}
101
- Provide: 1. Key findings 2. Risk assessment 3. Recommendations</s>
 
 
 
 
 
 
 
 
 
 
 
102
  <|assistant|>"""
103
 
104
  response = self.llm(
105
  prompt,
106
- max_new_tokens=300, # Reduced for faster response
107
  temperature=0.7,
108
  do_sample=True,
109
  top_p=0.95,
@@ -114,51 +123,44 @@ class MedicalImageAnalysisSystem:
114
 
115
  def analyze_image(self, image):
116
  try:
117
- # Add progress updates
118
- yield "Processing image..."
119
  processed_image = self.process_image(image)
120
-
121
- yield "Generating patient metadata..."
122
  metadata = self.generate_synthetic_metadata()
123
 
124
- yield "Analyzing tumor presence..."
125
  tumor_presence = self.predict_tumor_presence(processed_image)
126
-
127
- yield "Analyzing tumor radius..."
128
  tumor_radius = self.predict_tumor_radius(processed_image)
129
 
130
- yield "Generating medical interpretation..."
131
  interpretation = self.generate_llm_interpretation(
132
  tumor_presence,
133
  tumor_radius,
134
  metadata
135
  )
136
 
137
- # Final results
138
- result = {
139
  "metadata": metadata,
140
  "tumor_presence": tumor_presence,
141
  "tumor_radius": tumor_radius,
142
  "interpretation": interpretation
143
  }
144
 
145
- yield self.format_results(result)
146
 
147
  except Exception as e:
148
- yield f"Error: {str(e)}"
149
 
150
  def format_results(self, results):
151
  return f"""
152
- Patient Metadata:
153
  {json.dumps(results['metadata'], indent=2)}
154
 
155
- Tumor Presence Analysis:
156
- {json.dumps(results['tumor_presence'], indent=2)}
157
-
158
- Tumor Radius Analysis:
159
- {json.dumps(results['tumor_radius'], indent=2)}
160
 
161
- Medical Interpretation and Recommendations:
162
  {results['interpretation']}
163
  """
164
 
@@ -175,8 +177,7 @@ def create_interface():
175
  ],
176
  title="Medical Image Analysis System",
177
  description="Upload a medical image for tumor analysis and recommendations.",
178
- theme=gr.themes.Base(),
179
- flagging=False
180
  )
181
 
182
  return iface
@@ -184,5 +185,5 @@ def create_interface():
184
  if __name__ == "__main__":
185
  print("Starting application...")
186
  iface = create_interface()
187
- iface.queue() # Enable queuing for better handling of multiple requests
188
  iface.launch(debug=True, share=True)
 
43
  model_kwargs={"low_cpu_mem_usage": True}
44
  )
45
 
46
+ # Set models to evaluation mode
47
+ self.tumor_classifier_model.eval()
48
+ self.radius_model.eval()
49
+
50
  print("System initialization complete!")
51
 
52
  def generate_synthetic_metadata(self):
 
68
  image = image.convert('RGB')
69
  return image.resize((224, 224))
70
 
71
+ @torch.no_grad()
72
  def predict_tumor_presence(self, processed_image):
73
  inputs = self.tumor_classifier_processor(processed_image, return_tensors="pt")
74
+ inputs = {k: v.to(self.device) for k, v in inputs.items()}
75
  outputs = self.tumor_classifier_model(**inputs)
76
  predictions = torch.softmax(outputs.logits, dim=-1)
77
+ probs = predictions[0].cpu().tolist()
78
+ # Return just the predicted class instead of probabilities
79
+ return "tumor" if probs[1] > probs[0] else "non-tumor"
 
 
80
 
81
+ @torch.no_grad()
82
  def predict_tumor_radius(self, processed_image):
83
  inputs = self.radius_processor(processed_image, return_tensors="pt")
84
+ inputs = {k: v.to(self.device) for k, v in inputs.items()}
85
  outputs = self.radius_model(**inputs)
86
  predictions = outputs.logits.softmax(dim=-1)
87
  predicted_label = predictions.argmax().item()
 
 
88
  class_names = ["no-tumor", "0.5", "1.0", "1.5"]
89
+ # Return just the radius without confidence
90
+ return class_names[predicted_label]
 
 
91
 
92
  def generate_llm_interpretation(self, tumor_presence, tumor_radius, metadata):
93
+ prompt = f"""<|system|>You are a medical AI assistant. Provide a clear and concise medical interpretation.</s>
94
+ <|user|>Analyze the following medical findings:
95
+
96
+ Image Analysis:
97
+ - Tumor Detection: {tumor_presence}
98
+ - Tumor Size: {tumor_radius} cm
99
+
100
+ Patient Profile:
101
+ - Age: {metadata['age']} years
102
+ - Gender: {metadata['gender']}
103
+ - Smoking: {metadata['smoking_status']}
104
+ - Alcohol: {metadata['drinking_status']}
105
+ - Current Medications: {', '.join(metadata['medications']) if metadata['medications'] else 'None'}
106
+
107
+ Provide a brief:
108
+ 1. Key findings
109
+ 2. Clinical recommendations
110
+ 3. Follow-up plan</s>
111
  <|assistant|>"""
112
 
113
  response = self.llm(
114
  prompt,
115
+ max_new_tokens=300,
116
  temperature=0.7,
117
  do_sample=True,
118
  top_p=0.95,
 
123
 
124
  def analyze_image(self, image):
125
  try:
126
+ # Process image and generate metadata
 
127
  processed_image = self.process_image(image)
 
 
128
  metadata = self.generate_synthetic_metadata()
129
 
130
+ # Get predictions
131
  tumor_presence = self.predict_tumor_presence(processed_image)
 
 
132
  tumor_radius = self.predict_tumor_radius(processed_image)
133
 
134
+ # Generate interpretation
135
  interpretation = self.generate_llm_interpretation(
136
  tumor_presence,
137
  tumor_radius,
138
  metadata
139
  )
140
 
141
+ # Format results
142
+ results = {
143
  "metadata": metadata,
144
  "tumor_presence": tumor_presence,
145
  "tumor_radius": tumor_radius,
146
  "interpretation": interpretation
147
  }
148
 
149
+ return self.format_results(results)
150
 
151
  except Exception as e:
152
+ return f"Error: {str(e)}"
153
 
154
  def format_results(self, results):
155
  return f"""
156
+ Patient Information:
157
  {json.dumps(results['metadata'], indent=2)}
158
 
159
+ Image Analysis Results:
160
+ - Tumor Detection: {results['tumor_presence']}
161
+ - Tumor Size: {results['tumor_radius']} cm
 
 
162
 
163
+ Medical Assessment:
164
  {results['interpretation']}
165
  """
166
 
 
177
  ],
178
  title="Medical Image Analysis System",
179
  description="Upload a medical image for tumor analysis and recommendations.",
180
+ theme=gr.themes.Base()
 
181
  )
182
 
183
  return iface
 
185
  if __name__ == "__main__":
186
  print("Starting application...")
187
  iface = create_interface()
188
+ iface.queue()
189
  iface.launch(debug=True, share=True)