sfmig committed on
Commit
4687e19
·
1 Parent(s): 916c574

added selectable dlc model, added option to run dlc model only on input image (mostly for testing)

Browse files
Files changed (1) hide show
  1. app.py +60 -34
app.py CHANGED
@@ -112,7 +112,8 @@ def crop_animal_detections(yolo_results,
112
 
113
  def predict_dlc(list_np_crops,
114
  kpts_likelihood_th,
115
- DLCmodel, dlc_proc):
 
116
 
117
  # run dlc thru list of crops
118
  dlc_live = DLCLive(DLCmodel, processor=dlc_proc)
@@ -132,13 +133,17 @@ def predict_dlc(list_np_crops,
132
 
133
 
134
  def predict_pipeline(img_input,
 
 
135
  bbox_likelihood_th,
136
  kpts_likelihood_th):
137
 
138
- # these will eventually be user inputs....
139
- path_to_DLCmodel = "DLC_models/DLC_Cat_resnet_50_iteration-0_shuffle-0"
 
 
140
 
141
- ### Run Megadetector
142
  md_results = predict_md(img_input) #Image.fromarray(results.imgs[0])
143
 
144
  # Obtain animal crops with confidence above th
@@ -148,36 +153,46 @@ def predict_pipeline(img_input,
148
  # Run DLC
149
  # TODO: add llk threshold for kpts too?
150
  dlc_proc = Processor()
151
- list_kpts_per_crop = predict_dlc(list_crops,
152
- kpts_likelihood_th,
153
- path_to_DLCmodel,
154
- dlc_proc)
155
-
156
-
157
- # # Produce final image
158
- # fig = plt.Figure(md_results.imgs[0].shape[:2]) #figsize=(10,10)) #md_results.imgs[0].shape)
159
- # list_annotated_crops = []
160
- img_background = Image.fromarray(md_results.imgs[0])
161
- for ic, (np_crop, kpts_crop) in enumerate(zip(list_crops,
162
- list_kpts_per_crop)):
163
-
164
- ## Draw keypts on crop
165
- img_crop = Image.fromarray(np_crop)
166
- draw_keypoints_on_image(img_crop,
167
- kpts_crop, # a numpy array with shape [num_keypoints, 2].
168
  color='red',
169
  radius=2,
170
- use_normalized_coordinates=False) # if True, then I should use md_results.xyxyn
171
-
172
-
173
- # list_annotated_crops.append(img_crop)
174
-
175
- ## Paste crop in original image
176
- # https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.paste
177
- img_background.paste(img_crop,
178
- box = tuple([int(math.floor(t)) for t in md_results.xyxy[0][ic,:2]]))
179
-
180
- return img_background #Image.fromarray(list_crops[0]) #Image.fromarray(md_results.imgs[0]) #list_annotated_crops #
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
181
 
182
 
183
  ##########################################################
@@ -191,8 +206,15 @@ MD_model = torch.hub.load('ultralytics/yolov5', 'custom', "model_weights/md_v5a.
191
  # Create user interface and launch
192
  gr_image_input = gr.inputs.Image(type="pil", label="Input Image")
193
  gr_image_output = gr.outputs.Image(type="pil", label="Output Image")
 
 
 
 
 
 
 
194
  gr_slider_conf_bboxes = gr.inputs.Slider(0,1,.05,0.8,
195
- label='Set confidence threshold for animal detections')
196
  gr_slider_conf_keypoints = gr.inputs.Slider(0,1,.05,0,
197
  label='Set confidence threshold for keypoints')
198
  #image = gr.inputs.Image(type="pil", label="Input Image")
@@ -205,7 +227,11 @@ gr_description = "Detect and estimate the pose of animals in camera trap images,
205
  # examples = [['data/Macropod.jpg'], ['data/koala2.jpg'],['data/cat.jpg'],['data/BrushtailPossum.jpg']]
206
 
207
  gr.Interface(predict_pipeline,
208
- inputs=[gr_image_input,gr_slider_conf_bboxes,gr_slider_conf_keypoints],
 
 
 
 
209
  outputs=gr_image_output,
210
  title=gr_title,
211
  description=gr_description,
 
112
 
113
  def predict_dlc(list_np_crops,
114
  kpts_likelihood_th,
115
+ DLCmodel,
116
+ dlc_proc):
117
 
118
  # run dlc thru list of crops
119
  dlc_live = DLCLive(DLCmodel, processor=dlc_proc)
 
133
 
134
 
135
  def predict_pipeline(img_input,
136
+ model_input_str,
137
+ flag_dlc_only,
138
  bbox_likelihood_th,
139
  kpts_likelihood_th):
140
 
141
+ if model_input_str == 'full_cat':
142
+ path_to_DLCmodel = "DLC_models/DLC_Cat_resnet_50_iteration-0_shuffle-0"
143
+ elif model_input_str == 'full_dog':
144
+ path_to_DLCmodel = "DLC_models/DLC_Dog_resnet_50_iteration-0_shuffle-0"
145
 
146
+ # ### Run Megadetector
147
  md_results = predict_md(img_input) #Image.fromarray(results.imgs[0])
148
 
149
  # Obtain animal crops with confidence above th
 
153
  # Run DLC
154
  # TODO: add llk threshold for kpts too?
155
  dlc_proc = Processor()
156
+ if flag_dlc_only:
157
+ # compute kpts on input img
158
+ list_kpts_per_crop = predict_dlc([np.asarray(img_input)],#list_crops,--------
159
+ kpts_likelihood_th,
160
+ path_to_DLCmodel,
161
+ dlc_proc)
162
+ # draw kpts on input img
163
+ draw_keypoints_on_image(img_input,
164
+ list_kpts_per_crop[0], # a numpy array with shape [num_keypoints, 2].
 
 
 
 
 
 
 
 
165
  color='red',
166
  radius=2,
167
+ use_normalized_coordinates=False)
168
+ return img_input
169
+
170
+ else:
171
+ # Compute kpts for each crop
172
+ list_kpts_per_crop = predict_dlc(list_crops,
173
+ kpts_likelihood_th,
174
+ path_to_DLCmodel,
175
+ dlc_proc)
176
+
177
+ # Produce final image
178
+ img_background = Image.fromarray(md_results.imgs[0]) # img_input?
179
+ for ic, (np_crop, kpts_crop) in enumerate(zip(list_crops,
180
+ list_kpts_per_crop)):
181
+
182
+ ## Draw keypts on crop
183
+ img_crop = Image.fromarray(np_crop)
184
+ draw_keypoints_on_image(img_crop,
185
+ kpts_crop, # a numpy array with shape [num_keypoints, 2].
186
+ color='red',
187
+ radius=2,
188
+ use_normalized_coordinates=False) # if True, then I should use md_results.xyxyn
189
+
190
+ ## Paste crop in original image
191
+ # https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.paste
192
+ img_background.paste(img_crop,
193
+ box = tuple([int(math.floor(t)) for t in md_results.xyxy[0][ic,:2]]))
194
+
195
+ return img_background #Image.fromarray(list_crops[0]) #Image.fromarray(md_results.imgs[0]) #list_annotated_crops #
196
 
197
 
198
  ##########################################################
 
206
  # Create user interface and launch
207
  gr_image_input = gr.inputs.Image(type="pil", label="Input Image")
208
  gr_image_output = gr.outputs.Image(type="pil", label="Output Image")
209
+ gr_dlc_model_input = gr.inputs.Dropdown(choices=['full_cat','full_dog'], # choices
210
+ default='full_cat', # default option
211
+ type='value', # Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
212
+ label='Select DLC model')
213
+ gr_dlc_only_checkbox = gr.inputs.Checkbox(False,
214
+ label='Run DLClive only, directly on input image?')
215
+
216
  gr_slider_conf_bboxes = gr.inputs.Slider(0,1,.05,0.8,
217
+ label='Set confidence threshold for animal detections')
218
  gr_slider_conf_keypoints = gr.inputs.Slider(0,1,.05,0,
219
  label='Set confidence threshold for keypoints')
220
  #image = gr.inputs.Image(type="pil", label="Input Image")
 
227
  # examples = [['data/Macropod.jpg'], ['data/koala2.jpg'],['data/cat.jpg'],['data/BrushtailPossum.jpg']]
228
 
229
  gr.Interface(predict_pipeline,
230
+ inputs=[gr_image_input,
231
+ gr_dlc_model_input,
232
+ gr_dlc_only_checkbox,
233
+ gr_slider_conf_bboxes,
234
+ gr_slider_conf_keypoints],
235
  outputs=gr_image_output,
236
  title=gr_title,
237
  description=gr_description,