Spaces:
Runtime error
Runtime error
mejoras threshold
Browse files
app.py
CHANGED
@@ -45,20 +45,34 @@ torch.hub.download_url_to_file('https://i.pinimg.com/originals/c2/ce/e0/c2cee056
|
|
45 |
model = torch.hub.load('ultralytics/yolov5', 'custom', path='./best.pt') # local model o google colab
|
46 |
#model = torch.hub.load('path/to/yolov5', 'custom', path='/content/yolov56.pt', source='local') # local repo
|
47 |
|
48 |
-
|
49 |
-
|
|
|
|
|
50 |
im = im.resize((int(x * g) for x in im.size), Image.ANTIALIAS) # resize
|
51 |
|
52 |
-
|
53 |
-
|
54 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
55 |
|
56 |
|
57 |
-
inputs = gr.inputs.Image(type='pil', label=" Imagen Original")
|
58 |
-
outputs = gr.outputs.Image(type="pil", label="Resultado")
|
59 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
60 |
title = 'Trampas Barceló'
|
61 |
-
|
62 |
description = """
|
63 |
<p>
|
64 |
<center>
|
@@ -67,16 +81,15 @@ Sistemas de Desarrollado por Subsecretaría de Innovación del Municipio de Vice
|
|
67 |
</center>
|
68 |
</p>
|
69 |
"""
|
70 |
-
|
71 |
-
article = "<p style='text-align: center'>YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset, and includes " \
|
72 |
-
"simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, " \
|
73 |
-
"and export to ONNX, CoreML and TFLite. <a href='https://colab.research.google.com/drive/1fbeB71yD09WK2JG9P3Ladu9MEzQ2rQad?usp=sharing'>Source code</a> |" \
|
74 |
-
"<a href='https://colab.research.google.com/drive/1FxaL8DcYgvjPrWfWruSA5hvk3J81zLY9?usp=sharing'>Colab Deploy</a> | <a href='https://github.com/ultralytics/yolov5'>PyTorch Hub</a></p>"
|
75 |
|
76 |
-
examples = [['ejemplo1.jpg'], ['ejemplo2.jpg']]
|
77 |
-
|
|
|
78 |
debug=True)
|
79 |
|
|
|
|
|
80 |
"""For YOLOv5 PyTorch Hub inference with **PIL**, **OpenCV**, **Numpy** or **PyTorch** inputs please see the full [YOLOv5 PyTorch Hub Tutorial](https://github.com/ultralytics/yolov5/issues/36).
|
81 |
|
82 |
|
|
|
45 |
model = torch.hub.load('ultralytics/yolov5', 'custom', path='./best.pt') # local model o google colab
|
46 |
#model = torch.hub.load('path/to/yolov5', 'custom', path='/content/yolov56.pt', source='local') # local repo
|
47 |
|
48 |
+
|
49 |
+
def yolo(size, iou, conf, im):
    """Gradio wrapper: resize the input image, run YOLOv5 inference and
    return the annotated result.

    Parameters
    ----------
    size : str
        Target length for the longest image side ('640' or '1280' from the
        Radio widget); converted with int().
    iou : float
        NMS IoU threshold, assigned onto the loaded model.
    conf : float
        Confidence threshold, assigned onto the loaded model.
    im : PIL.Image.Image
        Input image.

    Returns
    -------
    PIL.Image.Image
        Copy of the image with detection boxes and labels rendered.
    """
    g = int(size) / max(im.size)  # gain so the longest side becomes `size`
    # Build an explicit 2-tuple for resize(). Image.ANTIALIAS was removed in
    # Pillow 10 (AttributeError at import of this name); LANCZOS is the same
    # filter and has existed since Pillow 2.7, so this stays backward-compatible.
    new_size = tuple(int(x * g) for x in im.size)
    im = im.resize(new_size, Image.LANCZOS)  # resize
    model.iou = iou
    model.conf = conf
    results2 = model(im)  # inference
    results2.render()  # updates results2.imgs in place with boxes and labels
    return Image.fromarray(results2.imgs[0])
|
63 |
+
|
64 |
+
#------------ Interface-------------
|
65 |
|
66 |
|
|
|
|
|
67 |
|
68 |
+
in1 = gr.inputs.Radio(['640', '1280'], label="Tamaño de la imagen", default='640', type='value')
|
69 |
+
in2 = gr.inputs.Slider(minimum=0, maximum=1, step=0.05, default=0.45, label='NMS IoU threshold')
|
70 |
+
in3 = gr.inputs.Slider(minimum=0, maximum=1, step=0.05, default=0.50, label='Umbral o threshold')
|
71 |
+
in4 = gr.inputs.Image(type='pil', label="Original Image")
|
72 |
+
|
73 |
+
out2 = gr.outputs.Image(type="pil", label="YOLOv5")
|
74 |
+
#-------------- Text-----
|
75 |
title = 'Trampas Barceló'
|
|
|
76 |
description = """
|
77 |
<p>
|
78 |
<center>
|
|
|
81 |
</center>
|
82 |
</p>
|
83 |
"""
|
84 |
+
article = "<p style='text-align: center'><a href='https://docs.google.com/presentation/d/1T5CdcLSzgRe8cQpoi_sPB4U170551NGOrZNykcJD0xU/edit?usp=sharing' target='_blank'>Para mas info, click para ir al white paper</a></p><p style='text-align: center'><a href='https://drive.google.com/drive/folders/1owACN3HGIMo4zm2GQ_jf-OhGNeBVRS7l?usp=sharing ' target='_blank'>Google Colab Demo</a></p></center></p>"
|
|
|
|
|
|
|
|
|
85 |
|
86 |
+
# Example rows match the input order: (size, NMS IoU, confidence, image path).
examples = [['640', 0.45, 0.75, 'ejemplo1.jpg'],
            ['640', 0.45, 0.75, 'ejemplo2.jpg']]

# Build the interface first, then launch exactly once.
# The original chained .launch(debug=True) onto gr.Interface(...), so `iface`
# held launch()'s return value (not an Interface), and the later
# `iface.launch()` raised at runtime ('tuple' object has no attribute 'launch').
iface = gr.Interface(
    yolo,
    inputs=[in1, in2, in3, in4],
    outputs=out2,
    title=title,
    description=description,
    article=article,
    examples=examples,
    theme="huggingface",
    analytics_enabled=False,
)
iface.launch(debug=True)
|
92 |
+
|
93 |
"""For YOLOv5 PyTorch Hub inference with **PIL**, **OpenCV**, **Numpy** or **PyTorch** inputs please see the full [YOLOv5 PyTorch Hub Tutorial](https://github.com/ultralytics/yolov5/issues/36).
|
94 |
|
95 |
|