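# Gradio demo: pothole detection with a custom-trained YOLOv7 model.
# The upstream YOLOv7 repo is cloned at startup so its modules can be imported
# and its detect.py script can be invoked.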
import gradio as gr
import os
os.system('git clone https://github.com/WongKinYiu/yolov7.git')
def detect(inp):
    # Shell out to the cloned repo's detect.py (unused here; the Gradio app below calls detect2)
    os.system(f'python ./yolov7/detect.py --weights best.pt --conf 0.25 --img-size 640 --source {inp} --project ./yolov7/runs/detect')
    otp = inp.split('/')[-1]  # output file keeps the input's basename
    return f"./yolov7/runs/detect/exp/{otp}"
import argparse
from pathlib import Path
import cv2
import torch
import numpy as np
from numpy import random
import sys
sys.path.insert(0, './yolov7')  # make the cloned repo's models/ and utils/ packages importable
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, time_synchronized
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
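# Illustrative check of letterbox behaviour (a sketch, not part of the app):
# a 720x1280 frame is scaled to fit 640 on its long side, then padded so both
# dimensions land on multiples of the model stride.
#   padded, ratio, (dw, dh) = letterbox(np.zeros((720, 1280, 3), dtype=np.uint8), 640, stride=32)
#   assert padded.shape[0] % 32 == 0 and padded.shape[1] % 32 == 0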
classes_to_filter = None  # e.g. ['Pothole'] to keep only that class; None keeps all classes

opt = {
    "weights": "best.pt",          # path to the custom-trained weights file
    "yaml": "custom.yaml",
    "img-size": 640,               # default inference image size
    "conf-thres": 0.25,            # confidence threshold for inference
    "iou-thres": 0.45,             # NMS IoU threshold for inference
    "device": '0',                 # device to run the model on, i.e. 0 or 0,1,2,3 or cpu
    "classes": classes_to_filter   # list of class names to keep, or None
}
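# A CPU fallback for deployments without CUDA (an assumption about the Space's
# hardware, not part of the original configuration):
if not torch.cuda.is_available():
    opt['device'] = 'cpu'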
def detect2(inp):
    with torch.no_grad():
        weights, imgsz = opt['weights'], opt['img-size']
        set_logging()
        device = select_device(opt['device'])
        half = device.type != 'cpu'  # half precision only supported on CUDA

        model = attempt_load(weights, map_location=device)  # load FP32 model
        stride = int(model.stride.max())  # model stride
        imgsz = check_img_size(imgsz, s=stride)  # check img_size
        if half:
            model.half()

        names = model.module.names if hasattr(model, 'module') else model.names
        colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
        if device.type != 'cpu':  # GPU warm-up pass
            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))

        # Load and preprocess the input image
        img0 = cv2.imread(inp)
        img = letterbox(img0, imgsz, stride=stride)[0]
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        t1 = time_synchronized()
        pred = model(img, augment=False)[0]

        # Apply NMS; map class names to indices (non_max_suppression keeps only these classes)
        classes = None
        if opt['classes']:
            classes = [names.index(class_name) for class_name in opt['classes']]
        pred = non_max_suppression(pred, opt['conf-thres'], opt['iou-thres'], classes=classes, agnostic=False)
        t2 = time_synchronized()

        for i, det in enumerate(pred):  # detections per image
            s = ''
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            if len(det):
                # Rescale boxes from the padded inference size back to img0's size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
                for *xyxy, conf, cls in reversed(det):
                    label = f'{names[int(cls)]} {conf:.2f}'
                    plot_one_box(xyxy, img0, label=label, color=colors[int(cls)], line_thickness=3)
    return cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)  # Gradio expects an RGB array
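# Quick local check (a sketch; the example image below is the one shipped with
# the Space's Examples/ folder and is assumed to exist):
#   out = detect2("Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg")
#   cv2.imwrite("annotated.jpg", cv2.cvtColor(out, cv2.COLOR_RGB2BGR))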
inp = gr.inputs.Image(type="filepath", label="Input")
output = gr.outputs.Image(type="numpy", label="Output Image")  # detect2 returns a numpy array
#output = gr.outputs.Image(type="filepath", label="Output")
#gr.outputs.Textbox()
io = gr.Interface(fn=detect2, inputs=inp, outputs=output, title='Pothole Detection With Custom YOLOv7',
                  examples=[["Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg"]])
io.launch(debug=True, share=False)