import os
import random
import sys

import cv2
import gradio as gr
import numpy as np
import torch

# Fetch the YOLOv7 repo so detect.py and its helper modules are available.
os.system('git clone https://github.com/WongKinYiu/yolov7.git')
sys.path.append('./yolov7')  # make the cloned repo importable

from models.experimental import attempt_load
from utils.datasets import letterbox
from utils.general import check_img_size, non_max_suppression, scale_coords, set_logging
from utils.plots import plot_one_box
from utils.torch_utils import select_device, time_synchronized
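# Assumption: the custom-trained weights file best.pt and the Examples/ folder
# ship alongside this script in the Space repository.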
def detect(inp):
    # Shell out to YOLOv7's own inference script. --exist-ok keeps every run in
    # the same ./yolov7/runs/detect/exp folder instead of exp2, exp3, ...
    os.system(f'python ./yolov7/detect.py --weights best.pt --conf 0.25 --img-size 640 '
              f'--source {inp} --project ./yolov7/runs/detect --exist-ok')
    otp = os.path.basename(inp)
    return f"./yolov7/runs/detect/exp/{otp}"
classes_to_filter = None  # e.g. a list of class names to keep; None disables filtering

opt = {
    "weights": "best.pt",         # path to the custom-trained weights file
    "yaml": "custom.yaml",        # dataset config (kept for reference; unused below)
    "img-size": 640,              # inference image size
    "conf-thres": 0.25,           # confidence threshold for inference
    "iou-thres": 0.45,            # NMS IoU threshold for inference
    "device": '0',                # device to run on, e.g. '0' or '0,1,2,3' or 'cpu'
    "classes": classes_to_filter  # list of class names to keep, or None
}
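# Example (assumption: the custom model exposes a class named 'pothole'):
#   classes_to_filter = ['pothole']
# would make detect2() keep only pothole detections after NMS.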
def detect2(inp):
    with torch.no_grad():
        weights, imgsz = opt['weights'], opt['img-size']
        set_logging()
        device = select_device(opt['device'])
        half = device.type != 'cpu'  # half precision only supported on CUDA
        model = attempt_load(weights, map_location=device)  # load FP32 model
        stride = int(model.stride.max())  # model stride
        imgsz = check_img_size(imgsz, s=stride)  # check img_size is a multiple of stride
        if half:
            model.half()
        names = model.module.names if hasattr(model, 'module') else model.names
        colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
        if device.type != 'cpu':
            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # warm-up

        # Preprocess: letterbox-resize, BGR->RGB, HWC->CHW, normalise to [0, 1]
        img0 = cv2.imread(inp)
        img = letterbox(img0, imgsz, stride=stride)[0]
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)  # add batch dimension
        # Inference
        t1 = time_synchronized()
        pred = model(img, augment=False)[0]

        # Apply NMS, optionally restricting output to the classes in opt['classes']
        classes = None
        if opt['classes']:
            classes = [names.index(class_name) for class_name in opt['classes']]
        pred = non_max_suppression(pred, opt['conf-thres'], opt['iou-thres'], classes=classes, agnostic=False)
        t2 = time_synchronized()
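        # pred is a list with one tensor per image; each row holds
        # [x1, y1, x2, y2, confidence, class_index] in letterboxed coordinates.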
        # Draw boxes on the original image and assemble a per-class summary string
        for i, det in enumerate(pred):
            s = ''
            s += '%gx%g ' % img.shape[2:]  # print string
            gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]  # normalisation gain whwh (unused here)
            if len(det):
                # Rescale boxes from the letterboxed size back to the original image
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
                for *xyxy, conf, cls in reversed(det):
                    label = f'{names[int(cls)]} {conf:.2f}'
                    plot_one_box(xyxy, img0, label=label, color=colors[int(cls)], line_thickness=3)
        # cv2 images are BGR; convert so the Gradio PIL output shows correct colours
        return cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)
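# Minimal local sanity check, assuming the example image below exists:
#   annotated = detect2('Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg')
#   cv2.imwrite('annotated.jpg', cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))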
# Gradio UI: takes an image file path, returns the annotated image
inp = gr.inputs.Image(type="filepath", label="Input")
out = gr.outputs.Image(type="pil", label="Output Image")

io = gr.Interface(fn=detect2, inputs=inp, outputs=out,
                  title='Pot Hole Detection With Custom YOLOv7',
                  examples=[["Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg"]])
io.launch(debug=True, share=False)