import os
import sys
import gradio as gr
import torch
from PIL import Image

# Clone the official YOLOv7 repo and make its modules importable (Model, select_device).
os.system('git clone https://github.com/WongKinYiu/yolov7')
sys.path.append('./yolov7')
from models.yolo import Model
from utils.torch_utils import select_device

def detect(inp):
    # Shell out to the repo's detect.py; annotated results are written under ./yolov7/runs/detect/exp
    os.system(f'python ./yolov7/detect.py --weights best.pt --conf 0.25 --img-size 640 --source {inp} --project ./yolov7/runs/detect')
    otp = os.path.basename(inp)
    return f"./yolov7/runs/detect/exp/{otp}"
def custom(path_or_model='path/to/model.pt', autoshape=True):
    """Custom model loader (hubconf-style).
    Arguments (3 options):
        path_or_model (str): 'path/to/model.pt'
        path_or_model (dict): torch.load('path/to/model.pt')
        path_or_model (nn.Module): torch.load('path/to/model.pt')['model']
    Returns:
        pytorch model
    """
    # Load on CPU first so GPU-trained checkpoints also work on CPU-only hardware.
    model = torch.load(path_or_model, map_location='cpu') if isinstance(path_or_model, str) else path_or_model  # load checkpoint
    if isinstance(model, dict):
        model = model['ema' if model.get('ema') else 'model']  # load model

    hub_model = Model(model.yaml).to(next(model.parameters()).device)  # create
    hub_model.load_state_dict(model.float().state_dict())  # load state_dict
    hub_model.names = model.names  # class names
    if autoshape:
        hub_model = hub_model.autoshape()  # for file/URI/PIL/cv2/np inputs and NMS
    device = select_device('0' if torch.cuda.is_available() else 'cpu')  # default to GPU if available
    return hub_model.to(device)
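
# A minimal sketch of the three documented ways to call the loader; 'best.pt' is
# assumed to be this Space's custom-trained pothole checkpoint next to app.py:
#   custom('best.pt')                                            # path on disk
#   custom(torch.load('best.pt', map_location='cpu'))            # loaded checkpoint dict
#   custom(torch.load('best.pt', map_location='cpu')['model'])   # bare nn.Module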
model = custom(path_or_model='best.pt')

def detect1(inp):
    # g = (size / max(inp.size))  # gain
    # im = im.resize((int(x * g) for x in im.size), Image.ANTIALIAS)  # resize
    results = model(inp, size=640)  # inference; the autoshaped model accepts a file path
    results.render()  # updates results.imgs with boxes and labels
    return Image.fromarray(results.imgs[0])
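
# Quick sanity check outside the UI (hypothetical image path):
#   detect1('sample.jpg').save('sample_annotated.jpg')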

inp = gr.inputs.Image(type="filepath", label="Input")
# detect1 returns a PIL image, so the output component must use type="pil".
output = gr.outputs.Image(type="pil", label="Output Image")

io = gr.Interface(fn=detect1, inputs=inp, outputs=output,
                  title='Pothole Detection With Custom YOLOv7')
# examples=[["Examples/img-300_jpg.rf.6b7b035dff1cda092ce3dc22be8d0135.jpg"]] can be
# passed to gr.Interface once the example image is included in the repo.

# debug=True keeps the process in the foreground and prints tracebacks to the logs;
# share=False because the Space itself already serves the app publicly.
io.launch(debug=True, share=False)