import gradio as gr
import os

os.system("pip -qq install yoloxdetect==0.0.7")

import torch
from yoloxdetect import YoloxDetector

# Download sample images used in the examples gallery
torch.hub.download_url_to_file('https://tochkanews.ru/wp-content/uploads/2020/09/0.jpg', '1.jpg')
torch.hub.download_url_to_file('https://s.rdrom.ru/1/pubs/4/35893/1906770.jpg', '2.jpg')
torch.hub.download_url_to_file('https://static.mk.ru/upload/entities/2022/04/17/07/articles/detailPicture/5b/39/28/b6/ffb1aa636dd62c30e6ff670f84474f75.jpg', '3.jpg')


def yolox_inference(
    image_path: gr.inputs.Image = None,
    model_path: gr.inputs.Dropdown = 'kadirnar/yolox_s-v0.1.1',
    config_path: gr.inputs.Textbox = 'configs.yolox_s',
    image_size: gr.inputs.Slider = 640,
):
    """
    YOLOX inference function

    Args:
        image_path: Path to the input image
        model_path: Hugging Face Hub path of the model
        config_path: Path to the config file
        image_size: Image size

    Returns:
        Rendered image
    """
    model = YoloxDetector(model_path, config_path=config_path, device="cpu", hf_model=True)
    pred = model.predict(image_path=image_path, image_size=image_size)
    return pred


inputs = [
    gr.inputs.Image(type="filepath", label="Input Image"),
    gr.inputs.Dropdown(
        label="Model Path",
        choices=[
            "kadirnar/yolox_s-v0.1.1",
            "kadirnar/yolox_m-v0.1.1",
            "kadirnar/yolox_tiny-v0.1.1",
        ],
        default="kadirnar/yolox_s-v0.1.1",
    ),
    gr.inputs.Dropdown(
        label="Config Path",
        choices=[
            "configs.yolox_s",
            "configs.yolox_m",
            "configs.yolox_tiny",
        ],
        default="configs.yolox_s",
    ),
    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
]

outputs = gr.outputs.Image(type="filepath", label="Output Image")
title = "YOLOX is a high-performance anchor-free YOLO."

examples = [
    ["1.jpg", "kadirnar/yolox_m-v0.1.1", "configs.yolox_m", 640],
    ["2.jpg", "kadirnar/yolox_s-v0.1.1", "configs.yolox_s", 640],
    ["3.jpg", "kadirnar/yolox_tiny-v0.1.1", "configs.yolox_tiny", 640],
]

# Gradio interface definition
demo_app = gr.Interface(
    fn=yolox_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,
    theme='huggingface',
)
demo_app.launch(debug=True, enable_queue=True)