Pranomvignesh committed on
Commit d8c5837
1 Parent(s): 40dcf79

Added title and examples

.gitignore ADDED
@@ -0,0 +1 @@
+ .DS_Store
app.py CHANGED
@@ -1,67 +1,65 @@
  import gradio as gr
- import torch
  import yolov5
  from transformers import pipeline

- pipeline = pipeline(task="image-classification", model="PranomVignesh/Police-vs-Public")

- # from transformers import AutoFeatureExtractor, AutoModelForImageClassification

- # extractor = AutoFeatureExtractor.from_pretrained("PranomVignesh/Police-vs-Public")
- # model = AutoModelForImageClassification.from_pretrained("PranomVignesh/Police-vs-Public")
-
- # Images
- # torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
- # torch.hub.download_url_to_file('https://raw.githubusercontent.com/WongKinYiu/yolov7/main/inference/images/image3.jpg', 'image3.jpg')
-
- def yolov5_inference(
-     image
- ):
-     """
-     YOLOv5 inference function
-     Args:
-         image: Input image
-         model_path: Path to the model
-         image_size: Image size
-         conf_threshold: Confidence threshold
-         iou_threshold: IOU threshold
-     Returns:
-         Rendered image
-     """
      model = yolov5.load('./best.pt', device="cpu")
      results = model([image], size=224)

-     # outputs = model(**inputs)
-     # logits = outputs.logits
-     # probabilities = torch.softmax(logits, dim=1).tolist()[0]

-     # classes = ['Police/Authorized Personnel', 'Public/Unauthorized Person']

-     # output = {name: float(prob) for name, prob in zip(classes, probabilities)}

-     probabilities = pipeline(image)
-     output = {p["label"]: p["score"] for p in probabilities}

-     return results.render()[0],output
-

- inputs = gr.Image(type="pil")
  outputs = [
-     gr.Image(type="pil"),
-     gr.Label()
  ]
- title = "Detection"
- description = "YOLOv5 is a family of object detection models pretrained on COCO dataset. This model is a pip implementation of the original YOLOv5 model."

- # examples = [['zidane.jpg', 'yolov5s.pt', 640, 0.25, 0.45], ['image3.jpg', 'yolov5s.pt', 640, 0.25, 0.45]]
- demo_app = gr.Interface(
-     fn=yolov5_inference,
      inputs=inputs,
      outputs=outputs,
      title=title,
-     # examples=examples,
-     # cache_examples=True,
-     # live=True,
-     # theme='huggingface',
  )
- demo_app.launch(debug=True, enable_queue=True)
 
  import gradio as gr
  import yolov5
+ import os
  from transformers import pipeline

+ imageClassifier = pipeline(task="image-classification",
+                            model="PranomVignesh/Police-vs-Public")


+ def predict(image):
      model = yolov5.load('./best.pt', device="cpu")
      results = model([image], size=224)

+     predictions = imageClassifier(image)
+     classMappings = {
+         'police': "Police / Authorized Personnel",
+         'public': 'Unauthorized Person'
+     }
+     output = {}
+     for item in predictions:
+         output[classMappings[item['label']]] = item['score']

+     return results.render()[0], output


+ title = "Detecting Unauthorized Individuals with Firearms"
+
+ examples = [
+     []
+ ]

+ title = "Detecting Unauthorized Individuals with Firearms"
+ description = """
+     Try the examples at bottom to get started.
+ """
+ examples = [[
+     os.path.join(os.path.abspath(''), './examples/sample_1.png'),
+     os.path.join(os.path.abspath(''), './examples/sample_2.png'),
+     os.path.join(os.path.abspath(''), './examples/sample_3.png'),
+     os.path.join(os.path.abspath(''), './examples/sample_4.png'),
+     os.path.join(os.path.abspath(''), './examples/sample_5.png'),
+     os.path.join(os.path.abspath(''), './examples/sample_6.png'),
+     os.path.join(os.path.abspath(''), './examples/sample_7.png'),
+     os.path.join(os.path.abspath(''), './examples/sample_8.png'),
+ ]]

+ inputs = gr.Image(type="pil", shape=(224, 224),
+                   label="Upload your image for detection")
  outputs = [
+     gr.Image(type="pil", label="Gun Detections"),
+     gr.Label(label="Class Prediction")
  ]

+ interface = gr.Interface(
+     fn=predict,
      inputs=inputs,
      outputs=outputs,
      title=title,
+     examples=examples,
+     description=description,
+     cache_examples=True,
+     live=True,
+     theme='huggingface'
  )
+ interface.launch(debug=True, enable_queue=True)
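
For context on what the new predict() wires together, here is a minimal standalone sketch, assuming the yolov5 pip package, transformers, and Pillow are installed and that best.pt and the committed examples/ images sit in the working directory; the chosen sample file and output filename are illustrative only and not part of this commit.

import yolov5
from PIL import Image
from transformers import pipeline

# Same two models app.py loads: the custom YOLOv5 weights shipped with the Space
# and the Police-vs-Public classifier pulled from the Hugging Face Hub.
detector = yolov5.load('./best.pt', device="cpu")
classifier = pipeline(task="image-classification", model="PranomVignesh/Police-vs-Public")

image = Image.open('./examples/sample_1.png')    # hypothetical pick from the committed examples

results = detector([image], size=224)
rendered = results.render()[0]                   # input image with the detected boxes drawn in
scores = {p['label']: p['score'] for p in classifier(image)}   # labels 'police' / 'public'

Image.fromarray(rendered).save('detections.png') # render() yields a numpy array, not a PIL image
print(scores)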
examples/sample_1.png ADDED
examples/sample_2.png ADDED
examples/sample_3.jpg ADDED
examples/sample_4.jpg ADDED
examples/sample_5.jpg ADDED
examples/sample_6.jpg ADDED
examples/sample_7.jpg ADDED
examples/sample_8.jpg ADDED
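
One detail about the example wiring above: app.py lists all eight examples with a .png extension, while sample_3 through sample_8 are committed as .jpg, and gr.Interface generally expects one inner list per example row rather than a single nested list of all paths. A small hedged sketch (the glob pattern and variable names are illustrative, not part of the commit) that builds the examples list from whatever files are actually present:

import glob
import os

# Pick up every committed sample regardless of extension, one example row per file.
example_dir = os.path.join(os.path.abspath(''), 'examples')
examples = [[path] for path in sorted(glob.glob(os.path.join(example_dir, 'sample_*.*')))]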