subhannadeem1 commited on
Commit
3c217ca
·
1 Parent(s): eb6d3d6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +84 -43
app.py CHANGED
@@ -1,56 +1,97 @@
 
 
1
  import torch
2
- import os
3
- from PIL import Image
4
- from torchvision import transforms
5
- import gradio as gr
6
 
7
- os.system("wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt")
8
 
 
9
 
10
- model = torch.hub.load('pytorch/vision:v0.9.0', 'inception_v3', pretrained=True)
11
- model.eval()
 
 
 
 
 
 
12
 
13
- torch.hub.download_url_to_file("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
 
 
 
 
 
 
 
 
 
14
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
- # sample execution (requires torchvision)
17
- def inference(input_image):
18
- preprocess = transforms.Compose([
19
- transforms.Resize(299),
20
- transforms.CenterCrop(299),
21
- transforms.ToTensor(),
22
- transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
23
- ])
24
- input_tensor = preprocess(input_image)
25
- input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
26
 
27
- # move the input and model to GPU for speed if available
28
- if torch.cuda.is_available():
29
- input_batch = input_batch.to('cuda')
30
- model.to('cuda')
31
 
 
32
  with torch.no_grad():
33
- output = model(input_batch)
34
- # The output has unnormalized scores. To get probabilities, you can run a softmax on it.
35
- probabilities = torch.nn.functional.softmax(output[0], dim=0)
36
- # Read the categories
37
- with open("imagenet_classes.txt", "r") as f:
38
- categories = [s.strip() for s in f.readlines()]
39
- # Show top categories per image
40
- top5_prob, top5_catid = torch.topk(probabilities, 5)
41
- result = {}
42
- for i in range(top5_prob.size(0)):
43
- result[categories[top5_catid[i]]] = top5_prob[i].item()
44
- return result
45
-
46
- inputs = gr.inputs.Image(type='pil')
47
- outputs = gr.outputs.Label(type="confidences",num_top_classes=5)
48
-
49
- title = "INCEPTION V3"
50
- description = "Gradio demo for INCEPTION V3, a famous ConvNet trained on Imagenet from 2015. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
51
- article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1512.00567'>Rethinking the Inception Architecture for Computer Vision</a> | <a href='https://github.com/pytorch/vision/blob/master/torchvision/models/inception.py'>Github Repo</a></p>"
 
 
52
 
53
  examples = [
54
- ['dog.jpg']
 
 
 
 
 
 
 
 
55
  ]
56
- gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples, analytics_enabled=False).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
  import torch
4
+ import torch.nn.functional as F
5
+ from torchvision import models, transforms
 
 
6
 
7
+ import warnings
8
 
9
+ warnings.filterwarnings("ignore")
10
 
11
+ # εŠ θ½½ζ¨‘εž‹
12
+ models_dict = {
13
+ 'DeepLabv3': models.segmentation.deeplabv3_resnet50(pretrained=True).eval(),
14
+ 'DeepLabv3+': models.segmentation.deeplabv3_resnet101(pretrained=True).eval(),
15
+ 'FCN-ResNet50': models.segmentation.fcn_resnet50(pretrained=True).eval(),
16
+ 'FCN-ResNet101': models.segmentation.fcn_resnet101(pretrained=True).eval(),
17
+ 'LRR': models.segmentation.lraspp_mobilenet_v3_large(pretrained=True).eval(),
18
+ }
19
 
20
+ # 图像钄倄理
21
+ image_transforms = transforms.Compose([
22
+ transforms.Resize(256),
23
+ transforms.CenterCrop(224),
24
+ transforms.ToTensor(),
25
+ transforms.Normalize(
26
+ mean=[0.485, 0.456, 0.406],
27
+ std=[0.229, 0.224, 0.225]
28
+ )
29
+ ])
30
 
31
+ def download_test_img():
32
+ # Images
33
+ torch.hub.download_url_to_file(
34
+ 'https://user-images.githubusercontent.com/59380685/266264420-21575a83-4057-41cf-8a4a-b3ea6f332d79.jpg',
35
+ 'bus.jpg')
36
+ torch.hub.download_url_to_file(
37
+ 'https://user-images.githubusercontent.com/59380685/266264536-82afdf58-6b9a-4568-b9df-551ee72cb6d9.jpg',
38
+ 'dogs.jpg')
39
+ torch.hub.download_url_to_file(
40
+ 'https://user-images.githubusercontent.com/59380685/266264600-9d0c26ca-8ba6-45f2-b53b-4dc98460c43e.jpg',
41
+ 'zidane.jpg')
42
 
43
+ def predict_segmentation(image, model_name):
 
 
 
 
 
 
 
 
 
44
 
45
+ # 图像钄倄理
46
+ image_tensor = image_transforms(image).unsqueeze(0)
 
 
47
 
48
+ # ζ¨‘εž‹ζŽ¨η†
49
  with torch.no_grad():
50
+ output = models_dict[model_name](image_tensor)['out'][0]
51
+ output_predictions = output.argmax(0)
52
+ segmentation = F.interpolate(
53
+ output.float().unsqueeze(0),
54
+ size=image.size[::-1],
55
+ mode='bicubic',
56
+ align_corners=False
57
+ )[0].argmax(0).numpy()
58
+
59
+ # εˆ†ε‰²ε›Ύ
60
+ segmentation_image = np.uint8(segmentation)
61
+ segmentation_image = cv2.applyColorMap(segmentation_image, cv2.COLORMAP_JET)
62
+
63
+ # θžεˆε›Ύ
64
+ blend_image = cv2.addWeighted(np.array(image), 0.5, segmentation_image, 0.5, 0)
65
+ blend_image = cv2.cvtColor(blend_image, cv2.COLOR_BGR2RGB)
66
+
67
+ return segmentation_image, blend_image
68
+
69
+
70
+ import gradio as gr
71
 
72
  examples = [
73
+ ['bus.jpg', 'DeepLabv3'],
74
+ ['dogs.jpg', 'DeepLabv3'],
75
+ ['zidane.jpg', 'DeepLabv3']
76
+ ]
77
+ download_test_img()
78
+ model_list = ['DeepLabv3', 'DeepLabv3+', 'FCN-ResNet50', 'FCN-ResNet101', 'LRR']
79
+ inputs = [
80
+ gr.inputs.Image(type='pil', label='εŽŸε§‹ε›Ύεƒ'),
81
+ gr.inputs.Dropdown(model_list, label='ι€‰ζ‹©ζ¨‘εž‹', default='DeepLabv3')
82
  ]
83
+ outputs = [
84
+ gr.outputs.Image(type='pil',label='εˆ†ε‰²ε›Ύ'),
85
+ gr.outputs.Image(type='pil',label='θžεˆε›Ύ')
86
+ ]
87
+ interface = gr.Interface(
88
+ predict_segmentation,
89
+ inputs,
90
+ outputs,
91
+ examples=examples,
92
+ capture_session=True,
93
+ title='torchvision-segmentation-webui',
94
+ description='torchvision segmentation webui on gradio'
95
+ )
96
+
97
+ interface.launch()