Ahsen Khaliq committed on
Commit 57e5af9 · 1 Parent(s): 67fe755

Update app.py

Files changed (1)
  1. app.py +12 -6
app.py CHANGED
@@ -17,11 +17,17 @@ face2paint = torch.hub.load(
     size=512, device="cuda",side_by_side=False
 )
 
-def inference(img, ver):
-    if ver == 'version 2 (🔺 robustness,🔻 stylization)':
-        out = face2paint(model2, img)
-    else:
-        out = face2paint(model1, img)
+def inference(img, ver,option,imgweb):
+    if option == 'webcam':
+        if ver == 'version 2 (🔺 robustness,🔻 stylization)':
+            out = face2paint(model2, imgweb)
+        else:
+            out = face2paint(model1, imgweb)
+    else:
+        if ver == 'version 2 (🔺 robustness,🔻 stylization)':
+            out = face2paint(model2, img)
+        else:
+            out = face2paint(model1, img)
     return out
 
 title = "Animeganv2"
@@ -32,5 +38,5 @@ article = "<p style='text-align: center'><a href='https://github.com/bryandlee/a
 
 examples=[['groot.jpeg','version 2 (🔺 robustness,🔻 stylization)'],['bill.png','version 1 (🔺 stylization, 🔻 robustness)'],['tony.png','version 1 (🔺 stylization, 🔻 robustness)'],['elon.png','version 2 (🔺 robustness,🔻 stylization)'],['IU.png','version 1 (🔺 stylization, 🔻 robustness)'],['billie.png','version 2 (🔺 robustness,🔻 stylization)'],['will.png','version 2 (🔺 robustness,🔻 stylization)'],['beyonce.jpeg','version 1 (🔺 stylization, 🔻 robustness)'],['gongyoo.jpeg','version 1 (🔺 stylization, 🔻 robustness)']]
 
-gr.Interface(inference, [gr.inputs.Image(type="pil"),gr.inputs.Radio(['version 1 (🔺 stylization, 🔻 robustness)','version 2 (🔺 robustness,🔻 stylization)'], type="value", default='version 2 (🔺 robustness,🔻 stylization)', label='version')
+gr.Interface(inference, [gr.inputs.Image(type="pil",label='Input Image'),gr.inputs.Radio(['version 1 (🔺 stylization, 🔻 robustness)','version 2 (🔺 robustness,🔻 stylization)'], type="value", default='version 2 (🔺 robustness,🔻 stylization)', label='version'),gr.inputs.Radio(['webcam','image upload'], type="value", default='webcam', label='Source'),gr.inputs.Image(type="pil",label='Input Webcam',source='webcam')
 ], gr.outputs.Image(type="pil"),title=title,description=description,article=article,enable_queue=True,examples=examples,allow_flagging=False).launch()
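
For readers following along, here is a minimal, self-contained sketch of how the updated pieces fit together. Only the `inference` function and the `gr.Interface` call appear in the hunks above; the `torch.hub.load` calls for `model1`, `model2`, and `face2paint` are assumptions inferred from the visible context line and the bryandlee/animegan2-pytorch hub entry points, and may differ from the actual app.py.

```python
# Minimal sketch, not the full app.py: the hub-loading calls below are assumptions
# inferred from the "face2paint = torch.hub.load(" context line in the diff.
import torch
import gradio as gr

device = "cuda"  # the diff passes device="cuda" to face2paint

# Assumed: v1/v2 generators and the face2paint helper from bryandlee/animegan2-pytorch.
model1 = torch.hub.load("bryandlee/animegan2-pytorch:main", "generator",
                        pretrained="face_paint_512_v1", device=device)
model2 = torch.hub.load("bryandlee/animegan2-pytorch:main", "generator",
                        pretrained="face_paint_512_v2", device=device)
face2paint = torch.hub.load("bryandlee/animegan2-pytorch:main", "face2paint",
                            size=512, device=device, side_by_side=False)

def inference(img, ver, option, imgweb):
    # 'option' (the new Source radio) picks between the webcam frame and the uploaded image;
    # 'ver' still selects the v1 or v2 generator, mirroring the branches in the diff.
    source = imgweb if option == 'webcam' else img
    model = model2 if ver == 'version 2 (🔺 robustness,🔻 stylization)' else model1
    return face2paint(model, source)
```

The four inputs in the updated `gr.Interface` call (input image, version radio, source radio, webcam image) map positionally onto `inference`'s four parameters, so the function only has to choose a source frame and a generator before calling `face2paint`.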