from PIL import Image
import torch
import gradio as gr

# Generator used for "version 2" (default pretrained weights)
model2 = torch.hub.load(
    "bryandlee/animegan2-pytorch:main",
    "generator",
    pretrained=True,  # or give a URL to a pretrained model
    device="cuda",
    progress=False,
)

# Generator used for "version 1" (face_paint_512_v1 weights)
model1 = torch.hub.load("bryandlee/animegan2-pytorch:main", "generator", pretrained="face_paint_512_v1", device="cuda")

# Helper that preprocesses a portrait and runs it through a generator
face2paint = torch.hub.load(
    "bryandlee/animegan2-pytorch:main",
    "face2paint",
    size=512,
    device="cuda",
)
def inference(img, ver):
    # Route the uploaded image to the generator matching the selected version
    if ver == 'version 2':
        out = face2paint(model2, img)
    else:
        out = face2paint(model1, img)
    return out
title = "AnimeGANv2"
description = "Gradio demo for AnimeGANv2 Face Portrait v2. To use it, simply upload your image or click one of the examples to load it. Read more at the links below. For best results, use a cropped portrait picture similar to the examples below."
article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a> | <a href='https://github.com/Kazuhito00/AnimeGANv2-ONNX-Sample' target='_blank'>Github Repo ONNX</a></p><p style='text-align: center'>samples from repo: <img src='https://user-images.githubusercontent.com/26464535/129888683-98bb6283-7bb8-4d1a-a04a-e795f5858dcf.gif' alt='animation'/> <img src='https://user-images.githubusercontent.com/26464535/137619176-59620b59-4e20-4d98-9559-a424f86b7f24.jpg' alt='animation'/></p>"
examples=[['groot.jpeg','version 2'],['bill.png','version 1'],['tony.png','version 1'],['elon.png','version 2'],['IU.png','version 1'],['billie.png','version 2'],['will.png','version 2'],['beyonce.jpeg','version 1'],['gongyoo.jpeg','version 1']]

gr.Interface(
    inference,
    [
        gr.inputs.Image(type="pil"),
        gr.inputs.Radio(['version 1', 'version 2'], type="value", default='version 2', label='version'),
    ],
    gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples,
).launch()
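
# Optional local sanity check (a minimal sketch, not part of the Space): it
# assumes a local portrait file named "portrait.jpg" (hypothetical) and that
# the models above loaded successfully; it bypasses the Gradio UI entirely.
#
#     img = Image.open("portrait.jpg").convert("RGB")
#     inference(img, "version 2").save("portrait_anime.png")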