# Hugging Face Space: arbitrary image style transfer demo.
# (The scraped page header reported "Runtime error" — see the undefined
# tensor_to_image fix in stylize below.)
import gradio as gr
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pylab as plt
import numpy as np

# Load the Magenta arbitrary-style-transfer model once at startup.
# NOTE: this fetches the model over the network the first time it runs.
hub_model = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')
def stylize(content_image_path, style_image_path):
    """Apply the style of the second image to the first.

    Args:
        content_image_path: content image — a file path or an already-decoded
            HxWxC numpy array (Gradio's "image" input passes an array).
        style_image_path: style image — same accepted forms.

    Returns:
        A uint8 HxWx3 numpy array containing the stylized content image.
    """
    # Accept both file paths and in-memory arrays; the original code assumed
    # paths, but Gradio "image" inputs deliver numpy arrays.
    if isinstance(content_image_path, str):
        content_image = plt.imread(content_image_path)
    else:
        content_image = content_image_path
    if isinstance(style_image_path, str):
        style_image = plt.imread(style_image_path)
    else:
        style_image = style_image_path
    # Add a batch dimension and normalize to [0, 1], as the model expects.
    content_image = content_image.astype(np.float32)[np.newaxis, ...] / 255.
    style_image = style_image.astype(np.float32)[np.newaxis, ...] / 255.
    # The model was trained with 256x256 style images; content can be any size.
    style_image = tf.image.resize(style_image, (256, 256))
    stylized_image = hub_model(tf.constant(content_image), tf.constant(style_image))[0]
    # BUG FIX: the original returned tensor_to_image(stylized_image), but no
    # such function is defined in this file (causing a NameError at runtime).
    # Convert the [0, 1] float tensor back to a uint8 image array inline.
    return np.squeeze(np.clip(stylized_image.numpy() * 255.0, 0, 255)).astype(np.uint8)
# Preprocessing notes (implemented in stylize above):
# - Images are converted to float32, given a batch dimension, and normalized
#   to the range [0, 1].
# - The style image is resized to 256x256, the size used when training the
#   style transfer network; the content image can be any size.
# Two image inputs (content, then style), one stylized-image output.
iface = gr.Interface(fn=stylize, inputs=["image", "image"], outputs="image")
iface.launch()