Update handler.py
handler.py  +14 -0

handler.py CHANGED
@@ -4,6 +4,8 @@ from diffusers import DPMSolverMultistepScheduler, StableDiffusionInpaintPipeline
 from PIL import Image
 import base64
 from io import BytesIO
+from datetime import datetime
+import time
 
 
 # set device
@@ -12,6 +14,12 @@ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 if device.type != 'cuda':
     raise ValueError("need to run on GPU")
 
+begin_runtime = datetime.now()
+
+def print_current_time(tag: str):
+    global begin_runtime
+    print(str(tag).upper() + ": " + str(datetime.now() - begin_runtime))
+
 class EndpointHandler():
     def __init__(self, path=""):
         # load StableDiffusionInpaintPipeline pipeline
@@ -43,6 +51,8 @@ class EndpointHandler():
         width = data.pop("width", None)
 
         # process image
+        print_current_time("Start decoding")
+
         if encoded_image is not None and encoded_mask_image is not None:
             image = self.decode_base64_image(encoded_image)
             mask_image = self.decode_base64_image(encoded_mask_image)
@@ -50,6 +60,8 @@ class EndpointHandler():
             image = None
             mask_image = None
 
+        print_current_time("Finish decoding")
+
         # run inference pipeline
         out = self.pipe(inputs,
             image=image,
@@ -69,6 +81,8 @@ class EndpointHandler():
             out.images[i].save(buffered, format="PNG")
             img_str = base64.b64encode(buffered.getvalue())
             json_imgs[f"{i}"] = img_str.decode()
+
+        print_current_time("Complete Stable diffusion")
         return json_imgs
 
     # helper to decode input image
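For reference, below is a minimal standalone sketch of the timing helper this commit adds, runnable outside the endpoint. The helper and tag names are taken from the diff above; the time.sleep call is only a hypothetical stand-in for the real decode and inference work, and the printed durations are illustrative.

from datetime import datetime
import time

# Timestamp captured at import time; every tag reports elapsed time relative to it.
begin_runtime = datetime.now()

def print_current_time(tag: str):
    # Prints "TAG: H:MM:SS.microseconds" measured from begin_runtime.
    print(str(tag).upper() + ": " + str(datetime.now() - begin_runtime))

if __name__ == "__main__":
    print_current_time("Start decoding")             # e.g. START DECODING: 0:00:00.000012
    time.sleep(0.5)                                  # hypothetical stand-in for decoding + pipeline work
    print_current_time("Complete Stable diffusion")  # e.g. COMPLETE STABLE DIFFUSION: 0:00:00.500321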