Update src/utils.py
src/utils.py  +97 -15
src/utils.py
CHANGED
@@ -1,10 +1,11 @@
+# utils.py
 from enum import Enum, auto

 import torch
-from huggingface_hub import
-
-
-
+from huggingface_hub import hf_hub_download
+from PIL import Image, ImageEnhance, ImageFilter
+import cv2
+import numpy as np
 from refiners.fluxion.utils import load_from_safetensors, tensor_to_image
 from refiners.foundationals.clip import CLIPTextEncoderL
 from refiners.foundationals.latent_diffusion import SD1UNet
@@ -53,10 +54,7 @@ def resize_modulo_8(
     resample: Image.Resampling | None = None,
     on_short: bool = True,
 ) -> Image.Image:
-    """
-
-    The `on_short` parameter determines whether the resizing is based on the shortest side.
-    """
+    """Resize the image to a multiple of 8."""
     assert size % 8 == 0, "Size must be a multiple of 8 because this is the latent compression size."
     side_size = min(image.size) if on_short else max(image.size)
     scale = size / (side_size * 8)
@@ -64,6 +62,95 @@
     return image.resize(new_size, resample=resample or Image.Resampling.LANCZOS)


+def adjust_image(
+    image: Image.Image,
+    brightness=0.0,
+    contrast=0.0,
+    temperature=0.0,
+    saturation=0.0,
+    tint=0.0,
+    blur_intensity=0,
+    exposure=0.0,
+    vibrance=0.0,
+    color_mixer_blues=0.0,
+) -> Image.Image:
+    """Image adjustment function."""
+    image = image.convert('RGB')
+
+    # Exposure adjustment
+    if exposure != 0.0:
+        # Exposure ranges from -5 to 5, where 0 is neutral
+        exposure_factor = 1 + (exposure / 5.0)
+        exposure_factor = max(exposure_factor, 0.01)  # Prevent zero or negative
+        enhancer = ImageEnhance.Brightness(image)
+        image = enhancer.enhance(exposure_factor)
+
+    # Brightness adjustment
+    if brightness != 0.0:
+        # Brightness ranges from -5 to 5, mapped to brightness factor
+        brightness_factor = 1 + (brightness / 5.0)
+        brightness_factor = max(brightness_factor, 0.01)  # Prevent zero or negative
+        enhancer = ImageEnhance.Brightness(image)
+        image = enhancer.enhance(brightness_factor)
+
+    # Contrast adjustment
+    if contrast != 0.0:
+        # Contrast ranges from -100 to 100, mapped to contrast factor
+        contrast_factor = 1 + (contrast / 100.0)
+        contrast_factor = max(contrast_factor, 0.01)  # Prevent zero or negative
+        enhancer = ImageEnhance.Contrast(image)
+        image = enhancer.enhance(contrast_factor)
+
+    # Saturation adjustment (Vibrance)
+    if vibrance != 0.0:
+        # Vibrance simulates adjusting the saturation; positive increases saturation, negative decreases
+        vibrance_factor = 1 + (vibrance / 100.0)
+        vibrance_factor = max(vibrance_factor, 0.0)  # Prevent negative saturation
+        enhancer = ImageEnhance.Color(image)
+        image = enhancer.enhance(vibrance_factor)
+
+    # Saturation adjustment
+    if saturation != 0.0:
+        # Saturation ranges from -100 to 100, mapped to saturation factor
+        saturation_factor = 1 + (saturation / 100.0)
+        saturation_factor = max(saturation_factor, 0.0)  # Prevent negative saturation
+        enhancer = ImageEnhance.Color(image)
+        image = enhancer.enhance(saturation_factor)
+
+    # Color temperature adjustment
+    if temperature != 0.0:
+        # To prevent division by zero, adjust temperature calculation
+        temp_factor = 1 + (temperature / 100.0)
+        temp_factor = max(temp_factor, 0.01)  # Prevent zero or negative
+
+        r, g, b = image.split()
+        r = r.point(lambda i: i * temp_factor)
+        b = b.point(lambda i: i / temp_factor)
+        image = Image.merge('RGB', (r, g, b))
+
+    # Tint adjustment (hue shift)
+    if tint != 0.0:
+        image_np = np.array(image)
+        image_hsv = cv2.cvtColor(image_np, cv2.COLOR_RGB2HSV).astype(np.float32)
+        image_hsv[:, :, 0] = (image_hsv[:, :, 0] + tint) % 180
+        image_hsv[:, :, 0] = np.clip(image_hsv[:, :, 0], 0, 179)
+        image_rgb = cv2.cvtColor(image_hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)
+        image = Image.fromarray(image_rgb)
+
+    # Apply blur
+    if blur_intensity > 0:
+        image = image.filter(ImageFilter.GaussianBlur(radius=blur_intensity))
+
+    # Color Mixer (Blues)
+    if color_mixer_blues != 0.0:
+        image_np = np.array(image).astype(np.float32)
+        # Adjust the blue channel
+        image_np[:, :, 2] = np.clip(image_np[:, :, 2] + (color_mixer_blues / 100.0) * 255, 0, 255)
+        image = Image.fromarray(image_np.astype(np.uint8))
+
+    return image
+
+
 class LightingPreference(str, Enum):
     LEFT = auto()
     RIGHT = auto()
@@ -72,12 +159,7 @@ class LightingPreference(str, Enum):
     NONE = auto()

     def get_init_image(self, width: int, height: int, interval: tuple[float, float] = (0.0, 1.0)) -> Image.Image | None:
-        """
-
-        In the original code, interval is always (0., 1.); we added it as a parameter to make the function more
-        flexible and allow for less contrasted images with a smaller interval.
-        see https://github.com/lllyasviel/IC-Light/blob/7886874/gradio_demo.py#L242
-        """
+        """Generate a gradient init image based on the lighting preference."""
         start, end = interval
         match self:
            case LightingPreference.LEFT:
@@ -107,4 +189,4 @@ class LightingPreference(str, Enum):
            case "none":
                return LightingPreference.NONE
            case _:
-                raise ValueError(f"Invalid lighting preference: {value}")
+                raise ValueError(f"Invalid lighting preference: {value}")
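For reference, a minimal usage sketch of the helpers touched by this commit. Only resize_modulo_8, adjust_image, and LightingPreference.get_init_image come from the diff above; the import path, the input file name, the assumption that resize_modulo_8 takes the image followed by a size argument, and the specific parameter values are illustrative, not part of this change.

# Illustrative sketch only; module path, file name, and values are assumptions.
from PIL import Image

from src.utils import LightingPreference, adjust_image, resize_modulo_8

image = Image.open("input.png")  # hypothetical input image

# Fit the shorter side to roughly 512 px; 512 is a multiple of 8, as required
# by the assert inside resize_modulo_8.
image = resize_modulo_8(image, size=512)

# Apply mild photographic corrections; every argument defaults to "no change".
image = adjust_image(
    image,
    brightness=0.5,    # roughly -5..5 per the comments in the diff
    contrast=10.0,     # roughly -100..100
    temperature=-5.0,
    blur_intensity=2,
)

# Build a left-lit gradient init image at the working resolution; a narrower
# interval such as (0.1, 0.9) yields a less contrasted gradient.
init_image = LightingPreference.LEFT.get_init_image(
    width=image.width, height=image.height, interval=(0.1, 0.9)
)

Note that adjust_image applies its operations in a fixed order (exposure and brightness first, then contrast and color adjustments, then blur and the blue channel mixer), so the arguments are not independent, commutative filters.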