giulio98 committed
Commit fe0697e · verified · 1 parent: 0ae585a

Create unet/conditional_unet_model.py

Files changed (1)
  1. unet/conditional_unet_model.py +332 -0
unet/conditional_unet_model.py ADDED
@@ -0,0 +1,332 @@
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput
from diffusers.models.embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class UNet2DOutput(BaseOutput):
    """
    The output of [`UNet2DModel`].

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            The hidden states output from the last layer of the model.
    """

    sample: torch.FloatTensor


class UNet2DModel(ModelMixin, ConfigMixin):
    r"""
    A 2D UNet model that takes a noisy sample and a timestep and returns a sample-shaped output.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
            Height and width of input/output sample. Dimensions must be a multiple of `2 ** (len(block_out_channels) -
            1)`.
        in_channels (`int`, *optional*, defaults to 3): Number of channels in the input sample.
        out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
        center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
        time_embedding_type (`str`, *optional*, defaults to `"positional"`): Type of time embedding to use.
        freq_shift (`int`, *optional*, defaults to 0): Frequency shift for Fourier time embedding.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip sin to cos for Fourier time embedding.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D")`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D")`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(224, 448, 672, 896)`):
            Tuple of block output channels.
        layers_per_block (`int`, *optional*, defaults to `2`): The number of layers per block.
        mid_block_scale_factor (`float`, *optional*, defaults to `1`): The scale factor for the mid block.
        downsample_padding (`int`, *optional*, defaults to `1`): The padding for the downsample convolution.
        downsample_type (`str`, *optional*, defaults to `"conv"`):
            The downsample type for downsampling layers. Choose between `"conv"` and `"resnet"`.
        upsample_type (`str`, *optional*, defaults to `"conv"`):
            The upsample type for upsampling layers. Choose between `"conv"` and `"resnet"`.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        attention_head_dim (`int`, *optional*, defaults to `8`): The attention head dimension.
        norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups for normalization.
        attn_norm_num_groups (`int`, *optional*, defaults to `None`):
            If set to an integer, a group norm layer will be created in the mid block's [`Attention`] layer with the
            given number of groups. If left as `None`, the group norm layer will only be created if
            `resnet_time_scale_shift` is set to `default`, and if created will have `norm_num_groups` groups.
        norm_eps (`float`, *optional*, defaults to `1e-5`): The epsilon for normalization.
        resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
            for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
        add_attention (`bool`, *optional*, defaults to `True`):
            Whether to add an attention layer to the mid block.
        class_embed_type (`str`, *optional*, defaults to `None`):
            The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
            `"timestep"`, or `"identity"`.
        num_class_embeds (`int`, *optional*, defaults to `None`):
            Input dimension of the learnable embedding matrix to be projected to `time_embed_dim` when performing class
            conditioning with `class_embed_type` equal to `None`.
        num_train_timesteps (`int`, *optional*, defaults to `None`):
            Number of training timesteps; required when `time_embedding_type` is `"learned"`.
        set_W_to_weight (`bool`, *optional*, defaults to `True`):
            Forwarded to [`~models.embeddings.GaussianFourierProjection`] when `time_embedding_type` is `"fourier"`.
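
    Example:
        A minimal construction sketch; the values below are illustrative, not taken from this commit:

        ```py
        model = UNet2DModel(sample_size=32, block_out_channels=(64, 128, 128, 256), num_class_embeds=10)
        ```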
    """

    @register_to_config
    def __init__(
        self,
        sample_size: Optional[Union[int, Tuple[int, int]]] = None,
        in_channels: int = 3,
        out_channels: int = 3,
        center_input_sample: bool = False,
        time_embedding_type: str = "positional",
        freq_shift: int = 0,
        flip_sin_to_cos: bool = True,
        down_block_types: Tuple[str, ...] = ("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D"),
        up_block_types: Tuple[str, ...] = ("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D"),
        block_out_channels: Tuple[int, ...] = (224, 448, 672, 896),
        layers_per_block: int = 2,
        mid_block_scale_factor: float = 1,
        downsample_padding: int = 1,
        downsample_type: str = "conv",
        upsample_type: str = "conv",
        dropout: float = 0.0,
        act_fn: str = "silu",
        attention_head_dim: Optional[int] = 8,
        norm_num_groups: int = 32,
        attn_norm_num_groups: Optional[int] = None,
        norm_eps: float = 1e-5,
        resnet_time_scale_shift: str = "default",
        add_attention: bool = True,
        class_embed_type: Optional[str] = None,
        num_class_embeds: Optional[int] = None,
        num_train_timesteps: Optional[int] = None,
        set_W_to_weight: Optional[bool] = True,
    ):
        super().__init__()

        self.sample_size = sample_size
        time_embed_dim = block_out_channels[0] * 4

        # Check inputs
        if len(down_block_types) != len(up_block_types):
            raise ValueError(
                f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
            )

        if len(block_out_channels) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
            )

        # input
        self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))

        # time
        if time_embedding_type == "fourier":
            # `set_W_to_weight` is forwarded to GaussianFourierProjection, which uses it to also
            # register the random Fourier matrix under the `weight` attribute.
            self.time_proj = GaussianFourierProjection(
                embedding_size=block_out_channels[0], scale=16, set_W_to_weight=set_W_to_weight
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
            timestep_input_dim = block_out_channels[0]
        elif time_embedding_type == "learned":
            self.time_proj = nn.Embedding(num_train_timesteps, block_out_channels[0])
            timestep_input_dim = block_out_channels[0]
        else:
            raise ValueError(f"Unknown `time_embedding_type`: {time_embedding_type}")

        self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)

        # class embedding
        if class_embed_type is None and num_class_embeds is not None:
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif class_embed_type == "timestep":
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        elif class_embed_type == "identity":
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
        else:
            self.class_embedding = None

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel,
                downsample_padding=downsample_padding,
                resnet_time_scale_shift=resnet_time_scale_shift,
                downsample_type=downsample_type,
                dropout=dropout,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            temb_channels=time_embed_dim,
            dropout=dropout,
            resnet_eps=norm_eps,
            resnet_act_fn=act_fn,
            output_scale_factor=mid_block_scale_factor,
            resnet_time_scale_shift=resnet_time_scale_shift,
            attention_head_dim=attention_head_dim if attention_head_dim is not None else block_out_channels[-1],
            resnet_groups=norm_num_groups,
            attn_groups=attn_norm_num_groups,
            add_attention=add_attention,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block + 1,
                in_channels=input_channel,
                out_channels=output_channel,
                prev_output_channel=prev_output_channel,
                temb_channels=time_embed_dim,
                add_upsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel,
                resnet_time_scale_shift=resnet_time_scale_shift,
                upsample_type=upsample_type,
                dropout=dropout,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=num_groups_out, eps=norm_eps)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        class_labels: Optional[torch.Tensor] = None,
        return_dict: bool = True,
    ) -> Union[UNet2DOutput, Tuple]:
        r"""
        The [`UNet2DModel`] forward method.

        Args:
            sample (`torch.FloatTensor`):
                The noisy input tensor with the following shape `(batch, channel, height, width)`.
            timestep (`torch.Tensor` or `float` or `int`): The current denoising timestep.
            class_labels (`torch.Tensor`, *optional*, defaults to `None`):
                Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`UNet2DOutput`] instead of a plain tuple.

        Returns:
            [`UNet2DOutput`] or `tuple`:
                If `return_dict` is `True`, an [`UNet2DOutput`] is returned, otherwise a `tuple` is
                returned where the first element is the sample tensor.
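
        Example:
            A minimal call sketch; shapes are illustrative and `model` is assumed to be an instance of this class:

            ```py
            noisy = torch.randn(2, 3, 32, 32)
            out = model(noisy, timestep=10).sample  # same shape as `noisy`
            ```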
        """
        # 0. center input if necessary
        if self.config.center_input_sample:
            sample = 2 * sample - 1.0

        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(sample.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        t_emb = self.time_proj(timesteps)

        # `time_proj` contains no weights and always returns f32 tensors, but the time embedding
        # might be running in fp16, so we need to cast here. There might be better ways to
        # encapsulate this.
        t_emb = t_emb.to(dtype=self.dtype)
        emb = self.time_embedding(t_emb)

        if self.class_embedding is not None:
            if class_labels is None:
                raise ValueError("class_labels should be provided when doing class conditioning")

            if self.config.class_embed_type == "timestep":
                class_labels = self.time_proj(class_labels)

            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
            emb = emb + class_emb
        elif self.class_embedding is None and class_labels is not None:
            raise ValueError("class_embedding needs to be initialized in order to use class conditioning")

        # 2. pre-process
        skip_sample = sample
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if hasattr(downsample_block, "skip_conv"):
                sample, res_samples, skip_sample = downsample_block(
                    hidden_states=sample, temb=emb, skip_sample=skip_sample
                )
            else:
                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, emb)

        # 5. up
        skip_sample = None
        for upsample_block in self.up_blocks:
            res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
            down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]

            if hasattr(upsample_block, "skip_conv"):
                sample, skip_sample = upsample_block(sample, res_samples, emb, skip_sample)
            else:
                sample = upsample_block(sample, res_samples, emb)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        if skip_sample is not None:
            sample += skip_sample

        if self.config.time_embedding_type == "fourier":
            timesteps = timesteps.reshape((sample.shape[0], *([1] * len(sample.shape[1:]))))
            sample = sample / timesteps

        if not return_dict:
            return (sample,)

        return UNet2DOutput(sample=sample)
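

if __name__ == "__main__":
    # Minimal smoke test, added for illustration only (not part of the original
    # commit): build a small class-conditional UNet and check the output shape.
    # The channel widths and the 10-class setup are assumptions for the example.
    model = UNet2DModel(
        sample_size=32,
        block_out_channels=(32, 64, 64, 128),
        num_class_embeds=10,
    )
    x = torch.randn(1, 3, 32, 32)
    t = torch.tensor([5])
    labels = torch.tensor([3])
    out = model(x, t, class_labels=labels).sample
    assert out.shape == x.shape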