from typing import Any, Dict, List, Optional, Tuple, Union |
|
|
|
import numpy as np |
|
import paddle |
|
import paddle.nn as nn |
|
from paddle.distributed.fleet.utils import recompute |
|
|
|
from ...configuration_utils import ConfigMixin, register_to_config |
|
from ...modeling_utils import ModelMixin |
|
from ...models.attention import DualTransformer2DModel, Transformer2DModel |
|
from ...models.cross_attention import ( |
|
AttnProcessor, |
|
CrossAttention, |
|
CrossAttnAddedKVProcessor, |
|
) |
|
from ...models.embeddings import TimestepEmbedding, Timesteps |
|
from ...models.unet_2d_condition import UNet2DConditionOutput |
|
from ...utils import logging |
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
|
|
def get_down_block( |
|
down_block_type, |
|
num_layers, |
|
in_channels, |
|
out_channels, |
|
temb_channels, |
|
add_downsample, |
|
resnet_eps, |
|
resnet_act_fn, |
|
attn_num_head_channels, |
|
resnet_groups=None, |
|
cross_attention_dim=None, |
|
downsample_padding=None, |
|
dual_cross_attention=False, |
|
use_linear_projection=False, |
|
only_cross_attention=False, |
|
upcast_attention=False, |
|
resnet_time_scale_shift="default", |
|
): |
|
down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type |
|
if down_block_type == "DownBlockFlat": |
|
return DownBlockFlat( |
|
num_layers=num_layers, |
|
in_channels=in_channels, |
|
out_channels=out_channels, |
|
temb_channels=temb_channels, |
|
add_downsample=add_downsample, |
|
resnet_eps=resnet_eps, |
|
resnet_act_fn=resnet_act_fn, |
|
resnet_groups=resnet_groups, |
|
downsample_padding=downsample_padding, |
|
resnet_time_scale_shift=resnet_time_scale_shift, |
|
) |
|
elif down_block_type == "CrossAttnDownBlockFlat": |
|
if cross_attention_dim is None: |
|
raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlockFlat") |
|
return CrossAttnDownBlockFlat( |
|
num_layers=num_layers, |
|
in_channels=in_channels, |
|
out_channels=out_channels, |
|
temb_channels=temb_channels, |
|
add_downsample=add_downsample, |
|
resnet_eps=resnet_eps, |
|
resnet_act_fn=resnet_act_fn, |
|
resnet_groups=resnet_groups, |
|
downsample_padding=downsample_padding, |
|
cross_attention_dim=cross_attention_dim, |
|
attn_num_head_channels=attn_num_head_channels, |
|
dual_cross_attention=dual_cross_attention, |
|
use_linear_projection=use_linear_projection, |
|
            only_cross_attention=only_cross_attention,

            upcast_attention=upcast_attention,

            resnet_time_scale_shift=resnet_time_scale_shift,
|
) |
|
raise ValueError(f"{down_block_type} is not supported.") |
|
|
|
|
|
def get_up_block( |
|
up_block_type, |
|
num_layers, |
|
in_channels, |
|
out_channels, |
|
prev_output_channel, |
|
temb_channels, |
|
add_upsample, |
|
resnet_eps, |
|
resnet_act_fn, |
|
attn_num_head_channels, |
|
resnet_groups=None, |
|
cross_attention_dim=None, |
|
dual_cross_attention=False, |
|
use_linear_projection=False, |
|
only_cross_attention=False, |
|
upcast_attention=False, |
|
resnet_time_scale_shift="default", |
|
): |
|
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type |
|
if up_block_type == "UpBlockFlat": |
|
return UpBlockFlat( |
|
num_layers=num_layers, |
|
in_channels=in_channels, |
|
out_channels=out_channels, |
|
prev_output_channel=prev_output_channel, |
|
temb_channels=temb_channels, |
|
add_upsample=add_upsample, |
|
resnet_eps=resnet_eps, |
|
resnet_act_fn=resnet_act_fn, |
|
resnet_groups=resnet_groups, |
|
resnet_time_scale_shift=resnet_time_scale_shift, |
|
) |
|
elif up_block_type == "CrossAttnUpBlockFlat": |
|
if cross_attention_dim is None: |
|
raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockFlat") |
|
return CrossAttnUpBlockFlat( |
|
num_layers=num_layers, |
|
in_channels=in_channels, |
|
out_channels=out_channels, |
|
prev_output_channel=prev_output_channel, |
|
temb_channels=temb_channels, |
|
add_upsample=add_upsample, |
|
resnet_eps=resnet_eps, |
|
resnet_act_fn=resnet_act_fn, |
|
resnet_groups=resnet_groups, |
|
cross_attention_dim=cross_attention_dim, |
|
attn_num_head_channels=attn_num_head_channels, |
|
dual_cross_attention=dual_cross_attention, |
|
use_linear_projection=use_linear_projection, |
|
            only_cross_attention=only_cross_attention,

            upcast_attention=upcast_attention,

            resnet_time_scale_shift=resnet_time_scale_shift,
|
) |
|
raise ValueError(f"{up_block_type} is not supported.") |
|
|
|
|
|
|
|
class UNetFlatConditionModel(ModelMixin, ConfigMixin): |
|
r""" |
|
UNetFlatConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a |
|
    timestep and returns a sample-shaped output.
|
|
|
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library |
|
    implements for all models (such as downloading or saving).
|
|
|
Parameters: |
|
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): |
|
Height and width of input/output sample. |
|
in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. |
|
out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. |
|
center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. |
|
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
|
Whether to flip the sin to cos in the time embedding. |
|
freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. |
|
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlockFlat", "CrossAttnDownBlockFlat", "CrossAttnDownBlockFlat", "DownBlockFlat")`): |
|
The tuple of downsample blocks to use. |
|
mid_block_type (`str`, *optional*, defaults to `"UNetMidBlockFlatCrossAttn"`): |
|
The mid block type. Choose from `UNetMidBlockFlatCrossAttn` or `UNetMidBlockFlatSimpleCrossAttn`. |
|
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlockFlat", "CrossAttnUpBlockFlat", "CrossAttnUpBlockFlat", "CrossAttnUpBlockFlat",)`): |
|
The tuple of upsample blocks to use. |
|
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): |
|
The tuple of output channels for each block. |
|
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. |
|
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. |
|
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. |
|
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. |
|
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. |
|
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. |
|
cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features. |
|
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. |
|
resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config |
|
for resnet blocks, see [`~models.resnet.ResnetBlockFlat`]. Choose from `default` or `scale_shift`. |
|
class_embed_type (`str`, *optional*, defaults to None): The type of class embedding to use which is ultimately |
|
summed with the time embeddings. Choose from `None`, `"timestep"`, or `"identity"`. |
|
""" |
|
|
|
_supports_gradient_checkpointing = True |
|
|
|
@register_to_config |
|
def __init__( |
|
self, |
|
sample_size: Optional[int] = None, |
|
in_channels: int = 4, |
|
out_channels: int = 4, |
|
center_input_sample: bool = False, |
|
flip_sin_to_cos: bool = True, |
|
freq_shift: int = 0, |
|
down_block_types: Tuple[str] = ( |
|
"CrossAttnDownBlockFlat", |
|
"CrossAttnDownBlockFlat", |
|
"CrossAttnDownBlockFlat", |
|
"DownBlockFlat", |
|
), |
|
mid_block_type: str = "UNetMidBlockFlatCrossAttn", |
|
up_block_types: Tuple[str] = ( |
|
"UpBlockFlat", |
|
"CrossAttnUpBlockFlat", |
|
"CrossAttnUpBlockFlat", |
|
"CrossAttnUpBlockFlat", |
|
), |
|
only_cross_attention: Union[bool, Tuple[bool]] = False, |
|
block_out_channels: Tuple[int] = (320, 640, 1280, 1280), |
|
layers_per_block: int = 2, |
|
downsample_padding: int = 1, |
|
mid_block_scale_factor: float = 1, |
|
act_fn: str = "silu", |
|
norm_num_groups: int = 32, |
|
norm_eps: float = 1e-5, |
|
cross_attention_dim: int = 1280, |
|
attention_head_dim: Union[int, Tuple[int]] = 8, |
|
dual_cross_attention: bool = False, |
|
use_linear_projection: bool = False, |
|
class_embed_type: Optional[str] = None, |
|
num_class_embeds: Optional[int] = None, |
|
upcast_attention: bool = False, |
|
resnet_time_scale_shift: str = "default", |
|
): |
|
super().__init__() |
|
|
|
self.sample_size = sample_size |
|
time_embed_dim = block_out_channels[0] * 4 |
|
|
|
|
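        # input: LinearMultiDim stands in for the usual conv_in (the conv-style kwargs are ignored)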
|
self.conv_in = LinearMultiDim(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1)) |
|
|
|
|
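        # time embedding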
|
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) |
|
timestep_input_dim = block_out_channels[0] |
|
|
|
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) |
|
|
|
|
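        # class embedding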
|
if class_embed_type is None and num_class_embeds is not None: |
|
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) |
|
elif class_embed_type == "timestep": |
|
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) |
|
elif class_embed_type == "identity": |
|
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) |
|
else: |
|
self.class_embedding = None |
|
|
|
self.down_blocks = nn.LayerList([]) |
|
self.mid_block = None |
|
self.up_blocks = nn.LayerList([]) |
|
|
|
if isinstance(only_cross_attention, bool): |
|
only_cross_attention = [only_cross_attention] * len(down_block_types) |
|
|
|
if isinstance(attention_head_dim, int): |
|
attention_head_dim = (attention_head_dim,) * len(down_block_types) |
|
|
|
|
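        # down blocks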
|
output_channel = block_out_channels[0] |
|
for i, down_block_type in enumerate(down_block_types): |
|
input_channel = output_channel |
|
output_channel = block_out_channels[i] |
|
is_final_block = i == len(block_out_channels) - 1 |
|
|
|
down_block = get_down_block( |
|
down_block_type, |
|
num_layers=layers_per_block, |
|
in_channels=input_channel, |
|
out_channels=output_channel, |
|
temb_channels=time_embed_dim, |
|
add_downsample=not is_final_block, |
|
resnet_eps=norm_eps, |
|
resnet_act_fn=act_fn, |
|
resnet_groups=norm_num_groups, |
|
cross_attention_dim=cross_attention_dim, |
|
attn_num_head_channels=attention_head_dim[i], |
|
downsample_padding=downsample_padding, |
|
dual_cross_attention=dual_cross_attention, |
|
use_linear_projection=use_linear_projection, |
|
only_cross_attention=only_cross_attention[i], |
|
upcast_attention=upcast_attention, |
|
resnet_time_scale_shift=resnet_time_scale_shift, |
|
) |
|
self.down_blocks.append(down_block) |
|
|
|
|
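        # mid block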
|
if mid_block_type == "UNetMidBlockFlatCrossAttn": |
|
self.mid_block = UNetMidBlockFlatCrossAttn( |
|
in_channels=block_out_channels[-1], |
|
temb_channels=time_embed_dim, |
|
resnet_eps=norm_eps, |
|
resnet_act_fn=act_fn, |
|
output_scale_factor=mid_block_scale_factor, |
|
resnet_time_scale_shift=resnet_time_scale_shift, |
|
cross_attention_dim=cross_attention_dim, |
|
attn_num_head_channels=attention_head_dim[-1], |
|
resnet_groups=norm_num_groups, |
|
dual_cross_attention=dual_cross_attention, |
|
use_linear_projection=use_linear_projection, |
|
upcast_attention=upcast_attention, |
|
) |
|
elif mid_block_type == "UNetMidBlockFlatSimpleCrossAttn": |
|
self.mid_block = UNetMidBlockFlatSimpleCrossAttn( |
|
in_channels=block_out_channels[-1], |
|
temb_channels=time_embed_dim, |
|
resnet_eps=norm_eps, |
|
resnet_act_fn=act_fn, |
|
output_scale_factor=mid_block_scale_factor, |
|
cross_attention_dim=cross_attention_dim, |
|
attn_num_head_channels=attention_head_dim[-1], |
|
resnet_groups=norm_num_groups, |
|
resnet_time_scale_shift=resnet_time_scale_shift, |
|
) |
|
else: |
|
raise ValueError(f"unknown mid_block_type : {mid_block_type}") |
|
|
|
|
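        # count how many layers upsample the sample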
|
self.num_upsamplers = 0 |
|
|
|
|
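        # up blocks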
|
reversed_block_out_channels = list(reversed(block_out_channels)) |
|
reversed_attention_head_dim = list(reversed(attention_head_dim)) |
|
reversed_only_cross_attention = list(reversed(only_cross_attention)) |
|
|
|
output_channel = reversed_block_out_channels[0] |
|
for i, up_block_type in enumerate(up_block_types): |
|
is_final_block = i == len(block_out_channels) - 1 |
|
|
|
prev_output_channel = output_channel |
|
output_channel = reversed_block_out_channels[i] |
|
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] |
|
|
|
|
|
if not is_final_block: |
|
add_upsample = True |
|
self.num_upsamplers += 1 |
|
else: |
|
add_upsample = False |
|
|
|
up_block = get_up_block( |
|
up_block_type, |
|
num_layers=layers_per_block + 1, |
|
in_channels=input_channel, |
|
out_channels=output_channel, |
|
prev_output_channel=prev_output_channel, |
|
temb_channels=time_embed_dim, |
|
add_upsample=add_upsample, |
|
resnet_eps=norm_eps, |
|
resnet_act_fn=act_fn, |
|
resnet_groups=norm_num_groups, |
|
cross_attention_dim=cross_attention_dim, |
|
attn_num_head_channels=reversed_attention_head_dim[i], |
|
dual_cross_attention=dual_cross_attention, |
|
use_linear_projection=use_linear_projection, |
|
only_cross_attention=reversed_only_cross_attention[i], |
|
upcast_attention=upcast_attention, |
|
resnet_time_scale_shift=resnet_time_scale_shift, |
|
) |
|
self.up_blocks.append(up_block) |
|
prev_output_channel = output_channel |
|
|
|
|
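        # output layers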
|
self.conv_norm_out = nn.GroupNorm( |
|
num_channels=block_out_channels[0], num_groups=norm_num_groups, epsilon=norm_eps |
|
) |
|
self.conv_act = nn.Silu() |
|
self.conv_out = LinearMultiDim(block_out_channels[0], out_channels, kernel_size=3, padding=1) |
|
|
|
@property |
|
def attn_processors(self) -> Dict[str, AttnProcessor]: |
|
r""" |
|
Returns: |
|
            `dict` of attention processors: A dictionary containing all attention processors used in the model,

            indexed by their weight names.
|
""" |
|
|
|
processors = {} |
|
|
|
def fn_recursive_add_processors(name: str, module: nn.Layer, processors: Dict[str, AttnProcessor]): |
|
if hasattr(module, "set_processor"): |
|
processors[f"{name}.processor"] = module.processor |
|
|
|
for sub_name, child in module.named_children(): |
|
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) |
|
|
|
return processors |
|
|
|
for name, module in self.named_children(): |
|
fn_recursive_add_processors(name, module, processors) |
|
|
|
return processors |
|
|
|
def set_attn_processor(self, processor: Union[AttnProcessor, Dict[str, AttnProcessor]]): |
|
r""" |
|
Parameters: |
|
            processor (`dict` of `AttnProcessor` or `AttnProcessor`):
|
The instantiated processor class or a dictionary of processor classes that will be set as the processor |
|
of **all** `CrossAttention` layers. |
|
            In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors.
|
""" |
|
count = len(self.attn_processors.keys()) |
|
|
|
if isinstance(processor, dict) and len(processor) != count: |
|
raise ValueError( |
|
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" |
|
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." |
|
) |
|
|
|
def fn_recursive_attn_processor(name: str, module: nn.Layer, processor): |
|
if hasattr(module, "set_processor"): |
|
if not isinstance(processor, dict): |
|
module.set_processor(processor) |
|
else: |
|
module.set_processor(processor.pop(f"{name}.processor")) |
|
|
|
for sub_name, child in module.named_children(): |
|
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) |
|
|
|
for name, module in self.named_children(): |
|
fn_recursive_attn_processor(name, module, processor) |
|
|
|
def set_attention_slice(self, slice_size): |
|
r""" |
|
Enable sliced attention computation. |
|
|
|
        When this option is enabled, the attention module splits the input tensor into slices to compute attention
|
in several steps. This is useful to save some memory in exchange for a small speed decrease. |
|
|
|
Args: |
|
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): |
|
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If |
|
                `"max"`, the maximum amount of memory will be saved by running only one slice at a time. If a number is
|
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` |
|
must be a multiple of `slice_size`. |
|
""" |
|
sliceable_head_dims = [] |
|
|
|
def fn_recursive_retrieve_slicable_dims(module: nn.Layer): |
|
if hasattr(module, "set_attention_slice"): |
|
sliceable_head_dims.append(module.sliceable_head_dim) |
|
|
|
for child in module.children(): |
|
fn_recursive_retrieve_slicable_dims(child) |
|
|
|
|
|
for module in self.children(): |
|
fn_recursive_retrieve_slicable_dims(module) |
|
|
|
num_slicable_layers = len(sliceable_head_dims) |
|
|
|
if slice_size == "auto": |
|
|
|
|
|
slice_size = [dim // 2 for dim in sliceable_head_dims] |
|
elif slice_size == "max": |
|
|
|
slice_size = num_slicable_layers * [1] |
|
|
|
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size |
|
|
|
if len(slice_size) != len(sliceable_head_dims): |
|
raise ValueError( |
|
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" |
|
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." |
|
) |
|
|
|
for i in range(len(slice_size)): |
|
size = slice_size[i] |
|
dim = sliceable_head_dims[i] |
|
if size is not None and size > dim: |
|
raise ValueError(f"size {size} has to be smaller or equal to {dim}.") |
|
|
|
|
|
|
|
|
|
def fn_recursive_set_attention_slice(module: nn.Layer, slice_size: List[int]): |
|
if hasattr(module, "set_attention_slice"): |
|
module.set_attention_slice(slice_size.pop()) |
|
|
|
for child in module.children(): |
|
fn_recursive_set_attention_slice(child, slice_size) |
|
|
|
reversed_slice_size = list(reversed(slice_size)) |
|
for module in self.children(): |
|
fn_recursive_set_attention_slice(module, reversed_slice_size) |
|
|
|
def _set_gradient_checkpointing(self, module, value=False): |
|
if isinstance(module, (CrossAttnDownBlockFlat, DownBlockFlat, CrossAttnUpBlockFlat, UpBlockFlat)): |
|
module.gradient_checkpointing = value |
|
|
|
def forward( |
|
self, |
|
sample: paddle.Tensor, |
|
timestep: Union[paddle.Tensor, float, int], |
|
encoder_hidden_states: paddle.Tensor, |
|
class_labels: Optional[paddle.Tensor] = None, |
|
attention_mask: Optional[paddle.Tensor] = None, |
|
cross_attention_kwargs: Optional[Dict[str, Any]] = None, |
|
return_dict: bool = True, |
|
) -> Union[UNet2DConditionOutput, Tuple]: |
|
r""" |
|
Args: |
|
sample (`paddle.Tensor`): (batch, channel, height, width) noisy inputs tensor |
|
timestep (`paddle.Tensor` or `float` or `int`): (batch) timesteps |
|
encoder_hidden_states (`paddle.Tensor`): (batch, sequence_length, feature_dim) encoder hidden states |
|
return_dict (`bool`, *optional*, defaults to `True`): |
|
Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. |
|
|
|
Returns: |
|
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: |
|
[`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When |
|
returning a tuple, the first element is the sample tensor. |
|
""" |
|
|
|
|
|
|
|
|
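        # By default, the spatial size of the sample has to be a multiple of the overall upsampling factor,
        # which is 2 ** num_upsamplers. Otherwise the upsample size is forwarded explicitly so interpolation
        # can produce the required output size.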
|
default_overall_up_factor = 2**self.num_upsamplers |
|
|
|
|
|
forward_upsample_size = False |
|
upsample_size = None |
|
|
|
if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): |
|
logger.info("Forward upsample size to force interpolation output size.") |
|
forward_upsample_size = True |
|
|
|
|
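        # turn the attention mask into an additive bias and add a broadcast dimension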
|
if attention_mask is not None: |
|
attention_mask = (1 - attention_mask.cast(sample.dtype)) * -10000.0 |
|
attention_mask = attention_mask.unsqueeze(1) |
|
|
|
|
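        # 0. center input if necessary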
|
if self.config.center_input_sample: |
|
sample = 2 * sample - 1.0 |
|
|
|
|
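        # 1. time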
|
timesteps = timestep |
|
if not paddle.is_tensor(timesteps): |
|
|
|
timesteps = paddle.to_tensor([timesteps], dtype="int64") |
|
elif paddle.is_tensor(timesteps) and len(timesteps.shape) == 0: |
|
timesteps = timesteps[None] |
|
|
|
|
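        # broadcast the timestep to the batch dimension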
|
timesteps = timesteps.expand( |
|
[ |
|
sample.shape[0], |
|
] |
|
) |
|
|
|
t_emb = self.time_proj(timesteps) |
|
|
|
|
|
|
|
|
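        # time_proj always returns float32 tensors, but the time embedding may run in a lower
        # precision, so cast before projecting.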
|
t_emb = t_emb.cast(self.dtype) |
|
emb = self.time_embedding(t_emb) |
|
|
|
if self.class_embedding is not None: |
|
if class_labels is None: |
|
raise ValueError("class_labels should be provided when num_class_embeds > 0") |
|
|
|
if self.config.class_embed_type == "timestep": |
|
class_labels = self.time_proj(class_labels) |
|
|
|
class_emb = self.class_embedding(class_labels).cast(self.dtype) |
|
emb = emb + class_emb |
|
|
|
|
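        # 2. pre-process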
|
sample = self.conv_in(sample) |
|
|
|
|
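        # 3. down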
|
down_block_res_samples = (sample,) |
|
for downsample_block in self.down_blocks: |
|
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: |
|
sample, res_samples = downsample_block( |
|
hidden_states=sample, |
|
temb=emb, |
|
encoder_hidden_states=encoder_hidden_states, |
|
attention_mask=attention_mask, |
|
cross_attention_kwargs=cross_attention_kwargs, |
|
) |
|
else: |
|
sample, res_samples = downsample_block(hidden_states=sample, temb=emb) |
|
|
|
down_block_res_samples += res_samples |
|
|
|
|
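        # 4. mid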
|
sample = self.mid_block( |
|
sample, |
|
emb, |
|
encoder_hidden_states=encoder_hidden_states, |
|
attention_mask=attention_mask, |
|
cross_attention_kwargs=cross_attention_kwargs, |
|
) |
|
|
|
|
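        # 5. up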
|
for i, upsample_block in enumerate(self.up_blocks): |
|
is_final_block = i == len(self.up_blocks) - 1 |
|
|
|
res_samples = down_block_res_samples[-len(upsample_block.resnets) :] |
|
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] |
|
|
|
|
|
|
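            # if we have not reached the final block and need to forward the upsample size, do it here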
|
if not is_final_block and forward_upsample_size: |
|
upsample_size = down_block_res_samples[-1].shape[2:] |
|
|
|
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: |
|
sample = upsample_block( |
|
hidden_states=sample, |
|
temb=emb, |
|
res_hidden_states_tuple=res_samples, |
|
encoder_hidden_states=encoder_hidden_states, |
|
cross_attention_kwargs=cross_attention_kwargs, |
|
upsample_size=upsample_size, |
|
attention_mask=attention_mask, |
|
) |
|
else: |
|
sample = upsample_block( |
|
hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size |
|
) |
|
|
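        # 6. post-process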
|
sample = self.conv_norm_out(sample) |
|
sample = self.conv_act(sample) |
|
sample = self.conv_out(sample) |
|
|
|
if not return_dict: |
|
return (sample,) |
|
|
|
return UNet2DConditionOutput(sample=sample) |
|
|
|
|
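# LinearMultiDim replaces the convolutional (and up/down-sampling) layers of the standard UNet with a
# linear layer over flattened feature vectors; extra conv-style args/kwargs are accepted and ignored.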
|
class LinearMultiDim(nn.Linear): |
|
def __init__(self, in_features, out_features=None, second_dim=4, *args, **kwargs): |
|
in_features = [in_features, second_dim, 1] if isinstance(in_features, int) else list(in_features) |
|
if out_features is None: |
|
out_features = in_features |
|
out_features = [out_features, second_dim, 1] if isinstance(out_features, int) else list(out_features) |
|
self.in_features_multidim = in_features |
|
self.out_features_multidim = out_features |
|
super().__init__(np.array(in_features).prod(), np.array(out_features).prod()) |
|
|
|
def forward(self, input_tensor, *args, **kwargs): |
|
shape = input_tensor.shape |
|
n_dim = len(self.in_features_multidim) |
|
input_tensor = input_tensor.reshape([*shape[0:-n_dim], self.in_features]) |
|
output_tensor = super().forward(input_tensor) |
|
output_tensor = output_tensor.reshape([*shape[0:-n_dim], *self.out_features_multidim]) |
|
return output_tensor |
|
|
|
|
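# ResnetBlockFlat mirrors a standard ResNet block but operates on flattened features via 1x1 convolutions.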
|
class ResnetBlockFlat(nn.Layer): |
|
def __init__( |
|
self, |
|
*, |
|
in_channels, |
|
out_channels=None, |
|
dropout=0.0, |
|
temb_channels=512, |
|
groups=32, |
|
groups_out=None, |
|
pre_norm=True, |
|
eps=1e-6, |
|
time_embedding_norm="default", |
|
use_in_shortcut=None, |
|
second_dim=4, |
|
**kwargs, |
|
): |
|
super().__init__() |
|
self.pre_norm = pre_norm |
|
self.pre_norm = True |
|
|
|
in_channels = [in_channels, second_dim, 1] if isinstance(in_channels, int) else list(in_channels) |
|
self.in_channels_prod = np.array(in_channels).prod() |
|
self.channels_multidim = in_channels |
|
|
|
if out_channels is not None: |
|
out_channels = [out_channels, second_dim, 1] if isinstance(out_channels, int) else list(out_channels) |
|
out_channels_prod = np.array(out_channels).prod() |
|
self.out_channels_multidim = out_channels |
|
else: |
|
out_channels_prod = self.in_channels_prod |
|
self.out_channels_multidim = self.channels_multidim |
|
self.time_embedding_norm = time_embedding_norm |
|
|
|
if groups_out is None: |
|
groups_out = groups |
|
|
|
self.norm1 = nn.GroupNorm(num_groups=groups, num_channels=self.in_channels_prod, epsilon=eps) |
|
self.conv1 = nn.Conv2D(self.in_channels_prod, out_channels_prod, kernel_size=1, padding=0) |
|
|
|
if temb_channels is not None: |
|
self.time_emb_proj = nn.Linear(temb_channels, out_channels_prod) |
|
else: |
|
self.time_emb_proj = None |
|
|
|
self.norm2 = nn.GroupNorm(num_groups=groups_out, num_channels=out_channels_prod, epsilon=eps) |
|
self.dropout = nn.Dropout(dropout) |
|
self.conv2 = nn.Conv2D(out_channels_prod, out_channels_prod, kernel_size=1, padding=0) |
|
|
|
self.nonlinearity = nn.Silu() |
|
|
|
self.use_in_shortcut = ( |
|
self.in_channels_prod != out_channels_prod if use_in_shortcut is None else use_in_shortcut |
|
) |
|
|
|
self.conv_shortcut = None |
|
if self.use_in_shortcut: |
|
self.conv_shortcut = nn.Conv2D( |
|
self.in_channels_prod, out_channels_prod, kernel_size=1, stride=1, padding=0 |
|
) |
|
|
|
def forward(self, input_tensor, temb): |
|
shape = input_tensor.shape |
|
n_dim = len(self.channels_multidim) |
|
input_tensor = input_tensor.reshape([*shape[0:-n_dim], self.in_channels_prod, 1, 1]) |
|
input_tensor = input_tensor.reshape([-1, self.in_channels_prod, 1, 1]) |
|
|
|
hidden_states = input_tensor |
|
|
|
hidden_states = self.norm1(hidden_states) |
|
hidden_states = self.nonlinearity(hidden_states) |
|
hidden_states = self.conv1(hidden_states) |
|
|
|
if temb is not None: |
|
temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None] |
|
hidden_states = hidden_states + temb |
|
|
|
hidden_states = self.norm2(hidden_states) |
|
hidden_states = self.nonlinearity(hidden_states) |
|
|
|
hidden_states = self.dropout(hidden_states) |
|
hidden_states = self.conv2(hidden_states) |
|
|
|
if self.conv_shortcut is not None: |
|
input_tensor = self.conv_shortcut(input_tensor) |
|
|
|
output_tensor = input_tensor + hidden_states |
|
|
|
output_tensor = output_tensor.reshape([*shape[0:-n_dim], -1]) |
|
output_tensor = output_tensor.reshape([*shape[0:-n_dim], *self.out_channels_multidim]) |
|
|
|
return output_tensor |
|
|
|
|
|
|
|
class DownBlockFlat(nn.Layer): |
|
def __init__( |
|
self, |
|
in_channels: int, |
|
out_channels: int, |
|
temb_channels: int, |
|
dropout: float = 0.0, |
|
num_layers: int = 1, |
|
resnet_eps: float = 1e-6, |
|
resnet_time_scale_shift: str = "default", |
|
resnet_act_fn: str = "swish", |
|
resnet_groups: int = 32, |
|
resnet_pre_norm: bool = True, |
|
output_scale_factor=1.0, |
|
add_downsample=True, |
|
downsample_padding=1, |
|
): |
|
super().__init__() |
|
resnets = [] |
|
|
|
for i in range(num_layers): |
|
in_channels = in_channels if i == 0 else out_channels |
|
resnets.append( |
|
ResnetBlockFlat( |
|
in_channels=in_channels, |
|
out_channels=out_channels, |
|
temb_channels=temb_channels, |
|
eps=resnet_eps, |
|
groups=resnet_groups, |
|
dropout=dropout, |
|
time_embedding_norm=resnet_time_scale_shift, |
|
non_linearity=resnet_act_fn, |
|
output_scale_factor=output_scale_factor, |
|
pre_norm=resnet_pre_norm, |
|
) |
|
) |
|
|
|
self.resnets = nn.LayerList(resnets) |
|
|
|
if add_downsample: |
|
self.downsamplers = nn.LayerList( |
|
[ |
|
LinearMultiDim( |
|
out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" |
|
) |
|
] |
|
) |
|
else: |
|
self.downsamplers = None |
|
|
|
self.gradient_checkpointing = False |
|
|
|
def forward(self, hidden_states, temb=None): |
|
output_states = () |
|
|
|
for resnet in self.resnets: |
|
if self.training and self.gradient_checkpointing: |
|
|
|
def create_custom_forward(module): |
|
def custom_forward(*inputs): |
|
return module(*inputs) |
|
|
|
return custom_forward |
|
|
|
hidden_states = recompute(create_custom_forward(resnet), hidden_states, temb) |
|
else: |
|
hidden_states = resnet(hidden_states, temb) |
|
|
|
output_states += (hidden_states,) |
|
|
|
if self.downsamplers is not None: |
|
for downsampler in self.downsamplers: |
|
hidden_states = downsampler(hidden_states) |
|
|
|
output_states += (hidden_states,) |
|
|
|
return hidden_states, output_states |
|
|
|
|
|
|
|
class CrossAttnDownBlockFlat(nn.Layer): |
|
def __init__( |
|
self, |
|
in_channels: int, |
|
out_channels: int, |
|
temb_channels: int, |
|
dropout: float = 0.0, |
|
num_layers: int = 1, |
|
resnet_eps: float = 1e-6, |
|
resnet_time_scale_shift: str = "default", |
|
resnet_act_fn: str = "swish", |
|
resnet_groups: int = 32, |
|
resnet_pre_norm: bool = True, |
|
attn_num_head_channels=1, |
|
cross_attention_dim=1280, |
|
output_scale_factor=1.0, |
|
downsample_padding=1, |
|
add_downsample=True, |
|
dual_cross_attention=False, |
|
use_linear_projection=False, |
|
only_cross_attention=False, |
|
upcast_attention=False, |
|
): |
|
super().__init__() |
|
resnets = [] |
|
attentions = [] |
|
|
|
self.has_cross_attention = True |
|
self.attn_num_head_channels = attn_num_head_channels |
|
|
|
for i in range(num_layers): |
|
in_channels = in_channels if i == 0 else out_channels |
|
resnets.append( |
|
ResnetBlockFlat( |
|
in_channels=in_channels, |
|
out_channels=out_channels, |
|
temb_channels=temb_channels, |
|
eps=resnet_eps, |
|
groups=resnet_groups, |
|
dropout=dropout, |
|
time_embedding_norm=resnet_time_scale_shift, |
|
non_linearity=resnet_act_fn, |
|
output_scale_factor=output_scale_factor, |
|
pre_norm=resnet_pre_norm, |
|
) |
|
) |
|
if not dual_cross_attention: |
|
attentions.append( |
|
Transformer2DModel( |
|
attn_num_head_channels, |
|
out_channels // attn_num_head_channels, |
|
in_channels=out_channels, |
|
num_layers=1, |
|
cross_attention_dim=cross_attention_dim, |
|
norm_num_groups=resnet_groups, |
|
use_linear_projection=use_linear_projection, |
|
only_cross_attention=only_cross_attention, |
|
upcast_attention=upcast_attention, |
|
) |
|
) |
|
else: |
|
attentions.append( |
|
DualTransformer2DModel( |
|
attn_num_head_channels, |
|
out_channels // attn_num_head_channels, |
|
in_channels=out_channels, |
|
num_layers=1, |
|
cross_attention_dim=cross_attention_dim, |
|
norm_num_groups=resnet_groups, |
|
) |
|
) |
|
self.attentions = nn.LayerList(attentions) |
|
self.resnets = nn.LayerList(resnets) |
|
|
|
if add_downsample: |
|
self.downsamplers = nn.LayerList( |
|
[ |
|
LinearMultiDim( |
|
out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" |
|
) |
|
] |
|
) |
|
else: |
|
self.downsamplers = None |
|
|
|
self.gradient_checkpointing = False |
|
|
|
def forward( |
|
self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, cross_attention_kwargs=None |
|
): |
|
output_states = () |
|
|
|
for resnet, attn in zip(self.resnets, self.attentions): |
|
if self.training and self.gradient_checkpointing: |
|
|
|
def create_custom_forward(module, return_dict=None): |
|
def custom_forward(*inputs): |
|
if return_dict is not None: |
|
return module(*inputs, return_dict=return_dict)[0] |
|
else: |
|
return module(*inputs) |
|
|
|
return custom_forward |
|
|
|
hidden_states = recompute(create_custom_forward(resnet), hidden_states, temb) |
|
hidden_states = recompute( |
|
create_custom_forward(attn, return_dict=False), |
|
hidden_states, |
|
encoder_hidden_states, |
|
cross_attention_kwargs, |
|
) |
|
else: |
|
hidden_states = resnet(hidden_states, temb) |
|
hidden_states = attn( |
|
hidden_states, |
|
encoder_hidden_states=encoder_hidden_states, |
|
cross_attention_kwargs=cross_attention_kwargs, |
|
).sample |
|
output_states += (hidden_states,) |
|
|
|
if self.downsamplers is not None: |
|
for downsampler in self.downsamplers: |
|
hidden_states = downsampler(hidden_states) |
|
|
|
output_states += (hidden_states,) |
|
|
|
return hidden_states, output_states |
|
|
|
|
|
|
|
class UpBlockFlat(nn.Layer): |
|
def __init__( |
|
self, |
|
in_channels: int, |
|
prev_output_channel: int, |
|
out_channels: int, |
|
temb_channels: int, |
|
dropout: float = 0.0, |
|
num_layers: int = 1, |
|
resnet_eps: float = 1e-6, |
|
resnet_time_scale_shift: str = "default", |
|
resnet_act_fn: str = "swish", |
|
resnet_groups: int = 32, |
|
resnet_pre_norm: bool = True, |
|
output_scale_factor=1.0, |
|
add_upsample=True, |
|
): |
|
super().__init__() |
|
resnets = [] |
|
|
|
for i in range(num_layers): |
|
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels |
|
resnet_in_channels = prev_output_channel if i == 0 else out_channels |
|
|
|
resnets.append( |
|
ResnetBlockFlat( |
|
in_channels=resnet_in_channels + res_skip_channels, |
|
out_channels=out_channels, |
|
temb_channels=temb_channels, |
|
eps=resnet_eps, |
|
groups=resnet_groups, |
|
dropout=dropout, |
|
time_embedding_norm=resnet_time_scale_shift, |
|
non_linearity=resnet_act_fn, |
|
output_scale_factor=output_scale_factor, |
|
pre_norm=resnet_pre_norm, |
|
) |
|
) |
|
|
|
self.resnets = nn.LayerList(resnets) |
|
|
|
if add_upsample: |
|
self.upsamplers = nn.LayerList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels)]) |
|
else: |
|
self.upsamplers = None |
|
|
|
self.gradient_checkpointing = False |
|
|
|
def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None): |
|
for resnet in self.resnets: |
|
|
|
res_hidden_states = res_hidden_states_tuple[-1] |
|
res_hidden_states_tuple = res_hidden_states_tuple[:-1] |
|
hidden_states = paddle.concat([hidden_states, res_hidden_states], axis=1) |
|
|
|
if self.training and self.gradient_checkpointing: |
|
|
|
def create_custom_forward(module): |
|
def custom_forward(*inputs): |
|
return module(*inputs) |
|
|
|
return custom_forward |
|
|
|
hidden_states = recompute(create_custom_forward(resnet), hidden_states, temb) |
|
else: |
|
hidden_states = resnet(hidden_states, temb) |
|
|
|
if self.upsamplers is not None: |
|
for upsampler in self.upsamplers: |
|
hidden_states = upsampler(hidden_states, upsample_size) |
|
|
|
return hidden_states |
|
|
|
|
|
|
|
class CrossAttnUpBlockFlat(nn.Layer): |
|
def __init__( |
|
self, |
|
in_channels: int, |
|
out_channels: int, |
|
prev_output_channel: int, |
|
temb_channels: int, |
|
dropout: float = 0.0, |
|
num_layers: int = 1, |
|
resnet_eps: float = 1e-6, |
|
resnet_time_scale_shift: str = "default", |
|
resnet_act_fn: str = "swish", |
|
resnet_groups: int = 32, |
|
resnet_pre_norm: bool = True, |
|
attn_num_head_channels=1, |
|
cross_attention_dim=1280, |
|
output_scale_factor=1.0, |
|
add_upsample=True, |
|
dual_cross_attention=False, |
|
use_linear_projection=False, |
|
only_cross_attention=False, |
|
upcast_attention=False, |
|
): |
|
super().__init__() |
|
resnets = [] |
|
attentions = [] |
|
|
|
self.has_cross_attention = True |
|
self.attn_num_head_channels = attn_num_head_channels |
|
|
|
for i in range(num_layers): |
|
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels |
|
resnet_in_channels = prev_output_channel if i == 0 else out_channels |
|
|
|
resnets.append( |
|
ResnetBlockFlat( |
|
in_channels=resnet_in_channels + res_skip_channels, |
|
out_channels=out_channels, |
|
temb_channels=temb_channels, |
|
eps=resnet_eps, |
|
groups=resnet_groups, |
|
dropout=dropout, |
|
time_embedding_norm=resnet_time_scale_shift, |
|
non_linearity=resnet_act_fn, |
|
output_scale_factor=output_scale_factor, |
|
pre_norm=resnet_pre_norm, |
|
) |
|
) |
|
if not dual_cross_attention: |
|
attentions.append( |
|
Transformer2DModel( |
|
attn_num_head_channels, |
|
out_channels // attn_num_head_channels, |
|
in_channels=out_channels, |
|
num_layers=1, |
|
cross_attention_dim=cross_attention_dim, |
|
norm_num_groups=resnet_groups, |
|
use_linear_projection=use_linear_projection, |
|
only_cross_attention=only_cross_attention, |
|
upcast_attention=upcast_attention, |
|
) |
|
) |
|
else: |
|
attentions.append( |
|
DualTransformer2DModel( |
|
attn_num_head_channels, |
|
out_channels // attn_num_head_channels, |
|
in_channels=out_channels, |
|
num_layers=1, |
|
cross_attention_dim=cross_attention_dim, |
|
norm_num_groups=resnet_groups, |
|
) |
|
) |
|
self.attentions = nn.LayerList(attentions) |
|
self.resnets = nn.LayerList(resnets) |
|
|
|
if add_upsample: |
|
self.upsamplers = nn.LayerList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels)]) |
|
else: |
|
self.upsamplers = None |
|
|
|
self.gradient_checkpointing = False |
|
|
|
def forward( |
|
self, |
|
hidden_states, |
|
res_hidden_states_tuple, |
|
temb=None, |
|
encoder_hidden_states=None, |
|
cross_attention_kwargs=None, |
|
upsample_size=None, |
|
attention_mask=None, |
|
): |
|
|
|
for resnet, attn in zip(self.resnets, self.attentions): |
|
|
|
res_hidden_states = res_hidden_states_tuple[-1] |
|
res_hidden_states_tuple = res_hidden_states_tuple[:-1] |
|
hidden_states = paddle.concat([hidden_states, res_hidden_states], axis=1) |
|
|
|
if self.training and self.gradient_checkpointing: |
|
|
|
def create_custom_forward(module, return_dict=None): |
|
def custom_forward(*inputs): |
|
if return_dict is not None: |
|
return module(*inputs, return_dict=return_dict)[0] |
|
else: |
|
return module(*inputs) |
|
|
|
return custom_forward |
|
|
|
hidden_states = recompute(create_custom_forward(resnet), hidden_states, temb) |
|
hidden_states = recompute( |
|
create_custom_forward(attn, return_dict=False), |
|
hidden_states, |
|
encoder_hidden_states, |
|
cross_attention_kwargs, |
|
) |
|
else: |
|
hidden_states = resnet(hidden_states, temb) |
|
hidden_states = attn( |
|
hidden_states, |
|
encoder_hidden_states=encoder_hidden_states, |
|
cross_attention_kwargs=cross_attention_kwargs, |
|
).sample |
|
|
|
if self.upsamplers is not None: |
|
for upsampler in self.upsamplers: |
|
hidden_states = upsampler(hidden_states, upsample_size) |
|
|
|
return hidden_states |
|
|
|
|
|
|
|
class UNetMidBlockFlatCrossAttn(nn.Layer): |
|
def __init__( |
|
self, |
|
in_channels: int, |
|
temb_channels: int, |
|
dropout: float = 0.0, |
|
num_layers: int = 1, |
|
resnet_eps: float = 1e-6, |
|
resnet_time_scale_shift: str = "default", |
|
resnet_act_fn: str = "swish", |
|
resnet_groups: int = 32, |
|
resnet_pre_norm: bool = True, |
|
attn_num_head_channels=1, |
|
output_scale_factor=1.0, |
|
cross_attention_dim=1280, |
|
dual_cross_attention=False, |
|
use_linear_projection=False, |
|
upcast_attention=False, |
|
): |
|
super().__init__() |
|
|
|
self.has_cross_attention = True |
|
self.attn_num_head_channels = attn_num_head_channels |
|
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) |
|
|
|
|
|
resnets = [ |
|
ResnetBlockFlat( |
|
in_channels=in_channels, |
|
out_channels=in_channels, |
|
temb_channels=temb_channels, |
|
eps=resnet_eps, |
|
groups=resnet_groups, |
|
dropout=dropout, |
|
time_embedding_norm=resnet_time_scale_shift, |
|
non_linearity=resnet_act_fn, |
|
output_scale_factor=output_scale_factor, |
|
pre_norm=resnet_pre_norm, |
|
) |
|
] |
|
attentions = [] |
|
|
|
for _ in range(num_layers): |
|
if not dual_cross_attention: |
|
attentions.append( |
|
Transformer2DModel( |
|
attn_num_head_channels, |
|
in_channels // attn_num_head_channels, |
|
in_channels=in_channels, |
|
num_layers=1, |
|
cross_attention_dim=cross_attention_dim, |
|
norm_num_groups=resnet_groups, |
|
use_linear_projection=use_linear_projection, |
|
upcast_attention=upcast_attention, |
|
) |
|
) |
|
else: |
|
attentions.append( |
|
DualTransformer2DModel( |
|
attn_num_head_channels, |
|
in_channels // attn_num_head_channels, |
|
in_channels=in_channels, |
|
num_layers=1, |
|
cross_attention_dim=cross_attention_dim, |
|
norm_num_groups=resnet_groups, |
|
) |
|
) |
|
resnets.append( |
|
ResnetBlockFlat( |
|
in_channels=in_channels, |
|
out_channels=in_channels, |
|
temb_channels=temb_channels, |
|
eps=resnet_eps, |
|
groups=resnet_groups, |
|
dropout=dropout, |
|
time_embedding_norm=resnet_time_scale_shift, |
|
non_linearity=resnet_act_fn, |
|
output_scale_factor=output_scale_factor, |
|
pre_norm=resnet_pre_norm, |
|
) |
|
) |
|
|
|
self.attentions = nn.LayerList(attentions) |
|
self.resnets = nn.LayerList(resnets) |
|
|
|
def forward( |
|
self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, cross_attention_kwargs=None |
|
): |
|
hidden_states = self.resnets[0](hidden_states, temb) |
|
for attn, resnet in zip(self.attentions, self.resnets[1:]): |
|
hidden_states = attn( |
|
hidden_states, |
|
encoder_hidden_states=encoder_hidden_states, |
|
cross_attention_kwargs=cross_attention_kwargs, |
|
).sample |
|
hidden_states = resnet(hidden_states, temb) |
|
|
|
return hidden_states |
|
|
|
|
|
|
|
class UNetMidBlockFlatSimpleCrossAttn(nn.Layer): |
|
def __init__( |
|
self, |
|
in_channels: int, |
|
temb_channels: int, |
|
dropout: float = 0.0, |
|
num_layers: int = 1, |
|
resnet_eps: float = 1e-6, |
|
resnet_time_scale_shift: str = "default", |
|
resnet_act_fn: str = "swish", |
|
resnet_groups: int = 32, |
|
resnet_pre_norm: bool = True, |
|
attn_num_head_channels=1, |
|
output_scale_factor=1.0, |
|
cross_attention_dim=1280, |
|
): |
|
super().__init__() |
|
|
|
self.has_cross_attention = True |
|
|
|
self.attn_num_head_channels = attn_num_head_channels |
|
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) |
|
|
|
self.num_heads = in_channels // self.attn_num_head_channels |
|
|
|
|
|
resnets = [ |
|
ResnetBlockFlat( |
|
in_channels=in_channels, |
|
out_channels=in_channels, |
|
temb_channels=temb_channels, |
|
eps=resnet_eps, |
|
groups=resnet_groups, |
|
dropout=dropout, |
|
time_embedding_norm=resnet_time_scale_shift, |
|
non_linearity=resnet_act_fn, |
|
output_scale_factor=output_scale_factor, |
|
pre_norm=resnet_pre_norm, |
|
) |
|
] |
|
attentions = [] |
|
|
|
for _ in range(num_layers): |
|
attentions.append( |
|
CrossAttention( |
|
query_dim=in_channels, |
|
cross_attention_dim=in_channels, |
|
heads=self.num_heads, |
|
dim_head=attn_num_head_channels, |
|
added_kv_proj_dim=cross_attention_dim, |
|
norm_num_groups=resnet_groups, |
|
bias=True, |
|
upcast_softmax=True, |
|
processor=CrossAttnAddedKVProcessor(), |
|
) |
|
) |
|
resnets.append( |
|
ResnetBlockFlat( |
|
in_channels=in_channels, |
|
out_channels=in_channels, |
|
temb_channels=temb_channels, |
|
eps=resnet_eps, |
|
groups=resnet_groups, |
|
dropout=dropout, |
|
time_embedding_norm=resnet_time_scale_shift, |
|
non_linearity=resnet_act_fn, |
|
output_scale_factor=output_scale_factor, |
|
pre_norm=resnet_pre_norm, |
|
) |
|
) |
|
|
|
self.attentions = nn.LayerList(attentions) |
|
self.resnets = nn.LayerList(resnets) |
|
|
|
def forward( |
|
self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, cross_attention_kwargs=None |
|
): |
|
cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} |
|
hidden_states = self.resnets[0](hidden_states, temb) |
|
for attn, resnet in zip(self.attentions, self.resnets[1:]): |
|
|
|
hidden_states = attn( |
|
hidden_states, |
|
encoder_hidden_states=encoder_hidden_states, |
|
attention_mask=attention_mask, |
|
**cross_attention_kwargs, |
|
) |
|
|
|
|
|
hidden_states = resnet(hidden_states, temb) |
|
|
|
return hidden_states |
|
|