import numbers
from typing import Dict, Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from diffusers.utils import is_torch_version


# `nn.LayerNorm` only exposes a `bias` argument on torch >= 2.1.0, so older
# versions fall back to the minimal re-implementation below.
if is_torch_version(">=", "2.1.0"):
    LayerNorm = nn.LayerNorm
else:

    class LayerNorm(nn.Module):
        def __init__(self, dim, eps: float = 1e-5, elementwise_affine: bool = True, bias: bool = True):
            super().__init__()

            self.eps = eps

            if isinstance(dim, numbers.Integral):
                dim = (dim,)

            self.dim = torch.Size(dim)

            if elementwise_affine:
                self.weight = nn.Parameter(torch.ones(dim))
                self.bias = nn.Parameter(torch.zeros(dim)) if bias else None
            else:
                self.weight = None
                self.bias = None

        def forward(self, input):
            return F.layer_norm(input, self.dim, self.weight, self.bias, self.eps)


class RMSNorm(nn.Module):
    def __init__(self, dim, eps: float, elementwise_affine: bool = True):
        super().__init__()

        self.eps = eps

        if isinstance(dim, numbers.Integral):
            dim = (dim,)

        self.dim = torch.Size(dim)

        if elementwise_affine:
            self.weight = nn.Parameter(torch.ones(dim))
        else:
            self.weight = None

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        # Compute the mean square in float32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)

        if self.weight is not None:
            # Convert back to half precision if the affine weight is half precision.
            if self.weight.dtype in [torch.float16, torch.bfloat16]:
                hidden_states = hidden_states.to(self.weight.dtype)
            hidden_states = hidden_states * self.weight

        hidden_states = hidden_states.to(input_dtype)

        return hidden_states
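
# Illustrative usage of RMSNorm (a sketch; the shapes and values below are
# assumptions, not taken from this module):
#   norm = RMSNorm(dim=128, eps=1e-6)
#   y = norm(torch.randn(2, 16, 128))  # RMS-normalized over the last dimension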


class AdaLayerNormContinuous(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        conditioning_embedding_dim: int,
        elementwise_affine=True,
        eps=1e-5,
        bias=True,
        norm_type="layer_norm",
    ):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias)
        if norm_type == "layer_norm":
            self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias)
        elif norm_type == "rms_norm":
            self.norm = RMSNorm(embedding_dim, eps, elementwise_affine)
        else:
            raise ValueError(f"unknown norm_type {norm_type}")

    def forward_with_pad(self, x: torch.Tensor, conditioning_embedding: torch.Tensor, hidden_length=None) -> torch.Tensor:
        assert hidden_length is not None

        emb = self.linear(self.silu(conditioning_embedding).to(x.dtype))
        batch_emb = torch.zeros_like(x).repeat(1, 1, 2)

        # `x` packs the tokens of several stages along the sequence dimension, while
        # `conditioning_embedding` stacks the per-stage embeddings along the batch
        # dimension (stages interleaved). Scatter each stage's scale/shift onto its
        # own token segment.
        i_sum = 0
        num_stages = len(hidden_length)
        for i_p, length in enumerate(hidden_length):
            batch_emb[:, i_sum:i_sum + length] = emb[i_p::num_stages][:, None]
            i_sum += length

        batch_scale, batch_shift = torch.chunk(batch_emb, 2, dim=2)
        x = self.norm(x) * (1 + batch_scale) + batch_shift
        return x

    def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor, hidden_length=None) -> torch.Tensor:
        if hidden_length is not None:
            return self.forward_with_pad(x, conditioning_embedding, hidden_length)
        emb = self.linear(self.silu(conditioning_embedding).to(x.dtype))
        scale, shift = torch.chunk(emb, 2, dim=1)
        x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :]
        return x
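

# Illustrative usage of AdaLayerNormContinuous (a sketch; all shapes below are
# assumptions, not taken from this module):
#   ada_norm = AdaLayerNormContinuous(embedding_dim=128, conditioning_embedding_dim=256)
#   # Dense path: x is (batch, seq_len, embedding_dim), conditioning is (batch, cond_dim).
#   out = ada_norm(torch.randn(2, 16, 128), torch.randn(2, 256))
#   # Packed path: two stages of lengths [4, 2] share one sequence; the conditioning
#   # stacks per-(sample, stage) embeddings along the batch dimension, stages interleaved.
#   out = ada_norm(torch.randn(2, 6, 128), torch.randn(2 * 2, 256), hidden_length=[4, 2])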


class AdaLayerNormZero(nn.Module):
    r"""
    Norm layer adaptive layer norm zero (adaLN-Zero).

    Parameters:
        embedding_dim (`int`): The size of each embedding vector.
        num_embeddings (`int`): The size of the embeddings dictionary.
    """

    def __init__(self, embedding_dim: int, num_embeddings: Optional[int] = None):
        super().__init__()
        self.emb = None  # no built-in conditioning embedder; forward expects a pre-computed `emb`
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward_with_pad(
        self,
        x: torch.Tensor,
        timestep: Optional[torch.Tensor] = None,
        class_labels: Optional[torch.LongTensor] = None,
        hidden_dtype: Optional[torch.dtype] = None,
        emb: Optional[torch.Tensor] = None,
        hidden_length: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        if self.emb is not None:
            emb = self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)

        emb = self.linear(self.silu(emb))
        batch_emb = torch.zeros_like(x).repeat(1, 1, 6)

        # Scatter the per-stage modulation parameters onto the matching token segments
        # of the packed sequence (same layout as AdaLayerNormContinuous.forward_with_pad).
        i_sum = 0
        num_stages = len(hidden_length)
        for i_p, length in enumerate(hidden_length):
            batch_emb[:, i_sum:i_sum + length] = emb[i_p::num_stages][:, None]
            i_sum += length

        batch_shift_msa, batch_scale_msa, batch_gate_msa, batch_shift_mlp, batch_scale_mlp, batch_gate_mlp = batch_emb.chunk(6, dim=2)
        x = self.norm(x) * (1 + batch_scale_msa) + batch_shift_msa
        return x, batch_gate_msa, batch_shift_mlp, batch_scale_mlp, batch_gate_mlp

    def forward(
        self,
        x: torch.Tensor,
        timestep: Optional[torch.Tensor] = None,
        class_labels: Optional[torch.LongTensor] = None,
        hidden_dtype: Optional[torch.dtype] = None,
        emb: Optional[torch.Tensor] = None,
        hidden_length: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        if hidden_length is not None:
            return self.forward_with_pad(x, timestep, class_labels, hidden_dtype, emb, hidden_length)
        if self.emb is not None:
            emb = self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)
        emb = self.linear(self.silu(emb))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
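

# Illustrative usage of AdaLayerNormZero (a sketch; shapes are assumptions). With
# `self.emb` left as `None`, a pre-computed conditioning embedding of shape
# (batch, embedding_dim) must be passed in:
#   ada_zero = AdaLayerNormZero(embedding_dim=128)
#   x, gate_msa, shift_mlp, scale_mlp, gate_mlp = ada_zero(
#       torch.randn(2, 16, 128), emb=torch.randn(2, 128)
#   )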