dit.py

  1. """
  2. ein notation:
  3. b - batch
  4. n - sequence
  5. nt - text sequence
  6. nw - raw wave length
  7. d - dimension
  8. """
  9. from __future__ import annotations
  10. import torch
  11. from torch import nn
  12. import torch.nn.functional as F
  13. from einops import repeat
  14. from x_transformers.x_transformers import RotaryEmbedding
  15. from cosyvoice.utils.mask import add_optional_chunk_mask
  16. from cosyvoice.flow.DiT.modules import (
  17. TimestepEmbedding,
  18. ConvNeXtV2Block,
  19. CausalConvPositionEmbedding,
  20. DiTBlock,
  21. AdaLayerNormZero_Final,
  22. precompute_freqs_cis,
  23. get_pos_embed_indices,
  24. )


# Text embedding
class TextEmbedding(nn.Module):
    def __init__(self, text_num_embeds, text_dim, conv_layers=0, conv_mult=2):
        super().__init__()
        self.text_embed = nn.Embedding(text_num_embeds + 1, text_dim)  # use 0 as filler token

        if conv_layers > 0:
            self.extra_modeling = True
            self.precompute_max_pos = 4096  # ~44s of 24khz audio
            self.register_buffer("freqs_cis", precompute_freqs_cis(text_dim, self.precompute_max_pos), persistent=False)
            self.text_blocks = nn.Sequential(
                *[ConvNeXtV2Block(text_dim, text_dim * conv_mult) for _ in range(conv_layers)]
            )
        else:
            self.extra_modeling = False

    def forward(self, text: int["b nt"], seq_len, drop_text=False):  # noqa: F722
        batch = text.shape[0]
        text = text + 1  # use 0 as filler token. preprocess of batch pad -1, see list_str_to_idx()
        text = text[:, :seq_len]  # curtail if character tokens are more than the mel spec tokens
        text_len = text.shape[1]  # measure after curtailing so the pad amount below cannot go negative
        text = F.pad(text, (0, seq_len - text_len), value=0)

        if drop_text:  # cfg for text
            text = torch.zeros_like(text)

        text = self.text_embed(text)  # b n -> b n d

        # possible extra modeling
        if self.extra_modeling:
            # sinus pos emb
            batch_start = torch.zeros((batch,), dtype=torch.long)
            pos_idx = get_pos_embed_indices(batch_start, seq_len, max_pos=self.precompute_max_pos)
            text_pos_embed = self.freqs_cis[pos_idx]
            text = text + text_pos_embed

            # convnextv2 blocks
            text = self.text_blocks(text)

        return text
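

# Hedged usage sketch for TextEmbedding (added for illustration, not part of the
# original module and never called here); the vocabulary size, dims, and lengths
# below are assumed values, not taken from a CosyVoice config.
def _example_text_embedding():
    embed = TextEmbedding(text_num_embeds=512, text_dim=512, conv_layers=4)
    tokens = torch.randint(0, 512, (2, 60))  # (b, nt) token ids; batch padding of -1 becomes filler 0 after the +1 shift
    out = embed(tokens, seq_len=100)  # curtailed/padded to the mel length: (2, 100, 512)
    return out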


# noised input audio and context mixing embedding
class InputEmbedding(nn.Module):
    def __init__(self, mel_dim, text_dim, out_dim, spk_dim=None):
        super().__init__()
        spk_dim = 0 if spk_dim is None else spk_dim
        self.spk_dim = spk_dim
        self.proj = nn.Linear(mel_dim * 2 + text_dim + spk_dim, out_dim)
        self.conv_pos_embed = CausalConvPositionEmbedding(dim=out_dim)

    def forward(
        self,
        x: float["b n d"],
        cond: float["b n d"],
        text_embed: float["b n d"],
        spks: float["b d"],
    ):
        to_cat = [x, cond, text_embed]
        if self.spk_dim > 0:
            spks = repeat(spks, "b c -> b t c", t=x.shape[1])  # broadcast speaker embedding over time
            to_cat.append(spks)
        x = self.proj(torch.cat(to_cat, dim=-1))
        x = self.conv_pos_embed(x) + x
        return x
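

# Hedged usage sketch for InputEmbedding (illustrative only, never called here);
# all sizes are assumptions. It concatenates the noised mel `x`, the masked
# conditioning mel `cond`, the text/mu features, and a speaker embedding
# broadcast over time, then projects to the model width.
def _example_input_embedding():
    embed = InputEmbedding(mel_dim=80, text_dim=80, out_dim=256, spk_dim=192)
    x = torch.randn(2, 100, 80)  # noised input audio (b, n, mel_dim)
    cond = torch.randn(2, 100, 80)  # masked conditioning audio
    text_embed = torch.randn(2, 100, 80)  # text / mu features
    spks = torch.randn(2, 192)  # per-utterance speaker embedding (b, spk_dim)
    return embed(x, cond, text_embed, spks)  # (2, 100, 256)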


# Transformer backbone using DiT blocks
class DiT(nn.Module):
    def __init__(
        self,
        *,
        dim,
        depth=8,
        heads=8,
        dim_head=64,
        dropout=0.1,
        ff_mult=4,
        mel_dim=80,
        mu_dim=None,
        long_skip_connection=False,
        spk_dim=None,
        out_channels=None,
        static_chunk_size=50,
        num_decoding_left_chunks=2,
    ):
        super().__init__()
        self.time_embed = TimestepEmbedding(dim)
        if mu_dim is None:
            mu_dim = mel_dim
        self.input_embed = InputEmbedding(mel_dim, mu_dim, dim, spk_dim)
        self.rotary_embed = RotaryEmbedding(dim_head)

        self.dim = dim
        self.depth = depth

        self.transformer_blocks = nn.ModuleList(
            [DiTBlock(dim=dim, heads=heads, dim_head=dim_head, ff_mult=ff_mult, dropout=dropout) for _ in range(depth)]
        )
        self.long_skip_connection = nn.Linear(dim * 2, dim, bias=False) if long_skip_connection else None

        self.norm_out = AdaLayerNormZero_Final(dim)  # final modulation
        self.proj_out = nn.Linear(dim, mel_dim)
        self.out_channels = out_channels
        self.static_chunk_size = static_chunk_size
        self.num_decoding_left_chunks = num_decoding_left_chunks

    def forward(self, x, mask, mu, t, spks=None, cond=None, streaming=False):
        # inputs arrive channel-first (b, d, n); transpose to (b, n, d) for attention
        x = x.transpose(1, 2)
        mu = mu.transpose(1, 2)
        cond = cond.transpose(1, 2)
        spks = spks.unsqueeze(dim=1)
        batch, seq_len = x.shape[0], x.shape[1]
        if t.ndim == 0:
            t = t.repeat(batch)  # broadcast a scalar timestep to the batch

        # t: conditioning time, c: context (text + masked cond audio), x: noised input audio
        t = self.time_embed(t)
        x = self.input_embed(x, cond, mu, spks.squeeze(1))

        rope = self.rotary_embed.forward_from_seq_len(seq_len)

        if self.long_skip_connection is not None:
            residual = x

        # streaming uses a static chunk-causal attention mask; offline attends over all valid frames
        if streaming is True:
            attn_mask = add_optional_chunk_mask(x, mask.bool(), False, False, 0, self.static_chunk_size, -1).unsqueeze(dim=1)
        else:
            attn_mask = add_optional_chunk_mask(x, mask.bool(), False, False, 0, 0, -1).repeat(1, x.size(1), 1).unsqueeze(dim=1)

        for block in self.transformer_blocks:
            x = block(x, t, mask=attn_mask.bool(), rope=rope)

        if self.long_skip_connection is not None:
            x = self.long_skip_connection(torch.cat((x, residual), dim=-1))

        x = self.norm_out(x, t)
        output = self.proj_out(x).transpose(1, 2)
        return output
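

# Hedged end-to-end sketch (illustrative only, never called here): DiT.forward
# takes channel-first (b, d, n) tensors, per the transposes above; every size
# below is an assumed value, not a CosyVoice config.
def _example_dit_forward():
    model = DiT(dim=256, depth=4, heads=8, dim_head=64, mel_dim=80, spk_dim=192)
    b, n = 2, 100
    x = torch.randn(b, 80, n)  # noised mel, channel-first
    mu = torch.randn(b, 80, n)  # text-derived features (mu_dim defaults to mel_dim)
    cond = torch.randn(b, 80, n)  # masked reference mel
    mask = torch.ones(b, 1, n)  # valid-frame mask
    t = torch.rand(())  # scalar flow-matching timestep, broadcast inside forward
    spks = torch.randn(b, 192)
    return model(x, mask, mu, t, spks=spks, cond=cond)  # (b, 80, n)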