# upsample_encoder.py
# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
#               2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn)
#               2024 Alibaba Inc (Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)
"""Encoder definition."""
from typing import Tuple

import torch
from torch import nn
from torch.nn import functional as F

from cosyvoice.transformer.convolution import ConvolutionModule
from cosyvoice.transformer.encoder_layer import ConformerEncoderLayer
from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward
from cosyvoice.utils.class_utils import (
    COSYVOICE_EMB_CLASSES,
    COSYVOICE_SUBSAMPLE_CLASSES,
    COSYVOICE_ATTENTION_CLASSES,
    COSYVOICE_ACTIVATION_CLASSES,
)
from cosyvoice.utils.mask import make_pad_mask
from cosyvoice.utils.mask import add_optional_chunk_mask


class Upsample1D(nn.Module):
    """A 1D upsampling layer with a convolution.

    Parameters:
        channels (`int`):
            number of channels in the inputs and outputs.
        out_channels (`int`):
            number of output channels.
        stride (`int`, default `2`):
            upsampling factor used by the nearest-neighbour interpolation.
    """

    def __init__(self, channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels
        self.stride = stride
        # In this mode, first repeat-interpolate, then conv with stride=1
        self.conv = nn.Conv1d(self.channels, self.out_channels, stride * 2 + 1, stride=1, padding=0)

    def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor):
        outputs = F.interpolate(inputs, scale_factor=float(self.stride), mode="nearest")
        # left-pad by stride * 2 so the kernel of size stride * 2 + 1 keeps the
        # upsampled length and only looks at current and past frames
        outputs = F.pad(outputs, (self.stride * 2, 0), value=0.0)
        outputs = self.conv(outputs)
        return outputs, input_lengths * self.stride
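
# Shape sketch for Upsample1D (illustrative, not part of the original file):
# with the default stride of 2, nearest-neighbour interpolation doubles the
# time axis, the left pad adds stride * 2 = 4 frames, and the kernel of size
# stride * 2 + 1 = 5 removes those 4 again, so the output length is exactly
# T * stride and the convolution only sees current and past frames:
#
#     up = Upsample1D(channels=512, out_channels=512, stride=2)
#     x = torch.randn(2, 512, 100)                 # (B, C, T)
#     y, y_lens = up(x, torch.tensor([100, 100]))
#     assert y.shape == (2, 512, 200)              # T * stride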


class PreLookaheadLayer(nn.Module):
    def __init__(self, channels: int, pre_lookahead_len: int = 1):
        super().__init__()
        self.channels = channels
        self.pre_lookahead_len = pre_lookahead_len
        self.conv1 = nn.Conv1d(
            channels, channels,
            kernel_size=pre_lookahead_len + 1,
            stride=1, padding=0,
        )
        self.conv2 = nn.Conv1d(
            channels, channels,
            kernel_size=3, stride=1, padding=0,
        )

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """
        inputs: (batch_size, seq_len, channels)
        """
        outputs = inputs.transpose(1, 2).contiguous()
        # look ahead: right-pad by pre_lookahead_len so each output frame of
        # the kernel-(pre_lookahead_len + 1) conv sees that many future frames
        outputs = F.pad(outputs, (0, self.pre_lookahead_len), mode='constant', value=0.0)
        outputs = F.leaky_relu(self.conv1(outputs))
        # causal conv: left-pad by 2 for the kernel-3 convolution
        outputs = F.pad(outputs, (2, 0), mode='constant', value=0.0)
        outputs = self.conv2(outputs)
        outputs = outputs.transpose(1, 2).contiguous()
        # residual connection
        outputs = outputs + inputs
        return outputs
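
# Receptive-field sketch for PreLookaheadLayer (illustrative, not part of the
# original file): because of the right padding, output frame t of conv1 spans
# input frames t .. t + pre_lookahead_len, i.e. the layer peeks exactly
# pre_lookahead_len frames into the future, while conv2 is purely causal and
# the residual connection preserves the input shape:
#
#     layer = PreLookaheadLayer(channels=512, pre_lookahead_len=3)
#     x = torch.randn(2, 100, 512)                 # (B, T, C)
#     assert layer(x).shape == x.shape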


class UpsampleConformerEncoder(torch.nn.Module):
    """Conformer encoder with a pre-lookahead layer and a stack of conformer
    blocks, followed by a stride-2 upsampling layer and four more conformer
    blocks on the upsampled stream.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: str = "conv2d",
        pos_enc_layer_type: str = "rel_pos",
        normalize_before: bool = True,
        static_chunk_size: int = 0,
        use_dynamic_chunk: bool = False,
        global_cmvn: torch.nn.Module = None,
        use_dynamic_left_chunk: bool = False,
        positionwise_conv_kernel_size: int = 1,
        macaron_style: bool = True,
        selfattention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        use_cnn_module: bool = True,
        cnn_module_kernel: int = 15,
        causal: bool = False,
        cnn_module_norm: str = "batch_norm",
        key_bias: bool = True,
        gradient_checkpointing: bool = False,
    ):
        """
        Args:
            input_size (int): input dim
            output_size (int): dimension of attention
            attention_heads (int): the number of heads of multi head attention
            linear_units (int): the hidden units number of position-wise feed
                forward
            num_blocks (int): the number of encoder blocks
            dropout_rate (float): dropout rate
            attention_dropout_rate (float): dropout rate in attention
            positional_dropout_rate (float): dropout rate after adding
                positional encoding
            input_layer (str): input layer type.
                optional [linear, conv2d, conv2d6, conv2d8]
            pos_enc_layer_type (str): Encoder positional encoding layer type.
                optional [abs_pos, scaled_abs_pos, rel_pos, no_pos]
            normalize_before (bool):
                True: use layer_norm before each sub-block of a layer.
                False: use layer_norm after each sub-block of a layer.
            static_chunk_size (int): chunk size for static chunk training and
                decoding
            use_dynamic_chunk (bool): whether to use dynamic chunk size for
                training or not. You can only use a fixed chunk
                (chunk_size > 0) or a dynamic chunk size
                (use_dynamic_chunk = True)
            global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module
            use_dynamic_left_chunk (bool): whether to use dynamic left chunk in
                dynamic chunk training
            key_bias: whether to use bias in attention.linear_k. False for
                whisper models.
            gradient_checkpointing: rerunning a forward-pass segment for each
                checkpointed segment during backward.
        """
        super().__init__()
        self._output_size = output_size
        self.global_cmvn = global_cmvn
        self.embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer](
            input_size,
            output_size,
            dropout_rate,
            COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size,
                                                      positional_dropout_rate),
        )
        self.normalize_before = normalize_before
        self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5)
        self.static_chunk_size = static_chunk_size
        self.use_dynamic_chunk = use_dynamic_chunk
        self.use_dynamic_left_chunk = use_dynamic_left_chunk
        self.gradient_checkpointing = gradient_checkpointing
        activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()
        # self-attention module definition
        encoder_selfattn_layer_args = (
            attention_heads,
            output_size,
            attention_dropout_rate,
            key_bias,
        )
        # feed-forward module definition
        positionwise_layer_args = (
            output_size,
            linear_units,
            dropout_rate,
            activation,
        )
        # convolution module definition
        convolution_layer_args = (output_size, cnn_module_kernel, activation,
                                  cnn_module_norm, causal)
        self.pre_lookahead_layer = PreLookaheadLayer(channels=512, pre_lookahead_len=3)
        self.encoders = torch.nn.ModuleList([
            ConformerEncoderLayer(
                output_size,
                COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
                    *encoder_selfattn_layer_args),
                PositionwiseFeedForward(*positionwise_layer_args),
                PositionwiseFeedForward(
                    *positionwise_layer_args) if macaron_style else None,
                ConvolutionModule(
                    *convolution_layer_args) if use_cnn_module else None,
                dropout_rate,
                normalize_before,
            ) for _ in range(num_blocks)
        ])
        self.up_layer = Upsample1D(channels=512, out_channels=512, stride=2)
        self.up_embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer](
            input_size,
            output_size,
            dropout_rate,
            COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size,
                                                      positional_dropout_rate),
        )
        self.up_encoders = torch.nn.ModuleList([
            ConformerEncoderLayer(
                output_size,
                COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
                    *encoder_selfattn_layer_args),
                PositionwiseFeedForward(*positionwise_layer_args),
                PositionwiseFeedForward(
                    *positionwise_layer_args) if macaron_style else None,
                ConvolutionModule(
                    *convolution_layer_args) if use_cnn_module else None,
                dropout_rate,
                normalize_before,
            ) for _ in range(4)
        ])

    def output_size(self) -> int:
        return self._output_size

    def forward(
        self,
        xs: torch.Tensor,
        xs_lens: torch.Tensor,
        decoding_chunk_size: int = 0,
        num_decoding_left_chunks: int = -1,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Embed positions in tensor.

        Args:
            xs: padded input tensor (B, T, D)
            xs_lens: input length (B)
            decoding_chunk_size: decoding chunk size for dynamic chunk
                0: default for training, use random dynamic chunk.
                <0: for decoding, use full chunk.
                >0: for decoding, use fixed chunk size as set.
            num_decoding_left_chunks: number of left chunks, this is for decoding,
                the chunk size is decoding_chunk_size.
                >=0: use num_decoding_left_chunks
                <0: use all left chunks
        Returns:
            encoder output tensor xs, and upsampled masks
            xs: padded output tensor (B, T' ~= T * upsample_rate, D)
            masks: torch.Tensor batch padding mask after upsampling
                (B, 1, T' ~= T * upsample_rate)
        NOTE(xcsong):
            We pass the `__call__` method of the modules instead of `forward` to the
            checkpointing API because `__call__` attaches all the hooks of the module.
            https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2
        """
        T = xs.size(1)
        masks = ~make_pad_mask(xs_lens, T).unsqueeze(1)  # (B, 1, T)
        if self.global_cmvn is not None:
            xs = self.global_cmvn(xs)
        xs, pos_emb, masks = self.embed(xs, masks)
        mask_pad = masks  # (B, 1, T/subsample_rate)
        chunk_masks = add_optional_chunk_mask(xs, masks,
                                              self.use_dynamic_chunk,
                                              self.use_dynamic_left_chunk,
                                              decoding_chunk_size,
                                              self.static_chunk_size,
                                              num_decoding_left_chunks)
        # lookahead + conformer encoder
        xs = self.pre_lookahead_layer(xs)
        xs = self.forward_layers(xs, chunk_masks, pos_emb, mask_pad)
        # upsample + conformer encoder
        xs = xs.transpose(1, 2).contiguous()
        xs, xs_lens = self.up_layer(xs, xs_lens)
        xs = xs.transpose(1, 2).contiguous()
        # recompute padding mask and positional encoding for the upsampled length
        T = xs.size(1)
        masks = ~make_pad_mask(xs_lens, T).unsqueeze(1)  # (B, 1, T)
        xs, pos_emb, masks = self.up_embed(xs, masks)
        mask_pad = masks  # (B, 1, T/subsample_rate)
        # scale the static chunk size by the upsampling stride so chunk
        # boundaries stay aligned with the pre-upsample stream
        chunk_masks = add_optional_chunk_mask(xs, masks,
                                              self.use_dynamic_chunk,
                                              self.use_dynamic_left_chunk,
                                              decoding_chunk_size,
                                              self.static_chunk_size * self.up_layer.stride,
                                              num_decoding_left_chunks)
        xs = self.forward_up_layers(xs, chunk_masks, pos_emb, mask_pad)
        if self.normalize_before:
            xs = self.after_norm(xs)
        # Here we assume the mask is not changed in encoder layers, so just
        # return the masks before encoder layers, and the masks will be used
        # for cross attention with decoder later
        return xs, masks

    def forward_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
                       pos_emb: torch.Tensor,
                       mask_pad: torch.Tensor) -> torch.Tensor:
        for layer in self.encoders:
            xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
        return xs

    def forward_up_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
                          pos_emb: torch.Tensor,
                          mask_pad: torch.Tensor) -> torch.Tensor:
        for layer in self.up_encoders:
            xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
        return xs
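
# Minimal usage sketch (illustrative, not part of the original file; assumes
# input_size matches the hard-coded 512-channel pre-lookahead/upsample layers
# and that the 'linear' and 'rel_pos' keys are registered in
# COSYVOICE_SUBSAMPLE_CLASSES and COSYVOICE_EMB_CLASSES):
#
#     encoder = UpsampleConformerEncoder(input_size=512, output_size=512,
#                                        input_layer='linear',
#                                        pos_enc_layer_type='rel_pos')
#     xs = torch.randn(2, 100, 512)                # (B, T, D)
#     ys, masks = encoder(xs, torch.tensor([100, 80]))
#     # ys: (2, 200, 512) -- sequence length doubled by the stride-2 up_layer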