# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Dict, Optional, Callable, List, Generator

import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence, unpad_sequence
from transformers import Qwen2ForCausalLM

from cosyvoice.utils.common import IGNORE_ID, th_accuracy
from cosyvoice.transformer.label_smoothing_loss import LabelSmoothingLoss
from cosyvoice.utils.file_utils import logging
from cosyvoice.utils.mask import make_pad_mask


class TransformerLM(torch.nn.Module):
    def __init__(
            self,
            text_encoder_input_size: int,
            llm_input_size: int,
            llm_output_size: int,
            text_token_size: int,
            speech_token_size: int,
            text_encoder: torch.nn.Module,
            llm: torch.nn.Module,
            sampling: Callable,
            length_normalized_loss: bool = True,
            lsm_weight: float = 0.0,
            spk_embed_dim: int = 192,
    ):
        super().__init__()
        self.llm_input_size = llm_input_size
        self.speech_token_size = speech_token_size
        # 1. build text token inputs related modules
        self.text_embedding = torch.nn.Embedding(text_token_size, text_encoder_input_size)
        self.text_encoder = text_encoder
        self.text_encoder_affine_layer = nn.Linear(
            self.text_encoder.output_size(),
            llm_input_size
        )
        # 2. build speech token language model related modules
        self.sos_eos = 0
        self.task_id = 1
        self.llm_embedding = torch.nn.Embedding(2, llm_input_size)
        self.llm = llm
        self.llm_decoder = nn.Linear(llm_output_size, speech_token_size + 1)
        self.criterion_ce = LabelSmoothingLoss(
            size=speech_token_size + 1,
            padding_idx=IGNORE_ID,
            smoothing=lsm_weight,
            normalize_length=length_normalized_loss,
        )
        # 3. [Optional] build speech token related modules
        self.speech_embedding = torch.nn.Embedding(speech_token_size, llm_input_size)
        self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, llm_input_size)
        # 4. sampling method
        self.sampling = sampling

    def encode(
            self,
            text: torch.Tensor,
            text_lengths: torch.Tensor,
    ):
        encoder_out, encoder_mask = self.text_encoder(text, text_lengths, decoding_chunk_size=1, num_decoding_left_chunks=-1)
        encoder_out_lens = encoder_mask.squeeze(1).sum(1)
        encoder_out = self.text_encoder_affine_layer(encoder_out)
        return encoder_out, encoder_out_lens

    def pad_unpad_sequence(self, sos_eos_emb, embedding, text_token, text_token_len, task_id_emb, speech_token, speech_token_len):
        # pack each example as [sos_eos, spk_embedding, text..., task_id, speech...], then re-pad into a batch
        text_token = unpad_sequence(text_token, text_token_len.cpu(), batch_first=True)
        speech_token = unpad_sequence(speech_token, speech_token_len.cpu(), batch_first=True)
        lm_input = [torch.concat([sos_eos_emb.squeeze(dim=0), embedding[i], text_token[i], task_id_emb.squeeze(dim=0), speech_token[i]], dim=0)
                    for i in range(len(text_token))]
        lm_input_len = torch.tensor([i.size(0) for i in lm_input], dtype=torch.int32)
        lm_input = pad_sequence(lm_input, batch_first=True, padding_value=IGNORE_ID)
        return lm_input, lm_input_len

    def forward(
            self,
            batch: dict,
            device: torch.device,
    ) -> Dict[str, Optional[torch.Tensor]]:
        """
        Args:
            text_token: (B, L)
            text_token_len: (B,)
            speech_token: (B, T)
            speech_token_len: (B,)
            embedding: (B, spk_embed_dim)
        """
        text_token = batch['text_token'].to(device)
        text_token_len = batch['text_token_len'].to(device)
        speech_token = batch['speech_token'].to(device)
        speech_token_len = batch['speech_token_len'].to(device)
        embedding = batch['embedding'].to(device)
        # 1. prepare llm_target
        lm_target = [torch.tensor([IGNORE_ID] * (2 + text_token_len[i]) + speech_token[i, :speech_token_len[i]].tolist() +
                                  [self.speech_token_size]) for i in range(text_token.size(0))]
        lm_target = pad_sequence(lm_target, batch_first=True, padding_value=IGNORE_ID).to(device)
        # 2. encode text_token
        text_token = self.text_embedding(text_token)
        text_token, text_token_len = self.encode(text_token, text_token_len)
        # 3. embedding projection
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)
        embedding = embedding.unsqueeze(1)
        # 4. sos_eos and task_id
        sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
        task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
        # 5. encode speech_token
        speech_token = self.speech_embedding(speech_token)
        # 6. unpad and pad
        lm_input, lm_input_len = self.pad_unpad_sequence(sos_eos_emb, embedding, text_token, text_token_len,
                                                         task_id_emb, speech_token, speech_token_len)
        # 7. run lm forward
        lm_output, lm_output_mask = self.llm(lm_input, lm_input_len.to(device))
        logits = self.llm_decoder(lm_output)
        loss = self.criterion_ce(logits, lm_target)
        acc = th_accuracy(logits.view(-1, self.speech_token_size + 1), lm_target, ignore_label=IGNORE_ID)
        return {'loss': loss, 'acc': acc}
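
    # --- Illustrative only (not part of the original file): the batch layout
    # forward() expects, with shapes inferred from the code above; the names
    # and sizes below are hypothetical placeholders.
    #
    #   batch = {
    #       'text_token': torch.randint(0, text_token_size, (2, 12)),      # (B, L)
    #       'text_token_len': torch.tensor([12, 9]),                       # (B,)
    #       'speech_token': torch.randint(0, speech_token_size, (2, 40)),  # (B, T)
    #       'speech_token_len': torch.tensor([40, 31]),                    # (B,)
    #       'embedding': torch.randn(2, 192),                              # (B, spk_embed_dim)
    #   }
    #   out = model(batch, torch.device('cpu'))  # -> {'loss': ..., 'acc': ...}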

    def sampling_ids(
            self,
            weighted_scores: torch.Tensor,
            decoded_tokens: List,
            sampling: int,
            ignore_eos: bool = True,
    ):
        num_trials, max_trials = 0, 100
        while True:
            top_ids = self.sampling(weighted_scores, decoded_tokens, sampling)
            if (not ignore_eos) or (self.speech_token_size not in top_ids):
                break
            num_trials += 1
            if num_trials > max_trials:
                raise RuntimeError('sampling reached max_trials {} and still got eos while ignore_eos is True, check your input!'.format(max_trials))
        return top_ids

    @torch.inference_mode()
    def inference(
            self,
            text: torch.Tensor,
            text_len: torch.Tensor,
            prompt_text: torch.Tensor,
            prompt_text_len: torch.Tensor,
            prompt_speech_token: torch.Tensor,
            prompt_speech_token_len: torch.Tensor,
            embedding: torch.Tensor,
            sampling: int = 25,
            max_token_text_ratio: float = 20,
            min_token_text_ratio: float = 2,
    ) -> Generator[torch.Tensor, None, None]:
        device = text.device
        text = torch.concat([prompt_text, text], dim=1)
        text_len += prompt_text_len
        text = self.text_embedding(text)
        # 1. encode text
        text, text_len = self.encode(text, text_len)
        # 2. encode embedding
        if embedding.shape[0] != 0:
            embedding = F.normalize(embedding, dim=1)
            embedding = self.spk_embed_affine_layer(embedding)
            embedding = embedding.unsqueeze(dim=1)
        else:
            embedding = torch.zeros(1, 0, self.llm_input_size, dtype=text.dtype).to(device)
        # 3. concat llm_input
        sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
        task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
        if prompt_speech_token_len != 0:
            prompt_speech_token_emb = self.speech_embedding(prompt_speech_token)
        else:
            prompt_speech_token_emb = torch.zeros(1, 0, self.llm_input_size, dtype=text.dtype).to(device)
        lm_input = torch.concat([sos_eos_emb, embedding, text, task_id_emb, prompt_speech_token_emb], dim=1)
        # 4. cal min/max_length
        min_len = int((text_len - prompt_text_len) * min_token_text_ratio)
        max_len = int((text_len - prompt_text_len) * max_token_text_ratio)
        # 5. step by step decode
        out_tokens = []
        offset = 0
        att_cache, cnn_cache = torch.zeros((0, 0, 0, 0), device=lm_input.device), torch.zeros((0, 0, 0, 0), device=lm_input.device)
        for i in range(max_len):
            y_pred, att_cache, cnn_cache = self.llm.forward_chunk(lm_input, offset=offset, required_cache_size=-1,
                                                                  att_cache=att_cache, cnn_cache=cnn_cache,
                                                                  att_mask=torch.tril(torch.ones((1, lm_input.shape[1], lm_input.shape[1]),
                                                                                                 device=lm_input.device)).to(torch.bool))
            logp = self.llm_decoder(y_pred[:, -1]).log_softmax(dim=-1)
            # forbid eos on the first step so at least one speech token is produced
            if i == 0:
                logp[:, self.speech_token_size] = -float('inf')
            top_ids = self.sampling_ids(logp.squeeze(dim=0), out_tokens, sampling, ignore_eos=i < min_len).item()
            if top_ids == self.speech_token_size:
                break
            # in stream mode, yield token one by one
            yield top_ids
            out_tokens.append(top_ids)
            offset += lm_input.size(1)
            lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)
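

# --- Illustrative only (not part of the original file): `sampling_ids` above
# calls `self.sampling(weighted_scores, decoded_tokens, sampling)`, where the
# actual callable is injected through the constructor. A minimal top-k sampler
# with a compatible signature might look like this sketch; the name and the
# top-k strategy are assumptions, not the project's implementation.
def _example_top_k_sampling(weighted_scores: torch.Tensor,
                            decoded_tokens: List[int],
                            top_k: int = 25) -> torch.Tensor:
    # keep the top_k log-probabilities, renormalize, and draw one token id
    values, indices = weighted_scores.topk(top_k)
    probs = values.softmax(dim=-1)
    choice = torch.multinomial(probs, num_samples=1)
    return indices[choice]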


class Qwen2Encoder(torch.nn.Module):
    def __init__(self, pretrain_path):
        super().__init__()
        self.model = Qwen2ForCausalLM.from_pretrained(pretrain_path)

    def forward(self, xs: torch.Tensor, xs_lens: torch.Tensor):
        T = xs.size(1)
        masks = ~make_pad_mask(xs_lens, T)
        outs = self.model(
            inputs_embeds=xs,
            attention_mask=masks,
            output_hidden_states=True,
            return_dict=True,
        )
        return outs.hidden_states[-1], masks.unsqueeze(1)

    def forward_one_step(self, xs, masks, cache=None):
        # the last row of the causal mask covers every position seen so far
        input_masks = masks[:, -1, :]
        outs = self.model(
            inputs_embeds=xs,
            attention_mask=input_masks,
            output_hidden_states=True,
            return_dict=True,
            use_cache=True,
            past_key_values=cache,
        )
        xs = outs.hidden_states[-1]
        new_cache = outs.past_key_values
        return xs, new_cache
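

# --- Illustrative only (not part of the original file): incremental decoding
# with Qwen2Encoder.forward_one_step. The first call prefills the KV cache from
# the whole prefix; later calls feed one embedding at a time with a causal mask
# grown by one position. The checkpoint path and shapes are hypothetical, so
# the sketch is left commented out.
#
#   enc = Qwen2Encoder('Qwen/Qwen2-0.5B')                       # hypothetical path
#   dim = enc.model.config.hidden_size
#   xs = torch.zeros(1, 10, dim)                                # prefix embeddings
#   masks = torch.tril(torch.ones((1, 10, 10), dtype=torch.bool))
#   ys, cache = enc.forward_one_step(xs, masks)                 # prefill
#   step = torch.zeros(1, 1, dim)                               # one new embedding
#   masks = torch.tril(torch.ones((1, 11, 11), dtype=torch.bool))
#   ys, cache = enc.forward_one_step(step, masks, cache)        # one decode step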


class Qwen2LM(TransformerLM):
    def __init__(
            self,
            llm_input_size: int,
            llm_output_size: int,
            speech_token_size: int,
            llm: torch.nn.Module,
            sampling: Callable,
            length_normalized_loss: bool = True,
            lsm_weight: float = 0.0,
            mix_ratio: List[int] = [5, 15],
    ):
        torch.nn.Module.__init__(self)
        self.llm_input_size = llm_input_size
        self.llm_output_size = llm_output_size
        self.speech_token_size = speech_token_size
        # 1. build speech token language model related modules
        self.sos_eos = 0
        self.task_id = 1
        self.fill_token = 2
        self.llm_embedding = torch.nn.Embedding(2, llm_input_size)
        self.llm = llm
        self.llm_decoder = nn.Linear(llm_output_size, speech_token_size + 3)
        self.criterion_ce = LabelSmoothingLoss(
            size=speech_token_size + 3,
            padding_idx=IGNORE_ID,
            smoothing=lsm_weight,
            normalize_length=length_normalized_loss,
        )
        # 2. [Optional] build speech token related modules
        self.speech_embedding = torch.nn.Embedding(speech_token_size + 3, llm_input_size)
        # 3. sampling method
        self.sampling = sampling
        self.mix_ratio = mix_ratio

    def pad_unpad_sequence(self, sos_eos_emb, text_token, text_token_len, task_id_emb, speech_token, speech_token_len, bistream):
        # NOTE `bistream` is accepted for interface parity but not used in this packing
        text_token = unpad_sequence(text_token, text_token_len.cpu(), batch_first=True)
        speech_token = unpad_sequence(speech_token, speech_token_len.cpu(), batch_first=True)
        lm_input = [torch.concat([sos_eos_emb.squeeze(dim=0), text_token[i], task_id_emb.squeeze(dim=0), speech_token[i]], dim=0)
                    for i in range(len(text_token))]
        lm_input_len = torch.tensor([i.size(0) for i in lm_input], dtype=torch.int32)
        lm_input = pad_sequence(lm_input, batch_first=True, padding_value=IGNORE_ID)
        return lm_input, lm_input_len

    def forward(
            self,
            batch: dict,
            device: torch.device,
    ) -> Dict[str, Optional[torch.Tensor]]:
        """
        Args:
            text_token: (B, L)
            text_token_len: (B,)
            speech_token: (B, T)
            speech_token_len: (B,)
        """
        text_token = batch['text_token'].to(device)
        text_token_len = batch['text_token_len'].to(device)
        speech_token = batch['speech_token'].to(device)
        speech_token_len = batch['speech_token_len'].to(device)
        # 1. prepare llm_target
        bistream = random.random() < 0.5
        lm_target = [torch.tensor([IGNORE_ID] * (1 + text_token_len[i]) + speech_token[i, :speech_token_len[i]].tolist() +
                                  [self.speech_token_size]) for i in range(text_token.size(0))]
        lm_target = pad_sequence(lm_target, batch_first=True, padding_value=IGNORE_ID).to(device)
        # 2. encode text_token
        text_token = self.llm.model.model.embed_tokens(text_token)
        # 3. sos_eos and task_id
        sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
        task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
        # 4. encode speech_token
        speech_token = self.speech_embedding(speech_token)
        # 5. unpad and pad
        lm_input, lm_input_len = self.pad_unpad_sequence(sos_eos_emb, text_token, text_token_len, task_id_emb, speech_token, speech_token_len, bistream)
        # 6. run lm forward
        lm_output, lm_output_mask = self.llm(lm_input, lm_input_len.to(device))
        logits = self.llm_decoder(lm_output)
        loss = self.criterion_ce(logits, lm_target)
        acc = th_accuracy(logits.view(-1, self.speech_token_size + 3), lm_target, ignore_label=IGNORE_ID)
        return {'loss': loss, 'acc': acc}

    @torch.inference_mode()
    def inference(
            self,
            text: torch.Tensor,
            text_len: torch.Tensor,
            prompt_text: torch.Tensor,
            prompt_text_len: torch.Tensor,
            prompt_speech_token: torch.Tensor,
            prompt_speech_token_len: torch.Tensor,
            embedding: torch.Tensor,
            sampling: int = 25,
            max_token_text_ratio: float = 20,
            min_token_text_ratio: float = 2,
    ) -> Generator[torch.Tensor, None, None]:
        device = text.device
        text = torch.concat([prompt_text, text], dim=1)
        text_len += prompt_text_len
        text = self.llm.model.model.embed_tokens(text)
        # 1. concat llm_input
        sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
        task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
        if prompt_speech_token_len != 0:
            prompt_speech_token_emb = self.speech_embedding(prompt_speech_token)
        else:
            prompt_speech_token_emb = torch.zeros(1, 0, self.llm_input_size, dtype=text.dtype).to(device)
        lm_input = torch.concat([sos_eos_emb, text, task_id_emb, prompt_speech_token_emb], dim=1)
        # 2. cal min/max_length
        min_len = int((text_len - prompt_text_len) * min_token_text_ratio)
        max_len = int((text_len - prompt_text_len) * max_token_text_ratio)
        # 3. step by step decode
        out_tokens = []
        cache = None
        for i in range(max_len):
            y_pred, cache = self.llm.forward_one_step(lm_input,
                                                      masks=torch.tril(torch.ones((1, lm_input.shape[1], lm_input.shape[1]), device=lm_input.device)).to(torch.bool),
                                                      cache=cache)
            logp = self.llm_decoder(y_pred[:, -1]).log_softmax(dim=-1)
            top_ids = self.sampling_ids(logp.squeeze(dim=0), out_tokens, sampling, ignore_eos=i < min_len).item()
            if top_ids == self.speech_token_size:
                break
            # skip special ids above eos (e.g. the fill token), they are not speech tokens
            if top_ids > self.speech_token_size:
                continue
            # in stream mode, yield token one by one
            yield top_ids
            out_tokens.append(top_ids)
            lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)

    @torch.inference_mode()
    def inference_bistream(
            self,
            text: Generator,
            prompt_text: torch.Tensor,
            prompt_text_len: torch.Tensor,
            prompt_speech_token: torch.Tensor,
            prompt_speech_token_len: torch.Tensor,
            embedding: torch.Tensor,
            sampling: int = 25,
            max_token_text_ratio: float = 20,
            min_token_text_ratio: float = 2,
    ) -> Generator[torch.Tensor, None, None]:
        device = prompt_text.device
        # 1. prepare input
        sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
        task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
        if prompt_speech_token_len != 0:
            prompt_speech_token_emb = self.speech_embedding(prompt_speech_token)
        else:
            prompt_speech_token_emb = torch.zeros(1, 0, self.llm_input_size, dtype=prompt_text.dtype).to(device)
        lm_input = torch.concat([sos_eos_emb], dim=1)
        # 2. iterate text
        out_tokens = []
        cache = None
        # NOTE initialize text_cache with prompt_text, since prompt_speech_token/prompt_text is practically never below the 15/5 mix_ratio
        text_cache = self.llm.model.model.embed_tokens(prompt_text)
        next_fill_index = -1
        for this_text in text:
            text_cache = torch.concat([text_cache, self.llm.model.model.embed_tokens(this_text)], dim=1)
            # prompt_speech_token_emb not empty, try to append it to lm_input
            while prompt_speech_token_emb.size(1) != 0:
                if text_cache.size(1) >= self.mix_ratio[0]:
                    lm_input_text, lm_input_speech = text_cache[:, :self.mix_ratio[0]], prompt_speech_token_emb[:, :self.mix_ratio[1]]
                    logging.info('append {} text tokens and {} speech tokens'.format(lm_input_text.size(1), lm_input_speech.size(1)))
                    lm_input = torch.concat([lm_input, lm_input_text, lm_input_speech], dim=1)
                    text_cache, prompt_speech_token_emb = text_cache[:, self.mix_ratio[0]:], prompt_speech_token_emb[:, self.mix_ratio[1]:]
                else:
                    logging.info('not enough text tokens to decode, waiting for more')
                    break
            # no prompt_speech_token_emb left, so some speech tokens can be decoded
            if prompt_speech_token_emb.size(1) == 0:
                if (len(out_tokens) != 0 and out_tokens[-1] == self.speech_token_size + 2) or (len(out_tokens) == 0 and lm_input.size(1) == 1):
                    logging.info('got fill token, need to append more text tokens')
                    if text_cache.size(1) >= self.mix_ratio[0]:
                        lm_input_text = text_cache[:, :self.mix_ratio[0]]
                        logging.info('append {} text tokens'.format(lm_input_text.size(1)))
                        if len(out_tokens) != 0 and out_tokens[-1] == self.speech_token_size + 2:
                            lm_input = lm_input_text
                        else:
                            lm_input = torch.concat([lm_input, lm_input_text], dim=1)
                        text_cache = text_cache[:, self.mix_ratio[0]:]
                    else:
                        logging.info('not enough text tokens to decode, waiting for more')
                        continue
                while True:
                    seq_len = lm_input.shape[1] if cache is None else lm_input.shape[1] + cache[0][0].size(2)
                    y_pred, cache = self.llm.forward_one_step(lm_input,
                                                              masks=torch.tril(torch.ones((1, seq_len, seq_len), device=lm_input.device)).to(torch.bool),
                                                              cache=cache)
                    logp = self.llm_decoder(y_pred[:, -1]).log_softmax(dim=-1)
                    if next_fill_index != -1 and len(out_tokens) == next_fill_index:
                        top_ids = self.speech_token_size + 2
                        next_fill_index += (self.mix_ratio[1] + 1)
                    else:
                        top_ids = self.sampling_ids(logp.squeeze(dim=0), out_tokens, sampling, ignore_eos=True).item()
                    if top_ids == self.speech_token_size + 2:
                        next_fill_index = len(out_tokens) + self.mix_ratio[1] + 1
                        logging.info('fill_token index {} next fill_token index {}'.format(len(out_tokens), next_fill_index))
                    out_tokens.append(top_ids)
                    if top_ids >= self.speech_token_size:
                        if top_ids == self.speech_token_size + 2:
                            break
                        else:
                            raise ValueError('should not get token {}'.format(top_ids))
                    yield top_ids
                    lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)
        # 3. final decode
        lm_input = torch.concat([lm_input, text_cache, task_id_emb], dim=1)
        logging.info('no more text tokens, decode until eos is met')
        while True:
            seq_len = lm_input.shape[1] if cache is None else lm_input.shape[1] + cache[0][0].size(2)
            y_pred, cache = self.llm.forward_one_step(lm_input,
                                                      masks=torch.tril(torch.ones((1, seq_len, seq_len), device=lm_input.device)).to(torch.bool),
                                                      cache=cache)
            logp = self.llm_decoder(y_pred[:, -1]).log_softmax(dim=-1)
            top_ids = self.sampling_ids(logp.squeeze(dim=0), out_tokens, sampling, ignore_eos=False).item()
            out_tokens.append(top_ids)
            if top_ids >= self.speech_token_size:
                if top_ids == self.speech_token_size:
                    break
                else:
                    raise ValueError('should not get token {}'.format(top_ids))
            # in stream mode, yield token one by one
            yield top_ids
            lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)
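

# --- Illustrative only (not part of the original file): how the streaming
# entry points above are typically consumed. `model`, the prompt tensors, and
# the text chunking are hypothetical placeholders shaped as the signatures
# expect, so the sketch is left commented out.
#
#   def text_chunks():
#       # inference_bistream expects a generator yielding (1, n) token id
#       # tensors as text becomes available
#       for piece in [torch.tensor([[11, 12, 13]]), torch.tensor([[14, 15]])]:
#           yield piece
#
#   speech_tokens = list(model.inference_bistream(
#       text_chunks(), prompt_text, prompt_text_len,
#       prompt_speech_token, prompt_speech_token_len, embedding))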