llm_vllm.py

# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import queue
import asyncio
import threading
from typing import List, Generator, AsyncGenerator
import torch
from cosyvoice.utils.file_utils import logging
from cosyvoice.llm.llm import Qwen2LM
# enable the vLLM V1 engine
import os
os.environ["VLLM_USE_V1"] = '1'
from vllm import ModelRegistry
from vllm import LLMEngine, AsyncLLMEngine, CompletionOutput
from vllm.engine.arg_utils import EngineArgs, AsyncEngineArgs
from vllm.sampling_params import SamplingParams
from cosyvoice.llm.vllm_use_cosyvoice2_model import CosyVoice2Model as CosyVoice2LLM
ModelRegistry.register_model("CosyVoice2Model", CosyVoice2LLM)
from vllm.sampling_params import RequestOutputKind

# default sampling parameters passed to SamplingParams for every vLLM request
SAMPLING_PARAMS = {
    "temperature": 1,  # must not be set below 0.8, otherwise a lot of empty audio is produced or speech tokens fail to generate
    "top_p": 1,        # must not be set below 0.8, otherwise a lot of empty audio is produced or speech tokens fail to generate
    "top_k": 25,
    # "min_tokens": 80,          # setting a minimum token count is not supported; enabling it crashes vLLM at startup
    # "presence_penalty": 1.0,   # not supported
    # "frequency_penalty": 0.0,  # not supported
    "max_tokens": 1024,
    "detokenize": False,  # currently has no effect in vLLM 0.7.3 V1; kept so detokenization can be skipped once a later version honors it
    "ignore_eos": False,
    "output_kind": RequestOutputKind.DELTA  # set to DELTA; if you change this, also adjust the handling code in llm_inference
}


def tensor_to_list(tensor: torch.Tensor):
    return tensor.view(-1).cpu().numpy().tolist()


class VllmQwen2LM(Qwen2LM):
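    """
    Qwen2LM variant that delegates speech-token generation to a vLLM engine
    (expected to be an AsyncLLMEngine) running in a dedicated background
    asyncio event loop, instead of running the transformer forward pass in
    PyTorch.
    """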

    def __init__(
            self,
            model_dir,
            mix_ratio: List[int] = [5, 15],
    ):
        self.fp16 = False
        self.half = lambda: None
        self.mix_ratio = mix_ratio
        self.llm_engine = None
        self.speech_token_size = 6564   # 6561 + 3
        self.llm_token_size = 151936    # llm vocab_size
        self.sos_eos_token_id = self.speech_token_size + self.llm_token_size + 1
        self.task_token_id = self.sos_eos_token_id + 1
        self.zero_token_id = self.task_token_id + 1
        # vLLM inference must run inside a single fixed event loop,
        # so start a dedicated background thread for it
        self.loop = asyncio.new_event_loop()
        self.loop_thread = threading.Thread(target=self._run_event_loop, daemon=True)
        self.loop_thread.start()
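
        # NOTE: self.llm_engine is left as None above and is expected to be
        # attached elsewhere. A minimal sketch of doing so with vLLM's public
        # API (parameter values are illustrative assumptions, not taken from
        # this file):
        #
        #     engine_args = AsyncEngineArgs(model=model_dir, gpu_memory_utilization=0.5)
        #     self.llm_engine = AsyncLLMEngine.from_engine_args(engine_args)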

    def _run_event_loop(self):
        asyncio.set_event_loop(self.loop)
        self.loop.run_forever()

    async def async_llm_inference(self, out_queue, prompt_token_ids, request_id, stop_token_ids, max_tokens):
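        """Run a single vLLM generate call and push each incremental (DELTA)
        CompletionOutput together with the finished flag onto out_queue."""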
        sampling_params = SamplingParams(**SAMPLING_PARAMS)
        sampling_params.stop_token_ids = stop_token_ids or [6561]
        if max_tokens:
            sampling_params.max_tokens = max_tokens
        async for output in self.llm_engine.generate(
                {
                    "prompt_token_ids": prompt_token_ids,
                },
                sampling_params=sampling_params,
                request_id=request_id or f"{time.time()}",
        ):
            out_queue.put((output.outputs[0], output.finished))

    def llm_inference(self, prompt_token_ids: List[int], request_id: str = None, stop_token_ids=None, max_tokens=None):
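        """Synchronous generator wrapper: schedule async_llm_inference on the
        background event loop via run_coroutine_threadsafe and yield
        CompletionOutput deltas from a thread-safe queue until finished."""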
        out_queue = queue.Queue()
        asyncio.run_coroutine_threadsafe(
            self.async_llm_inference(out_queue, prompt_token_ids, request_id, stop_token_ids, max_tokens), self.loop
        )
        # consume the results returned via out_queue
        finished = False
        while not finished:
            (output, finished) = out_queue.get_nowait() if not out_queue.empty() else out_queue.get()
            yield output

    def inference(
            self,
            text: torch.Tensor,
            text_len: torch.Tensor,
            prompt_text: torch.Tensor,
            prompt_text_len: torch.Tensor,
            prompt_speech_token: torch.Tensor,
            prompt_speech_token_len: torch.Tensor,
            embedding: torch.Tensor,
            sampling: int = 25,
            max_token_text_ratio: float = 20,
            min_token_text_ratio: float = 2,
    ) -> Generator[torch.Tensor | int, None, None]:
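        """Non-streaming text inference. Text token ids are shifted by the
        speech vocabulary size (6564) so text and speech tokens share one id
        space; the prompt is laid out as
        [sos_eos] + prompt_text + text + [task] + prompt_speech_token,
        and generation stops at the speech EOS token 6561."""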
        prompt_text = tensor_to_list(prompt_text + torch.tensor(6564))
        prompt_speech_token = tensor_to_list(prompt_speech_token)
        text = tensor_to_list(text + torch.tensor(6564))

        prompt_token_ids = [self.sos_eos_token_id] + prompt_text + text + \
                           [self.task_token_id] + prompt_speech_token
        max_tokens = len(text) * 20
        for output in self.llm_inference(
                prompt_token_ids,
                stop_token_ids=[6561],
                max_tokens=max_tokens,
        ):
            if output.token_ids[-1] == 6561:
                need_add_tokens = output.token_ids[:-1]
            else:
                need_add_tokens = output.token_ids
            for token in need_add_tokens:
                yield token

    def inference_bistream(
            self,
            text: Generator,
            prompt_text: torch.Tensor,
            prompt_text_len: torch.Tensor,
            prompt_speech_token: torch.Tensor,
            prompt_speech_token_len: torch.Tensor,
            embedding: torch.Tensor,
            sampling: int = 25,
            max_token_text_ratio: float = 20,
            min_token_text_ratio: float = 2,
    ) -> Generator[torch.Tensor, None, None]:
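        """Bi-streaming inference: text arrives chunk by chunk from a generator.
        Text and prompt speech tokens are interleaved according to mix_ratio
        (mix_ratio[0] text tokens per mix_ratio[1] speech tokens) to grow the
        prompt incrementally; token 6563 is used as the intermediate stop token
        between text chunks, and the final pass stops at the speech EOS 6561."""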
        prompt_text = tensor_to_list(prompt_text + torch.tensor(6564))
        prompt_speech_token = tensor_to_list(prompt_speech_token)

        last_tokens = []
        prompt_token_ids = [self.sos_eos_token_id]
        text_tokens_cache = prompt_text
        for this_text in text:
            this_text = tensor_to_list(this_text + torch.tensor(6564))
            # each text chunk must already be token ids (List[int])
            assert isinstance(this_text, list), "text chunks must be token ids List[int]."
            text_tokens_cache += this_text
            while len(prompt_speech_token) != 0:
                if len(text_tokens_cache) >= self.mix_ratio[0]:
                    text_input_token = text_tokens_cache[:self.mix_ratio[0]]
                    speech_input_token = prompt_speech_token[:self.mix_ratio[1]]
                    prompt_token_ids += text_input_token + speech_input_token
                    # keep only the not-yet-consumed remainder in the caches
                    text_tokens_cache = text_tokens_cache[self.mix_ratio[0]:]
                    prompt_speech_token = prompt_speech_token[self.mix_ratio[1]:]
                else:
                    break
            if len(prompt_speech_token) == 0:
                if (len(last_tokens) > 0 and last_tokens[-1] == 6563) or len(prompt_token_ids) == 1:
                    if len(text_tokens_cache) >= self.mix_ratio[0]:
                        text_tokens_temp = text_tokens_cache[:self.mix_ratio[0]]
                        prompt_token_ids += text_tokens_temp
                        text_tokens_cache = text_tokens_cache[self.mix_ratio[0]:]
                    else:
                        continue
                for output in self.llm_inference(prompt_token_ids, stop_token_ids=[6563]):
                    last_tokens = output.token_ids
                    if last_tokens[-1] == 6563:
                        need_add_tokens = last_tokens[:-1]
                    else:
                        need_add_tokens = last_tokens
                    for token in need_add_tokens:
                        yield token
                    prompt_token_ids.extend(need_add_tokens)
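
        # all streamed text has been consumed: append the remaining text cache and the
        # task token, then run a final pass that stops at the speech EOS token (6561)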
        prompt_token_ids += text_tokens_cache + [self.task_token_id]
        for output in self.llm_inference(prompt_token_ids, stop_token_ids=[6561]):
            if output.token_ids[-1] == 6561:
                need_add_tokens = output.token_ids[:-1]
            else:
                need_add_tokens = output.token_ids
            for token in need_add_tokens:
                yield token