extract_speech_token.py

#!/usr/bin/env python3
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
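"""Extract discrete speech tokens for every utterance in a data directory.

Reads <dir>/wav.scp, runs each 16 kHz mono waveform through an ONNX speech
tokenizer, and saves the resulting {utt: token_list} dict to
<dir>/utt2speech_token.pt.

Example invocation (paths are illustrative):
    python extract_speech_token.py --dir data/train --onnx_path speech_tokenizer_v1.onnx --num_thread 8
"""
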
import argparse
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np
import onnxruntime
import torch
import torchaudio
import whisper
from tqdm import tqdm


def single_job(utt):
    audio, sample_rate = torchaudio.load(utt2wav[utt], backend='soundfile')
    if sample_rate != 16000:
        audio = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(audio)
    # Downmix multi-channel audio to mono
    if audio.shape[0] > 1:
        audio = audio.mean(dim=0, keepdim=True)
    if audio.shape[1] / 16000 > 30:
        logging.warning('speech token extraction is not supported for audio longer than 30s; returning an empty token list')
        speech_token = []
    else:
        # 128-bin log-mel features, matching what the tokenizer ONNX model expects
        feat = whisper.log_mel_spectrogram(audio, n_mels=128)
        speech_token = ort_session.run(None, {ort_session.get_inputs()[0].name: feat.detach().cpu().numpy(),
                                              ort_session.get_inputs()[1].name: np.array([feat.shape[2]], dtype=np.int32)})[0].flatten().tolist()
    return utt, speech_token


def main(args):
    all_task = [executor.submit(single_job, utt) for utt in utt2wav.keys()]
    utt2speech_token = {}
    for future in tqdm(as_completed(all_task), total=len(all_task)):
        utt, speech_token = future.result()
        utt2speech_token[utt] = speech_token
    torch.save(utt2speech_token, '{}/utt2speech_token.pt'.format(args.dir))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dir", type=str)
    parser.add_argument("--onnx_path", type=str)
    parser.add_argument("--num_thread", type=int, default=8)
    args = parser.parse_args()
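    # wav.scp follows the Kaldi convention: one "<utt_id> <wav_path>" pair
    # per line, e.g. "utt001 /data/wavs/utt001.wav" (path is illustrative).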
    utt2wav = {}
    with open('{}/wav.scp'.format(args.dir)) as f:
        for l in f:
            l = l.replace('\n', '').split()
            utt2wav[l[0]] = l[1]
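    # One intra-op thread per session run; concurrency comes from the thread
    # pool instead. CUDAExecutionProvider assumes a GPU build of onnxruntime;
    # append "CPUExecutionProvider" as a fallback if no GPU is available.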
    option = onnxruntime.SessionOptions()
    option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
    option.intra_op_num_threads = 1
    providers = ["CUDAExecutionProvider"]
    ort_session = onnxruntime.InferenceSession(args.onnx_path, sess_options=option, providers=providers)
    executor = ThreadPoolExecutor(max_workers=args.num_thread)
    main(args)