extract_speech_token.py

#!/usr/bin/env python3
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
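"""Extract discrete speech tokens for every utterance listed in a Kaldi-style
wav.scp, using an ONNX speech tokenizer (e.g. CosyVoice's exported speech
tokenizer model), and save the resulting {utt_id: token_list} mapping to
<dir>/utt2speech_token.pt.
"""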
import argparse
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np
import onnxruntime
import torch
import torchaudio
import whisper


def single_job(utt):
    """Load one utterance, resample to 16 kHz, and run the ONNX tokenizer on it."""
    audio, sample_rate = torchaudio.load(utt2wav[utt])
    if sample_rate != 16000:
        audio = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(audio)
    if audio.shape[1] / 16000 > 30:
        logging.warning('speech token extraction is not supported for audio longer than 30s')
        speech_token = []
    else:
        # 128-bin log-mel features, matching the whisper-style input the tokenizer expects
        feat = whisper.log_mel_spectrogram(audio, n_mels=128)
        # Inputs: the mel features and their frame count; output: discrete speech token ids
        speech_token = ort_session.run(None, {ort_session.get_inputs()[0].name: feat.detach().cpu().numpy(),
                                              ort_session.get_inputs()[1].name: np.array([feat.shape[2]], dtype=np.int32)})[0].flatten().tolist()
    return utt, speech_token


def main(args):
    all_task = [executor.submit(single_job, utt) for utt in utt2wav.keys()]
    utt2speech_token = {}
    # Collect results as the worker threads finish, in completion order
    for future in tqdm(as_completed(all_task), total=len(all_task)):
        utt, speech_token = future.result()
        utt2speech_token[utt] = speech_token
    torch.save(utt2speech_token, '{}/utt2speech_token.pt'.format(args.dir))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dir", type=str)
    parser.add_argument("--onnx_path", type=str)
    parser.add_argument("--num_thread", type=int, default=8)
    args = parser.parse_args()

    # Read the Kaldi-style wav.scp: one "utt_id wav_path" pair per line
    utt2wav = {}
    with open('{}/wav.scp'.format(args.dir)) as f:
        for line in f:
            parts = line.strip().split()
            utt2wav[parts[0]] = parts[1]

    option = onnxruntime.SessionOptions()
    option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
    option.intra_op_num_threads = 1
    providers = ["CUDAExecutionProvider"]
    # A single session is shared by all worker threads; onnxruntime's
    # InferenceSession.run is safe for concurrent calls
    ort_session = onnxruntime.InferenceSession(args.onnx_path, sess_options=option, providers=providers)
    executor = ThreadPoolExecutor(max_workers=args.num_thread)
    main(args)
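
# Example usage (paths, the model filename, and the utt id below are illustrative):
#
#   $ cat data/train/wav.scp
#   utt_0001 /data/wavs/utt_0001.wav
#   utt_0002 /data/wavs/utt_0002.wav
#
#   $ python extract_speech_token.py --dir data/train \
#         --onnx_path pretrained_models/speech_tokenizer_v1.onnx --num_thread 8
#
# The saved mapping can then be reloaded with:
#
#   utt2speech_token = torch.load('data/train/utt2speech_token.pt')
#   tokens = utt2speech_token['utt_0001']  # list[int] of discrete speech token ids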