#!/usr/bin/env python3
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
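#
# extract_embedding.py: extract speaker embeddings for a Kaldi-style data
# directory. Reads `wav.scp` (utt -> wav path) and `utt2spk` (utt -> speaker)
# from --dir, runs every utterance through the speaker-embedding ONNX model
# given by --onnx_path, and writes per-utterance `utt2embedding.pt` plus
# speaker-averaged `spk2embedding.pt` back into --dir.
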
import argparse
from concurrent.futures import ThreadPoolExecutor, as_completed

import onnxruntime
import torch
import torchaudio
import torchaudio.compliance.kaldi as kaldi
from tqdm import tqdm


def single_job(utt):
    # Load the waveform and resample to 16 kHz if necessary.
    audio, sample_rate = torchaudio.load(utt2wav[utt])
    if sample_rate != 16000:
        audio = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(audio)
    # 80-dim Kaldi fbank features, mean-normalized along the time axis.
    feat = kaldi.fbank(audio,
                       num_mel_bins=80,
                       dither=0,
                       sample_frequency=16000)
    feat = feat - feat.mean(dim=0, keepdim=True)
    # Run the ONNX model on the (1, T, 80) feature batch and flatten the
    # resulting embedding to a plain list of floats.
    embedding = ort_session.run(None, {ort_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist()
    return utt, embedding


def main(args):
    all_task = [executor.submit(single_job, utt) for utt in utt2wav.keys()]
    utt2embedding, spk2embedding = {}, {}
    for future in tqdm(as_completed(all_task)):
        utt, embedding = future.result()
        utt2embedding[utt] = embedding
        # Group every utterance embedding under its speaker.
        spk = utt2spk[utt]
        if spk not in spk2embedding:
            spk2embedding[spk] = []
        spk2embedding[spk].append(embedding)
    # Average the utterance embeddings to get one embedding per speaker.
    for k, v in spk2embedding.items():
        spk2embedding[k] = torch.tensor(v).mean(dim=0).tolist()
    torch.save(utt2embedding, "{}/utt2embedding.pt".format(args.dir))
    torch.save(spk2embedding, "{}/spk2embedding.pt".format(args.dir))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dir", type=str)
    parser.add_argument("--onnx_path", type=str)
    parser.add_argument("--num_thread", type=int, default=8)
    args = parser.parse_args()

    # Parse the Kaldi-style "utt wav-path" and "utt speaker" mappings.
    utt2wav, utt2spk = {}, {}
    with open('{}/wav.scp'.format(args.dir)) as f:
        for l in f:
            l = l.replace('\n', '').split()
            utt2wav[l[0]] = l[1]
    with open('{}/utt2spk'.format(args.dir)) as f:
        for l in f:
            l = l.replace('\n', '').split()
            utt2spk[l[0]] = l[1]

    # Limit each run() call to one intra-op thread; parallelism comes from
    # the ThreadPoolExecutor instead. The single InferenceSession is shared
    # by all worker threads (concurrent run() calls are supported).
    option = onnxruntime.SessionOptions()
    option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
    option.intra_op_num_threads = 1
    providers = ["CPUExecutionProvider"]
    ort_session = onnxruntime.InferenceSession(args.onnx_path, sess_options=option, providers=providers)
    executor = ThreadPoolExecutor(max_workers=args.num_thread)
    main(args)
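
# Example invocation (paths are illustrative, not fixed by this script):
#   python extract_embedding.py --dir data/train --onnx_path campplus.onnx
# The outputs are plain dicts of float lists and can be loaded back with
# torch.load, e.g. utt2embedding = torch.load("data/train/utt2embedding.pt").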