# extract_embedding.py -- 2.7 KB (source listing header)
#!/usr/bin/env python3
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  15. import argparse
  16. import torch
  17. import torchaudio
  18. from tqdm import tqdm
  19. import onnxruntime
  20. import torchaudio.compliance.kaldi as kaldi
  21. def main(args):
  22. utt2wav, utt2spk = {}, {}
  23. with open('{}/wav.scp'.format(args.dir)) as f:
  24. for l in f:
  25. l = l.replace('\n', '').split()
  26. utt2wav[l[0]] = l[1]
  27. with open('{}/utt2spk'.format(args.dir)) as f:
  28. for l in f:
  29. l = l.replace('\n', '').split()
  30. utt2spk[l[0]] = l[1]
  31. option = onnxruntime.SessionOptions()
  32. option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
  33. option.intra_op_num_threads = 1
  34. providers = ["CPUExecutionProvider"]
  35. ort_session = onnxruntime.InferenceSession(args.onnx_path, sess_options=option, providers=providers)
  36. utt2embedding, spk2embedding = {}, {}
  37. for utt in tqdm(utt2wav.keys()):
  38. audio, sample_rate = torchaudio.load(utt2wav[utt])
  39. if sample_rate != 16000:
  40. audio = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(audio)
  41. feat = kaldi.fbank(audio,
  42. num_mel_bins=80,
  43. dither=0,
  44. sample_frequency=16000)
  45. feat = feat - feat.mean(dim=0, keepdim=True)
  46. embedding = ort_session.run(None, {ort_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist()
  47. utt2embedding[utt] = embedding
  48. spk = utt2spk[utt]
  49. if spk not in spk2embedding:
  50. spk2embedding[spk] = []
  51. spk2embedding[spk].append(embedding)
  52. for k, v in spk2embedding.items():
  53. spk2embedding[k] = torch.tensor(v).mean(dim=0).tolist()
  54. torch.save(utt2embedding, '{}/utt2embedding.pt'.format(args.dir))
  55. torch.save(spk2embedding, '{}/spk2embedding.pt'.format(args.dir))
  56. if __name__ == "__main__":
  57. parser = argparse.ArgumentParser()
  58. parser.add_argument('--dir',
  59. type=str)
  60. parser.add_argument('--onnx_path',
  61. type=str)
  62. args = parser.parse_args()
  63. main(args)