file_utils.py

# Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang)
#               2024 Alibaba Inc (authors: Xiang Lyu, Zetao Hu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import torchaudio
import logging

logging.getLogger('matplotlib').setLevel(logging.WARNING)
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s')


def read_lists(list_file):
    """Read a text file and return its lines, stripped of surrounding whitespace."""
    lists = []
    with open(list_file, 'r', encoding='utf8') as fin:
        for line in fin:
            lists.append(line.strip())
    return lists


def read_json_lists(list_file):
    """Read a list file whose lines are paths to JSON files and merge their dicts."""
    lists = read_lists(list_file)
    results = {}
    for fn in lists:
        with open(fn, 'r', encoding='utf8') as fin:
            results.update(json.load(fin))
    return results
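
# Illustrative usage sketch (file names are hypothetical, not part of this
# module): given 'meta.list' containing one JSON path per line, each file
# holding a flat dict, the dicts are merged in list order, with later files
# overriding duplicate keys:
#     meta = read_json_lists('meta.list')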


def load_wav(wav, target_sr):
    """Load an audio file as a mono (1, T) tensor at target_sr; only downsampling is supported."""
    speech, sample_rate = torchaudio.load(wav, backend='soundfile')
    speech = speech.mean(dim=0, keepdim=True)
    if sample_rate != target_sr:
        assert sample_rate > target_sr, 'wav sample rate {} must be greater than {}'.format(sample_rate, target_sr)
        speech = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_sr)(speech)
    return speech
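
# Illustrative usage sketch (path and rate are hypothetical): load a recording
# as a (1, T) float tensor resampled to 16 kHz; a file sampled below the target
# rate trips the assert above:
#     prompt_speech = load_wav('prompt.wav', 16000)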


def convert_onnx_to_trt(trt_model, onnx_model, fp16):
    """Build a TensorRT engine from an ONNX model and serialize it to trt_model."""
    import tensorrt as trt
    _min_shape = [(2, 80, 4), (2, 1, 4), (2, 80, 4), (2,), (2, 80), (2, 80, 4)]
    _opt_shape = [(2, 80, 193), (2, 1, 193), (2, 80, 193), (2,), (2, 80), (2, 80, 193)]
    _max_shape = [(2, 80, 6800), (2, 1, 6800), (2, 80, 6800), (2,), (2, 80), (2, 80, 6800)]
    input_names = ["x", "mask", "mu", "t", "spks", "cond"]
    logging.info("Converting onnx to trt...")
    network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    logger = trt.Logger(trt.Logger.INFO)
    builder = trt.Builder(logger)
    network = builder.create_network(network_flags)
    parser = trt.OnnxParser(network, logger)
    config = builder.create_builder_config()
    config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 33)  # 8GB
    if fp16:
        config.set_flag(trt.BuilderFlag.FP16)
    profile = builder.create_optimization_profile()
    # load onnx model
    with open(onnx_model, "rb") as f:
        if not parser.parse(f.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))
            raise ValueError('failed to parse {}'.format(onnx_model))
    # set input shapes
    for i in range(len(input_names)):
        profile.set_shape(input_names[i], _min_shape[i], _opt_shape[i], _max_shape[i])
    tensor_dtype = trt.DataType.HALF if fp16 else trt.DataType.FLOAT
    # set input and output data type
    for i in range(network.num_inputs):
        input_tensor = network.get_input(i)
        input_tensor.dtype = tensor_dtype
    for i in range(network.num_outputs):
        output_tensor = network.get_output(i)
        output_tensor.dtype = tensor_dtype
    config.add_optimization_profile(profile)
    engine_bytes = builder.build_serialized_network(network, config)
    # save trt engine
    with open(trt_model, "wb") as f:
        f.write(engine_bytes)
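

# Illustrative usage sketch, not part of the original module: the paths below
# are hypothetical placeholders. The ONNX model is assumed to expose the six
# inputs named "x", "mask", "mu", "t", "spks", "cond" with shapes compatible
# with the min/opt/max profile hard-coded in convert_onnx_to_trt.
if __name__ == '__main__':
    onnx_path = 'estimator.fp32.onnx'   # hypothetical input path
    trt_path = 'estimator.fp16.plan'    # hypothetical output path
    convert_onnx_to_trt(trt_path, onnx_path, fp16=True)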