@@ -43,9 +43,9 @@ python3 client_grpc.py \
 import argparse
 import asyncio
 import json
-import queue # Added
-import uuid # Added
-import functools # Added
+import queue
+import uuid
+import functools
 import os
 import time
@@ -55,16 +55,16 @@ from pathlib import Path
 import numpy as np
 import soundfile as sf
 import tritonclient
-import tritonclient.grpc.aio as grpcclient_aio # Renamed original import
-import tritonclient.grpc as grpcclient_sync # Added sync client import
-from tritonclient.utils import np_to_triton_dtype, InferenceServerException # Added InferenceServerException
+import tritonclient.grpc.aio as grpcclient_aio
+import tritonclient.grpc as grpcclient_sync
+from tritonclient.utils import np_to_triton_dtype, InferenceServerException
 
 
-# --- Added UserData and callback ---
 class UserData:
     def __init__(self):
         self._completed_requests = queue.Queue()
         self._first_chunk_time = None
+        self._second_chunk_time = None
         self._start_time = None
 
     def record_start_time(self):
@@ -75,39 +75,43 @@ class UserData:
             return self._first_chunk_time - self._start_time
         return None
 
+    def get_second_chunk_latency(self):
+        if self._first_chunk_time and self._second_chunk_time:
+            return self._second_chunk_time - self._first_chunk_time
+        return None
+
 
 def callback(user_data, result, error):
-    if user_data._first_chunk_time is None and not error:
-        user_data._first_chunk_time = time.time() # Record time of first successful chunk
+    if not error:
+        if user_data._first_chunk_time is None:
+            user_data._first_chunk_time = time.time()
+        elif user_data._second_chunk_time is None:
+            user_data._second_chunk_time = time.time()
+
    if error:
         user_data._completed_requests.put(error)
     else:
         user_data._completed_requests.put(result)
-# --- End Added UserData and callback ---
+
+
+def stream_callback(user_data_map, result, error):
+    request_id = None
+    if error:
+        print(f"An error occurred in the stream callback: {error}")
+    else:
+        request_id = result.get_response().id
+
+    if request_id:
+        user_data = user_data_map.get(request_id)
+        if user_data:
+            callback(user_data, result, error)
+        else:
+            print(f"Warning: Could not find user_data for request_id {request_id}")
 
 
 def write_triton_stats(stats, summary_file):
     with open(summary_file, "w") as summary_f:
         model_stats = stats["model_stats"]
-        # write a note, the log is from triton_client.get_inference_statistics(), to better human readability
-        summary_f.write(
-            "The log is parsing from triton_client.get_inference_statistics(), to better human readability. \n"
-        )
-        summary_f.write("To learn more about the log, please refer to: \n")
-        summary_f.write("1. https://github.com/triton-inference-server/server/blob/main/docs/user_guide/metrics.md \n")
-        summary_f.write("2. https://github.com/triton-inference-server/server/issues/5374 \n\n")
-        summary_f.write(
-            "To better improve throughput, we always would like let requests wait in the queue for a while, and then execute them with a larger batch size. \n"
-        )
-        summary_f.write(
-            "However, there is a trade-off between the increased queue time and the increased batch size. \n"
-        )
-        summary_f.write(
-            "You may change 'max_queue_delay_microseconds' and 'preferred_batch_size' in the model configuration file to achieve this. \n"
-        )
-        summary_f.write(
-            "See https://github.com/triton-inference-server/server/blob/main/docs/user_guide/model_configuration.md#delayed-batching for more details. \n\n"
-        )
         for model_state in model_stats:
             if "last_inference" not in model_state:
                 continue
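
The stream_callback added above is registered once per stream and fans results out to the per-request UserData objects, keyed by the response id that async_stream_infer attaches to each request. A minimal usage sketch of that pattern with the synchronous tritonclient gRPC API; the URL, model name, and the prepared inputs/outputs are illustrative assumptions, not taken from this patch:

    import functools
    import tritonclient.grpc as grpcclient_sync

    user_data_map = {}
    client = grpcclient_sync.InferenceServerClient(url="localhost:8001")
    # One callback serves the whole stream; it looks up the right UserData by request id.
    client.start_stream(callback=functools.partial(stream_callback, user_data_map))

    request_id = "0"
    user_data_map[request_id] = UserData()
    client.async_stream_infer(
        "spark_tts",            # model name (placeholder)
        inputs,                 # prepared InferInput list (assumed)
        request_id=request_id,
        outputs=outputs,        # prepared InferRequestedOutput list (assumed)
        enable_empty_final_response=True,
    )
    # Response chunks for this request accumulate on
    # user_data_map[request_id]._completed_requests until the final (empty) response.
    client.stop_stream()
    client.close()
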
@@ -118,7 +122,10 @@ def write_triton_stats(stats, summary_file):
             total_input_time_s = int(model_inference_stats["compute_input"]["ns"]) / 1e9
             total_output_time_s = int(model_inference_stats["compute_output"]["ns"]) / 1e9
             summary_f.write(
-                f"queue time {total_queue_time_s:<5.2f} s, compute infer time {total_infer_time_s:<5.2f} s, compute input time {total_input_time_s:<5.2f} s, compute output time {total_output_time_s:<5.2f} s \n" # noqa
+                f"queue time {total_queue_time_s:<5.2f} s, "
+                f"compute infer time {total_infer_time_s:<5.2f} s, "
+                f"compute input time {total_input_time_s:<5.2f} s, "
+                f"compute output time {total_output_time_s:<5.2f} s \n"
             )
             model_batch_stats = model_state["batch_stats"]
             for batch in model_batch_stats:
@@ -127,21 +134,86 @@ def write_triton_stats(stats, summary_file):
                 compute_output = batch["compute_output"]
                 compute_infer = batch["compute_infer"]
                 batch_count = int(compute_infer["count"])
+                if batch_count == 0:
+                    continue
                 assert compute_infer["count"] == compute_output["count"] == compute_input["count"]
                 compute_infer_time_ms = int(compute_infer["ns"]) / 1e6
                 compute_input_time_ms = int(compute_input["ns"]) / 1e6
                 compute_output_time_ms = int(compute_output["ns"]) / 1e6
                 summary_f.write(
-                    f"execuate inference with batch_size {batch_size:<2} total {batch_count:<5} times, total_infer_time {compute_infer_time_ms:<9.2f} ms, avg_infer_time {compute_infer_time_ms:<9.2f}/{batch_count:<5}={compute_infer_time_ms / batch_count:.2f} ms, avg_infer_time_per_sample {compute_infer_time_ms:<9.2f}/{batch_count:<5}/{batch_size}={compute_infer_time_ms / batch_count / batch_size:.2f} ms \n" # noqa
+                    f"execute inference with batch_size {batch_size:<2} total {batch_count:<5} times, "
+                    f"total_infer_time {compute_infer_time_ms:<9.2f} ms, "
+                    f"avg_infer_time {compute_infer_time_ms:<9.2f}/{batch_count:<5}="
+                    f"{compute_infer_time_ms / batch_count:.2f} ms, "
+                    f"avg_infer_time_per_sample {compute_infer_time_ms:<9.2f}/{batch_count:<5}/{batch_size}="
+                    f"{compute_infer_time_ms / batch_count / batch_size:.2f} ms \n"
                 )
                 summary_f.write(
-                    f"input {compute_input_time_ms:<9.2f} ms, avg {compute_input_time_ms / batch_count:.2f} ms, " # noqa
+                    f"input {compute_input_time_ms:<9.2f} ms, avg {compute_input_time_ms / batch_count:.2f} ms, "
                 )
                 summary_f.write(
-                    f"output {compute_output_time_ms:<9.2f} ms, avg {compute_output_time_ms / batch_count:.2f} ms \n" # noqa
+                    f"output {compute_output_time_ms:<9.2f} ms, avg {compute_output_time_ms / batch_count:.2f} ms \n"
                 )
 
 
+def subtract_stats(stats_after, stats_before):
+    """Subtracts two Triton inference statistics objects."""
+    stats_diff = json.loads(json.dumps(stats_after))
+
+    model_stats_before_map = {
+        s["name"]: {
+            "version": s["version"],
+            "last_inference": s.get("last_inference", 0),
+            "inference_count": s.get("inference_count", 0),
+            "execution_count": s.get("execution_count", 0),
+            "inference_stats": s.get("inference_stats", {}),
+            "batch_stats": s.get("batch_stats", []),
+        }
+        for s in stats_before["model_stats"]
+    }
+
+    for model_stat_after in stats_diff["model_stats"]:
+        model_name = model_stat_after["name"]
+        if model_name in model_stats_before_map:
+            model_stat_before = model_stats_before_map[model_name]
+
+            model_stat_after["inference_count"] = str(
+                int(model_stat_after.get("inference_count", 0)) - int(model_stat_before.get("inference_count", 0))
+            )
+            model_stat_after["execution_count"] = str(
+                int(model_stat_after.get("execution_count", 0)) - int(model_stat_before.get("execution_count", 0))
+            )
+
+            if "inference_stats" in model_stat_after and "inference_stats" in model_stat_before:
+                for key in ["success", "fail", "queue", "compute_input", "compute_infer", "compute_output", "cache_hit", "cache_miss"]:
+                    if key in model_stat_after["inference_stats"] and key in model_stat_before["inference_stats"]:
+                        if "ns" in model_stat_after["inference_stats"][key]:
+                            ns_after = int(model_stat_after["inference_stats"][key]["ns"])
+                            ns_before = int(model_stat_before["inference_stats"][key]["ns"])
+                            model_stat_after["inference_stats"][key]["ns"] = str(ns_after - ns_before)
+                        if "count" in model_stat_after["inference_stats"][key]:
+                            count_after = int(model_stat_after["inference_stats"][key]["count"])
+                            count_before = int(model_stat_before["inference_stats"][key]["count"])
+                            model_stat_after["inference_stats"][key]["count"] = str(count_after - count_before)
+
+            if "batch_stats" in model_stat_after and "batch_stats" in model_stat_before:
+                batch_stats_before_map = {b["batch_size"]: b for b in model_stat_before["batch_stats"]}
+                for batch_stat_after in model_stat_after["batch_stats"]:
+                    bs = batch_stat_after["batch_size"]
+                    if bs in batch_stats_before_map:
+                        batch_stat_before = batch_stats_before_map[bs]
+                        for key in ["compute_input", "compute_infer", "compute_output"]:
+                            if key in batch_stat_after and key in batch_stat_before:
+                                count_after = int(batch_stat_after[key]["count"])
+                                count_before = int(batch_stat_before[key]["count"])
+                                batch_stat_after[key]["count"] = str(count_after - count_before)
+
+                                ns_after = int(batch_stat_after[key]["ns"])
+                                ns_before = int(batch_stat_before[key]["ns"])
+                                batch_stat_after[key]["ns"] = str(ns_after - ns_before)
+    return stats_diff
+
+
 def get_args():
     parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
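
Triton's statistics endpoint reports counters that accumulate over the server's whole lifetime, so the new subtract_stats helper diffs two snapshots to isolate just the work done by one benchmark run. A rough sketch of the intended call pattern, reusing the names the patch later introduces in main() (the empty model_name requests statistics for all models):

    stats_before = await stats_client.get_inference_statistics(model_name="", as_json=True)
    # ... run the offline or streaming benchmark tasks ...
    stats_after = await stats_client.get_inference_statistics(model_name="", as_json=True)
    stats = subtract_stats(stats_after, stats_before)
    write_triton_stats(stats, "stats_summary.txt")
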
@@ -209,7 +281,8 @@ def get_args():
         choices=[
             "f5_tts",
             "spark_tts",
-            "cosyvoice2"],
+            "cosyvoice2",
+            "cosyvoice2_dit"],
         help="triton model_repo module name to request",
     )
 
@@ -243,7 +316,6 @@ def get_args():
         help="log directory",
     )
 
-    # --- Added arguments ---
     parser.add_argument(
         "--mode",
         type=str,
@@ -260,8 +332,8 @@ def get_args():
 
     parser.add_argument(
         "--use-spk2info-cache",
-        type=bool,
-        default=False,
+        type=str,
+        default="False",
         help="Use spk2info cache for reference audio.",
     )
 
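
The type change on --use-spk2info-cache works around a standard argparse pitfall: with type=bool the parser simply calls bool() on the raw string, and bool("False") is True, so any explicit value on the command line enables the option. Keeping the value as a string and converting it explicitly (as main() now does) gives the expected behaviour. A small self-contained illustration of the difference, reusing only the flag name from the patch:

    import argparse

    p = argparse.ArgumentParser()
    p.add_argument("--use-spk2info-cache", type=bool, default=False)
    print(p.parse_args(["--use-spk2info-cache", "False"]).use_spk2info_cache)  # True

    p = argparse.ArgumentParser()
    p.add_argument("--use-spk2info-cache", type=str, default="False")
    args = p.parse_args(["--use-spk2info-cache", "False"])
    print(args.use_spk2info_cache == "True" or args.use_spk2info_cache == "true")  # False
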
@@ -284,39 +356,33 @@ def load_audio(wav_path, target_sample_rate=16000):
 
 
 def prepare_request_input_output(
-    protocol_client, # Can be grpcclient_aio or grpcclient_sync
+    protocol_client,
     waveform,
     reference_text,
     target_text,
     sample_rate=16000,
-    padding_duration: int = None, # Optional padding for offline mode
+    padding_duration: int = None,
     use_spk2info_cache: bool = False
 ):
     """Prepares inputs for Triton inference (offline or streaming)."""
     assert len(waveform.shape) == 1, "waveform should be 1D"
     lengths = np.array([[len(waveform)]], dtype=np.int32)
 
-    # Apply padding only if padding_duration is provided (for offline)
     if padding_duration:
         duration = len(waveform) / sample_rate
-        # Estimate target duration based on text length ratio (crude estimation)
-        # Avoid division by zero if reference_text is empty
         if reference_text:
             estimated_target_duration = duration / len(reference_text) * len(target_text)
         else:
-            estimated_target_duration = duration # Assume target duration similar to reference if no text
+            estimated_target_duration = duration
 
-        # Calculate required samples based on estimated total duration
         required_total_samples = padding_duration * sample_rate * (
             (int(estimated_target_duration + duration) // padding_duration) + 1
         )
         samples = np.zeros((1, required_total_samples), dtype=np.float32)
         samples[0, : len(waveform)] = waveform
     else:
-        # No padding for streaming or if padding_duration is None
         samples = waveform.reshape(1, -1).astype(np.float32)
 
-    # Common input creation logic
     inputs = [
         protocol_client.InferInput("reference_wav", samples.shape, np_to_triton_dtype(samples.dtype)),
         protocol_client.InferInput(
@@ -355,12 +421,8 @@ def run_sync_streaming_inference(
 ):
     """Helper function to run the blocking sync streaming call."""
     start_time_total = time.time()
-    user_data.record_start_time() # Record start time for first chunk latency calculation
-
-    # Establish stream
-    sync_triton_client.start_stream(callback=functools.partial(callback, user_data))
+    user_data.record_start_time()
 
-    # Send request
     sync_triton_client.async_stream_infer(
         model_name,
         inputs,
@@ -369,91 +431,76 @@ def run_sync_streaming_inference(
         enable_empty_final_response=True,
     )
 
-    # Process results
     audios = []
     while True:
         try:
-            result = user_data._completed_requests.get() # Add timeout
+            result = user_data._completed_requests.get(timeout=200)
             if isinstance(result, InferenceServerException):
                 print(f"Received InferenceServerException: {result}")
-                sync_triton_client.stop_stream()
-                return None, None, None # Indicate error
-            # Get response metadata
+                return None, None, None, None
             response = result.get_response()
             final = response.parameters["triton_final_response"].bool_param
             if final is True:
                 break
 
             audio_chunk = result.as_numpy("waveform").reshape(-1)
-            if audio_chunk.size > 0: # Only append non-empty chunks
+            if audio_chunk.size > 0:
                 audios.append(audio_chunk)
             else:
                 print("Warning: received empty audio chunk.")
 
         except queue.Empty:
             print(f"Timeout waiting for response for request id {request_id}")
-            sync_triton_client.stop_stream()
-            return None, None, None # Indicate error
+            return None, None, None, None
 
-    sync_triton_client.stop_stream()
     end_time_total = time.time()
     total_request_latency = end_time_total - start_time_total
     first_chunk_latency = user_data.get_first_chunk_latency()
+    second_chunk_latency = user_data.get_second_chunk_latency()
 
-    # Reconstruct audio using cross-fade (from client_grpc_streaming.py)
-    actual_duration = 0
     if audios:
-        # Only spark_tts model uses cross-fade
         if model_name == "spark_tts":
             cross_fade_samples = int(chunk_overlap_duration * save_sample_rate)
             fade_out = np.linspace(1, 0, cross_fade_samples)
             fade_in = np.linspace(0, 1, cross_fade_samples)
             reconstructed_audio = None
 
-            # Simplified reconstruction based on client_grpc_streaming.py
             if not audios:
                 print("Warning: No audio chunks received.")
-                reconstructed_audio = np.array([], dtype=np.float32) # Empty array
+                reconstructed_audio = np.array([], dtype=np.float32)
             elif len(audios) == 1:
                 reconstructed_audio = audios[0]
             else:
-                reconstructed_audio = audios[0][:-cross_fade_samples] # Start with first chunk minus overlap
+                reconstructed_audio = audios[0][:-cross_fade_samples]
                 for i in range(1, len(audios)):
-                    # Cross-fade section
                     cross_faded_overlap = (audios[i][:cross_fade_samples] * fade_in +
                                            audios[i - 1][-cross_fade_samples:] * fade_out)
-                    # Middle section of the current chunk
                     middle_part = audios[i][cross_fade_samples:-cross_fade_samples]
-                    # Concatenate
                     reconstructed_audio = np.concatenate([reconstructed_audio, cross_faded_overlap, middle_part])
-                # Add the last part of the final chunk
                 reconstructed_audio = np.concatenate([reconstructed_audio, audios[-1][-cross_fade_samples:]])
 
             if reconstructed_audio is not None and reconstructed_audio.size > 0:
                 actual_duration = len(reconstructed_audio) / save_sample_rate
-                # Save reconstructed audio
                 sf.write(audio_save_path, reconstructed_audio, save_sample_rate, "PCM_16")
             else:
                 print("Warning: No audio chunks received or reconstructed.")
-                actual_duration = 0 # Set duration to 0 if no audio
+                actual_duration = 0
         else:
             reconstructed_audio = np.concatenate(audios)
-            print(f"reconstructed_audio: {reconstructed_audio.shape}")
             actual_duration = len(reconstructed_audio) / save_sample_rate
-            # Save reconstructed audio
             sf.write(audio_save_path, reconstructed_audio, save_sample_rate, "PCM_16")
 
     else:
         print("Warning: No audio chunks received.")
         actual_duration = 0
 
-    return total_request_latency, first_chunk_latency, actual_duration
+    return total_request_latency, first_chunk_latency, second_chunk_latency, actual_duration
 
 
 async def send_streaming(
     manifest_item_list: list,
     name: str,
-    server_url: str, # Changed from sync_triton_client
+    server_url: str,
     protocol_client: types.ModuleType,
     log_interval: int,
     model_name: str,
@@ -466,11 +513,13 @@ async def send_streaming(
     total_duration = 0.0
     latency_data = []
     task_id = int(name[5:])
-    sync_triton_client = None # Initialize client variable
+    sync_triton_client = None
+    user_data_map = {}
 
-    try: # Wrap in try...finally to ensure client closing
+    try:
         print(f"{name}: Initializing sync client for streaming...")
-        sync_triton_client = grpcclient_sync.InferenceServerClient(url=server_url, verbose=False) # Create client here
+        sync_triton_client = grpcclient_sync.InferenceServerClient(url=server_url, verbose=False)
+        sync_triton_client.start_stream(callback=functools.partial(stream_callback, user_data_map))
 
         print(f"{name}: Starting streaming processing for {len(manifest_item_list)} items.")
         for i, item in enumerate(manifest_item_list):
@@ -490,12 +539,13 @@ async def send_streaming(
                     padding_duration=padding_duration,
                     use_spk2info_cache=use_spk2info_cache
                 )
+
                 request_id = str(uuid.uuid4())
                 user_data = UserData()
+                user_data_map[request_id] = user_data
 
                 audio_save_path = os.path.join(audio_save_dir, f"{item['target_audio_path']}.wav")
-
-                total_request_latency, first_chunk_latency, actual_duration = await asyncio.to_thread(
+                total_request_latency, first_chunk_latency, second_chunk_latency, actual_duration = await asyncio.to_thread(
                     run_sync_streaming_inference,
                     sync_triton_client,
                     model_name,
@@ -509,12 +559,18 @@ async def send_streaming(
                 )
 
                 if total_request_latency is not None:
-                    print(f"{name}: Item {i} - First Chunk Latency: {first_chunk_latency:.4f}s, Total Latency: {total_request_latency:.4f}s, Duration: {actual_duration:.4f}s")
-                    latency_data.append((total_request_latency, first_chunk_latency, actual_duration))
+                    print(
+                        f"{name}: Item {i} - First Chunk Latency: {first_chunk_latency:.4f}s, "
+                        f"Second Chunk Latency: {second_chunk_latency if second_chunk_latency is not None else 'N/A'}, "
+                        f"Total Latency: {total_request_latency:.4f}s, Duration: {actual_duration:.4f}s"
+                    )
+                    latency_data.append((total_request_latency, first_chunk_latency, second_chunk_latency, actual_duration))
                     total_duration += actual_duration
                 else:
                     print(f"{name}: Item {i} failed.")
 
+                del user_data_map[request_id]
+
             except FileNotFoundError:
                 print(f"Error: Audio file not found for item {i}: {item['audio_filepath']}")
             except Exception as e:
@@ -522,10 +578,11 @@ async def send_streaming(
                 import traceback
                 traceback.print_exc()
 
-    finally: # Ensure client is closed
+    finally:
         if sync_triton_client:
             try:
-                print(f"{name}: Closing sync client...")
+                print(f"{name}: Closing stream and sync client...")
+                sync_triton_client.stop_stream()
                 sync_triton_client.close()
             except Exception as e:
                 print(f"{name}: Error closing sync client: {e}")
@@ -550,7 +607,6 @@ async def send(
     latency_data = []
     task_id = int(name[5:])
 
-    print(f"manifest_item_list: {manifest_item_list}")
     for i, item in enumerate(manifest_item_list):
         if i % log_interval == 0:
             print(f"{name}: {i}/{len(manifest_item_list)}")
@@ -591,7 +647,6 @@ def load_manifests(manifest_path):
             assert len(line.strip().split("|")) == 4
             utt, prompt_text, prompt_wav, gt_text = line.strip().split("|")
             utt = Path(utt).stem
-            # gt_wav = os.path.join(os.path.dirname(manifest_path), "wavs", utt + ".wav")
             if not os.path.isabs(prompt_wav):
                 prompt_wav = os.path.join(os.path.dirname(manifest_path), prompt_wav)
             manifest_list.append(
@@ -632,23 +687,17 @@ async def main():
     args = get_args()
     url = f"{args.server_addr}:{args.server_port}"
 
-    # --- Client Initialization based on mode ---
     triton_client = None
     protocol_client = None
     if args.mode == "offline":
         print("Initializing gRPC client for offline mode...")
-        # Use the async client for offline tasks
         triton_client = grpcclient_aio.InferenceServerClient(url=url, verbose=False)
         protocol_client = grpcclient_aio
     elif args.mode == "streaming":
         print("Initializing gRPC client for streaming mode...")
-        # Use the sync client for streaming tasks, handled via asyncio.to_thread
-        # We will create one sync client instance PER TASK inside send_streaming.
-        # triton_client = grpcclient_sync.InferenceServerClient(url=url, verbose=False) # REMOVED: Client created per task now
-        protocol_client = grpcclient_sync # protocol client for input prep
+        protocol_client = grpcclient_sync
     else:
         raise ValueError(f"Invalid mode: {args.mode}")
-    # --- End Client Initialization ---
 
     if args.reference_audio:
         args.num_tasks = 1
@@ -682,15 +731,24 @@ async def main():
     else:
         manifest_item_list = load_manifests(args.manifest_path)
 
+    stats_client = None
+    stats_before = None
+    try:
+        print("Initializing temporary async client for fetching stats...")
+        stats_client = grpcclient_aio.InferenceServerClient(url=url, verbose=False)
+        print("Fetching inference statistics before running tasks...")
+        stats_before = await stats_client.get_inference_statistics(model_name="", as_json=True)
+    except Exception as e:
+        print(f"Could not retrieve statistics before running tasks: {e}")
+
     num_tasks = min(args.num_tasks, len(manifest_item_list))
     manifest_item_list = split_data(manifest_item_list, num_tasks)
 
     os.makedirs(args.log_dir, exist_ok=True)
-
+    args.use_spk2info_cache = args.use_spk2info_cache == "True" or args.use_spk2info_cache == "true"
     tasks = []
     start_time = time.time()
     for i in range(num_tasks):
-        # --- Task Creation based on mode ---
         if args.mode == "offline":
             task = asyncio.create_task(
                 send(
@@ -711,7 +769,7 @@ async def main():
                 send_streaming(
                     manifest_item_list[i],
                     name=f"task-{i}",
-                    server_url=url, # Pass URL instead of client
+                    server_url=url,
                     protocol_client=protocol_client,
                     log_interval=args.log_interval,
                     model_name=args.model_name,
@@ -722,7 +780,6 @@ async def main():
                     use_spk2info_cache=args.use_spk2info_cache,
                 )
             )
-        # --- End Task Creation ---
         tasks.append(task)
 
     ans_list = await asyncio.gather(*tasks)
@@ -735,7 +792,7 @@ async def main():
     for ans in ans_list:
         if ans:
             total_duration += ans[0]
-            latency_data.extend(ans[1]) # Use extend for list of lists
+            latency_data.extend(ans[1])
         else:
             print("Warning: A task returned None, possibly due to an error.")
 
@@ -751,10 +808,8 @@ async def main():
     s += f"({total_duration / 3600:.2f} hours)\n"
     s += f"processing time: {elapsed:.3f} seconds ({elapsed / 3600:.2f} hours)\n"
 
-    # --- Statistics Reporting based on mode ---
     if latency_data:
         if args.mode == "offline":
-            # Original offline latency calculation
             latency_list = [chunk_end for (chunk_end, chunk_duration) in latency_data]
             if latency_list:
                 latency_ms = sum(latency_list) / float(len(latency_list)) * 1000.0
@@ -769,9 +824,9 @@ async def main():
                 s += "No latency data collected for offline mode.\n"
 
         elif args.mode == "streaming":
-            # Calculate stats for total request latency and first chunk latency
-            total_latency_list = [total for (total, first, duration) in latency_data if total is not None]
-            first_chunk_latency_list = [first for (total, first, duration) in latency_data if first is not None]
+            total_latency_list = [total for (total, first, second, duration) in latency_data if total is not None]
+            first_chunk_latency_list = [first for (total, first, second, duration) in latency_data if first is not None]
+            second_chunk_latency_list = [second for (total, first, second, duration) in latency_data if second is not None]
 
             s += "\n--- Total Request Latency ---\n"
             if total_latency_list:
@@ -798,9 +853,21 @@ async def main():
                 s += f"average_first_chunk_latency_ms: {avg_first_chunk_latency_ms:.2f}\n"
             else:
                 s += "No first chunk latency data collected (check for errors or if all requests failed before first chunk).\n"
+
+            s += "\n--- Second Chunk Latency ---\n"
+            if second_chunk_latency_list:
+                avg_second_chunk_latency_ms = sum(second_chunk_latency_list) / len(second_chunk_latency_list) * 1000.0
+                variance_second_chunk_latency = np.var(second_chunk_latency_list, dtype=np.float64) * 1000.0
+                s += f"second_chunk_latency_variance: {variance_second_chunk_latency:.2f}\n"
+                s += f"second_chunk_latency_50_percentile_ms: {np.percentile(second_chunk_latency_list, 50) * 1000.0:.2f}\n"
+                s += f"second_chunk_latency_90_percentile_ms: {np.percentile(second_chunk_latency_list, 90) * 1000.0:.2f}\n"
+                s += f"second_chunk_latency_95_percentile_ms: {np.percentile(second_chunk_latency_list, 95) * 1000.0:.2f}\n"
+                s += f"second_chunk_latency_99_percentile_ms: {np.percentile(second_chunk_latency_list, 99) * 1000.0:.2f}\n"
+                s += f"average_second_chunk_latency_ms: {avg_second_chunk_latency_ms:.2f}\n"
+            else:
+                s += "No second chunk latency data collected (check for errors or if all requests failed before second chunk).\n"
     else:
         s += "No latency data collected.\n"
-    # --- End Statistics Reporting ---
 
     print(s)
     if args.manifest_path:
@@ -810,26 +877,27 @@ async def main():
     elif args.reference_audio:
         name = Path(args.reference_audio).stem
     else:
-        name = "results" # Default name if no manifest/split/audio provided
+        name = "results"
     with open(f"{args.log_dir}/rtf-{name}.txt", "w") as f:
         f.write(s)
 
-    # --- Statistics Fetching using temporary Async Client ---
-    # Use a separate async client for fetching stats regardless of mode
-    stats_client = None
     try:
-        print("Initializing temporary async client for fetching stats...")
-        stats_client = grpcclient_aio.InferenceServerClient(url=url, verbose=False)
-        print("Fetching inference statistics...")
-        # Fetching for all models, filtering might be needed depending on server setup
-        stats = await stats_client.get_inference_statistics(model_name="", as_json=True)
-        print("Fetching model config...")
-        metadata = await stats_client.get_model_config(model_name=args.model_name, as_json=True)
+        if stats_client and stats_before:
+            print("Fetching inference statistics after running tasks...")
+            stats_after = await stats_client.get_inference_statistics(model_name="", as_json=True)
+
+            print("Calculating statistics difference...")
+            stats = subtract_stats(stats_after, stats_before)
 
-        write_triton_stats(stats, f"{args.log_dir}/stats_summary-{name}.txt")
+            print("Fetching model config...")
+            metadata = await stats_client.get_model_config(model_name=args.model_name, as_json=True)
 
-        with open(f"{args.log_dir}/model_config-{name}.json", "w") as f:
-            json.dump(metadata, f, indent=4)
+            write_triton_stats(stats, f"{args.log_dir}/stats_summary-{name}.txt")
+
+            with open(f"{args.log_dir}/model_config-{name}.json", "w") as f:
+                json.dump(metadata, f, indent=4)
+        else:
+            print("Stats client not available or initial stats were not fetched. Skipping stats reporting.")
 
     except Exception as e:
         print(f"Could not retrieve statistics or config: {e}")
@@ -840,11 +908,9 @@ async def main():
                 await stats_client.close()
             except Exception as e:
                 print(f"Error closing async stats client: {e}")
-    # --- End Statistics Fetching ---
 
 
 if __name__ == "__main__":
-    # asyncio.run(main()) # Use TaskGroup for better exception handling if needed
     async def run_main():
         try:
             await main()