prepare_data.py

# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. """
  15. Preprocess the Text to Speech dataset to parquet format
  16. """
import argparse
import os

import datasets

from verl.utils.hdfs_io import copy, makedirs

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_file", required=True, help="Path to training JSON/JSONL file")
    parser.add_argument("--test_file", required=True, help="Path to test JSON/JSONL file")
    parser.add_argument("--local_dir", required=True, help="Local directory for the output parquet files")
    parser.add_argument("--hdfs_dir", default=None, help="Optional HDFS directory to copy the output to")
    args = parser.parse_args()

    # Load datasets from local JSON files. load_dataset("json", ...) places all
    # rows under the default "train" split, hence ["train"] for both files.
    train_dataset = datasets.load_dataset("json", data_files=args.train_file)["train"]
    test_dataset = datasets.load_dataset("json", data_files=args.test_file)["train"]

    # Add fields to each data item, including a unique index within its split.
    def make_map_fn(split):
        def process_fn(example, idx):
            text = example.pop("text")
            # use the cosyvoice2 official huggingface-compatible checkpoint template
            question = text
            answer = ""
            data = {
                "data_source": f"{args.train_file}_{args.test_file}",  # use the file names as the data source
                "prompt": [
                    {
                        "role": "user",
                        "content": question,
                    },
                    {
                        "role": "assistant",
                        "content": answer,
                    },
                ],
                "ability": "text-to-speech",
                "reward_model": {"style": "rule", "ground_truth": text},
                "extra_info": {
                    "split": split,
                    "index": idx,
                    "text": text,
                },
            }
            return data

        return process_fn

    train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True)
    test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True)

    local_dir = args.local_dir
    hdfs_dir = args.hdfs_dir

    print(train_dataset)
    print(test_dataset)

    # Write each split to parquet, creating the output directory if needed.
    os.makedirs(local_dir, exist_ok=True)
    train_dataset.to_parquet(os.path.join(local_dir, "train.parquet"))
    test_dataset.to_parquet(os.path.join(local_dir, "test.parquet"))

    # Optionally mirror the local output directory to HDFS.
    if hdfs_dir is not None:
        makedirs(hdfs_dir)
        copy(src=local_dir, dst=hdfs_dir)
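# For illustration (hypothetical input): a JSONL line in the train file such as
#   {"text": "Hello world."}
# is mapped by process_fn above into a parquet record of the form:
#   {
#       "data_source": "<train_file>_<test_file>",
#       "prompt": [{"role": "user", "content": "Hello world."},
#                  {"role": "assistant", "content": ""}],
#       "ability": "text-to-speech",
#       "reward_model": {"style": "rule", "ground_truth": "Hello world."},
#       "extra_info": {"split": "train", "index": 0, "text": "Hello world."},
#   }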