nukopy committed on
Commit
3355fb8
·
1 Parent(s): 5006d20

fix: logging format

Browse files
apps/audio_cloning/cheched_vallex.py CHANGED
@@ -154,7 +154,7 @@ def infer_from_cached_prompt(
154
  timings: List[Tuple[str, float]] = []
155
  start_time = time.perf_counter()
156
  try:
157
- print("Loading cached prompt from: %s", prompt_path)
158
  prompt_data = np.load(prompt_path)
159
  audio_tokens = torch.from_numpy(prompt_data["audio_tokens"]).to(
160
  dtype=torch.long
@@ -201,7 +201,7 @@ def infer_from_cached_prompt(
201
  audio_prompts = audio_prompts.unsqueeze(0)
202
 
203
  start_time = time.perf_counter()
204
- print("Start inferring from cached prompt: %s", prompt_path)
205
  encoded_frames = vallex.model.inference(
206
  text_tokens.to(vallex.device),
207
  text_tokens_lens.to(vallex.device),
@@ -235,12 +235,12 @@ def infer_from_cached_prompt(
235
  )
236
 
237
  for step, duration in timings:
238
- print("%s%.4f sec", step, duration)
239
 
240
  timing_report = "\n↓\n".join(
241
  f"{step}:{duration:.4f} sec" for step, duration in timings
242
  )
243
- print("推論ステップ計測結果\n%s", timing_report)
244
 
245
  return message, (24000, samples.squeeze(0).cpu().numpy())
246
 
 
154
  timings: List[Tuple[str, float]] = []
155
  start_time = time.perf_counter()
156
  try:
157
+ print(f"Loading cached prompt from: {prompt_path}")
158
  prompt_data = np.load(prompt_path)
159
  audio_tokens = torch.from_numpy(prompt_data["audio_tokens"]).to(
160
  dtype=torch.long
 
201
  audio_prompts = audio_prompts.unsqueeze(0)
202
 
203
  start_time = time.perf_counter()
204
+ print(f"Start inferring from cached prompt: {prompt_path}")
205
  encoded_frames = vallex.model.inference(
206
  text_tokens.to(vallex.device),
207
  text_tokens_lens.to(vallex.device),
 
235
  )
236
 
237
  for step, duration in timings:
238
+ print(f"{step}{duration:.4f} sec")
239
 
240
  timing_report = "\n↓\n".join(
241
  f"{step}:{duration:.4f} sec" for step, duration in timings
242
  )
243
+ print(f"推論ステップ計測結果\n{timing_report}")
244
 
245
  return message, (24000, samples.squeeze(0).cpu().numpy())
246
 
apps/audio_cloning/vallex/main.py CHANGED
@@ -43,25 +43,25 @@ logger = logging.getLogger(__name__)
43
  # set base directory
44
  OUTPUT_BASE_DIR = os.getenv("HF_HOME", ".")
45
  PREPARED_BASE_DIR = "."
46
- print("Base directory: %s", OUTPUT_BASE_DIR)
47
- print("Prepared base directory: %s", PREPARED_BASE_DIR)
48
 
49
  # set languages
50
  langid.set_languages(["en", "zh", "ja"])
51
 
52
  # set nltk data path
53
  nltk.data.path = nltk.data.path + [os.path.join(os.getcwd(), "nltk_data")]
54
- print("nltk_data path: %s", nltk.data.path)
55
 
56
  # get encoding
57
  print(
58
- "default encoding is %s,file system encoding is %s",
59
- sys.getdefaultencoding(),
60
- sys.getfilesystemencoding(),
61
  )
62
 
63
  # check python version
64
- print("You are using Python version %s", platform.python_version())
65
  if sys.version_info[0] < 3 or sys.version_info[1] < 7:
66
  logger.warning("The Python version is too low and may cause problems")
67
  if platform.system().lower() == "windows":
@@ -74,7 +74,7 @@ os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
74
 
75
  # set torch threads (guarded for hot-reload)
76
  thread_count = multiprocessing.cpu_count()
77
- print("Use %d cpu cores for computing", thread_count)
78
  if not getattr(torch, "_vallex_threads_configured", False):
79
  torch.set_num_threads(thread_count)
80
  try:
@@ -105,7 +105,7 @@ if torch.cuda.is_available():
105
  device = torch.device("cuda", 0)
106
  # if torch.backends.mps.is_available():
107
  # device = torch.device("mps")
108
- print("Device set to %s", device)
109
 
110
  # Download VALL-E-X model weights if not exists
111
  OUTPUT_DIR_CHECKPOINTS = os.path.join(OUTPUT_BASE_DIR, "models/checkpoints")
@@ -436,12 +436,12 @@ def infer_from_audio(
436
  timings.append(("ボコーダ復号", time.perf_counter() - start_time))
437
 
438
  for step, duration in timings:
439
- print("%s%.4f sec", step, duration)
440
 
441
  timing_report = "\n↓\n".join(
442
  f"{step}:{duration:.4f} sec" for step, duration in timings
443
  )
444
- print("推論ステップ計測結果\n%s", timing_report)
445
 
446
  message = f"text prompt: {text_pr}\nsythesized text: {text}"
447
  return message, (24000, samples.squeeze(0).cpu().numpy())
 
43
  # set base directory
44
  OUTPUT_BASE_DIR = os.getenv("HF_HOME", ".")
45
  PREPARED_BASE_DIR = "."
46
+ print(f"Base directory: {OUTPUT_BASE_DIR}")
47
+ print(f"Prepared base directory: {PREPARED_BASE_DIR}")
48
 
49
  # set languages
50
  langid.set_languages(["en", "zh", "ja"])
51
 
52
  # set nltk data path
53
  nltk.data.path = nltk.data.path + [os.path.join(os.getcwd(), "nltk_data")]
54
+ print(f"nltk_data path: {nltk.data.path}")
55
 
56
  # get encoding
57
  print(
58
+ "default encoding is "
59
+ f"{sys.getdefaultencoding()},"
60
+ f"file system encoding is {sys.getfilesystemencoding()}"
61
  )
62
 
63
  # check python version
64
+ print(f"You are using Python version {platform.python_version()}")
65
  if sys.version_info[0] < 3 or sys.version_info[1] < 7:
66
  logger.warning("The Python version is too low and may cause problems")
67
  if platform.system().lower() == "windows":
 
74
 
75
  # set torch threads (guarded for hot-reload)
76
  thread_count = multiprocessing.cpu_count()
77
+ print(f"Use {thread_count} cpu cores for computing")
78
  if not getattr(torch, "_vallex_threads_configured", False):
79
  torch.set_num_threads(thread_count)
80
  try:
 
105
  device = torch.device("cuda", 0)
106
  # if torch.backends.mps.is_available():
107
  # device = torch.device("mps")
108
+ print(f"Device set to {device}")
109
 
110
  # Download VALL-E-X model weights if not exists
111
  OUTPUT_DIR_CHECKPOINTS = os.path.join(OUTPUT_BASE_DIR, "models/checkpoints")
 
436
  timings.append(("ボコーダ復号", time.perf_counter() - start_time))
437
 
438
  for step, duration in timings:
439
+ print(f"{step}{duration:.4f} sec")
440
 
441
  timing_report = "\n↓\n".join(
442
  f"{step}:{duration:.4f} sec" for step, duration in timings
443
  )
444
+ print(f"推論ステップ計測結果\n{timing_report}")
445
 
446
  message = f"text prompt: {text_pr}\nsythesized text: {text}"
447
  return message, (24000, samples.squeeze(0).cpu().numpy())