zh-liu799 commited on
Commit
acc5099
·
verified ·
1 Parent(s): dc103c1

Delete check_2.py

Browse files
Files changed (1) hide show
  1. check_2.py +0 -100
check_2.py DELETED
@@ -1,100 +0,0 @@
1
- import argparse# run.py
2
- import os
3
- import sys
4
- import json
5
- import torch
6
- from tqdm import tqdm
7
- import numpy as np
8
- import re
9
- import google.genai as genai
10
-
11
def count_lengths(input_lengths):
    """Map a mel-spectrogram frame count to the encoder's down-sampled lengths.

    Two successive halvings (stride-2 convolution arithmetic — TODO confirm
    against the audio tower's conv stack).  Returns a tuple
    ``(input_lengths, output_lengths)`` after the first and second reduction.
    """
    half = (input_lengths - 1) // 2 + 1
    return half, (half - 2) // 2 + 1
15
-
16
def load_audio_qwenomni(audio_path):
    """Load a waveform at 16 kHz and turn it into Qwen-Omni encoder inputs.

    The signal is zero-padded up to a multiple of 160 samples (one 10 ms hop
    at 16 kHz), converted to a 128-bin log-mel spectrogram, and returned
    together with the length bookkeeping the encoder expects.
    """
    samples = whisper.load_audio(audio_path, sr=16000).tolist()
    remainder = len(samples) % 160
    if remainder:
        samples += [0] * (160 - remainder)
    waveform = np.array(samples, dtype=np.float32)

    mel = whisper.log_mel_spectrogram(waveform, n_mels=128)
    n_frames = mel.shape[1]
    in_len, out_len = count_lengths(n_frames)

    return {
        "audio_feats": mel,
        "lens_audio_feats": [n_frames],
        "input_lens": [in_len],
        "output_lens": [out_len],
    }
32
-
33
# Bind the whisper module at module scope; the functions defined above use it
# at call time.  Plain `import` replaces the non-idiomatic
# `whisper = __import__("whisper")` — identical binding, clearer intent.
import whisper
34
-
35
import warnings
# Suppress a specific warning category — here UserWarning (e.g. from
# whisper/transformers during feature extraction).
warnings.filterwarnings("ignore", category=UserWarning)
38
-
39
-
40
- from transformers import AutoConfig, Qwen2_5OmniForConditionalGeneration
41
-
42
-
43
def load_audio(audio_path, audio_encoder, device):
    """Encode a single audio file through the Qwen-Omni audio tower.

    Runs feature extraction and the encoder forward pass under
    ``torch.no_grad()`` and returns the encoder's ``last_hidden_state``.
    Raises ``ValueError`` when dimension 1 of the result is not 1280
    (presumably the expected embedding width — TODO confirm axis).
    """
    with torch.no_grad():
        info = load_audio_qwenomni(audio_path)
        feats = torch.tensor(info["audio_feats"]).to(device)
        feat_lens = torch.tensor(info["lens_audio_feats"]).to(device)
        in_lens = torch.tensor(info["input_lens"]).to(device)
        hidden = audio_encoder(feats, feat_lens, in_lens).last_hidden_state

        if hidden.size(1) != 1280:
            raise ValueError("audio_feat size is not 1280")

        return hidden
55
-
56
def process_jsonl(jsonl_path, device, audio_encoder):
    """Re-extract audio features for every record in a JSONL manifest.

    For each line, the old ``audio_feat_path`` is mapped back to its source
    ``.wav`` file, re-encoded through ``audio_encoder`` on ``device``, and the
    resulting tensor is saved under an ``audio_feats/`` directory next to the
    manifest.  Records that fail for any reason are appended to
    ``<name>_bad_case.jsonl`` together with the error message.
    """
    new_feat_savepath = os.path.join(os.path.dirname(jsonl_path), 'audio_feats')
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(new_feat_savepath, exist_ok=True)

    bad_case_file = jsonl_path.replace('.jsonl', '_bad_case.jsonl')

    print(f"Processing {jsonl_path} on GPU {device}")
    with open(jsonl_path, "r") as f:
        lines = f.readlines()
    for line in tqdm(lines):
        try:
            data_item = json.loads(line)
            old_feats_path = data_item["audio_feat_path"]
            old_wav_path = os.path.join('/mnt/home/xiezhifei/projects/data_machine3', old_feats_path).replace('/audio_feats/', '/audio/').replace('.pt', '.wav')
            file_name = os.path.basename(old_feats_path)
            audio_feat = load_audio(old_wav_path, audio_encoder, device)
            torch.save(audio_feat, os.path.join(new_feat_savepath, file_name))
        except Exception as e:
            # BUG FIX: the original did `line['error'] = e`, but `line` is a
            # str (item assignment raises TypeError) and an exception object
            # is not JSON-serializable — so logging a bad case itself crashed.
            # Write a proper dict record instead.  Also use a distinct file
            # handle name so we don't shadow `f` from the read above.
            record = {"line": line.rstrip("\n"), "error": str(e)}
            with open(bad_case_file, 'a') as bad_f:
                bad_f.write(json.dumps(record, ensure_ascii=False) + '\n')
78
-
79
# Root directory holding the tts_<split_id>/ subfolders of JSONL manifests.
BASE_FOLDER = "/mnt/home/xiezhifei/projects/data_machine3/tts_data_0720_machine3_tobe_compressed"
80
-
81
def main():
    """CLI entry point: rebuild audio features for one data split on one GPU.

    Expects ``--split_id`` (selects ``tts_<split_id>/`` under BASE_FOLDER) and
    ``--gpu_id`` (selects the CUDA device).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--split_id', type=str, required=True)
    parser.add_argument('--gpu_id', type=int, required=True)
    args = parser.parse_args()

    device = f"cuda:{args.gpu_id}"

    # Build the audio tower from config only, then load fine-tuned weights.
    qwen_omni_config = AutoConfig.from_pretrained("/mnt/home/xiezhifei/projects/zh/checkpoint/stage1_paskale2e/qwen2.5-omni-3B")
    audio_encoder = Qwen2_5OmniForConditionalGeneration._from_config(qwen_omni_config).thinker.audio_tower
    audio_encoder.load_state_dict(torch.load("/mnt/home/xiezhifei/projects/zh/download/datasets--zh-liu799--passsss/snapshots/cac004a84c5bcb68fe9b4136c5b6f191159998c0/audio_tower_adapterfinetuned_0615.pth", map_location=device))
    # BUG FIX: the original wrote `.eval` without parentheses, which only
    # references the bound method and never switches the module out of
    # training mode (dropout/batch-norm would stay in train behavior).
    audio_encoder.requires_grad_(False).eval()

    audio_encoder.to(device)

    from glob import glob
    jsonl_path = glob(f"{BASE_FOLDER}/tts_{args.split_id}/*.jsonl")[0]
    process_jsonl(jsonl_path, device, audio_encoder)


if __name__ == "__main__":
    main()