ddwang2000 committed on
Commit 45a46ca · verified · 1 Parent(s): a5d8cca

Upload code/mmsu_inference.py with huggingface_hub

Files changed (1)
  1. code/mmsu_inference.py +80 -0
code/mmsu_inference.py ADDED
@@ -0,0 +1,80 @@
+ import os
+ import argparse
+ import json
+ from tqdm import tqdm
+
+
+ def main():
+     # Parse command-line arguments
+     parser = argparse.ArgumentParser(description="Easy Inference for your model.")
+     parser.add_argument('--input_jsonl', type=str, required=True, help="Path to the input JSONL file")
+     parser.add_argument('--output_jsonl', type=str, required=True, help="Path to the output JSONL file")
+     args = parser.parse_args()
+
+     input_file = args.input_jsonl
+     output_file = args.output_jsonl
+
+     # Step 1: Build your model; implement according to your own model
+     # model = model.cuda()
+     # model.eval()
+
+     # Step 2: Single-step inference over every record in the input JSONL
+     with open(input_file, "r") as fin, open(output_file, "w") as fout:
+         items = [json.loads(line) for line in fin if line.strip()]
+         for item in tqdm(items):
+             audio_path = item['audio_path']
+             task_name = item['task_name']
+             if not os.path.exists(audio_path):
+                 print(f"lack wav {audio_path}")
+                 continue
+
+             # Construct the prompt from the question and its answer choices
+             question = item['question']
+             question_prompts = 'Choose the most suitable answer from options A, B, C, and D to respond the question in next line, **you should only choose A or B or C or D.** Do not provide any additional explanations or content.'
+             choice_a = item['choice_a']
+             choice_b = item['choice_b']
+             choice_c = item.get('choice_c', None)
+             choice_d = item.get('choice_d', None)
+             choices = f'A. {choice_a}\nB. {choice_b}\nC. {choice_c}\nD. {choice_d}'
+             instruction = f"{question_prompts}\n\nQuestion: {question}\n\n{choices}"
+
+             # Step 3: Run model inference
+             # output = model.infer(
+             #     prompts=instruction,
+             #     audio_path=audio_path,
+             #     ...
+             # )
+             output = "Model response here"  # Placeholder response
+
+             # Step 4: Save the result as one JSON object per output line
+             json_string = json.dumps(
+                 {
+                     "id": item["id"],
+                     "audio_path": item["audio_path"],
+                     "question": question,
+                     "choice_a": choice_a,
+                     "choice_b": choice_b,
+                     "choice_c": choice_c,
+                     "choice_d": choice_d,
+                     "answer_gt": item["answer_gt"],
+                     "response": output,
+                     "task_name": task_name,
+                     "category": item["category"],
+                     "sub-category": item["sub-category"],
+                     "sub-sub-category": item["sub-sub-category"],
+                     "linguistics_sub_discipline": item["linguistics_sub_discipline"],
+                 },
+                 ensure_ascii=False
+             )
+             fout.write(json_string + "\n")
+
+
+ if __name__ == "__main__":
+     # bash:
+     # python mmsu_inference.py --input_jsonl /path/to/input.jsonl --output_jsonl /path/to/output.jsonl
+     main()
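
For reference, here is a minimal sketch of what a single input record could look like, written as one line of the input JSONL. Only the field names are taken from what the script actually reads and copies to its output (id, audio_path, task_name, question, choice_a through choice_d, answer_gt, category, sub-category, sub-sub-category, linguistics_sub_discipline); every value below is a made-up placeholder, not actual MMSU data.

import json

# Hypothetical example record: the field names match mmsu_inference.py,
# but all values are placeholders for illustration only.
example_item = {
    "id": "example_0001",
    "audio_path": "/path/to/audio/example_0001.wav",
    "task_name": "example_task",
    "question": "Which description best matches the speaker's utterance?",
    "choice_a": "First option",
    "choice_b": "Second option",
    "choice_c": "Third option",
    "choice_d": "Fourth option",
    "answer_gt": "A",
    "category": "example_category",
    "sub-category": "example_sub_category",
    "sub-sub-category": "example_sub_sub_category",
    "linguistics_sub_discipline": "example_sub_discipline",
}

# Write one JSON object per line, matching the format the script expects.
with open("input.jsonl", "w") as f:
    f.write(json.dumps(example_item, ensure_ascii=False) + "\n")

The script passes these fields through to the file given by --output_jsonl, one JSON object per line, adding a "response" field that holds the model's raw answer. Steps 1 and 3 are left as comments and must be replaced with your own model's loading and inference calls.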