Repoaner committed on
Commit
0c2371a
·
verified ·
1 Parent(s): b3341e7

Upload LLaVA-Next-3D/data_precessing/llm_analyze.py with huggingface_hub

Browse files
LLaVA-Next-3D/data_precessing/llm_analyze.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pdb
2
+ import json
3
+ from tqdm import tqdm
4
+ from transformers import AutoModelForCausalLM, AutoTokenizer
5
+ import argparse
6
+ import os
7
+ import re
8
+
9
def process_data(split):
    """Annotate one shard of ScanRefer grounding data with LLM-extracted info.

    Loads a DeepSeek-R1-Distill-Qwen-14B model, takes the ``split``-th
    contiguous shard of 3667 samples from the ScanRefer LLaVA-style
    annotation file, asks the LLM to name the target object and the core
    locating cues for each referring sentence, and appends each item
    (with an added ``extra_info`` field, or ``None`` on parse failure)
    to ``scanrefer_extra_info_{split}.jsonl``.

    Args:
        split (int): zero-based shard index; selects
            ``all_data[split*3667:(split+1)*3667]``.

    Side effects: reads the annotation JSON, appends JSONL output, and
    resumes from an existing output file by skipping already-written rows.
    """
    model = AutoModelForCausalLM.from_pretrained("/mnt/petrelfs/wangzehan/cache/DeepSeek-R1-Distill-Qwen-14B", device_map="auto", torch_dtype="auto")
    tokenizer = AutoTokenizer.from_pretrained("/mnt/petrelfs/wangzehan/cache/DeepSeek-R1-Distill-Qwen-14B")

    with open('extra_data/annotation/scanrefer_vg_train_llava_style.json', 'r') as f:
        all_data = json.load(f)

    # Each worker processes a fixed-size contiguous shard of 3667 samples.
    data = all_data[split*3667:(split+1)*3667]

    out_path = f'extra_data/annotation/scanrefer_extra_info_{split}.jsonl'
    # Resume support: one output line per processed item, so the line count
    # tells us how many items of this shard are already done.
    if os.path.exists(out_path):
        with open(out_path, 'r') as f_extra_info:
            count = sum(1 for _ in f_extra_info)
        data = data[count:]

    prompt = '''Suppose you are locating one target object described by a sentence from a video. Please analyze what the target object is, and simplify the core information that help locating the object.
Sentence: <SENTENCE>.
Only output json format as: {target_obj: "", core_info: "1.It is XXX, 2. It is XXX"}'''

    for item in tqdm(data):
        # The referring sentence is the last line of the first user turn
        # (earlier lines carry the image/video placeholder tokens).
        sentence = item['conversations'][0]['value'].split('\n')[-1]

        messages = [
            {"role": "user", "content": prompt.replace("<SENTENCE>", sentence)},
        ]

        input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
        output_ids = model.generate(input_ids,
                                    max_new_tokens=4096,
                                    do_sample=True,  # FIX: temperature is silently ignored under greedy decoding
                                    temperature=0.5)

        # FIX: decode only the newly generated tokens — decoding output_ids[0]
        # in full would re-include the prompt echo and special tokens, which
        # the regex below would also scan.
        content = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
        match = re.search(r'```json\n(.*?)\n```', content, re.DOTALL)
        extra_info = None
        if match:
            try:
                extra_info = json.loads(match.group(1))
            except json.JSONDecodeError:
                # FIX: one malformed model response must not abort the whole
                # run; record it as unparsed and move on.
                extra_info = None
        item['extra_info'] = extra_info

        # Append immediately so progress survives interruption (see resume
        # logic above).
        with open(out_path, 'a') as f_extra_info:
            f_extra_info.write(json.dumps(item) + '\n')
51
if __name__ == "__main__":
    # CLI entry point: --split picks which 3667-sample shard this worker
    # processes (defaults to the first shard).
    cli = argparse.ArgumentParser()
    cli.add_argument('--split', type=int, default=0)
    cli_args = cli.parse_args()
    process_data(cli_args.split)