TJIET committed on
Commit
15c9c1c
·
verified ·
1 Parent(s): 8c139ac

Delete evaluate_1_shot.py.py

Browse files
Files changed (1) hide show
  1. evaluate_1_shot.py.py +0 -136
evaluate_1_shot.py.py DELETED
@@ -1,136 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
-
3
-
4
- import os, gc, torch
5
- from tqdm import tqdm
6
- from transformers import AutoModelForCausalLM
7
- from deepseek_vl.models import VLChatProcessor, MultiModalityCausalLM
8
- from deepseek_vl.utils.io import load_pil_images
9
-
10
-
11
# Maps dataset folder names for fruits to their English labels.
# BUG FIX: this was declared as a *set* literal, so the later
# `chinese_to_english.get(fruit_cn, fruit_cn)` call in build_stage_example
# would raise AttributeError (sets have no `.get`). Rebuilt as an identity
# dict so lookups work; NOTE(review): the original Chinese folder-name keys
# appear to have been lost — restore them here when available.
chinese_to_english = {name: name for name in (
    'strawberry', 'tomato', 'guava', 'dragon fruit',
    'orange', 'pear', 'lychee', 'mango',
    'kiwi', 'papaya', 'apple', 'grape',
    'pomegranate', 'peach', 'banana', 'pomelo',
)}
17
-
18
# Maps dataset folder names for growth stages to their English labels.
# BUG FIX: this was declared as a *set* literal, so the later
# `stage_to_english.get(stage_cn, "mature")` call would raise
# AttributeError (sets have no `.get`). Rebuilt as an identity dict;
# NOTE(review): original Chinese keys were lost — restore when available.
stage_to_english = {name: name for name in (
    'unripe', 'mature', 'pest-damaged', 'rotten',
)}
21
-
22
# Next-action advice for the farmer, keyed by growth stage.
recommendation_map = dict(zip(
    ('unripe', 'mature', 'pest-damaged', 'rotten'),
    ('keep for further growth', 'picking it', 'try to recover it', 'discard it'),
))
28
-
29
# Baseline consumer-willingness score (1-100 scale) for each growth stage,
# used to template the one-shot assistant reply.
score_map = dict(zip(
    ('unripe', 'mature', 'pest-damaged', 'rotten'),
    (30, 85, 20, 5),
))
32
-
33
-
34
# --- Run configuration ---------------------------------------------------
model_path = "deepseek-ai/deepseek-vl-7b-chat"  # Hugging Face model id
root_folder = "../data/"  # dataset layout: <root>/<fruit>/<stage>/<images>
output_root = "result_1shot"  # one transcript file per (fruit, stage) pair
os.makedirs(output_root, exist_ok=True)  # idempotent: reuse existing dir
38
-
39
print("🚀 Loading DeepSeek‑VL‑7B ...")
# Processor bundles the image preprocessing, chat template, and tokenizer.
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer
# bfloat16 halves memory vs fp32; .eval() disables dropout for inference.
# Requires a CUDA GPU (.cuda()); weights are cached in the current dir.
vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path, cache_dir=".", trust_remote_code=True
).to(torch.bfloat16).cuda().eval()
45
-
46
-
47
-
48
# One-shot prompt sent with every image; the final two lines pin the model
# to a single parseable answer line.
# FIX: step 3's option list originally omitted "pick it" even though the
# required answer format (and recommendation_map's 'mature' entry) includes
# a picking action — added so the options and the format line agree.
question = (
    "1. Identify the type of fruit or crop shown in the image. \n"
    "2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten) \n"
    "3. Recommend the farmer’s next action. (Options: keep for further growth, pick it, try to recover it, discard it) \n"
    "4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).\n"
    "Please respond in the following format:\n"
    "Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] "
    "Recommendation: [keep for further growth / pick it /try to recover it / discard it] Consumer Score: [1-100]"
)
57
-
58
-
59
-
60
def build_stage_example(fruit_dir: str, fruit_cn: str, stage_cn: str):
    """Build the one-shot example conversation for a (fruit, stage) pair.

    Picks the first image (sorted order) under ``fruit_dir/stage_cn`` as the
    demonstration image and pairs it with a templated assistant reply built
    from the module-level label maps. Returns a two-message list
    (User turn, Assistant turn), or ``[]`` when the folder has no images.
    """
    stage_dir = os.path.join(fruit_dir, stage_cn)
    image_exts = (".png", ".jpg", ".jpeg", ".bmp")
    candidates = sorted(
        name for name in os.listdir(stage_dir)
        if name.lower().endswith(image_exts)
    )
    if not candidates:
        return []

    # First image in sorted order serves as the in-context demonstration.
    example_img = os.path.join(stage_dir, candidates[0])

    fruit_en = chinese_to_english.get(fruit_cn, fruit_cn)
    stage_en = stage_to_english.get(stage_cn, "mature")
    assistant_reply = (
        f"Type: {fruit_en} Growth Stage: {stage_en} "
        f"Recommendation: {recommendation_map[stage_en]} "
        f"Consumer Score: {score_map[stage_en]}"
    )

    user_turn = {
        "role": "User",
        "content": "<image_placeholder>\n" + question,
        "images": [example_img],
    }
    assistant_turn = {"role": "Assistant", "content": assistant_reply}
    return [user_turn, assistant_turn]
82
-
83
-
84
# Main evaluation loop: for every <fruit>/<stage> folder, run one-shot
# inference on each image (skipping the image used as the in-context
# example) and append the model's answer to result_1shot/<fruit>_<stage>.txt.
for fruit_cn in os.listdir(root_folder):
    fruit_path = os.path.join(root_folder, fruit_cn)
    if not os.path.isdir(fruit_path):
        continue  # skip stray files at the dataset root

    for stage_cn in os.listdir(fruit_path):
        stage_path = os.path.join(fruit_path, stage_cn)
        if not os.path.isdir(stage_path):
            continue  # skip stray files inside a fruit folder

        img_files = sorted([f for f in os.listdir(stage_path)
                            if f.lower().endswith((".png", ".jpg", ".jpeg", ".bmp"))])

        # One-shot example: first image of this very folder + templated reply.
        example_msgs = build_stage_example(fruit_path, fruit_cn, stage_cn)
        if not example_msgs:
            continue  # folder had no images

        output_file = os.path.join(output_root, f"{fruit_cn}_{stage_cn}.txt")
        with open(output_file, "w", encoding="utf-8") as fout:
            for img_name in tqdm(img_files, desc=f"{fruit_cn}/{stage_cn}"):
                img_path = os.path.join(stage_path, img_name)
                # Don't evaluate the image that serves as the in-context example.
                if img_path in example_msgs[0]["images"]:
                    continue

                # Conversation = one-shot example + query turn + empty
                # assistant turn for the model to complete.
                conversation = example_msgs + [
                    {"role": "User", "content": "<image_placeholder>\n" + question, "images": [img_path]},
                    {"role": "Assistant", "content": ""},
                ]
                try:
                    with torch.no_grad():
                        pil_imgs = load_pil_images(conversation)
                        prep_in = vl_chat_processor(conversations=conversation, images=pil_imgs, force_batchify=True).to(vl_gpt.device)
                        # Fuse image features into the text embedding sequence.
                        embeds = vl_gpt.prepare_inputs_embeds(**prep_in)
                        # Greedy decoding (do_sample=False) for reproducible answers.
                        out_ids = vl_gpt.language_model.generate(inputs_embeds=embeds, attention_mask=prep_in.attention_mask,
                                                                 pad_token_id=tokenizer.eos_token_id,
                                                                 bos_token_id=tokenizer.bos_token_id,
                                                                 eos_token_id=tokenizer.eos_token_id,
                                                                 max_new_tokens=256, do_sample=False, use_cache=True)
                        answer = tokenizer.decode(out_ids[0], skip_special_tokens=True).strip()

                    print(f"✅ {img_name} → {answer.splitlines()[0]}")
                    fout.write("="*25 + " IMAGE START " + "="*25 + "\n")
                    fout.write(f"🖼️ Image Name: {img_name}\n")
                    fout.write(f"📝 Answer:\n{answer}\n")
                    fout.write("="*25 + " IMAGE END " + "="*25 + "\n\n")
                except Exception as e:
                    # Best-effort: log the failing image and keep going so one
                    # bad file doesn't abort the whole sweep.
                    print(f"[ERROR] {fruit_cn}/{stage_cn}/{img_name}: {e}")

                finally:
                    # Free GPU/host memory between images to avoid OOM creep.
                    torch.cuda.empty_cache()
                    gc.collect()
136
-