TJIET committed on
Commit
8c139ac
·
verified ·
1 Parent(s): 09b2d34

Delete evaluate_0_shot.py.py

Browse files
Files changed (1) hide show
  1. evaluate_0_shot.py.py +0 -110
evaluate_0_shot.py.py DELETED
@@ -1,110 +0,0 @@
1
- import os
2
- import torch
3
- from tqdm import tqdm
4
- from transformers import AutoModelForCausalLM
5
- from deepseek_vl.models import VLChatProcessor, MultiModalityCausalLM
6
- from deepseek_vl.utils.io import load_pil_images
7
- from PIL import Image
8
-
9
-
10
- model_path = "deepseek-ai/deepseek-vl-7b-chat"
11
- vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
12
- tokenizer = vl_chat_processor.tokenizer
13
-
14
- vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
15
- model_path,
16
- cache_dir=".",
17
- trust_remote_code=True
18
- )
19
- vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
20
-
21
-
22
- question = (
23
- "1. Identify the type of fruit or crop shown in the image. "
24
- "2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten) "
25
- "3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it) "
26
- "4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely). "
27
- "Please respond in the following format:\n"
28
- "Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] "
29
- "Recommendation: [keep for further growth / try to recover it /picking it/ discard it] Consumer Score: [1-100]"
30
- )
31
-
32
-
33
- root_folder = "../data/"
34
- output_root = "result"
35
- os.makedirs(output_root, exist_ok=True)
36
-
37
-
38
- for fruit in os.listdir(root_folder):
39
- fruit_path = os.path.join(root_folder, fruit)
40
- if not os.path.isdir(fruit_path):
41
- continue
42
-
43
- for subfolder in os.listdir(fruit_path):
44
- subfolder_path = os.path.join(fruit_path, subfolder)
45
- if not os.path.isdir(subfolder_path):
46
- continue
47
-
48
- image_files = [f for f in os.listdir(subfolder_path) if f.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp'))]
49
- if not image_files:
50
- continue
51
-
52
- output_file = os.path.join(output_root, f"{fruit}_{subfolder}.txt")
53
-
54
-
55
- with open(output_file, "w", encoding="utf-8") as fout:
56
- for image_file in tqdm(image_files, desc=f"{fruit}/{subfolder}"):
57
- image_path = os.path.join(subfolder_path, image_file)
58
-
59
- try:
60
-
61
- conversation = [
62
- {
63
- "role": "User",
64
- "content": "<image_placeholder>" + question,
65
- "images": [image_path],
66
- },
67
- {"role": "Assistant", "content": ""}
68
- ]
69
-
70
-
71
- pil_images = load_pil_images(conversation)
72
-
73
-
74
- prepare_inputs = vl_chat_processor(
75
- conversations=conversation,
76
- images=pil_images,
77
- force_batchify=True
78
- ).to(vl_gpt.device)
79
-
80
- inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
81
-
82
-
83
- outputs = vl_gpt.language_model.generate(
84
- inputs_embeds=inputs_embeds,
85
- attention_mask=prepare_inputs.attention_mask,
86
- pad_token_id=tokenizer.eos_token_id,
87
- bos_token_id=tokenizer.bos_token_id,
88
- eos_token_id=tokenizer.eos_token_id,
89
- max_new_tokens=512,
90
- do_sample=False,
91
- use_cache=True
92
- )
93
-
94
- answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True).strip()
95
-
96
-
97
- fout.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
98
- fout.write(f"🖼️ Image Name: {image_file}\n")
99
- fout.write(f"📝 Answer:\n{answer}\n")
100
- fout.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
101
- print(f"✅ {image_file} => {answer.splitlines()[0]}")
102
-
103
- except Exception as e:
104
- print(f"[ERROR] {fruit}/{subfolder}/{image_file}: {e}")
105
- fout.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
106
- fout.write(f"🖼️ Image Name: {image_file}\n")
107
- fout.write(f"❌ ERROR: {e}\n")
108
- fout.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
109
-
110
-