Datasets:
Upload 18 files
Browse files- script/CogVLM2-0-shot.py +99 -0
- script/DeepSeekVL-chat-0-shot.py +111 -0
- script/DeepSeekVL-chat-1-shot.py +135 -0
- script/DeepSeekVL2-0-shot.py.py +107 -0
- script/DeepSeekVL2-1-shot.py +151 -0
- script/InterVL2.5-4B-0-shot.py +135 -0
- script/InterVL2.5-8B-0-shot.py +142 -0
- script/Janus-Pro-1B-0-shot.py +98 -0
- script/Janus-Pro-7B-0-shot.py +98 -0
- script/Janus-Pro-7B-1-shot.py +181 -0
- script/Mantis-8B-Idefics-0-shot.py +93 -0
- script/Mantis-8B-siglip-0-shot.py +77 -0
- script/Mantis-8B-siglip-1-shot.py +120 -0
- script/MiniCPM-0-2.6-8B-0-shot.py +83 -0
- script/Qwen2.5-VL-3B-0-shot.py +97 -0
- script/Qwen2.5-VL-7B-0-shot.py +95 -0
- script/Qwen2.5-VL-7B-1-shot.py +122 -0
- script/mPLUG-Owl3-7B-0-shot.py +97 -0
script/CogVLM2-0-shot.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import torch
from PIL import Image
from modelscope import AutoModelForCausalLM, AutoTokenizer

# Zero-shot fruit/crop assessment with CogVLM2 (int4 quantized).
# Walks ../data/<fruit>/<subfolder>/, queries the model once per image and
# writes one result file per subfolder into result/.

MODEL_PATH = "./cogvlm2-llama3-chat-19B-int4"
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# bfloat16 requires compute capability >= 8 (Ampere+); otherwise fall back to fp16
TORCH_TYPE = torch.bfloat16 if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8 else torch.float16


tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    torch_dtype=TORCH_TYPE,
    trust_remote_code=True,
    low_cpu_mem_usage=True,
).eval()


# Four-part prompt: type / growth stage / recommended action / consumer score.
# NOTE(review): this is a triple-quoted literal, so the inner double quotes,
# line breaks and indentation are sent to the model verbatim — presumably
# unintended (sibling scripts use adjacent-string concatenation); confirm
# before changing, as it alters the prompt the reported results used.
question = (
    ''' "1. Identify the type of fruit or crop shown in the image. "
    "2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten) "
    "3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, picking it, discard it) "
    "4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely). "
    "Please respond in the following format:\n"
    "Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] "
    "Recommendation: [keep for further growth / try to recover it / picking it / discard it] Consumer Score: [1-100]'''
)


root_folder = "../data"
output_root = "result"
os.makedirs(output_root, exist_ok=True)


# Data layout: root/<fruit>/<subfolder>/<images>
for fruit in os.listdir(root_folder):
    fruit_path = os.path.join(root_folder, fruit)
    if not os.path.isdir(fruit_path):
        continue

    for subfolder in os.listdir(fruit_path):
        subfolder_path = os.path.join(fruit_path, subfolder)
        if not os.path.isdir(subfolder_path):
            continue

        image_files = [f for f in os.listdir(subfolder_path) if f.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp', '.webp'))]
        if not image_files:
            continue

        # One result file per fruit/subfolder pair
        output_file = os.path.join(output_root, f"{fruit}_{subfolder}.txt")

        with open(output_file, "w", encoding="utf-8") as out_file:
            for filename in image_files:
                image_path = os.path.join(subfolder_path, filename)
                try:
                    # BUG FIX: these messages previously printed/recorded the
                    # literal text "(unknown)" instead of the image filename,
                    # so result entries could not be mapped back to images.
                    print(f"🖼️ Processing {filename}...")

                    image = Image.open(image_path).convert('RGB')
                    history = []  # single-turn: no prior conversation

                    input_by_model = model.build_conversation_input_ids(
                        tokenizer,
                        query=question,
                        history=history,
                        images=[image],
                        template_version='chat'
                    )

                    # Batch dimension added via unsqueeze(0); image tensor cast
                    # to the model's dtype before moving to the device.
                    inputs = {
                        'input_ids': input_by_model['input_ids'].unsqueeze(0).to(DEVICE),
                        'token_type_ids': input_by_model['token_type_ids'].unsqueeze(0).to(DEVICE),
                        'attention_mask': input_by_model['attention_mask'].unsqueeze(0).to(DEVICE),
                        'images': [[input_by_model['images'][0].to(DEVICE).to(TORCH_TYPE)]],
                    }

                    gen_kwargs = {
                        "max_new_tokens": 2048,
                        # pad token id for the Llama-3 backbone; presumably the
                        # model's reserved pad id — confirm against its config
                        "pad_token_id": 128002,
                    }

                    with torch.no_grad():
                        outputs = model.generate(**inputs, **gen_kwargs)
                        # strip the echoed prompt tokens, keep only the answer
                        outputs = outputs[:, inputs['input_ids'].shape[1]:]
                        response = tokenizer.decode(outputs[0])
                        response = response.split("<|end_of_text|>")[0].strip()

                    print(f"✅ Response: {response}\n")
                    out_file.write(f"{'='*25} IMAGE START {'='*25}\n")
                    out_file.write(f"🖼️ Image Name: {filename}\n")
                    out_file.write(f"📝 Answer:\n{response}\n")
                    out_file.write(f"{'='*25} IMAGE END {'='*25}\n\n")

                except Exception as e:
                    # Record the failure per image and keep processing the rest
                    print(f"[ERROR] {fruit}/{subfolder}/{filename}: {e}")
                    out_file.write(f"{'='*25} IMAGE START {'='*25}\n")
                    out_file.write(f"🖼️ Image Name: {filename}\n")
                    out_file.write(f"❌ ERROR: {e}\n")
                    out_file.write(f"{'='*25} IMAGE END {'='*25}\n\n")
|
script/DeepSeekVL-chat-0-shot.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM
from deepseek_vl.models import VLChatProcessor, MultiModalityCausalLM
from deepseek_vl.utils.io import load_pil_images
from PIL import Image

# Zero-shot fruit/crop assessment with DeepSeek-VL-7B-chat.
# For every <fruit>/<subfolder> directory under ../data/, each image is sent
# to the model with a fixed four-part question; answers are written to one
# text file per subfolder under result/.

model_path = "deepseek-ai/deepseek-vl-7b-chat"
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer

vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path,
    cache_dir=".",
    trust_remote_code=True
)
# bfloat16 on GPU; .eval() disables dropout for deterministic inference
vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()


# Prompt: identify type, growth stage, recommended action, consumer score
question = (
    "1. Identify the type of fruit or crop shown in the image. "
    "2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten) "
    "3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it) "
    "4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely). "
    "Please respond in the following format:\n"
    "Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] "
    "Recommendation: [keep for further growth / try to recover it /picking it/ discard it] Consumer Score: [1-100]"
)


root_folder = "../data/"
output_root = "result"
os.makedirs(output_root, exist_ok=True)


# Data layout: root/<fruit>/<subfolder>/<images>
for fruit in os.listdir(root_folder):
    fruit_path = os.path.join(root_folder, fruit)
    if not os.path.isdir(fruit_path):
        continue

    for subfolder in os.listdir(fruit_path):
        subfolder_path = os.path.join(fruit_path, subfolder)
        if not os.path.isdir(subfolder_path):
            continue

        image_files = [f for f in os.listdir(subfolder_path) if f.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp'))]
        if not image_files:
            continue

        # One result file per fruit/subfolder pair
        output_file = os.path.join(output_root, f"{fruit}_{subfolder}.txt")


        with open(output_file, "w", encoding="utf-8") as fout:
            for image_file in tqdm(image_files, desc=f"{fruit}/{subfolder}"):
                image_path = os.path.join(subfolder_path, image_file)

                try:

                    # Single-turn conversation; <image_placeholder> marks where
                    # the image features are spliced into the prompt.
                    conversation = [
                        {
                            "role": "User",
                            "content": "<image_placeholder>" + question,
                            "images": [image_path],
                        },
                        {"role": "Assistant", "content": ""}
                    ]


                    pil_images = load_pil_images(conversation)


                    prepare_inputs = vl_chat_processor(
                        conversations=conversation,
                        images=pil_images,
                        force_batchify=True
                    ).to(vl_gpt.device)


                    # Fuse text tokens and image features into input embeddings
                    inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)


                    # Greedy decoding (do_sample=False) for reproducible answers
                    outputs = vl_gpt.language_model.generate(
                        inputs_embeds=inputs_embeds,
                        attention_mask=prepare_inputs.attention_mask,
                        pad_token_id=tokenizer.eos_token_id,
                        bos_token_id=tokenizer.bos_token_id,
                        eos_token_id=tokenizer.eos_token_id,
                        max_new_tokens=512,
                        do_sample=False,
                        use_cache=True
                    )


                    answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True).strip()

                    fout.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    fout.write(f"🖼️ Image Name: {image_file}\n")
                    fout.write(f"📝 Answer:\n{answer}\n")
                    fout.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
                    print(f"✅ {image_file} => {answer.splitlines()[0]}")

                except Exception as e:
                    # Record the failure per image and keep processing the rest
                    print(f"[ERROR] {fruit}/{subfolder}/{image_file}: {e}")
                    fout.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    fout.write(f"🖼️ Image Name: {image_file}\n")
                    fout.write(f"❌ ERROR: {e}\n")
                    fout.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
script/DeepSeekVL-chat-1-shot.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# -*- coding: utf-8 -*-

# One-shot inference with DeepSeek-VL-7B-chat: every test image is preceded by
# one in-context example image of the same stage plus a templated example
# answer; results are written to result_1shot/.

import os, gc, torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM
from deepseek_vl.models import VLChatProcessor, MultiModalityCausalLM
from deepseek_vl.utils.io import load_pil_images


# BUG FIX: chinese_to_english and stage_to_english were declared as *set*
# literals, yet are read below with .get(), which sets do not have — the
# script crashed with AttributeError on first use. They are now dicts.
# NOTE(review): the original mapping keys (Chinese folder names, cf.
# root_folder = "../data/水果") were lost; identity keys preserve the
# .get(key, default) fallback behaviour — TODO restore the Chinese keys.
chinese_to_english = {
    'strawberry': 'strawberry', 'tomato': 'tomato', 'guava': 'guava',
    'dragon fruit': 'dragon fruit', 'orange': 'orange', 'pear': 'pear',
    'lychee': 'lychee', 'mango': 'mango', 'kiwi': 'kiwi', 'papaya': 'papaya',
    'apple': 'apple', 'grape': 'grape', 'pomegranate': 'pomegranate',
    'peach': 'peach', 'banana': 'banana', 'pomelo': 'pomelo',
}

stage_to_english = {
    'unripe': 'unripe', 'mature': 'mature',
    'pest-damaged': 'pest-damaged', 'rotten': 'rotten',
}

# Stage -> canonical action used in the templated one-shot answer
recommendation_map = {
    'unripe': 'keep for further growth',
    'mature': 'picking it',
    'pest-damaged': 'try to recover it',
    'rotten': 'discard it'
}

# Stage -> canonical consumer-willingness score used in the example answer
score_map = {
    'unripe': 30, 'mature': 85, 'pest-damaged': 20, 'rotten': 5
}


model_path = "deepseek-ai/deepseek-vl-7b-chat"
root_folder = "../data/水果"
output_root = "result_1shot"
os.makedirs(output_root, exist_ok=True)

print("🚀 Loading DeepSeek‑VL‑7B ...")
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer
vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path, cache_dir=".", trust_remote_code=True
).to(torch.bfloat16).cuda().eval()


# Four-part prompt: type / growth stage / recommended action / consumer score
question = (
    "1. Identify the type of fruit or crop shown in the image. \n"
    "2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten) \n"
    "3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it) \n"
    "4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).\n"
    "Please respond in the following format:\n"
    "Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] "
    "Recommendation: [keep for further growth / pick it /try to recover it / discard it] Consumer Score: [1-100]"
)


def build_stage_example(fruit_dir: str, fruit_cn: str, stage_cn: str):
    """Build the one-shot context: first image of this stage plus a templated answer.

    Returns a two-message [User, Assistant] list, or [] when the stage folder
    holds no images.
    """
    stage_path = os.path.join(fruit_dir, stage_cn)
    imgs = sorted([f for f in os.listdir(stage_path)
                   if f.lower().endswith((".png", ".jpg", ".jpeg", ".bmp"))])
    if not imgs:
        return []

    img_path = os.path.join(stage_path, imgs[0])

    fruit_en = chinese_to_english.get(fruit_cn, fruit_cn)
    stage_en = stage_to_english.get(stage_cn, "mature")
    assistant_reply = (
        f"Type: {fruit_en} Growth Stage: {stage_en} "
        f"Recommendation: {recommendation_map[stage_en]} "
        f"Consumer Score: {score_map[stage_en]}"
    )

    return [
        {"role": "User", "content": "<image_placeholder>\n" + question, "images": [img_path]},
        {"role": "Assistant", "content": assistant_reply},
    ]


for fruit_cn in os.listdir(root_folder):
    fruit_path = os.path.join(root_folder, fruit_cn)
    if not os.path.isdir(fruit_path):
        continue

    for stage_cn in os.listdir(fruit_path):
        stage_path = os.path.join(fruit_path, stage_cn)
        if not os.path.isdir(stage_path):
            continue

        img_files = sorted([f for f in os.listdir(stage_path)
                            if f.lower().endswith((".png", ".jpg", ".jpeg", ".bmp"))])

        example_msgs = build_stage_example(fruit_path, fruit_cn, stage_cn)
        if not example_msgs:
            continue

        output_file = os.path.join(output_root, f"{fruit_cn}_{stage_cn}.txt")
        with open(output_file, "w", encoding="utf-8") as fout:
            for img_name in tqdm(img_files, desc=f"{fruit_cn}/{stage_cn}"):
                img_path = os.path.join(stage_path, img_name)
                # Skip the image that serves as the in-context example itself
                if img_path in example_msgs[0]["images"]:
                    continue

                # One-shot conversation: example User/Assistant pair, then the
                # real query with an empty Assistant slot to be generated.
                conversation = example_msgs + [
                    {"role": "User", "content": "<image_placeholder>\n" + question, "images": [img_path]},
                    {"role": "Assistant", "content": ""},
                ]
                try:
                    with torch.no_grad():
                        pil_imgs = load_pil_images(conversation)
                        prep_in = vl_chat_processor(conversations=conversation, images=pil_imgs, force_batchify=True).to(vl_gpt.device)
                        embeds = vl_gpt.prepare_inputs_embeds(**prep_in)
                        # Greedy decoding for reproducible answers
                        out_ids = vl_gpt.language_model.generate(inputs_embeds=embeds, attention_mask=prep_in.attention_mask,
                                                                 pad_token_id=tokenizer.eos_token_id,
                                                                 bos_token_id=tokenizer.bos_token_id,
                                                                 eos_token_id=tokenizer.eos_token_id,
                                                                 max_new_tokens=256, do_sample=False, use_cache=True)
                    answer = tokenizer.decode(out_ids[0], skip_special_tokens=True).strip()

                    print(f"✅ {img_name} → {answer.splitlines()[0]}")
                    fout.write("="*25 + " IMAGE START " + "="*25 + "\n")
                    fout.write(f"🖼️ Image Name: {img_name}\n")
                    fout.write(f"📝 Answer:\n{answer}\n")
                    fout.write("="*25 + " IMAGE END " + "="*25 + "\n\n")
                except Exception as e:
                    print(f"[ERROR] {fruit_cn}/{stage_cn}/{img_name}: {e}")

                finally:
                    # Reclaim GPU memory between images to avoid OOM over long runs
                    torch.cuda.empty_cache()
                    gc.collect()
script/DeepSeekVL2-0-shot.py.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import torch,os
from tqdm import tqdm
from transformers import AutoModelForCausalLM

from deepseek_vl2.models import DeepseekVLV2Processor, DeepseekVLV2ForCausalLM
from deepseek_vl2.utils.io import load_pil_images

#https://huggingface.co/deepseek-ai/deepseek-vl-7b-chat

# Zero-shot fruit/crop assessment with DeepSeek-VL2 (tiny). Walks
# ../data/<fruit>/<status>/, queries the model per image, and writes one
# result file per status folder into result/.

# specify the path to the model
model_path = "deepseek-ai/deepseek-vl2-tiny"
# model_path = "deepseek-ai/deepseek-vl2-small"
vl_chat_processor: DeepseekVLV2Processor = DeepseekVLV2Processor.from_pretrained(model_path,cache_dir=".")
tokenizer = vl_chat_processor.tokenizer

vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(model_path,cache_dir=".", trust_remote_code=True)
# bfloat16 on GPU; .eval() disables dropout for deterministic inference
vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()

# NOTE(review): image_folder and result_file are defined but never used below;
# output actually goes to output_root — candidates for removal.
image_folder = "../images"
result_file = "./results.txt"


# Prompt: identify type, growth stage, recommended action, consumer score;
# <image> marks where the image features are spliced into the prompt.
user_prompt = (
    "This is the image: <image>\n"
    "1. Identify the type of fruit or crop shown in the image. "
    "2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten) "
    "3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it) "
    "4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely). "
    "Please respond in the following format:\n"
    "Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] "
    "Recommendation: [keep for further growth / try to recover it /picking it/ discard it] Consumer Score: [1-100]"

)


root_folder = "../data"
output_root = "result"
os.makedirs(output_root, exist_ok=True)


# Data layout: root/<fruit>/<status>/<images>
for fruit in os.listdir(root_folder):
    fruit_path = os.path.join(root_folder, fruit)
    if not os.path.isdir(fruit_path):
        continue

    for status in os.listdir(fruit_path):
        status_path = os.path.join(fruit_path, status)
        if not os.path.isdir(status_path):
            continue

        image_files = [f for f in os.listdir(status_path) if f.lower().endswith((".jpg", ".jpeg", ".png"))]
        if not image_files:
            continue

        # One result file per fruit/status pair
        output_file = os.path.join(output_root, f"{fruit}_{status}.txt")

        # Entries are buffered and written once per folder at the end
        results = []

        for img_name in tqdm(image_files, desc=f"{fruit}/{status}"):
            img_path = os.path.join(status_path, img_name)

            # Single-turn conversation in the VL2 chat template roles
            conversation = [
                {"role": "<|User|>", "content": user_prompt, "images": [img_path]},
                {"role": "<|Assistant|>", "content": ""}
            ]

            try:
                pil_images = load_pil_images(conversation)
                prepare_inputs = vl_chat_processor(
                    conversations=conversation,
                    images=pil_images,
                    force_batchify=True,
                    system_prompt=""
                ).to(vl_gpt.device)

                # Fuse text tokens and image features into input embeddings
                inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)

                # Greedy decoding (do_sample=False) for reproducible answers
                outputs = vl_gpt.language.generate(
                    inputs_embeds=inputs_embeds,
                    attention_mask=prepare_inputs.attention_mask,
                    pad_token_id=tokenizer.eos_token_id,
                    bos_token_id=tokenizer.bos_token_id,
                    eos_token_id=tokenizer.eos_token_id,
                    max_new_tokens=512,
                    do_sample=False,
                    use_cache=True
                )

                # Keep special tokens so the answer can be split off the
                # <|Assistant|> marker in the decoded transcript
                decoded = tokenizer.decode(outputs[0], skip_special_tokens=False)
                assistant_reply = decoded.split("<|Assistant|>")[-1].strip()

                result_entry = f"{'='*25} IMAGE START {'='*25}\n"
                result_entry += f"🖼️ Image Name: {img_name}\n"
                result_entry += f"📝 Answer:\n{assistant_reply}\n"
                result_entry += f"{'='*25} IMAGE END {'='*25}\n\n"
                results.append(result_entry)

            except Exception as e:
                # Record the failure per image and keep processing the rest
                print(f"[ERROR] {fruit}/{status}/{img_name}: {e}")
                results.append(f"{'='*25} IMAGE START {'='*25}\n")
                results.append(f"🖼️ Image Name: {img_name}\n")
                results.append(f"❌ ERROR: {e}\n")
                results.append(f"{'='*25} IMAGE END {'='*25}\n\n")


        with open(output_file, "w", encoding="utf-8") as fout:
            fout.writelines(results)
script/DeepSeekVL2-1-shot.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# -*- coding: utf-8 -*-
"""
One-shot inference script (per fruit class, each image is paired with an
example image of its own true stage)
--------------------------------
- Each image gets one same-stage example image (first by sorted filename)
  plus a templated example answer
- Checks whether the prompt token length exceeds the limit (default 4096)
- Runs on either CPU or GPU
- Results are saved under the result_1shot/ directory
- CUDA memory and garbage are reclaimed after every inference round
"""

import os
import gc
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM
from DeepSeek_VL2.deepseek_vl2.models import DeepseekVLV2Processor, DeepseekVLV2ForCausalLM
from DeepSeek_VL2.deepseek_vl2.utils.io import load_pil_images

# BUG FIX: chinese_to_english and stage_to_english were declared as *set*
# literals, yet are read below with .get(), which sets do not have — the
# script crashed with AttributeError on first use. They are now dicts.
# NOTE(review): the original mapping keys (Chinese folder names) were lost;
# identity keys preserve the .get(key, default) fallback behaviour —
# TODO restore the Chinese keys.
chinese_to_english = {
    'strawberry': 'strawberry', 'tomato': 'tomato', 'guava': 'guava',
    'dragon fruit': 'dragon fruit', 'orange': 'orange', 'pear': 'pear',
    'lychee': 'lychee', 'mango': 'mango', 'kiwi': 'kiwi', 'papaya': 'papaya',
    'apple': 'apple', 'grape': 'grape', 'pomegranate': 'pomegranate',
    'peach': 'peach', 'banana': 'banana', 'pomelo': 'pomelo',
}

stage_to_english = {
    'unripe': 'unripe', 'mature': 'mature',
    'pest-damaged': 'pest-damaged', 'rotten': 'rotten',
}

# Stage -> canonical action used in the templated one-shot answer
recommendation_map = {
    'unripe': 'keep for further growth',
    'mature': 'picking it',
    'pest-damaged': 'try to recover it',
    'rotten': 'discard it'
}

# Stage -> canonical consumer-willingness score used in the example answer
score_map = {
    'unripe': 30, 'mature': 85, 'pest-damaged': 20, 'rotten': 5
}

max_token_limit = 4096

model_path = "deepseek-ai/deepseek-vl2-tiny"
use_gpu = torch.cuda.is_available()

print(f"🚀 Loading model ({'GPU' if use_gpu else 'CPU'}) ...")
vl_proc: DeepseekVLV2Processor = DeepseekVLV2Processor.from_pretrained(model_path, cache_dir=".")
tokenizer = vl_proc.tokenizer
vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path, cache_dir=".", trust_remote_code=True
)
if use_gpu:
    # bfloat16 on GPU; CPU keeps the default dtype
    vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
else:
    vl_gpt = vl_gpt.eval()
print("✅ Model ready.\n")

# Four-part prompt: type / growth stage / recommended action / consumer score
user_prompt = (
    "This is the image: <image>\n"
    "1. Identify the type of fruit or crop shown in the image. "
    "2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten) "
    "3. Recommend the farmer’s next action. (Options: keep for further growth, picking it,try to recover it, discard it) "
    "4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely). "
    "Please respond in the following format:\n"
    "Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] "
    "Recommendation: [keep for further growth / picking it /try to recover it / discard it] Consumer Score: [1-100]"
)

root_folder = "../data"
output_root = "result_1shot"
os.makedirs(output_root, exist_ok=True)

for fruit_cn in os.listdir(root_folder):
    fruit_dir = os.path.join(root_folder, fruit_cn)
    if not os.path.isdir(fruit_dir):
        continue

    fruit_en = chinese_to_english.get(fruit_cn, fruit_cn)

    for stage_cn in os.listdir(fruit_dir):
        stage_dir = os.path.join(fruit_dir, stage_cn)
        if not os.path.isdir(stage_dir):
            continue

        image_files = sorted([f for f in os.listdir(stage_dir) if f.lower().endswith(('.jpg', '.jpeg', '.png'))])
        # BUG FIX: guard against stage folders with no images —
        # image_files[0] below raised IndexError on empty directories.
        if not image_files:
            continue

        # The lexicographically first image serves as the in-context example
        sample_file = image_files[0]
        sample_path = os.path.join(stage_dir, sample_file)
        stage_en = stage_to_english.get(stage_cn, 'mature')
        recomm = recommendation_map.get(stage_en, 'picking it')
        score = score_map.get(stage_en, 85)
        sample_answer = f"Type: {fruit_en} Growth Stage: {stage_en} Recommendation: {recomm} Consumer Score: {score}"

        output_file = os.path.join(output_root, f"{fruit_cn}_{stage_cn}.txt")

        for image_file in tqdm(image_files, desc=f"{fruit_cn}/{stage_cn}"):
            image_path = os.path.join(stage_dir, image_file)
            # Skip the image that serves as the in-context example itself
            if image_file == sample_file:
                continue

            try:
                # One-shot conversation: example User/Assistant pair, then the
                # real query with an empty Assistant slot to be generated.
                conversation = [
                    {"role": "<|User|>", "content": "This is the image: <image>\n1. Identify the fruit.\n2. Current growth stage.\n3. Recommended action.\n4. Consumer willingness score.", "images": [sample_path]},
                    {"role": "<|Assistant|>", "content": sample_answer},
                    {"role": "<|User|>", "content": user_prompt, "images": [image_path]},
                    {"role": "<|Assistant|>", "content": ""}
                ]

                pil_images = load_pil_images(conversation)
                inputs = vl_proc(conversations=conversation, images=pil_images, force_batchify=True, system_prompt="")
                if use_gpu:
                    inputs = inputs.to(vl_gpt.device)

                num_tokens = inputs.input_ids.shape[1]
                # BUG FIX: num_tokens was computed but never compared against
                # max_token_limit, despite the check promised in the module
                # docstring. Over-long prompts are now recorded as errors
                # (handled by the except block below) instead of being fed in.
                if num_tokens > max_token_limit:
                    raise ValueError(f"prompt is {num_tokens} tokens, exceeds limit {max_token_limit}")

                inp_emb = vl_gpt.prepare_inputs_embeds(**inputs)
                # Greedy decoding for reproducible answers
                gen_out = vl_gpt.language.generate(
                    inputs_embeds=inp_emb,
                    attention_mask=inputs.attention_mask,
                    pad_token_id=tokenizer.eos_token_id,
                    bos_token_id=tokenizer.bos_token_id,
                    eos_token_id=tokenizer.eos_token_id,
                    max_new_tokens=512,
                    do_sample=False,
                    use_cache=True
                )

                # The answer follows the last <|Assistant|> marker in the transcript
                reply = tokenizer.decode(gen_out[0]).split("<|Assistant|>")[-1].strip()

                with open(output_file, "a", encoding="utf-8") as fout:
                    fout.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    fout.write(f"🖼️ Image Name: {image_file}\n")
                    fout.write(f"📝 Answer:\n{reply}\n")
                    fout.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")

                print(f"✅ {image_file} => {reply.splitlines()[0]}")

            except Exception as e:
                # Record the failure per image and keep processing the rest
                print(f"[ERROR] {fruit_cn}/{stage_cn}/{image_file}: {e}")
                with open(output_file, "a", encoding="utf-8") as fout:
                    fout.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    fout.write(f"🖼️ Image Name: {image_file}\n")
                    fout.write(f"❌ ERROR: {e}\n")
                    fout.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")

            finally:
                # Reclaim GPU memory between images to avoid OOM over long runs
                if use_gpu:
                    torch.cuda.empty_cache()
                gc.collect()
script/InterVL2.5-4B-0-shot.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
from tqdm import tqdm
|
| 4 |
+
from PIL import Image
|
| 5 |
+
from transformers import AutoModel, AutoTokenizer
|
| 6 |
+
import torchvision.transforms as T
|
| 7 |
+
from torchvision.transforms.functional import InterpolationMode
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
IMAGENET_MEAN = (0.485, 0.456, 0.406)
|
| 11 |
+
IMAGENET_STD = (0.229, 0.224, 0.225)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def build_transform(input_size):
    """Build the InternVL preprocessing pipeline: force RGB, bicubic resize to a
    square, convert to tensor, and apply ImageNet normalization."""
    to_rgb = T.Lambda(lambda img: img if img.mode == 'RGB' else img.convert('RGB'))
    resize = T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC)
    return T.Compose([
        to_rgb,
        resize,
        T.ToTensor(),
        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ])
|
| 21 |
+
|
| 22 |
+
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    """Pick the tiling grid whose width/height ratio is closest to *aspect_ratio*.

    Ties are broken in favour of the later (larger) grid when the source image
    has more than half the pixels that the tiled target area would cover.
    """
    best = (1, 1)
    smallest_diff = float('inf')
    pixel_count = width * height
    for w_tiles, h_tiles in target_ratios:
        diff = abs(aspect_ratio - w_tiles / h_tiles)
        if diff < smallest_diff:
            smallest_diff = diff
            best = (w_tiles, h_tiles)
        elif diff == smallest_diff and pixel_count > 0.5 * image_size * image_size * w_tiles * h_tiles:
            best = (w_tiles, h_tiles)
    return best
|
| 36 |
+
|
| 37 |
+
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    """Split *image* into a grid of image_size x image_size tiles.

    The grid shape is the candidate whose aspect ratio is closest to the
    source image's; optionally a square thumbnail of the whole image is
    appended when more than one tile was produced.
    """
    width, height = image.size
    ratio = width / height

    # Enumerate every (cols, rows) grid with a tile count in [min_num, max_num],
    # smallest grids first (same candidate order as the reference implementation).
    candidate_grids = sorted(
        [(cols, rows)
         for n in range(min_num, max_num + 1)
         for cols in range(1, n + 1)
         for rows in range(1, n + 1)
         if min_num <= cols * rows <= max_num],
        key=lambda g: g[0] * g[1]
    )

    cols, rows = find_closest_aspect_ratio(ratio, candidate_grids, width, height, image_size)
    canvas_w = image_size * cols
    canvas_h = image_size * rows

    resized = image.resize((canvas_w, canvas_h))
    tiles = []
    tiles_per_row = canvas_w // image_size
    for idx in range(cols * rows):
        col = idx % tiles_per_row
        row = idx // tiles_per_row
        left = col * image_size
        top = row * image_size
        tiles.append(resized.crop((left, top, left + image_size, top + image_size)))

    if use_thumbnail and len(tiles) != 1:
        tiles.append(image.resize((image_size, image_size)))
    return tiles
|
| 63 |
+
|
| 64 |
+
def load_image(image_file, input_size=448, max_num=12):
    """Open an image file and return its stacked, normalized tile tensors
    (shape: num_tiles x 3 x input_size x input_size)."""
    pil_image = Image.open(image_file).convert('RGB')
    tiles = dynamic_preprocess(pil_image, image_size=input_size, use_thumbnail=True, max_num=max_num)
    preprocess = build_transform(input_size)
    return torch.stack([preprocess(tile) for tile in tiles])
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
model_path = 'OpenGVLab/InternVL2_5-4B'

# Load the InternVL 2.5 4B checkpoint in bfloat16 on the GPU.
model = AutoModel.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    use_flash_attn=True,
    cache_dir="./",
    trust_remote_code=True
).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False)


question = (
    '''<image>\n
1. Identify the type of fruit or crop shown in the image.
2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten)
3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it)
4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).
Please respond in the following format:
Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] Recommendation: [keep for further growth / pick it / try to recover it / discard it] Consumer Score: [1-100]
'''
)


generation_config = dict(max_new_tokens=1024, do_sample=True)


root_folder = "../data"
output_root = "result"
os.makedirs(output_root, exist_ok=True)


# Walk data/<fruit>/<stage>/ and query the model once per image; one report
# file is written per (fruit, stage) pair.
for fruit_name in os.listdir(root_folder):
    fruit_dir = os.path.join(root_folder, fruit_name)
    if not os.path.isdir(fruit_dir):
        continue

    for stage_name in os.listdir(fruit_dir):
        stage_dir = os.path.join(fruit_dir, stage_name)
        if not os.path.isdir(stage_dir):
            continue

        image_names = [name for name in os.listdir(stage_dir)
                       if name.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp'))]
        if not image_names:
            continue

        report_path = os.path.join(output_root, f"{fruit_name}_{stage_name}.txt")

        with open(report_path, "w", encoding="utf-8") as report:
            for image_name in tqdm(image_names, desc=f"{fruit_name}/{stage_name}"):
                image_path = os.path.join(stage_dir, image_name)
                try:
                    pixel_values = load_image(image_path).to(torch.bfloat16).cuda()
                    response = model.chat(tokenizer, pixel_values, question, generation_config)

                    print(f"{image_name} ✅ -> {response}")
                    report.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    report.write(f"🖼️ Image Name: {image_name}\n")
                    report.write(f"📝 Answer:\n{response.strip()}\n")
                    report.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
                except Exception as e:
                    # Best-effort: record the failure in the report and keep going.
                    print(f"[ERROR] {fruit_name}/{stage_name}/{image_name}: {e}")
                    report.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    report.write(f"🖼️ Image Name: {image_name}\n")
                    report.write(f"❌ ERROR:\n{e}\n")
                    report.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
|
script/InterVL2.5-8B-0-shot.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
from PIL import Image
|
| 4 |
+
from tqdm import tqdm
|
| 5 |
+
from transformers import AutoModel, AutoTokenizer
|
| 6 |
+
|
| 7 |
+
from torchvision import transforms as T
|
| 8 |
+
from torchvision.transforms.functional import InterpolationMode
|
| 9 |
+
|
| 10 |
+
IMAGENET_MEAN = (0.485, 0.456, 0.406)
|
| 11 |
+
IMAGENET_STD = (0.229, 0.224, 0.225)
|
| 12 |
+
|
| 13 |
+
def build_transform(input_size):
    """Standard InternVL input pipeline: force RGB, bicubic square resize,
    tensor conversion, ImageNet normalization."""
    steps = [
        T.Lambda(lambda im: im.convert('RGB') if im.mode != 'RGB' else im),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
    ]
    return T.Compose(steps)
|
| 20 |
+
|
| 21 |
+
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    """Choose the tiling grid whose aspect ratio best matches the image's.

    When two grids match equally well, prefer the later candidate if the image
    holds more than half the pixels the tiled target area would cover.
    """
    chosen = (1, 1)
    chosen_diff = float('inf')
    img_area = width * height
    for candidate in target_ratios:
        diff = abs(aspect_ratio - candidate[0] / candidate[1])
        tie_break = (diff == chosen_diff
                     and img_area > 0.5 * image_size * image_size * candidate[0] * candidate[1])
        if diff < chosen_diff or tie_break:
            chosen_diff = diff
            chosen = candidate
    return chosen
|
| 35 |
+
|
| 36 |
+
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    """Tile *image* into image_size-square patches using the best-matching grid.

    Optionally appends a square thumbnail of the full image when more than one
    patch was produced.
    """
    src_w, src_h = image.size
    src_ratio = src_w / src_h

    # All (cols, rows) grids whose tile count lies in [min_num, max_num],
    # sorted by total tile count (stable, matching the reference order).
    grids = [(c, r)
             for n in range(min_num, max_num + 1)
             for c in range(1, n + 1)
             for r in range(1, n + 1)
             if min_num <= c * r <= max_num]
    grids.sort(key=lambda g: g[0] * g[1])

    cols, rows = find_closest_aspect_ratio(src_ratio, grids, src_w, src_h, image_size)
    canvas_w = image_size * cols
    canvas_h = image_size * rows
    canvas = image.resize((canvas_w, canvas_h))

    patches = []
    per_row = canvas_w // image_size
    for idx in range(cols * rows):
        row, col = divmod(idx, per_row)
        left = col * image_size
        top = row * image_size
        patches.append(canvas.crop((left, top, left + image_size, top + image_size)))

    if use_thumbnail and len(patches) != 1:
        patches.append(image.resize((image_size, image_size)))
    return patches
|
| 67 |
+
|
| 68 |
+
def load_image(image_file, input_size=448, max_num=12):
    """Load one image and return a (num_tiles, 3, input_size, input_size) tensor."""
    img = Image.open(image_file).convert('RGB')
    tiles = dynamic_preprocess(img, image_size=input_size, use_thumbnail=True, max_num=max_num)
    to_tensor = build_transform(input_size)
    tensors = [to_tensor(tile) for tile in tiles]
    return torch.stack(tensors)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
model_path = "OpenGVLab/InternVL2_5-8B"

# InternVL 2.5 8B, bfloat16, flash-attention, fully resident on the GPU.
model = AutoModel.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    use_flash_attn=True,
    cache_dir="./",
    trust_remote_code=True
).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False)

generation_config = dict(max_new_tokens=1024, do_sample=True)


question = (
    '''<image>\n
1. Identify the type of fruit or crop shown in the image.
2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten)
3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it)
4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).
Please respond in the following format:
Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] Recommendation: [keep for further growth / pick it / try to recover it / discard it] Consumer Score: [1-100]
'''
)


root_folder = "../data/"
output_root = "result"
os.makedirs(output_root, exist_ok=True)


# One report file per (fruit, stage) directory under root_folder.
for fruit_name in os.listdir(root_folder):
    fruit_dir = os.path.join(root_folder, fruit_name)
    if not os.path.isdir(fruit_dir):
        continue

    for stage_name in os.listdir(fruit_dir):
        stage_dir = os.path.join(fruit_dir, stage_name)
        if not os.path.isdir(stage_dir):
            continue

        image_names = [f for f in os.listdir(stage_dir)
                       if f.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp'))]
        if not image_names:
            continue

        report_path = os.path.join(output_root, f"{fruit_name}_{stage_name}.txt")

        with open(report_path, "w", encoding="utf-8") as report:
            for image_name in tqdm(image_names, desc=f"Processing {fruit_name}/{stage_name}"):
                image_path = os.path.join(stage_dir, image_name)
                try:
                    pixel_values = load_image(image_path, max_num=12).to(torch.bfloat16).cuda()

                    response = model.chat(tokenizer, pixel_values, question, generation_config)

                    print(f"{image_name} ✅ -> {response}")
                    report.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    report.write(f"🖼️ Image Name: {image_name}\n")
                    report.write(f"📝 Answer:\n{response}\n")
                    report.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")

                except Exception as e:
                    # Best-effort: log and record the failure, then continue.
                    print(f"[ERROR] {fruit_name}/{stage_name}/{image_name}: {e}")
                    report.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    report.write(f"🖼️ Image Name: {image_name}\n")
                    report.write(f"❌ ERROR:\n{str(e)}\n")
                    report.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
|
script/Janus-Pro-1B-0-shot.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
from transformers import AutoModelForCausalLM
|
| 4 |
+
from janus.models import MultiModalityCausalLM, VLChatProcessor
|
| 5 |
+
from janus.utils.io import load_pil_images
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
|
| 8 |
+
#https://github.com/deepseek-ai/Janus
|
| 9 |
+
|
| 10 |
+
model_path = "deepseek-ai/Janus-1.3B"
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path, cache_dir=".")
tokenizer = vl_chat_processor.tokenizer
# bfloat16 on GPU, eval mode — inference only.
vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path, cache_dir=".", trust_remote_code=True
).to(torch.bfloat16).cuda().eval()

question = (
    '''
1. Identify the type of fruit or crop shown in the image.
2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten)
3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it)
4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).
Please respond in the following format:
Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] Recommendation: [keep for further growth / try to recover it / discard it] Consumer Score: [1-100]
'''
)

root_folder = "../data/"
output_root = "result-1B"
os.makedirs(output_root, exist_ok=True)


# data/<fruit>/<stage>/ layout; one report file per (fruit, stage).
for fruit_name in os.listdir(root_folder):
    fruit_dir = os.path.join(root_folder, fruit_name)
    if not os.path.isdir(fruit_dir):
        continue

    for stage_name in os.listdir(fruit_dir):
        stage_dir = os.path.join(fruit_dir, stage_name)
        if not os.path.isdir(stage_dir):
            continue

        image_names = [f for f in os.listdir(stage_dir)
                       if f.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp'))]
        if not image_names:
            continue

        report_path = os.path.join(output_root, f"{fruit_name}_{stage_name}.txt")

        with open(report_path, "w", encoding="utf-8") as report:
            for image_name in tqdm(image_names, desc=f"{fruit_name}/{stage_name}"):
                try:
                    image_path = os.path.join(stage_dir, image_name)

                    # Single-turn chat: one user message with the image, empty
                    # assistant slot for the model to fill.
                    conversation = [
                        {
                            "role": "<|User|>",
                            "content": f"<image_placeholder>\n{question}",
                            "images": [image_path],
                        },
                        {"role": "<|Assistant|>", "content": ""},
                    ]

                    pil_images = load_pil_images(conversation)
                    prepared = vl_chat_processor(
                        conversations=conversation,
                        images=pil_images,
                        force_batchify=True
                    ).to(vl_gpt.device)

                    inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepared)

                    outputs = vl_gpt.language_model.generate(
                        inputs_embeds=inputs_embeds,
                        attention_mask=prepared.attention_mask,
                        pad_token_id=tokenizer.eos_token_id,
                        bos_token_id=tokenizer.bos_token_id,
                        eos_token_id=tokenizer.eos_token_id,
                        max_new_tokens=512,
                        do_sample=False,
                        use_cache=True,
                    )

                    answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
                    report.write(f"{'='*25} IMAGE START {'='*25}\n")
                    report.write(f"🖼️ Image Name: {image_name}\n")
                    report.write(f"📝 Answer:\n{answer.strip()}\n")
                    report.write(f"{'='*25} IMAGE END {'='*25}\n\n")

                except Exception as e:
                    print(f"[ERROR] {fruit_name}/{stage_name}/{image_name}: {e}")
                    report.write(f"{'='*25} IMAGE START {'='*25}\n")
                    report.write(f"🖼️ Image Name: {image_name}\n")
                    report.write(f"❌ ERROR:\n{e}\n")
                    report.write(f"{'='*25} IMAGE END {'='*25}\n\n")
|
script/Janus-Pro-7B-0-shot.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
from transformers import AutoModelForCausalLM
|
| 4 |
+
from janus.models import MultiModalityCausalLM, VLChatProcessor
|
| 5 |
+
from janus.utils.io import load_pil_images
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
|
| 8 |
+
#https://github.com/deepseek-ai/Janus
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
model_path = "deepseek-ai/Janus-Pro-7B"
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path, cache_dir=".")
tokenizer = vl_chat_processor.tokenizer
# bfloat16 on GPU, eval mode — inference only.
vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path, cache_dir=".", trust_remote_code=True
).to(torch.bfloat16).cuda().eval()


question = (
    '''
1. Identify the type of fruit or crop shown in the image.
2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten)
3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it)
4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).
Please respond in the following format:
Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] Recommendation: [keep for further growth / pick it / try to recover it / discard it] Consumer Score: [1-100]
'''
)

root_folder = "../data"
output_root = "result"
os.makedirs(output_root, exist_ok=True)


# data/<fruit>/<stage>/ layout; one report file per (fruit, stage).
for fruit_name in os.listdir(root_folder):
    fruit_dir = os.path.join(root_folder, fruit_name)
    if not os.path.isdir(fruit_dir):
        continue

    for stage_name in os.listdir(fruit_dir):
        stage_dir = os.path.join(fruit_dir, stage_name)
        if not os.path.isdir(stage_dir):
            continue

        image_names = [f for f in os.listdir(stage_dir)
                       if f.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp'))]
        if not image_names:
            continue

        report_path = os.path.join(output_root, f"{fruit_name}_{stage_name}.txt")

        with open(report_path, "w", encoding="utf-8") as report:
            for image_name in tqdm(image_names, desc=f"{fruit_name}/{stage_name}"):
                try:
                    image_path = os.path.join(stage_dir, image_name)

                    # Single-turn chat: one user message with the image, empty
                    # assistant slot for the model to fill.
                    conversation = [
                        {
                            "role": "<|User|>",
                            "content": f"<image_placeholder>\n{question}",
                            "images": [image_path],
                        },
                        {"role": "<|Assistant|>", "content": ""},
                    ]

                    pil_images = load_pil_images(conversation)
                    prepared = vl_chat_processor(
                        conversations=conversation,
                        images=pil_images,
                        force_batchify=True
                    ).to(vl_gpt.device)

                    inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepared)

                    outputs = vl_gpt.language_model.generate(
                        inputs_embeds=inputs_embeds,
                        attention_mask=prepared.attention_mask,
                        pad_token_id=tokenizer.eos_token_id,
                        bos_token_id=tokenizer.bos_token_id,
                        eos_token_id=tokenizer.eos_token_id,
                        max_new_tokens=512,
                        do_sample=False,
                        use_cache=True,
                    )

                    answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
                    report.write(f"{'='*25} IMAGE START {'='*25}\n")
                    report.write(f"🖼️ Image Name: {image_name}\n")
                    report.write(f"📝 Answer:\n{answer.strip()}\n")
                    report.write(f"{'='*25} IMAGE END {'='*25}\n\n")

                except Exception as e:
                    print(f"[ERROR] {fruit_name}/{stage_name}/{image_name}: {e}")
                    report.write(f"{'='*25} IMAGE START {'='*25}\n")
                    report.write(f"🖼️ Image Name: {image_name}\n")
                    report.write(f"❌ ERROR: {e}\n")
                    report.write(f"{'='*25} IMAGE END {'='*25}\n\n")
|
script/Janus-Pro-7B-1-shot.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
import os
|
| 5 |
+
import gc
|
| 6 |
+
import random
|
| 7 |
+
import torch
|
| 8 |
+
from tqdm import tqdm
|
| 9 |
+
from transformers import AutoModelForCausalLM
|
| 10 |
+
from janus.models import MultiModalityCausalLM, VLChatProcessor
|
| 11 |
+
from janus.utils.io import load_pil_images
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# BUG FIX: these two lookup tables were declared as *sets* but are consumed
# later via `chinese_to_english.get(fruit_cn, fruit_cn)` and
# `stage_to_english[stage_cn]`, which raise AttributeError / TypeError on a
# set at runtime. The data-folder names appear to already be the English
# fruit/stage names listed here, so identity dictionaries preserve the
# intended "translate, or pass through unchanged" behaviour.
# NOTE(review): if the folders are actually named in Chinese, fill in the
# Chinese keys here — confirm against the data directory.
chinese_to_english = {name: name for name in (
    'strawberry', 'tomato', 'guava', 'dragon fruit',
    'orange', 'pear', 'lychee', 'mango',
    'kiwi', 'papaya', 'apple', 'grape',
    'pomegranate', 'peach', 'banana', 'pomelo',
)}

# Growth-stage lookup (identity mapping for the same reason as above).
stage_to_english = {name: name for name in (
    'unripe', 'mature', 'pest-damaged', 'rotten',
)}

# Ground-truth recommendation used in the one-shot assistant example.
# CONSISTENCY FIX: 'picking it' -> 'pick it' to match the option list the
# prompt declares ("keep for further growth / pick it / try to recover it /
# discard it").
recommendation_map = {
    'unripe': 'keep for further growth',
    'mature': 'pick it',
    'pest-damaged': 'try to recover it',
    'rotten': 'discard it'
}

# Ground-truth consumer-willingness score (1-100) per stage for the example.
score_map = {
    'unripe': 30, 'mature': 85, 'pest-damaged': 20, 'rotten': 5
}
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
model_path = "deepseek-ai/Janus-Pro-7B"
root_folder = "../data"
output_root = "result_1shot"
os.makedirs(output_root, exist_ok=True)

# Load the Janus-Pro-7B multimodal model in bfloat16 on the GPU, eval mode.
print("🚀 Loading Janus‑Pro‑7B ...")
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path, cache_dir=".")
tokenizer = vl_chat_processor.tokenizer
vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path, cache_dir=".", trust_remote_code=True
).to(torch.bfloat16).cuda().eval()
print("✅ Model ready.\n")


# The per-image question; the same text is reused for the one-shot example.
question = (
    "1. Identify the type of fruit or crop shown in the image. \n"
    "2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten) \n"
    "3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it) \n"
    "4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).\n"
    "Please respond in the following format:\n"
    "Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] "
    "Recommendation: [keep for further growth / pick it / try to recover it / discard it] Consumer Score: [1-100]"
)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def build_stage_example(fruit_dir: str, fruit_cn: str, stage_cn: str):
    """Build the one-shot (user, assistant) exchange for one fruit/stage.

    Picks a random image from the stage folder and pairs it with the
    ground-truth answer derived from the module-level label maps. Returns []
    when the folder holds no images.
    """
    stage_dir = os.path.join(fruit_dir, stage_cn)
    candidates = [name for name in os.listdir(stage_dir)
                  if name.lower().endswith((".png", ".jpg", ".jpeg", ".bmp"))]
    if not candidates:
        return []

    example_img = os.path.join(stage_dir, random.choice(candidates))

    fruit_en = chinese_to_english.get(fruit_cn, fruit_cn)
    stage_en = stage_to_english[stage_cn]

    ground_truth = (
        f"Type: {fruit_en} Growth Stage: {stage_en} "
        f"Recommendation: {recommendation_map[stage_en]} "
        f"Consumer Score: {score_map[stage_en]}"
    )

    return [
        {
            "role": "<|User|>",
            "content": f"<image_placeholder>\n{question}",
            "images": [example_img],
        },
        {
            "role": "<|Assistant|>",
            "content": ground_truth,
        },
    ]
|
| 95 |
+
|
| 96 |
+
# Main loop: for every data/<fruit>/<stage>/ folder, build one in-context
# example, then query the model on every remaining image in the folder.
for fruit_cn in os.listdir(root_folder):
    fruit_path = os.path.join(root_folder, fruit_cn)
    if not os.path.isdir(fruit_path):
        continue

    for stage_cn in os.listdir(fruit_path):
        stage_path = os.path.join(fruit_path, stage_cn)
        if not os.path.isdir(stage_path):
            continue

        img_files = [f for f in os.listdir(stage_path)
                     if f.lower().endswith((".png", ".jpg", ".jpeg", ".bmp"))]

        # One in-context example per (fruit, stage); skip stages with no images.
        example_1shot = build_stage_example(fruit_path, fruit_cn, stage_cn)
        if not example_1shot:
            continue

        output_file = os.path.join(output_root, f"{fruit_cn}_{stage_cn}.txt")
        with open(output_file, "w", encoding="utf-8") as fout:
            for img_name in tqdm(img_files, desc=f"{fruit_cn}/{stage_cn}"):
                img_path = os.path.join(stage_path, img_name)

                # Never query the image that serves as the in-context example.
                if any(img_path in m.get("images", []) for m in example_1shot):
                    continue

                conversation = example_1shot + [
                    {
                        "role": "<|User|>",
                        "content": f"<image_placeholder>\n{question}",
                        "images": [img_path],
                    },
                    {"role": "<|Assistant|>", "content": ""},
                ]

                prep_in = embeds = outputs = None
                try:
                    with torch.no_grad():
                        pil_imgs = load_pil_images(conversation)
                        prep_in = vl_chat_processor(
                            conversations=conversation,
                            images=pil_imgs,
                            force_batchify=True
                        ).to(vl_gpt.device)
                        del pil_imgs

                        # Removed dead code: the original also computed
                        # `seq_len` and decoded a 120-token `tail_text`
                        # that were never used anywhere.
                        embeds = vl_gpt.prepare_inputs_embeds(**prep_in)
                        outputs = vl_gpt.language_model.generate(
                            inputs_embeds=embeds,
                            attention_mask=prep_in.attention_mask,
                            pad_token_id=tokenizer.eos_token_id,
                            bos_token_id=tokenizer.bos_token_id,
                            eos_token_id=tokenizer.eos_token_id,
                            max_new_tokens=256,
                            do_sample=False,
                            use_cache=True,
                        )
                    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)

                    print(f"\n🖼️ {img_name}\n{answer.strip()}")
                    fout.write("="*25 + " IMAGE START " + "="*25 + "\n")
                    fout.write(f"🖼️ Image Name: {img_name}\n")
                    fout.write(f"📝 Answer:\n{answer.strip()}\n")
                    fout.write("="*25 + " IMAGE END " + "="*25 + "\n\n")

                except Exception as e:
                    print(f"[ERROR] {fruit_cn}/{stage_cn}/{img_name}: {e}")
                    fout.write("="*25 + " IMAGE START " + "="*25 + "\n")
                    fout.write(f"🖼️ Image Name: {img_name}\n")
                    fout.write(f"❌ ERROR: {e}\n")
                    fout.write("="*25 + " IMAGE END " + "="*25 + "\n\n")

                finally:
                    # BUG FIX: the original did `del locals()[var]`, which only
                    # mutates a snapshot dict and never releases the tensors.
                    # Rebinding the names drops the references so the cache
                    # flush can actually reclaim GPU memory.
                    prep_in = embeds = outputs = None
                    torch.cuda.empty_cache()
|
script/Mantis-8B-Idefics-0-shot.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import torch
from PIL import Image
from tqdm import tqdm
from transformers import AutoProcessor, AutoModelForVision2Seq

# Zero-shot evaluation of Mantis-8B-Idefics2 on a fruit/crop image tree.
# Walks ../data/<fruit>/<stage>/* and writes one result file per stage folder.
MODEL_ID = "TIGER-Lab/Mantis-8B-Idefics2"

processor = AutoProcessor.from_pretrained(MODEL_ID, cache_dir=".")
model = AutoModelForVision2Seq.from_pretrained(
    MODEL_ID,
    cache_dir=".",
    device_map="auto"
)
model.eval()

# Deterministic single-beam greedy decoding.
generation_kwargs = {
    "max_new_tokens": 1024,
    "num_beams": 1,
    "do_sample": False
}

# Prompt asking for fruit type / growth stage / recommended action / score.
query = (
    '''
1. Identify the type of fruit or crop shown in the image.
2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten)
3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it)
4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).
Please respond in the following format:
Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] Recommendation: [keep for further growth / pick it / try to recover it / discard it] Consumer Score: [1-100]
'''
)
root_folder = "../data/"
output_root = "result"
os.makedirs(output_root, exist_ok=True)

IMAGE_EXTS = (".jpg", ".jpeg", ".png", ".bmp")

for fruit in os.listdir(root_folder):
    fruit_dir = os.path.join(root_folder, fruit)
    if not os.path.isdir(fruit_dir):
        continue

    for subfolder in os.listdir(fruit_dir):
        stage_dir = os.path.join(fruit_dir, subfolder)
        if not os.path.isdir(stage_dir):
            continue

        image_files = [name for name in os.listdir(stage_dir)
                       if name.lower().endswith(IMAGE_EXTS)]
        if not image_files:
            continue

        result_path = os.path.join(output_root, f"{fruit}_{subfolder}.txt")

        with open(result_path, "w", encoding="utf-8") as fout:
            for image_file in tqdm(image_files, desc=f"Processing {fruit}/{subfolder}"):
                try:
                    img = Image.open(os.path.join(stage_dir, image_file)).convert("RGB")

                    # Single user turn: image placeholder followed by the text prompt.
                    chat = [
                        {
                            "role": "user",
                            "content": [
                                {"type": "image"},
                                {"type": "text", "text": query},
                            ],
                        }
                    ]

                    prompt = processor.apply_chat_template(chat, add_generation_prompt=True)
                    batch = processor(text=prompt, images=[img], return_tensors="pt")
                    batch = {key: tensor.to(model.device) for key, tensor in batch.items()}

                    with torch.no_grad():
                        out_ids = model.generate(**batch, **generation_kwargs)
                    # Strip the prompt tokens; decode only the newly generated tail.
                    response = processor.batch_decode(
                        out_ids[:, batch["input_ids"].shape[1]:],
                        skip_special_tokens=True
                    )[0].strip()

                    fout.write(f"{'='*25} IMAGE START {'='*25}\n")
                    fout.write(f"🖼️ Image Name: {image_file}\n")
                    fout.write(f"📝 Answer:\n{response}\n")
                    fout.write(f"{'='*25} IMAGE END {'='*25}\n\n")

                except Exception as e:
                    print(f"[ERROR] {fruit}/{subfolder}/{image_file}: {e}")
                    fout.write(f"{'='*25} IMAGE START {'='*25}\n")
                    fout.write(f"🖼️ Image Name: {image_file}\n")
                    fout.write(f"❌ ERROR: {e}\n")
                    fout.write(f"{'='*25} IMAGE END {'='*25}\n\n")
script/Mantis-8B-siglip-0-shot.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
from PIL import Image
from mantis.models.mllava import chat_mllava, MLlavaProcessor, LlavaForConditionalGeneration

# Model card: https://huggingface.co/TIGER-Lab/Mantis-8B-siglip-llama3
# Zero-shot fruit/crop assessment; one result file per <fruit>/<stage> folder.
MODEL_ID = "TIGER-Lab/Mantis-8B-siglip-llama3"

processor = MLlavaProcessor.from_pretrained(MODEL_ID, cache_dir=".")
model = LlavaForConditionalGeneration.from_pretrained(
    MODEL_ID,
    cache_dir=".",
    device_map="cuda",
    torch_dtype="bfloat16",
    attn_implementation=None  # or "flash_attention_2"
)

# Deterministic greedy decoding.
generation_kwargs = {
    "max_new_tokens": 1024,
    "num_beams": 1,
    "do_sample": False
}

question = (
    '''
1. Identify the type of fruit or crop shown in the image.
2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten)
3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it)
4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).
Please respond in the following format:
Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] Recommendation: [keep for further growth / pick it /try to recover it / discard it] Consumer Score: [1-100]
'''
)

root_folder = "../data/"
output_root = "result"
os.makedirs(output_root, exist_ok=True)

IMAGE_EXTS = ('.jpg', '.jpeg', '.png', '.bmp')

for fruit in os.listdir(root_folder):
    fruit_dir = os.path.join(root_folder, fruit)
    if not os.path.isdir(fruit_dir):
        continue

    for subfolder in os.listdir(fruit_dir):
        stage_dir = os.path.join(fruit_dir, subfolder)
        if not os.path.isdir(stage_dir):
            continue

        image_files = [name for name in os.listdir(stage_dir)
                       if name.lower().endswith(IMAGE_EXTS)]
        if not image_files:
            continue

        result_path = os.path.join(output_root, f"{fruit}_{subfolder}.txt")

        with open(result_path, "w", encoding="utf-8") as fout:
            for image_file in image_files:
                img_path = os.path.join(stage_dir, image_file)
                try:
                    img = Image.open(img_path).convert("RGB")

                    # chat_mllava handles chat templating + generation in one call.
                    response, history = chat_mllava(
                        question, [img], model, processor, **generation_kwargs
                    )

                    print(f"{image_file} ✅ -> {response}")
                    fout.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    fout.write(f"🖼️ Image Name: {image_file}\n")
                    fout.write(f"📝 Answer:\n{response.strip()}\n")
                    fout.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")

                except Exception as e:
                    print(f"[ERROR] {fruit}/{subfolder}/{image_file} -> {e}")
                    fout.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    fout.write(f"🖼️ Image Name: {image_file}\n")
                    fout.write(f"❌ ERROR:\n{str(e)}\n")
                    fout.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
|
script/Mantis-8B-siglip-1-shot.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import gc, torch
from PIL import Image
from mantis.models.mllava import chat_mllava, MLlavaProcessor, LlavaForConditionalGeneration

# One-shot evaluation of Mantis-8B-siglip-llama3: for every <fruit>/<stage>
# folder, the first image plus a templated gold answer serves as the in-context
# example, and every remaining image in the folder is then queried.

# FIX: these two tables were written as *set* literals, but they are consumed
# below via .get(key, default) — sets have no .get(), so the script crashed
# with AttributeError on the first folder. They are now identity mappings
# (the original Chinese->English key pairs are not recoverable from this file;
# add real keys as needed).
chinese_to_english = {name: name for name in (
    'strawberry', 'tomato', 'guava', 'dragon fruit',
    'orange', 'pear', 'lychee', 'mango',
    'kiwi', 'papaya', 'apple', 'grape',
    'pomegranate', 'peach', 'banana', 'pomelo'
)}

stage_to_english = {name: name for name in (
    'unripe', 'mature', 'pest-damaged', 'rotten'
)}

# Gold next-action per growth stage, used to build the one-shot example answer.
recommendation_map = {
    'unripe': 'keep for further growth',
    'mature': 'picking it',
    'pest-damaged': 'try to recover it',
    'rotten': 'discard it'
}

# Gold consumer-willingness score per growth stage.
score_map = {
    'unripe': 30, 'mature': 85, 'pest-damaged': 20, 'rotten': 5
}


model_path = "TIGER-Lab/Mantis-8B-siglip-llama3"
processor = MLlavaProcessor.from_pretrained(model_path, cache_dir=".")
model = LlavaForConditionalGeneration.from_pretrained(
    model_path,
    cache_dir=".",
    device_map="cuda",
    torch_dtype=torch.bfloat16,
    attn_implementation=None  # or "flash_attention_2"
)


# Deterministic greedy decoding.
generation_kwargs = {
    "max_new_tokens": 1024,
    "num_beams": 1,
    "do_sample": False
}

base_question = (
    "1. Identify the type of fruit or crop shown in the image. \n"
    "2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten) \n"
    "3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it) \n"
    "4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).\n"
    "Please respond in the following format:\n"
    "Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] "
    "Recommendation: [keep for further growth / try to recover it / discard it] Consumer Score: [1-100]"
)

root_folder = "../data/"
output_root = "result_1shot_mantis"
os.makedirs(output_root, exist_ok=True)

for fruit in os.listdir(root_folder):
    fruit_path = os.path.join(root_folder, fruit)
    if not os.path.isdir(fruit_path):
        continue

    fruit_en = chinese_to_english.get(fruit, fruit)

    for stage_cn in os.listdir(fruit_path):
        stage_path = os.path.join(fruit_path, stage_cn)
        if not os.path.isdir(stage_path):
            continue

        stage_en = stage_to_english.get(stage_cn, 'mature')
        recomm = recommendation_map.get(stage_en, 'picking it')
        score = score_map.get(stage_en, 85)

        example_answer = (
            f"Type: {fruit_en} Growth Stage: {stage_en} "
            f"Recommendation: {recomm} Consumer Score: {score}"
        )
        print(f"🍏 {stage_path} -> -> {example_answer}")
        image_files = sorted([f for f in os.listdir(stage_path) if f.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp'))])

        # FIX: image_files[0] raised IndexError on folders with no images.
        if not image_files:
            continue

        sample_file = image_files[0]  # held out as the one-shot example
        sample_path = os.path.join(stage_path, sample_file)
        sample_image = Image.open(sample_path).convert("RGB")

        output_file = os.path.join(output_root, f"{fruit}_{stage_cn}.txt")

        with open(output_file, "w", encoding="utf-8") as fout:
            for image_file in image_files:
                if image_file == sample_file:
                    continue

                image_path = os.path.join(stage_path, image_file)
                try:
                    image = Image.open(image_path).convert("RGB")

                    # Example turn (first <image> + gold answer) followed by the
                    # actual query turn (second <image> + task description).
                    full_prompt = (
                        f"<image>\n1. Identify the fruit.\n2. Current growth stage.\n3. Recommended action.\n4. Consumer willingness score.\n"
                        f"{example_answer}\n<image>\n{base_question}"
                    )

                    response, _ = chat_mllava(full_prompt, [sample_image, image], model, processor, **generation_kwargs)

                    print(f"✅ {image_file} => {response.splitlines()[0] if response else 'N/A'}")
                    fout.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    fout.write(f"🖼️ Image Name: {image_file}\n")
                    fout.write(f"📝 Answer:\n{response.strip()}\n")
                    fout.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")

                except Exception as e:
                    print(f"[ERROR] {fruit}/{stage_cn}/{image_file} -> {e}")
                    fout.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    fout.write(f"🖼️ Image Name: {image_file}\n")
                    fout.write(f"❌ ERROR:\n{str(e)}\n")
                    fout.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")

                finally:
                    # Keep host memory in check across many PIL/tensor allocations.
                    gc.collect()
|
script/MiniCPM-0-2.6-8B-0-shot.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer
from tqdm import tqdm

# Zero-shot evaluation of MiniCPM-o-2.6 on the fruit/crop dataset.
MODEL_PATH = 'openbmb/MiniCPM-o-2_6'


model = AutoModel.from_pretrained(
    MODEL_PATH,
    trust_remote_code=True,
    cache_dir=".",
    attn_implementation='sdpa',
    torch_dtype=torch.bfloat16,
    init_vision=True,
    init_audio=True,
    init_tts=True
).eval().cuda()

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, cache_dir=".", trust_remote_code=True)


# NOTE(review): init_tts=True was already passed above; this extra call looks
# redundant but is kept because the model's remote code may require it — confirm.
model.init_tts()


question = (
    '''You are an agricultural expert. Analyze the image and answer the following questions:

1. Identify the type of fruit or crop shown in the image.
2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten)
3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it)
4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).

Please respond in the following format, and do not include explanations:

- Type: [Fruit/Crop Name]
- Growth Stage: [unripe / mature / pest-damaged / rotten]
- Recommendation: [keep for further growth / pick it / try to recover it / discard it]
- Consumer Score: [1-100]
'''
)

image_root = "../data"
output_root = "result"
os.makedirs(output_root, exist_ok=True)


for fruit in os.listdir(image_root):
    fruit_path = os.path.join(image_root, fruit)
    if not os.path.isdir(fruit_path):
        continue

    for subfolder in os.listdir(fruit_path):
        subfolder_path = os.path.join(fruit_path, subfolder)
        if not os.path.isdir(subfolder_path):
            continue

        output_file = os.path.join(output_root, f"{fruit}_{subfolder}.txt")
        image_files = [f for f in os.listdir(subfolder_path) if f.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp', '.webp'))]

        with open(output_file, "w", encoding="utf-8") as out_file:
            for filename in tqdm(image_files, desc=f"Processing {fruit}/{subfolder}"):
                image_path = os.path.join(subfolder_path, filename)
                try:
                    image = Image.open(image_path).convert('RGB')
                    msgs = [{'role': 'user', 'content': [image, question]}]

                    response = model.chat(
                        msgs=msgs,
                        tokenizer=tokenizer
                    )

                    # FIX: the log lines previously wrote the literal "(unknown)"
                    # instead of the in-scope `filename`, so result files could
                    # not be matched back to their images (every sibling script
                    # logs the real file name).
                    out_file.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    out_file.write(f"🖼️ Image Name: {filename}\n\n")
                    out_file.write("📝 Answer:\n" + response.strip() + "\n")
                    out_file.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
                except Exception as e:
                    out_file.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    out_file.write(f"🖼️ Image Name: {filename}\n")
                    out_file.write("❌ ERROR:\n" + str(e) + "\n")
                    out_file.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
                    print(f"[ERROR] {fruit}/{subfolder}/{filename}: {e}")
|
script/Qwen2.5-VL-3B-0-shot.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import torch
from tqdm import tqdm
from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
from PIL import Image

# Zero-shot run of Qwen2.5-VL-3B-Instruct over every image under ../data.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-3B-Instruct", torch_dtype=torch.bfloat16, device_map="auto", cache_dir="."
).eval()

processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-3B-Instruct", cache_dir=".")


# Task prompt: type / growth stage / recommendation / consumer score.
question = '''
You are an agricultural expert. Analyze the image and answer the following questions:
1. Identify the type of fruit or crop shown in the image.
2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten)
3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it)
4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).

Please respond in the following format, and do not include explanations:

- Type: [Fruit/Crop Name]
- Growth Stage: [unripe / mature / pest-damaged / rotten]
- Recommendation: [keep for further growth / try to recover it / discard it]
- Consumer Score: [1-100]
'''


root_folder = "../data"
output_root = "result"
os.makedirs(output_root, exist_ok=True)

IMAGE_EXTS = ('.jpg', '.jpeg', '.png', '.bmp')

for fruit in os.listdir(root_folder):
    fruit_dir = os.path.join(root_folder, fruit)
    if not os.path.isdir(fruit_dir):
        continue

    for subfolder in os.listdir(fruit_dir):
        stage_dir = os.path.join(fruit_dir, subfolder)
        if not os.path.isdir(stage_dir):
            continue

        image_files = [name for name in os.listdir(stage_dir)
                       if name.lower().endswith(IMAGE_EXTS)]
        if not image_files:
            continue

        result_path = os.path.join(output_root, f"{fruit}_{subfolder}.txt")

        with open(result_path, "w", encoding="utf-8") as fout:
            for img_file in tqdm(image_files, desc=f"{fruit}/{subfolder}"):
                try:
                    img_path = os.path.join(stage_dir, img_file)
                    chat = [
                        {
                            "role": "user",
                            "content": [
                                {"type": "image", "image": img_path},
                                {"type": "text", "text": question},
                            ],
                        }
                    ]

                    # Render the chat template, then collect vision inputs for the batch.
                    rendered = processor.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
                    image_inputs, video_inputs = process_vision_info(chat)
                    batch = processor(
                        text=[rendered],
                        images=image_inputs,
                        videos=video_inputs,
                        padding=True,
                        return_tensors="pt",
                    ).to("cuda")

                    with torch.no_grad():
                        gen_ids = model.generate(**batch, max_new_tokens=128)
                    # Keep only tokens generated past each prompt.
                    trimmed = [full[len(prompt):] for prompt, full in zip(batch.input_ids, gen_ids)]
                    answer = processor.batch_decode(
                        trimmed,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=False
                    )[0].strip()

                    fout.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    fout.write(f"🖼️ Image Name: {img_file}\n")
                    fout.write(f"📝 Answer:\n{answer}\n")
                    fout.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
                except Exception as e:
                    print(f"[ERROR] {fruit}/{subfolder}/{img_file}: {e}")
                    fout.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    fout.write(f"🖼️ Image Name: {img_file}\n")
                    fout.write(f"❌ ERROR: {e}\n")
                    fout.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
|
script/Qwen2.5-VL-7B-0-shot.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch

# https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct
# Zero-shot evaluation of Qwen2.5-VL-7B-Instruct over ../data/<fruit>/<stage>/*.


model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-7B-Instruct",
    cache_dir=".",
    torch_dtype="auto",
    device_map="auto"
)
processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct", cache_dir=".")

# FIX: this prompt was previously one triple-quoted string that still contained
# the quote characters and line breaks of an intended adjacent-string
# concatenation, so the model literally received lines like
# '"1. Identify the type of fruit ..."'. Rebuilt as proper string concatenation,
# matching the sibling Qwen2.5-VL-7B-1-shot script's base_question.
question_text = (
    "1. Identify the type of fruit or crop shown in the image. \n"
    "2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten) \n"
    "3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it) \n"
    "4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).\n"
    "Please respond in the following format:\n"
    "Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] "
    "Recommendation: [keep for further growth / try to recover it / discard it] Consumer Score: [1-100]"
)

root_folder = "../data"
output_root = "result"
os.makedirs(output_root, exist_ok=True)

for fruit in os.listdir(root_folder):
    fruit_path = os.path.join(root_folder, fruit)
    if not os.path.isdir(fruit_path):
        continue

    for subfolder in os.listdir(fruit_path):
        subfolder_path = os.path.join(fruit_path, subfolder)
        if not os.path.isdir(subfolder_path):
            continue

        image_files = [f for f in os.listdir(subfolder_path) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp'))]
        if not image_files:
            continue

        output_file = os.path.join(output_root, f"{fruit}_{subfolder}.txt")

        with open(output_file, "w", encoding="utf-8") as f_out:
            for image_file in image_files:
                image_path = os.path.join(subfolder_path, image_file)
                try:
                    # Single user turn: the image plus the task prompt.
                    messages = [
                        {
                            "role": "user",
                            "content": [
                                {"type": "image", "image": image_path},
                                {"type": "text", "text": question_text},
                            ],
                        }
                    ]

                    # Render the chat template and gather vision inputs.
                    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
                    image_inputs, video_inputs = process_vision_info(messages)
                    inputs = processor(
                        text=[text],
                        images=image_inputs,
                        videos=video_inputs,
                        padding=True,
                        return_tensors="pt"
                    ).to("cuda")

                    # Decode only the tokens generated beyond each prompt.
                    generated_ids = model.generate(**inputs, max_new_tokens=128)
                    generated_ids_trimmed = [
                        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
                    ]
                    output_text = processor.batch_decode(
                        generated_ids_trimmed,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=False
                    )[0]

                    print(f"{fruit}/{subfolder}/{image_file} -> {output_text}")
                    f_out.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    f_out.write(f"🖼️ Image Name: {image_file}\n")
                    f_out.write(f"📝 Answer:\n{output_text}\n")
                    f_out.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")

                except Exception as e:
                    print(f"[ERROR] {fruit}/{subfolder}/{image_file} 出错:{e}")
                    f_out.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    f_out.write(f"🖼️ Image Name: {image_file}\n")
                    f_out.write(f"❌ ERROR:\n{e}\n")
                    f_out.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
|
script/Qwen2.5-VL-7B-1-shot.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import torch
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info
from PIL import Image

# One-shot evaluation of Qwen2.5-VL-7B-Instruct: the first image of each
# <fruit>/<stage> folder (plus a templated gold answer) is the in-context
# example; every remaining image in the folder is then queried.

# FIX: these two tables were written as *set* literals, but they are consumed
# below via .get(key, default) — sets have no .get(), so the script crashed
# with AttributeError on the first folder. They are now identity mappings
# (the original Chinese->English key pairs are not recoverable from this file;
# add real keys as needed).
chinese_to_english = {name: name for name in (
    'strawberry', 'tomato', 'guava', 'dragon fruit',
    'orange', 'pear', 'lychee', 'mango',
    'kiwi', 'papaya', 'apple', 'grape',
    'pomegranate', 'peach', 'banana', 'pomelo'
)}

stage_to_english = {name: name for name in (
    'unripe', 'mature', 'pest-damaged', 'rotten'
)}

# Gold next-action per growth stage, used to build the one-shot example answer.
recommendation_map = {
    'unripe': 'keep for further growth',
    'mature': 'picking it',
    'pest-damaged': 'try to recover it',
    'rotten': 'discard it'
}

# Gold consumer-willingness score per growth stage.
score_map = {
    'unripe': 30, 'mature': 85, 'pest-damaged': 20, 'rotten': 5
}

model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-7B-Instruct",
    cache_dir=".",
    torch_dtype=torch.float16,
    device_map="auto"
)
processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct", cache_dir=".")

base_question = (
    "1. Identify the type of fruit or crop shown in the image. \n"
    "2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten) \n"
    "3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it) \n"
    "4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).\n"
    "Please respond in the following format:\n"
    "Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] "
    "Recommendation: [keep for further growth /pick it / try to recover it / discard it] Consumer Score: [1-100]"
)

root_folder = "../data/"
output_root = "result_1shot_qwen"
os.makedirs(output_root, exist_ok=True)

for fruit in os.listdir(root_folder):
    fruit_path = os.path.join(root_folder, fruit)
    if not os.path.isdir(fruit_path):
        continue

    fruit_en = chinese_to_english.get(fruit, fruit)

    for stage_cn in os.listdir(fruit_path):
        stage_path = os.path.join(fruit_path, stage_cn)
        if not os.path.isdir(stage_path):
            continue

        stage_en = stage_to_english.get(stage_cn, 'mature')
        recomm = recommendation_map.get(stage_en, 'picking it')
        score = score_map.get(stage_en, 85)
        example_answer = f"Type: {fruit_en} Growth Stage: {stage_en} Recommendation: {recomm} Consumer Score: {score}"

        image_files = sorted([f for f in os.listdir(stage_path) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp'))])

        # FIX: image_files[0] raised IndexError on folders with no images.
        if not image_files:
            continue

        sample_file = image_files[0]  # held out as the one-shot example
        sample_path = os.path.join(stage_path, sample_file)

        output_file = os.path.join(output_root, f"{fruit}_{stage_cn}.txt")

        with open(output_file, "w", encoding="utf-8") as f_out:
            for image_file in image_files:
                if image_file == sample_file:
                    continue

                image_path = os.path.join(stage_path, image_file)
                try:
                    # Turn layout: example image + gold answer, then the query
                    # image + the actual task prompt.
                    messages = [
                        {
                            "role": "user",
                            "content": [
                                {"type": "image", "image": sample_path},
                                {"type": "text", "text": "<1-shot example>\n" + example_answer},
                                {"type": "image", "image": image_path},
                                {"type": "text", "text": base_question},
                            ],
                        }
                    ]

                    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
                    image_inputs, video_inputs = process_vision_info(messages)
                    inputs = processor(
                        text=[text], images=image_inputs, videos=video_inputs,
                        padding=True, return_tensors="pt"
                    ).to(model.device)

                    # Decode only the tokens generated beyond each prompt.
                    generated_ids = model.generate(**inputs, max_new_tokens=256)
                    generated_ids_trimmed = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
                    output_text = processor.batch_decode(
                        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
                    )[0]

                    print(f"✅ {image_file} => {output_text.splitlines()[0] if output_text else 'N/A'}")
                    f_out.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    f_out.write(f"🖼️ Image Name: {image_file}\n")
                    f_out.write(f"📝 Answer:\n{output_text}\n")
                    f_out.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")

                except Exception as e:
                    print(f"[ERROR] {fruit}/{stage_cn}/{image_file} 出错:{e}")
                    f_out.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    f_out.write(f"🖼️ Image Name: {image_file}\n")
                    f_out.write(f"❌ ERROR:\n{e}\n")
                    f_out.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
|
script/mPLUG-Owl3-7B-0-shot.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import torch
from PIL import Image
from modelscope import AutoConfig, AutoModel, AutoTokenizer


# mPLUG-Owl3-7B checkpoint on ModelScope; weights/config are cached in ".".
model_path = 'iic/mPLUG-Owl3-7B-241101'
device = "cuda"


config = AutoConfig.from_pretrained(model_path, cache_dir=".", trust_remote_code=True)
print(config)


# bfloat16 + flash-attention-2; trust_remote_code is required because the
# repo ships custom modeling code.
model = AutoModel.from_pretrained(
    model_path,
    attn_implementation='flash_attention_2',
    cache_dir=".",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True
).eval().cuda()

# Fix: load the tokenizer with the same cache_dir / trust_remote_code flags as
# the config and model above. mPLUG-Owl3 includes custom code, so omitting
# trust_remote_code can fail or fall back to a generic tokenizer, and omitting
# cache_dir would cache the tokenizer files in a different location.
tokenizer = AutoTokenizer.from_pretrained(model_path, cache_dir=".", trust_remote_code=True)
processor = model.init_processor(tokenizer)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# Zero-shot VQA prompt. The leading <|image|> token is where the processor
# splices in the image. Fix: item 3's option list previously omitted
# "pick it", which the mandated answer format below includes — the options
# and the response template must agree or the model may answer outside the
# expected label set.
question = """<|image|>
You are an agricultural expert. Analyze the image and answer the following questions:

1. Identify the type of fruit or crop shown in the image.
2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten)
3. Recommend the farmer’s next action. (Options: keep for further growth, pick it, try to recover it, discard it)
4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).

Please respond in the following format, and do not include explanations:

- Type: [Fruit/Crop Name]
- Growth Stage: [unripe / mature / pest-damaged / rotten]
- Recommendation: [keep for further growth / pick it / try to recover it / discard it]
- Consumer Score: [1-100]"""
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
root_folder = "../data"
output_root = "result"
os.makedirs(output_root, exist_ok=True)

# Image extensions accepted for inference (case-insensitive match below).
VALID_EXTS = ('.jpg', '.jpeg', '.png', '.bmp')

# Walk ../data/<fruit>/<subfolder>/ and run the model once per image,
# writing all answers for a folder into result/<fruit>_<subfolder>.txt.
for fruit in os.listdir(root_folder):
    fruit_path = os.path.join(root_folder, fruit)
    if not os.path.isdir(fruit_path):
        continue

    for subfolder in os.listdir(fruit_path):
        subfolder_path = os.path.join(fruit_path, subfolder)
        if not os.path.isdir(subfolder_path):
            continue

        image_files = [
            name for name in os.listdir(subfolder_path)
            if name.lower().endswith(VALID_EXTS)
        ]
        if not image_files:
            continue

        output_file = os.path.join(output_root, f"{fruit}_{subfolder}.txt")

        with open(output_file, "w", encoding="utf-8") as f:
            for image_file in image_files:
                image_path = os.path.join(subfolder_path, image_file)
                try:
                    image = Image.open(image_path).convert('RGB')

                    # mPLUG-Owl3 chat format: the trailing empty assistant
                    # turn marks where generation should continue.
                    messages = [
                        {"role": "user", "content": question},
                        {"role": "assistant", "content": ""}
                    ]

                    inputs = processor(messages, images=[image], videos=None)
                    inputs.to(device)
                    # generate() consumes these extra keys from the input dict.
                    inputs.update({
                        'tokenizer': tokenizer,
                        'max_new_tokens': 100,
                        'decode_text': True,
                    })

                    output = model.generate(**inputs)
                    print(f"{image_file} ✅ -> {output}")

                    f.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    f.write(f"🖼️ Image Name: {image_file}\n")
                    f.write(f"📝 Answer:\n{output}\n")
                    f.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")

                except Exception as e:
                    print(f"[ERROR] {fruit}/{subfolder}/{image_file}: {e}")
                    f.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    f.write(f"🖼️ Image Name: {image_file}\n")
                    f.write(f"❌ ERROR:\n{e}\n")
                    f.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
|