# FruitBench / script / Qwen2.5-VL-3B-0-shot.py
# Zero-shot fruit/crop condition analysis with Qwen2.5-VL-3B-Instruct.
import os
import torch
from tqdm import tqdm
from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info
from PIL import Image
# Load the 3B instruction-tuned Qwen2.5-VL checkpoint in bfloat16.
# device_map="auto" lets Accelerate place the weights on whatever
# hardware is available; weights are cached in the working directory.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-3B-Instruct",
    torch_dtype=torch.bfloat16,
    device_map="auto",
    cache_dir=".",
)
model.eval()  # inference only — disable dropout etc.

# Processor bundles the tokenizer with the image preprocessor.
processor = AutoProcessor.from_pretrained(
    "Qwen/Qwen2.5-VL-3B-Instruct",
    cache_dir=".",
)
# Zero-shot prompt sent with every image. The model is instructed to reply
# in a fixed, parse-friendly four-field format (type / growth stage /
# recommendation / consumer score). Do not edit the wording casually:
# downstream result parsing presumably depends on the exact field labels.
question = '''
You are an agricultural expert. Analyze the image and answer the following questions:
1. Identify the type of fruit or crop shown in the image.
2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten)
3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it)
4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).
Please respond in the following format, and do not include explanations:
- Type: [Fruit/Crop Name]
- Growth Stage: [unripe / mature / pest-damaged / rotten]
- Recommendation: [keep for further growth / try to recover it / discard it]
- Consumer Score: [1-100]
'''
# ---------------------------------------------------------------------------
# Batch inference: walk ../data/<fruit>/<subfolder>/ and write one result
# file per (fruit, subfolder) pair under result/. Failures on individual
# images are logged and recorded, then processing continues.
# ---------------------------------------------------------------------------
root_folder = "../data"
output_root = "result"
os.makedirs(output_root, exist_ok=True)

# Recognized image file extensions (matched case-insensitively).
IMAGE_EXTS = ('.jpg', '.jpeg', '.png', '.bmp')

for fruit in os.listdir(root_folder):
    fruit_path = os.path.join(root_folder, fruit)
    if not os.path.isdir(fruit_path):
        continue
    for subfolder in os.listdir(fruit_path):
        subfolder_path = os.path.join(fruit_path, subfolder)
        if not os.path.isdir(subfolder_path):
            continue
        image_files = [
            f for f in os.listdir(subfolder_path)
            if f.lower().endswith(IMAGE_EXTS)
        ]
        if not image_files:
            continue
        output_file = os.path.join(output_root, f"{fruit}_{subfolder}.txt")
        with open(output_file, "w", encoding="utf-8") as fout:
            for img_file in tqdm(image_files, desc=f"{fruit}/{subfolder}"):
                try:
                    image_path = os.path.join(subfolder_path, img_file)
                    # Standard Qwen2.5-VL chat message: one image + prompt.
                    messages = [
                        {
                            "role": "user",
                            "content": [
                                {"type": "image", "image": image_path},
                                {"type": "text", "text": question},
                            ],
                        }
                    ]
                    text = processor.apply_chat_template(
                        messages, tokenize=False, add_generation_prompt=True
                    )
                    image_inputs, video_inputs = process_vision_info(messages)
                    # BUG FIX: was hard-coded .to("cuda"). The model is loaded
                    # with device_map="auto", so inputs must go to the model's
                    # actual device (and CPU-only hosts would crash on "cuda").
                    inputs = processor(
                        text=[text],
                        images=image_inputs,
                        videos=video_inputs,
                        padding=True,
                        return_tensors="pt",
                    ).to(model.device)
                    with torch.no_grad():
                        generated_ids = model.generate(**inputs, max_new_tokens=128)
                    # Drop the prompt tokens so only the new answer is decoded.
                    generated_ids_trimmed = [
                        out_ids[len(in_ids):]
                        for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
                    ]
                    output_text = processor.batch_decode(
                        generated_ids_trimmed,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=False,
                    )[0].strip()
                    fout.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    fout.write(f"🖼️ Image Name: {img_file}\n")
                    fout.write(f"📝 Answer:\n{output_text}\n")
                    fout.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")
                except Exception as e:
                    # Best-effort batch run: log the failure, record it in the
                    # result file, and keep going with the remaining images.
                    print(f"[ERROR] {fruit}/{subfolder}/{img_file}: {e}")
                    fout.write(f"{'=' * 25} IMAGE START {'=' * 25}\n")
                    fout.write(f"🖼️ Image Name: {img_file}\n")
                    fout.write(f"❌ ERROR: {e}\n")
                    fout.write(f"{'=' * 25} IMAGE END {'=' * 25}\n\n")