# FruitBench / script / Janus-Pro-7B-1-shot.py
# (header reconstructed from repository upload metadata)
# -*- coding: utf-8 -*-
import os
import gc
import random
import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM
from janus.models import MultiModalityCausalLM, VLChatProcessor
from janus.utils.io import load_pil_images
# Label tables used to build the ground-truth 1-shot exemplar reply.
#
# BUG FIX: `chinese_to_english` and `stage_to_english` were checked in as
# *sets* of English names, but the consuming code calls
# `chinese_to_english.get(fruit_cn, fruit_cn)` and subscripts
# `stage_to_english[stage_cn]` — both of which fail on a set
# (AttributeError / TypeError). They are rebuilt here as identity dicts so
# those lookups work; replace the identity pairs with real
# Chinese -> English pairs if the data folders use Chinese names.
chinese_to_english = {name: name for name in (
    'strawberry', 'tomato', 'guava', 'dragon fruit',
    'orange', 'pear', 'lychee', 'mango',
    'kiwi', 'papaya', 'apple', 'grape',
    'pomegranate', 'peach', 'banana', 'pomelo',
)}
stage_to_english = {name: name for name in (
    'unripe', 'mature', 'pest-damaged', 'rotten',
)}
# Recommended farmer action per growth stage.
# CONSISTENCY FIX: 'mature' previously mapped to 'picking it', but the answer
# format in the prompt lists the option as 'pick it'.
recommendation_map = {
    'unripe': 'keep for further growth',
    'mature': 'pick it',
    'pest-damaged': 'try to recover it',
    'rotten': 'discard it'
}
# Ground-truth consumer-willingness score (1-100) per growth stage.
score_map = {
    'unripe': 30, 'mature': 85, 'pest-damaged': 20, 'rotten': 5
}
# --- Model and path configuration ---
model_path = "deepseek-ai/Janus-Pro-7B"  # Hugging Face model id
root_folder = "../data"  # expected layout: ../data/<fruit>/<stage>/<images>
output_root = "result_1shot"  # one .txt transcript per (fruit, stage) pair
os.makedirs(output_root, exist_ok=True)
print("🚀 Loading Janus‑Pro‑7B ...")
# Processor handles both tokenization and image preprocessing; weights are
# cached in the current directory.
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path, cache_dir=".")
tokenizer = vl_chat_processor.tokenizer
vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path, cache_dir=".", trust_remote_code=True
)
# bfloat16 on a single GPU, eval mode (inference only — no dropout/grads).
vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
print("✅ Model ready.\n")
# Prompt sent with every image; the model is asked to answer in a fixed
# single-line format that downstream parsing presumably relies on.
# NOTE(review): the option list in item 3 omits 'pick it' even though the
# answer format on the last line includes it — confirm this is intentional.
question = (
    "1. Identify the type of fruit or crop shown in the image. \n"
    "2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten) \n"
    "3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, discard it) \n"
    "4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).\n"
    "Please respond in the following format:\n"
    "Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] "
    "Recommendation: [keep for further growth / pick it / try to recover it / discard it] Consumer Score: [1-100]"
)
def build_stage_example(fruit_dir: str, fruit_cn: str, stage_cn: str):
    """Build the 1-shot (user, assistant) exemplar for one fruit/stage folder.

    Picks one random image from ``fruit_dir/stage_cn``, pairs the standard
    question with the ground-truth formatted reply derived from the label
    tables, and returns the two-turn conversation. Returns an empty list
    when the folder contains no image files.
    """
    folder = os.path.join(fruit_dir, stage_cn)
    valid_ext = (".png", ".jpg", ".jpeg", ".bmp")
    candidates = [name for name in os.listdir(folder)
                  if name.lower().endswith(valid_ext)]
    if not candidates:
        return []
    exemplar_path = os.path.join(folder, random.choice(candidates))
    # Translate folder names to the English labels used in the reply.
    fruit_label = chinese_to_english.get(fruit_cn, fruit_cn)
    stage_label = stage_to_english[stage_cn]
    ground_truth = (
        f"Type: {fruit_label} Growth Stage: {stage_label} "
        f"Recommendation: {recommendation_map[stage_label]} "
        f"Consumer Score: {score_map[stage_label]}"
    )
    user_turn = {
        "role": "<|User|>",
        "content": f"<image_placeholder>\n{question}",
        "images": [exemplar_path],
    }
    assistant_turn = {"role": "<|Assistant|>", "content": ground_truth}
    return [user_turn, assistant_turn]
# --- Main loop: walk every <fruit>/<stage> folder, run 1-shot inference on
# each image, and append one annotated answer per image to a transcript file.
for fruit_cn in os.listdir(root_folder):
    fruit_path = os.path.join(root_folder, fruit_cn)
    if not os.path.isdir(fruit_path):
        continue  # skip stray files at the fruit level
    for stage_cn in os.listdir(fruit_path):
        stage_path = os.path.join(fruit_path, stage_cn)
        if not os.path.isdir(stage_path):
            continue  # skip stray files at the stage level
        img_files = [f for f in os.listdir(stage_path)
                     if f.lower().endswith((".png", ".jpg", ".jpeg", ".bmp"))]
        example_1shot = build_stage_example(fruit_path, fruit_cn, stage_cn)
        if not example_1shot:
            continue  # no usable exemplar image for this stage
        output_file = os.path.join(output_root, f"{fruit_cn}_{stage_cn}.txt")
        with open(output_file, "w", encoding="utf-8") as fout:
            for img_name in tqdm(img_files, desc=f"{fruit_cn}/{stage_cn}"):
                img_path = os.path.join(stage_path, img_name)
                # Never evaluate the image that serves as the 1-shot exemplar.
                if any(img_path in m.get("images", []) for m in example_1shot):
                    continue
                conversation = example_1shot + [
                    {
                        "role": "<|User|>",
                        "content": f"<image_placeholder>\n{question}",
                        "images": [img_path],
                    },
                    {"role": "<|Assistant|>", "content": ""},
                ]
                # Pre-bind so the finally-block cleanup is always well-defined
                # even if an exception fires before these are assigned.
                prep_in = embeds = outputs = None
                try:
                    with torch.no_grad():
                        pil_imgs = load_pil_images(conversation)
                        prep_in = vl_chat_processor(
                            conversations=conversation,
                            images=pil_imgs,
                            force_batchify=True
                        ).to(vl_gpt.device)
                        del pil_imgs  # PIL images no longer needed once encoded
                        embeds = vl_gpt.prepare_inputs_embeds(**prep_in)
                        # Greedy decoding for reproducible answers.
                        outputs = vl_gpt.language_model.generate(
                            inputs_embeds=embeds,
                            attention_mask=prep_in.attention_mask,
                            pad_token_id=tokenizer.eos_token_id,
                            bos_token_id=tokenizer.bos_token_id,
                            eos_token_id=tokenizer.eos_token_id,
                            max_new_tokens=256,
                            do_sample=False,
                            use_cache=True,
                        )
                        answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
                    print(f"\n🖼️ {img_name}\n{answer.strip()}")
                    fout.write("="*25 + " IMAGE START " + "="*25 + "\n")
                    fout.write(f"🖼️ Image Name: {img_name}\n")
                    fout.write(f"📝 Answer:\n{answer.strip()}\n")
                    fout.write("="*25 + " IMAGE END " + "="*25 + "\n\n")
                except Exception as e:
                    # Best-effort: record the failure in the transcript and
                    # keep going with the remaining images.
                    print(f"[ERROR] {fruit_cn}/{stage_cn}/{img_name}: {e}")
                    fout.write("="*25 + " IMAGE START " + "="*25 + "\n")
                    fout.write(f"🖼️ Image Name: {img_name}\n")
                    fout.write(f"❌ ERROR: {e}\n")
                    fout.write("="*25 + " IMAGE END " + "="*25 + "\n\n")
                finally:
                    # BUG FIX: the original `del locals()[var]` only mutated
                    # the throwaway dict returned by locals() and never freed
                    # the tensors, so empty_cache() had nothing to reclaim.
                    # Rebinding to None actually drops the references first.
                    prep_in = embeds = outputs = None
                    torch.cuda.empty_cache()