# FruitBench / script / Janus-Pro-7B-0-shot.py
# Zero-shot fruit/crop assessment with DeepSeek Janus-Pro-7B.
# (Hugging Face upload metadata: TJIET, "Upload 18 files", commit 3baa3fd.)
import os
import torch
from transformers import AutoModelForCausalLM
from janus.models import MultiModalityCausalLM, VLChatProcessor
from janus.utils.io import load_pil_images
from tqdm import tqdm
#https://github.com/deepseek-ai/Janus
# Load the Janus-Pro-7B multimodal chat processor and model.
# The model is cast to bfloat16, moved to the GPU, and frozen in eval mode.
model_path = "deepseek-ai/Janus-Pro-7B"
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path, cache_dir=".")
# The processor bundles its own tokenizer; reuse it for decoding below.
tokenizer = vl_chat_processor.tokenizer
_base_model = AutoModelForCausalLM.from_pretrained(model_path, cache_dir=".", trust_remote_code=True)
vl_gpt: MultiModalityCausalLM = _base_model.to(torch.bfloat16).cuda().eval()
# Zero-shot prompt sent once per image. Fix: the step-3 option list previously
# omitted "pick it", even though the mandated answer format below lists
# "pick it" as a valid Recommendation value — the two option sets now agree.
question = (
    '''
1. Identify the type of fruit or crop shown in the image.
2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten)
3. Recommend the farmer’s next action. (Options: keep for further growth, pick it, try to recover it, discard it)
4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely).
Please respond in the following format:
Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] Recommendation: [keep for further growth / pick it / try to recover it / discard it] Consumer Score: [1-100]
'''
)
# Filesystem layout: inputs live under ../data/<fruit>/<subfolder>/<image files>;
# one result .txt per subfolder is written beneath ./result.
output_root = "result"
os.makedirs(output_root, exist_ok=True)
root_folder = "../data"
def _query_model(image_path):
    """Run one zero-shot Janus-Pro query for a single image.

    Builds the two-turn chat (user turn with image placeholder + question,
    empty assistant turn), runs greedy generation, and returns the decoded
    answer string.
    """
    conversation = [
        {
            "role": "<|User|>",
            "content": f"<image_placeholder>\n{question}",
            "images": [image_path],
        },
        {"role": "<|Assistant|>", "content": ""},
    ]
    pil_images = load_pil_images(conversation)
    prepare_inputs = vl_chat_processor(
        conversations=conversation,
        images=pil_images,
        force_batchify=True
    ).to(vl_gpt.device)
    # Embed image + text tokens jointly, then generate from the fused embeddings.
    inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
    outputs = vl_gpt.language_model.generate(
        inputs_embeds=inputs_embeds,
        attention_mask=prepare_inputs.attention_mask,
        pad_token_id=tokenizer.eos_token_id,
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        max_new_tokens=512,
        do_sample=False,  # deterministic (greedy) decoding for benchmarking
        use_cache=True,
    )
    return tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)


def _write_record(fout, image_file, body):
    """Write one framed result record; shared by the success and error paths
    (previously duplicated verbatim in both branches)."""
    fout.write(f"{'='*25} IMAGE START {'='*25}\n")
    fout.write(f"🖼️ Image Name: {image_file}\n")
    fout.write(body)
    fout.write(f"{'='*25} IMAGE END {'='*25}\n\n")


# Walk ../data/<fruit>/<subfolder>/ and evaluate every image, writing one
# result file per subfolder. Errors on individual images are logged and
# recorded, then the batch continues (deliberate best-effort behavior).
for fruit in os.listdir(root_folder):
    fruit_path = os.path.join(root_folder, fruit)
    if not os.path.isdir(fruit_path):
        continue
    for subfolder in os.listdir(fruit_path):
        subfolder_path = os.path.join(fruit_path, subfolder)
        if not os.path.isdir(subfolder_path):
            continue
        image_files = [f for f in os.listdir(subfolder_path) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp'))]
        if not image_files:
            continue
        output_file = os.path.join(output_root, f"{fruit}_{subfolder}.txt")
        with open(output_file, "w", encoding="utf-8") as fout:
            for image_file in tqdm(image_files, desc=f"{fruit}/{subfolder}"):
                try:
                    answer = _query_model(os.path.join(subfolder_path, image_file))
                    _write_record(fout, image_file, f"📝 Answer:\n{answer.strip()}\n")
                except Exception as e:
                    # Broad catch is intentional: one bad image must not abort the run.
                    print(f"[ERROR] {fruit}/{subfolder}/{image_file}: {e}")
                    _write_record(fout, image_file, f"❌ ERROR: {e}\n")