|
|
import os |
|
|
import torch |
|
|
from PIL import Image |
|
|
from modelscope import AutoModelForCausalLM, AutoTokenizer |
|
|
|
|
|
# Path to the locally downloaded CogVLM2 int4 model weights.
MODEL_PATH = "./cogvlm2-llama3-chat-19B-int4"

# Prefer the GPU when one is visible; otherwise run on CPU.
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

# bfloat16 requires compute capability >= 8 (Ampere or newer);
# older GPUs and CPU fall back to float16.
if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8:
    TORCH_TYPE = torch.bfloat16
else:
    TORCH_TYPE = torch.float16
|
|
|
|
|
|
|
|
# Load tokenizer and quantized model once at module level; the model is
# switched to eval mode since this script only runs inference.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)

_model_kwargs = {
    "torch_dtype": TORCH_TYPE,
    "trust_remote_code": True,
    "low_cpu_mem_usage": True,
}
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, **_model_kwargs).eval()
|
|
|
|
|
|
|
|
# Prompt sent to the model for every image.
# Fixed: the original wrapped these lines in a triple-quoted string, so the
# prompt literally contained the surrounding `"` characters and source-file
# newlines. Implicit adjacent-string concatenation is what was intended.
question = (
    "1. Identify the type of fruit or crop shown in the image. "
    "2. Determine its current growth stage. (Options: unripe, mature, pest-damaged, rotten) "
    "3. Recommend the farmer’s next action. (Options: keep for further growth, try to recover it, picking it, discard it) "
    "4. Evaluate the consumer’s willingness to consume this fruit, from 1 (very unlikely) to 100 (very likely). "
    "Please respond in the following format:\n"
    "Type: [Fruit/Crop Name] Growth Stage: [unripe / mature / pest-damaged / rotten] "
    "Recommendation: [keep for further growth / try to recover it / picking it / discard it] Consumer Score: [1-100]"
)
|
|
|
|
|
|
|
|
# Expected input layout: <root_folder>/<fruit>/<subfolder>/<image files>
root_folder = "../data"

# One result .txt file per (fruit, subfolder) pair is written here.
output_root = "result"

os.makedirs(output_root, exist_ok=True)
|
|
|
|
|
|
|
|
# Generation settings are loop-invariant; build the dict once.
# 128002 is the pad token id used by the CogVLM2 Llama-3 chat template.
GEN_KWARGS = {
    "max_new_tokens": 2048,
    "pad_token_id": 128002,
}

IMAGE_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.bmp', '.webp')

# Walk <root_folder>/<fruit>/<subfolder>/ and query the model once per image,
# appending each answer (or error) to result/<fruit>_<subfolder>.txt.
for fruit in os.listdir(root_folder):
    fruit_path = os.path.join(root_folder, fruit)
    if not os.path.isdir(fruit_path):
        continue

    for subfolder in os.listdir(fruit_path):
        subfolder_path = os.path.join(fruit_path, subfolder)
        if not os.path.isdir(subfolder_path):
            continue

        image_files = [f for f in os.listdir(subfolder_path)
                       if f.lower().endswith(IMAGE_EXTENSIONS)]
        if not image_files:
            continue

        output_file = os.path.join(output_root, f"{fruit}_{subfolder}.txt")

        with open(output_file, "w", encoding="utf-8") as out_file:
            for filename in image_files:
                image_path = os.path.join(subfolder_path, filename)
                try:
                    # Fixed: the original printed the literal text "(unknown)"
                    # here and below instead of the actual file name.
                    print(f"🖼️ Processing {filename}...")

                    # Fixed: close the file handle (the original leaked it).
                    with Image.open(image_path) as img:
                        image = img.convert('RGB')

                    history = []  # single-turn query: no prior conversation

                    input_by_model = model.build_conversation_input_ids(
                        tokenizer,
                        query=question,
                        history=history,
                        images=[image],
                        template_version='chat'
                    )

                    # Batch dimension added via unsqueeze(0); image tensor is
                    # cast to the model's compute dtype.
                    inputs = {
                        'input_ids': input_by_model['input_ids'].unsqueeze(0).to(DEVICE),
                        'token_type_ids': input_by_model['token_type_ids'].unsqueeze(0).to(DEVICE),
                        'attention_mask': input_by_model['attention_mask'].unsqueeze(0).to(DEVICE),
                        'images': [[input_by_model['images'][0].to(DEVICE).to(TORCH_TYPE)]],
                    }

                    with torch.no_grad():
                        outputs = model.generate(**inputs, **GEN_KWARGS)
                        # Strip the prompt tokens; keep only the generated tail.
                        outputs = outputs[:, inputs['input_ids'].shape[1]:]
                        response = tokenizer.decode(outputs[0])
                        response = response.split("<|end_of_text|>")[0].strip()

                    print(f"✅ Response: {response}\n")
                    out_file.write(f"{'='*25} IMAGE START {'='*25}\n")
                    out_file.write(f"🖼️ Image Name: {filename}\n")
                    out_file.write(f"📝 Answer:\n{response}\n")
                    out_file.write(f"{'='*25} IMAGE END {'='*25}\n\n")

                except Exception as e:
                    # Best-effort batch run: record the failure for this image
                    # and continue with the rest.
                    print(f"[ERROR] {fruit}/{subfolder}/{filename}: {e}")
                    out_file.write(f"{'='*25} IMAGE START {'='*25}\n")
                    out_file.write(f"🖼️ Image Name: {filename}\n")
                    out_file.write(f"❌ ERROR: {e}\n")
                    out_file.write(f"{'='*25} IMAGE END {'='*25}\n\n")
|
|