|
|
import os |
|
|
import re |
|
|
import json |
|
|
import base64 |
|
|
import argparse |
|
|
import mimetypes |
|
|
import copy |
|
|
from tqdm import tqdm |
|
|
from openai import OpenAI |
|
|
import traceback |
|
|
|
|
|
|
|
|
from src.solver_bridge import TrussSolver |
|
|
from src.metrics import compute_score |
|
|
from src.data_loader import BenchmarkDataLoader |
|
|
from src.prompts import PROMPT_REGISTRY |
|
|
|
|
|
|
|
|
# Prefer 'json_repair' for parsing model output: it tolerates the malformed
# JSON (trailing commas, single quotes, truncation) that LLMs frequently emit.
# Fall back to the stdlib 'json' module, which is already imported above —
# the original re-imported it here redundantly.
try:
    import json_repair

    JSON_LIB = json_repair
except ImportError:
    JSON_LIB = json
    print(
        "[Warning] 'json_repair' library not found. Installing it (pip install json_repair) is highly recommended for robust parsing.")
|
|
|
|
|
|
|
|
|
|
|
def encode_image(image_path):
    """Read an image file and return it as a Base64 data-URI string.

    Returns None when the file does not exist. Falls back to "image/png"
    when the MIME type cannot be guessed from the file extension.
    """
    if not os.path.exists(image_path):
        return None

    mime_type, _ = mimetypes.guess_type(image_path)
    mime_type = mime_type or "image/png"

    with open(image_path, "rb") as fh:
        payload = base64.b64encode(fh.read()).decode('utf-8')

    return f"data:{mime_type};base64,{payload}"
|
|
|
|
|
|
|
|
def extract_json(response_text):
    """Extract a JSON payload from a model reply.

    Tries delimiters in order of reliability: <json>...</json> tags,
    <|begin_of_box|>...<|end_of_box|> tokens, ```json fences, plain ```
    fences, and finally a bare {...} object anywhere in the text.
    Returns the stripped payload string, or None when nothing matches.
    """
    match = re.search(r'<json>(.*?)</json>', response_text, re.DOTALL)
    if match: return match.group(1).strip()

    match = re.search(r'<\|begin_of_box\|>(.*?)<\|end_of_box\|>', response_text, re.DOTALL)
    if match: return match.group(1).strip()

    match = re.search(r'```json(.*?)```', response_text, re.DOTALL)
    if match: return match.group(1).strip()

    match = re.search(r'```(.*?)```', response_text, re.DOTALL)
    if match: return match.group(1).strip()

    # BUGFIX: the original non-greedy pattern r'\{.*?\}' stopped at the FIRST
    # closing brace, truncating any nested JSON object into invalid JSON.
    # The greedy form spans from the first '{' to the last '}'.
    match = re.search(r'\{.*\}', response_text, re.DOTALL)
    if match: return match.group(0).strip()

    return None
|
|
|
|
|
|
|
|
def run_chat_completion(client, model_name, messages, temperature=0.2):
    """Call the chat-completions API with streaming, echoing tokens to stdout.

    Returns the concatenated reply text, or None on any API error.
    """
    try:
        print(f"\n[Model Output Start]:")
        stream = client.chat.completions.create(
            model=model_name,
            messages=messages,
            temperature=temperature,
            max_tokens=8192,
            stream=True
        )

        pieces = []
        for chunk in stream:
            # Some keep-alive chunks carry no choices at all; skip them.
            if not chunk.choices:
                continue
            token = chunk.choices[0].delta.content
            if token:
                print(token, end="", flush=True)
                pieces.append(token)

        print(f"\n[Model Output End]\n{'-'*40}")
        return "".join(pieces)

    except Exception as e:
        print(f"\n[API Error] {e}")
        return None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def apply_standard_load(model):
    """Replace all existing loads with a standard test load.

    Every member receives a uniform distributed load of 10 units pointing
    straight down in world coordinates (270 degrees, global angle mode).
    Mutates *model* in place and returns it.
    """
    model["loads"] = [
        {
            "id": f"TEST_LD_{link['id']}",
            "kind": "distributedLoad",
            "at": {"type": "link", "id": link["id"]},
            "wStart": 10,
            "wEnd": 10,
            "angleDeg": 270,
            "angleMode": "global"
        }
        for link in model.get("links", [])
    ]
    return model
|
|
|
|
|
def apply_uniform_material_and_rigid_joints(model):
    """Assign one standard steel section to every member and rigidize joints.

    Applies uniform E/A/Iz/density values and sets both member ends to
    "rigid". Mutates *model* in place and returns it.
    """
    standard_section = {
        "E": 200e9,
        "A": 0.01,
        "Iz": 0.0001,
        "density": 7850,
        "endA": "rigid",
        "endB": "rigid",
    }
    for link in model.get("links", []):
        link.update(standard_section)
    return model
|
|
|
|
|
def solve_and_compare_reactions(solver, model_ai, model_gt):
    """Solve both models and report whether their support reactions agree.

    Returns True only when both solves succeed and the reactions match
    within a 5% tolerance; any solver error or empty solution yields False.
    """
    solution_ai, error_ai = solver.solve(model_ai)
    solution_gt, error_gt = solver.solve(model_gt)

    if error_ai or error_gt:
        return False
    if not solution_ai or not solution_gt:
        return False

    # Only the reactions are compared; max_moment is a dummy reference value.
    _, details = compute_score(
        solution_ai,
        {"reactions": solution_gt["reactions"], "max_moment": 0},
        tolerance=0.05,
    )
    return details.get("reactions_match", False)
|
|
|
|
|
|
|
|
def diagnose_failure(solver, ai_json, gt_json):
    """Run a three-stage diagnosis of a wrong answer.

    Each stage normalizes progressively fewer aspects of both models, so
    the first mismatch pinpoints what the AI model got wrong.
    Returns (partial_score, feedback_message).
    """

    def _force_fixed_supports(model):
        # Neutralize support differences: every support becomes fixed.
        for support in model.get("supports", []):
            support["kind"] = "fixed"
            support["angleDeg"] = 0
        return model

    def _uniform_material(model):
        # Neutralize material/section differences only (joint types untouched).
        for link in model.get("links", []):
            link["E"] = 200e9
            link["A"] = 0.01
            link["Iz"] = 0.0001
            link["density"] = 7850
        return model

    # Stage 1: normalize supports, joints, materials AND loads.
    # A mismatch here can only come from geometry/connectivity.
    stage1 = [
        apply_standard_load(_force_fixed_supports(
            apply_uniform_material_and_rigid_joints(copy.deepcopy(m))))
        for m in (ai_json, gt_json)
    ]
    if not solve_and_compare_reactions(solver, stage1[0], stage1[1]):
        return 0.0, "The geometric structure is incorrect. Please check node coordinates and member connectivity."

    # Stage 2: keep the AI's supports as-is, normalize everything else.
    stage2 = [
        apply_standard_load(apply_uniform_material_and_rigid_joints(copy.deepcopy(m)))
        for m in (ai_json, gt_json)
    ]
    if not solve_and_compare_reactions(solver, stage2[0], stage2[1]):
        return 0.25, "The geometry is correct, but the boundary conditions (supports) are incorrect. Check support types and locations."

    # Stage 3: keep the AI's joint types too; only loads are normalized.
    stage3 = [
        apply_standard_load(_uniform_material(copy.deepcopy(m)))
        for m in (ai_json, gt_json)
    ]
    if solve_and_compare_reactions(solver, stage3[0], stage3[1]):
        # Everything structural agrees -> only the original loads differ.
        return 0.75, "The structure, supports, and connections are correct. Only the applied loads are incorrect."

    return 0.50, "Geometry and supports are correct, but the member connection types (hinge/rigid) are incorrect."
|
|
|
|
|
|
|
|
def main():
    """CLI entry point: run the benchmark, print a report, dump JSON results."""
    parser = argparse.ArgumentParser(description="Structural AI Benchmark Evaluator")
    parser.add_argument("--model", type=str, default="debug-mode", help="Model name")
    parser.add_argument("--api-base", type=str, default="http://localhost:8000/v1", help="API URL")
    parser.add_argument("--api-key", type=str, default="EMPTY", help="API Key")
    parser.add_argument("--limit", type=int, default=0, help="Limit tasks")
    parser.add_argument("--max-retries", type=int, default=2, help="Max retry attempts")
    parser.add_argument("--debug", action="store_true", help="Run sanity check using Ground Truth JSON (No AI)")
    parser.add_argument("--prompt-type", type=str, default="standard", choices=PROMPT_REGISTRY.keys())
    parser.add_argument("--filter", type=str, default=None, help="Filter tasks")

    args = parser.parse_args()

    # System prompt template is chosen by name from the registry.
    current_system_prompt = PROMPT_REGISTRY.get(args.prompt_type)
    print(f"Loaded Prompt Template: [{args.prompt_type}]")

    loader = BenchmarkDataLoader()
    solver = TrussSolver("bin/framecalc.wasm")
    # Debug mode never talks to the API, so no client is constructed.
    client = OpenAI(api_key=args.api_key, base_url=args.api_base) if not args.debug else None

    tasks = loader.load_tasks_for_eval()
    if not tasks: return

    # Optional task filtering by substring match on the task id, then a cap.
    if args.filter:
        tasks = [t for t in tasks if args.filter in t['id']]
    if args.limit > 0:
        tasks = tasks[:args.limit]

    print(f"Starting evaluation on {len(tasks)} tasks.")
    results = []

    for task in tqdm(tasks, desc="Evaluating"):
        task_id = task['id']
        gt_solution = task['gt_solution']
        # Some tasks store the ground truth as a one-element list; unwrap it.
        if isinstance(gt_solution, list) and len(gt_solution) > 0: gt_solution = gt_solution[0]

        # Raw ground-truth model JSON, needed for the failure diagnostics.
        gt_raw_json = loader.load_raw_model_by_id(task_id)

        # Per-task accumulators.
        best_score = 0
        final_details = {}
        fail_reason = "Unknown"
        attempts_used = 0

        if args.debug:
            # Sanity-check path: feed the ground-truth JSON straight into the
            # solver; a perfect pipeline should score 1.0 on every task.
            ai_json = gt_raw_json
            if not ai_json:
                fail_reason = "GT JSON Missing"
            else:
                ai_solution, solver_error = solver.solve(ai_json)
                if solver_error:
                    fail_reason = f"Physics Solver Crashed: {solver_error}"
                else:
                    score, details = compute_score(ai_solution, gt_solution)
                    best_score = score
                    final_details = details
                    fail_reason = "Success" if score == 1.0 else "Wrong Answer"

        else:
            # Normal path: ask the vision model to transcribe the structure image.
            # NOTE(review): encode_image returns None when the file is missing,
            # which would put a null image_url in the request — confirm upstream
            # data always provides a valid image_path.
            base64_image = encode_image(task['image_path'])

            base_messages = [
                {"role": "system", "content": current_system_prompt},
                {"role": "user", "content": [
                    {"type": "text", "text": "Analyze the structure in this image and output the JSON definition."},
                    {"type": "image_url", "image_url": {"url": base64_image}}
                ]}
            ]

            # Holds the previous assistant reply + error feedback for retries.
            retry_context = []

            for attempt in range(args.max_retries + 1):
                attempts_used = attempt + 1
                # First attempt is near-deterministic; retries add temperature
                # to escape a repeated wrong answer.
                current_temp = 0.1 if attempt == 0 else 0.4

                messages = base_messages + retry_context

                print(f"\n[Attempt {attempts_used}] Requesting API...")
                response_text = run_chat_completion(client, args.model, messages, temperature=current_temp)

                if not response_text:
                    fail_reason = "API Failure"
                    break

                json_str = extract_json(response_text)
                error_feedback = ""

                if not json_str:
                    error_feedback = "I cannot find valid JSON. Please output standard JSON inside <json> tags."
                    fail_reason = "Parse Error"
                else:
                    try:
                        ai_json = JSON_LIB.loads(json_str)
                        ai_solution, solver_error = solver.solve(ai_json)

                        if solver_error:
                            error_feedback = f"Solver Error: {solver_error}. Check connectivity."
                            fail_reason = "Solver Crashed"
                        elif not ai_solution:
                            error_feedback = "Unstable structure (empty result)."
                            fail_reason = "Unstable"
                        else:
                            score, details = compute_score(ai_solution, gt_solution)

                            if score == 1.0:
                                best_score = 1.0
                                final_details = details
                                fail_reason = "Success"
                                break
                            else:
                                fail_reason = "Wrong Answer"
                                final_details = details

                                # Diagnose the mismatch to craft targeted
                                # feedback for the retry prompt.
                                if gt_raw_json:
                                    partial_score, diag_feedback = diagnose_failure(solver, ai_json, gt_raw_json)
                                    error_feedback = f"Result incorrect. Diagnostic: {diag_feedback}"

                                    # On the last attempt, credit the partial
                                    # score the diagnostics awarded.
                                    if attempt == args.max_retries:
                                        best_score = partial_score
                                        fail_reason = f"Partial: {diag_feedback}"
                                else:
                                    error_feedback = "Result incorrect (Reaction forces mismatch)."

                    except Exception as e:
                        # Covers JSON parsing errors and any unexpected solver-input issues.
                        error_feedback = f"JSON Syntax Error: {e}"
                        fail_reason = "Syntax Error"

                # If a retry remains, append the failed reply plus feedback so
                # the next attempt sees what went wrong.
                if attempt < args.max_retries and error_feedback:
                    print(f" -> Feedback: {error_feedback}")

                    retry_context = [
                        {"role": "assistant", "content": response_text},
                        {"role": "user", "content": f"Error: {error_feedback} Fix the JSON."}
                    ]

        # Difficulty-weighted score for this task (difficulty defaults to 1).
        final_score = best_score * task.get("difficulty", 1)

        results.append({
            "id": task_id,
            "score": final_score,
            "ratio": best_score,
            "difficulty": task.get("difficulty", 1),
            "reason": fail_reason,
            "attempts_used": attempts_used,
            "details": final_details
        })

    # ---- Aggregate report ----
    total_score = sum(r['score'] for r in results)
    total_possible = sum(r['difficulty'] for r in results) if results else 0

    # NOTE(review): avg_ratio is computed but never printed — either report it
    # or drop it.
    avg_ratio = (sum(r['ratio'] for r in results) / len(results)) * 100 if results else 0
    weighted_acc = (total_score / total_possible) * 100 if total_possible else 0

    print("\n" + "=" * 60)
    print(f"📊 Evaluation Report: {args.model}")
    print(f"Filter: {args.filter if args.filter else 'None'} | Max Retries: {args.max_retries}")
    print("-" * 60)
    print(f"{'Category':<15} | {'Tasks':<8} | {'Score':<10} | {'Max Score':<10} | {'Accuracy':<10}")
    print("-" * 60)

    # Tasks are bucketed by the prefix of their id (e.g. "beam_01" -> "beam").
    categories = {'beam': [], 'frame': [], 'truss': []}

    for r in results:
        cat_key = r['id'].split('_')[0].lower()
        if cat_key in categories:
            categories[cat_key].append(r)
        else:
            # Unknown prefixes fall into a lazily-created "other" bucket.
            if 'other' not in categories: categories['other'] = []
            categories['other'].append(r)

    for cat, items in categories.items():
        if not items: continue

        c_score = sum(x['score'] for x in items)
        c_max = sum(x['difficulty'] for x in items)
        c_acc = (c_score / c_max) * 100 if c_max > 0 else 0

        print(f"{cat.capitalize():<15} | {len(items):<8} | {c_score:<10.2f} | {c_max:<10.0f} | {c_acc:<9.2f}%")

    print("-" * 60)
    print(f"{'OVERALL':<15} | {len(results):<8} | {total_score:<10.2f} | {total_possible:<10.0f} | {weighted_acc:<9.2f}%")
    print("=" * 60)

    # Persist the per-task results; '/' in model names is not path-safe.
    output_filename = f"eval_result_{'DEBUG' if args.debug else args.model.replace('/', '_')}.json"
    with open(output_filename, "w") as f:
        json.dump(results, f, indent=2)
    print(f"Results saved to {output_filename}")
|
|
|
|
|
# Script entry point.
if __name__ == "__main__":
    main()