Spaces:
Sleeping
Sleeping
#!/usr/bin/env python3
import os

# Presumably selects the DeepXDE backend; must be assigned before any module
# that imports deepxde (utils / materials_config below) — TODO confirm import
# order is the reason this sits between the import statements.
os.environ["DDE_BACKEND"] = "pytorch"
import sys
import json
import time
from datetime import datetime
from materials_config import MATERIALS, params_to_filename
from utils import create_model, train_model, save_model

# Directory (sibling of this script) where trained models are written.
MODELS_DIR = os.path.join(os.path.dirname(__file__), "models")
# JSON summary of every run, rewritten by train_all_materials().
RESULTS_FILE = os.path.join(MODELS_DIR, "training_results.json")
def train_single_material(name, true_lam, true_mu, iterations_adam=5000):
    """Train one PINN for a single material and save it under MODELS_DIR.

    Builds a model via create_model, fits it with train_model, persists it
    with save_model, and returns a result dict with the estimated Lamé
    parameters and their percentage errors.  Any exception is caught at this
    boundary so a batch run can continue: the traceback is printed and a
    dict with success=False plus the error text is returned instead.
    """
    separator = "=" * 60
    print(f"\n{separator}")
    print(f"Training: {name}")
    print(f" True λ = {true_lam:.4f}, True μ = {true_mu:.4f}")
    print(f"{separator}")

    t0 = time.time()
    try:
        model, lmbd, mu = create_model(true_lam, true_mu, n_points=5000)
        lambda_est, mu_est, losshistory, train_state = train_model(
            model, lmbd, mu, iterations_adam=iterations_adam, verbose=True
        )

        fname = params_to_filename(true_lam, true_mu)
        out_path = os.path.join(MODELS_DIR, fname)
        save_model(model, lmbd, mu, out_path)

        duration = time.time() - t0
        lam_err_pct = abs(lambda_est - true_lam) / true_lam * 100
        mu_err_pct = abs(mu_est - true_mu) / true_mu * 100
        outcome = {
            "name": name,
            "true_lambda": true_lam,
            "true_mu": true_mu,
            "estimated_lambda": lambda_est,
            "estimated_mu": mu_est,
            "lambda_error_pct": lam_err_pct,
            "mu_error_pct": mu_err_pct,
            "filename": fname,
            "training_time_sec": duration,
            "success": True,
        }

        print(f"\n Results for {name}:")
        print(
            f" λ: {true_lam:.4f} -> {lambda_est:.6f} (error: {outcome['lambda_error_pct']:.2f}%)"
        )
        print(
            f" μ: {true_mu:.4f} -> {mu_est:.6f} (error: {outcome['mu_error_pct']:.2f}%)"
        )
        print(f" Time: {duration:.1f}s")
        print(f" Saved: {out_path}")
        return outcome
    except Exception as e:
        # Boundary handler: report and keep going so the batch isn't aborted.
        print(f"\n ERROR training {name}: {e}")
        import traceback

        traceback.print_exc()
        return {
            "name": name,
            "true_lambda": true_lam,
            "true_mu": true_mu,
            "success": False,
            "error": str(e),
        }
def train_all_materials(materials=None, iterations_adam=5000):
    """Train a PINN for every material and write a JSON summary.

    Parameters
    ----------
    materials : dict | None
        Mapping name -> {"lambda": float, "mu": float, ...}; defaults to
        the full MATERIALS table.
    iterations_adam : int
        Adam iteration budget passed through to each training run.

    Returns the list of per-material result dicts (also dumped to
    RESULTS_FILE).  Failed runs are reported, not raised.
    """
    os.makedirs(MODELS_DIR, exist_ok=True)
    if materials is None:
        materials = MATERIALS
    results = []
    total = len(materials)
    print(f"\n{'#' * 60}")
    print(f"Batch Training - {total} materials")
    print(f"Started: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print(f"{'#' * 60}")
    for i, (name, params) in enumerate(materials.items(), 1):
        print(f"\n[{i}/{total}] Processing {name}...")
        result = train_single_material(
            name, params["lambda"], params["mu"], iterations_adam
        )
        result["material_info"] = params
        results.append(result)
        # Rewrite the summary after every material so a crash mid-batch
        # still leaves the completed runs on disk.
        with open(RESULTS_FILE, "w") as f:
            json.dump(results, f, indent=2)
    print(f"\n{'#' * 60}")
    print("Training Complete!")
    print(f"{'#' * 60}")
    successful = [r for r in results if r.get("success", False)]
    failed = [r for r in results if not r.get("success", False)]
    print(f"\nSuccessful: {len(successful)}/{total}")
    if failed:
        print(f"Failed: {len(failed)}/{total}")
        for r in failed:
            print(f" - {r['name']}: {r.get('error', 'Unknown error')}")
    # Guard: averaging over an empty list would raise ZeroDivisionError
    # when every material failed (the original crashed here in that case).
    if successful:
        avg_lambda_err = sum(r["lambda_error_pct"] for r in successful) / len(successful)
        avg_mu_err = sum(r["mu_error_pct"] for r in successful) / len(successful)
        total_time = sum(r.get("training_time_sec", 0) for r in successful)
        print("\nAverage Errors:")
        print(f" λ: {avg_lambda_err:.2f}%")
        print(f" μ: {avg_mu_err:.2f}%")
        print(f"Total training time: {total_time / 60:.1f} minutes")
    return results
def train_specific_materials(names, iterations_adam=5000):
    """Train only the named materials.

    Unknown names are skipped with a warning (the original dropped them
    silently, so a CLI typo trained nothing with no feedback).
    """
    selected = {}
    for n in names:
        if n in MATERIALS:
            selected[n] = MATERIALS[n]
        else:
            print(f"Warning: unknown material '{n}' - skipping")
    return train_all_materials(selected, iterations_adam)
if __name__ == "__main__":
    import argparse

    # CLI: train everything, a named subset, or just list the catalogue.
    cli = argparse.ArgumentParser(
        description="Train PINN models for material identification"
    )
    cli.add_argument(
        "--materials",
        nargs="*",
        default=None,
        help="Specific materials to train (default: all)",
    )
    cli.add_argument(
        "--iterations", type=int, default=5000, help="Adam iterations (default: 5000)"
    )
    cli.add_argument(
        "--list", action="store_true", help="List available materials and exit"
    )
    opts = cli.parse_args()

    if opts.list:
        print("Available materials:")
        for mat_name, mat in MATERIALS.items():
            print(f" {mat_name}: λ={mat['lambda']:.3f}, μ={mat['mu']:.3f}")
        sys.exit(0)

    if opts.materials:
        train_specific_materials(opts.materials, opts.iterations)
    else:
        train_all_materials(iterations_adam=opts.iterations)