# MolCraftDiffusion-demo / test_inference.py
# Initial commit with robust CPU inference (commit 970ac6b)
"""Smoke test for the MolCraftDiffusion Space.

Verifies end to end that:
  1. ``app.py`` imports and its global model handle (``app.TASK``) loaded, and
  2. a small, fast generation run (2 molecules, fixed size 5, 10 diffusion
     steps) produces all three expected outputs (HTML, zip path, DataFrame).

Exits with status 1 on any failure so the script can gate CI / deployments.
"""
import sys
import os
import torch  # NOTE(review): unused below — presumably imported so a broken torch install fails fast; confirm

# Add the space directory to path so ``import app`` works regardless of CWD.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

try:
    print("Importing app...")
    import app

    print("\nTesting model loading...")
    # A None TASK means app.py could not load the model (per the message below).
    if app.TASK is None:
        print("FAIL: Model failed to load")
        sys.exit(1)
    else:
        print("SUCCESS: Model loaded")
        print(f"Model type: {type(app.TASK)}")
        print(f"Model device: {app.TASK.device}")

    print("\nTesting generation (small batch)...")
    # Generate 2 molecules, fixed size 5, 10 steps (fast)
    html, zip_path, df = app.generate(
        num_molecules=2,
        size_mode="Fixed size",
        fixed_size=5,
        diffusion_steps=10,
        seed=123,
    )
    print("\nGeneration output:")
    print(f"HTML len: {len(html) if html else 'None'}")
    print(f"Zip path: {zip_path}")
    print(f"DataFrame:\n{df}")
    # All three outputs must be present for the pipeline to count as working.
    if html and zip_path and df is not None:
        print("\nSUCCESS: Inference pipeline works!")
    else:
        print("\nFAIL: Generation produced incomplete outputs")
        sys.exit(1)
except Exception as e:
    # SystemExit subclasses BaseException, not Exception, so the sys.exit(1)
    # calls above propagate past this handler rather than being reported as
    # a "CRITICAL FAIL".
    print(f"\nCRITICAL FAIL: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)