Upload architectures.py with huggingface_hub
architectures.py
ADDED
@@ -0,0 +1,1496 @@
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import json
import time
import pytorch_lightning as pl
from torch.optim import AdamW
from torchmetrics import MeanSquaredError, PearsonCorrCoef, SpearmanCorrCoef, R2Score

logger = logging.getLogger(__name__)

# ===================== VERSION STRING FOR CLUSTER VERIFICATION =====================
ARCH_VERSION = "2024-12-24-stability-fix"
print(f"[ARCH] architectures.py loaded: {ARCH_VERSION}")
# ====================================================================================

class Interp1d(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, y, xnew):
        is_flat = {}
        vals = {'x': x, 'y': y, 'xnew': xnew}
        for name, arr in vals.items():
            is_flat[name] = (arr.dim() == 1)
            if is_flat[name]:
                vals[name] = arr.unsqueeze(0)
        x_2d, y_2d, xnew_2d = vals['x'], vals['y'], vals['xnew']
        B, Nx = x_2d.shape

        # SAFETY: Handle edge case where sequence length is < 5
        if Nx < 5:
            # Return constant interpolation (repeat/average the values)
            ynew_2d = y_2d.mean(dim=1, keepdim=True).expand(-1, xnew_2d.shape[1])
            ctx.save_for_backward(x_2d, y_2d, xnew_2d,
                                  torch.zeros_like(xnew_2d, dtype=torch.long),
                                  torch.zeros_like(xnew_2d))
            ctx.Nx_was_small = True
            if is_flat['x'] and is_flat['xnew']:
                ynew_2d = ynew_2d.squeeze(0)
            return ynew_2d

        ctx.Nx_was_small = False
        idx = torch.searchsorted(x_2d, xnew_2d, right=False) - 1
        idx = idx.clamp(min=0, max=Nx - 2)

        xL = torch.gather(x_2d, 1, idx)
        xR = torch.gather(x_2d, 1, idx + 1)
        yL = torch.gather(y_2d, 1, idx)
        yR = torch.gather(y_2d, 1, idx + 1)

        denom = (xR - xL)
        denom[denom == 0] = 1e-12
        t = (xnew_2d - xL) / denom
        ynew_2d = yL + (yR - yL) * t

        ctx.save_for_backward(x_2d, y_2d, xnew_2d, idx, t)
        if is_flat['x'] and is_flat['xnew']:
            ynew_2d = ynew_2d.squeeze(0)
        return ynew_2d

    @staticmethod
    def backward(ctx, grad_out):
        x_2d, y_2d, xnew_2d, idx, t = ctx.saved_tensors
        grad_x = grad_y = grad_xnew = None

        # Handle edge case from forward
        if getattr(ctx, 'Nx_was_small', False):
            if ctx.needs_input_grad[1]:
                grad_y = grad_out.sum(dim=-1, keepdim=True).expand_as(y_2d)
            return grad_x, grad_y, grad_xnew

        if ctx.needs_input_grad[1]:
            grad_y_tmp = torch.zeros_like(y_2d)
            idxp1 = (idx + 1).clamp(max=y_2d.shape[1] - 1)  # SAFETY: clamp idxp1

            # Calculate gradients
            grad_yL = (1.0 - t) * grad_out
            grad_yR = t * grad_out

            # Ensure consistent dtype between source and destination tensors
            grad_yL = grad_yL.to(dtype=grad_y_tmp.dtype)
            grad_yR = grad_yR.to(dtype=grad_y_tmp.dtype)

            grad_y_tmp.scatter_add_(1, idx, grad_yL)
            grad_y_tmp.scatter_add_(1, idxp1, grad_yR)
            grad_y = grad_y_tmp
        return grad_x, grad_y, grad_xnew


def interp1d(x, y, xnew):
    return Interp1d.apply(x, y, xnew)
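
# --- Usage sketch (illustrative example added for documentation; not part of the
# --- original upload) -----------------------------------------------------------
# interp1d performs batched, differentiable 1-D linear interpolation: x holds
# sorted sample locations, y the values at those locations, and xnew the query
# locations. Gradients flow back to y only (grad_x and grad_xnew are None).
# The shapes and values below are assumptions made up for the demo.
def _demo_interp1d():
    x = torch.linspace(0, 1, 10).unsqueeze(0).repeat(3, 1)     # [B=3, Nx=10], sorted
    y = (2.0 * x - 1.0) ** 2                                   # [3, 10] values at x
    y.requires_grad_(True)
    xnew = torch.linspace(0, 1, 25).unsqueeze(0).repeat(3, 1)  # [3, 25] query points
    ynew = interp1d(x, y, xnew)                                # [3, 25]
    ynew.sum().backward()                                      # gradient w.r.t. y
    return ynew.shape, y.grad.shape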

class SWE_Pooling(nn.Module):
    """
    Sliced-Wasserstein Embedding (SWE) Pooling.
    Maps token embeddings [B, L, d_in] => [B, num_slices].
    """
    def __init__(self, d_in, num_slices, num_ref_points, freeze_swe=False):
        super().__init__()
        self.num_slices = num_slices
        self.num_ref_points = num_ref_points

        ref = torch.linspace(-1, 1, num_ref_points).unsqueeze(1).repeat(1, num_slices)
        self.reference = nn.Parameter(ref, requires_grad=not freeze_swe)

        self.theta = nn.utils.weight_norm(nn.Linear(d_in, num_slices, bias=False), dim=0)
        self.theta.weight_g.data = torch.ones_like(self.theta.weight_g.data)
        self.theta.weight_g.requires_grad = False
        nn.init.normal_(self.theta.weight_v)

        self.weight = nn.Linear(num_ref_points, 1, bias=False)

        if freeze_swe:
            self.theta.weight_v.requires_grad = False
            self.reference.requires_grad = False

    def forward(self, X, mask=None):
        B, N, D = X.shape
        device = X.device

        X_slices = self.theta(X)  # => [B, N, num_slices]
        X_slices_sorted, _ = torch.sort(X_slices, dim=1)

        x_coord = torch.linspace(0, 1, N, device=device).unsqueeze(0).repeat(B * self.num_slices, 1)
        X_flat = X_slices_sorted.permute(0, 2, 1).reshape(B * self.num_slices, N)
        xnew = torch.linspace(0, 1, self.num_ref_points, device=device).unsqueeze(0).repeat(B * self.num_slices, 1)

        y_intp = interp1d(x_coord, X_flat, xnew)
        X_slices_sorted_interp = y_intp.view(B, self.num_slices, self.num_ref_points).permute(0, 2, 1)

        r_expanded = self.reference.expand_as(X_slices_sorted_interp)
        embeddings = (r_expanded - X_slices_sorted_interp).permute(0, 2, 1)  # => [B, num_slices, num_ref_points]
        weighted = self.weight(embeddings).sum(dim=-1)  # => [B, num_slices]
        return weighted
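
# --- Usage sketch (illustrative example added for documentation) ----------------
# SWE_Pooling maps a variable-length token sequence to a fixed-size vector:
# tokens are projected onto num_slices directions, sorted per slice, resampled
# to num_ref_points via interp1d, and compared against learned reference points.
# The sizes below are demo assumptions; the mask argument is currently unused.
def _demo_swe_pooling():
    pool = SWE_Pooling(d_in=64, num_slices=16, num_ref_points=8)
    tokens = torch.randn(2, 50, 64)   # [B=2, L=50, d_in=64]
    pooled = pool(tokens)             # [2, 16] - independent of L
    return pooled.shape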

#############################################################################
#                 Enhanced Mutation-Aware SWE_Pooling                      #
#############################################################################

class MutationAwareSWEPooling(nn.Module):
    """
    Enhanced Sliced-Wasserstein Embedding Pooling with explicit mutation position handling.
    Maps token embeddings [B, L, d_in] => [B, num_slices].

    - Preserves mutation position information through weighted aggregation
    - Uses mutation positions to guide the pooling process
    """
    def __init__(self, d_in, num_slices, num_ref_points, freeze_swe=False):
        super().__init__()
        self.num_slices = num_slices
        self.num_ref_points = num_ref_points
        self.d_esm = 1152  # FIXED: Hardcode to 1152 to avoid channel indexing bugs with context window

        # Standard SWE components
        ref = torch.linspace(-1, 1, num_ref_points).unsqueeze(1).repeat(1, num_slices)
        self.reference = nn.Parameter(ref, requires_grad=not freeze_swe)

        # For ESM features (without mutation channel)
        self.theta = nn.utils.weight_norm(nn.Linear(self.d_esm, num_slices, bias=False), dim=0)
        self.theta.weight_g.data = torch.ones_like(self.theta.weight_g.data)
        self.theta.weight_g.requires_grad = False
        nn.init.normal_(self.theta.weight_v)

        # Mutation-aware components
        self.mutation_importance = nn.Sequential(
            nn.Linear(1, 32),
            nn.ReLU(),
            nn.Linear(32, num_slices),
            nn.Sigmoid()
        )

        # Position-specific weighting for each slice
        self.pos_weighting = nn.Linear(1, num_slices, bias=False)

        # FIXED: Direct projection of mutation channel (the 1153rd dim)
        # Previously, this channel was only used as a multiplier, meaning if ESM
        # features had no diff, the result was 0. Now we project it directly.
        self.mut_projection = nn.Linear(1, num_slices, bias=False)

        # Final weighting
        self.weight = nn.Linear(num_ref_points, 1, bias=False)

        if freeze_swe:
            self.theta.weight_v.requires_grad = False
            self.reference.requires_grad = False

    def forward(self, X, mask=None):
        """
        X: [B, L, d_in] where d_in = d_esm + 1 (mutation channel)
        mask: [B, L] boolean mask
        """
        B, N, D = X.shape
        device = X.device

        # Check if using context window (additional channel)
        use_context = (D > self.d_esm + 1)

        if use_context:
            # Split ESM features and channels
            X_esm = X[:, :, :-2]    # [B, L, d_esm]
            X_mut = X[:, :, -2:-1]  # [B, L, 1] - mutation indicator
        else:
            # Split ESM features and mutation channel
            X_esm = X[:, :, :-1]  # [B, L, d_esm]
            X_mut = X[:, :, -1:]  # [B, L, 1] - mutation indicator

        # Regular SWE on ESM features
        X_slices = self.theta(X_esm)  # => [B, L, num_slices]

        # Compute mutation importance weights
        mut_weights = self.mutation_importance(X_mut)  # [B, L, num_slices]

        # Create position encodings (0 to 1 for each sequence)
        pos_tensor = torch.linspace(0, 1, N, device=device).view(1, N, 1).expand(B, N, 1)
        pos_weights = self.pos_weighting(pos_tensor)  # [B, L, num_slices]

        # Apply mutation-aware weighting to slices
        # Use both mutation indicator and position information
        # BUGFIX: We ALSO add the projected mutation signal directly.
        # This ensures the model 'sees' the 1.0 signal even if ESM features are identical.
        X_slices = X_slices * (1.0 + mut_weights * pos_weights) + self.mut_projection(X_mut)

        # Sort slices as in standard SWE
        X_slices_sorted, _ = torch.sort(X_slices, dim=1)

        # Continue with standard SWE interpolation
        x_coord = torch.linspace(0, 1, N, device=device).unsqueeze(0).repeat(B * self.num_slices, 1)
        X_flat = X_slices_sorted.permute(0, 2, 1).reshape(B * self.num_slices, N)
        xnew = torch.linspace(0, 1, self.num_ref_points, device=device).unsqueeze(0).repeat(B * self.num_slices, 1)

        y_intp = interp1d(x_coord, X_flat, xnew)
        X_slices_sorted_interp = y_intp.view(B, self.num_slices, self.num_ref_points).permute(0, 2, 1)

        r_expanded = self.reference.expand_as(X_slices_sorted_interp)
        embeddings = (r_expanded - X_slices_sorted_interp).permute(0, 2, 1)  # => [B, num_slices, num_ref_points]
        weighted = self.weight(embeddings).sum(dim=-1)  # => [B, num_slices]

        return weighted
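
# --- Usage sketch (illustrative example added for documentation) ----------------
# MutationAwareSWEPooling expects the last channel(s) of each token to be the
# mutation indicator stacked on top of the 1152-dim ESM embedding, i.e.
# d_in = 1153 (or 1154 when an extra context-window channel is appended).
# The inputs below are made-up demo values.
def _demo_mutation_aware_swe_pooling():
    pool = MutationAwareSWEPooling(d_in=1153, num_slices=32, num_ref_points=16)
    esm = torch.randn(2, 40, 1152)         # per-residue ESM embeddings
    mut = torch.zeros(2, 40, 1)            # mutation indicator channel
    mut[:, 20, :] = 1.0                    # mark residue 20 as mutated
    tokens = torch.cat([esm, mut], dim=-1)  # [2, 40, 1153]
    pooled = pool(tokens)                   # [2, 32]
    return pooled.shape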

#############################################################################
#              Mutation-Specific Cross-Attention with Gating               #
#############################################################################

class MutationSpecificAttention(nn.Module):
    """
    Enhanced cross-attention that explicitly handles mutation positions with gating.
    - Keeps ESM embeddings (1152-dim) and mutation channel separate
    - Uses specific mutation positions to guide attention
    - Preserves position-specific information throughout the network
    - Adds gating mechanism to control information flow
    - Includes memory-efficient computation for long sequences
    """
    def __init__(self, d_model=1152, num_heads=4, dropout=0.1):
        super().__init__()
        assert d_model % num_heads == 0, "d_model must be divisible by num_heads"
        self.d_model = d_model
        self.num_heads = num_heads
        self.head_dim = d_model // num_heads

        # Core attention for ESM embeddings only (1152-dim)
        self.query = nn.Linear(d_model, d_model)
        self.key = nn.Linear(d_model, d_model)
        self.value = nn.Linear(d_model, d_model)

        # Absolute position encoding
        self.pos_encoder = nn.Sequential(
            nn.Linear(1, 32),
            nn.ReLU(),
            nn.Linear(32, d_model)
        )

        # Mutation-position specific attention
        self.mut_encoder = nn.Sequential(
            nn.Linear(2, 64),  # Input: [mut_binary, position_normalized]
            nn.ReLU(),
            nn.Linear(64, num_heads)
        )

        # Gating mechanism to control information flow
        self.gate = nn.Sequential(
            nn.Linear(d_model * 2, d_model),
            nn.Sigmoid()
        )

        self.dropout = nn.Dropout(dropout)
        self.out_proj = nn.Linear(d_model, d_model)

    def split_heads(self, x):
        """Split the last dimension into (heads, head_dim)"""
        batch_size, seq_len, _ = x.shape
        x = x.view(batch_size, seq_len, self.num_heads, self.head_dim)
        return x.permute(0, 2, 1, 3)  # [batch, heads, seq_len, head_dim]

    def merge_heads(self, x):
        """Merge the (heads, head_dim) into d_model"""
        batch_size, _, seq_len, _ = x.shape
        x = x.permute(0, 2, 1, 3)  # [batch, seq_len, heads, head_dim]
        return x.reshape(batch_size, seq_len, self.d_model)

    def forward(self, q_esm, k_esm, v_esm, q_mut, k_mut, mask=None):
        """
        Inputs:
            q_esm, k_esm, v_esm: ESM embeddings [B, L, 1152]
            q_mut, k_mut: Mutation information [B, L, 1]
            mask: Optional attention mask [B, L] or [B, 1, L]
        """
        batch_size = q_esm.shape[0]
        q_len, k_len = q_esm.shape[1], k_esm.shape[1]

        # Create position tensors (0-1 range for each sequence)
        q_pos = torch.linspace(0, 1, q_len, device=q_esm.device).view(1, -1, 1).expand(batch_size, q_len, 1)
        k_pos = torch.linspace(0, 1, k_len, device=k_esm.device).view(1, -1, 1).expand(batch_size, k_len, 1)

        # Position encoding
        q_pos_enc = self.pos_encoder(q_pos)
        k_pos_enc = self.pos_encoder(k_pos)

        # Add position encodings to ESM features
        q_esm_pos = q_esm + q_pos_enc
        k_esm_pos = k_esm + k_pos_enc

        # Process core ESM embeddings with position information
        q = self.split_heads(self.query(q_esm_pos))  # [B, h, q_len, d_k]
        k = self.split_heads(self.key(k_esm_pos))    # [B, h, k_len, d_k]
        v = self.split_heads(self.value(v_esm))      # [B, h, v_len, d_v]

        # Concatenate mutation indicator with position
        q_mut_pos = torch.cat([q_mut, q_pos], dim=-1)  # [B, q_len, 2]
        k_mut_pos = torch.cat([k_mut, k_pos], dim=-1)  # [B, k_len, 2]

        # Encode position-aware mutation information
        q_mut_enc = self.mut_encoder(q_mut_pos)  # [B, q_len, num_heads]
        k_mut_enc = self.mut_encoder(k_mut_pos)  # [B, k_len, num_heads]

        # Standard scaled dot-product attention
        d_k = q.size(-1)
        scores = torch.matmul(q, k.transpose(-2, -1)) / (d_k ** 0.5)  # [B, h, q_len, k_len]

        # Create mutation-position attention bias
        # This explicitly boosts attention between positions based on mutation status
        mut_attn_bias = torch.matmul(
            q_mut_enc.permute(0, 2, 1).unsqueeze(3),  # [B, h, q_len, 1]
            k_mut_enc.permute(0, 2, 1).unsqueeze(2)   # [B, h, 1, k_len]
        )  # [B, h, q_len, k_len]

        # Apply mutation bias to attention scores
        # This makes mutations and their surrounding context attend more to each other
        scores = scores + mut_attn_bias

        # Apply mask if provided
        if mask is not None:
            # Fix mask dimension to match scores
            # mask shape should be [B, L] or [B, 1, L]
            if mask.dim() == 2:  # [B, L]
                # For keys mask [B, k_len] -> [B, 1, 1, k_len]
                mask = mask.unsqueeze(1).unsqueeze(2)
            elif mask.dim() == 3 and mask.size(1) == 1:  # [B, 1, L]
                # For keys mask [B, 1, k_len] -> [B, 1, 1, k_len]
                mask = mask.unsqueeze(2)

            # Expand mask to match scores dimensions
            # [B, 1, 1, k_len] -> [B, h, q_len, k_len]
            mask = mask.expand(-1, scores.size(1), scores.size(2), -1)

            # FIXED: Use -1e4 instead of -1e9 to avoid half-precision overflow
            scores = scores.masked_fill(mask == 0, -1e4)

        # Apply softmax and dropout
        attention_weights = F.softmax(scores, dim=-1)
        attention_weights = self.dropout(attention_weights)

        # Apply attention to values
        context = torch.matmul(attention_weights, v)  # [B, h, q_len, d_v]
        context = self.merge_heads(context)           # [B, q_len, d_model]
        attn_output = self.out_proj(context)

        # Apply gating mechanism (new addition)
        # Concatenate the original query with the attention output to determine the gate
        gate_input = torch.cat([q_esm, attn_output], dim=-1)
        gate_value = self.gate(gate_input)

        # Memory optimization for long sequences
        # Processing the gating operation in chunks to prevent OOM errors
        if q_len > 1000:  # Only use chunking for very long sequences
            chunk_size = 500
            output_chunks = []

            for i in range(0, q_len, chunk_size):
                end_idx = min(i + chunk_size, q_len)
                # Process chunks
                chunk_gate = gate_value[:, i:end_idx, :]
                chunk_attn = attn_output[:, i:end_idx, :]
                chunk_q = q_esm[:, i:end_idx, :]

                # Apply gating equation to this chunk
                chunk_output = chunk_gate * chunk_attn + (1 - chunk_gate) * chunk_q
                output_chunks.append(chunk_output)

            # Combine chunks
            output = torch.cat(output_chunks, dim=1)
        else:
            # Original operation for shorter sequences
            output = gate_value * attn_output + (1 - gate_value) * q_esm

        return output
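
# --- Usage sketch (illustrative example added for documentation) ----------------
# MutationSpecificAttention is cross-attention over ESM embeddings in which a
# per-position mutation indicator (concatenated with its normalized position)
# is encoded into a per-head bias added to the attention logits, and a sigmoid
# gate blends the attended output with the original query. The real model uses
# d_model=1152; the small sizes below are demo assumptions.
def _demo_mutation_specific_attention():
    attn = MutationSpecificAttention(d_model=64, num_heads=4, dropout=0.0)
    q_esm = torch.randn(2, 12, 64)     # query-side residue embeddings
    k_esm = torch.randn(2, 15, 64)     # key/value-side residue embeddings
    q_mut = torch.zeros(2, 12, 1)
    q_mut[:, 5, :] = 1.0               # mutated position on the query side
    k_mut = torch.zeros(2, 15, 1)
    key_mask = torch.ones(2, 15)       # all key positions valid
    out = attn(q_esm, k_esm, k_esm, q_mut, k_mut, key_mask)
    return out.shape                   # [2, 12, 64], same shape as q_esm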

class MutationSpecificCrossAttentionBlock(nn.Module):
    """
    Cross-attention block with explicit mutation position handling.
    Each block processes ESM embeddings and mutation channels separately,
    with special emphasis on mutation positions.
    """
    def __init__(self, d_model=1152, num_heads=4, ffn_dim=2048, dropout=0.1):
        super().__init__()
        # Mutation-aware cross attention
        self.attn_c12 = MutationSpecificAttention(d_model, num_heads, dropout)
        self.attn_c21 = MutationSpecificAttention(d_model, num_heads, dropout)

        # Layer normalization for ESM embeddings
        self.norm_c1 = nn.LayerNorm(d_model)
        self.norm_c2 = nn.LayerNorm(d_model)

        # FFN for ESM embeddings
        self.ffn_c1 = nn.Sequential(
            nn.Linear(d_model, ffn_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(ffn_dim, d_model)
        )
        self.ffn_c2 = nn.Sequential(
            nn.Linear(d_model, ffn_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(ffn_dim, d_model)
        )
        self.norm_ffn_c1 = nn.LayerNorm(d_model)
        self.norm_ffn_c2 = nn.LayerNorm(d_model)

        # Mutation importance update layer
        self.mut_update = nn.Sequential(
            nn.Linear(d_model + 1, 64),
            nn.ReLU(),
            nn.Linear(64, 1),
            nn.Sigmoid()
        )

    def forward(self, c1_esm, c1_mut, c2_esm, c2_mut, mask1=None, mask2=None):
        """
        Inputs:
            c1_esm, c2_esm: ESM embeddings [B, L, 1152]
            c1_mut, c2_mut: Mutation channels [B, L, 1]
            mask1, mask2: Optional masks
        """
        # c1->c2 cross-attention
        c1_attn = self.attn_c12(c1_esm, c2_esm, c2_esm, c1_mut, c2_mut, mask2)
        c1_out = self.norm_c1(c1_esm + c1_attn)

        # c2->c1 cross-attention
        c2_attn = self.attn_c21(c2_esm, c1_esm, c1_esm, c2_mut, c1_mut, mask1)
        c2_out = self.norm_c2(c2_esm + c2_attn)

        # Feed-forward
        c1_ffn = self.ffn_c1(c1_out)
        c1_ffn_out = self.norm_ffn_c1(c1_out + c1_ffn)

        c2_ffn = self.ffn_c2(c2_out)
        c2_ffn_out = self.norm_ffn_c2(c2_out + c2_ffn)

        # Update mutation importance based on attention output
        # This creates a feedback loop where mutation effect is refined
        c1_mut_in = torch.cat([c1_ffn_out, c1_mut], dim=-1)
        c2_mut_in = torch.cat([c2_ffn_out, c2_mut], dim=-1)

        # Stabilized update: convex combination ensures values stay in [0, 1]
        # Avoids exponential decay (old bug) and unbounded growth (additive bug)
        c1_mut_updated = 0.9 * c1_mut + 0.1 * self.mut_update(c1_mut_in)
        c2_mut_updated = 0.9 * c2_mut + 0.1 * self.mut_update(c2_mut_in)

        return c1_ffn_out, c2_ffn_out, c1_mut_updated, c2_mut_updated

class MutationSpecificCrossAttentionStack(nn.Module):
    """
    Stack of Mutation-Specific Cross-Attention blocks.
    Emphasizes mutation positions throughout the network.
    Now includes gradient checkpointing for memory efficiency.
    """
    def __init__(self, d_model=1152, num_heads=4, ffn_dim=2048, dropout=0.1, num_layers=2):
        super().__init__()
        self.d_model = d_model
        self.num_heads = num_heads
        self.use_checkpoint = True  # Enable gradient checkpointing by default

        self.blocks = nn.ModuleList([
            MutationSpecificCrossAttentionBlock(
                d_model=d_model,
                num_heads=num_heads,
                ffn_dim=ffn_dim,
                dropout=dropout
            ) for _ in range(num_layers)
        ])

    def forward(self, c1, c2, mask1=None, mask2=None):
        """
        Process protein chains with mutation-specific attention.
        c1, c2: [B, L, D] where D can be 1153 (original) or 1154 (with context window)
        Uses gradient checkpointing when in training mode to save memory.
        """
        # Check input dimension to determine if context window is used
        d_in = c1.shape[2]
        use_context = (d_in > 1153)

        if use_context:
            # Split ESM embeddings from mutation+context channels
            c1_esm, c1_channels = c1[:, :, :-2], c1[:, :, -2:]  # [B, L, 1152], [B, L, 2]
            c2_esm, c2_channels = c2[:, :, :-2], c2[:, :, -2:]  # [B, L, 1152], [B, L, 2]

            # Extract mutation channel (first channel)
            c1_mut = c1_channels[:, :, :1]  # [B, L, 1]
            c2_mut = c2_channels[:, :, :1]  # [B, L, 1]
        else:
            # Original behavior - just split ESM and mutation
            c1_esm, c1_mut = c1[:, :, :-1], c1[:, :, -1:]  # [B, L, 1152], [B, L, 1]
            c2_esm, c2_mut = c2[:, :, :-1], c2[:, :, -1:]  # [B, L, 1152], [B, L, 1]

        # Process through attention blocks with optional checkpointing
        for block in self.blocks:
            # Use gradient checkpointing in training mode for memory efficiency
            if self.use_checkpoint and self.training:
                # Define helper function for checkpointing that handles None masks
                def create_checkpoint_fn(block_fn):
                    def checkpoint_fn(esm1, mut1, esm2, mut2, has_mask1, has_mask2, mask1_val, mask2_val):
                        # Conditionally use the masks based on the has_mask flags
                        m1 = mask1_val if has_mask1 else None
                        m2 = mask2_val if has_mask2 else None
                        return block_fn(esm1, mut1, esm2, mut2, m1, m2)
                    return checkpoint_fn

                # Convert None masks to flags and dummy tensors for checkpointing
                has_mask1 = mask1 is not None
                has_mask2 = mask2 is not None
                mask1_val = mask1 if has_mask1 else torch.zeros(1, device=c1_esm.device)
                mask2_val = mask2 if has_mask2 else torch.zeros(1, device=c1_esm.device)

                # Apply checkpointing
                c1_esm, c2_esm, c1_mut, c2_mut = torch.utils.checkpoint.checkpoint(
                    create_checkpoint_fn(block),
                    c1_esm, c1_mut, c2_esm, c2_mut,
                    torch.tensor(has_mask1, device=c1_esm.device),
                    torch.tensor(has_mask2, device=c1_esm.device),
                    mask1_val, mask2_val
                )
            else:
                c1_esm, c2_esm, c1_mut, c2_mut = block(c1_esm, c1_mut, c2_esm, c2_mut, mask1, mask2)

        # Recombine with appropriate channels
        if use_context:
            # Need to preserve the context channel
            context_channels_c1 = c1_channels[:, :, 1:]  # [B, L, 1]
            context_channels_c2 = c2_channels[:, :, 1:]  # [B, L, 1]
            c1_out = torch.cat([c1_esm, c1_mut, context_channels_c1], dim=-1)  # [B, L, 1154]
            c2_out = torch.cat([c2_esm, c2_mut, context_channels_c2], dim=-1)  # [B, L, 1154]
        else:
            # Original behavior
            c1_out = torch.cat([c1_esm, c1_mut], dim=-1)  # [B, L, 1153]
            c2_out = torch.cat([c2_esm, c2_mut], dim=-1)  # [B, L, 1153]

        return c1_out, c2_out
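
# --- Usage sketch (illustrative example added for documentation) ----------------
# The stack takes two chains whose last channel is the mutation indicator
# (1152 ESM dims + 1 in the real model) and returns tensors with the same
# layout after mutation-aware cross-attention. Gradient checkpointing is only
# used in training mode; eval() below takes the plain path. The small sizes are
# demo assumptions (any d_model divisible by num_heads works).
def _demo_cross_attention_stack():
    stack = MutationSpecificCrossAttentionStack(
        d_model=64, num_heads=4, ffn_dim=128, num_layers=1).eval()
    c1 = torch.randn(1, 30, 65)   # 64 "ESM-like" dims + 1 mutation channel
    c2 = torch.randn(1, 25, 65)
    m1 = torch.ones(1, 30)
    m2 = torch.ones(1, 25)
    with torch.no_grad():
        o1, o2 = stack(c1, c2, m1, m2)
    return o1.shape, o2.shape     # [1, 30, 65], [1, 25, 65]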

#############################################################################
#            AffinityPredictor with Improved Memory Efficiency             #
#############################################################################

class AffinityPredictor(nn.Module):
    """
    Enhanced AffinityPredictor with explicit mutation position handling.
    embedding_method => "difference", "cosine", "cross_attention", or "cross_attention_swe".
    """
    def __init__(
        self,
        input_dim=1153,  # 1152 (ESM) + 1 (mutation)
        latent_dim=1024,
        num_slices=1024,
        num_ref_points=128,
        dropout_rate=0.2,
        freeze_swe=False,
        embedding_method="difference",
        normalize_difference=False,
        num_hidden_layers=2,
        # cross-attn
        num_cross_attn_layers=2,
        num_attention_heads=4,
        cross_ffn_dim=2048,
    ):
        super().__init__()
        self.embedding_method = embedding_method.lower()
        self.normalize_difference = normalize_difference
        self.input_dim = input_dim

        # ESM dimension (without mutation channel)
        self.esm_dim = input_dim - 1  # 1152

        # Define cross-attention stack if needed
        self.cross_stack = None
        if "cross_attention" in self.embedding_method:
            self.cross_stack = MutationSpecificCrossAttentionStack(
                d_model=self.esm_dim,  # 1152
                num_heads=num_attention_heads,
                ffn_dim=cross_ffn_dim,
                dropout=dropout_rate,
                num_layers=num_cross_attn_layers
            )

        # Enhanced Mutation-Aware SWE Pooling
        self.swe_pooling = None
        if self.embedding_method in ["difference", "cosine", "cross_attention_swe"]:
            # For SWE, we use the full input_dim (1153)
            self.swe_pooling = MutationAwareSWEPooling(
                d_in=input_dim,
                num_slices=num_slices,
                num_ref_points=num_ref_points,
                freeze_swe=freeze_swe
            )

        # Define aggregator MLP in-dimensions
        if self.embedding_method == "cosine":
            in_features = 1
        else:
            in_features = num_slices  # difference or cross_attention_swe => [B, num_slices]

        # Add projection layer for cross_attention to avoid dynamic creation
        self.cross_attn_projection = None
        if self.embedding_method == "cross_attention":
            cross_proj_in = input_dim  # Full dimension including mutation channel
            cross_proj_out = in_features
            self.cross_attn_projection = nn.Linear(cross_proj_in, cross_proj_out, bias=False)

        # Final MLP
        layers = []
        current_dim = in_features
        for _ in range(num_hidden_layers):
            layers.append(nn.Linear(current_dim, latent_dim))
            layers.append(nn.ReLU())
            layers.append(nn.Dropout(dropout_rate))
            current_dim = latent_dim
        layers.append(nn.Linear(current_dim, 1))
        self.mlp = nn.Sequential(*layers)

    def forward(self, chain1, chain1_mask, chain2, chain2_mask):
        """
        chain1, chain2 => [B, L, input_dim] (1153 or 1154 with context)
        """
        if "cross_attention" in self.embedding_method:
            # Process through mutation-specific cross-attention
            c1_out, c2_out = self.cross_stack(chain1, chain2, chain1_mask, chain2_mask)

            if self.embedding_method == "cross_attention_swe":
                # Apply enhanced SWE pooling
                rep1 = self.swe_pooling(c1_out, chain1_mask)  # [B, num_slices]
                rep2 = self.swe_pooling(c2_out, chain2_mask)  # [B, num_slices]

                # Difference aggregator
                diff = rep1 - rep2
                if self.normalize_difference:
                    diff = F.normalize(diff, p=2, dim=1)

                # Final prediction
                preds = self.mlp(diff).squeeze(-1)

                return preds

            elif self.embedding_method == "cross_attention":
                # Use mutation-weighted pooling
                # Extract mutation channel to guide pooling
                d_in = c1_out.shape[2]
                use_context = (d_in > 1153)
                if use_context:
                    c1_mut = c1_out[:, :, -2:-1]  # [B, L, 1]
                    c2_mut = c2_out[:, :, -2:-1]  # [B, L, 1]
                else:
                    c1_mut = c1_out[:, :, -1:]  # [B, L, 1]
                    c2_mut = c2_out[:, :, -1:]  # [B, L, 1]

                # Weighted pooling - gives higher weight to mutated positions
                c1_weights = F.softmax(c1_mut * 10, dim=1)  # Sharpen weights
                c2_weights = F.softmax(c2_mut * 10, dim=1)

                c1_pool = torch.sum(c1_out * c1_weights, dim=1)  # [B, 1153/1154]
                c2_pool = torch.sum(c2_out * c2_weights, dim=1)  # [B, 1153/1154]

                # Create difference representation
                diff = c1_pool - c2_pool
                if self.normalize_difference:
                    diff = F.normalize(diff, p=2, dim=1)

                # Use pre-defined projection layer instead of creating one dynamically
                if self.cross_attn_projection is not None:
                    diff = self.cross_attn_projection(diff)

                preds = self.mlp(diff).squeeze(-1)

                return preds

        elif self.embedding_method == "cosine":
            # Enhanced SWE => [B, num_slices]
            rep1 = self.swe_pooling(chain1, chain1_mask)
            rep2 = self.swe_pooling(chain2, chain2_mask)
            sim = F.cosine_similarity(rep1, rep2, dim=1).unsqueeze(-1)
            out = self.mlp(sim).squeeze(-1)

            return out

        else:  # "difference"
            rep1 = self.swe_pooling(chain1, chain1_mask)
            rep2 = self.swe_pooling(chain2, chain2_mask)
            diff = rep1 - rep2
            if self.normalize_difference:
                diff = F.normalize(diff, p=2, dim=1)
            out = self.mlp(diff).squeeze(-1)

            return out
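
# --- Usage sketch (illustrative example added for documentation) ----------------
# AffinityPredictor maps two chains of shape [B, L, 1153] to one scalar per
# complex. With embedding_method="difference", each chain is pooled with the
# mutation-aware SWE and an MLP regresses on the difference of the two pooled
# vectors. The sizes below are demo assumptions, much smaller than the defaults.
def _demo_affinity_predictor():
    model = AffinityPredictor(input_dim=1153, latent_dim=64, num_slices=32,
                              num_ref_points=16, embedding_method="difference",
                              num_hidden_layers=1).eval()
    chain1 = torch.randn(2, 40, 1153)
    chain2 = torch.randn(2, 35, 1153)
    mask1 = torch.ones(2, 40)
    mask2 = torch.ones(2, 35)
    with torch.no_grad():
        preds = model(chain1, mask1, chain2, mask2)   # shape [2]
    return preds.shape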

class AffinityPredictionModel(pl.LightningModule):
    """
    Lightning wrapper for training. Siamese logic in training.py
    """
    def __init__(self, predictor: AffinityPredictor, learning_rate=1e-4):
        super().__init__()
        self.predictor = predictor
        self.learning_rate = learning_rate

        self.loss_fn = nn.MSELoss()
        self.pearson_corr = PearsonCorrCoef()
        self.spearman_corr = SpearmanCorrCoef()
        self.r2_score = R2Score()
        self.mse_metric = MeanSquaredError()

    def forward(self, chain1, chain1_mask, chain2, chain2_mask):
        return self.predictor(chain1, chain1_mask, chain2, chain2_mask)

    def training_step(self, batch, batch_idx):
        pass

    def validation_step(self, batch, batch_idx):
        pass

    def configure_optimizers(self):
        optimizer = AdamW(self.parameters(), lr=self.learning_rate)
        steps = self.trainer.estimated_stepping_batches
        scheduler = torch.optim.lr_scheduler.OneCycleLR(
            optimizer, max_lr=self.learning_rate, total_steps=steps
        )
        return {"optimizer": optimizer, "lr_scheduler": scheduler, "monitor": "val_loss"}
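
# --- Note (added for documentation, not part of the original file) ---------------
# OneCycleLR is designed to be stepped once per optimizer step, but PyTorch
# Lightning steps a scheduler returned in this plain dict form once per epoch by
# default, and the "monitor" key is only consulted for schedulers such as
# ReduceLROnPlateau. If per-step scheduling is intended, one option is to return
# the scheduler with an explicit interval, e.g.:
#
#     return {
#         "optimizer": optimizer,
#         "lr_scheduler": {"scheduler": scheduler, "interval": "step"},
#     }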

# Enhanced AffinityPredictor with Two-Head Architecture
# Add this to architectures.py

class DualHeadAffinityPredictor(nn.Module):
    """
    Enhanced AffinityPredictor with explicit two-head architecture.
    Simultaneously processes mutant and wildtype proteins to predict both ΔG and ΔΔG.

    embedding_method => "difference", "cosine", "cross_attention", or "cross_attention_swe".
    """
    def __init__(
        self,
        input_dim=1153,  # 1152 (ESM) + 1 (mutation)
        latent_dim=1024,
        num_slices=1024,
        num_ref_points=128,
        dropout_rate=0.2,
        freeze_swe=False,
        embedding_method="difference",
        normalize_difference=False,
        num_hidden_layers=2,
        # cross-attn
        num_cross_attn_layers=2,
        num_attention_heads=4,
        cross_ffn_dim=2048,
        use_dual_head=True,          # Enable dual-head by default
        ddg_signal_gain=1.0,         # Initial gain for ddG signal
        ddg_signal_multiplier=20.0,  # FIXED: multiplier for ddG signal (vnew65.0)
    ):
        super().__init__()
        self.embedding_method = embedding_method.lower()
        self.normalize_difference = normalize_difference
        self.input_dim = input_dim
        self.use_dual_head = use_dual_head
        self._ddg_log_counter = 0

        # DEBUG: Confirm this version is running
        print(f"[MODEL INIT] DualHeadAffinityPredictor created: version={ARCH_VERSION}, dual_head={use_dual_head}, method={self.embedding_method}")

        # ESM dimension (without mutation channel)
        self.esm_dim = input_dim - 1  # 1152

        # Define cross-attention stack if needed
        self.cross_stack = None
        if "cross_attention" in self.embedding_method:
            self.cross_stack = MutationSpecificCrossAttentionStack(
                d_model=self.esm_dim,  # 1152
                num_heads=num_attention_heads,
                ffn_dim=cross_ffn_dim,
                dropout=dropout_rate,
                num_layers=num_cross_attn_layers
            )

        # Enhanced Mutation-Aware SWE Pooling
        self.swe_pooling = None
        if self.embedding_method in ["difference", "cosine", "cross_attention_swe"]:
            # For SWE, we use the full input_dim (1153)
            self.swe_pooling = MutationAwareSWEPooling(
                d_in=input_dim,
                num_slices=num_slices,
                num_ref_points=num_ref_points,
                freeze_swe=freeze_swe
            )

        # Define aggregator MLP in-dimensions
        if self.embedding_method == "cosine":
            in_features = 1
        else:
            in_features = num_slices  # difference or cross_attention_swe => [B, num_slices]

        # Add projection layer for cross_attention to avoid dynamic creation
        self.cross_attn_projection = None
        if self.embedding_method == "cross_attention":
            cross_proj_in = input_dim  # Full dimension including mutation channel
            cross_proj_out = in_features
            self.cross_attn_projection = nn.Linear(cross_proj_in, cross_proj_out, bias=False)

        # Define dG head (main prediction head)
        layers = []
        current_dim = in_features
        for _ in range(num_hidden_layers):
            layers.append(nn.Linear(current_dim, latent_dim))
            layers.append(nn.ReLU())
            layers.append(nn.Dropout(dropout_rate))
            current_dim = latent_dim
        layers.append(nn.Linear(current_dim, 1))
        self.dg_mlp = nn.Sequential(*layers)

        # Define ΔΔG head for direct prediction
        # FIX (vnew64.0): Shallow 2-layer MLP + residual skip connection.
        # DDGACT diagnostics from vnew62-63 showed the 7-layer ReLU network causes
        # progressive variance collapse (std: 0.1 → 7e-05). Each ReLU zeros ~50%
        # of activations, so 7 layers → 0.5^7 = 0.8% signal survival.
        # Solution: (1) 2 layers only, (2) skip connection preserves raw input signal.
        if self.use_dual_head:
            # Shallow nonlinear pathway (2 layers)
            self.ddg_hidden = nn.Sequential(
                nn.Linear(in_features, latent_dim),
                nn.GELU(),  # GELU instead of ReLU - no zero-capping, smoother gradients
                nn.Linear(latent_dim, latent_dim),
                nn.GELU(),
            )
            # Output projection
            self.ddg_out = nn.Linear(latent_dim, 1)
            # Skip connection: project input directly to output dimension
            self.ddg_skip = nn.Linear(in_features, 1)

            # Source type embedding for conditional inference
            # User-friendly types for inference:
            #   0 = "mutant"   - Single mutant predictions (most common)
            #   1 = "wt_pairs" - Wildtype pairs with absolute binding affinity
            #   2 = "antibody" - Antibody-antigen binding (CDR-focused)
            self.source_type_embedding = nn.Embedding(3, 32)  # 32-dim embedding
            self.source_type_projection = nn.Linear(in_features + 32, in_features)  # Project back to in_features

            # FIXED: Restoring the historical 'learnable gain' strategy
            # This allows the model to amplify the ddG signal early in Stage B
            self.ddg_signal_gain = nn.Parameter(torch.tensor(float(ddg_signal_gain)))
            self.ddg_signal_multiplier = float(ddg_signal_multiplier)
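
    # --- Usage sketch (illustrative comments added for documentation) ------------
    # In single-head mode the call signature matches AffinityPredictor and only
    # the dG head is used:
    #
    #     model = DualHeadAffinityPredictor(embedding_method="difference")
    #     dg = model(mut_c1, mut_mask1, mut_c2, mut_mask2)              # [B]
    #
    # When the wild-type chains are also supplied (and use_dual_head=True), the
    # docstring of forward() states that a (dG, ddG) tuple is returned, with ddG
    # computed from residue-level mutant-minus-wildtype differences:
    #
    #     dg, ddg = model(mut_c1, mut_mask1, mut_c2, mut_mask2,
    #                     wt_c1, wt_mask1, wt_c2, wt_mask2,
    #                     source_type_ids=torch.zeros(B, dtype=torch.long))
    #
    # (mut_c1/wt_c1 etc. are [B, L, 1153] tensors, as in the demos above.)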

    def _extract_features(self, chain1, chain1_mask, chain2, chain2_mask):
        """
        Extract feature representation for a protein complex.
        Returns a vector representation suitable for prediction.
        """
        if "cross_attention" in self.embedding_method:
            # Process through mutation-specific cross-attention
            c1_out, c2_out = self.cross_stack(chain1, chain2, chain1_mask, chain2_mask)

            if self.embedding_method == "cross_attention_swe":
                # Apply enhanced SWE pooling
                rep1 = self.swe_pooling(c1_out, chain1_mask)  # [B, num_slices]
                rep2 = self.swe_pooling(c2_out, chain2_mask)  # [B, num_slices]

                # Difference aggregator
                diff = rep1 - rep2
                if self.normalize_difference:
                    diff = F.normalize(diff, p=2, dim=1)
                return diff

            elif self.embedding_method == "cross_attention":
                # Use mutation-weighted pooling
                # Extract mutation channel to guide pooling
                d_in = c1_out.shape[2]
                use_context = (d_in > 1153)
                if use_context:
                    c1_mut = c1_out[:, :, -2:-1]  # [B, L, 1]
                    c2_mut = c2_out[:, :, -2:-1]  # [B, L, 1]
                else:
                    c1_mut = c1_out[:, :, -1:]  # [B, L, 1]
                    c2_mut = c2_out[:, :, -1:]  # [B, L, 1]

                # Weighted pooling - gives higher weight to mutated positions
                c1_weights = F.softmax(c1_mut * 10, dim=1)  # Sharpen weights
                c2_weights = F.softmax(c2_mut * 10, dim=1)

                c1_pool = torch.sum(c1_out * c1_weights, dim=1)  # [B, 1153/1154]
                c2_pool = torch.sum(c2_out * c2_weights, dim=1)  # [B, 1153/1154]

                # Create difference representation
                diff = c1_pool - c2_pool
                if self.normalize_difference:
                    diff = F.normalize(diff, p=2, dim=1)

                # Use pre-defined projection layer instead of creating one dynamically
                if self.cross_attn_projection is not None:
                    diff = self.cross_attn_projection(diff)

                return diff

        elif self.embedding_method == "cosine":
            # Enhanced SWE => [B, num_slices]
            rep1 = self.swe_pooling(chain1, chain1_mask)
            rep2 = self.swe_pooling(chain2, chain2_mask)
            sim = F.cosine_similarity(rep1, rep2, dim=1).unsqueeze(-1)
            return sim

        else:  # "difference"
            rep1 = self.swe_pooling(chain1, chain1_mask)
            rep2 = self.swe_pooling(chain2, chain2_mask)
            diff = rep1 - rep2
            if self.normalize_difference:
                diff = F.normalize(diff, p=2, dim=1)
            return diff

    def _extract_residue_features(self, chain1, chain1_mask, chain2, chain2_mask):
        """
        Extract RESIDUE-LEVEL features (before pooling) for computing differences.
        Used for ddG to preserve mutation-specific information.

        Returns:
            c1_out, c2_out: [B, L1, D] and [B, L2, D] attended residue features
        """
        if "cross_attention" in self.embedding_method:
            c1_out, c2_out = self.cross_stack(chain1, chain2, chain1_mask, chain2_mask)
            return c1_out, c2_out
        else:
            # For non-cross-attention methods, return inputs directly
            return chain1, chain2
def forward(self, mut_chain1, mut_chain1_mask, mut_chain2, mut_chain2_mask,
|
| 962 |
+
wt_chain1=None, wt_chain1_mask=None, wt_chain2=None, wt_chain2_mask=None,
|
| 963 |
+
source_type_ids=None):
|
| 964 |
+
"""
|
| 965 |
+
Dual-head forward method that can handle both modes:
|
| 966 |
+
1. Standard mode: Just predict dG for mutant complex
|
| 967 |
+
2. Dual-head mode: Predict both dG and direct ddG when wildtype is provided
|
| 968 |
+
|
| 969 |
+
For ddG: Uses RESIDUE-LEVEL differences before pooling to preserve mutation info.
|
| 970 |
+
ddG = ddg_mlp(pool(mut_features - wt_features)) instead of
|
| 971 |
+
ddg_mlp(pool(mut_features) - pool(wt_features))
|
| 972 |
+
|
| 973 |
+
Args:
|
| 974 |
+
source_type_ids: Optional[Tensor] of shape [B], values 0/1/2 for conditioning
|
| 975 |
+
|
| 976 |
+
Returns:
|
| 977 |
+
If wildtype inputs are None or use_dual_head=False:
|
| 978 |
+
Returns mutant dG prediction only
|
| 979 |
+
Else:
|
| 980 |
+
Returns tuple of (mutant_dG, direct_ddG_prediction)
|
| 981 |
+
"""
|
| 982 |
+
# ============== OPTIMIZED: Cache residue features ==============
|
| 983 |
+
# Get mutant RESIDUE-LEVEL features first (used for both dG and ddG)
|
| 984 |
+
mut_c1_res, mut_c2_res = self._extract_residue_features(
|
| 985 |
+
mut_chain1, mut_chain1_mask, mut_chain2, mut_chain2_mask)
|
| 986 |
+
|
| 987 |
+
# Pool for dG prediction (reuses cached residue features)
|
| 988 |
+
if "cross_attention_swe" in self.embedding_method:
|
| 989 |
+
rep1 = self.swe_pooling(mut_c1_res, mut_chain1_mask)
|
| 990 |
+
rep2 = self.swe_pooling(mut_c2_res, mut_chain2_mask)
|
| 991 |
+
mut_features = rep1 - rep2
|
| 992 |
+
if self.normalize_difference:
|
| 993 |
+
mut_features = F.normalize(mut_features, p=2, dim=1, eps=1e-8)
|
| 994 |
+
else:
|
| 995 |
+
# Fallback pooling
|
| 996 |
+
mut_features = self._extract_features(mut_chain1, mut_chain1_mask, mut_chain2, mut_chain2_mask)
|
| 997 |
+
|
| 998 |
+
# ============== SOURCE TYPE CONDITIONING ==============
|
| 999 |
+
# Apply source type conditioning if provided
|
| 1000 |
+
if source_type_ids is not None:
|
| 1001 |
+
# Get source type embedding [B, 32]
|
| 1002 |
+
src_emb = self.source_type_embedding(source_type_ids)
|
| 1003 |
+
# Concatenate with features and project back
|
| 1004 |
+
conditioned_features = torch.cat([mut_features, src_emb], dim=-1)
|
| 1005 |
+
mut_features = self.source_type_projection(conditioned_features)
|
| 1006 |
+
|
| 1007 |
+
# Predict dG for mutant
|
| 1008 |
+
dg_pred = self.dg_mlp(mut_features).squeeze(-1)
|
| 1009 |
+
|
| 1010 |
+
# If no wildtype or dual head is disabled, just return mutant dG
|
| 1011 |
+
if not self.use_dual_head or wt_chain1 is None or wt_chain2 is None:
|
| 1012 |
+
return dg_pred
|
| 1013 |
+
|
| 1014 |
+
# ============== RESIDUE-LEVEL ddG COMPUTATION ==============
|
| 1015 |
+
# Get wildtype RESIDUE-LEVEL features (mutant already cached above)
|
| 1016 |
+
wt_c1_res, wt_c2_res = self._extract_residue_features(
|
| 1017 |
+
wt_chain1, wt_chain1_mask, wt_chain2, wt_chain2_mask)
|
| 1018 |
+
|
| 1019 |
+
|
| 1020 |
+
# (Debug logging removed - was polluting training output)
|
| 1021 |
+
|
| 1022 |
+
# Compute RESIDUE-LEVEL differences BEFORE pooling
|
| 1023 |
+
# This preserves mutation-specific changes at each position
|
| 1024 |
+
# Handle sequence length differences by taking minimum length
|
| 1025 |
+
L_c1 = min(mut_c1_res.shape[1], wt_c1_res.shape[1])
|
| 1026 |
+
L_c2 = min(mut_c2_res.shape[1], wt_c2_res.shape[1])
|
| 1027 |
+
|
| 1028 |
+
c1_diff = mut_c1_res[:, :L_c1, :] - wt_c1_res[:, :L_c1, :] # [B, L1, D]
|
| 1029 |
+
c2_diff = mut_c2_res[:, :L_c2, :] - wt_c2_res[:, :L_c2, :] # [B, L2, D]
|
| 1030 |
+
|
| 1031 |
+
# Update masks for truncated length
|
| 1032 |
+
c1_diff_mask = mut_chain1_mask[:, :L_c1] if mut_chain1_mask is not None else None
|
| 1033 |
+
c2_diff_mask = mut_chain2_mask[:, :L_c2] if mut_chain2_mask is not None else None
|
| 1034 |
+
|
        # [DIFF CHECK] Diagnostic logging.
        # Measure cosine similarity at the approximate mutation site to check for embedding collapse.
        # Use getattr here: the counter is only initialised further below, so guard the first call.
        if getattr(self, "_ddg_log_counter", 0) % 200 == 1:
            with torch.no_grad():
                # Extract center position
                mid_idx = L_c1 // 2

                # Vectors at midpoint
                v_mut = mut_c1_res[:, mid_idx, :]
                v_wt = wt_c1_res[:, mid_idx, :]

                # Cosine similarity
                cos_sim = F.cosine_similarity(v_mut, v_wt, dim=1).mean().item()
                diff_norm = (v_mut - v_wt).norm(dim=1).mean().item()

                logger.info(f"[DIFF CHECK] Batch {self._ddg_log_counter}: CosSim at mid={cos_sim:.5f}, DiffNorm={diff_norm:.5f}")
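        # Reading the diagnostic (illustrative guidance, not a hard threshold): a CosSim that
        # stays ~1.0 together with a near-zero DiffNorm suggests the encoder barely
        # distinguishes mutant from wildtype at the sampled position (embedding collapse),
        # whereas a clearly lower CosSim / larger DiffNorm indicates a usable signal.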
        # NOW pool the differences
        if self.swe_pooling is not None:
            # =================================================================
            # HYBRID POOLING: Global SWE + Local Mutation-Site-Centric (vnew37.0)
            # This ensures local mutation signals are not diluted by global pooling
            # =================================================================

            # Concatenate chain differences along the sequence dimension
            combined_diff = torch.cat([c1_diff, c2_diff], dim=1)  # [B, L_comb, D=1153]
            if c1_diff_mask is not None and c2_diff_mask is not None:
                combined_mask = torch.cat([c1_diff_mask, c2_diff_mask], dim=1)
            else:
                combined_mask = None

            # A. Global component: standard SWE pooling (captures global stability context)
            global_diff = self.swe_pooling(combined_diff, combined_mask)  # [B, num_slices]

            # B. Local component: Mutation-Site-Centric Pooling (MSCP)
            # CRITICAL FIX (v49.0): extract the indicator from the RAW INPUT chains, NOT the cross-attention output!
            # The cross-attention stack applies a 0.9 convex combination at each layer, so after
            # 5 layers only 0.9^5 ~= 59% of the original indicator survives (~41% attenuation).
            # Use mut_chain1/mut_chain2 (raw inputs) instead of mut_c1_res/mut_c2_res (diluted outputs).
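            # Assumed channel layout of the raw inputs (inferred from the slicing below, not
            # verified here): dims [0:1152] hold the ESM embedding; the mutation indicator is
            # the last channel for 1153-dim inputs, or the second-to-last channel when an
            # extra context-window channel brings the width to 1154. The agent-log block
            # further down dumps stats for both tail channels to catch a wrong selection.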
            # Determine if we have a context window (1154-dim) or standard (1153-dim) input
            d_raw = mut_chain1.shape[2]
            use_context = (d_raw > 1153)

            # Extract the RAW indicator from the INPUT chains (before cross-attention!)
            if use_context:
                c1_mut_raw = mut_chain1[:, :L_c1, -2:-1]
                c2_mut_raw = mut_chain2[:, :L_c2, -2:-1]
            else:
                c1_mut_raw = mut_chain1[:, :L_c1, -1:]
                c2_mut_raw = mut_chain2[:, :L_c2, -1:]

            mut_indicator = torch.cat([c1_mut_raw, c2_mut_raw], dim=1)  # [B, L_comb, 1]

            # Stability: clamp the indicator to be non-negative and bounded.
            # If upstream processing produced unexpected values, force them back into [0, 2].
            mut_indicator = mut_indicator.clamp(min=0.0, max=2.0)

            # Weighted average focusing ONLY on the mutation sites.
            # Clamp the denominator at 1e-3 to avoid NaNs for wildtype samples (mut_sum = 0).
            mut_sum = mut_indicator.sum(dim=1).clamp(min=1e-3)

            # MSCP calculation with an additional stability guard
            mscp_esm = (combined_diff[:, :, :1152] * mut_indicator).sum(dim=1) / mut_sum  # [B, 1152]
            mscp_mut = (mut_indicator * mut_indicator).sum(dim=1) / mut_sum  # [B, 1] (should be ~1.0)

            # Project the local delta into the same slice space as the global features,
            # using the SHARED theta projection and mut_projection from the SWE pooler.
            # This keeps the global and local paths in a consistent representation.
            local_diff_esm = self.swe_pooling.theta(mscp_esm)
            local_diff_mut = self.swe_pooling.mut_projection(mscp_mut)
            local_diff = local_diff_esm + local_diff_mut  # [B, num_slices]

            # C. Combine: residual-style addition + gain.
            # Note: when normalize_difference is True, the L2 normalisation below cancels this
            # multiplier entirely; the learnable ddg_signal_gain applied AFTER normalisation is
            # what actually scales the signal, so the multiplier only matters in the
            # unnormalised path.
            diff_multiplier = getattr(self, "ddg_signal_multiplier", 20.0)
            diff_features = (global_diff + local_diff) * diff_multiplier
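            # Worked example of the MSCP weighting (numbers illustrative only): with a single
            # mutated position the indicator is ~1.0 there and ~0.0 elsewhere, so mut_sum ~= 1
            # and mscp_esm reduces to the embedding delta at that position; with k mutated
            # positions it becomes the mean delta over those k sites, while a pure wildtype
            # pair gives mut_sum = 1e-3 (the clamp) and a zero mscp_esm vector.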
            # ===================================================================
            # SIGNAL FLOW LOGGING: Track local vs global contribution
            if not hasattr(self, '_ddg_log_counter'):
                self._ddg_log_counter = 0
            self._ddg_log_counter += 1

            should_log = (self._ddg_log_counter % 200 == 1)
            if should_log:
                g_mag = global_diff.abs().mean().item()
                l_mag = local_diff.abs().mean().item()
                logger.info(f"[DDG SIGNAL] Batch {self._ddg_log_counter}: Global_mag={g_mag:.4f}, Local_mag={l_mag:.4f}")
                #region agent log
                try:
                    # Inspect what the model thinks the mutation indicator is (both tail channels)
                    d_raw_dbg = int(mut_chain1.shape[2])
                    # Indicator candidate stats on chain1/chain2 for the last and second-last channels
                    def _chan_stats(x):
                        return {
                            "min": float(x.min().item()),
                            "max": float(x.max().item()),
                            "mean": float(x.float().mean().item()),
                            "std": float(x.float().std().item()),
                        }
                    c1_last = _chan_stats(mut_chain1[:, :L_c1, -1])
                    c2_last = _chan_stats(mut_chain2[:, :L_c2, -1])
                    c1_last2 = _chan_stats(mut_chain1[:, :L_c1, -2]) if d_raw_dbg >= 1154 else None
                    c2_last2 = _chan_stats(mut_chain2[:, :L_c2, -2]) if d_raw_dbg >= 1154 else None
                    mut_sum_dbg = float(mut_sum.mean().item()) if "mut_sum" in locals() else None
                    payload = {
                        "sessionId": "debug-session",
                        "runId": "pre-fix",
                        "hypothesisId": "F",
                        "location": "architectures.py:DualHeadAffinityPredictor:mscp_indicator_debug",
                        "message": "Indicator channel stats (last vs second-last) to detect double-indicator / wrong channel selection",
                        "data": {
                            "ddg_log_counter": int(self._ddg_log_counter),
                            "d_raw": d_raw_dbg,
                            "use_context_flag": bool(use_context),
                            "c1_last": c1_last,
                            "c2_last": c2_last,
                            "c1_last2": c1_last2,
                            "c2_last2": c2_last2,
                            "mut_sum_mean": mut_sum_dbg,
                        },
                        "timestamp": int(time.time() * 1000),
                    }
                    with open("/Users/supantha/Documents/code_v2/protein/.cursor/debug.log", "a") as f:
                        f.write(json.dumps(payload, default=str) + "\n")
                    logger.info(f"[AGENTLOG MSCP] d_raw={d_raw_dbg} use_context={use_context} c1_last={c1_last} c1_last2={c1_last2} mut_sum_mean={mut_sum_dbg}")
                except Exception:
                    pass
                #endregion
            if self.normalize_difference:
                diff_features = F.normalize(diff_features, p=2, dim=1, eps=1e-8)

                # FIXED: Apply signal_gain AFTER normalization so it actually scales the output
                diff_features = diff_features * self.ddg_signal_gain
        else:
            # Fallback: mean pooling of differences
            if c1_diff_mask is not None:
                c1_diff = c1_diff * c1_diff_mask.unsqueeze(-1).float()
                c1_pool = c1_diff.sum(dim=1) / c1_diff_mask.sum(dim=1, keepdim=True).clamp(min=1)
            else:
                c1_pool = c1_diff.mean(dim=1)
            if c2_diff_mask is not None:
                c2_diff = c2_diff * c2_diff_mask.unsqueeze(-1).float()
                c2_pool = c2_diff.sum(dim=1) / c2_diff_mask.sum(dim=1, keepdim=True).clamp(min=1)
            else:
                c2_pool = c2_diff.mean(dim=1)
            diff_features = c1_pool - c2_pool
            if self.normalize_difference:
                diff_features = F.normalize(diff_features, p=2, dim=1)

        # DEBUG: Check for NaNs/Infs in diff_features
        if torch.isnan(diff_features).any() or torch.isinf(diff_features).any():
            print(f"[DEBUG MODEL] NaN/Inf in diff_features! Shape: {diff_features.shape}")
            if c1_diff_mask is not None:
                print(f"  c1_mask sum: {c1_diff_mask.sum(dim=1).min().item()}")
            print(f"  c1_diff nan: {torch.isnan(c1_diff).any().item()}")
            print(f"  c2_diff nan: {torch.isnan(c2_diff).any().item()}")
        # ============== SOURCE TYPE CONDITIONING FOR DDG ==============
        # Apply the same source conditioning to diff_features for the ddG prediction.
        # This lets the ddG head learn different behaviours for different data sources.
        if source_type_ids is not None:
            src_emb = self.source_type_embedding(source_type_ids)
            conditioned_diff = torch.cat([diff_features, src_emb], dim=-1)
            diff_features = self.source_type_projection(conditioned_diff)

        # MSCP hybrid note: the intent is to skip the extra 20x gain because MSCP already
        # provides a raw, undiluted mutation signal (ddg_signal_multiplier above still
        # defaults to 20.0, so override it if that behaviour is wanted).
        # vnew64.0: shallow 2-layer GELU MLP + skip connection to preserve variance.
        ddg_hidden_out = self.ddg_hidden(diff_features)
        ddg_pred = (self.ddg_out(ddg_hidden_out) + self.ddg_skip(diff_features)).squeeze(-1)
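        # The head computes ddg = ddg_out(ddg_hidden(x)) + ddg_skip(x): even if the MLP path
        # saturates or its activations collapse, the linear skip path preserves gradient flow
        # and per-sample variance. (Descriptive note only; the three submodules are defined in
        # this class's __init__.)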
        #region agent log
        # Diagnose "train variance but eval constant" which often indicates dropout-only variance or head collapse.
        try:
            if should_log:
                # Diff feature stats across the batch
                df = diff_features.detach()
                df_mean = float(df.mean().item()) if df.numel() else None
                df_std = float(df.std().item()) if df.numel() else None
                df_abs_mean = float(df.abs().mean().item()) if df.numel() else None
                # per-sample spread: average std over features (helps detect "all samples identical")
                df_per_sample_std = float(df.float().std(dim=1).mean().item()) if df.dim() == 2 and df.shape[0] > 0 else None

                # ddg_pred stats (across batch)
                p = ddg_pred.detach()
                p_mean = float(p.mean().item()) if p.numel() else None
                p_std = float(p.std().item()) if p.numel() else None

                # If dropout is active (training), a second forward pass should differ.
                p2_std = None
                p_diff_std = None
                if self.training:
                    p2 = (self.ddg_out(self.ddg_hidden(diff_features)) + self.ddg_skip(diff_features)).squeeze(-1).detach()
                    p2_std = float(p2.std().item()) if p2.numel() else None
                    p_diff_std = float((p2 - p).std().item()) if p2.numel() else None

                # Weight/bias norms (to detect collapse to near-zero weights or bias-only prediction)
                lin_layers = [m for m in self.ddg_hidden.modules() if isinstance(m, nn.Linear)] if hasattr(self, "ddg_hidden") else []
                w0 = lin_layers[0] if len(lin_layers) > 0 else None
                wL = lin_layers[-1] if len(lin_layers) > 0 else None
                w0_norm = float(w0.weight.detach().norm().item()) if w0 is not None else None
                wL_norm = float(wL.weight.detach().norm().item()) if wL is not None else None
                bL_norm = float(wL.bias.detach().norm().item()) if (wL is not None and wL.bias is not None) else None

                payload = {
                    "sessionId": "debug-session",
                    "runId": "pre-fix",
                    "hypothesisId": "I",
                    "location": "architectures.py:DualHeadAffinityPredictor:ddg_head_eval_vs_train",
                    "message": "ddG head collapse vs dropout-only variance diagnostics",
                    "data": {
                        "ddg_log_counter": int(self._ddg_log_counter),
                        "model_training": bool(self.training),
                        "normalize_difference": bool(getattr(self, "normalize_difference", False)),
                        "ddg_signal_gain": float(self.ddg_signal_gain.detach().item()) if hasattr(self, "ddg_signal_gain") else None,
                        "diff_features_mean": df_mean,
                        "diff_features_std": df_std,
                        "diff_features_abs_mean": df_abs_mean,
                        "diff_features_per_sample_std_mean": df_per_sample_std,
                        "ddg_pred_mean": p_mean,
                        "ddg_pred_std": p_std,
                        "ddg_pred2_std": p2_std,
                        "ddg_pred_repeat_diff_std": p_diff_std,
                        "ddg_w0_norm": w0_norm,
                        "ddg_wL_norm": wL_norm,
                        "ddg_bL_norm": bL_norm,
                    },
                    "timestamp": int(time.time() * 1000),
                }
                # Log first (before the file write that may fail on the cluster)
                logger.info(
                    f"[AGENTLOG DDGHEAD] train={self.training} df_std={df_std:.4f} df_ps_std={df_per_sample_std:.4f} "
                    f"pred_std={p_std:.4f} pred_mean={p_mean:.4f} rep_diff_std={p_diff_std if p_diff_std is not None else 'NA'} "
                    f"w0={w0_norm:.2f} wL={wL_norm:.2f} bL={bL_norm if bL_norm is not None else 'NA'}"
                )

                # Layerwise activation trace to locate where variance collapses (ReLU dead / dropout-only variance)
                layer_stats = []
                try:
                    with torch.no_grad():
                        x = diff_features.detach()
                        for li, layer in enumerate(self.ddg_hidden):
                            x = layer(x)
                            st = {
                                "i": int(li),
                                "t": layer.__class__.__name__,
                                "mean": float(x.mean().item()) if x.numel() else None,
                                "std": float(x.std().item()) if x.numel() else None,
                            }
                            if isinstance(layer, nn.ReLU):
                                st["zero_frac"] = float((x == 0).float().mean().item()) if x.numel() else None
                            layer_stats.append(st)
                    # Compact summary for the logs (first 2 + last 2 layers)
                    compact = (layer_stats[:2] + (["..."] if len(layer_stats) > 4 else []) + layer_stats[-2:])
                    logger.info(f"[AGENTLOG DDGACT] train={self.training} layers={compact}")
                except Exception:
                    layer_stats = []
                # File write may fail on the cluster - that's OK
                try:
                    payload["data"]["ddg_layer_stats"] = layer_stats
                    with open("/Users/supantha/Documents/code_v2/protein/.cursor/debug.log", "a") as f:
                        f.write(json.dumps(payload, default=str) + "\n")
                except Exception:
                    pass
        except Exception:
            pass
        #endregion
        # DEBUG: Check ddg_pred
        if torch.isnan(ddg_pred).any() or torch.isinf(ddg_pred).any():
            print("[DEBUG MODEL] NaN/Inf in ddg_pred!")

        return dg_pred, ddg_pred

class DualHeadAffinityPredictionModel(pl.LightningModule):
    """
    Lightning wrapper for dual-head training.
    """
    def __init__(self, predictor: DualHeadAffinityPredictor, learning_rate=1e-4, ddg_loss_weight=1.0):
        super().__init__()
        self.predictor = predictor
        self.learning_rate = learning_rate
        self.ddg_loss_weight = ddg_loss_weight

        self.loss_fn = nn.MSELoss()
        self.pearson_corr = PearsonCorrCoef()
        self.spearman_corr = SpearmanCorrCoef()
        self.r2_score = R2Score()
        self.mse_metric = MeanSquaredError()

        # Save hyperparameters for checkpointing
        self.save_hyperparameters(ignore=['predictor'])

    def forward(self, mut_chain1, mut_chain1_mask, mut_chain2, mut_chain2_mask,
                wt_chain1=None, wt_chain1_mask=None, wt_chain2=None, wt_chain2_mask=None,
                source_type_ids=None):
        return self.predictor(mut_chain1, mut_chain1_mask, mut_chain2, mut_chain2_mask,
                              wt_chain1, wt_chain1_mask, wt_chain2, wt_chain2_mask,
                              source_type_ids=source_type_ids)

    def training_step(self, batch, batch_idx):
        # Mutant data
        (c1, m1, c2, m2, y_mut) = batch["mutant"]

        # Wildtype data with valid mask
        (cw1, w1m, cw2, w2m, y_wt) = batch["wildtype"]
        has_wt = batch["has_wt"]

        if self.predictor.use_dual_head and has_wt.sum() > 0:
            # For samples with wildtype available, use dual-head prediction
            valid_samples = has_wt.bool()

            # Get predictions for valid samples
            dg_pred, ddg_pred = self(
                c1[valid_samples], m1[valid_samples],
                c2[valid_samples], m2[valid_samples],
                cw1[valid_samples], w1m[valid_samples],
                cw2[valid_samples], w2m[valid_samples]
            )

            # Calculate losses for valid samples
            dg_loss = self.loss_fn(dg_pred, y_mut[valid_samples])

            # Calculate true ddG as the difference between mutant and wildtype dG
            true_ddg = y_mut[valid_samples] - y_wt[valid_samples]
            ddg_loss = self.loss_fn(ddg_pred, true_ddg)

            # Combined loss with weighting
            loss = dg_loss + self.ddg_loss_weight * ddg_loss

            # Process remaining samples (without wildtype) with standard prediction
            if (~valid_samples).sum() > 0:
                standard_dg_pred = self(
                    c1[~valid_samples], m1[~valid_samples],
                    c2[~valid_samples], m2[~valid_samples]
                )
                standard_loss = self.loss_fn(standard_dg_pred, y_mut[~valid_samples])

                # Add to the total loss, weighted by the proportion of samples
                n_valid = valid_samples.sum()
                n_total = len(valid_samples)
                loss = (n_valid / n_total) * loss + ((n_total - n_valid) / n_total) * standard_loss
        else:
            # Standard prediction for all samples
            dg_pred = self(c1, m1, c2, m2)
            loss = self.loss_fn(dg_pred, y_mut)

        # Log metrics
        self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True)
        return loss

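    # Worked example of the loss mixing above (illustrative numbers): with n_total = 8
    # samples of which n_valid = 6 have a wildtype partner, the logged loss is
    # 0.75 * (dg_loss + ddg_loss_weight * ddg_loss) + 0.25 * standard_loss, so batches
    # dominated by wildtype-free samples lean mostly on the plain dG objective.
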
    def validation_step(self, batch, batch_idx):
        # Similar to training_step but with more comprehensive metrics
        (c1, m1, c2, m2, y_mut) = batch["mutant"]
        (cw1, w1m, cw2, w2m, y_wt) = batch["wildtype"]
        has_wt = batch["has_wt"]

        # Store predictions and targets for all samples
        all_dg_preds = []
        all_dg_targets = []
        all_ddg_preds = []
        all_ddg_targets = []

        if self.predictor.use_dual_head and has_wt.sum() > 0:
            valid_samples = has_wt.bool()

            # Dual-head prediction for samples with wildtype
            dg_pred, ddg_pred = self(
                c1[valid_samples], m1[valid_samples],
                c2[valid_samples], m2[valid_samples],
                cw1[valid_samples], w1m[valid_samples],
                cw2[valid_samples], w2m[valid_samples]
            )

            # Calculate true ddG
            true_ddg = y_mut[valid_samples] - y_wt[valid_samples]

            # Store predictions and targets
            all_dg_preds.append(dg_pred)
            all_dg_targets.append(y_mut[valid_samples])
            all_ddg_preds.append(ddg_pred)
            all_ddg_targets.append(true_ddg)

            # Process remaining samples with standard prediction
            if (~valid_samples).sum() > 0:
                standard_dg_pred = self(
                    c1[~valid_samples], m1[~valid_samples],
                    c2[~valid_samples], m2[~valid_samples]
                )
                all_dg_preds.append(standard_dg_pred)
                all_dg_targets.append(y_mut[~valid_samples])
        else:
            # Standard prediction for all samples
            dg_pred = self(c1, m1, c2, m2)
            all_dg_preds.append(dg_pred)
            all_dg_targets.append(y_mut)

            # For samples with wildtype, calculate an implicit ddG from the two dG predictions.
            # (Kept inside the non-dual-head branch: here dg_pred covers the full batch, so
            # indexing it with the has_wt mask is well defined.)
            if has_wt.sum() > 0:
                valid_samples = has_wt.bool()
                wt_dg_pred = self(cw1[valid_samples], w1m[valid_samples],
                                  cw2[valid_samples], w2m[valid_samples])

                implicit_ddg_pred = dg_pred[valid_samples] - wt_dg_pred
                true_ddg = y_mut[valid_samples] - y_wt[valid_samples]

                all_ddg_preds.append(implicit_ddg_pred)
                all_ddg_targets.append(true_ddg)

        # Concatenate all predictions and targets
        if all_dg_preds:
            all_dg_preds = torch.cat(all_dg_preds)
            all_dg_targets = torch.cat(all_dg_targets)

            # Calculate dG metrics
            dg_mse = self.mse_metric(all_dg_preds, all_dg_targets)
            dg_pearson = self.pearson_corr(all_dg_preds, all_dg_targets)
            dg_spearman = self.spearman_corr(all_dg_preds, all_dg_targets)
            dg_r2 = self.r2_score(all_dg_preds, all_dg_targets)

            # Log dG metrics
            self.log('val_dg_mse', dg_mse, on_epoch=True, prog_bar=True)
            self.log('val_dg_pearson', dg_pearson, on_epoch=True)
            self.log('val_dg_spearman', dg_spearman, on_epoch=True)
            self.log('val_dg_r2', dg_r2, on_epoch=True)

            # Calculate ddG metrics if available
            if all_ddg_preds:
                all_ddg_preds = torch.cat(all_ddg_preds)
                all_ddg_targets = torch.cat(all_ddg_targets)

                ddg_mse = self.mse_metric(all_ddg_preds, all_ddg_targets)
                ddg_pearson = self.pearson_corr(all_ddg_preds, all_ddg_targets)
                ddg_spearman = self.spearman_corr(all_ddg_preds, all_ddg_targets)
                ddg_r2 = self.r2_score(all_ddg_preds, all_ddg_targets)

                # Log ddG metrics
                self.log('val_ddg_mse', ddg_mse, on_epoch=True, prog_bar=True)
                self.log('val_ddg_pearson', ddg_pearson, on_epoch=True)
                self.log('val_ddg_spearman', ddg_spearman, on_epoch=True)
                self.log('val_ddg_r2', ddg_r2, on_epoch=True)

                # Combined validation metric for early stopping
                combined_metric = dg_mse + self.ddg_loss_weight * ddg_mse
                self.log('val_combined_metric', combined_metric, on_epoch=True)

        return {'val_dg_mse': dg_mse if 'dg_mse' in locals() else None,
                'val_ddg_mse': ddg_mse if 'ddg_mse' in locals() else None}

    def test_step(self, batch, batch_idx):
        # Reuse the validation logic (and its metric computation) for test evaluation
        return self.validation_step(batch, batch_idx)

    def configure_optimizers(self):
        optimizer = torch.optim.AdamW(self.parameters(), lr=self.learning_rate)
        scheduler = torch.optim.lr_scheduler.OneCycleLR(
            optimizer, max_lr=self.learning_rate, total_steps=self.trainer.estimated_stepping_batches
        )
        # OneCycleLR is sized in optimizer steps (total_steps above), so step it per batch
        # rather than once per epoch.
        return {
            "optimizer": optimizer,
            "lr_scheduler": {"scheduler": scheduler, "interval": "step"},
            "monitor": "val_combined_metric",
        }
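
# -----------------------------------------------------------------------------
# Usage sketch (illustrative only, kept as comments so the module stays
# import-safe): how the dual-head Lightning wrapper above is expected to be
# driven. `build_predictor()` and `build_dataloaders()` are hypothetical
# helpers, not part of this file; the batch dict format matches
# training_step/validation_step.
# -----------------------------------------------------------------------------
# predictor = build_predictor()  # a configured DualHeadAffinityPredictor
# model = DualHeadAffinityPredictionModel(
#     predictor=predictor, learning_rate=1e-4, ddg_loss_weight=1.0)
# train_loader, val_loader = build_dataloaders()
# # Each batch: {"mutant": (c1, m1, c2, m2, y_mut),
# #              "wildtype": (cw1, w1m, cw2, w2m, y_wt),
# #              "has_wt": BoolTensor[B]}
# trainer = pl.Trainer(max_epochs=10)
# trainer.fit(model, train_loader, val_loader)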