"""
FlowAMP Usage Example

This script demonstrates how to use the FlowAMP model for AMP generation.
Note: This is a demonstration version. For full functionality, you'll need to train the model.
"""

import torch
from final_flow_model import AMPFlowMatcherCFGConcat
def main():
    print("=== FlowAMP Usage Example ===")
    print("This demonstrates the model architecture and usage.")

    # Pick the compute device: CUDA if available, otherwise CPU.
    if torch.cuda.is_available():
        device = torch.device("cuda")
        print("Using CUDA")
    else:
        device = torch.device("cpu")
        print("Using CPU")

    # Instantiate the flow-matching model with the demo hyperparameters.
    model = AMPFlowMatcherCFGConcat(
        hidden_dim=480,
        compressed_dim=80,
        n_layers=4,
        n_heads=8,
        dim_ff=1920,
        dropout=0.1,
        max_seq_len=25,
        use_cfg=True,
    ).to(device)

    print("Model initialized successfully!")
    print(f"Model parameters: {sum(p.numel() for p in model.parameters()):,}")

    # Build a dummy batch of compressed latent vectors and random timesteps
    # in [0, 1) to exercise the forward pass.
    batch_size = 2
    seq_len = 25
    compressed_dim = 80

    x = torch.randn(batch_size, seq_len, compressed_dim).to(device)
    time_steps = torch.rand(batch_size, 1).to(device)

    # Run a single forward pass without tracking gradients.
    with torch.no_grad():
        output = model(x, time_steps)

    print(f"Input shape: {x.shape}")
    print(f"Output shape: {output.shape}")
    print("✓ Model forward pass successful!")

    print("\nTo use this model for AMP generation:")
    print("1. Train the model using the provided training scripts (a minimal loss sketch follows below)")
    print("2. Use generate_amps.py for peptide generation")
    print("3. Use test_generated_peptides.py for evaluation")


if __name__ == "__main__":
    main()