"""
Test that existing notebook code still works with updated HF files.

Smoke test: loads the codonGPT model plus the project's codon tokenizer,
then replays the exact usage pattern from the existing notebook. Any
failure is caught at the top level and reported with a full traceback.
"""

# NOTE(review): Seq, GPT2Tokenizer, LogitsProcessor, and torch are not used
# in the visible code — presumably imported to mirror the notebook's import
# cell; confirm before removing.
from Bio.Seq import Seq
from transformers import GPT2LMHeadModel, GPT2Tokenizer, LogitsProcessor
import torch


print("Testing notebook compatibility...")

try:
    # Project-local imports are inside the try so a missing/broken module
    # is reported as a compatibility failure rather than a raw ImportError.
    from tokenizer import CodonTokenizer
    from synonymous_logit_processor import generate_candidate_codons_with_generate

    print("Loading model and tokenizer...")
    model = GPT2LMHeadModel.from_pretrained("naniltx/codonGPT")
    tokenizer = CodonTokenizer()
    print("✓ Model and tokenizer loaded successfully")

    print("\nTesting notebook usage pattern...")

    # Prompt codons exactly as in the notebook.
    # (The original script assigned initial_codons twice; the first value
    # ["GCT", "TGT", "GAT"] was immediately overwritten dead code, so only
    # the effective assignment is kept here.)
    initial_codons = ['ATG', 'GAA', 'CTT', 'GTC']
    print("The initial prompt codons are:", " ".join(initial_codons))

    # Same call signature the notebook uses; model/tokenizer are picked up
    # by the helper (presumably via module-level state — verify in
    # synonymous_logit_processor).
    generated_codons_generate = generate_candidate_codons_with_generate(initial_codons, temperature=0.7, top_k=5)
    print("Generated with model.generate():", " ".join(generated_codons_generate))

    print("\n✅ Notebook compatibility test passed!")
    print("Your existing notebook code will continue to work unchanged.")

except Exception as e:
    # Top-level boundary: report and dump the traceback instead of crashing,
    # so the script always prints a pass/fail verdict.
    print(f"\n❌ Compatibility test failed: {e}")
    import traceback
    traceback.print_exc()