McClain committed on
Commit
b429e26
·
verified ·
1 Parent(s): 0eb710f

Remove test_generation.py

Browse files
Files changed (1) hide show
  1. test_generation.py +0 -51
test_generation.py DELETED
@@ -1,51 +0,0 @@
"""Smoke-test generation for the RL-optimized PlasmidGPT-GRPO checkpoint.

Loads the model and tokenizer from the current directory (the checkpoint
ships custom code, hence trust_remote_code=True), samples three plasmid
sequences from a fixed start sequence, and prints a short summary of each.

Run as a script: `python test_generation.py` from the checkpoint directory.
"""

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM


def main() -> None:
    """Load model/tokenizer, generate three sequences, and print summaries."""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f"Using device: {device}\n")

    print("Loading RL-optimized PlasmidGPT-GRPO model...")
    # trust_remote_code is required: the checkpoint provides its own model class.
    model = AutoModelForCausalLM.from_pretrained(
        ".",
        trust_remote_code=True
    ).to(device)
    model.eval()

    tokenizer = AutoTokenizer.from_pretrained(
        ".",
        trust_remote_code=True
    )

    print("Generating optimized plasmid sequences...\n")

    start_sequence = 'ATGGCTAGCGAATTCGGCGCGCCT'
    print(f"Start sequence: {start_sequence}\n")

    input_ids = tokenizer.encode(start_sequence, return_tensors='pt').to(device)

    # GPT-style tokenizers frequently define no pad token; fall back to EOS so
    # generate() does not receive pad_token_id=None (fix: original passed the
    # raw attribute through unconditionally).
    pad_id = tokenizer.pad_token_id
    if pad_id is None:
        pad_id = tokenizer.eos_token_id

    # Inference only — no_grad avoids building the autograd graph and cuts
    # memory use during sampling (fix: original generated with grad enabled).
    with torch.no_grad():
        outputs = model.generate(
            input_ids,
            max_length=400,
            num_return_sequences=3,
            temperature=0.8,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            pad_token_id=pad_id,
            eos_token_id=tokenizer.eos_token_id
        )

    print("=" * 80)
    for i, output in enumerate(outputs, 1):
        sequence = tokenizer.decode(output, skip_special_tokens=True)
        print(f"\nPlasmid {i}:")
        print(f" Length: {len(sequence)} bp")
        print(f" First 100 bp: {sequence[:100]}")
        print(f" Last 100 bp: {sequence[-100:]}")
        # Separator after each plasmid; NOTE(review): the capture lost the
        # original indentation, so loop membership of this line is inferred.
        print("\n" + "=" * 80)

    print("\nNote: These sequences are generated by an RL-optimized model trained to:")
    print(" ✓ Include proper genetic elements (ori, promoters, CDS, markers)")
    print(" ✓ Avoid repeat regions > 50 bp")
    print(" ✓ Generate compact, functional plasmids")
    print(" ✓ Organize genes in proper cassettes (promoter → CDS → terminator)")


if __name__ == "__main__":
    main()