Update README.md

README.md (changed)

---
license: mit
tags:
- biology
- mrna design
- codon optimization
pipeline_tag: translation
---

Greedy decoding selects the most likely token at each step; it is faster and deterministic.

```python
from transformers import AutoTokenizer, BartForConditionalGeneration

# Load model and tokenizer from the Hub
tokenizer = AutoTokenizer.from_pretrained("lareaulab/Trias", use_fast=True)
model = BartForConditionalGeneration.from_pretrained("lareaulab/Trias")

# Input sequence
species = "Homo sapiens"
protein_sequence = "MTEITAAMVKELRESTGAGMMDCKNALSETQ*"
input_seq = f">>{species}<< {protein_sequence}"

# Tokenize
input_ids = tokenizer.encode(input_seq, return_tensors="pt")

# Generate codon sequence (greedy)
outputs = model.generate(input_ids, max_length=tokenizer.model_max_length)
codon_sequence = tokenizer.decode(outputs[0], skip_special_tokens=True)

print("Codon sequence:", codon_sequence)
```
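
The decoded output above is whatever string the Trias tokenizer produces; whether codons come back separated by spaces is not stated in this card, so the clean-up below is only a sketch under that assumption:

```python
# Assumption (not documented here): codons are decoded as space-separated
# tokens, so removing the separators yields a plain nucleotide string.
nucleotide_sequence = codon_sequence.replace(" ", "")
print("Nucleotide sequence:", nucleotide_sequence)
```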

Beam search example:

```python
outputs = model.generate(
    input_ids,
    num_beams=5,
    early_stopping=True,
    max_length=tokenizer.model_max_length,
)
```
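
As in the greedy example, `generate` returns token IDs, so the beam search result is decoded the same way (reusing `tokenizer` and `outputs` from above):

```python
# Decode the top beam into a codon sequence
codon_sequence = tokenizer.decode(outputs[0], skip_special_tokens=True)
print("Codon sequence:", codon_sequence)
```

Passing `num_return_sequences` together with `num_beams` (a standard `generate` argument, not used in this card) would return several candidate codon sequences instead of only the best one.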