Update README.md
Browse files
README.md
CHANGED
|
@@ -25,10 +25,15 @@ Below is code using the Transformers library to generate sequences using ProGemma
|
|
Previous version (README lines 25–34):

```python
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("JuIm/ProGemma")
tokenizer = AutoTokenizer.from_pretrained("JuIm/Amino-Acid-Sequence-Tokenizer")
progemma = pipeline("text-generation", model=model, tokenizer=tokenizer)
sequence = progemma("<bos>", top_k=950, max_length=100, num_return_sequences=1, do_sample=True, repetition_penalty=1.2, eos_token_id=21, pad_token_id=22, bos_token_id=20)
s = sequence[0]['generated_text']
print(s)
```
Updated version (README lines 25–39, blank lines added between statements for readability):

```python
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("JuIm/ProGemma")

tokenizer = AutoTokenizer.from_pretrained("JuIm/Amino-Acid-Sequence-Tokenizer")

progemma = pipeline("text-generation", model=model, tokenizer=tokenizer)

sequence = progemma("<bos>", top_k=950, max_length=100, num_return_sequences=1, do_sample=True, repetition_penalty=1.2, eos_token_id=21, pad_token_id=22, bos_token_id=20)

s = sequence[0]['generated_text']
print(s)
```