Commit ·
672a649
1
Parent(s): 6488729
Update README.md
Browse files
README.md
CHANGED
|
@@ -96,9 +96,7 @@ You may also construct the pipeline from the loaded model and tokenizer yourself
|
|
| 96 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 97 |
|
| 98 |
model_name = "shashank-mugiwara/thor" # either a local folder or a Hugging Face model name
|
| 99 |
-
|
| 100 |
-
# You can find an example prompt in the experiment logs.
|
| 101 |
-
prompt = "<|prompt|>How are you?</s><|answer|>"
|
| 102 |
|
| 103 |
tokenizer = AutoTokenizer.from_pretrained(
|
| 104 |
model_name,
|
|
|
|
| 96 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 97 |
|
| 98 |
model_name = "shashank-mugiwara/thor" # either a local folder or a Hugging Face model name
|
| 99 |
+
prompt = "<|prompt|>What is thor service?</s><|answer|>"
|
|
|
|
|
|
|
| 100 |
|
| 101 |
tokenizer = AutoTokenizer.from_pretrained(
|
| 102 |
model_name,
|