Update README.md

README.md (CHANGED)
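This commit completes the truncated `print(` calls in the README's generation examples: each now decodes the generated ids with `tokenizer.decode(output_ids[0], skip_special_tokens=True)` before printing.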
@@ -57,7 +57,7 @@ tokenizer = model.tokenizer
 input_text = tokenizer.apply_chat_template(messages, tokenize=False)
 input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda:0")
 output_ids = model.generate(input_ids, max_length=150)
-print(
+print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
 ```

@@ -73,7 +73,7 @@ tokenizer = model.tokenizer
 input_text = tokenizer.apply_chat_template(messages, tokenize=False)
 input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
 output_ids = model.generate(input_ids, max_length=250)
-print(
+print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
 ```

 # Call the Italian expert:

@@ -87,7 +87,7 @@ tokenizer = model.tokenizer
 input_text = tokenizer.apply_chat_template(messages, tokenize=False)
 input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
 output_ids = model.generate(input_ids, temperature=0.6, do_sample=True, top_p=0.9, top_k=20, max_length=500)
-print(
+print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
 ```

 # Call the French expert:

@@ -102,7 +102,7 @@ input_text = tokenizer.apply_chat_template(messages, tokenize=False)
 print(input_text)
 input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
 output_ids = model.generate(input_ids, temperature=0.6, do_sample=True, top_p=0.9, top_k=20, max_length=250)
-print(
+print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
 ```

 # Call the Scandinavian expert:

@@ -116,7 +116,7 @@ tokenizer = model.tokenizer
 input_text = tokenizer.apply_chat_template(messages, tokenize=False)
 input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
 output_ids = model.generate(input_ids, temperature=0.1, do_sample=True, top_p=0.9, top_k=20, max_length=250)
-print(
+print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
 ```
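All five snippets share the same generate-then-decode pattern that this commit completes. Below is a minimal self-contained sketch of that pattern; the `AutoModelForCausalLM` loading and the `model_repo_or_path` placeholder are assumptions for illustration, since the README's actual loading code (which exposes the tokenizer as `model.tokenizer`) sits outside this diff.

```python
# A minimal sketch of the generate-then-decode pattern used in every
# snippet above. ASSUMPTIONS: the checkpoint name is a placeholder and
# the model is loaded with plain transformers; the README's own loading
# code is outside this diff.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "model_repo_or_path"  # placeholder, not the actual checkpoint
device = "cuda:0" if torch.cuda.is_available() else "cpu"

model = AutoModelForCausalLM.from_pretrained(repo).to(device)
tokenizer = AutoTokenizer.from_pretrained(repo)

messages = [{"role": "user", "content": "Hello!"}]

# Render the chat template to a prompt string, tokenize, move to device.
input_text = tokenizer.apply_chat_template(messages, tokenize=False)
input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)

# Sampling settings mirror the README's expert examples.
output_ids = model.generate(
    input_ids, do_sample=True, temperature=0.6, top_p=0.9, top_k=20, max_length=250
)

# The line this commit completes: decode the generated ids back to
# text, dropping special tokens such as chat-template markers.
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```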