aishwaryac11 commited on
Commit
1fd24ad
·
verified ·
1 Parent(s): 512f596

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +0 -5
README.md CHANGED
@@ -88,11 +88,9 @@ To select a language pair for translation, include one of the following tags in
88
  ```
89
  from transformers import AutoTokenizer, AutoModelForCausalLM
90
 
91
-
92
  tokenizer = AutoTokenizer.from_pretrained("nvidia/Riva-Translate-4B-Instruct-v1.1")
93
  model = AutoModelForCausalLM.from_pretrained("nvidia/Riva-Translate-4B-Instruct-v1.1").cuda()
94
 
95
-
96
  # Use the prompt template (along with chat template)
97
  messages = [
98
  {
@@ -101,10 +99,8 @@ messages = [
101
  },
102
  {"role": "user", "content": "The GRACE mission is a collaboration between NASA and the German Aerospace Center."},
103
  ]
104
-
105
  tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)
106
  outputs = model.generate(tokenized_chat, max_new_tokens=128, pad_token_id=tokenizer.eos_token_id)
107
-
108
  print(tokenizer.decode(outputs[0]))
109
  ```
110
 
@@ -188,7 +184,6 @@ curl http://localhost:8000/v1/chat/completions \
188
  ```
189
 
190
  ### Chat Template Structure
191
-
192
  ```
193
  {%- set language_pairs = {
194
  'en-zh-cn': {'source': 'English', 'target': 'Simplified Chinese'},
 
88
  ```
89
  from transformers import AutoTokenizer, AutoModelForCausalLM
90
 
 
91
  tokenizer = AutoTokenizer.from_pretrained("nvidia/Riva-Translate-4B-Instruct-v1.1")
92
  model = AutoModelForCausalLM.from_pretrained("nvidia/Riva-Translate-4B-Instruct-v1.1").cuda()
93
 
 
94
  # Use the prompt template (along with chat template)
95
  messages = [
96
  {
 
99
  },
100
  {"role": "user", "content": "The GRACE mission is a collaboration between NASA and the German Aerospace Center."},
101
  ]
 
102
  tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)
103
  outputs = model.generate(tokenized_chat, max_new_tokens=128, pad_token_id=tokenizer.eos_token_id)
 
104
  print(tokenizer.decode(outputs[0]))
105
  ```
106
 
 
184
  ```
185
 
186
  ### Chat Template Structure
 
187
  ```
188
  {%- set language_pairs = {
189
  'en-zh-cn': {'source': 'English', 'target': 'Simplified Chinese'},