Update README.md
Browse files
README.md
CHANGED
|
@@ -30,7 +30,8 @@ tokenizer = AutoTokenizer.from_pretrained(model_id)
|
|
| 30 |
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16)
|
| 31 |
messages = [
|
| 32 |
{"role": "system", "content": "Using the context, answer the users question."},
|
| 33 |
-
{"role": "user", "content":
|
|
|
|
| 34 |
]
|
| 35 |
inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to(model.device)
|
| 36 |
outputs = model.generate(inputs, max_new_tokens=100)
|
|
|
|
| 30 |
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16)
|
| 31 |
messages = [
|
| 32 |
{"role": "system", "content": "Using the context, answer the users question."},
|
| 33 |
+
{"role": "user", "content": f"Context: {context_content}\n\nQuestion: Your input here..."}
|
| 34 |
+
|
| 35 |
]
|
| 36 |
inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to(model.device)
|
| 37 |
outputs = model.generate(inputs, max_new_tokens=100)
|