Update README.md
Browse files
README.md
CHANGED
|
@@ -20,10 +20,10 @@ The output will be a JSON object.
|
|
| 20 |   ```python
| 21 |   from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, Conversation
| 22 |
| 23 | - # Load model
| 24 | - tokenizer = AutoTokenizer.from_pretrained('fineinstructions/query_templatizer', revision=None)
| 25 |   tokenizer.padding_side = 'left'
| 26 | - model = AutoModelForCausalLM.from_pretrained('fineinstructions/query_templatizer', revision=None)
| 27 |   pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, pad_token_id=tokenizer.pad_token_id, return_full_text=False)
| 28 |
| 29 |   # Run inference to templatize the query
| 20 |   ```python
| 21 |   from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, Conversation
| 22 |
| 23 | + # Load tokenizer and model
| 24 | + tokenizer = AutoTokenizer.from_pretrained('fineinstructions/query_templatizer', revision=None)
| 25 |   tokenizer.padding_side = 'left'
| 26 | + model = AutoModelForCausalLM.from_pretrained('fineinstructions/query_templatizer', revision=None)
| 27 |   pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, pad_token_id=tokenizer.pad_token_id, return_full_text=False)
| 28 |
| 29 |   # Run inference to templatize the query