Update README.md
Browse files
README.md
CHANGED
|
@@ -22,6 +22,9 @@ This model is a fine-tuned version of **`meta-llama/Llama-3.2-1B`** optimized fo
|
|
| 22 |
### Direct Use
|
| 23 |
|
| 24 |
```python
|
|
|
|
|
|
|
|
|
|
| 25 |
model_id_new = "Vedant3907/Llama-3.2-1B-PersonaClassifier"
|
| 26 |
|
| 27 |
tokenizer = AutoTokenizer.from_pretrained(model_id_new)
|
|
@@ -44,7 +47,7 @@ pipe = pipeline(task="text-generation",
|
|
| 44 |
temperature=0.1,
|
| 45 |
pad_token_id = tokenizer.eos_token_id)
|
| 46 |
|
| 47 |
-
result = pipe(
|
| 48 |
|
| 49 |
print(extract_labels(result[0]['generated_text']))
|
| 50 |
|
|
|
|
| 22 |
### Direct Use
|
| 23 |
|
| 24 |
```python
|
| 25 |
+
from transformers import pipeline
|
| 26 |
+
|
| 27 |
+
|
| 28 |
model_id_new = "Vedant3907/Llama-3.2-1B-PersonaClassifier"
|
| 29 |
|
| 30 |
tokenizer = AutoTokenizer.from_pretrained(model_id_new)
|
|
|
|
| 47 |
temperature=0.1,
|
| 48 |
pad_token_id = tokenizer.eos_token_id)
|
| 49 |
|
| 50 |
+
result = pipe(prompt)
|
| 51 |
|
| 52 |
print(extract_labels(result[0]['generated_text']))
|
| 53 |
|