Update README.md
Browse files
README.md
CHANGED
|
@@ -56,7 +56,26 @@ widget:
|
|
| 56 |
|
| 57 |
## Example usage
|
| 58 |
```python
|
|
|
|
| 59 |
|
| 60 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
```
|
|
|
|
| 56 |
|
| 57 |
## Example usage
|
| 58 |
```python
|
| 59 |
# Example: classify Arabic text with the ArabicT5 classification model.
from transformers import T5ForConditionalGeneration, T5Tokenizer

model_name = "Hezam/ArabicT5_Classification"
model = T5ForConditionalGeneration.from_pretrained(model_name)
tokenizer = T5Tokenizer.from_pretrained(model_name)

text = "الزين فيك القناه الاولي المغربيه الزين فيك القناه الاولي المغربيه اخبارنا المغربيه متابعه تفاجا زوار موقع القناه الاولي المغربي"

# Tokenize to fixed-length PyTorch tensors (truncate/pad to 200 tokens).
tokens = tokenizer(
    text,
    max_length=200,
    truncation=True,
    padding="max_length",
    return_tensors="pt",
)

# Generate the class label; max_length=3 keeps the output to the label token(s).
output = model.generate(
    tokens['input_ids'],
    max_length=3,
    length_penalty=10,
)

# Decode the generated ids back to the label string(s).
output = [
    tokenizer.decode(ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
    for ids in output
]
output
|
| 78 |
+
|
| 79 |
```

```bash
['5']
```
|