Text Classification
Transformers
Safetensors
PyTorch
English
longformer
fake-news-detection
misinformation-detection
news-classification
multi-dataset
vertex-ai
Instructions for using PushkarKumar/veritas_ai_v2 with libraries, inference providers, notebooks, and local apps. Follow the links below to get started.
- Libraries
- Transformers
How to use PushkarKumar/veritas_ai_v2 with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-classification", model="PushkarKumar/veritas_ai_v2")

# Load model directly
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("PushkarKumar/veritas_ai_v2")
model = AutoModelForSequenceClassification.from_pretrained("PushkarKumar/veritas_ai_v2")
```

- Notebooks
- Google Colab
- Kaggle
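For a quick sanity check of the pipeline shown above, the sketch below classifies a single piece of text. The example article and the printed label are illustrative assumptions; the actual label names come from the model's `id2label` config.

```python
from transformers import pipeline

# Minimal sketch: run the text-classification pipeline on one article.
pipe = pipeline("text-classification", model="PushkarKumar/veritas_ai_v2")

article = "Scientists confirm the moon is made entirely of cheese, sources say."

# truncation=True keeps long inputs within the model's 4096-token window.
result = pipe(article, truncation=True)
print(result)  # e.g. [{'label': 'LABEL_1', 'score': 0.98}] -- label names depend on the model config
```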
tokenizer_config.json

```json
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "50264": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "errors": "replace",
  "mask_token": "<mask>",
  "model_max_length": 4096,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "LongformerTokenizer",
  "trim_offsets": true,
  "unk_token": "<unk>"
}
```
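The config above describes a LongformerTokenizer with RoBERTa-style special tokens and a 4096-token maximum length. A minimal sketch of loading the tokenizer and confirming those settings (the sample text is an assumption; the field values mirror the config shown above):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("PushkarKumar/veritas_ai_v2")

# These values should match tokenizer_config.json above.
print(tokenizer.model_max_length)  # 4096
print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.pad_token, tokenizer.mask_token)  # <s> </s> <pad> <mask>

# Long articles need to be truncated to the 4096-token window before inference.
enc = tokenizer("Some long news article ...", truncation=True, max_length=4096, return_tensors="pt")
print(enc["input_ids"].shape)
```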