{ "architectures": [ "RobertaForSequenceClassification" ], "attention_probs_dropout_prob": 0.1, "bos_token_id": 0, "classifier_dropout": null, "dtype": "float32", "eos_token_id": 2, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 768, "id2label": { "0": "All-or-Nothing", "1": "Catastrophizing", "2": "Discounting the Positive", "3": "Emotional Reasoning", "4": "Fortune-telling", "5": "Labeling", "6": "Mental Filter", "7": "Mind Reading", "8": "No Distortion", "9": "Overgeneralization", "10": "Personalization", "11": "Should Statements" }, "initializer_range": 0.02, "intermediate_size": 3072, "label2id": { "All-or-Nothing": 0, "Catastrophizing": 1, "Discounting the Positive": 2, "Emotional Reasoning": 3, "Fortune-telling": 4, "Labeling": 5, "Mental Filter": 6, "Mind Reading": 7, "No Distortion": 8, "Overgeneralization": 9, "Personalization": 10, "Should Statements": 11 }, "layer_norm_eps": 1e-05, "max_position_embeddings": 514, "model_type": "roberta", "num_attention_heads": 12, "num_hidden_layers": 12, "pad_token_id": 1, "position_embedding_type": "absolute", "problem_type": "single_label_classification", "transformers_version": "4.57.1", "type_vocab_size": 1, "use_cache": true, "vocab_size": 50265 }