nimishgarg committed
Commit 84fecb3 · verified · 1 Parent(s): f6b0025

Upload 7 files

Files changed (7)
  1. README.md +119 -0
  2. config.json +39 -0
  3. merges.txt +0 -0
  4. model.safetensors +3 -0
  5. special_tokens_map.json +51 -0
  6. tokenizer_config.json +57 -0
  7. vocab.json +0 -0
README.md ADDED
@@ -0,0 +1,119 @@
# RoBERTa-Base Quantized Model for Topic Classification

This repository hosts a quantized version of the RoBERTa model, fine-tuned for topic classification using the AG News dataset. The model has been optimized using FP16 quantization for efficient deployment without significant accuracy loss.

## Model Details

- **Model Architecture:** RoBERTa Base
- **Task:** Multi-class Topic Classification (4 classes)
- **Dataset:** AG News (Hugging Face Datasets)
- **Quantization:** Float16
- **Fine-tuning Framework:** Hugging Face Transformers

---

## Installation

```bash
pip install transformers torch datasets
```

---

## Loading the Model

```python
import torch
from transformers import (
    RobertaForSequenceClassification,
    RobertaTokenizer,
    pipeline,
)

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load tokenizer and model
# (replace "roberta-base" with the path to this repository's fine-tuned checkpoint)
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
model = RobertaForSequenceClassification.from_pretrained("roberta-base", num_labels=4).to(device)

# Define test sentences
samples = [
    "Tensions rise in the Middle East as diplomats gather for emergency talks to prevent further escalation.",
    "Tesla reports a 25% increase in quarterly revenue, driven by strong demand for its Model Y vehicles in Asia.",
    "Researchers develop a new quantum computing chip that significantly reduces energy consumption.",
    "Argentina defeats Brazil 2-1 in the Copa América final, securing their 16th continental title.",
    "Meta unveils its latest AI model capable of generating 3D virtual environments from text prompts."
]

# Load pipeline for inference
classifier = pipeline(
    "text-classification",
    model=model,
    tokenizer=tokenizer,
    device=0 if device == "cuda" else -1,  # -1 runs the pipeline on CPU
)

predictions = classifier(samples)

# Print results
for text, pred in zip(samples, predictions):
    print(f"\nText: {text}\nPredicted Topic: {pred['label']} (Score: {pred['score']:.4f})")
```

---

## Performance Metrics

- **Accuracy:** 0.9471
- **Precision:** 0.9471
- **Recall:** 0.9471
- **F1 Score:** 0.9471

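These four values are aggregate classification scores. The following is a minimal sketch of a `compute_metrics` callback that produces them during evaluation with the Hugging Face `Trainer`, assuming `scikit-learn` is installed (the averaging mode is an assumption, not taken from the original run):

```python
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def compute_metrics(eval_pred):
    # eval_pred is a (logits, labels) pair supplied by the Trainer
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    # Weighted averaging over the 4 topics is an assumption; the original run may differ
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="weighted"
    )
    return {
        "accuracy": accuracy_score(labels, preds),
        "precision": precision,
        "recall": recall,
        "f1": f1,
    }
```
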
---

## Fine-Tuning Details

### Dataset

The dataset is sourced from Hugging Face's ag_news dataset. It contains 120,000 training samples and 7,600 test samples, with each news article labeled into one of four categories: World, Sports, Business, or Sci/Tech. The original dataset was used as provided, and input texts were tokenized using the RoBERTa tokenizer and truncated/padded to a maximum length of 128 tokens.

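A minimal sketch of that preprocessing step with the `datasets` library (column and variable names follow the standard `ag_news` schema; this is an illustration rather than the exact training script):

```python
from datasets import load_dataset
from transformers import RobertaTokenizer

dataset = load_dataset("ag_news")  # splits: "train" (120,000 rows) and "test" (7,600 rows)
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

def tokenize(batch):
    # Truncate/pad every article to 128 tokens, as described above
    return tokenizer(
        batch["text"],
        truncation=True,
        padding="max_length",
        max_length=128,
    )

tokenized = dataset.map(tokenize, batched=True)
```
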
### Training

- **Epochs:** 3
- **Batch size:** 8
- **Learning rate:** 2e-5
- **Evaluation strategy:** `epoch`

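A minimal sketch of how these hyperparameters map onto Hugging Face's `TrainingArguments` and `Trainer` (the output path is illustrative, and `tokenized`/`compute_metrics` refer to the sketches above; this is not necessarily the exact training script):

```python
from transformers import RobertaForSequenceClassification, Trainer, TrainingArguments

model = RobertaForSequenceClassification.from_pretrained("roberta-base", num_labels=4)

training_args = TrainingArguments(
    output_dir="./results",             # illustrative output path
    num_train_epochs=3,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    learning_rate=2e-5,
    eval_strategy="epoch",              # `evaluation_strategy` in older transformers releases
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized["train"],   # tokenized AG News splits from the dataset sketch
    eval_dataset=tokenized["test"],
    compute_metrics=compute_metrics,    # metrics callback from the sketch above
)

trainer.train()
```
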
---

## Quantization

Post-training quantization was applied using PyTorch's `half()` precision (FP16) to reduce model size and inference time.

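A minimal sketch of that quantization step, assuming a fine-tuned FP32 checkpoint is available locally (the paths are illustrative):

```python
from transformers import RobertaForSequenceClassification

# Load the fine-tuned FP32 checkpoint (path is illustrative)
model = RobertaForSequenceClassification.from_pretrained("./results/checkpoint-final")

# Cast all weights to FP16, roughly halving on-disk and in-memory size
model = model.half()

# Save the quantized weights (written as model.safetensors)
model.save_pretrained("./roberta-agnews-fp16")
```
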
---

## Repository Structure

```
.
├── config.json               # Model configuration
├── merges.txt                # Byte Pair Encoding (BPE) merge rules for tokenizer
├── model.safetensors         # Quantized model weights
├── README.md                 # Model documentation
├── special_tokens_map.json   # Tokenizer special tokens
├── tokenizer_config.json     # Tokenizer configuration
└── vocab.json                # Tokenizer vocabulary
```

---

## Limitations

- The model is trained specifically for 4-class topic classification on the AG News dataset and may not generalize to other domains or label sets.
- FP16 quantization may result in slight numerical instability in edge cases.

---

## Contributing

Feel free to open issues or submit pull requests to improve the model or documentation.
config.json ADDED
@@ -0,0 +1,39 @@
{
  "architectures": [
    "RobertaForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2",
    "3": "LABEL_3"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2,
    "LABEL_3": 3
  },
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "problem_type": "single_label_classification",
  "torch_dtype": "float16",
  "transformers_version": "4.51.3",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 50265
}
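The `id2label`/`label2id` entries above use generic `LABEL_*` names. A hedged sketch of remapping them to the AG News topic names when loading the model (the class order shown is the dataset's published order and should be checked against how labels were encoded during fine-tuning):

```python
from transformers import RobertaForSequenceClassification

# AG News class order as published by the dataset: 0=World, 1=Sports, 2=Business, 3=Sci/Tech
id2label = {0: "World", 1: "Sports", 2: "Business", 3: "Sci/Tech"}
label2id = {name: idx for idx, name in id2label.items()}

model = RobertaForSequenceClassification.from_pretrained(
    "roberta-base",   # replace with the path to this repository's checkpoint
    num_labels=4,
    id2label=id2label,
    label2id=label2id,
)
```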
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:140e9e7cd268315932ffb1de35b9c63eb654da8d45811c9b70aa850db5c2e71d
size 249321504
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "50264": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "errors": "replace",
  "extra_special_tokens": {},
  "mask_token": "<mask>",
  "model_max_length": 512,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "RobertaTokenizer",
  "unk_token": "<unk>"
}
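A small sanity check that the tokenizer files above load as expected (assuming the repository files are in the current working directory; the path is illustrative):

```python
from transformers import RobertaTokenizer

tokenizer = RobertaTokenizer.from_pretrained(".")  # directory containing vocab.json, merges.txt, etc.

encoded = tokenizer("Meta unveils its latest AI model.")
print(encoded["input_ids"][0], encoded["input_ids"][-1])  # 0 (<s>) and 2 (</s>), per the config above
print(tokenizer.special_tokens_map)                       # matches special_tokens_map.json
print(tokenizer.model_max_length)                         # 512, from tokenizer_config.json
```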
vocab.json ADDED
The diff for this file is too large to render. See raw diff