Shivangsinha committed on
Commit
c0e4b5c
·
verified ·
1 Parent(s): bd3823c

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +86 -56
README.md CHANGED
@@ -1,23 +1,47 @@
 
1
 
2
-
3
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  import torch
5
  from transformers import BertTokenizer, BertModel
6
  import torch.nn as nn
7
- import os
8
-
9
- import gc
10
- torch.cuda.empty_cache()
11
- gc.collect()
12
- # Set the CUDA device
13
- os.environ["CUDA_VISIBLE_DEVICES"] = "3"
14
-
15
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
16
- print(f"Using device: {device}")
17
-
18
- # Initialize BERT tokenizer
19
  tokenizer = BertTokenizer.from_pretrained('bert-base-german-cased')
20
-
21
  # Define the Multi-Task Model
22
  class MultiTaskModel(nn.Module):
23
  def __init__(self):
@@ -27,7 +51,7 @@ class MultiTaskModel(nn.Module):
27
  self.fc_fake_news = nn.Linear(self.bert.config.hidden_size, 1)
28
  self.fc_hate_speech = nn.Linear(self.bert.config.hidden_size, 1)
29
  self.fc_toxicity = nn.Linear(self.bert.config.hidden_size, 1)
30
-
31
  def forward(self, input_ids, attention_mask):
32
  outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
33
  pooled_output = outputs[1] # Get the pooled output
@@ -36,48 +60,54 @@ class MultiTaskModel(nn.Module):
36
  hate_speech_output = self.fc_hate_speech(pooled_output)
37
  toxicity_output = self.fc_toxicity(pooled_output)
38
  return fake_news_output, hate_speech_output, toxicity_output
39
-
40
  # Function to load the model
41
- def load_model():
42
- model = MultiTaskModel().to(device) # Initialize the model
43
- model.load_state_dict(torch.load('../../media/data/multiTaskTWONB1/multi_task_model.pt')) # Alternatively Load the saved state from hugging face as well
44
- model.eval() # Set the model to evaluation mode
45
  return model
46
-
47
- # Function to make predictions
48
- def predict(text, model):
49
- # Tokenize and encode the input text
50
- encoding = tokenizer(text, return_tensors='pt', padding='max_length', truncation=True, max_length=128)
51
-
52
- # Move input tensors to the same device as the model
53
- input_ids = encoding['input_ids'].to(device)
54
- attention_mask = encoding['attention_mask'].to(device)
55
-
56
- # Make predictions
57
- with torch.no_grad():
58
- outputs_fake_news, outputs_hate_speech, outputs_toxicity = model(input_ids, attention_mask)
59
-
60
- # Apply sigmoid to get probabilities and round to get binary predictions
61
  preds_fake_news = torch.sigmoid(outputs_fake_news).squeeze().round().cpu().numpy()
62
  preds_hate_speech = torch.sigmoid(outputs_hate_speech).squeeze().round().cpu().numpy()
63
  preds_toxicity = torch.sigmoid(outputs_toxicity).squeeze().round().cpu().numpy()
64
-
65
- return preds_fake_news, preds_hate_speech, preds_toxicity
66
-
67
- # Load the model
68
- model = load_model()
69
-
70
- # Example text input for prediction
71
- text_input = "Mir fallen nur Steuervorteile durch Gender Pay gap ein."
72
-
73
- # Make predictions
74
- predictions = predict(text_input, model)
75
-
76
- # Print the predictions
77
- print(f"Fake News Prediction: {predictions[0]}")
78
- print(f"Hate Speech Prediction: {predictions[1]}")
79
- print(f"Toxicity Prediction: {predictions[2]}")
80
-
81
-
82
- torch.cuda.empty_cache()
83
- gc.collect()
 
 
 
 
 
 
 
 
1
+ # Multi-Task BERT Model for Fake News, Hate Speech, and Toxicity Detection
2
 
3
+ ## Model Description
4
+ This model is a **multi-task learning framework** based on [BERT (bert-base-german-cased)](https://huggingface.co/bert-base-german-cased), designed to perform binary classification on three tasks simultaneously:
5
+ 1. Fake News Detection
6
+ 2. Hate Speech Detection
7
+ 3. Toxicity Detection
8
+
9
+ The model utilizes a shared BERT encoder with task-specific fully connected layers for each classification task. It is fine-tuned on task-specific labeled datasets and supports input text in **German**.
10
+
11
+ ### Model Architecture
12
+ - **Base Model:** bert-base-german-cased
13
+ - **Classifier Heads:**
14
+ - Fully connected layers with 1 output unit for each task (fake news, hate speech, and toxicity).
15
+ - **Activation Function:** Sigmoid activation for binary classification.
16
+
17
+ ## Intended Use
18
+ This model is intended for applications in:
19
+ - Social media monitoring
20
+ - Content moderation
21
+ - Research on online discourse in German
22
+
23
+ ### Example Use Case
24
+ The model can analyze German text to predict whether it contains:
25
+ - Fake news (1: True, 0: False)
26
+ - Hate speech (1: True, 0: False)
27
+ - Toxicity (1: True, 0: False)
28
+
29
+ ## Usage
30
+
31
+ ### Requirements
32
+ ```bash
33
+ pip install torch transformers
34
+ ```
35
+
36
+ ### Code Example
37
+ ```python
38
  import torch
39
  from transformers import BertTokenizer, BertModel
40
  import torch.nn as nn
41
+
42
+ # Load the tokenizer
 
 
 
 
 
 
 
 
 
 
43
  tokenizer = BertTokenizer.from_pretrained('bert-base-german-cased')
44
+
45
  # Define the Multi-Task Model
46
  class MultiTaskModel(nn.Module):
47
  def __init__(self):
 
51
  self.fc_fake_news = nn.Linear(self.bert.config.hidden_size, 1)
52
  self.fc_hate_speech = nn.Linear(self.bert.config.hidden_size, 1)
53
  self.fc_toxicity = nn.Linear(self.bert.config.hidden_size, 1)
54
+
55
  def forward(self, input_ids, attention_mask):
56
  outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
57
  pooled_output = outputs[1] # Get the pooled output
 
60
  hate_speech_output = self.fc_hate_speech(pooled_output)
61
  toxicity_output = self.fc_toxicity(pooled_output)
62
  return fake_news_output, hate_speech_output, toxicity_output
63
+
64
  # Function to load the model
65
+ def load_model(device):
66
+ model = MultiTaskModel().to(device)
67
+ model.load_state_dict(torch.load('path_to_your_model.pt'))
68
+ model.eval()
69
  return model
70
+
71
+ # Example Usage
72
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
73
+ model = load_model(device)
74
+
75
+ text_input = "Mir fallen nur Steuervorteile durch Gender Pay gap ein."
76
+ encoding = tokenizer(text_input, return_tensors='pt', padding='max_length', truncation=True, max_length=128)
77
+
78
+ input_ids = encoding['input_ids'].to(device)
79
+ attention_mask = encoding['attention_mask'].to(device)
80
+
81
+ # Predict
82
+ with torch.no_grad():
83
+ outputs_fake_news, outputs_hate_speech, outputs_toxicity = model(input_ids, attention_mask)
 
84
  preds_fake_news = torch.sigmoid(outputs_fake_news).squeeze().round().cpu().numpy()
85
  preds_hate_speech = torch.sigmoid(outputs_hate_speech).squeeze().round().cpu().numpy()
86
  preds_toxicity = torch.sigmoid(outputs_toxicity).squeeze().round().cpu().numpy()
87
+
88
+ print(f"Fake News Prediction: {preds_fake_news}")
89
+ print(f"Hate Speech Prediction: {preds_hate_speech}")
90
+ print(f"Toxicity Prediction: {preds_toxicity}")
91
+ ```
92
+
93
+ ## Dataset
94
+ This model is intended to be fine-tuned on task-specific datasets of German text. Ensure that each training dataset is labeled for fake news, hate speech, and toxicity respectively.
95
+
96
+ ## Evaluation
97
+ The model is evaluated using:
98
+ - **Binary classification metrics:** F1-score, precision, recall, and accuracy.
99
+ - Task-specific benchmarks using separate test sets for each task.
100
+
101
+ ## Limitations and Bias
102
+ - **Language Support:** The model only supports German text.
103
+ - **Dataset Bias:** Predictions may reflect biases present in the training data.
104
+ - **Task-Specific Limitations:** Performance may degrade when the input text is ambiguous or belongs to several of the target categories at once.
105
+
106
+ ## Citation
107
+ If you use this model, please cite:
108
+ ```bibtex
109
+ @article{example2025,
110
+ title={Multi-Task BERT Model for Fake News, Hate Speech, and Toxicity Detection},
111
+ author={Shivang Sinha},
112
+ year={2025}
113
+ }