jesse-tong committed on
Commit 832e945 · 1 Parent(s): ae47555

Update example use and remove unused class

Files changed (4)
  1. example_uses.md +16 -0
  2. example_uses.txt +0 -1
  3. models/lstm_model.py +0 -107
  4. requirements.txt +2 -1
example_uses.md ADDED
@@ -0,0 +1,16 @@
+
+ ## Example uses
+
+ - Train a BERT model (train.csv is the ag_news dataset with 4 classes):
+ ```
+ python train.py --data_path train.csv --label_column "Class Index" --text_column "Description" --epochs 4 --num_classes 4
+ ```
+ - Run inference with the trained BERT model (train.csv is the ag_news dataset with 4 classes):
+ ```
+ python .\inference_example.py --model_path "./bert_base_uncased/best_model.pth" --num_classes 4 --class_names "World" "Sports" "Business" "Science" --text_column "Description" --label_column "Class Index" --data_path "./train.csv" --inference_batch_limit 10
+ ```
+
+ - Train an LSTM model from the BERT model using distillation:
+ ```
+ python .\distill_bert_to_lstm.py --bert_model bert-base-uncased --bert_model_path "./bert_base_uncased/best_model.pth" --output_dir "./docbert_lstm" --batch_size 32 --epochs 10 --data_path "./train.csv" --text_column "Description" --label_column "Class Index" --num_classes 4
+ ```
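
The last command above distills the fine-tuned BERT classifier into the LSTM student. As a rough illustration of what such a distillation step typically optimizes — the actual loss inside distill_bert_to_lstm.py is not shown in this diff, and the function name, temperature, and alpha weighting below are assumptions rather than the repository's API — a minimal soft-target sketch looks like this:

```
import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels,
                      temperature=2.0, alpha=0.5):
    # Soft targets: KL divergence between temperature-softened
    # student and teacher distributions, scaled by T^2 as usual.
    soft = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    ) * (temperature ** 2)
    # Hard targets: ordinary cross-entropy against the true labels.
    hard = F.cross_entropy(student_logits, labels)
    return alpha * soft + (1.0 - alpha) * hard

# Toy usage: 4 classes (matching --num_classes 4 above), batch of 8.
student = torch.randn(8, 4)
teacher = torch.randn(8, 4)
labels = torch.randint(0, 4, (8,))
loss = distillation_loss(student, teacher, labels)
```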
example_uses.txt DELETED
@@ -1 +0,0 @@
- python .\inference_example.py --model_path "./bert_base_uncased/best_model.pth" --num_classes 4 --class_names "World" "Sports" "Business" "Science" --text_column "Description" --label_column "Class Index" --data_path "./train.csv" --inference_batch_limit 10
models/lstm_model.py CHANGED
@@ -1,113 +1,6 @@
  import torch
  import torch.nn as nn
  import torch.nn.functional as F
- from torchtext.vocab import GloVe # For loading pre-trained word embeddings
-
- class DocumentLSTM(nn.Module):
-     """
-     LSTM model for document classification using GloVe embeddings
-     """
-     def __init__(self, num_classes, vocab_size=30000, embedding_dim=300,
-                  hidden_dim=256, num_layers=2, bidirectional=True,
-                  dropout_rate=0.3, use_pretrained=True, padding_idx=0):
-         super(DocumentLSTM, self).__init__()
-
-         self.hidden_dim = hidden_dim
-         self.num_layers = num_layers
-         self.bidirectional = bidirectional
-         self.num_directions = 2 if bidirectional else 1
-
-         # Embedding layer (with option to use pre-trained GloVe)
-         if use_pretrained:
-             # Initialize with GloVe embeddings
-             try:
-                 glove = GloVe(name='6B', dim=embedding_dim)
-                 # You'd need to map your vocabulary to GloVe indices
-                 # This is a simplified placeholder
-                 self.embedding = nn.Embedding.from_pretrained(
-                     glove.vectors[:vocab_size],
-                     padding_idx=padding_idx,
-                     freeze=False
-                 )
-             except Exception as e:
-                 print(f"Could not load pretrained embeddings: {e}")
-                 # Fall back to random initialization
-                 self.embedding = nn.Embedding(
-                     vocab_size, embedding_dim, padding_idx=padding_idx
-                 )
-         else:
-             # Random initialization
-             self.embedding = nn.Embedding(
-                 vocab_size, embedding_dim, padding_idx=padding_idx
-             )
-
-         # LSTM layer
-         self.lstm = nn.LSTM(
-             embedding_dim,
-             hidden_dim,
-             num_layers=num_layers,
-             bidirectional=bidirectional,
-             batch_first=True,
-             dropout=dropout_rate if num_layers > 1 else 0
-         )
-
-         # Attention mechanism
-         self.attention = nn.Linear(hidden_dim * self.num_directions, 1)
-
-         # Layer normalization
-         self.layer_norm = nn.LayerNorm(hidden_dim * self.num_directions)
-
-         # Dropout layer
-         self.dropout = nn.Dropout(dropout_rate)
-
-         # Classification layer
-         self.classifier = nn.Linear(hidden_dim * self.num_directions, num_classes)
-
-     def forward(self, input_ids, attention_mask=None, **kwargs):
-         """
-         Forward pass through LSTM model
-
-         Args:
-             input_ids: Tensor of token ids [batch_size, seq_len]
-             attention_mask: Tensor indicating which tokens to attend to [batch_size, seq_len]
-         """
-         # Word embeddings
-         embedded = self.embedding(input_ids) # [batch_size, seq_len, embedding_dim]
-
-         # Pass through LSTM
-         lstm_out, (hidden, cell) = self.lstm(embedded)
-         # lstm_out: [batch_size, seq_len, hidden_dim * num_directions]
-
-         # Apply attention
-         if attention_mask is not None:
-             # Apply attention mask (1 for tokens to attend to, 0 for padding)
-             attention_mask = attention_mask.unsqueeze(-1) # [batch_size, seq_len, 1]
-             attention_scores = self.attention(lstm_out) # [batch_size, seq_len, 1]
-             attention_scores = attention_scores.masked_fill(attention_mask == 0, -1e10)
-             attention_weights = F.softmax(attention_scores, dim=1) # [batch_size, seq_len, 1]
-
-             # Weighted sum
-             context_vector = torch.sum(attention_weights * lstm_out, dim=1) # [batch_size, hidden_dim * num_directions]
-         else:
-             # If no attention mask, use the last hidden state
-             if self.bidirectional:
-                 # For bidirectional LSTM, concatenate last hidden states from both directions
-                 last_hidden = torch.cat([hidden[-2], hidden[-1]], dim=1) # [batch_size, hidden_dim * 2]
-             else:
-                 last_hidden = hidden[-1] # [batch_size, hidden_dim]
-
-             context_vector = last_hidden
-
-         # Layer normalization
-         normalized = self.layer_norm(context_vector)
-
-         # Dropout
-         dropped = self.dropout(normalized)
-
-         # Classification
-         logits = self.classifier(dropped)
-
-         return logits

  class DocumentBiLSTM(nn.Module):
      """
requirements.txt CHANGED
@@ -3,4 +3,5 @@ numpy
  pandas
  torch
  transformers
- datasets
+ datasets
+ torchtext
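
The diff does not say why torchtext was added; a plausible assumption is that GloVe embeddings like those the removed DocumentLSTM loaded are still used by the LSTM student elsewhere in the repository. As an illustrative sketch only (the vocabulary size and padding index are the removed class's defaults, reused here for illustration; the 6B vectors are downloaded on first use), loading them with torchtext looks like this:

```
import torch.nn as nn
from torchtext.vocab import GloVe

glove = GloVe(name="6B", dim=300)        # pre-trained 300-d GloVe vectors
embedding = nn.Embedding.from_pretrained(
    glove.vectors[:30000],               # truncate to a 30k-word vocabulary
    padding_idx=0,                       # index reserved for padding
    freeze=False,                        # allow fine-tuning during training
)
```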