Update README.md
Browse files
README.md
CHANGED
|
@@ -20,13 +20,13 @@ class ProjectionModel(nn.Module):
|
|
| 20 |
self.c_code_encoder = AutoModel.from_pretrained("microsoft/codebert-base")
|
| 21 |
self.pseudocode_encoder = AutoModel.from_pretrained("microsoft/codebert-base")
|
| 22 |
|
| 23 |
-
#
|
| 24 |
self.projection = nn.Sequential(
|
| 25 |
nn.Linear(config.embedding_dim, config.hidden_dim),
|
| 26 |
nn.ReLU(), # First activation function
|
| 27 |
-
nn.Linear(config.hidden_dim, config.hidden_dim), #
|
| 28 |
nn.ReLU(), # Second activation function
|
| 29 |
-
nn.Linear(config.hidden_dim, config.embedding_dim) #
|
| 30 |
)
|
| 31 |
|
| 32 |
def forward(self, c_code_inputs, pseudocode_inputs):
|
|
|
|
| 20 |
self.c_code_encoder = AutoModel.from_pretrained("microsoft/codebert-base")
|
| 21 |
self.pseudocode_encoder = AutoModel.from_pretrained("microsoft/codebert-base")
|
| 22 |
|
| 23 |
+
# Projection network with 2 hidden layers
|
| 24 |
self.projection = nn.Sequential(
|
| 25 |
nn.Linear(config.embedding_dim, config.hidden_dim),
|
| 26 |
nn.ReLU(), # First activation function
|
| 27 |
+
nn.Linear(config.hidden_dim, config.hidden_dim), # Second hidden layer
|
| 28 |
nn.ReLU(), # Second activation function
|
| 29 |
+
nn.Linear(config.hidden_dim, config.embedding_dim) # Output layer projecting back to the original embedding space
|
| 30 |
)
|
| 31 |
|
| 32 |
def forward(self, c_code_inputs, pseudocode_inputs):
|