Update README.md
Browse files
README.md
CHANGED
|
@@ -1,3 +1,60 @@
|
|
| 1 |
A well-performing 2-layer neural network for projection.
|
| 2 |
Trained with custom N Pairs loss + hard negative mining.
|
| 3 |
-
margin = 1.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
A well-performing 2-layer neural network for projection.
|
| 2 |
Trained with a custom N-pairs loss and hard negative mining.
|
| 3 |
+
margin = 1.0
|
| 4 |
+
|
| 5 |
+
# load in using following code:
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
from transformers import AutoConfig, AutoTokenizer, AutoModel
|
| 10 |
+
from huggingface_hub import hf_hub_download
|
| 11 |
+
import json
|
| 12 |
+
from types import SimpleNamespace
|
| 13 |
+
|
| 14 |
+
#model architecture - needed since this is a custom model
|
| 15 |
+
class ProjectionModel(nn.Module):
    """Dual-encoder model that aligns pseudocode with C code.

    Two independent CodeBERT encoders embed C code and pseudocode; a
    small MLP ("projection head") maps the pseudocode embedding into
    the C-code embedding space.

    Attribute names (``c_code_encoder``, ``pseudocode_encoder``,
    ``projection``) are part of the checkpoint's state_dict keys and
    must not be renamed.
    """

    def __init__(self, config):
        """Build both encoders and the projection head.

        Args:
            config: namespace with ``embedding_dim`` and ``hidden_dim``
                attributes (loaded from the repo's ``config.json``).
        """
        super().__init__()
        self.config = config
        # Separate (non-shared) encoders for each modality.
        self.c_code_encoder = AutoModel.from_pretrained("microsoft/codebert-base")
        self.pseudocode_encoder = AutoModel.from_pretrained("microsoft/codebert-base")

        # MLP with two hidden layers; maps embedding_dim -> hidden_dim
        # -> hidden_dim -> embedding_dim so the projected pseudocode
        # vector lives in the same space as the C-code vector.
        head_layers = [
            nn.Linear(config.embedding_dim, config.hidden_dim),
            nn.ReLU(),
            nn.Linear(config.hidden_dim, config.hidden_dim),
            nn.ReLU(),
            nn.Linear(config.hidden_dim, config.embedding_dim),
        ]
        self.projection = nn.Sequential(*head_layers)

    def forward(self, c_code_inputs, pseudocode_inputs):
        """Encode both inputs and project the pseudocode embedding.

        Args:
            c_code_inputs: tokenizer output (dict of tensors) for C code.
            pseudocode_inputs: tokenizer output (dict of tensors) for
                pseudocode.

        Returns:
            Tuple ``(c_code_embedding, projected_pseudocode_embedding)``.
        """
        # Mean-pool the token embeddings over the sequence dimension.
        # NOTE(review): pooling averages over *all* positions, padding
        # included — matches the trained checkpoint, so keep as-is.
        code_vec = self.c_code_encoder(**c_code_inputs).last_hidden_state.mean(dim=1)
        pseudo_vec = self.pseudocode_encoder(**pseudocode_inputs).last_hidden_state.mean(dim=1)

        # Only the pseudocode side passes through the projection head.
        projected_pseudo_vec = self.projection(pseudo_vec)

        return code_vec, projected_pseudo_vec
|
| 40 |
+
|
| 41 |
+
# Download the config and trained weights from the Hugging Face Hub,
# rebuild the custom architecture, and load the checkpoint.
model_name = "aircrypto/code-llama-7b-projection-largev2.11"
config_file = hf_hub_download(repo_id=model_name, filename="config.json")

# config.json holds plain key/value hyperparameters (embedding_dim,
# hidden_dim, ...); SimpleNamespace gives attribute-style access.
with open(config_file, 'r') as f:
    config_dict = json.load(f)
config = SimpleNamespace(**config_dict)

model = ProjectionModel(config)
model_path = hf_hub_download(repo_id=model_name, filename="pytorch_model.bin")
# weights_only=True: the checkpoint is a plain state_dict of tensors,
# so refuse arbitrary pickled objects when loading a remote file
# (requires a recent torch; drop the flag on very old versions).
state_dict = torch.load(model_path, map_location="cpu", weights_only=True)
model.load_state_dict(state_dict)

# Move to GPU when available; inference works on CPU as well.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

print("Model loaded successfully!")

# Reuse model_name rather than repeating the repo id literal.
tokenizer = AutoTokenizer.from_pretrained(model_name)

print("Tokenizer loaded successfully!")
|