import torch
import numpy as np
import os
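# Note: DNATokenizer is not part of upstream Hugging Face transformers; this import
# assumes the DNABERT fork of transformers (or an equivalent package) is installed.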
from transformers import BertModel, BertConfig, DNATokenizer, BertForMaskedLM

# --- CONFIGURATION ---
OUTPUT_FOLDER = "6mer_pretrain_emb_adaptive"
OUTPUT_FILENAME = "static_adaptive_embed.npy"
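# The checkpoint directory is expected to contain a Hugging Face-style checkpoint
# (config.json, model weights such as pytorch_model.bin, and the tokenizer's vocab file).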
CHECKPOINT_PATH = "/data/n5huang/dna_token/pretrain_output_adaptive/checkpoint-10000/"

if not os.path.isdir(CHECKPOINT_PATH):
    raise FileNotFoundError(f"Checkpoint directory not found: {CHECKPOINT_PATH}")

# --- MODEL CLASS MAPPING (config, model, and tokenizer classes for the "dna" model type) ---
MODEL_CLASSES = {
    "dna": (BertConfig, BertForMaskedLM, DNATokenizer),
}

# --- CUSTOM LOADING FUNCTION (Modified to return BertModel for clean embeddings) ---
def loadmodel(model_dir):
    config_class, _, tokenizer_class = MODEL_CLASSES['dna']
    
    # Load Config
    config = config_class.from_pretrained(model_dir)
    
    # Explicitly load the BASE BERT MODEL (BertModel) to access the embedding layer
    model = BertModel.from_pretrained(model_dir, config=config)
    
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()
    
    # Load Tokenizer from the checkpoint directory
    # (environment-variable vocab overrides are kept below for reference, currently disabled)
    #tokenizer_class.vocab_files_names = {"vocab_file": os.getenv("VOCAB_NAME")}
    #tokenizer_class.pretrained_vocab_files_map = {"vocab_file": {'dna': os.getenv("VOCAB_PATH")}}
    tokenizer = tokenizer_class.from_pretrained(model_dir)
    
    return model, tokenizer

# --- MAIN EXECUTION ---
if __name__ == "__main__":
    # Load the model and tokenizer
    print("Starting model and tokenizer load...")
    model, tokenizer = loadmodel(CHECKPOINT_PATH)
    print(f"Model and Tokenizer loaded successfully. Vocab size: {len(tokenizer)}")
    
    # 1. Extract the static embedding layer
    # This matrix contains the vector for every token ID (4101 tokens x 768 dimensions)
    embedding_layer = model.get_input_embeddings()
    print(f"Embedding weight shape: {embedding_layer.weight.shape}")

    # 2. Extract the weights as a NumPy array
    # Detach from the graph, move to CPU, then convert to NumPy
    static_embeddings_tensor = embedding_layer.weight.detach().cpu()
    static_embeddings_array = static_embeddings_tensor.numpy()
    
    print(f"\nExtracted embedding tensor size: {static_embeddings_tensor.size()}")
    print(f"Extracted NumPy array shape: {static_embeddings_array.shape}")
    
    # 3. Save the Embeddings
    os.makedirs(OUTPUT_FOLDER, exist_ok=True)
    output_path = os.path.join(OUTPUT_FOLDER, OUTPUT_FILENAME)
    np.save(output_path, static_embeddings_array)
    
    print(f"\n✅ Successfully saved static embeddings to: {output_path}")