import torch
"""Convert a custom LTG-BERT checkpoint into Hugging Face format.

Builds the model from a hard-coded config, loads weights saved with
``torch.save``, and re-exports them via ``save_pretrained`` so the model
can later be loaded with ``from_pretrained``.
"""
from configuration_ltgbert import LtgBertConfig  # custom config class for LTG-BERT
from modeling_ltgbert import LtgBertForMaskedLM  # Hugging Face model wrapper


def main() -> None:
    """Initialize the model, load custom weights, and save in HF format."""
    # 1. Initialize config and model. Hyperparameters mirror the original
    #    training run -- adjust here if your checkpoint was trained differently.
    config = LtgBertConfig(
        attention_probs_dropout_prob=0.1,
        classifier_dropout=None,
        hidden_dropout_prob=0.1,
        hidden_size=384,
        intermediate_size=1024,
        layer_norm_eps=1e-07,
        max_position_embeddings=512,
        num_attention_heads=6,
        num_hidden_layers=12,
        output_all_encoded_layers=True,
        pad_token_id=4,
        position_bucket_size=32,
        vocab_size=6144,
    )
    model = LtgBertForMaskedLM(config)

    # 2. Load the custom model weights.
    # weights_only=True restricts unpickling to tensors/primitives, which
    # avoids arbitrary code execution from an untrusted checkpoint (and
    # silences torch's FutureWarning about the old default).
    model_weights_path = "model_weights.pth"
    state_dict = torch.load(model_weights_path, map_location="cpu", weights_only=True)
    # Raises if the checkpoint's keys don't match the model architecture,
    # which is the desired behavior for a conversion script.
    model.load_state_dict(state_dict)

    # 3. Save the model in Hugging Face format (plain pytorch_model.bin,
    #    not safetensors, as the original script requested).
    output_dir = "./"
    model.save_pretrained(output_dir, safe_serialization=False)


if __name__ == "__main__":
    main()