|
|
import json
import os
import shutil

import sentencepiece as spm
import torch
import torch.nn as nn
import yaml
from huggingface_hub import HfApi, upload_file
from tokenizers import SentencePieceBPETokenizer
from transformers import GPT2Config, LlamaTokenizerFast, PreTrainedTokenizerFast
|
|
|
|
|
|
|
|
class TinyLlama(nn.Module):
    """Minimal decoder-style language model: embedding -> N encoder layers -> vocab projection.

    Attribute names (``embedding``, ``transformer_blocks``, ``output_layer``)
    are part of the state_dict contract and must not change.
    """

    def __init__(self, config):
        """Build the model from a config exposing vocab_size, n_embd, n_head, n_layer, hidden_dim."""
        super().__init__()
        self.embedding = nn.Embedding(config.vocab_size, config.n_embd)
        # NOTE(review): TransformerEncoderLayer defaults to seq-first input
        # (seq, batch, dim) — confirm callers feed that layout.
        encoder_layers = [
            nn.TransformerEncoderLayer(
                d_model=config.n_embd,
                nhead=config.n_head,
                dim_feedforward=config.hidden_dim,
            )
            for _ in range(config.n_layer)
        ]
        self.transformer_blocks = nn.ModuleList(encoder_layers)
        self.output_layer = nn.Linear(config.n_embd, config.vocab_size)

    def forward(self, x):
        """Map token ids ``x`` to per-position vocabulary logits."""
        hidden = self.embedding(x)
        for layer in self.transformer_blocks:
            hidden = layer(hidden)
        return self.output_layer(hidden)
|
|
|
|
|
|
|
|
# Read the training-time hyperparameters exported alongside the checkpoint.
model_config_path = "/home/jax/out/custom-model/final/model_config.yaml"

with open(model_config_path, 'r') as config_file:
    config_data = yaml.safe_load(config_file)

# Fall back to hard-coded defaults for any field the YAML omits.
hyperparams = {
    "vocab_size": config_data.get("vocab_size", 32000),
    "n_embd": config_data.get("n_embd", 2048),
    "n_layer": config_data.get("n_layer", 24),
    "n_head": config_data.get("n_head", 16),
    "hidden_dim": config_data.get("hidden_dim", 8192),
}

# GPT2Config stores unrecognized kwargs (e.g. hidden_dim) as extra
# attributes, which TinyLlama.__init__ reads for the feed-forward width.
config = GPT2Config(**hyperparams)

model = TinyLlama(config)
|
|
|
|
|
|
|
|
# Load the trained weights onto CPU; weights_only=True avoids executing
# arbitrary pickled code from the checkpoint.
model_weights_path = "/home/jax/out/custom-model/final/lit_model.pth"
model_weights = torch.load(model_weights_path, map_location=torch.device('cpu'), weights_only=True)

# BUG FIX: strict=False silently discarded any key mismatch between the
# checkpoint and the model. Keep the lenient load (checkpoint layout may
# legitimately differ), but report what was skipped so a wrong/empty load
# is no longer invisible.
load_result = model.load_state_dict(model_weights, strict=False)
if load_result.missing_keys:
    print(f"Warning: model keys absent from checkpoint: {load_result.missing_keys}")
if load_result.unexpected_keys:
    print(f"Warning: checkpoint keys not used by model: {load_result.unexpected_keys}")
|
|
|
|
|
|
|
|
# Local staging directory for the Hugging Face-format artifacts.
model_dir = "./huggingface_tinyllama"
os.makedirs(model_dir, exist_ok=True)

# Serialize the (possibly partially loaded) weights in HF's expected filename.
model_weights_save_path = os.path.join(model_dir, "pytorch_model.bin")
torch.save(model.state_dict(), model_weights_save_path)

# Persist the architecture config as config.json next to the weights.
config_save_path = os.path.join(model_dir, "config.json")
with open(config_save_path, 'w') as config_out:
    config_out.write(json.dumps(config.to_dict()))
|
|
|
|
|
|
|
|
|
|
|
# Convert the raw SentencePiece tokenizer to a Hugging Face fast tokenizer.
tokenizer_path = "/home/jax/out/custom-model/final/tokenizer.model"

# Sanity check: make sure the SentencePiece model actually loads before wrapping it.
sp_tokenizer = spm.SentencePieceProcessor()
sp_tokenizer.load(tokenizer_path)

# BUG FIX: the previous code passed the binary .model file as `vocab=` to
# SentencePieceBPETokenizer (which expects a JSON vocab, so this raised),
# and then clobbered the result with
# PreTrainedTokenizerFast.from_pretrained(tokenizer_path), which cannot load
# a raw SentencePiece file either. LlamaTokenizerFast consumes the .model
# file directly via `vocab_file`; the special tokens the old code tried to
# add are declared here instead.
hf_tokenizer = LlamaTokenizerFast(
    vocab_file=tokenizer_path,
    unk_token="<unk>",
    pad_token="<pad>",
    bos_token="<s>",
    eos_token="</s>",
)

# Writes tokenizer.json / tokenizer_config.json / special_tokens_map.json.
hf_tokenizer.save_pretrained(model_dir)

# Also stage the raw SentencePiece model: a later step uploads
# model_dir/tokenizer.model, which the original script never created.
shutil.copy(tokenizer_path, os.path.join(model_dir, "tokenizer.model"))
|
|
|
|
|
|
|
|
# BUG FIX: `api` and `repo_id` were used without ever being defined (NameError).
# The repo id matches the one in the model card's usage example below.
repo_id = "jacksonstrut/tinyllama-1.1B-chat"
api = HfApi()
# Idempotent repo creation; auth comes from the environment.
api.create_repo(repo_id=repo_id, token=os.getenv('HUGGINGFACE_API_TOKEN'), exist_ok=True)
|
|
|
|
|
|
|
|
# Model card content: YAML front matter (language/tags/license) followed by
# Markdown. Written verbatim to README.md below — every byte of the literal,
# including its blank lines, ends up in the published model card.
model_card = """


---


language: en


tags:


- tinyllama


- language-model


- chat


license: apache-2.0


---




# TinyLlama 1.1B Chat Model




## Model Description


TinyLlama is a lightweight LLaMA-based model with 1.1 billion parameters, designed to perform well on conversational and text generation tasks. It has been fine-tuned specifically for chat applications, providing coherent and context-aware responses.




## Training Data


The model was trained on a diverse dataset, including web text, books, and conversational data, to make it capable of handling a wide range of language styles.




## Usage


You can use this model for conversational AI, text completion, or other natural language generation tasks. Here’s a quick example:




```python


from transformers import AutoModelForCausalLM, AutoTokenizer




tokenizer = AutoTokenizer.from_pretrained("jacksonstrut/tinyllama-1.1B-chat")


model = AutoModelForCausalLM.from_pretrained("jacksonstrut/tinyllama-1.1B-chat")




input_ids = tokenizer("Hello, how are you?", return_tensors="pt").input_ids


output = model.generate(input_ids)


print(tokenizer.decode(output[0]))


```




## Limitations


- The model may produce biased or inappropriate outputs as it is trained on general datasets from the internet.


- It may not be suitable for all applications, especially those requiring factual accuracy.




## License


This model is licensed under the Apache 2.0 License.


"""








# Persist the model card as the repository README in the staging directory.
readme_path = os.path.join(model_dir, "README.md")

with open(readme_path, 'w') as f:

    f.write(model_card)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Upload every staged artifact to the Hub.
#
# BUG FIX: the original hard-coded four upload_file calls, which (a) never
# uploaded the README.md written above, and (b) unconditionally uploaded
# model_dir/tokenizer.model — a file this script never placed there, so the
# call was guaranteed to fail. Uploading whatever actually exists in
# model_dir covers the weights, config, all tokenizer files emitted by
# save_pretrained, and the model card, and skips anything missing.
hf_token = os.getenv('HUGGINGFACE_API_TOKEN')

for artifact_name in sorted(os.listdir(model_dir)):
    artifact_path = os.path.join(model_dir, artifact_name)
    # Only plain files belong in the repo root; skip any stray directories.
    if not os.path.isfile(artifact_path):
        continue
    upload_file(
        path_or_fileobj=artifact_path,
        path_in_repo=artifact_name,
        repo_id=repo_id,
        token=hf_token,
    )
|
|
|