{
"Project Name": "Financial LLaMA Fine-tuning",
"Base Model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"Training Dataset": "Josephgflowers/Finance-Instruct-500k",
"Fine-tuning Method": "LoRA (Low-Rank Adaptation)",
"Save Time": "2025-08-08 04:51:47",
"File List": [
"README.md",
"adapter_model.safetensors",
"adapter_config.json",
"training_args.bin",
"chat_template.jinja",
"tokenizer_config.json",
"special_tokens_map.json",
"tokenizer.json",
"training_config.json",
"test_results.json"
],
"Local Save Path": "C:\\Users\\Timber's Pad\\OneDrive\\Desktop\\JobHunting\\Project2_FineTune\\Project2_FineTune\\FineTuneSave",
"File Description": {
"adapter_config.json": "LoRA configuration file",
"adapter_model.safetensors": "LoRA weight file",
"tokenizer.json": "Tokenizer file",
"tokenizer_config.json": "Tokenizer configuration",
"special_tokens_map.json": "Special token mapping"
},
"Usage Instructions": [
"1. Extract zip file to target folder",
"2. Use the following code to load the model:",
" from peft import PeftModel",
" from transformers import AutoModelForCausalLM, AutoTokenizer",
" base_model = AutoModelForCausalLM.from_pretrained('meta-llama/Meta-Llama-3.1-8B-Instruct')",
" model = PeftModel.from_pretrained(base_model, 'path/to/model')",
" tokenizer = AutoTokenizer.from_pretrained('path/to/model')"
]
}