{
  "Project Name": "Financial LLaMA Fine-tuning",
  "Base Model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
  "Training Dataset": "Josephgflowers/Finance-Instruct-500k",
  "Fine-tuning Method": "LoRA (Low-Rank Adaptation)",
  "Save Time": "2025-08-08 04:51:47",
  "File List": [
    "README.md",
    "adapter_model.safetensors",
    "adapter_config.json",
    "training_args.bin",
    "chat_template.jinja",
    "tokenizer_config.json",
    "special_tokens_map.json",
    "tokenizer.json",
    "training_config.json",
    "test_results.json"
  ],
  "Local Save Path": "C:\\Users\\Timber's Pad\\OneDrive\\Desktop\\JobHunting\\Project2_FineTune\\Project2_FineTune\\FineTuneSave",
  "File Description": {
    "adapter_config.json": "LoRA configuration file",
    "adapter_model.safetensors": "LoRA weight file",
    "tokenizer.json": "Tokenizer file",
    "tokenizer_config.json": "Tokenizer configuration",
    "special_tokens_map.json": "Special token mapping"
  },
  "Usage Instructions": [
    "1. Extract zip file to target folder",
    "2. Use the following code to load the model:",
    "   from peft import PeftModel",
    "   from transformers import AutoModelForCausalLM, AutoTokenizer",
    "   base_model = AutoModelForCausalLM.from_pretrained('meta-llama/Meta-Llama-3.1-8B-Instruct')",
    "   model = PeftModel.from_pretrained(base_model, 'path/to/model')",
    "   tokenizer = AutoTokenizer.from_pretrained('path/to/model')"
  ]
}