Instructions to use fausap/dark-phi with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- PEFT
How to use fausap/dark-phi with PEFT:
```python
from peft import PeftModel
from transformers import AutoModelForCausalLM

base_model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2")
model = PeftModel.from_pretrained(base_model, "fausap/dark-phi")
```
- Notebooks
- Google Colab
- Kaggle
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 255,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11764705882352941,
      "grad_norm": 0.03323917090892792,
      "learning_rate": 0.0001999919114627769,
      "loss": 2.763583183288574,
      "step": 10
    },
    {
      "epoch": 0.23529411764705882,
      "grad_norm": 0.05506502836942673,
      "learning_rate": 0.0001990228692687429,
      "loss": 2.593435287475586,
      "step": 20
    },
    {
      "epoch": 0.35294117647058826,
      "grad_norm": 0.05839679390192032,
      "learning_rate": 0.00019645406355025565,
      "loss": 2.345814323425293,
      "step": 30
    },
    {
      "epoch": 0.47058823529411764,
      "grad_norm": 0.054268915206193924,
      "learning_rate": 0.00019232699463668542,
      "loss": 2.245083808898926,
      "step": 40
    },
    {
      "epoch": 0.5882352941176471,
      "grad_norm": 0.04734891280531883,
      "learning_rate": 0.0001867083373715264,
      "loss": 2.1789533615112306,
      "step": 50
    },
    {
      "epoch": 0.7058823529411765,
      "grad_norm": 0.04950524866580963,
      "learning_rate": 0.00017968886394725874,
      "loss": 2.146891975402832,
      "step": 60
    },
    {
      "epoch": 0.8235294117647058,
      "grad_norm": 0.048059429973363876,
      "learning_rate": 0.00017138197743559654,
      "loss": 2.1342124938964844,
      "step": 70
    },
    {
      "epoch": 0.9411764705882353,
      "grad_norm": 0.048672087490558624,
      "learning_rate": 0.00016192187970466644,
      "loss": 2.08856086730957,
      "step": 80
    },
    {
      "epoch": 1.0588235294117647,
      "grad_norm": 0.053143732249736786,
      "learning_rate": 0.00015146140332132358,
      "loss": 2.0824731826782226,
      "step": 90
    },
    {
      "epoch": 1.1764705882352942,
      "grad_norm": 0.05677838623523712,
      "learning_rate": 0.00014016954246529696,
      "loss": 2.0192502975463866,
      "step": 100
    },
    {
      "epoch": 1.2941176470588236,
      "grad_norm": 0.054012443870306015,
      "learning_rate": 0.00012822872274446958,
      "loss": 2.039341926574707,
      "step": 110
    },
    {
      "epoch": 1.4117647058823528,
      "grad_norm": 0.06396123766899109,
      "learning_rate": 0.00011583185401878101,
      "loss": 2.0363845825195312,
      "step": 120
    },
    {
      "epoch": 1.5294117647058822,
      "grad_norm": 0.05511175096035004,
      "learning_rate": 0.00010317921384584244,
      "loss": 2.039676856994629,
      "step": 130
    },
    {
      "epoch": 1.6470588235294117,
      "grad_norm": 0.052731700241565704,
      "learning_rate": 9.047521189774455e-05,
      "loss": 2.0343761444091797,
      "step": 140
    },
    {
      "epoch": 1.7647058823529411,
      "grad_norm": 0.05958898738026619,
      "learning_rate": 7.792508762150833e-05,
      "loss": 2.042786979675293,
      "step": 150
    },
    {
      "epoch": 1.8823529411764706,
      "grad_norm": 0.05990149825811386,
      "learning_rate": 6.57315944941107e-05,
      "loss": 2.0720109939575195,
      "step": 160
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.05766311287879944,
      "learning_rate": 5.409172443958843e-05,
      "loss": 2.0328493118286133,
      "step": 170
    },
    {
      "epoch": 2.1176470588235294,
      "grad_norm": 0.059346605092287064,
      "learning_rate": 4.3193525326884435e-05,
      "loss": 2.0109920501708984,
      "step": 180
    },
    {
      "epoch": 2.235294117647059,
      "grad_norm": 0.06073939800262451,
      "learning_rate": 3.321306296333673e-05,
      "loss": 1.9789583206176757,
      "step": 190
    },
    {
      "epoch": 2.3529411764705883,
      "grad_norm": 0.05661051720380783,
      "learning_rate": 2.431157666431052e-05,
      "loss": 2.008417320251465,
      "step": 200
    },
    {
      "epoch": 2.4705882352941178,
      "grad_norm": 0.06170394644141197,
      "learning_rate": 1.663287435215498e-05,
      "loss": 2.0092374801635744,
      "step": 210
    },
    {
      "epoch": 2.588235294117647,
      "grad_norm": 0.060639169067144394,
      "learning_rate": 1.0301009267953143e-05,
      "loss": 2.00238094329834,
      "step": 220
    },
    {
      "epoch": 2.7058823529411766,
      "grad_norm": 0.06061309948563576,
      "learning_rate": 5.418275829936537e-06,
      "loss": 2.024155616760254,
      "step": 230
    },
    {
      "epoch": 2.8235294117647056,
      "grad_norm": 0.06357177346944809,
      "learning_rate": 2.063557016466111e-06,
      "loss": 1.9843828201293945,
      "step": 240
    },
    {
      "epoch": 2.9411764705882355,
      "grad_norm": 0.06014658138155937,
      "learning_rate": 2.9104997242590527e-07,
      "loss": 1.9744945526123048,
      "step": 250
    }
  ],
  "logging_steps": 10,
  "max_steps": 255,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.324634988544e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}