feat: upload lora model
Browse files- _config/config_file.toml +3 -3
_config/config_file.toml
CHANGED
|
@@ -22,8 +22,8 @@ token_warmup_min = 1
|
|
| 22 |
token_warmup_step = 0
|
| 23 |
|
| 24 |
[training_arguments]
|
| 25 |
- output_dir = "/content/LoRA/output/
|
| 26 |
- output_name = "
|
| 27 |
save_precision = "fp16"
|
| 28 |
save_every_n_epochs = 2
|
| 29 |
train_batch_size = 4
|
|
@@ -41,7 +41,7 @@ mixed_precision = "fp16"
|
|
| 41 |
[logging_arguments]
|
| 42 |
log_with = "tensorboard"
|
| 43 |
logging_dir = "/content/LoRA/logs"
|
| 44 |
- log_prefix = "
|
| 45 |
|
| 46 |
[sample_prompt_arguments]
|
| 47 |
sample_every_n_epochs = 1
|
|
|
|
| 22 |
token_warmup_step = 0
|
| 23 |
|
| 24 |
[training_arguments]
|
| 25 |
+ output_dir = "/content/LoRA/output/bamakipajamas2"
|
| 26 |
+ output_name = "bamakipajamas2"
|
| 27 |
save_precision = "fp16"
|
| 28 |
save_every_n_epochs = 2
|
| 29 |
train_batch_size = 4
|
|
|
|
| 41 |
[logging_arguments]
|
| 42 |
log_with = "tensorboard"
|
| 43 |
logging_dir = "/content/LoRA/logs"
|
| 44 |
+ log_prefix = "bamakipajamas2"
|
| 45 |
|
| 46 |
[sample_prompt_arguments]
|
| 47 |
sample_every_n_epochs = 1
|