penfever committed
Commit e4a4a9f · verified · 1 Parent(s): 5aeb277

Model save

README.md ADDED
@@ -0,0 +1,58 @@
+ ---
+ library_name: transformers
+ tags:
+ - llama-factory
+ - generated_from_trainer
+ model-index:
+ - name: bugs-r2egym-stackseq
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # bugs-r2egym-stackseq
+
+ This model was trained from scratch on the None dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 4e-05
+ - train_batch_size: 1
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 16
+ - total_eval_batch_size: 64
+ - optimizer: Use OptimizerNames.ADAMW_TORCH_FUSED with betas=(0.9,0.98) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 7.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.56.1
+ - Pytorch 2.9.1+cu128
+ - Datasets 4.4.1
+ - Tokenizers 0.22.1
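
The hyperparameter list in the card above comes from the trainer's auto-generated output. As a rough orientation, here is a minimal sketch of how those values map onto `transformers.TrainingArguments`; it is not the exact LLaMA-Factory invocation used for this run, and the output directory and precision flag are assumptions.

```python
# Hedged sketch: mirrors the hyperparameters listed in the card above.
# output_dir and bf16 are assumptions; model/dataset wiring is omitted.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="bugs-r2egym-stackseq",   # placeholder output path
    learning_rate=4e-5,
    per_device_train_batch_size=1,       # 1 per device x 8 GPUs x grad accumulation 2 = 16 effective
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,
    num_train_epochs=7.0,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    optim="adamw_torch_fused",
    adam_beta1=0.9,
    adam_beta2=0.98,
    adam_epsilon=1e-8,
    seed=42,
    bf16=True,                           # assumption: precision is not stated in the card
)
```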
generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "temperature": 0.6,
+   "top_k": 20,
+   "top_p": 0.95,
+   "transformers_version": "4.56.1"
+ }
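
A hedged usage sketch follows, sampling with the settings from this generation_config.json. The repo id `penfever/bugs-r2egym-stackseq` is an assumption inferred from the author and model name; swap in a local path if the checkpoint is not published under that name.

```python
# Hedged sketch: load the checkpoint and sample with the generation_config.json settings.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "penfever/bugs-r2egym-stackseq"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")

inputs = tokenizer("Example prompt", return_tensors="pt").to(model.device)
# These kwargs restate generation_config.json; omitting them picks up the same defaults.
outputs = model.generate(
    **inputs,
    do_sample=True,
    temperature=0.6,
    top_k=20,
    top_p=0.95,
    max_new_tokens=256,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```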
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:63ed496a0d512c055a1701e27ef8914242bc8007a781a9907a08a6ade389e9cd
+ oid sha256:b5ced680ae2bfc22065e00b94991d84adc2bdedeaac190ebdb20346bf9a42c93
  size 4902257696
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1e9b2aefc04e03dc16b5a7b4e6708424dcb4dcad4a5ce2ae386a231b902d0a3b
+ oid sha256:39867f9ad2ef45335a387054b3c60023501b021af35a0c44e98dfa1041fc403a
  size 4915960368
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:45ebe1635d6057f894ee88687ae70a700788d2dda1275e998b275ad8ad2fa43f
+ oid sha256:442905ebc657b633d2d57ed471bf0b4a4a3cfdb276385a5cb2207579783304ef
  size 4983068496
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:aaa22c7dde35b88cfc4258517540773eb2bb4936a7fe43c2756384eda9994a91
+ oid sha256:25871f657e4e2f37c55cbd418f674d79ba4cb77ae4b5ae6cb27995353e262831
  size 1580230264
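
Each weight-shard entry above is a Git LFS pointer: the repository stores only the `oid sha256` and byte `size`, not the tensor data itself. A small hedged sketch (the local path is a placeholder) for checking a downloaded shard against its pointer:

```python
# Hedged sketch: verify a downloaded shard against the oid/size recorded in its LFS pointer.
import hashlib
import os

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

path = "model-00001-of-00004.safetensors"  # placeholder local path
expected_oid = "b5ced680ae2bfc22065e00b94991d84adc2bdedeaac190ebdb20346bf9a42c93"
expected_size = 4902257696

assert os.path.getsize(path) == expected_size, "size does not match LFS pointer"
assert sha256_of(path) == expected_oid, "sha256 does not match LFS pointer"
```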
trainer_log.jsonl CHANGED
@@ -1932,3 +1932,8 @@
  {"current_steps": 9590, "total_steps": 9625, "loss": 0.1767, "lr": 1.7047559633920353e-09, "epoch": 6.975627500909422, "percentage": 99.64, "elapsed_time": "16:06:46", "remaining_time": "0:03:31"}
  {"current_steps": 9595, "total_steps": 9625, "loss": 0.187, "lr": 1.26410223527218e-09, "epoch": 6.979265187340851, "percentage": 99.69, "elapsed_time": "16:08:22", "remaining_time": "0:03:01"}
  {"current_steps": 9600, "total_steps": 9625, "loss": 0.1851, "lr": 8.89215172437119e-10, "epoch": 6.9829028737722805, "percentage": 99.74, "elapsed_time": "16:10:01", "remaining_time": "0:02:31"}
+ {"current_steps": 9605, "total_steps": 9625, "loss": 0.1931, "lr": 5.800960077206874e-10, "epoch": 6.986540560203711, "percentage": 99.79, "elapsed_time": "16:13:12", "remaining_time": "0:02:01"}
+ {"current_steps": 9610, "total_steps": 9625, "loss": 0.1898, "lr": 3.3674575767417283e-10, "epoch": 6.99017824663514, "percentage": 99.84, "elapsed_time": "16:14:54", "remaining_time": "0:01:31"}
+ {"current_steps": 9615, "total_steps": 9625, "loss": 0.1868, "lr": 1.591652225663154e-10, "epoch": 6.9938159330665695, "percentage": 99.9, "elapsed_time": "16:16:35", "remaining_time": "0:01:00"}
+ {"current_steps": 9620, "total_steps": 9625, "loss": 0.1935, "lr": 4.735498637442604e-11, "epoch": 6.997453619498, "percentage": 99.95, "elapsed_time": "16:18:16", "remaining_time": "0:00:30"}
+ {"current_steps": 9624, "total_steps": 9625, "epoch": 7.0, "percentage": 99.99, "elapsed_time": "16:19:29", "remaining_time": "0:00:06"}
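
Each trainer_log.jsonl line is a standalone JSON object (step, loss, lr, epoch, timing); note the final summary entry at step 9624 carries no `loss` field. A hedged sketch for pulling the loss curve out of the log:

```python
# Hedged sketch: read trainer_log.jsonl and collect (step, loss) pairs for inspection/plotting.
import json

steps, losses = [], []
with open("trainer_log.jsonl") as f:
    for line in f:
        record = json.loads(line)
        if "loss" in record:  # skip the final summary line, which has no "loss" field
            steps.append(record["current_steps"])
            losses.append(record["loss"])

print(f"logged {len(steps)} loss points, last: step {steps[-1]}, loss {losses[-1]}")
```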