Update pipeline tag and add GitHub link

#1
by nielsr HF Staff - opened
Files changed (1) hide show
  1. README.md +432 -4
README.md CHANGED
@@ -1,13 +1,14 @@
1
  ---
 
 
 
2
  library_name: transformers
3
  license: mit
4
- base_model: henryhe0123/PC-Agent-E
5
  tags:
6
  - llama-factory
7
  - full
8
  - generated_from_trainer
9
- datasets:
10
- - henryhe0123/PC-Agent-E
11
  ---
12
 
13
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -17,6 +18,8 @@ should probably proofread and complete it, then remove this comment. -->
17
 
18
  This model is a fine-tuned version of [Qwen/Qwen2.5-VL-72B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-72B-Instruct) on the PC-Agent-E dataset.
19
 
 
 
20
  ## Training procedure
21
 
22
  ### Training hyperparameters
@@ -41,4 +44,429 @@ The following hyperparameters were used during training:
41
  - Transformers 4.49.0.dev0
42
  - Pytorch 2.6.0+cu124
43
  - Datasets 3.3.2
44
- - Tokenizers 0.21.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ base_model: henryhe0123/PC-Agent-E
3
+ datasets:
4
+ - henryhe0123/PC-Agent-E
5
  library_name: transformers
6
  license: mit
 
7
  tags:
8
  - llama-factory
9
  - full
10
  - generated_from_trainer
11
+ pipeline_tag: image-text-to-text
 
12
  ---
13
 
14
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 
18
 
19
  This model is a fine-tuned version of [Qwen/Qwen2.5-VL-72B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-72B-Instruct) on the PC-Agent-E dataset.
20
 
21
+ GitHub repository: https://github.com/GAIR-NLP/PC-Agent
22
+
23
  ## Training procedure
24
 
25
  ### Training hyperparameters
 
44
  - Transformers 4.49.0.dev0
45
  - Pytorch 2.6.0+cu124
46
  - Datasets 3.3.2
47
+ - Tokenizers 0.21.0
48
+
49
+ # File information
50
+
51
+ The repository contains the following file information:
52
+
53
+ Filename: train_results.json
54
+ Content: {
55
+ "epoch": 1.9928400954653938,
56
+ "total_flos": 3166299160051712.0,
57
+ "train_loss": 0.5405693022828353,
58
+ "train_runtime": 17864.3337,
59
+ "train_samples_per_second": 2.995,
60
+ "train_steps_per_second": 0.023
61
+ }
62
+
63
+ Filename: generation_config.json
64
+ Content: {
65
+ "bos_token_id": 151643,
66
+ "do_sample": true,
67
+ "eos_token_id": [
68
+ 151645,
69
+ 151643
70
+ ],
71
+ "pad_token_id": 151643,
72
+ "repetition_penalty": 1.05,
73
+ "top_k": 1,
74
+ "top_p": 0.001,
75
+ "transformers_version": "4.49.0.dev0"
76
+ }
77
+
78
+ Filename: vocab.json
79
+ Content: Content of the file is larger than 50 KB, too long to display.
80
+
81
+ Filename: all_results.json
82
+ Content: {
83
+ "epoch": 1.9928400954653938,
84
+ "total_flos": 3166299160051712.0,
85
+ "train_loss": 0.5405693022828353,
86
+ "train_runtime": 17864.3337,
87
+ "train_samples_per_second": 2.995,
88
+ "train_steps_per_second": 0.023
89
+ }
90
+
91
+ Filename: config.json
92
+ Content: {
93
+ "_name_or_path": "/inspire/hdd/global_user/liupengfei-24025/yhhe/model/Qwen2.5-VL-72B-Instruct",
94
+ "architectures": [
95
+ "Qwen2_5_VLForConditionalGeneration"
96
+ ],
97
+ "attention_bias": false,
98
+ "attention_dropout": 0.0,
99
+ "bos_token_id": 151643,
100
+ "eos_token_id": 151645,
101
+ "hidden_act": "silu",
102
+ "hidden_size": 8192,
103
+ "image_token_id": 151655,
104
+ "initializer_range": 0.02,
105
+ "intermediate_size": 29568,
106
+ "max_position_embeddings": 128000,
107
+ "max_window_layers": 80,
108
+ "model_type": "qwen2_5_vl",
109
+ "num_attention_heads": 64,
110
+ "num_hidden_layers": 80,
111
+ "num_key_value_heads": 8,
112
+ "rms_norm_eps": 1e-06,
113
+ "rope_scaling": {
114
+ "mrope_section": [
115
+ 16,
116
+ 24,
117
+ 24
118
+ ],
119
+ "rope_type": "default",
120
+ "type": "default"
121
+ },
122
+ "rope_theta": 1000000.0,
123
+ "sliding_window": 32768,
124
+ "tie_word_embeddings": false,
125
+ "torch_dtype": "bfloat16",
126
+ "transformers_version": "4.49.0.dev0",
127
+ "use_cache": false,
128
+ "use_sliding_window": false,
129
+ "video_token_id": 151656,
130
+ "vision_config": {
131
+ "hidden_size": 1280,
132
+ "in_chans": 3,
133
+ "intermediate_size": 3456,
134
+ "model_type": "qwen2_5_vl",
135
+ "out_hidden_size": 8192,
136
+ "spatial_patch_size": 14,
137
+ "tokens_per_second": 2,
138
+ "torch_dtype": "float32"
139
+ },
140
+ "vision_end_token_id": 151653,
141
+ "vision_start_token_id": 151652,
142
+ "vision_token_id": 151654,
143
+ "vocab_size": 152064
144
+ }
145
+
146
+ Filename: added_tokens.json
147
+ Content: {
148
+ "</tool_call>": 151658,
149
+ "<tool_call>": 151657,
150
+ "<|box_end|>": 151649,
151
+ "<|box_start|>": 151648,
152
+ "<|endoftext|>": 151643,
153
+ "<|file_sep|>": 151664,
154
+ "<|fim_middle|>": 151660,
155
+ "<|fim_pad|>": 151662,
156
+ "<|fim_prefix|>": 151659,
157
+ "<|fim_suffix|>": 151661,
158
+ "<|im_end|>": 151645,
159
+ "<|im_start|>": 151644,
160
+ "<|image_pad|>": 151655,
161
+ "<|object_ref_end|>": 151647,
162
+ "<|object_ref_start|>": 151646,
163
+ "<|quad_end|>": 151651,
164
+ "<|quad_start|>": 151650,
165
+ "<|repo_name|>": 151663,
166
+ "<|video_pad|>": 151656,
167
+ "<|vision_end|>": 151653,
168
+ "<|vision_pad|>": 151654,
169
+ "<|vision_start|>": 151652
170
+ }
171
+
172
+ Filename: special_tokens_map.json
173
+ Content: {
174
+ "additional_special_tokens": [
175
+ "<|im_start|>",
176
+ "<|im_end|>",
177
+ "<|object_ref_start|>",
178
+ "<|object_ref_end|>",
179
+ "<|box_start|>",
180
+ "<|box_end|>",
181
+ "<|quad_start|>",
182
+ "<|quad_end|>",
183
+ "<|vision_start|>",
184
+ "<|vision_end|>",
185
+ "<|vision_pad|>",
186
+ "<|image_pad|>",
187
+ "<|video_pad|>"
188
+ ],
189
+ "eos_token": {
190
+ "content": "<|im_end|>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false
195
+ },
196
+ "pad_token": {
197
+ "content": "<|endoftext|>",
198
+ "lstrip": false,
199
+ "normalized": false,
200
+ "rstrip": false,
201
+ "single_word": false
202
+ }
203
+ }
204
+
205
+ Filename: preprocessor_config.json
206
+ Content: {
207
+ "do_convert_rgb": true,
208
+ "do_normalize": true,
209
+ "do_rescale": true,
210
+ "do_resize": true,
211
+ "image_mean": [
212
+ 0.48145466,
213
+ 0.4578275,
214
+ 0.40821073
215
+ ],
216
+ "image_processor_type": "Qwen2_5_VLImageProcessor",
217
+ "image_std": [
218
+ 0.26862954,
219
+ 0.26130258,
220
+ 0.27577711
221
+ ],
222
+ "max_pixels": 12845056,
223
+ "merge_size": 2,
224
+ "min_pixels": 3136,
225
+ "patch_size": 14,
226
+ "processor_class": "Qwen2_5_VLProcessor",
227
+ "resample": 3,
228
+ "rescale_factor": 0.00392156862745098,
229
+ "size": {
230
+ "max_pixels": 12845056,
231
+ "min_pixels": 3136
232
+ },
233
+ "temporal_patch_size": 2
234
+ }
235
+
236
+ Filename: model.safetensors.index.json
237
+ Content: Content of the file is larger than 50 KB, too long to display.
238
+
239
+ Filename: trainer_state.json
240
+ Content: Content of the file is larger than 50 KB, too long to display.
241
+
242
+ Filename: tokenizer.json
243
+ Content: Content of the file is larger than 50 KB, too long to display.
244
+
245
+ Filename: chat_template.json
246
+ Content: {
247
+ "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system
248
+ You are a helpful assistant.<|im_end|>
249
+ {% endif %}<|im_start|>{{ message['role'] }}
250
+ {% if message['content'] is string %}{{ message['content'] }}<|im_end|>
251
+ {% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>
252
+ {% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant
253
+ {% endif %}"
254
+ }
255
+
256
+ Filename: tokenizer_config.json
257
+ Content: {
258
+ "add_bos_token": false,
259
+ "add_prefix_space": false,
260
+ "added_tokens_decoder": {
261
+ "151643": {
262
+ "content": "<|endoftext|>",
263
+ "lstrip": false,
264
+ "normalized": false,
265
+ "rstrip": false,
266
+ "single_word": false,
267
+ "special": true
268
+ },
269
+ "151644": {
270
+ "content": "<|im_start|>",
271
+ "lstrip": false,
272
+ "normalized": false,
273
+ "rstrip": false,
274
+ "single_word": false,
275
+ "special": true
276
+ },
277
+ "151645": {
278
+ "content": "<|im_end|>",
279
+ "lstrip": false,
280
+ "normalized": false,
281
+ "rstrip": false,
282
+ "single_word": false,
283
+ "special": true
284
+ },
285
+ "151646": {
286
+ "content": "<|object_ref_start|>",
287
+ "lstrip": false,
288
+ "normalized": false,
289
+ "rstrip": false,
290
+ "single_word": false,
291
+ "special": true
292
+ },
293
+ "151647": {
294
+ "content": "<|object_ref_end|>",
295
+ "lstrip": false,
296
+ "normalized": false,
297
+ "rstrip": false,
298
+ "single_word": false,
299
+ "special": true
300
+ },
301
+ "151648": {
302
+ "content": "<|box_start|>",
303
+ "lstrip": false,
304
+ "normalized": false,
305
+ "rstrip": false,
306
+ "single_word": false,
307
+ "special": true
308
+ },
309
+ "151649": {
310
+ "content": "<|box_end|>",
311
+ "lstrip": false,
312
+ "normalized": false,
313
+ "rstrip": false,
314
+ "single_word": false,
315
+ "special": true
316
+ },
317
+ "151650": {
318
+ "content": "<|quad_start|>",
319
+ "lstrip": false,
320
+ "normalized": false,
321
+ "rstrip": false,
322
+ "single_word": false,
323
+ "special": true
324
+ },
325
+ "151651": {
326
+ "content": "<|quad_end|>",
327
+ "lstrip": false,
328
+ "normalized": false,
329
+ "rstrip": false,
330
+ "single_word": false,
331
+ "special": true
332
+ },
333
+ "151652": {
334
+ "content": "<|vision_start|>",
335
+ "lstrip": false,
336
+ "normalized": false,
337
+ "rstrip": false,
338
+ "single_word": false,
339
+ "special": true
340
+ },
341
+ "151653": {
342
+ "content": "<|vision_end|>",
343
+ "lstrip": false,
344
+ "normalized": false,
345
+ "rstrip": false,
346
+ "single_word": false,
347
+ "special": true
348
+ },
349
+ "151654": {
350
+ "content": "<|vision_pad|>",
351
+ "lstrip": false,
352
+ "normalized": false,
353
+ "rstrip": false,
354
+ "single_word": false,
355
+ "special": true
356
+ },
357
+ "151655": {
358
+ "content": "<|image_pad|>",
359
+ "lstrip": false,
360
+ "normalized": false,
361
+ "rstrip": false,
362
+ "single_word": false,
363
+ "special": true
364
+ },
365
+ "151656": {
366
+ "content": "<|video_pad|>",
367
+ "lstrip": false,
368
+ "normalized": false,
369
+ "rstrip": false,
370
+ "single_word": false,
371
+ "special": true
372
+ },
373
+ "151657": {
374
+ "content": "<tool_call>",
375
+ "lstrip": false,
376
+ "normalized": false,
377
+ "rstrip": false,
378
+ "single_word": false,
379
+ "special": false
380
+ },
381
+ "151658": {
382
+ "content": "</tool_call>",
383
+ "lstrip": false,
384
+ "normalized": false,
385
+ "rstrip": false,
386
+ "single_word": false,
387
+ "special": false
388
+ },
389
+ "151659": {
390
+ "content": "<|fim_prefix|>",
391
+ "lstrip": false,
392
+ "normalized": false,
393
+ "rstrip": false,
394
+ "single_word": false,
395
+ "special": false
396
+ },
397
+ "151660": {
398
+ "content": "<|fim_middle|>",
399
+ "lstrip": false,
400
+ "normalized": false,
401
+ "rstrip": false,
402
+ "single_word": false,
403
+ "special": false
404
+ },
405
+ "151661": {
406
+ "content": "<|fim_suffix|>",
407
+ "lstrip": false,
408
+ "normalized": false,
409
+ "rstrip": false,
410
+ "single_word": false,
411
+ "special": false
412
+ },
413
+ "151662": {
414
+ "content": "<|fim_pad|>",
415
+ "lstrip": false,
416
+ "normalized": false,
417
+ "rstrip": false,
418
+ "single_word": false,
419
+ "special": false
420
+ },
421
+ "151663": {
422
+ "content": "<|repo_name|>",
423
+ "lstrip": false,
424
+ "normalized": false,
425
+ "rstrip": false,
426
+ "single_word": false,
427
+ "special": false
428
+ },
429
+ "151664": {
430
+ "content": "<|file_sep|>",
431
+ "lstrip": false,
432
+ "normalized": false,
433
+ "rstrip": false,
434
+ "single_word": false,
435
+ "special": false
436
+ }
437
+ },
438
+ "additional_special_tokens": [
439
+ "<|im_start|>",
440
+ "<|im_end|>",
441
+ "<|object_ref_start|>",
442
+ "<|object_ref_end|>",
443
+ "<|box_start|>",
444
+ "<|box_end|>",
445
+ "<|quad_start|>",
446
+ "<|quad_end|>",
447
+ "<|vision_start|>",
448
+ "<|vision_end|>",
449
+ "<|vision_pad|>",
450
+ "<|image_pad|>",
451
+ "<|video_pad|>"
452
+ ],
453
+ "bos_token": null,
454
+ "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system
455
+ You are a helpful assistant.<|im_end|>
456
+ {% endif %}<|im_start|>{{ message['role'] }}
457
+ {% if message['content'] is string %}{{ message['content'] }}<|im_end|>
458
+ {% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>
459
+ {% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant
460
+ {% endif %}",
461
+ "clean_up_tokenization_spaces": false,
462
+ "eos_token": "<|im_end|>",
463
+ "errors": "replace",
464
+ "extra_special_tokens": {},
465
+ "model_max_length": 131072,
466
+ "pad_token": "<|endoftext|>",
467
+ "padding_side": "right",
468
+ "processor_class": "Qwen2_5_VLProcessor",
469
+ "split_special_tokens": false,
470
+ "tokenizer_class": "Qwen2Tokenizer",
471
+ "unk_token": null
472
+ }