The-Shy committed on
Commit
d9db5c6
·
verified ·
1 Parent(s): 685bc5e

Upload trainer.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. trainer.py +6 -4
trainer.py CHANGED
@@ -1,4 +1,5 @@
1
 
 
2
  # coding=utf-8
3
  # Copyright 2020-present the HuggingFace Inc. team.
4
  #
@@ -384,7 +385,7 @@ class Trainer:
384
  f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only "
385
  "computes hidden states and does not accept any labels. You should choose a model with a head "
386
  "suitable for your task like any of the `AutoModelForXxx` listed at "
387
- "https://huggingface.co/docs/transformers/model_doc/auto"
388
  )
389
 
390
  if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
@@ -416,7 +417,7 @@ class Trainer:
416
  if _is_quantized_and_base_model and not _is_peft_model(model):
417
  raise ValueError(
418
  "You cannot perform fine-tuning on purely quantized models. Please attach trainable adapters on top of"
419
- " the quantized model to correctly perform fine-tuning. Please see: https://huggingface.co/docs/transformers/peft"
420
  " for more details"
421
  )
422
  elif _is_quantized_and_base_model and not getattr(model, "_is_quantized_training_enabled", False):
@@ -2805,14 +2806,14 @@ class Trainer:
2805
  #######################################################
2806
  import json
2807
  for i in range(len(data_info_temp)):
2808
- # data_info_temp[i]['loss'] = float(loss[0][i])
2809
  data_info_temp[i]['yes_target_logprob'] = yes_target_logprob
2810
  data_info_temp[i]['logits_shape'] = outputs.logits.shape
2811
 
2812
  from datetime import datetime
2813
  current_time = datetime.now().strftime('%Y_%m_%d')
2814
 
2815
- file_path = '/data/zbz5349/ICLR_2024/ACL_2025/LLaVA_Fliter/inference_demo/cherry_AskLLM_infer_result_' + current_time + '.jsonl'
2816
  with open(file_path, 'a', encoding='utf-8') as file:
2817
  # json.dump(data_info_temp[0], file, ensure_ascii=False, indent=4)
2818
  for content in data_info_temp:
@@ -4088,3 +4089,4 @@ class Trainer:
4088
  ds_plugin.hf_ds_config.trainer_config_process(self.args, auto_find_batch_size)
4089
 
4090
 
 
 
1
 
2
+
3
  # coding=utf-8
4
  # Copyright 2020-present the HuggingFace Inc. team.
5
  #
 
385
  f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only "
386
  "computes hidden states and does not accept any labels. You should choose a model with a head "
387
  "suitable for your task like any of the `AutoModelForXxx` listed at "
388
+ "https://hf-mirror.com/docs/transformers/model_doc/auto"
389
  )
390
 
391
  if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
 
417
  if _is_quantized_and_base_model and not _is_peft_model(model):
418
  raise ValueError(
419
  "You cannot perform fine-tuning on purely quantized models. Please attach trainable adapters on top of"
420
+ " the quantized model to correctly perform fine-tuning. Please see: https://hf-mirror.com/docs/transformers/peft"
421
  " for more details"
422
  )
423
  elif _is_quantized_and_base_model and not getattr(model, "_is_quantized_training_enabled", False):
 
2806
  #######################################################
2807
  import json
2808
  for i in range(len(data_info_temp)):
2809
+ data_info_temp[i]['loss'] = float(loss[0][i])
2810
  data_info_temp[i]['yes_target_logprob'] = yes_target_logprob
2811
  data_info_temp[i]['logits_shape'] = outputs.logits.shape
2812
 
2813
  from datetime import datetime
2814
  current_time = datetime.now().strftime('%Y_%m_%d')
2815
 
2816
+ file_path = '/data/zbz5349/ICLR_2024/ACL_2025/LLaVA_Fliter/inference_demo/math_cherry_AskLLM_infer_result_' + current_time + '.jsonl'
2817
  with open(file_path, 'a', encoding='utf-8') as file:
2818
  # json.dump(data_info_temp[0], file, ensure_ascii=False, indent=4)
2819
  for content in data_info_temp:
 
4089
  ds_plugin.hf_ds_config.trainer_config_process(self.args, auto_find_batch_size)
4090
 
4091
 
4092
+