# lmms-eval task config: lmms_eval/tasks/egothink/egothink_activity.yaml
# (uploaded via huggingface_hub; commit b0c0df0)
# EgoThink "Activity" subtask: free-form generation scored by a GPT judge.
dataset_name: "Activity"
task: "egothink_activity"
output_type: generate_until
# Custom !function tags resolve to callables in the sibling utils module.
doc_to_visual: !function utils.egothink_doc_to_visual
doc_to_text: !function utils.egothink_doc_to_text
doc_to_target: !function utils.egothink_doc_to_answer
generation_kwargs:
  max_new_tokens: 30
  temperature: 0.2
  num_beams: 1
  # Canonical lowercase boolean (yamllint `truthy`; portable across YAML 1.1/1.2 parsers).
  do_sample: true
# Note that the metric name can be either a registered metric function (such as the case for GQA) or a key name returned by process_results
metric_list:
  - metric: gpt_eval_score
    aggregation: !function utils.egothink_aggregate_results
    higher_is_better: true
# The return value of process_results will be used by metrics
process_results: !function utils.egothink_process_results
lmms_eval_specific_kwargs:
  default:
    pre_prompt: ""
    post_prompt: ""
include: _default_template_yaml