{
  "dataset": {
    "dataset_name": "MSVD_QA",
    "q_json_path": "/home/user/students/ml/dataset/MSVD_Zero_Shot_QA/test_q.json",
    "a_json_path": "/home/user/students/ml/dataset/MSVD_Zero_Shot_QA/test_a.json",
    "video_path": "/home/user/students/ml/dataset/MSVD_Zero_Shot_QA/videos",
    "data_type": "video",
    "bound": false,
    "question_key": "question",
    "answer_key": "answer",
    "name_key": "video_name",
    "video_postfix": [".avi"],
    "num_segments": 8
  },
  "model": {
    "model_path": "/home/user/students/ml/model/LLaVA-NeXT-Video-7B-DPO-hf"
  },
  "conv_mode": "llava_next_video_template",
  "save_path": "./result/eval/msvd_qa",
  "experiment_name": "test",
  "description": "simple inference using llava_next_video with llava_next_video_template in msvd",
  "batch_size": 4
}