# temp/human_corr.py
# Uploaded by Kaiyue via huggingface_hub (commit 56b30d7, verified).
#Pearson correlation coefficient
import argparse
import numpy as np
import json
import pandas as pd
import scipy.stats
import os
import math
def compute_correlation(set1, set2):
    """Return the Pearson correlation coefficient between two 1-D score arrays."""
    return np.corrcoef(set1, set2)[0, 1]
def compute_max_correlation(fix, var):  # fix/var shaped (num_images, num_raters), e.g. (350, 3)
    """Greedily pick, per image, the human rating that maximizes Pearson correlation.

    Starting from the first rater's scores, each candidate human score
    ``var[i, j]`` is tried in turn for every image ``i``; a candidate is kept
    only if it strictly increases the Pearson correlation against the
    automatic scores ``fix[:, 0]``.

    Fixes vs. the previous version: ``num_images`` is derived from ``var``
    instead of an undefined global; the working vector is restored when a
    candidate does not improve the correlation (previously the last candidate
    tried was always left in place); the returned score array no longer
    aliases the scratch buffer mid-mutation; and the ``max`` builtin is no
    longer shadowed.

    Args:
        fix: array-like of shape (num_images, k); only column 0 (the automatic
            scores) is used.
        var: array-like of shape (num_images, num_raters) of human scores.

    Returns:
        (best_corr, selected_index, selected_score): the best correlation
        found, the chosen rater column per image, and the resulting
        human-score vector.
    """
    fix = np.asarray(fix, dtype=float)
    var = np.asarray(var, dtype=float)
    num_images, num_raters = var.shape
    set1 = fix[:, 0].flatten()
    # Start from rater 0's scores for every image.
    selected_score = var[:, 0].copy()
    best_corr = np.corrcoef(set1, selected_score)[0, 1]
    selected_index = np.zeros(num_images)
    for i in range(num_images):
        kept = selected_score[i]
        for j in range(num_raters):
            selected_score[i] = var[i, j]
            corr = np.corrcoef(set1, selected_score)[0, 1]
            if corr > best_corr:
                best_corr = corr
                kept = var[i, j]
                selected_index[i] = j
        selected_score[i] = kept  # restore the best choice found for image i
    return best_corr, selected_index, selected_score
# Normalizing function using Min-Max scaling
def min_max_normalize(scores):
    """Rescale *scores* linearly so the minimum maps to 0 and the maximum to 1."""
    lo, hi = np.min(scores), np.max(scores)
    return (scores - lo) / (hi - lo)
def scale_dict_values(input_dict):
    """Linearly rescale every value of *input_dict* into the range [1, 5].

    The smallest value maps to 1 and the largest to 5; relative spacing is
    preserved. A new dict with the same keys is returned.
    """
    values = list(input_dict.values())
    lo, hi = min(values), max(values)
    # value -> 1 + (value - lo) * (5 - 1) / (hi - lo)
    return {key: 1 + (value - lo) * (5 - 1) / (hi - lo)
            for key, value in input_dict.items()}
def parse_args(argv=None):
    """Parse command-line arguments for the correlation script.

    Args:
        argv: Optional list of argument strings. When None (the default),
            argparse falls back to ``sys.argv[1:]`` — so existing callers of
            ``parse_args()`` are unaffected; passing an explicit list makes
            the function testable.

    Returns:
        argparse.Namespace with ``auto_evaluation`` and ``human_evaluation``
        CSV paths.
    """
    parser = argparse.ArgumentParser(description="Demo")
    parser.add_argument(
        "--auto_evaluation",
        type=str,
        help="Path to the automatic-evaluation CSV (needs 'name' and 'Score' columns).",
    )
    parser.add_argument(
        "--human_evaluation",
        type=str,
        help="Path to the human (AMT) evaluation CSV.",
        default="/group/xihuiliu/sky/T2V-Compbench/user_study/human_eval/consistent attribute_human_corrected_kaiyue.csv",  # ok final
    )
    return parser.parse_args(argv)
if __name__ == "__main__":
    # Correlate automatic evaluation scores (one CSV) with human AMT scores
    # (another CSV) per video, reporting Pearson / Kendall / Spearman both over
    # all ratings and over per-video averages.
    args = parse_args()
    auto_evaluation = args.auto_evaluation
    # Candidate model names; the last path segment of each video name is
    # matched against this list. "gt" rows have no auto score and are skipped
    # by the try/except below.
    method = ["videocrafter2", "latte", "opensora11", "opensoraplan", "show1", "modelscope", "gt"]
    dic_human = {} # store by model name, for calculating avg score for each model
    dic_auto = {}# store by model name
    dic_score_category = {}
    dic_human_avg = {} # store by video name, for calculating human corr with avg score
    dic_auto_avg = {} # store by video name
    # hky
    # get AMT score
    # Define the path to your CSV file
    csv_file = args.human_evaluation
    # Read the CSV file into a pandas DataFrame
    data_frame = pd.read_csv(csv_file)
    # Human-eval CSVs name the video column differently across batches, so
    # fall back from 'Input.video' to 'Input.url'.
    try:
        column_index = data_frame.columns.get_loc('Input.video')
    except:
        column_index = data_frame.columns.get_loc('Input.url')
    column_name = data_frame.iloc[:,column_index]
    column_name = column_name.to_numpy()
    # Same fallback for the human score column.
    try:
        score_index = data_frame.columns.get_loc('Answer.score')
    except:
        score_index = data_frame.columns.get_loc('Answer.correctness_answer')
    score_AMT = data_frame.iloc[:,score_index]
    score_AMT = score_AMT.to_numpy()
    score_dic = {} # to store {method: {id: score,... },... }
    # read from auto eval
    csv_file_auto = args.auto_evaluation
    data_frame_auto = pd.read_csv(csv_file_auto)
    # read key = id, value = Score from csv
    # Build {video-id-without-extension: auto score}.
    for i in range(len(data_frame_auto)):
        # print(data_frame_auto.iloc[i])
        # score_dic[(data_frame_auto.iloc[i]['name'].split(".")[0])] = float(data_frame_auto.iloc[i][-1])
        score_dic[(data_frame_auto.iloc[i]['name'].split(".")[0])] = float(data_frame_auto.iloc[i]['Score'])
        # score_dic[(data_frame_auto[0].split(".")[0])] = float(data_frame_auto.iloc[i]['Score'])
        # score_dic[(data_frame_auto.iloc[i]['name'])] = float(data_frame_auto.iloc[i]['Score'])
        # score_dic[(data_frame_auto.iloc[i]['id'])] = float(data_frame_auto.iloc[i]['Score'])
    # # read from human eval's csv (col. "Input.video"), store {method: [score,...],...}
    for me in range(len(method)): # initialize
        dic_human[method[me]] = []
        dic_auto[method[me]] = []
        dic_score_category[method[me]] = []
    print(score_dic)
    record = np.empty(0)
    score_human = np.empty(0) # for calculating human corr with all scores
    score_norm = np.empty(0)
    score_human_avg = np.empty(0) #f or calculating human corr with avg score
    score_norm_avg = np.empty(0)
    # Pre-create one (human, auto) bucket per distinct video name.
    for i in range(len(column_name)): # initialize
        c = column_name[i]
        c = c.replace(".mp4","")
        dic_human_avg[c] = []
        dic_auto_avg[c] = []
    # import pdb; pdb.set_trace()
    # score_dic = scale_dict_values(score_dic) # TODO rescale to 1~5
    # Collect paired scores. Human scores are divided by 5 to map the 1-5 AMT
    # scale onto roughly [0, 1].
    for i in range(len(column_name)):
        c = column_name[i]
        c = c.replace(".mp4","")
        s = score_AMT[i]
        method_name = c.split("_")[-1]
        # hky
        index_name = method.index(method_name)
        # print(method_name, index_num)
        # score_norm = np.append(score_norm, score_dic[method_name][index_num])
        # print(score_dic)
        # print(c)
        # print(score_dic[c])
        # NOTE(review): the bare except silently drops any video missing from
        # score_dic — intended for "gt" rows, but it would also hide genuine
        # key mismatches.
        try:
            score_norm = np.append(score_norm, score_dic[c]) #TODO round or not
            # import pdb; pdb.set_trace()
            # auto_score = round(score_dic[c])
            # score_norm = np.append(score_norm, auto_score)
            score_human = np.append(score_human, (s/5))
            record = np.append(record, c)
            dic_human[method_name].append(s/5) # length 150
            dic_auto[method_name].append(score_dic[c]) #
            dic_score_category[method_name].append(score_dic[c])
            dic_human_avg[c].append(s/5)
            dic_auto_avg[c].append(score_dic[c]) #TODO round or not
            # dic_auto_avg[c].append(auto_score)
        except: # not for gt
            continue
    # import pdb; pdb.set_trace()
    # Average the (typically 3) human ratings per video; keep auto scores in
    # the same video order for the "avg" correlations below.
    for k, v in dic_human_avg.items():
        score_human_avg = np.append(score_human_avg, np.average(dic_human_avg[k]))
        score_norm_avg = np.append(score_norm_avg, np.average(dic_auto_avg[k]))
    Pearson_correlation_coefficient_all = compute_correlation(score_human,score_norm) #TODO change score_human, score_norm
    print("Pearson correlation coefficient all:",f'{Pearson_correlation_coefficient_all: .4}')
    score_diff = np.empty(0)
    # Normalize the scores to the range [0, 1], and pick the larger different score
    score_norm_norm = min_max_normalize(score_norm)
    score_human_norm = min_max_normalize(score_human)
    # Calculate the threshold
    threshold = np.mean(np.abs(score_human_norm - score_norm_norm))
    # Iterate through the scores and apply the condition
    # Flag samples whose normalized auto/human gap exceeds twice the mean gap.
    for i in range(len(score_norm)):
        score_diff = np.append(score_diff, score_human_norm[i] - score_norm_norm[i])
        if abs(score_norm_norm[i] - score_human_norm[i]) > threshold * 2:
            score_diff = np.append(score_diff, [score_norm[i], score_human[i]])
            print("Not aligned:", i, "score ori:", score_norm[i], "score from AMT", score_human[i])
    print("##############################")
    # score_diff = np.reshape(score_diff,(num_images,3))
    # best_index = np.argmin(score_diff, axis=1)
    # #score_diff_best = np.choose(best_index,score_diff.T)
    # score_norm_reshape = np.reshape(score_norm,(num_images,3))
    # score_norm_best = np.choose(best_index,score_norm_reshape.T)
    # score_human_reshape = np.reshape(score_human,(num_images,3))
    # score_human_best = np.choose(best_index,score_human_reshape.T) * 5 #best human score
    # Pearson_correlation_coefficient_abs_diff = compute_correlation(score_human_best,score_norm_best)
    # print("Pearson correlation coefficient with min abs diff:",f'{Pearson_correlation_coefficient_abs_diff: .4}')
    # Pearson_correlation_coefficient_search_max,selected_human_index,selected_human_score = compute_max_correlation(score_norm_reshape, score_human_reshape) #(350,3)
    # print("Pearson correlation coefficient with search max:",f'{Pearson_correlation_coefficient_search_max: .4}')
    # score_norm_average = np.average(score_norm_reshape, axis=1)
    # score_human_average = np.average(score_human_reshape, axis=1)
    # Final report: Pearson / Kendall / Spearman over all ratings and over
    # per-video averages (Pearson-all is printed a second time here).
    Pearson_correlation_coefficient_all = compute_correlation(score_norm, score_human)
    print("Pearson correlation coefficient all:",f'{Pearson_correlation_coefficient_all: .4}')
    Pearson_correlation_coefficient_avg = compute_correlation(score_norm_avg, score_human_avg)
    print("Pearson correlation coefficient avg:",f'{Pearson_correlation_coefficient_avg: .4}')
    kendalltau_all,p_value_all = scipy.stats.kendalltau(score_human, score_norm)
    print("kendall's Tau_all: ", f'{kendalltau_all: .4}', " p_value_all: ", f'{p_value_all: .4}')
    # kendalltau_abs_diff,p_value_abs_diff = scipy.stats.kendalltau(score_human_best,score_norm_best)
    # print("kendall's Tau with abs diff: ", f'{kendalltau_abs_diff: .4}', " p_value: ", f'{p_value_abs_diff: .4}')
    #kendalltau_search_max = scipy.stats.kendalltau(x, y)
    kendalltau_avg, p_value_avg = scipy.stats.kendalltau(score_human_avg, score_norm_avg)
    print("kendall's Tau avg: ", f'{kendalltau_avg: .4}', " p_value: ", f'{p_value_avg: .4}')
    spearmanr_all = scipy.stats.spearmanr(score_human,score_norm)
    print("spearmanr's rho all", f'{spearmanr_all.statistic: .4}', f'{spearmanr_all.pvalue: .4}')
    # spearmanr_abs_diff = scipy.stats.spearmanr(score_human_best, score_norm_best)
    # print("spearmanr's rho with abs diff", f'{spearmanr_abs_diff.statistic: .4}', f'{spearmanr_abs_diff.pvalue: .4}')
    spearmanr_avg = scipy.stats.spearmanr(score_human_avg, score_norm_avg)
    print("spearmanr's rho avg", f'{spearmanr_avg.statistic: .4}', f'{spearmanr_avg.pvalue: .4}')
    #auto avg per model
    print("##############################")
    # import pdb; pdb.set_trace()
    # Per-model mean auto/human scores plus rating counts.
    for i in range(len(method)):
        auto_avg = np.average(dic_auto[method[i]])
        human_avg = np.average(dic_human[method[i]])
        print(method[i], " auto_avg: ", f'{auto_avg: .4}', " human_avg:", f'{human_avg: .4}'," human eval length / method", len(dic_human[method[i]]))
    # for i in range (len(method)):
    # #corr within per model
    # mPearson_all = compute_correlation(dic_human[method[i]], dic_score_category[method[i]])
    # mkendalltau_all, mp_value_all = scipy.stats.kendalltau(dic_human[method[i]], dic_score_category[method[i]])
    # mspearmanr_all = scipy.stats.spearmanr(dic_human[method[i]], dic_score_category[method[i]])
    # m_human_reshape = np.reshape(dic_human[method[i]], (50, 3))
    # m_auto_reshape = np.reshape(dic_score_category[method[i]], (50, 3))
    # m_human_average = np.average(m_human_reshape, axis=1)
    # m_auto_average = np.average(m_auto_reshape, axis=1)
    # mPearson_average = compute_correlation(m_human_average, m_auto_average)
    # mkendalltau_avg, mp_value_avg = scipy.stats.kendalltau(m_human_average, m_auto_average)
    # mspearmanr_avg = scipy.stats.spearmanr(m_human_average, m_auto_average)
    # print(method[i])
    # print(" Pearson all:", f'{mPearson_all: .4}', " Pearson avg", f'{mPearson_average: .4}')
    # print(" kendall all:", f'{mkendalltau_all: .4}', " p value", f'{mp_value_all: .4}',end=" ")
    # print(" spearman all:", f'{mspearmanr_all.statistic: .4}', f'{mspearmanr_all.pvalue: .4}')
    # print(" kendall avg:", f'{mkendalltau_avg: .4}', " p value", f'{mp_value_avg: .4}',end=" ")
    # print(" spearman avg:", f'{mspearmanr_avg.statistic: .4}',f'{mspearmanr_avg.pvalue: .4}')