import pandas as pd
import numpy as np
from scipy.stats import spearmanr, kendalltau, pearsonr
import os
import sys
import argparse
import traceback


def read_txt_scores(file_path):
    """Read per-video scores from a txt file."""
    scores = {}
    video_keys = set()
    video_scores_list = []
    try:
        with open(file_path, 'r') as f:
            for line in f:
                parts = line.strip().split(',')
                if len(parts) == 2:
                    video_name = parts[0]
                    try:
                        score = float(parts[1])
                        scores[video_name] = score
                        video_keys.add(video_name)
                        video_scores_list.append((video_name, score))

                        # Also index the score under the base file name, with
                        # and without the .mp4 extension, so lookups succeed
                        # regardless of how the video is referenced.
                        base_name = os.path.basename(video_name)
                        if base_name.endswith('.mp4'):
                            scores[base_name] = score
                            video_keys.add(base_name)
                            scores[base_name[:-4]] = score
                            video_keys.add(base_name[:-4])
                    except ValueError:
                        print(f"Skipping invalid score: {parts[1]} for {video_name}")
    except Exception as e:
        print(f"Error reading txt file: {e}")

    print(f"Read scores for {len(video_keys)} unique videos from {file_path}")
    return scores, video_scores_list
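
# Expected txt layout, assumed from the parser above: one "path,score" pair
# per line (the paths here are hypothetical), for example
#   /data/videos/clip_0001.mp4,3.75
#   /data/videos/clip_0002.mp4,2.10
# Lines that do not split into exactly two comma-separated fields are skipped.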


def extract_video_name(name):
    """Extract the base video name from a path for easier matching."""
    if isinstance(name, str):
        base_name = os.path.basename(name)
        if base_name.endswith('.mp4'):
            return base_name[:-4]
        return base_name
    return str(name)


def calculate_correlations(txt_scores_list, xlsx_df):
    """Compute SRCC, KRCC, and PLCC between the txt scores and each model column."""
    models = xlsx_df.columns.tolist()

    results = {
        'SRCC': {},
        'KRCC': {},
        'PLCC': {}
    }

    # The txt scores are matched to the xlsx rows by position.
    txt_scores = [score for _, score in txt_scores_list]

    print(f"Number of TXT scores: {len(txt_scores)}")
    print(f"Number of XLSX rows: {xlsx_df.shape[0]}")

    if len(txt_scores) != xlsx_df.shape[0]:
        print(f"Warning: number of TXT scores ({len(txt_scores)}) does not match "
              f"the number of XLSX rows ({xlsx_df.shape[0]})")
        min_count = min(len(txt_scores), xlsx_df.shape[0])
        print(f"Using the first {min_count} data points for the calculation")
        txt_scores = txt_scores[:min_count]
        xlsx_df = xlsx_df.iloc[:min_count, :]

    for model in models:
        print(f"\nProcessing model: {model}")
        model_series = xlsx_df[model].dropna()

        # Keep only indices present in both sources so the two score lists
        # stay aligned pairwise; filtering just one side would silently
        # misalign the correlation inputs.
        valid_indices = [i for i in model_series.index if i < len(txt_scores)]
        model_scores = model_series.loc[valid_indices].tolist()
        txt_model_scores = [txt_scores[i] for i in valid_indices]

        valid_count = len(txt_model_scores)
        print(f"Model {model}: number of valid data points = {valid_count}")

        if valid_count > 1:
            srcc, p_srcc = spearmanr(txt_model_scores, model_scores)
            results['SRCC'][model] = srcc

            krcc, p_krcc = kendalltau(txt_model_scores, model_scores)
            results['KRCC'][model] = krcc

            plcc, p_plcc = pearsonr(txt_model_scores, model_scores)
            results['PLCC'][model] = plcc

            print(f"  SRCC={srcc:.4f} (p={p_srcc:.4f}), KRCC={krcc:.4f} "
                  f"(p={p_krcc:.4f}), PLCC={plcc:.4f} (p={p_plcc:.4f})")

            print(f"  First 5 data points (TXT score vs {model} score):")
            for i in range(min(5, valid_count)):
                print(f"    {txt_model_scores[i]:.2f} vs {model_scores[i]:.2f}")
        else:
            print(f"Warning: model {model} does not have enough valid data points "
                  f"to compute correlations")
            results['SRCC'][model] = np.nan
            results['KRCC'][model] = np.nan
            results['PLCC'][model] = np.nan

    return results
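

# Minimal sanity-check sketch (hypothetical data, not part of the pipeline):
# shows the shapes calculate_correlations expects -- a positional list of
# (video_name, score) tuples and a DataFrame with one column per model.
def _demo_calculate_correlations():
    toy_txt = [('a.mp4', 1.0), ('b.mp4', 2.0), ('c.mp4', 3.5), ('d.mp4', 2.5)]
    toy_df = pd.DataFrame({
        'ModelA': [1.2, 2.1, 3.4, 2.6],  # roughly monotone with the toy scores
        'ModelB': [4.0, 3.0, 1.0, 2.0],  # roughly anti-correlated
    })
    return calculate_correlations(toy_txt, toy_df)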


def main():
    parser = argparse.ArgumentParser(
        description='Compute correlation coefficients between a TXT file and an XLSX file')
    parser.add_argument('--txt', type=str, default='text.txt', help='path to the TXT file')
    parser.add_argument('--xlsx', type=str, default='score.xlsx', help='path to the XLSX file')
    parser.add_argument('--output', type=str, default='correlation_results.csv',
                        help='path to the output CSV file')
    args = parser.parse_args()

    txt_file = args.txt
    txt_scores, txt_scores_list = read_txt_scores(txt_file)

    # Only keys containing a path separator are full paths; the base-name
    # aliases added by read_txt_scores are excluded from the unique count.
    unique_videos = set()
    for key in txt_scores.keys():
        if '/' in key or '\\' in key:
            unique_videos.add(key)

    print(f"Number of unique videos in the TXT file: {len(unique_videos)}")
    if abs(len(unique_videos) - 3857) > 10:
        print(f"Warning: the number of unique videos in the txt file "
              f"({len(unique_videos)}) differs substantially from the expected 3857")

    xlsx_file = args.xlsx
    try:
        xlsx_df = pd.read_excel(xlsx_file)
        print(f"Read {xlsx_df.shape[0]} rows and {xlsx_df.shape[1]} columns from {xlsx_file}")

        if xlsx_df.shape[1] < 6:
            print(f"Warning: the xlsx file should have 6 model columns, "
                  f"but only {xlsx_df.shape[1]} were found")

        print("\nPreview of the first 5 rows:")
        print(xlsx_df.head())

        all_columns = xlsx_df.columns.tolist()
        print(f"\nAll column names: {all_columns}")
        print(f"Found {len(all_columns)} model columns: {', '.join(all_columns)}")

        na_counts = xlsx_df.isna().sum()
        print("\nMissing values per model:")
        for col in xlsx_df.columns:
            print(f"  {col}: {na_counts[col]}")

        correlations = calculate_correlations(txt_scores_list, xlsx_df)

        # Collect one row per model, then build the result table in one shot
        # rather than concatenating DataFrames inside the loop.
        print("\n=== Correlation results ===")
        rows = []
        for model in correlations['SRCC'].keys():
            rows.append({
                'Model': model,
                'SRCC': correlations['SRCC'].get(model, np.nan),
                'KRCC': correlations['KRCC'].get(model, np.nan),
                'PLCC': correlations['PLCC'].get(model, np.nan),
            })
        result_df = pd.DataFrame(rows, columns=['Model', 'SRCC', 'KRCC', 'PLCC'])

        print(result_df.to_string(index=False,
                                  float_format=lambda x: f"{x:.4f}" if not np.isnan(x) else "NaN"))

        result_df.to_csv(args.output, index=False)
        print(f"\nResults saved to {args.output}")
    except Exception as e:
        print(f"Error processing the xlsx file: {e}")
        traceback.print_exc()


def process_all_txt_files():
    """Correlate every TXT file in the working directory against the xlsx file."""
    txt_files = [f for f in os.listdir('.') if f.endswith('.txt')]
    xlsx_file = 'score.xlsx'

    if not os.path.exists(xlsx_file):
        print(f"Error: XLSX file {xlsx_file} not found")
        return

    all_results = {}

    for txt_file in txt_files:
        print(f"\n===== Processing file: {txt_file} =====")
        try:
            # main() reads its configuration from the command line, so fake
            # argv for the duration of the call and always restore it, even
            # if main() raises.
            old_argv = sys.argv
            output_file = f"correlation_results_{os.path.splitext(txt_file)[0]}.csv"
            sys.argv = ['', f'--txt={txt_file}', f'--xlsx={xlsx_file}', f'--output={output_file}']
            try:
                main()
            finally:
                sys.argv = old_argv

            if os.path.exists(output_file):
                results = pd.read_csv(output_file)
                all_results[txt_file] = results
                print(f"Loaded results file: {output_file}")
            else:
                print(f"Warning: results file {output_file} not found")
        except Exception as e:
            print(f"Error processing file {txt_file}: {e}")
            traceback.print_exc()

    if all_results:
        print(f"\nMerging {len(all_results)} results files")
        combined_results = pd.DataFrame()
        for txt_file, results in all_results.items():
            file_base = os.path.splitext(txt_file)[0]
            if 'Model' in results.columns:
                # Suffix each metric column with the source file name so the
                # merged table keeps one set of columns per txt file.
                renamed_cols = {col: f'{col}_{file_base}' for col in results.columns if col != 'Model'}
                tmp_results = results.rename(columns=renamed_cols)

                if combined_results.empty:
                    combined_results = tmp_results
                else:
                    combined_results = pd.merge(combined_results, tmp_results, on='Model')

        if not combined_results.empty:
            combined_results.to_csv('all_correlation_results.csv', index=False)
            print("\nAll results merged and saved to all_correlation_results.csv")
        else:
            print("\nWarning: no results to merge")
    else:
        print("\nWarning: no results files available for merging")


if __name__ == "__main__":
    # With several txt files in the working directory, score each of them
    # against score.xlsx; otherwise run the single-file path.
    txt_files = [f for f in os.listdir('.') if f.endswith('.txt')]
    if len(txt_files) > 1:
        print(f"Multiple TXT files detected: {txt_files}")
        process_all_txt_files()
    else:
        main()
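
# Usage sketch: the file names below are just the argparse defaults above,
# and the script name is a placeholder.
#   python compute_correlations.py --txt text.txt --xlsx score.xlsx \
#       --output correlation_results.csv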