# 17data/VQA_model/matlab_model/correlation_analysis.py
import pandas as pd
import numpy as np
from scipy.stats import spearmanr, kendalltau, pearsonr
import os
import sys
import argparse
def read_txt_scores(file_path):
    """Read scores from a txt file."""
    scores = {}
    video_keys = set()
    video_scores_list = []  # list that preserves the original order
    try:
        with open(file_path, 'r') as f:
            for line in f:
                parts = line.strip().split(',')
                if len(parts) == 2:
                    video_name = parts[0]
                    try:
                        score = float(parts[1])
                        scores[video_name] = score
                        video_keys.add(video_name)
                        video_scores_list.append((video_name, score))
                        # Also store the video name without its path (for matching)
                        base_name = os.path.basename(video_name)
                        if base_name.endswith('.mp4'):
                            scores[base_name] = score
                            video_keys.add(base_name)
                            # Version without the extension
                            scores[base_name[:-4]] = score
                            video_keys.add(base_name[:-4])
                    except ValueError:
                        print(f"Skipping invalid score: {parts[1]} for {video_name}")
    except Exception as e:
        print(f"Error reading txt file: {e}")
    # Count actual entries rather than video_keys, which also contains aliases
    print(f"Read scores for {len(video_scores_list)} videos from {file_path}")
    return scores, video_scores_list
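# Expected input format: one "name,score" pair per line. The paths and values
# below are an assumed illustration, not taken from the actual dataset:
#
#     folder/video_0001.mp4,3.75
#     folder/video_0002.mp4,2.10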
def extract_video_name(name):
    """Extract the base video name from a path to make matching easier."""
    if isinstance(name, str):
        # Normalize the different possible formats
        base_name = os.path.basename(name)
        # Strip the extension
        if base_name.endswith('.mp4'):
            return base_name[:-4]
        return base_name
    return str(name)
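# Illustrative behaviour (doctest-style; file names are assumptions):
#
#     >>> extract_video_name('some/dir/clip.mp4')
#     'clip'
#     >>> extract_video_name('clip.avi')
#     'clip.avi'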
def calculate_correlations(txt_scores_list, xlsx_df):
    """Compute SRCC, KRCC, and PLCC between the txt scores and each model column."""
    # The model names are the column names of the xlsx file
    models = xlsx_df.columns.tolist()
    # Initialize the results dict
    results = {
        'SRCC': {},
        'KRCC': {},
        'PLCC': {}
    }
    # A list containing only the txt scores, in their original order
    txt_scores = [score for _, score in txt_scores_list]
    # Check whether the counts match
    print(f"Number of TXT scores: {len(txt_scores)}")
    print(f"Number of XLSX rows: {xlsx_df.shape[0]}")
    if len(txt_scores) != xlsx_df.shape[0]:
        print(f"Warning: number of TXT scores ({len(txt_scores)}) does not match number of XLSX rows ({xlsx_df.shape[0]})")
        # On a mismatch, use only the smaller of the two counts
        min_count = min(len(txt_scores), xlsx_df.shape[0])
        print(f"Using the first {min_count} data points")
        txt_scores = txt_scores[:min_count]
        xlsx_df = xlsx_df.iloc[:min_count, :]
    # Compute the correlation coefficients for each model
    for model in models:
        print(f"\nProcessing model: {model}")
        # Reset to a positional index so rows line up with the txt score order
        model_series = xlsx_df[model].reset_index(drop=True)
        # Skip NaN values, pairing each remaining score with its txt counterpart
        # so that both lists are guaranteed to have equal length
        paired = [(txt_scores[i], model_series[i])
                  for i in model_series.dropna().index if i < len(txt_scores)]
        txt_model_scores = [t for t, _ in paired]
        model_scores = [m for _, m in paired]
        valid_count = len(paired)
        print(f"Model {model}: number of valid data points = {valid_count}")
        # Check that there are enough valid data points
        if valid_count > 1:  # at least 2 points are needed for a correlation
            # SRCC - Spearman rank-order correlation coefficient
            srcc, p_srcc = spearmanr(txt_model_scores, model_scores)
            results['SRCC'][model] = srcc
            # KRCC - Kendall rank correlation coefficient
            krcc, p_krcc = kendalltau(txt_model_scores, model_scores)
            results['KRCC'][model] = krcc
            # PLCC - Pearson linear correlation coefficient
            plcc, p_plcc = pearsonr(txt_model_scores, model_scores)
            results['PLCC'][model] = plcc
            print(f"  SRCC={srcc:.4f} (p={p_srcc:.4f}), KRCC={krcc:.4f} (p={p_krcc:.4f}), PLCC={plcc:.4f} (p={p_plcc:.4f})")
            # Print the first few data points as a sanity check
            print(f"  First 5 data points (TXT score vs {model} score):")
            for i in range(min(5, valid_count)):
                print(f"    {txt_model_scores[i]:.2f} vs {model_scores[i]:.2f}")
        else:
            print(f"Warning: model {model} does not have enough valid data points for correlation")
            results['SRCC'][model] = np.nan
            results['KRCC'][model] = np.nan
            results['PLCC'][model] = np.nan
    return results
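# A minimal sanity check for the three coefficients (illustrative toy data,
# not from this project): on perfectly monotone inputs all three equal 1.0.
#
#     >>> from scipy.stats import spearmanr, kendalltau, pearsonr
#     >>> a, b = [1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0]
#     >>> spearmanr(a, b)[0], kendalltau(a, b)[0], pearsonr(a, b)[0]
#     (1.0, 1.0, 1.0)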
def main():
    # Set up command-line arguments
    parser = argparse.ArgumentParser(description='Compute correlation coefficients between a TXT file and an XLSX file')
    parser.add_argument('--txt', type=str, default='text.txt', help='path to the TXT file')
    parser.add_argument('--xlsx', type=str, default='score.xlsx', help='path to the XLSX file')
    parser.add_argument('--output', type=str, default='correlation_results.csv', help='path to the output CSV file')
    args = parser.parse_args()
    # Read the txt file
    txt_file = args.txt
    txt_scores, txt_scores_list = read_txt_scores(txt_file)
    # Check whether the count is close to the expected 3857
    unique_videos = set()
    for key in txt_scores.keys():
        if '/' in key or '\\' in key:
            unique_videos.add(key)
    print(f"Number of unique videos in the TXT file: {len(unique_videos)}")
    if abs(len(unique_videos) - 3857) > 10:
        print(f"Warning: the number of unique videos in the txt file ({len(unique_videos)}) differs noticeably from the expected 3857")
    # Read the xlsx file
    xlsx_file = args.xlsx
    try:
        # Read the xlsx file, using the first row as column names
        xlsx_df = pd.read_excel(xlsx_file)
        print(f"Read {xlsx_df.shape[0]} rows and {xlsx_df.shape[1]} columns from {xlsx_file}")
        # Check the xlsx file format
        if xlsx_df.shape[1] < 6:
            print(f"Warning: the xlsx file should have 6 model columns, but only {xlsx_df.shape[1]} were found")
        # Preview the first few rows and columns
        print("\nPreview of the first 5 rows:")
        print(xlsx_df.head())
        # List of column names
        all_columns = xlsx_df.columns.tolist()
        print(f"\nAll column names: {all_columns}")
        # Check whether the 6 model columns are present
        print(f"Found {len(all_columns)} model columns: {', '.join(all_columns)}")
        # Analyze missing values
        na_counts = xlsx_df.isna().sum()
        print("\nMissing-value counts per model:")
        for col in xlsx_df.columns:
            print(f"  {col}: {na_counts[col]}")
        # Compute the correlation coefficients
        correlations = calculate_correlations(txt_scores_list, xlsx_df)
        # Collect the results into a DataFrame, built once instead of
        # concatenating row by row inside the loop
        print("\n=== Correlation results ===")
        rows = []
        for model in correlations['SRCC'].keys():
            rows.append({
                'Model': model,
                'SRCC': correlations['SRCC'].get(model, np.nan),
                'KRCC': correlations['KRCC'].get(model, np.nan),
                'PLCC': correlations['PLCC'].get(model, np.nan),
            })
        result_df = pd.DataFrame(rows, columns=['Model', 'SRCC', 'KRCC', 'PLCC'])
        # Print the detailed results
        print(result_df.to_string(index=False, float_format=lambda x: f"{x:.4f}" if not np.isnan(x) else "NaN"))
        # Save the results to CSV
        result_df.to_csv(args.output, index=False)
        print(f"\nResults saved to {args.output}")
    except Exception as e:
        print(f"Error processing the xlsx file: {e}")
        import traceback
        traceback.print_exc()
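# Example invocation, matching the argparse defaults above:
#
#     python correlation_analysis.py --txt text.txt --xlsx score.xlsx \
#         --output correlation_results.csv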
def process_all_txt_files():
    """Compute correlations between every TXT file found and the xlsx file."""
    txt_files = [f for f in os.listdir('.') if f.endswith('.txt')]
    xlsx_file = 'score.xlsx'
    if not os.path.exists(xlsx_file):
        print(f"Error: XLSX file {xlsx_file} not found")
        return
    all_results = {}
    for txt_file in txt_files:
        print(f"\n===== Processing file: {txt_file} =====")
        try:
            # Temporarily rewrite sys.argv to pass arguments to main()
            old_argv = sys.argv
            output_file = f"correlation_results_{os.path.splitext(txt_file)[0]}.csv"
            sys.argv = ['', f'--txt={txt_file}', f'--xlsx={xlsx_file}', f'--output={output_file}']
            try:
                # Run the main function
                main()
            finally:
                # Restore sys.argv even if main() raises
                sys.argv = old_argv
            # Read the results back and collect them for merging
            if os.path.exists(output_file):
                results = pd.read_csv(output_file)
                all_results[txt_file] = results
                print(f"Loaded results file: {output_file}")
            else:
                print(f"Warning: results file {output_file} not found")
        except Exception as e:
            print(f"Error processing file {txt_file}: {e}")
            import traceback
            traceback.print_exc()
    # Merge all results
    if all_results:
        print(f"\nMerging {len(all_results)} results files")
        combined_results = pd.DataFrame()
        for txt_file, results in all_results.items():
            file_base = os.path.splitext(txt_file)[0]
            if 'Model' in results.columns:
                # Rename the metric columns to distinguish each txt file's results
                renamed_cols = {col: f'{col}_{file_base}' for col in results.columns if col != 'Model'}
                tmp_results = results.rename(columns=renamed_cols)
                if combined_results.empty:
                    combined_results = tmp_results
                else:
                    combined_results = pd.merge(combined_results, tmp_results, on='Model')
        # Save the merged results
        if not combined_results.empty:
            combined_results.to_csv('all_correlation_results.csv', index=False)
            print("\nAll results merged and saved to all_correlation_results.csv")
        else:
            print("\nWarning: no results to merge")
    else:
        print("\nWarning: no results files available to merge")
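# In the merged CSV each metric column is suffixed with its source file's base
# name, e.g. SRCC_text / KRCC_text / PLCC_text for an input named text.txt
# (the file name here is illustrative only).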
if __name__ == "__main__":
    # Check whether more than one txt file is present
    txt_files = [f for f in os.listdir('.') if f.endswith('.txt')]
    if len(txt_files) > 1:
        print(f"Multiple TXT files detected: {txt_files}")
        process_all_txt_files()
    else:
        main()