# Gilfoyle727's picture
# Add files using upload-large-folder tool
# ba9f51f verified
#%%
import pandas as pd

# Load the cleaned trial results.
data = pd.read_csv("D:\\NN\\Data\\Study1AllUsers\\Cleaned_TrialResultsFull.csv")

# Per-participant mean angular distance for each modality
# (HMD / head, hand, left eye).
grouped_data = data.groupby(['ParticipantID']).agg({
    'AngularDistanceHMD': 'mean',
    'AngularDistanceHand': 'mean',
    'AngularDistanceLeye': 'mean'
}).reset_index()
print(grouped_data)

# Save for the modality analysis.
# NOTE(review): the DataFrame index is written as an extra column;
# pass index=False if that is not wanted — confirm with the consumer.
output_path = 'D:\\NN\\Data\\Study1AllUsers\\ModalityAnalyse.csv'  # replace with your output path
grouped_data.to_csv(output_path)
#%%
import pandas as pd
import numpy as np
from statsmodels.stats.correlation_tools import cov_nearest
from scipy.stats import chi2
# NOTE(review): np, cov_nearest and chi2 are not used in this cell —
# verify nothing further down the file relies on them before removing.

# Load the cleaned trial results.
data = pd.read_csv("D:\\NN\\Data\\Study1AllUsers\\Cleaned_TrialResultsFull.csv")

# Keep only the columns relevant to this analysis.
columns = ['ParticipantID', 'BlockID', 'TrialID', 'MovementTime',
           'AngularDistanceHMD', 'AngularDistanceHand', 'AngularDistanceLeye',
           'Depth', 'Theta', 'Width', 'Position']
data = data[columns]

# Mean left-eye angular distance per participant x condition cell.
grouped_data = data.groupby(['ParticipantID', 'Depth', 'Theta', 'Width', 'Position']).agg({
    'AngularDistanceLeye': 'mean'
}).reset_index()

# Collapse the four factors (Depth, Theta, Width, Position) into a single
# condition identifier so each combination becomes one wide-format column.
grouped_data['Condition'] = (grouped_data['Depth'].astype(str) + '_'
                             + grouped_data['Theta'].astype(str) + '_'
                             + grouped_data['Width'].astype(str) + '_'
                             + grouped_data['Position'].astype(str))

# Pivot to wide format: one row per participant, one column per condition.
wide_data = grouped_data.pivot_table(index='ParticipantID',
                                     columns='Condition',
                                     values=['AngularDistanceLeye'])

# Flatten the MultiIndex columns for better compatibility (e.g. import into SPSS).
wide_data.columns = ['_'.join(col).strip() for col in wide_data.columns.values]

# Inspect the transformed data.
print(wide_data.head())

output_path = 'D:\\NN\\Data\\Study1AllUsers\\EyeDistance.csv'  # replace with your output path
wide_data.to_csv(output_path)
#%%
import pandas as pd
from statsmodels.stats.anova import AnovaRM
import statsmodels.api as sm
# NOTE(review): `sm` is unused in this cell — verify before removing.

# Load the cleaned trial results.
data = pd.read_csv("D:\\NN\\Data\\Study1AllUsers\\Cleaned_TrialResultsFull.csv")

# Treat the within-subject factors as categorical (string) labels.
data['Depth'] = data['Depth'].astype(str)
data['Theta'] = data['Theta'].astype(str)
data['Width'] = data['Width'].astype(str)

# Exclude specific participants from the analysis.
filtered_data = data[~data['ParticipantID'].isin([3, 6, 15, 19, 18, 20, 22])]

# Aggregate to one mean per participant x condition cell (required by AnovaRM).
# NOTE(review): .mean() assumes every remaining column is numeric — newer
# pandas raises on non-numeric columns; confirm against the CSV schema.
filtered_aggregated_data = filtered_data.groupby(['ParticipantID', 'Depth', 'Theta', 'Width']).mean().reset_index()
print(filtered_aggregated_data)

# Three-way repeated-measures ANOVA on movement time.
# BUG FIX: the original never called .fit() — AnovaRM itself has no
# .summary()/.anova_table; those live on the fitted results object
# (compare the AngularDistanceHand cell below, which does call .fit()).
# NOTE(review): the original comment mentioned a Greenhouse-Geisser
# correction, but AnovaRM does not apply one — sphericity must be handled
# separately if needed.
rm_anova_results = AnovaRM(filtered_aggregated_data, 'MovementTime', 'ParticipantID',
                           within=['Depth', 'Theta', 'Width']).fit()

# Print the ANOVA summary.
print(rm_anova_results.summary())

# Effect size: partial eta squared = F*df_num / (F*df_num + df_den).
anova_table = rm_anova_results.anova_table
anova_table['eta_squared'] = (anova_table['F Value'] * anova_table['Num DF']) / \
                             (anova_table['F Value'] * anova_table['Num DF'] + anova_table['Den DF'])

# Print the table including eta squared.
print(anova_table[['F Value', 'Pr > F', 'eta_squared']])
#%%
from statsmodels.stats.multicomp import pairwise_tukeyhsd

# Prepare the data for the Tukey HSD tests.
tukey_data = filtered_aggregated_data[['Theta', 'Width', 'MovementTime']]

# Main-effect Tukey HSD for Theta.
tukey_result_theta = pairwise_tukeyhsd(endog=tukey_data['MovementTime'], groups=tukey_data['Theta'], alpha=0.05)
# Main-effect Tukey HSD for Width.
tukey_result_width = pairwise_tukeyhsd(endog=tukey_data['MovementTime'], groups=tukey_data['Width'], alpha=0.05)
# FIX: removed the bare `a.summary(), b.summary()` tuple expression — it only
# renders in a notebook and is a no-op in a script; the summaries are printed
# explicitly in the following cell.
#%%
# Show both post-hoc summaries: Theta first, then Width.
for tukey_result in (tukey_result_theta, tukey_result_width):
    print(tukey_result.summary())
#%%
# Simple-effects follow-up: Tukey HSD of Theta within each Width level,
# then Tukey HSD of Width within each Theta level.
for fixed_factor, compared_factor in (('Width', 'Theta'), ('Theta', 'Width')):
    for level in filtered_aggregated_data[fixed_factor].unique():
        subset = filtered_aggregated_data[filtered_aggregated_data[fixed_factor] == level]
        print(f'Tukey HSD for {fixed_factor} {level}:')
        print(pairwise_tukeyhsd(subset['MovementTime'], subset[compared_factor], alpha=0.05).summary())
#%%
import pandas as pd
from statsmodels.stats.anova import AnovaRM

# Load the raw trial results.
data = pd.read_csv("D:\\NN\\Data\\Study1AllUsers\\TrialResultsFull.csv")

# Treat the experimental factors as categorical (string) labels.
for factor in ('Depth', 'Theta', 'Width'):
    data[factor] = data[factor].astype(str)

# Drop the excluded participants.
filtered_data = data[~data['ParticipantID'].isin([3, 6, 15, 19, 18, 20, 22])]

# One mean per participant x condition cell (required by AnovaRM).
filtered_aggregated_data = (
    filtered_data
    .groupby(['ParticipantID', 'Depth', 'Theta', 'Width', 'Position'])
    .mean()
    .reset_index()
)
print(filtered_aggregated_data)

# Four-way repeated-measures ANOVA on hand angular distance.
rm_anova_results = AnovaRM(
    filtered_aggregated_data, 'AngularDistanceHand', 'ParticipantID',
    within=['Depth', 'Theta', 'Width', 'Position'],
).fit()
print(rm_anova_results.summary())

# Effect size: partial eta squared = F*df_num / (F*df_num + df_den).
anova_table = rm_anova_results.anova_table
numerator = anova_table['F Value'] * anova_table['Num DF']
anova_table['eta_squared'] = numerator / (numerator + anova_table['Den DF'])
print(anova_table[['F Value', 'Pr > F', 'eta_squared']])