#%%
import pandas as pd

# Load the cleaned per-trial results for all Study-1 users.
data = pd.read_csv("D:\\NN\\Data\\Study1AllUsers\\Cleaned_TrialResultsFull.csv")

# Per-participant means of the three angular-distance measures
# (HMD, hand, left eye) — one row per participant.
grouped_data = data.groupby(['ParticipantID']).agg({
    'AngularDistanceHMD': 'mean',
    'AngularDistanceHand': 'mean',
    'AngularDistanceLeye': 'mean'
}).reset_index()
print(grouped_data)

output_path = 'D:\\NN\\Data\\Study1AllUsers\\ModalityAnalyse.csv'  # change to your own output path
# index=False: after reset_index() the index is a meaningless RangeIndex;
# writing it would only add an unnamed column to the CSV.
grouped_data.to_csv(output_path, index=False)
# # 创建一个唯一的条件列,将 Depth, Theta, Width, Position 的组合转为单一标识符
# grouped_data['Condition'] = grouped_data['Depth'].astype(str) + '_' + grouped_data['Theta'].astype(str) + '_' + grouped_data['Width'].astype(str) + '_' + grouped_data['Position'].astype(str)
# # 转换数据为宽格式
# wide_data = grouped_data.pivot_table(index='ParticipantID',
# columns='Condition',
# values=['MovementTime', 'AngularDistanceHMD', 'AngularDistanceHand', 'AngularDistanceLeye'])
#
# # 为了更好地兼容性,重命名列
# wide_data.columns = ['_'.join(col).strip() for col in wide_data.columns.values]
#
# # 输出查看转换后的数据
# print(wide_data.head())
#
# # 保存为CSV文件,以便于导入SPSS
# output_path = 'path_to_your_output_file.csv' # 替换为你的输出文件路径
# wide_data.to_csv(output_path)
#%%
import pandas as pd
import numpy as np
from statsmodels.stats.correlation_tools import cov_nearest
from scipy.stats import chi2

# Build a wide (participant x condition) table of mean left-eye angular
# distance, with one column per Depth/Theta/Width/Position combination,
# suitable for import into SPSS-style repeated-measures tools.
data = pd.read_csv("D:\\NN\\Data\\Study1AllUsers\\Cleaned_TrialResultsFull.csv")

# Keep only the identifier, measure, and design-factor columns.
columns = ['ParticipantID', 'BlockID', 'TrialID', 'MovementTime','AngularDistanceHMD','AngularDistanceHand','AngularDistanceLeye', 'Depth', 'Theta', 'Width','Position']
data = data[columns]

# Mean left-eye angular distance per participant in each design cell.
grouped_data = (
    data.groupby(['ParticipantID', 'Depth', 'Theta', 'Width', 'Position'])
        .agg({'AngularDistanceLeye': 'mean'})
        .reset_index()
)

# Collapse the four factor levels into a single condition label,
# e.g. "1_10_4.5_Left".
factor_cols = ['Depth', 'Theta', 'Width', 'Position']
grouped_data['Condition'] = grouped_data[factor_cols].astype(str).agg('_'.join, axis=1)

# Long -> wide: rows are participants, columns are conditions.
wide_data = grouped_data.pivot_table(index='ParticipantID',
                                     columns='Condition',
                                     values=['AngularDistanceLeye'])

# Flatten the (measure, condition) MultiIndex column labels so the CSV
# header is a single row.
wide_data.columns = ['_'.join(pair).strip() for pair in wide_data.columns.values]

print(wide_data.head())

output_path = 'D:\\NN\\Data\\Study1AllUsers\\EyeDistance.csv'  # change to your own output path
wide_data.to_csv(output_path)
# output_path = 'D:\\NN\\Data\\Study1AllUsers\\Cleaned_TrialResultsFull1.csv' # 替换为你的输出文件路径
# grouped_data.to_csv(output_path, index=False)
# grouped_data = data.groupby(['ParticipantID', 'Depth', 'Theta', 'Width', 'Position']).agg({
# 'MovementTime': 'mean',
# 'AngularDistanceHMD': 'mean',
# 'AngularDistanceHand': 'mean',
# 'AngularDistanceLeye': 'mean'
# }).reset_index()
# #%%
# import pandas as pd
# import numpy as np
# from sklearn.linear_model import LinearRegression
# import matplotlib.pyplot as plt
#
# # Correcting the data based on the user's indication
# data_corrected = {
# "Theta": [10, 10, 15, 15, 20, 20, 25, 25, 50, 50, 75, 75],
# "Width": [4.5, 9.0, 4.5, 9.0, 4.5, 9.0, 4.5, 9.0, 4.5, 9.0, 4.5, 9.0],
# "Mean": [704.222126, 508.689598, 797.962563, 560.906088, 904.062486, 646.458888,
# 1183.485047, 796.196496, 1464.353523, 1034.035743, 1728.876132, 1266.901965]
# }
#
#
# model = LinearRegression()
#
# df_corrected = pd.DataFrame(data_corrected)
#
# # Compute the index of difficulty again
# df_corrected['ID'] = np.log2(df_corrected['Theta'] / df_corrected['Width'] + 1)
#
# # Re-run linear regression
# model.fit(df_corrected[['ID']], df_corrected['Mean'])
#
# # Predict values using the fitted model
# df_corrected['Predicted'] = model.predict(df_corrected[['ID']])
# a_corrected = model.intercept_
# b_corrected = model.coef_[0]
#
# # Recalculate R-squared value
# r_squared_corrected = model.score(df_corrected[['ID']], df_corrected['Mean'])
#
# # Plotting the corrected data
# plt.figure(figsize=(16, 9))
# plt.scatter(df_corrected['ID'], df_corrected['Mean'], color='blue', label='Observed Data')
# plt.plot(df_corrected['ID'], df_corrected['Predicted'], color='darkblue', linestyle='dashed', label='Fitted Line')
#
# plt.xlabel('Index of Difficulty (bits)')
# plt.ylabel('Movement Time (ms)')
# plt.grid(True)
# plt.legend()
# plt.text(3.5, 1100, f'R² = {r_squared_corrected:.4f}', fontsize=12)
#
# plt.show(), (a_corrected, b_corrected, r_squared_corrected)
import pandas as pd
#%%
import pandas as pd
from statsmodels.stats.anova import AnovaRM
import statsmodels.api as sm

# Repeated-measures ANOVA of MovementTime with within-subject factors
# Depth, Theta and Width.
data = pd.read_csv("D:\\NN\\Data\\Study1AllUsers\\Cleaned_TrialResultsFull.csv")

# Treat the design factors as categorical (string) levels.
data['Depth'] = data['Depth'].astype(str)
data['Theta'] = data['Theta'].astype(str)
data['Width'] = data['Width'].astype(str)

# Drop excluded participants.
filtered_data = data[~data['ParticipantID'].isin([3, 6, 15, 19, 18, 20, 22])]

# One mean observation per participant per Depth x Theta x Width cell
# (AnovaRM requires exactly one observation per cell).
# numeric_only=True: ignore non-numeric columns instead of raising a
# TypeError (pandas >= 2 no longer drops them silently).
filtered_aggregated_data = filtered_data.groupby(
    ['ParticipantID', 'Depth', 'Theta', 'Width']).mean(numeric_only=True).reset_index()
print(filtered_aggregated_data)

# BUG FIX: AnovaRM(...) only builds the model; .fit() must be called to
# obtain results — the model object itself has no summary()/anova_table
# (the original code omitted .fit() and would raise AttributeError below).
# NOTE(review): statsmodels' AnovaRM does NOT apply a Greenhouse-Geisser
# correction; the original comment claiming one was inaccurate.
rm_anova_results = AnovaRM(filtered_aggregated_data, 'MovementTime', 'ParticipantID',
                           within=['Depth', 'Theta', 'Width']).fit()
print(rm_anova_results.summary())

# Partial eta squared: eta_p^2 = (F * df_num) / (F * df_num + df_den).
anova_table = rm_anova_results.anova_table
anova_table['eta_squared'] = (anova_table['F Value'] * anova_table['Num DF']) / \
                             (anova_table['F Value'] * anova_table['Num DF'] + anova_table['Den DF'])
print(anova_table[['F Value', 'Pr > F', 'eta_squared']])
#%%
from statsmodels.stats.multicomp import pairwise_tukeyhsd

# Post-hoc pairwise comparisons (Tukey HSD) of MovementTime across the
# levels of Theta and of Width, using the per-participant cell means
# computed in the previous cell.
tukey_data = filtered_aggregated_data[['Theta', 'Width', 'MovementTime']]

tukey_result_theta = pairwise_tukeyhsd(
    endog=tukey_data['MovementTime'], groups=tukey_data['Theta'], alpha=0.05)
tukey_result_width = pairwise_tukeyhsd(
    endog=tukey_data['MovementTime'], groups=tukey_data['Width'], alpha=0.05)

# Evaluate both summaries (rendered when run as a notebook cell).
tukey_result_theta.summary(), tukey_result_width.summary()
#%%
# Print both Tukey HSD tables to stdout (Theta first, then Width).
for result in (tukey_result_theta, tukey_result_width):
    print(result.summary())
#%%
def _tukey_within(frame, fixed_factor, varied_factor):
    # Run and print a Tukey HSD of MovementTime over `varied_factor`,
    # separately for each level of `fixed_factor`.
    for level in frame[fixed_factor].unique():
        subset = frame[frame[fixed_factor] == level]
        print(f'Tukey HSD for {fixed_factor} {level}:')
        print(pairwise_tukeyhsd(subset['MovementTime'], subset[varied_factor], alpha=0.05).summary())

# Simple effects: Theta within each Width level, then Width within each
# Theta level.
_tukey_within(filtered_aggregated_data, 'Width', 'Theta')
_tukey_within(filtered_aggregated_data, 'Theta', 'Width')
#%%
import pandas as pd
from statsmodels.stats.anova import AnovaRM

# Repeated-measures ANOVA of AngularDistanceHand with within-subject
# factors Depth, Theta, Width and Position.
# NOTE(review): this cell reads TrialResultsFull.csv, not the cleaned file
# used by the cells above — confirm that is intentional.
data = pd.read_csv("D:\\NN\\Data\\Study1AllUsers\\TrialResultsFull.csv")

# Treat the design factors as categorical (string) levels.
data['Depth'] = data['Depth'].astype(str)
data['Theta'] = data['Theta'].astype(str)
data['Width'] = data['Width'].astype(str)

# Drop excluded participants (same exclusion list as the MovementTime ANOVA).
filtered_data = data[~data['ParticipantID'].isin([3, 6, 15, 19, 18, 20, 22])]

# One mean observation per participant per design cell.
# numeric_only=True: ignore non-numeric columns instead of raising a
# TypeError (pandas >= 2 no longer drops them silently).
filtered_aggregated_data = filtered_data.groupby(
    ['ParticipantID', 'Depth', 'Theta', 'Width', 'Position']).mean(numeric_only=True).reset_index()
print(filtered_aggregated_data)

# Fit the repeated-measures model and show the F-table.
rm_anova_results = AnovaRM(filtered_aggregated_data, 'AngularDistanceHand', 'ParticipantID',
                           within=['Depth', 'Theta', 'Width', 'Position']).fit()
print(rm_anova_results.summary())

# Partial eta squared: eta_p^2 = (F * df_num) / (F * df_num + df_den).
anova_table = rm_anova_results.anova_table
anova_table['eta_squared'] = (anova_table['F Value'] * anova_table['Num DF']) / \
                             (anova_table['F Value'] * anova_table['Num DF'] + anova_table['Den DF'])
print(anova_table[['F Value', 'Pr > F', 'eta_squared']])