# NOTE: stripped non-Python residue from the hosting page (uploader avatar line,
# "Add files using upload-large-folder tool", commit "ba9f51f verified").
import pandas as pd
import argparse
import os
import numpy as np
from scipy.ndimage import gaussian_filter1d
from sklearn.preprocessing import MinMaxScaler
# This script removes some trials from the raw files, performs feature
# engineering, and then attaches labels.
def angle_between_vectors(v1, v2):
    """Return the angle between two vectors, in degrees.

    Uses the numerically stable half-angle form 2*atan2(|u-v|, |u+v|) on the
    normalized inputs, which avoids the precision loss of arccos(dot) near
    0 and 180 degrees.

    Args:
        v1, v2: array-like vectors of equal dimension.

    Returns:
        Angle in degrees in [0, 180]. Returns 0.0 when either vector has
        zero norm (the original divided by zero and propagated NaN).
    """
    n1 = np.linalg.norm(v1)
    n2 = np.linalg.norm(v2)
    if n1 == 0 or n2 == 0:
        # Degenerate input: direction is undefined, report no rotation.
        return 0.0
    u = v1 / n1
    w = v2 / n2
    angle = 2 * np.arctan2(np.linalg.norm(u - w), np.linalg.norm(u + w))
    return np.degrees(angle)
def smooth_velocity(velocity, sigma=5):
    """Apply a 1-D Gaussian filter (stddev ``sigma`` samples) to a velocity series."""
    smoothed = gaussian_filter1d(velocity, sigma=sigma)
    return smoothed
def calculate_angular_velocity(forward_vectors, timestamps):
    """Compute per-sample angular velocity (deg/s) from forward vectors.

    Args:
        forward_vectors: (N, 3) array-like of forward direction vectors.
        timestamps: length-N array of timestamps in milliseconds.

    Returns:
        Length-N array; element 0 is 0 because no velocity exists at the
        first sample. Returns ``[0]`` when fewer than two samples are given.
    """
    # Angle between each consecutive pair of vectors. The range already stops
    # before the last index, so the original's `if i + 1 < len(...) else 0`
    # guard was dead code and has been removed.
    angles = np.array([
        angle_between_vectors(forward_vectors[i], forward_vectors[i + 1])
        for i in range(len(forward_vectors) - 1)
    ])
    times = np.diff(timestamps) / 1000  # ms -> s
    if len(times) > 0:
        # Prepend 0: there is no velocity estimate at the first timestamp.
        return np.insert(angles / times, 0, 0)
    # A single sample (or empty input) admits no velocity estimate.
    return np.array([0])
def calculate_linear_velocity(positions, timestamps):
    """Compute per-sample linear speed from positions and timestamps (ms).

    Element 0 is 0: no velocity is defined at the first timestamp.
    """
    step_vectors = np.diff(positions, axis=0)
    step_lengths = np.linalg.norm(step_vectors, axis=1)
    dt_seconds = np.diff(timestamps) / 1000
    speeds = step_lengths / dt_seconds
    return np.insert(speeds, 0, 0)
def calculate_direction(start_position, current_position):
    """Return the unit vector pointing from start to current position.

    Returns the zero vector when the two positions coincide (norm 0).
    """
    displacement = current_position - start_position
    magnitude = np.linalg.norm(displacement)
    if magnitude == 0:
        return np.zeros_like(displacement)
    return displacement / magnitude
def calculate_acceleration(velocities, timestamps):
    """Compute per-sample acceleration from velocities and timestamps (ms).

    Args:
        velocities: length-N array-like of speeds.
        timestamps: length-N array-like of timestamps in milliseconds.

    Returns:
        Length-N float array; element 0 is 0 by convention (no acceleration
        estimate at the first sample).
    """
    velocities = np.asarray(velocities, dtype=float)
    timestamps = np.asarray(timestamps, dtype=float)
    # Vectorized replacement for the original element-wise Python loop:
    # a[i] = (v[i] - v[i-1]) / ((t[i] - t[i-1]) / 1000)
    dt_seconds = np.diff(timestamps) / 1000
    accels = np.diff(velocities) / dt_seconds
    return np.insert(accels, 0, 0.0)
## Feature engineering: add angular velocity and rotation angle for the three
## modalities, plus hand linear velocity and travel distance.
def process_single_sequence(df):
    """Process one grouped trial DataFrame in place and return it.

    For each modality (HMD, Hand, Leye) whose columns are present, adds:
      - ``{m}A``: rotation angle from the trial's initial forward vector (deg)
      - ``{m}AV``: Gaussian-smoothed angular velocity (deg/s)
      - ``{m}AAcc``: angular acceleration derived from the smoothed velocity
    For HMD and Hand additionally:
      - ``{m}L``: distance travelled from the initial position
      - ``{m}LV``: Gaussian-smoothed linear velocity
      - ``{m}LAcc``: linear acceleration
    For Hand only: normalized rotation-axis and direction components (X/Y/Z).
    """
    # Timestamps for the whole trial (milliseconds — divided by 1000 in the
    # velocity helpers).
    timestamps = df['TimeStamp'].values
    # All modalities whose orientation columns we may process.
    modalities = ['HMD', 'Hand', 'Leye']
    for modality in modalities:
        # Angular features: require ForwardVX/VY/VZ columns for the modality.
        if all(f'{modality}ForwardV{i}' in df.columns for i in ['X', 'Y', 'Z']):
            forward_vectors = df[[f'{modality}ForwardVX', f'{modality}ForwardVY', f'{modality}ForwardVZ']].values
            initial_forward_vector = forward_vectors[0]
            # Rotation angle of every sample relative to the first sample.
            df[f'{modality}A'] = [(angle_between_vectors(initial_forward_vector, fv)) for fv in forward_vectors]
            angular_velocity = calculate_angular_velocity(forward_vectors, timestamps)
            smoothed_angular_velocity = smooth_velocity(angular_velocity)
            df[f'{modality}AV'] = smoothed_angular_velocity
            df[f'{modality}AAcc'] = calculate_acceleration(smoothed_angular_velocity, timestamps)
            # Rotation axis (Hand only), normalized to keep direction only.
            if modality == 'Hand':
                # NOTE(review): initial_forward_vector is already set above —
                # this reassignment is redundant but harmless.
                initial_forward_vector = forward_vectors[0]
                rotation_axes = np.array([np.cross(initial_forward_vector, fv) for fv in forward_vectors])
                # Zero-norm axes (parallel vectors) map to the zero vector.
                rotation_axes_normalized = np.array(
                    [axis / np.linalg.norm(axis) if np.linalg.norm(axis) != 0 else np.array([0.0, 0.0, 0.0]) for axis in
                     rotation_axes]
                )
                df[f'{modality}RotationAxis_X'] = rotation_axes_normalized[:, 0]
                df[f'{modality}RotationAxis_Y'] = rotation_axes_normalized[:, 1]
                df[f'{modality}RotationAxis_Z'] = rotation_axes_normalized[:, 2]
        # Positional features: HMD and Hand only, require PositionX/Y/Z columns.
        if modality in ['HMD', 'Hand'] and all(f'{modality}Position{i}' in df.columns for i in ['X', 'Y', 'Z']):
            positions = df[[f'{modality}PositionX', f'{modality}PositionY', f'{modality}PositionZ']].values
            initial_position = positions[0]
            linear_velocity = calculate_linear_velocity(positions, timestamps)
            # Straight-line distance of each sample from the starting position.
            df[f'{modality}L'] = [np.linalg.norm(pos-initial_position) for pos in positions]
            smoothed_velocity = smooth_velocity(linear_velocity)
            df[f'{modality}LV'] = smoothed_velocity
            df[f'{modality}LAcc'] = calculate_acceleration(smoothed_velocity, timestamps)  # acceleration feature
            # Movement direction (Hand only): unit vector from start position.
            if modality == 'Hand':
                start_position = positions[0]
                directions = np.array([calculate_direction(start_position, pos) for pos in positions])
                df[f'{modality}Direction_X'] = directions[:, 0]
                df[f'{modality}Direction_Y'] = directions[:, 1]
                df[f'{modality}Direction_Z'] = directions[:, 2]
    return df
def label_trials_with_motion_metrics(df):
    """Attach per-trial regression labels to every row.

    ``LLabel`` is the trial's total hand linear distance and ``ALabel`` its
    total hand angular distance; both are read from the trial's last row
    (HandL/HandA accumulate over the trial) and broadcast across the trial.
    """
    def _attach_totals(trial):
        # The last row of a trial carries the cumulative totals.
        trial['LLabel'] = trial['HandL'].iloc[-1]
        trial['ALabel'] = trial['HandA'].iloc[-1]
        return trial

    # group_keys=False keeps the original row index on the result.
    return df.groupby(['ParticipantID', 'BlockID', 'TrialID'], group_keys=False).apply(_attach_totals)
def split_data_by_theta_grouped(df):
    """Split trial sequences 80/20 into train/test, stratified by Theta.

    NOTE(review): this definition is DEAD CODE — an identical function of the
    same name is defined again immediately below and shadows this one at
    import time. Kept byte-identical here; consider deleting one copy.
    """
    grouped = df.groupby(['ParticipantID', 'BlockID', 'TrialID'])
    theta_groups = {}
    # Bucket each trial group under its Theta value.
    for _, group in grouped:
        theta_value = group['Theta'].iloc[0]
        if theta_value not in theta_groups:
            theta_groups[theta_value] = []
        theta_groups[theta_value].append(group)
    train_dfs = []
    test_dfs = []
    for theta_value, groups in theta_groups.items():
        # Training-set size for this Theta bucket (80%).
        n_train = int(len(groups) * 0.8)
        # Randomly pick the training sequences.
        np.random.seed(1)  # reseed every bucket for reproducibility
        train_indices = np.random.choice(len(groups), size=n_train, replace=False)
        train_groups = [groups[i] for i in train_indices]
        # Remaining sequences form the test set.
        test_groups = [groups[i] for i in range(len(groups)) if i not in train_indices]
        # Concatenate the selected sequences.
        train_df = pd.concat(train_groups)
        test_df = pd.concat(test_groups)
        train_dfs.append(train_df)
        test_dfs.append(test_df)
    # Merge all buckets into the final train/test DataFrames.
    final_train_df = pd.concat(train_dfs).sort_index()
    final_test_df = pd.concat(test_dfs).sort_index()
    return final_train_df, final_test_df
def split_data_by_theta_grouped(df):
    """Split trial sequences 80/20 into train/test, stratified by Theta.

    Each (ParticipantID, BlockID, TrialID) group is assigned wholly to one
    side so no trial is split across train and test. The RNG is reseeded per
    Theta bucket with the same seed as the original, so the exact selection
    is preserved.

    Args:
        df: labelled DataFrame with ParticipantID/BlockID/TrialID/Theta columns.

    Returns:
        (train_df, test_df) tuple, each sorted by the original row index.
        A side may be empty (e.g. a Theta bucket with a single trial yields
        no training trials); the original crashed on ``pd.concat([])`` there.
    """
    trial_groups = df.groupby(['ParticipantID', 'BlockID', 'TrialID'])
    # Bucket every trial under its Theta value (constant within a trial).
    theta_groups = {}
    for _, trial in trial_groups:
        theta_groups.setdefault(trial['Theta'].iloc[0], []).append(trial)
    train_dfs, test_dfs = [], []
    for groups in theta_groups.values():
        n_train = int(len(groups) * 0.8)
        # Reseed per bucket — matches the original's reproducible selection.
        np.random.seed(1)
        train_indices = np.random.choice(len(groups), size=n_train, replace=False)
        # Set lookup is O(1); `i not in <ndarray>` scanned the array per test.
        chosen = set(train_indices.tolist())
        train_groups = [groups[i] for i in train_indices]
        test_groups = [g for i, g in enumerate(groups) if i not in chosen]
        # Guard empty selections: pd.concat([]) raises ValueError.
        if train_groups:
            train_dfs.append(pd.concat(train_groups))
        if test_groups:
            test_dfs.append(pd.concat(test_groups))
    empty = df.iloc[0:0]
    final_train_df = pd.concat(train_dfs).sort_index() if train_dfs else empty
    final_test_df = pd.concat(test_dfs).sort_index() if test_dfs else empty
    return final_train_df, final_test_df
# Define a function that scales the features into the specified ranges.
def preprocess_features(train_df, test_df):
    """Min-max scale the motion features; fit on train, apply to test.

    Signed features (accelerations, which can be negative) are mapped to
    [-1, 1]; non-negative features (angles, velocities, distances) to [0, 1].
    Both DataFrames are modified and returned.
    """
    signed_cols = ['HandAAcc', 'HMDAAcc', "LeyeAAcc", "HandLAcc", "HMDLAcc"]
    unsigned_cols = ["HMDA", "HMDAV", "HandA", "HandAV", "LeyeA", "LeyeAV", "HMDL", "HMDLV", "HandL", "HandLV"]
    # One scaler per column family; fit_transform on train, transform on test.
    for cols, rng in ((signed_cols, (-1, 1)), (unsigned_cols, (0, 1))):
        scaler = MinMaxScaler(feature_range=rng)
        train_df[cols] = scaler.fit_transform(train_df[cols])
        test_df[cols] = scaler.transform(test_df[cols])
    return train_df, test_df
# This function processes the Evaluation dataset.
def main(Participant_ID):
    """Run the full preprocessing pipeline for one participant's evaluation data.

    Reads the raw trajectory CSV, removes error trials and selected TrialIDs,
    engineers features per trial, attaches labels, splits train/test by Theta,
    min-max scales the features, and writes the two preprocessed CSVs.

    Args:
        Participant_ID: participant identifier (string), interpolated into
            the input/output file paths.
    """
    # Input/output paths for this participant.
    input_file_path = f'../Data/Study2Evaluation/{Participant_ID}_Trajectory.csv'
    output_train_path = f'../Data/Study2Evaluation/Preprocessed/{Participant_ID}_train_data_preprocessed_evaluation.csv'
    output_test_path = f'../Data/Study2Evaluation/Preprocessed/{Participant_ID}_test_data_preprocessed_evaluation.csv'
    data=pd.read_csv(input_file_path)
    # Drop 'Unnamed: ...' artifact columns left by earlier CSV round-trips.
    data_cleaned = data.loc[:, ~data.columns.str.contains('^Unnamed')]
    # Keep only error-free trials.
    data_no_error = data_cleaned[data_cleaned['isError'] == False]
    # Exclude TrialIDs 0, 6, 12, 18, 24, 30, 36 — presumably practice or
    # block-boundary trials for this study; TODO confirm with study protocol.
    final_data = data_no_error[(data_no_error['TrialID'] != 0) & (data_no_error['TrialID'] != 6) & (data_no_error['TrialID'] != 12)
                               & (data_no_error['TrialID'] != 18) & (data_no_error['TrialID'] != 24) & (data_no_error['TrialID'] != 30) & (data_no_error['TrialID'] != 36)]
    # Feature engineering per trial group.
    processed_groups = final_data.groupby(['ParticipantID','BlockID', 'TrialID']).apply(process_single_sequence)
    processed_df = processed_groups.reset_index(drop=True)
    # Attach per-trial labels to the engineered data.
    df_with_features_labelled = label_trials_with_motion_metrics(processed_df.copy())
    final_train_df, final_test_df = split_data_by_theta_grouped(df_with_features_labelled)
    # Feature scaling (fit on train, transform test).
    final_train_df_preprocessed, final_test_df_preprocessed = preprocess_features(final_train_df.copy(), final_test_df.copy())
    # Persist the processed datasets to CSV.
    final_train_df_preprocessed.to_csv(output_train_path, index=False)
    final_test_df_preprocessed.to_csv(output_test_path, index=False)
# def main(Participant_ID):
# # 读入数据
# input_file_path = f'../Data/{Participant_ID}_Trajectory.csv'
# output_train_path = f'../Data/Study1AllUSers/Preprocessed/{Participant_ID}_train_data_preprocessed.csv'
# output_test_path = f'../Data/Study1AllUSers/Preprocessed/{Participant_ID}_test_data_preprocessed.csv'
# data = pd. read_csv(input_file_path)
# participant_id = int(os.path.basename(input_file_path).split('_')[0])
# data.insert(0, 'ParticipantID', participant_id)
# data_cleaned = data.loc[:, ~data.columns.str.contains('^Unnamed')]
# data_no_error = data_cleaned[data_cleaned['isError'] == False]
# cleaned_data_path = '../Data/cleaned_data_trimmed.xlsx'
# cleaned_data = pd.read_excel(cleaned_data_path)
# filtered_data = pd.merge(data_no_error, cleaned_data, on=['ParticipantID', 'BlockID', 'TrialID'], how='inner')
# final_data = filtered_data[(filtered_data['TrialID'] != 0) & (filtered_data['TrialID'] != 8) & (filtered_data['TrialID'] != 16) & (filtered_data['TrialID'] != 24)]
# # 特征工程
# processed_groups = final_data.groupby(['ParticipantID','BlockID', 'TrialID']).apply(process_single_sequence)
# processed_df = processed_groups.reset_index(drop=True)
# # 为处理完的数据添加标签
# df_with_features_labelled = label_trials_with_motion_metrics(processed_df.copy())
# final_train_df, final_test_df = split_data_by_theta_grouped(df_with_features_labelled)
# # 特征预处理
# final_train_df_preprocessed, final_test_df_preprocessed = preprocess_features(final_train_df.copy(), final_test_df.copy())
# # 保存处理后的数据集到CSV文件
# final_train_df_preprocessed.to_csv(output_train_path, index=False)
# final_test_df_preprocessed.to_csv(output_test_path, index=False)
if __name__ == '__main__':
    # Preprocess participant 79 only; widen the range to cover more participants.
    for participant_id in range(79, 80):
        main(str(participant_id))