# FraleyLabAttachmentBot / ChatAttachmentAnalysis.py
# (Header metadata captured from the Hugging Face file viewer — uploaded by
# AjithKSenthil, revision 840f6e0, 2.3 kB. Kept here as comments so the file
# remains valid Python.)
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error
# Train a random-forest regressor that predicts attachment-style scores
# (five avoidance and five anxiety sub-scales) from chat-transcript
# embeddings stored alongside the scores in a CSV file.

# Read your data file
datafile_path = "data/chat_transcripts_with_embeddings_and_scores.csv"
df = pd.read_csv(datafile_path)

# The 'embedding' column is serialized as a string like "[0.12, -0.3, ...]";
# parse each entry back into a list of floats so it can be used as features.
df['embedding'] = df['embedding'].apply(
    lambda x: [float(num) for num in x.strip('[]').split(',')]
)

# Split the data into features (X) and labels (y).
# X: one embedding vector per transcript, stacked into a 2-D array
#    (n_samples, embedding_dim) — the shape scikit-learn expects.
# y: the ten attachment sub-scale scores per transcript.
X = np.vstack(df['embedding'].values)
y = df[['avoide', 'avoida', 'avoidb', 'avoidc', 'avoidd',
        'anxietye', 'anxietya', 'anxietyb', 'anxietyc', 'anxietyd']].values

# Split data into training and testing sets (80/20, seeded for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Train the regression model.
# random_state=42 makes the forest itself deterministic — previously only the
# split was seeded, so repeated runs reported different metrics.
rfr = RandomForestRegressor(n_estimators=100, random_state=42)
rfr.fit(X_train, y_train)

# Make predictions on the held-out test data
preds = rfr.predict(X_test)

# Evaluate the model: both metrics are averaged over all ten target columns.
mse = mean_squared_error(y_test, preds)
mae = mean_absolute_error(y_test, preds)
print(f"Chat transcript embeddings performance: mse={mse:.2f}, mae={mae:.2f}")
# Mean Squared Error (MSE) is a measure of how close a fitted line is to data points.
# In the context of this task, a lower MSE means that our model's predicted attachment scores are closer to the true scores.
# An MSE of 1.32 means the average squared difference between the predicted and actual scores is 1.32.
# Note: if the scores were truly normalized to the [0, 1] range, the MSE could not exceed 1.0,
# so either the scores are not normalized as assumed or the error is extremely large — worth verifying.
# Either way, the model's predictions are substantially off from the true values.
# Mean Absolute Error (MAE) is another measure of error in our predictions.
# It's the average absolute difference between the predicted and actual scores.
# An MAE of 0.96 means that, on average, a predicted attachment score is off by 0.96 from the true score.
# For scores assumed to lie between 0 and 1 this is close to the maximum possible error, which
# indicates that the model's predictions are not accurate (or that the scores are not actually normalized).
# Both MSE and MAE are loss functions that we want to minimize. Lower values for both indicate better model performance.
# In general, the lower these values, the better the model's predictions are.