{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "source": [
        "# Sample Code for Training Multiclass Classroom Management Classification Measures\n",
        "\n",
        "This notebook provides sample code for training multiclass classification measures using the classroom management datasets included in the collection. It is intended as a minimal example for researchers.\n",
        "\n",
        "## Notes\n",
        "This code uses the SimpleTransformers library, and the base model is RoBERTa. You can change the base model or experiment with hyperparameters here, though our paper [1] uses the defaults.\n",
        "\n",
        "\n",
        "## How to Run\n",
        "Download the \"talkmoves.csv\" file in the collection and process it so that you select the \"text\" and \"vs_submove\" columns. Rename the \"vs_submove\" column to \"labels\". You should have two columns, \"text\" and \"labels\". All other files and columns should be used with the binary classification notebook.\n",
        "\n",
        "## Author\n",
        "Mei Tan, EduNLP Lab @ Stanford University Graduate School of Education, 2024\n",
        "\n",
        "[1] Tan, Mei, and Dorottya Demszky. (2025). Do As I Say: What Teachers’ Language Reveals About Classroom\n",
        "Management Practices. (EdWorkingPaper: 23-844). Retrieved from Annenberg Institute at Brown University:\n",
        "https://doi.org/10.26300/9yj6-jn52"
      ],
      "metadata": {
        "id": "Ae09Pa3LQqPg"
      }
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "XlOE7tQVQYHK"
      },
      "outputs": [],
      "source": [
        "import pandas as pd\n",
        "\n",
        "# TODO: fill in the path to the processed talkmoves.csv\n",
        "# (must contain exactly the columns \"text\" and \"labels\" -- see \"How to Run\")\n",
        "train_data = pd.read_csv(\"\")"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Use %pip (not !pip) so the packages are installed into the active kernel.\n",
        "%pip install --upgrade transformers\n",
        "%pip install --upgrade simpletransformers\n",
        "from simpletransformers.classification import ClassificationModel, ClassificationArgs\n",
        "from sklearn.metrics import precision_score, recall_score, f1_score\n",
        "from sklearn.model_selection import KFold, train_test_split\n",
        "from scipy.stats import pearsonr, spearmanr\n",
        "import warnings\n",
        "import numpy as np\n",
        "import pandas as pd\n",
        "from sys import exit\n",
        "import logging\n",
        "import torch\n",
        "import wandb\n",
        "warnings.filterwarnings(\"ignore\")\n",
        "transformers_logger = logging.getLogger(\"transformers\")\n",
        "transformers_logger.setLevel(logging.WARNING)\n",
        "\n",
        "wandbproject = \"\"  # TODO: Weights & Biases project name used for run logging\n",
        "output_dir = \"\"    # TODO: directory where trained models and outputs are written"
      ],
      "metadata": {
        "id": "PYgtnTtHQ6Hx"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "def precision_multiclass(preds, labels):\n",
        "    \"\"\"Return macro-averaged precision; also logs and prints per-class precision.\"\"\"\n",
        "    per_class_precision = precision_score(y_true=labels, y_pred=preds, average=None)\n",
        "    macro_precision = np.mean(per_class_precision)\n",
        "    for i, p in enumerate(per_class_precision):\n",
        "        wandb.log({f\"Precision_Class_{i}\": p})\n",
        "        print(p)\n",
        "\n",
        "    return macro_precision\n",
        "\n",
        "def recall_multiclass(preds, labels):\n",
        "    \"\"\"Return macro-averaged recall; also logs and prints per-class recall.\"\"\"\n",
        "    per_class_recall = recall_score(y_true=labels, y_pred=preds, average=None)\n",
        "    macro_recall = np.mean(per_class_recall)\n",
        "    for i, r in enumerate(per_class_recall):\n",
        "        wandb.log({f\"Recall_Class_{i}\": r})\n",
        "        print(r)\n",
        "\n",
        "    return macro_recall\n",
        "\n",
        "def f1_multiclass(preds, labels):\n",
        "    \"\"\"Return macro-averaged F1; also logs and prints per-class F1.\"\"\"\n",
        "    per_class_f1 = f1_score(y_true=labels, y_pred=preds, average=None)\n",
        "    macro_f1 = np.mean(per_class_f1)\n",
        "    for i, f in enumerate(per_class_f1):\n",
        "        wandb.log({f\"F1_Class_{i}\": f})\n",
        "        print(f)\n",
        "\n",
        "    return macro_f1"
      ],
      "metadata": {
        "id": "bny5vmfZTMin"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "def train_multiclass(label_name, label_col, text_col, train_df, eval_df, output_dir,\n",
        "                     model=\"roberta\",\n",
        "                     num_labels=4,\n",
        "                     num_train_epochs=5,\n",
        "                     train_batch_size=8,\n",
        "                     gradient_accumulation_steps=2,\n",
        "                     max_seq_length=512,\n",
        "                     cross_validate=False,\n",
        "                     balance_labels_oversample=False,\n",
        "                     balance_labels_weights=False,\n",
        "                     weights=None):\n",
        "    \"\"\"Fine-tune a SimpleTransformers classification model.\n",
        "\n",
        "    Args:\n",
        "        label_name: run name; used as the wandb project and in the save folder.\n",
        "        label_col: name of the label column in train_df / eval_df.\n",
        "        text_col: text column name (str) or list of text column names.\n",
        "        train_df, eval_df: training and evaluation DataFrames.\n",
        "        output_dir: parent directory for model checkpoints.\n",
        "        model: Hugging Face model name, e.g. \"roberta-base\".\n",
        "        weights: optional per-class loss weights (with balance_labels_weights).\n",
        "\n",
        "    Returns:\n",
        "        The trained ClassificationModel.\n",
        "    \"\"\"\n",
        "    if balance_labels_oversample:\n",
        "        # Oversample every minority class (with replacement) up to the size\n",
        "        # of the most common class.\n",
        "        most_common = train_df[label_col].value_counts().idxmax()\n",
        "        most_common_df = train_df[train_df[label_col]==most_common]\n",
        "        concat_list = [most_common_df]\n",
        "        for label, group in train_df[train_df[label_col]!=most_common].groupby(label_col):\n",
        "            concat_list.append(group.sample(replace=True, n=len(most_common_df)))\n",
        "        train_df = pd.concat(concat_list)\n",
        "\n",
        "    train_df = train_df.sample(frac=1)  # shuffle rows\n",
        "    save_dir = output_dir + \"/\" + label_name + \"_train_size=\" + str(len(train_df))\n",
        "\n",
        "    model_args = ClassificationArgs()\n",
        "    model_args.reprocess_input_data = True\n",
        "    model_args.overwrite_output_dir = True\n",
        "    model_args.evaluate_during_training = True\n",
        "    # Split the sequence-length budget across text columns only when several\n",
        "    # columns are passed. (The previous code called len() directly on the\n",
        "    # argument, so a plain string column name like \"text\" accidentally\n",
        "    # divided the budget by its character count.)\n",
        "    n_text_cols = len(text_col) if isinstance(text_col, (list, tuple)) else 1\n",
        "    model_args.max_seq_length = int(max_seq_length / n_text_cols)\n",
        "    model_args.num_train_epochs = num_train_epochs\n",
        "    model_args.evaluate_during_training_steps = int(len(train_df) / train_batch_size)  # after each epoch\n",
        "    model_args.save_eval_checkpoints = False\n",
        "    model_args.save_model_every_epoch = False\n",
        "    model_args.wandb_project = label_name\n",
        "    model_args.train_batch_size = train_batch_size\n",
        "    model_args.output_dir = save_dir\n",
        "    model_args.best_model_dir = save_dir + \"/best_model\"\n",
        "    model_args.cache_dir = save_dir + \"/cache\"\n",
        "    model_args.tensorboard_dir = save_dir + \"/tensorboard\"\n",
        "    model_args.regression = num_labels == 1\n",
        "    model_args.gradient_accumulation_steps = gradient_accumulation_steps\n",
        "    model_args.wandb_kwargs = {\"reinit\": True}\n",
        "    model_args.fp16 = False\n",
        "    model_args.fp16_opt_level = \"O0\"\n",
        "    model_args.no_cache = False\n",
        "    model_args.no_save = cross_validate  # skip checkpointing during cross-validation\n",
        "    model_args.save_optimizer_and_scheduler = True\n",
        "\n",
        "    # The model type (e.g. \"roberta\") is the part of the model name before the dash.\n",
        "    if balance_labels_weights:\n",
        "        model = ClassificationModel(model.split(\"-\")[0], model,\n",
        "                                    use_cuda=torch.cuda.is_available(),\n",
        "                                    num_labels=num_labels,\n",
        "                                    args=model_args, weight=weights)\n",
        "    else:\n",
        "        model = ClassificationModel(model.split(\"-\")[0], model,\n",
        "                                    use_cuda=torch.cuda.is_available(),\n",
        "                                    num_labels=num_labels,\n",
        "                                    args=model_args)\n",
        "\n",
        "    train_args = {\"use_multiprocessing\": False,\n",
        "                  \"process_count\": 1,\n",
        "                  \"use_multiprocessing_for_evaluation\": False}\n",
        "    if wandb.run is not None:\n",
        "        wandb.finish()  # close any previous run before starting a new one\n",
        "    model.train_model(train_df,\n",
        "                      eval_df=eval_df,\n",
        "                      precision=precision_multiclass,\n",
        "                      recall=recall_multiclass,\n",
        "                      f1=f1_multiclass,\n",
        "                      args=train_args)\n",
        "    return model"
      ],
      "metadata": {
        "id": "p8HAkZFQTbiy"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "def predict(fname, model_path, model=None,\n",
        "            model_type=\"roberta-base\", predict_list=None,\n",
        "            index_list=None, index_colname=\"index\"):\n",
        "    \"\"\"Predict labels for predict_list and write the predictions to disk.\n",
        "\n",
        "    Writes a TSV file <model_path>/<fname>_preds.txt with the columns\n",
        "    <index_colname>, pred and outputs, and returns the predictions.\n",
        "    \"\"\"\n",
        "    preds, outputs = model.predict(predict_list)\n",
        "    with open(model_path + '/' + fname + '_preds.txt', 'w') as f:\n",
        "        # Bug fix: the header used \"\\\\tpred\\\\outputs\" -- \"\\\\o\" is not a\n",
        "        # recognized escape, so a literal backslash was written instead of\n",
        "        # the intended tab separator.\n",
        "        f.write(f\"{index_colname}\\tpred\\toutputs\\n\")\n",
        "        for index, pred, output in zip(index_list, preds, outputs):\n",
        "            f.write(f\"{index}\\t{pred}\\t{output}\\n\")\n",
        "\n",
        "    return preds"
      ],
      "metadata": {
        "id": "uAdNJygwTzqR"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Sample Train\n",
        "# NOTE: the same DataFrame is used here for training and evaluation; use a\n",
        "# held-out eval split for a real experiment.\n",
        "model = train_multiclass(wandbproject, \"labels\", \"text\", train_data, train_data, output_dir=output_dir,\n",
        "                         model=\"roberta-base\", num_labels=4, balance_labels_oversample=True, cross_validate=False)"
      ],
      "metadata": {
        "id": "YdcCESlqT7zx"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Sample Predict\n",
        "predict_data = pd.read_csv(\"\")  # TODO: path to a CSV with \"text\" and \"index\" columns\n",
        "predict_list = predict_data['text'].tolist()\n",
        "index_list = predict_data['index'].tolist()\n",
        "# Bug fix: fname and model_path were swapped in the original call, which\n",
        "# wrote the predictions file into a non-existent \"predictions/\" directory.\n",
        "preds = predict(\"predictions\", output_dir, model=model, model_type=\"roberta-base\",\n",
        "                predict_list=predict_list, index_list=index_list, index_colname=\"index\")"
      ],
      "metadata": {
        "id": "uRrln6rXUFuo"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}