{"repo": "EducationalTestingService/skll", "pull_number": 467, "instance_id": "EducationalTestingService__skll-467", "issue_numbers": "", "base_commit": "5481d0c5348578e1bff3f2f0c9c4319d8edd96bf", "patch": "diff --git a/skll/experiments.py b/skll/experiments.py\n--- a/skll/experiments.py\n+++ b/skll/experiments.py\n@@ -26,7 +26,6 @@\n \n import ruamel.yaml as yaml\n \n-from prettytable import PrettyTable, ALL\n from six import iterkeys, iteritems  # Python 2/3\n from six.moves import zip\n from sklearn import __version__ as SCIKIT_VERSION\n@@ -37,6 +36,7 @@\n from skll.learner import (Learner, MAX_CONCURRENT_PROCESSES,\n                           _import_custom_learner)\n from skll.version import __version__\n+from tabulate import tabulate\n \n # Check if gridmap is available\n try:\n@@ -876,11 +876,8 @@ def _create_learner_result_dicts(task_results,\n \n         if conf_matrix:\n             labels = sorted(iterkeys(task_results[0][2]))\n-            result_table = PrettyTable([\"\"] + labels + [\"Precision\", \"Recall\",\n-                                                        \"F-measure\"],\n-                                       header=True, hrules=ALL)\n-            result_table.align = 'r'\n-            result_table.float_format = '.3'\n+            headers = [\"\"] + labels + [\"Precision\", \"Recall\", \"F-measure\"]\n+            rows = []\n             for i, actual_label in enumerate(labels):\n                 conf_matrix[i][i] = \"[{}]\".format(conf_matrix[i][i])\n                 label_prec = _get_stat_float(result_dict[actual_label],\n@@ -897,8 +894,13 @@ def _create_learner_result_dicts(task_results,\n                     f_sum_dict[actual_label] += float(label_f)\n                 result_row = ([actual_label] + conf_matrix[i] +\n                               [label_prec, label_recall, label_f])\n-                result_table.add_row(result_row)\n+                rows.append(result_row)\n \n+            result_table = tabulate(rows, \n+                                    headers=headers,\n+                                    stralign=\"right\",\n+                                    floatfmt=\".3f\",\n+                                    tablefmt=\"grid\")\n             result_table_str = '{}'.format(result_table)\n             result_table_str += '\\n(row = reference; column = predicted)'\n             learner_result_dict['result_table'] = result_table_str\n@@ -932,20 +934,19 @@ def _create_learner_result_dicts(task_results,\n         learner_result_dict['fold'] = 'average'\n \n         if result_table:\n-            result_table = PrettyTable([\"Label\", \"Precision\", \"Recall\",\n-                                        \"F-measure\"],\n-                                       header=True)\n-            result_table.align = \"r\"\n-            result_table.align[\"Label\"] = \"l\"\n-            result_table.float_format = '.3'\n+            headers = [\"Label\", \"Precision\", \"Recall\", \"F-measure\"]\n+            rows = []\n             for actual_label in labels:\n                 # Convert sums to means\n                 prec_mean = prec_sum_dict[actual_label] / num_folds\n                 recall_mean = recall_sum_dict[actual_label] / num_folds\n                 f_mean = f_sum_dict[actual_label] / num_folds\n-                result_table.add_row([actual_label] +\n-                                     [prec_mean, recall_mean, f_mean])\n+                rows.append([actual_label] + [prec_mean, recall_mean, f_mean])\n \n+            result_table = tabulate(rows, \n+ 
                                   headers=headers,\n+                                    floatfmt=\".3f\",\n+                                    tablefmt=\"psql\")\n             learner_result_dict['result_table'] = '{}'.format(result_table)\n             learner_result_dict['accuracy'] = accuracy_sum / num_folds\n         else:\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2019-02-22T18:01:17Z"}
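For context: the patch in this record replaces prettytable with tabulate for rendering SKLL's confusion-matrix and averaged-metrics tables. Below is a minimal, self-contained sketch of the same tabulate call pattern; the labels, counts, and metric values are invented for illustration and are not SKLL output.

    from tabulate import tabulate

    # Hypothetical labels and confusion-matrix counts (not from SKLL).
    labels = ["cat", "dog"]
    headers = [""] + labels + ["Precision", "Recall", "F-measure"]
    rows = [
        # Diagonal cells are bracketed, mirroring the patch's formatting.
        ["cat", "[10]", 2, 0.833, 0.833, 0.833],
        ["dog", 1, "[12]", 0.857, 0.923, 0.889],
    ]

    # tabulate() returns the finished table as a string in one call;
    # unlike PrettyTable, there is no table object to configure and mutate.
    print(tabulate(rows, headers=headers, stralign="right",
                   floatfmt=".3f", tablefmt="grid"))

The design trade is a stateful object (PrettyTable's align, float_format, and hrules attributes) for one-shot keyword arguments: tablefmt="grid" approximates the old hrules=ALL ruling between every row, while the second hunk's tablefmt="psql" draws rules only around the header and outer border.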