Datasets:

ArXiv:
DOI:
License:
Yiran Wang committed on
Commit
a8f2714
·
1 Parent(s): 9a2ca4e

polish crash isolation in some cases

Browse files
benchmark/numpy_7/numpy_7_fixed.ipynb CHANGED
@@ -923,7 +923,6 @@
923
  "# plot_sample(dataset['test_images'][image_idx], classes[dataset['test_labels'][image_idx]], classes[pred])\n",
924
  " plot_sample(dataset['test_images'][image_idx], classes[dataset['test_labels'][image_idx][0]], classes[pred])\n",
925
  "\n",
926
- " # fix 2 by temporary commenting because: need to make sure the model is built after every layer\n",
927
  "# # extracting the output and appending to outputs\n",
928
  "# feature_maps = []\n",
929
  "# for name in layer_names:\n",
 
923
  "# plot_sample(dataset['test_images'][image_idx], classes[dataset['test_labels'][image_idx]], classes[pred])\n",
924
  " plot_sample(dataset['test_images'][image_idx], classes[dataset['test_labels'][image_idx][0]], classes[pred])\n",
925
  "\n",
 
926
  "# # extracting the output and appending to outputs\n",
927
  "# feature_maps = []\n",
928
  "# for name in layer_names:\n",
benchmark/numpy_7/numpy_7_reproduced.ipynb CHANGED
@@ -867,20 +867,20 @@
867
  "\n",
868
  " plot_sample(dataset['test_images'][image_idx], classes[dataset['test_labels'][image_idx]], classes[pred])\n",
869
  "\n",
870
- " # extracting the output and appending to outputs\n",
871
- " feature_maps = []\n",
872
- " for name in layer_names:\n",
873
- " tmp_model = Model(inputs=model.input, outputs=model.get_layer(name).output)\n",
874
- " feature_maps.append(tmp_model.predict(image))\n",
875
- "\n",
876
- " fig, ax = plt.subplots(nrows=len(feature_maps), ncols=num_features, figsize=(20, 20))\n",
877
- " for i in range(len(feature_maps)):\n",
878
- " for z in range(num_features):\n",
879
- " ax[i][z].imshow(feature_maps[i][0, :, :, z])\n",
880
- " ax[i][z].set_title(layer_names[i])\n",
881
- " ax[i][z].set_xticks([])\n",
882
- " ax[i][z].set_yticks([])\n",
883
- " plt.savefig('feature_maps.png')\n",
884
  "\n",
885
  "\n",
886
  "def plot_weights(model):\n",
 
867
  "\n",
868
  " plot_sample(dataset['test_images'][image_idx], classes[dataset['test_labels'][image_idx]], classes[pred])\n",
869
  "\n",
870
+ "# # extracting the output and appending to outputs\n",
871
+ "# feature_maps = []\n",
872
+ "# for name in layer_names:\n",
873
+ "# tmp_model = Model(inputs=model.input, outputs=model.get_layer(name).output)\n",
874
+ "# feature_maps.append(tmp_model.predict(image))\n",
875
+ "\n",
876
+ "# fig, ax = plt.subplots(nrows=len(feature_maps), ncols=num_features, figsize=(20, 20))\n",
877
+ "# for i in range(len(feature_maps)):\n",
878
+ "# for z in range(num_features):\n",
879
+ "# ax[i][z].imshow(feature_maps[i][0, :, :, z])\n",
880
+ "# ax[i][z].set_title(layer_names[i])\n",
881
+ "# ax[i][z].set_xticks([])\n",
882
+ "# ax[i][z].set_yticks([])\n",
883
+ "# plt.savefig('feature_maps.png')\n",
884
  "\n",
885
  "\n",
886
  "def plot_weights(model):\n",
benchmark/numpy_8/numpy_8_fixed.ipynb CHANGED
@@ -2093,7 +2093,7 @@
2093
  "def evaluate_fitness(individual):\n",
2094
  " # Вычислите значения признаков на основе individual\n",
2095
  " \n",
2096
- " # fix 2 --- shape mismatch, undefined model\n",
2097
  "# X_train_gp = transform_gp_structure(individual, X_train) # Вычисление новых признаков на обучающей выборке\n",
2098
  "# rf_model_gp.fit(X_train_gp, y_train) # Обучение модели случайного леса с новыми признаками\n",
2099
  "# X_test_gp = transform_gp_structure(individual, X_test) # Вычисление новых признаков на тестовой выборке\n",
@@ -2121,8 +2121,7 @@
2121
  },
2122
  "outputs": [],
2123
  "source": [
2124
- "# fix 3 --- missing import and variable\n",
2125
- "import random\n",
2126
  "crossover_prob = 0.5\n",
2127
  "mutation_prob = 0.5\n",
2128
  "\n",
 
2093
  "def evaluate_fitness(individual):\n",
2094
  " # Вычислите значения признаков на основе individual\n",
2095
  " \n",
2096
+ " # fix 2 --- shape mismatch\n",
2097
  "# X_train_gp = transform_gp_structure(individual, X_train) # Вычисление новых признаков на обучающей выборке\n",
2098
  "# rf_model_gp.fit(X_train_gp, y_train) # Обучение модели случайного леса с новыми признаками\n",
2099
  "# X_test_gp = transform_gp_structure(individual, X_test) # Вычисление новых признаков на тестовой выборке\n",
 
2121
  },
2122
  "outputs": [],
2123
  "source": [
2124
+ "import random # fix missing import for crash isolation purposes\n",
 
2125
  "crossover_prob = 0.5\n",
2126
  "mutation_prob = 0.5\n",
2127
  "\n",
benchmark/numpy_8/numpy_8_reproduced.ipynb CHANGED
@@ -1202,34 +1202,34 @@
1202
  {
1203
  "data": {
1204
  "text/plain": [
1205
- "['CNT_CHILDREN',\n",
 
1206
  " 'DAYS_BIRTH',\n",
1207
  " 'AMT_INCOME_TOTAL',\n",
1208
- " 'HOUR_APPR_PROCESS_START',\n",
 
 
1209
  " 'AMT_GOODS_PRICE',\n",
1210
- " 'DEF_30_CNT_SOCIAL_CIRCLE',\n",
 
 
 
1211
  " 'AMT_REQ_CREDIT_BUREAU_QRT',\n",
1212
- " 'AMT_REQ_CREDIT_BUREAU_YEAR',\n",
1213
- " 'DEF_60_CNT_SOCIAL_CIRCLE',\n",
1214
- " 'AMT_REQ_CREDIT_BUREAU_HOUR',\n",
1215
  " 'AMT_REQ_CREDIT_BUREAU_MON',\n",
1216
- " 'REGION_RATING_CLIENT_W_CITY',\n",
1217
- " 'EXT_SOURCE_2',\n",
1218
- " 'AMT_REQ_CREDIT_BUREAU_DAY',\n",
1219
- " 'OBS_30_CNT_SOCIAL_CIRCLE',\n",
1220
- " 'DAYS_ID_PUBLISH',\n",
1221
- " 'DAYS_LAST_PHONE_CHANGE',\n",
1222
- " 'REG_CITY_NOT_WORK_CITY',\n",
1223
  " 'DAYS_EMPLOYED',\n",
1224
- " 'REGION_RATING_CLIENT',\n",
1225
- " 'LIVE_CITY_NOT_WORK_CITY',\n",
1226
- " 'REG_REGION_NOT_LIVE_REGION',\n",
1227
- " 'REGION_POPULATION_RELATIVE',\n",
1228
  " 'DAYS_REGISTRATION',\n",
1229
- " 'REG_CITY_NOT_LIVE_CITY',\n",
1230
  " 'EXT_SOURCE_3',\n",
 
 
 
1231
  " 'AMT_ANNUITY',\n",
1232
- " 'AMT_REQ_CREDIT_BUREAU_WEEK']"
 
 
 
 
1233
  ]
1234
  },
1235
  "execution_count": 27,
@@ -2089,6 +2089,7 @@
2089
  "def evaluate_fitness(individual):\n",
2090
  " # Вычислите значения признаков на основе individual\n",
2091
  " X_train_gp = transform_gp_structure(individual, X_train) # Вычисление новых признаков на обучающей выборке\n",
 
2092
  " rf_model_gp.fit(X_train_gp, y_train) # Обучение модели случайного леса с новыми признаками\n",
2093
  " X_test_gp = transform_gp_structure(individual, X_test) # Вычисление новых признаков на тестовой выборке\n",
2094
  " y_pred_gp = rf_model_gp.predict_proba(X_test_gp)[:, 1] # Прогнозирование на тестовой выборке с новыми признаками\n",
@@ -2116,15 +2117,19 @@
2116
  "traceback": [
2117
  "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
2118
  "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
2119
- "\u001b[0;32m<ipython-input-39-c2fff693920c>\u001b[0m in \u001b[0;36m<cell line: 8>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;31m# Оценка фитнеса\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0mfitnesses\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmap\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mevaluate_fitness\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpopulation\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mindividual\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfitness\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpopulation\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfitnesses\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0mindividual\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfitness\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfitness\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
2120
- "\u001b[0;32m<ipython-input-37-c74419f37cb5>\u001b[0m in \u001b[0;36mevaluate_fitness\u001b[0;34m(individual)\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mevaluate_fitness\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindividual\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0;31m# Вычислите значения признаков на основе individual\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 9\u001b[0;31m \u001b[0mX_train_gp\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtransform_gp_structure\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindividual\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX_train\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# Вычисление новых признаков на обучающей выборке\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 10\u001b[0m \u001b[0mrf_model_gp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_train_gp\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# Обучение модели случайного леса с новыми признаками\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0mX_test_gp\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtransform_gp_structure\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindividual\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX_test\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# Вычисление новых признаков на тестовой выборке\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
2121
- "\u001b[0;32m<ipython-input-37-c74419f37cb5>\u001b[0m in \u001b[0;36mtransform_gp_structure\u001b[0;34m(individual, X)\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mtransform_gp_structure\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindividual\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mexpr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompile\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindividual\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mexpr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mrow\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mrow\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;31m# Определение функции оценки фитнеса (ваша собственная функция)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
2122
- "\u001b[0;32m<ipython-input-37-c74419f37cb5>\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mtransform_gp_structure\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindividual\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mexpr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompile\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindividual\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mexpr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mrow\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mrow\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;31m# Определение функции оценки фитнеса (ваша собственная функция)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
2123
  "\u001b[0;31mTypeError\u001b[0m: <lambda>() takes 2 positional arguments but 72 were given"
2124
  ]
2125
  }
2126
  ],
2127
  "source": [
 
 
 
 
2128
  "pop_size = 100 # Размер популяции\n",
2129
  "num_generations = 50 # Количество поколений\n",
2130
  "\n",
 
1202
  {
1203
  "data": {
1204
  "text/plain": [
1205
+ "['DEF_60_CNT_SOCIAL_CIRCLE',\n",
1206
+ " 'DAYS_LAST_PHONE_CHANGE',\n",
1207
  " 'DAYS_BIRTH',\n",
1208
  " 'AMT_INCOME_TOTAL',\n",
1209
+ " 'OBS_30_CNT_SOCIAL_CIRCLE',\n",
1210
+ " 'REGION_RATING_CLIENT_W_CITY',\n",
1211
+ " 'AMT_REQ_CREDIT_BUREAU_WEEK',\n",
1212
  " 'AMT_GOODS_PRICE',\n",
1213
+ " 'LIVE_CITY_NOT_WORK_CITY',\n",
1214
+ " 'REG_REGION_NOT_LIVE_REGION',\n",
1215
+ " 'HOUR_APPR_PROCESS_START',\n",
1216
+ " 'REG_CITY_NOT_LIVE_CITY',\n",
1217
  " 'AMT_REQ_CREDIT_BUREAU_QRT',\n",
 
 
 
1218
  " 'AMT_REQ_CREDIT_BUREAU_MON',\n",
 
 
 
 
 
 
 
1219
  " 'DAYS_EMPLOYED',\n",
1220
+ " 'AMT_REQ_CREDIT_BUREAU_HOUR',\n",
 
 
 
1221
  " 'DAYS_REGISTRATION',\n",
1222
+ " 'DAYS_ID_PUBLISH',\n",
1223
  " 'EXT_SOURCE_3',\n",
1224
+ " 'REGION_RATING_CLIENT',\n",
1225
+ " 'CNT_CHILDREN',\n",
1226
+ " 'DEF_30_CNT_SOCIAL_CIRCLE',\n",
1227
  " 'AMT_ANNUITY',\n",
1228
+ " 'AMT_REQ_CREDIT_BUREAU_YEAR',\n",
1229
+ " 'EXT_SOURCE_2',\n",
1230
+ " 'AMT_REQ_CREDIT_BUREAU_DAY',\n",
1231
+ " 'REGION_POPULATION_RELATIVE',\n",
1232
+ " 'REG_CITY_NOT_WORK_CITY']"
1233
  ]
1234
  },
1235
  "execution_count": 27,
 
2089
  "def evaluate_fitness(individual):\n",
2090
  " # Вычислите значения признаков на основе individual\n",
2091
  " X_train_gp = transform_gp_structure(individual, X_train) # Вычисление новых признаков на обучающей выборке\n",
2092
+ " rf_model_gp = lr\n",
2093
  " rf_model_gp.fit(X_train_gp, y_train) # Обучение модели случайного леса с новыми признаками\n",
2094
  " X_test_gp = transform_gp_structure(individual, X_test) # Вычисление новых признаков на тестовой выборке\n",
2095
  " y_pred_gp = rf_model_gp.predict_proba(X_test_gp)[:, 1] # Прогнозирование на тестовой выборке с новыми признаками\n",
 
2117
  "traceback": [
2118
  "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
2119
  "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
2120
+ "\u001b[0;32m<ipython-input-39-c8e552423953>\u001b[0m in \u001b[0;36m<cell line: 12>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;31m# Оценка фитнеса\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0mfitnesses\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmap\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mevaluate_fitness\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpopulation\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mindividual\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfitness\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpopulation\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfitnesses\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0mindividual\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfitness\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfitness\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
2121
+ "\u001b[0;32m<ipython-input-37-65cc5406f880>\u001b[0m in \u001b[0;36mevaluate_fitness\u001b[0;34m(individual)\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mevaluate_fitness\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindividual\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0;31m# Вычислите значения признаков на основе individual\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 9\u001b[0;31m \u001b[0mX_train_gp\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtransform_gp_structure\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindividual\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX_train\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# Вычисление новых признаков на обучающей выборке\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 10\u001b[0m \u001b[0mrf_model_gp\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlr\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0mrf_model_gp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_train_gp\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# Обучение модели случайного леса с новыми признаками\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
2122
+ "\u001b[0;32m<ipython-input-37-65cc5406f880>\u001b[0m in \u001b[0;36mtransform_gp_structure\u001b[0;34m(individual, X)\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mtransform_gp_structure\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindividual\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mexpr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompile\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindividual\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mexpr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mrow\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mrow\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;31m# Определение функции оценки фитнеса (ваша собственная функция)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
2123
+ "\u001b[0;32m<ipython-input-37-65cc5406f880>\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mtransform_gp_structure\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindividual\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mexpr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompile\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindividual\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mexpr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mrow\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mrow\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;31m# Определение функции оценки фитнеса (ваша собственная функция)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
2124
  "\u001b[0;31mTypeError\u001b[0m: <lambda>() takes 2 positional arguments but 72 were given"
2125
  ]
2126
  }
2127
  ],
2128
  "source": [
2129
+ "import random # fix missing import and vars for crash isolation purposes\n",
2130
+ "crossover_prob = 0.5\n",
2131
+ "mutation_prob = 0.5\n",
2132
+ "\n",
2133
  "pop_size = 100 # Размер популяции\n",
2134
  "num_generations = 50 # Количество поколений\n",
2135
  "\n",
benchmark/pandas_14/pandas_14_fixed.ipynb CHANGED
@@ -1265,11 +1265,11 @@
1265
  "for i in range(filas):\n",
1266
  " for j in range(col):\n",
1267
  " n = i * col + j\n",
1268
- " if n < len(nombres_col):\n",
1269
- " # fix --- add a condition\n",
1270
- " if n < len(nombres_col.columns): # fix\n",
1271
- " axs[i, j].set_title(nombres_col.columns[n])\n",
1272
- " axs[i, j].scatter(df[nombres_col.columns[n]][:N], y[:N])\n",
1273
  "\n",
1274
  "fig.tight_layout()\n",
1275
  "plt.show()\n"
 
1265
  "for i in range(filas):\n",
1266
  " for j in range(col):\n",
1267
  " n = i * col + j\n",
1268
+ " # fix --- fix the guarding condition\n",
1269
+ "# if n < len(nombres_col):\n",
1270
+ " if n < len(nombres_col.columns):\n",
1271
+ " axs[i, j].set_title(nombres_col.columns[n])\n",
1272
+ " axs[i, j].scatter(df[nombres_col.columns[n]][:N], y[:N])\n",
1273
  "\n",
1274
  "fig.tight_layout()\n",
1275
  "plt.show()\n"
benchmark/sklearn_10/sklearn_10_fixed.ipynb CHANGED
@@ -1553,7 +1553,7 @@
1553
  "kf_rmse = []\n",
1554
  "for fold, (train_idx, valid_idx) in enumerate(KFold(n_splits=n_splits, shuffle=True).split(X_train,y_train)):\n",
1555
  " # Fetch the train-validation indices.\n",
1556
- " # fix 1: should iloc row ids, should not rewrite X_train, y_train\n",
1557
  " X_train_fold, y_train_fold = X_train.iloc[train_idx], y_train.iloc[train_idx]\n",
1558
  " X_valid_fold, y_valid_fold = X_train.iloc[valid_idx], y_train.iloc[valid_idx]\n",
1559
  " \n",
@@ -1566,20 +1566,18 @@
1566
  " # Validation predictions.\n",
1567
  " valid_pred = model.predict(X_valid_fold)\n",
1568
  " \n",
1569
- " # fix 2: non-existing func\n",
1570
- "# rmse = my_rmse(y_valid_fold, valid_pred)\n",
1571
  " rmse = np.sqrt(mean_squared_error(y_valid_fold, valid_pred))\n",
1572
  " \n",
1573
  " print(f'Fold {fold+1}/{n_splits} RMSE: {rmse:.4f}')\n",
1574
  " kf_rmse.append(rmse)\n",
1575
  " \n",
1576
- " # fix 3: non-existing test variable\n",
1577
  " # Use the model trained for 1/n_splits of the output predictions.\n",
1578
  " if test_preds is None:\n",
1579
- " test_preds = model.predict(X_test)\n",
1580
  " else:\n",
1581
  " # This is kind of naughty for numerical accuracy (may overflow on other problems) but slightly quicker.\n",
1582
- " test_preds += model.predict(X_test)\n",
1583
  "\n",
1584
  "test_preds /= n_splits\n",
1585
  "print(f'Average KFold RMSE: {np.mean(np.array(kf_rmse)):.5f}')"
 
1553
  "kf_rmse = []\n",
1554
  "for fold, (train_idx, valid_idx) in enumerate(KFold(n_splits=n_splits, shuffle=True).split(X_train,y_train)):\n",
1555
  " # Fetch the train-validation indices.\n",
1556
+ " # fix: should iloc row ids, should not rewrite X_train, y_train\n",
1557
  " X_train_fold, y_train_fold = X_train.iloc[train_idx], y_train.iloc[train_idx]\n",
1558
  " X_valid_fold, y_valid_fold = X_train.iloc[valid_idx], y_train.iloc[valid_idx]\n",
1559
  " \n",
 
1566
  " # Validation predictions.\n",
1567
  " valid_pred = model.predict(X_valid_fold)\n",
1568
  " \n",
1569
+ "# rmse = my_rmse(y_valid_fold, valid_pred) # fix for crash isolation purpose, non-existing func\n",
 
1570
  " rmse = np.sqrt(mean_squared_error(y_valid_fold, valid_pred))\n",
1571
  " \n",
1572
  " print(f'Fold {fold+1}/{n_splits} RMSE: {rmse:.4f}')\n",
1573
  " kf_rmse.append(rmse)\n",
1574
  " \n",
 
1575
  " # Use the model trained for 1/n_splits of the output predictions.\n",
1576
  " if test_preds is None:\n",
1577
+ " test_preds = model.predict(X_test) # fix for crash isolation purpose: non-existing test_x variable\n",
1578
  " else:\n",
1579
  " # This is kind of naughty for numerical accuracy (may overflow on other problems) but slightly quicker.\n",
1580
+ " test_preds += model.predict(X_test) # fix for crash isolation purpose: non-existing test_x variable\n",
1581
  "\n",
1582
  "test_preds /= n_splits\n",
1583
  "print(f'Average KFold RMSE: {np.mean(np.array(kf_rmse)):.5f}')"
benchmark/sklearn_10/sklearn_10_reproduced.ipynb CHANGED
@@ -1539,8 +1539,8 @@
1539
  ],
1540
  "source": [
1541
  "params = {}\n",
1542
- "params['tree_method'] = 'gpu_hist'\n",
1543
- "params['predictor'] = 'gpu_predictor'\n",
1544
  "params['n_jobs'] = 4\n",
1545
  "\n",
1546
  "\n",
@@ -1561,16 +1561,16 @@
1561
  " \n",
1562
  " # Validation predictions.\n",
1563
  " valid_pred = model.predict(X_valid)\n",
1564
- " rmse = my_rmse(y_valid, valid_pred)\n",
1565
- " print(f'Fold {fold+1}/{n_splits} RMSE: {rmse:.4f}')\n",
1566
  " kf_rmse.append(rmse)\n",
1567
  " \n",
1568
  " # Use the model trained for 1/n_splits of the output predictions.\n",
1569
  " if test_preds is None:\n",
1570
- " test_preds = model.predict(test_x)\n",
1571
  " else:\n",
1572
  " # This is kind of naughty for numerical accuracy (may overflow on other problems) but slightly quicker.\n",
1573
- " test_preds += model.predict(test_x)\n",
1574
  "\n",
1575
  "test_preds /= n_splits\n",
1576
  "print(f'Average KFold RMSE: {np.mean(np.array(kf_rmse)):.5f}')"
 
1539
  ],
1540
  "source": [
1541
  "params = {}\n",
1542
+ "params['tree_method'] = 'hist' # fix (for testing locally), use cpu instead of gpu\n",
1543
+ "params['predictor'] = 'cpu_predictor'\n",
1544
  "params['n_jobs'] = 4\n",
1545
  "\n",
1546
  "\n",
 
1561
  " \n",
1562
  " # Validation predictions.\n",
1563
  " valid_pred = model.predict(X_valid)\n",
1564
+ "# rmse = my_rmse(y_valid_fold, valid_pred) # fix for crash isolation purpose, non-existing func\n",
1565
+ " rmse = np.sqrt(mean_squared_error(y_valid_fold, valid_pred))\n",
+ " print(f'Fold {fold+1}/{n_splits} RMSE: {rmse:.4f}')\n",
1566
  " kf_rmse.append(rmse)\n",
1567
  " \n",
1568
  " # Use the model trained for 1/n_splits of the output predictions.\n",
1569
  " if test_preds is None:\n",
1570
+ " test_preds = model.predict(X_test) # fix for crash isolation purpose: non-existing test_x variable\n",
1571
  " else:\n",
1572
  " # This is kind of naughty for numerical accuracy (may overflow on other problems) but slightly quicker.\n",
1573
+ " test_preds += model.predict(X_test) # fix for crash isolation purpose: non-existing test_x variable\n",
1574
  "\n",
1575
  "test_preds /= n_splits\n",
1576
  "print(f'Average KFold RMSE: {np.mean(np.array(kf_rmse)):.5f}')"
benchmark/sklearn_6/sklearn_6_fixed.ipynb CHANGED
@@ -916,7 +916,7 @@
916
  },
917
  "outputs": [],
918
  "source": [
919
- "test_ds_ids = test_ds['Id'] # fix --- save ids for submission\n",
920
  "test_ds.drop(['Id', 'MoSold', 'GarageYrBlt', 'Condition1', 'Condition2'], axis = 1, inplace = True)"
921
  ]
922
  },
@@ -1028,7 +1028,7 @@
1028
  "source": [
1029
  "predictions = FReg.predict(test_ds)\n",
1030
  "submissions_df = pd.DataFrame({\n",
1031
- " \"ID\" : test_ds_ids, # test_data['ID'], # fix --- wrong variable name\n",
1032
  " \"Predictions\" : predictions\n",
1033
  "})\n",
1034
  "\n",
 
916
  },
917
  "outputs": [],
918
  "source": [
919
+ "test_ds_ids = test_ds['Id'] # fix for crash isolation purpose\n",
920
  "test_ds.drop(['Id', 'MoSold', 'GarageYrBlt', 'Condition1', 'Condition2'], axis = 1, inplace = True)"
921
  ]
922
  },
 
1028
  "source": [
1029
  "predictions = FReg.predict(test_ds)\n",
1030
  "submissions_df = pd.DataFrame({\n",
1031
+ " \"ID\" : test_ds_ids, # test_data['ID'], # fix for crash isolation purpose\n",
1032
  " \"Predictions\" : predictions\n",
1033
  "})\n",
1034
  "\n",
benchmark/sklearn_6/sklearn_6_reproduced.ipynb CHANGED
@@ -916,6 +916,7 @@
916
  },
917
  "outputs": [],
918
  "source": [
 
919
  "test_ds.drop(['Id', 'MoSold', 'GarageYrBlt', 'Condition1', 'Condition2'], axis = 1, inplace = True)"
920
  ]
921
  },
@@ -1051,7 +1052,7 @@
1051
  "traceback": [
1052
  "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
1053
  "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
1054
- "\u001b[0;32m<ipython-input-15-e055db40fbd5>\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mpredictions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mFReg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_ds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m submissions_df = pd.DataFrame({\n\u001b[1;32m 3\u001b[0m \u001b[0;34m\"ID\"\u001b[0m \u001b[0;34m:\u001b[0m \u001b[0mtest_data\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'ID'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;34m\"Predictions\"\u001b[0m \u001b[0;34m:\u001b[0m \u001b[0mpredictions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m })\n",
1055
  "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/sklearn/ensemble/_forest.py\u001b[0m in \u001b[0;36mpredict\u001b[0;34m(self, X)\u001b[0m\n\u001b[1;32m 979\u001b[0m \u001b[0mcheck_is_fitted\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 980\u001b[0m \u001b[0;31m# Check data\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 981\u001b[0;31m \u001b[0mX\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_validate_X_predict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 982\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 983\u001b[0m \u001b[0;31m# Assign chunk of trees to jobs\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
1056
  "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/sklearn/ensemble/_forest.py\u001b[0m in \u001b[0;36m_validate_X_predict\u001b[0;34m(self, X)\u001b[0m\n\u001b[1;32m 600\u001b[0m Validate X whenever one tries to predict, apply, predict_proba.\"\"\"\n\u001b[1;32m 601\u001b[0m \u001b[0mcheck_is_fitted\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 602\u001b[0;31m \u001b[0mX\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_validate_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mDTYPE\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maccept_sparse\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"csr\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mreset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 603\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0missparse\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mindices\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdtype\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mintc\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mindptr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdtype\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mintc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 604\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"No support for np.int64 index based sparse matrices\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
1057
  "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/sklearn/base.py\u001b[0m in \u001b[0;36m_validate_data\u001b[0;34m(self, X, y, reset, validate_separately, **check_params)\u001b[0m\n\u001b[1;32m 546\u001b[0m \u001b[0mvalidated\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 547\u001b[0m \"\"\"\n\u001b[0;32m--> 548\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_check_feature_names\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mreset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mreset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 549\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 550\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0my\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_tags\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"requires_y\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
@@ -1063,7 +1064,7 @@
1063
  "source": [
1064
  "predictions = FReg.predict(test_ds)\n",
1065
  "submissions_df = pd.DataFrame({\n",
1066
- " \"ID\" : test_data['ID'],\n",
1067
  " \"Predictions\" : predictions\n",
1068
  "})\n",
1069
  "\n",
 
916
  },
917
  "outputs": [],
918
  "source": [
919
+ "test_ds_ids = test_ds['Id'] # fix for crash isolation purpose\n",
920
  "test_ds.drop(['Id', 'MoSold', 'GarageYrBlt', 'Condition1', 'Condition2'], axis = 1, inplace = True)"
921
  ]
922
  },
 
1052
  "traceback": [
1053
  "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
1054
  "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
1055
+ "\u001b[0;32m<ipython-input-15-0b091944b5d9>\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mpredictions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mFReg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_ds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m submissions_df = pd.DataFrame({\n\u001b[1;32m 3\u001b[0m \u001b[0;34m\"ID\"\u001b[0m \u001b[0;34m:\u001b[0m \u001b[0mtest_ds_ids\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;31m# test_data['ID'], # fix for crash isolation purpose\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;34m\"Predictions\"\u001b[0m \u001b[0;34m:\u001b[0m \u001b[0mpredictions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m })\n",
1056
  "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/sklearn/ensemble/_forest.py\u001b[0m in \u001b[0;36mpredict\u001b[0;34m(self, X)\u001b[0m\n\u001b[1;32m 979\u001b[0m \u001b[0mcheck_is_fitted\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 980\u001b[0m \u001b[0;31m# Check data\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 981\u001b[0;31m \u001b[0mX\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_validate_X_predict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 982\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 983\u001b[0m \u001b[0;31m# Assign chunk of trees to jobs\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
1057
  "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/sklearn/ensemble/_forest.py\u001b[0m in \u001b[0;36m_validate_X_predict\u001b[0;34m(self, X)\u001b[0m\n\u001b[1;32m 600\u001b[0m Validate X whenever one tries to predict, apply, predict_proba.\"\"\"\n\u001b[1;32m 601\u001b[0m \u001b[0mcheck_is_fitted\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 602\u001b[0;31m \u001b[0mX\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_validate_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mDTYPE\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maccept_sparse\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"csr\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mreset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 603\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0missparse\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mindices\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdtype\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mintc\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mindptr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdtype\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mintc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 604\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"No support for np.int64 index based sparse matrices\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
1058
  "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/sklearn/base.py\u001b[0m in \u001b[0;36m_validate_data\u001b[0;34m(self, X, y, reset, validate_separately, **check_params)\u001b[0m\n\u001b[1;32m 546\u001b[0m \u001b[0mvalidated\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 547\u001b[0m \"\"\"\n\u001b[0;32m--> 548\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_check_feature_names\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mreset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mreset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 549\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 550\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0my\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_tags\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"requires_y\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
 
1064
  "source": [
1065
  "predictions = FReg.predict(test_ds)\n",
1066
  "submissions_df = pd.DataFrame({\n",
1067
+ " \"ID\" : test_ds_ids, # test_data['ID'], # fix for crash isolation purpose\n",
1068
  " \"Predictions\" : predictions\n",
1069
  "})\n",
1070
  "\n",
benchmark/sklearn_7/sklearn_7_fixed.ipynb CHANGED
@@ -768,7 +768,7 @@
768
  }
769
  ],
770
  "source": [
771
- "import numpy as np # fix --- imports\n",
772
  "\n",
773
  "# Evaluate the model\n",
774
  "mse = mean_squared_error(y_test, y_pred)\n",
 
768
  }
769
  ],
770
  "source": [
771
+ "import numpy as np # fix for crash isolation purpose (only testing the fix)\n",
772
  "\n",
773
  "# Evaluate the model\n",
774
  "mse = mean_squared_error(y_test, y_pred)\n",
benchmark/torch_1/torch_1_reproduced.ipynb CHANGED
@@ -577,7 +577,7 @@
577
  " iteration_number += 10\n",
578
  " counter.append(iteration_number)\n",
579
  " loss.append(loss_contrastive.item())\n",
580
- " show_plot(counter, loss) \n",
581
  " return net\n",
582
  "#set the device to cuda\n",
583
  "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
 
577
  " iteration_number += 10\n",
578
  " counter.append(iteration_number)\n",
579
  " loss.append(loss_contrastive.item())\n",
580
+ "# show_plot(counter, loss) \n",
581
  " return net\n",
582
  "#set the device to cuda\n",
583
  "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
benchmark/torch_5/torch_5_fixed.ipynb CHANGED
@@ -249,10 +249,7 @@
249
  " total = 0.0\n",
250
  " \n",
251
  " with torch.no_grad():\n",
252
- " # fix 4 ----- undefined variable \n",
253
- "# for data in test_loader:\n",
254
- " for data in testing_set_loader:\n",
255
- " \n",
256
  " images, labels = data\n",
257
  " # run the model on the test set to predict labels\n",
258
  " outputs = model(images)\n",
@@ -281,9 +278,7 @@
281
  " running_loss = 0.0\n",
282
  " running_acc = 0.0\n",
283
  "\n",
284
- " # fix 2-------should use previously created dataloader\n",
285
- "# for i, (images, classes) in enumerate(dataset, 0):\n",
286
- " for i, (images, classes) in enumerate(training_set_loader, 0):\n",
287
  " \n",
288
  " # fix 1 ---- inputs and labels should be tensors\n",
289
  "# # get the inputs\n",
@@ -357,9 +352,7 @@
357
  "def testBatch():\n",
358
  " # get batch of images from the test DataLoader \n",
359
  " \n",
360
- " # fix 5----------undefined variable\n",
361
- "# images, labels = next(iter(test_loader))\n",
362
- " images, labels = next(iter(testing_set_loader))\n",
363
  " \n",
364
  "\n",
365
  " # show all images as one image grid\n",
@@ -407,7 +400,7 @@
407
  "name": "stderr",
408
  "output_type": "stream",
409
  "text": [
410
- "<ipython-input-7-9774f697d913>:16: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
411
  " model.load_state_dict(torch.load(path))\n"
412
  ]
413
  },
@@ -438,10 +431,7 @@
438
  " print('Finished Training')\n",
439
  "\n",
440
  " # Test which classes performed well\n",
441
- " \n",
442
- " # fix 6----------undefined function\n",
443
- "# testModelAccuracy()\n",
444
- " testAccuracy()\n",
445
  " \n",
446
  " # Let's load the model we just created and test the accuracy per label\n",
447
  " model = Network()\n",
 
249
  " total = 0.0\n",
250
  " \n",
251
  " with torch.no_grad():\n",
252
+ " for data in testing_set_loader: # for data in test_loader: # fix for crash isolation reasons\n",
 
 
 
253
  " images, labels = data\n",
254
  " # run the model on the test set to predict labels\n",
255
  " outputs = model(images)\n",
 
278
  " running_loss = 0.0\n",
279
  " running_acc = 0.0\n",
280
  "\n",
281
+ " for i, (images, classes) in enumerate(training_set_loader, 0): # for i, (images, classes) in enumerate(dataset, 0): # fix for crash isolation reasons\n",
 
 
282
  " \n",
283
  " # fix 1 ---- inputs and labels should be tensors\n",
284
  "# # get the inputs\n",
 
352
  "def testBatch():\n",
353
  " # get batch of images from the test DataLoader \n",
354
  " \n",
355
+ " images, labels = next(iter(testing_set_loader)) # images, labels = next(iter(test_loader)) # fix for crash isolation reasons\n",
 
 
356
  " \n",
357
  "\n",
358
  " # show all images as one image grid\n",
 
400
  "name": "stderr",
401
  "output_type": "stream",
402
  "text": [
403
+ "<ipython-input-7-e3520913a2eb>:13: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
404
  " model.load_state_dict(torch.load(path))\n"
405
  ]
406
  },
 
431
  " print('Finished Training')\n",
432
  "\n",
433
  " # Test which classes performed well\n",
434
+ " testAccuracy() # testModelAccuracy() # fix for crash isolation reasons\n",
 
 
 
435
  " \n",
436
  " # Let's load the model we just created and test the accuracy per label\n",
437
  " model = Network()\n",
benchmark/torch_5/torch_5_reproduced.ipynb CHANGED
@@ -232,7 +232,7 @@
232
  " total = 0.0\n",
233
  " \n",
234
  " with torch.no_grad():\n",
235
- " for data in test_loader:\n",
236
  " images, labels = data\n",
237
  " # run the model on the test set to predict labels\n",
238
  " outputs = model(images)\n",
@@ -261,7 +261,7 @@
261
  " running_loss = 0.0\n",
262
  " running_acc = 0.0\n",
263
  "\n",
264
- " for i, (images, classes) in enumerate(dataset, 0):\n",
265
  " \n",
266
  " # get the inputs\n",
267
  " images = Variable(images.to(device))\n",
@@ -327,7 +327,7 @@
327
  "# Function to test the model with a batch of images and show the labels predictions\n",
328
  "def testBatch():\n",
329
  " # get batch of images from the test DataLoader \n",
330
- " images, labels = next(iter(test_loader))\n",
331
  "\n",
332
  " # show all images as one image grid\n",
333
  " imageshow(torchvision.utils.make_grid(images))\n",
@@ -367,6 +367,14 @@
367
  "The model will be running on cpu device\n"
368
  ]
369
  },
 
 
 
 
 
 
 
 
370
  {
371
  "ename": "TypeError",
372
  "evalue": "conv2d() received an invalid combination of arguments - got (torch.device, Parameter, Parameter, tuple, tuple, tuple, int), but expected one of:\n * (Tensor input, Tensor weight, Tensor bias = None, tuple of ints stride = 1, tuple of ints padding = 0, tuple of ints dilation = 1, int groups = 1)\n didn't match because some of the arguments have invalid types: (\u001b[31;1mtorch.device\u001b[0m, \u001b[31;1mParameter\u001b[0m, \u001b[31;1mParameter\u001b[0m, \u001b[31;1mtuple of (int, int)\u001b[0m, \u001b[31;1mtuple of (int, int)\u001b[0m, \u001b[31;1mtuple of (int, int)\u001b[0m, \u001b[31;1mint\u001b[0m)\n * (Tensor input, Tensor weight, Tensor bias = None, tuple of ints stride = 1, str padding = \"valid\", tuple of ints dilation = 1, int groups = 1)\n didn't match because some of the arguments have invalid types: (\u001b[31;1mtorch.device\u001b[0m, \u001b[31;1mParameter\u001b[0m, \u001b[31;1mParameter\u001b[0m, \u001b[31;1mtuple of (int, int)\u001b[0m, \u001b[31;1mtuple of (int, int)\u001b[0m, \u001b[31;1mtuple of (int, int)\u001b[0m, \u001b[31;1mint\u001b[0m)\n",
@@ -374,8 +382,8 @@
374
  "traceback": [
375
  "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
376
  "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
377
- "\u001b[0;32m<ipython-input-7-94cb09bf2a34>\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;31m# Let's build our model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Finished Training'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
378
- "\u001b[0;32m<ipython-input-5-9ad46287b0ec>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(num_epochs)\u001b[0m\n\u001b[1;32m 54\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzero_grad\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0;31m# predict classes using images from the training set\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 56\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 57\u001b[0m \u001b[0;31m# compute the loss based on model output and real labels\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mclass_names\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
379
  "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1551\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1552\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1553\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1554\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1555\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
380
  "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1560\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1561\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1562\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1563\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1564\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
381
  "\u001b[0;32m<ipython-input-3-0e24d66103f7>\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 23\u001b[0;31m \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbn1\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconv1\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 24\u001b[0m \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbn2\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconv2\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpool\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
@@ -395,7 +403,7 @@
395
  " print('Finished Training')\n",
396
  "\n",
397
  " # Test which classes performed well\n",
398
- " testModelAccuracy()\n",
399
  " \n",
400
  " # Let's load the model we just created and test the accuracy per label\n",
401
  " model = Network()\n",
 
232
  " total = 0.0\n",
233
  " \n",
234
  " with torch.no_grad():\n",
235
+ " for data in testing_set_loader: # for data in test_loader: # fix for crash isolation reasons\n",
236
  " images, labels = data\n",
237
  " # run the model on the test set to predict labels\n",
238
  " outputs = model(images)\n",
 
261
  " running_loss = 0.0\n",
262
  " running_acc = 0.0\n",
263
  "\n",
264
+ " for i, (images, classes) in enumerate(training_set_loader, 0): # for i, (images, classes) in enumerate(dataset, 0): # fix for crash isolation reasons\n",
265
  " \n",
266
  " # get the inputs\n",
267
  " images = Variable(images.to(device))\n",
 
327
  "# Function to test the model with a batch of images and show the labels predictions\n",
328
  "def testBatch():\n",
329
  " # get batch of images from the test DataLoader \n",
330
+ " images, labels = next(iter(testing_set_loader)) # images, labels = next(iter(test_loader)) # fix for crash isolation reasons\n",
331
  "\n",
332
  " # show all images as one image grid\n",
333
  " imageshow(torchvision.utils.make_grid(images))\n",
 
367
  "The model will be running on cpu device\n"
368
  ]
369
  },
370
+ {
371
+ "name": "stderr",
372
+ "output_type": "stream",
373
+ "text": [
374
+ "<ipython-input-5-9eca036b5181>:50: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
375
+ " classes = torch.tensor(classes)\n"
376
+ ]
377
+ },
378
  {
379
  "ename": "TypeError",
380
  "evalue": "conv2d() received an invalid combination of arguments - got (torch.device, Parameter, Parameter, tuple, tuple, tuple, int), but expected one of:\n * (Tensor input, Tensor weight, Tensor bias = None, tuple of ints stride = 1, tuple of ints padding = 0, tuple of ints dilation = 1, int groups = 1)\n didn't match because some of the arguments have invalid types: (\u001b[31;1mtorch.device\u001b[0m, \u001b[31;1mParameter\u001b[0m, \u001b[31;1mParameter\u001b[0m, \u001b[31;1mtuple of (int, int)\u001b[0m, \u001b[31;1mtuple of (int, int)\u001b[0m, \u001b[31;1mtuple of (int, int)\u001b[0m, \u001b[31;1mint\u001b[0m)\n * (Tensor input, Tensor weight, Tensor bias = None, tuple of ints stride = 1, str padding = \"valid\", tuple of ints dilation = 1, int groups = 1)\n didn't match because some of the arguments have invalid types: (\u001b[31;1mtorch.device\u001b[0m, \u001b[31;1mParameter\u001b[0m, \u001b[31;1mParameter\u001b[0m, \u001b[31;1mtuple of (int, int)\u001b[0m, \u001b[31;1mtuple of (int, int)\u001b[0m, \u001b[31;1mtuple of (int, int)\u001b[0m, \u001b[31;1mint\u001b[0m)\n",
 
382
  "traceback": [
383
  "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
384
  "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
385
+ "\u001b[0;32m<ipython-input-7-3a1624416c0c>\u001b[0m in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;31m# Let's build our model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Finished Training'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
386
+ "\u001b[0;32m<ipython-input-5-9eca036b5181>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(num_epochs)\u001b[0m\n\u001b[1;32m 54\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzero_grad\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0;31m# predict classes using images from the training set\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 56\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 57\u001b[0m \u001b[0;31m# compute the loss based on model output and real labels\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mclass_names\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
387
  "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1551\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1552\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1553\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1554\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1555\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
388
  "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1560\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1561\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1562\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1563\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1564\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
389
  "\u001b[0;32m<ipython-input-3-0e24d66103f7>\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 23\u001b[0;31m \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbn1\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconv1\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 24\u001b[0m \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbn2\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconv2\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpool\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
 
403
  " print('Finished Training')\n",
404
  "\n",
405
  " # Test which classes performed well\n",
406
+ " testAccuracy() # testModelAccuracy() # fix for crash isolation reasons\n",
407
  " \n",
408
  " # Let's load the model we just created and test the accuracy per label\n",
409
  " model = Network()\n",
benchmark/torch_6/torch_6_fixed.ipynb CHANGED
@@ -1527,7 +1527,7 @@
1527
  " #print(\"output shape is,\",output.shape)\n",
1528
  " #print(\"label shape is,\",label.shape)\n",
1529
  " \n",
1530
- " # fix ---- labels had shape [batch_size, 393], but model predicts [batch_size, 6294]. That won’t match\n",
1531
  " label = label.argmax(dim=1)\n",
1532
  " \n",
1533
  " loss_ = loss(output, label).to(device)\n",
 
1527
  " #print(\"output shape is,\",output.shape)\n",
1528
  " #print(\"label shape is,\",label.shape)\n",
1529
  " \n",
1530
+ " # fix ---- label is one-hot encoded, but nn.CrossEntropyLoss expects a 1D target tensor of class indices\n",
1531
  " label = label.argmax(dim=1)\n",
1532
  " \n",
1533
  " loss_ = loss(output, label).to(device)\n",
benchmark/torchvision_1/torchvision_1_fixed.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
benchmark/torchvision_1/torchvision_1_reproduced.ipynb CHANGED
The diff for this file is too large to render. See raw diff