| path (string, 13-17 chars) | screenshot_names (list, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class: code) |
|---|---|---|---|
33118743/cell_7 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from tqdm import tqdm
import json
import numpy as np # linear algebra
import os
train_path = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluation_path = '/kaggle/input/abstraction-and-reasoning-challenge/evaluation/'
test_path = '/kaggle/input/abstraction-and-reasoning-challenge/test/'
same_shape = []
for ex in tqdm(os.listdir(evaluation_path)):
with open(evaluation_path + ex, 'r') as train_file:
all_im = json.load(train_file)
im_in = np.array(all_im['train'][0]['input'])
im_out = np.array(all_im['train'][0]['output'])
if im_in.shape == im_out.shape:
same_shape.append(ex)
def get_im_with_same_ioshape(file_path, name, show=False, mode='train'):
train = []
test = []
with open(file_path + name, 'r') as train_file:
all_im = json.load(train_file)
im_in = np.array(all_im['train'][0]['input'])
im_out = np.array(all_im['train'][0]['output'])
if im_in.shape != im_out.shape:
return None
for im in all_im['train']:
im_in = np.array(im['input'])
im_out = np.array(im['output'])
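# (im_in - im_out) / (im_in - im_out) is 1.0 where a pixel changed and 0/0 (NaN) where it did not; nan_to_num maps the NaNs to 0, so mask flags exactly the changed pixels (this division is likely the source of the RuntimeWarnings in the stderr output)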
mask = np.asarray(np.nan_to_num((im_in - im_out) / (im_in - im_out), 0), 'int8')
train.append((im_in, im_out, mask))
if mode == 'train':
for im in all_im['test']:
im_in = np.array(im['input'])
im_out = np.array(im['output'])
test.append((im_in, im_out))
if mode == 'test':
for im in all_im['test']:
im_in = np.array(im['input'])
test.append(im_in)
return (train, test)
train, test = get_im_with_same_ioshape(evaluation_path, same_shape[1], False)
def get_features(input_):
im_in, im_out, mask = input_
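# one training sample per changed pixel: the feature vector holds the pixel's 8 neighbours in the input grid (-1 for positions outside the grid) and the target is the output colour at that pixel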
features = np.zeros((sum(sum(mask)), 8))
colors = np.zeros(sum(sum(mask)))
f = 0
for y in range(mask.shape[0]):
for x in range(mask.shape[1]):
if mask[y, x] == 1:
pix_exp = np.zeros(8)
n_p = 0
for dy in range(-1, 2):
for dx in range(-1, 2):
if dy != 0 or dx != 0:
if dx + x >= 0 and dy + y >= 0 and (dx + x < mask.shape[1]) and (dy + y < mask.shape[0]):
pix_exp[n_p] = im_in[y + dy, x + dx]
else:
pix_exp[n_p] = -1
n_p += 1
features[f] = pix_exp
colors[f] = im_out[y, x]
f += 1
return (features, colors)
def get_cf(train):
features_set = []
colors_set = []
for in_out_mask in train:
features, colors = get_features(in_out_mask)
features_set += list(features)
colors_set += list(colors)
features_set_min = np.unique(np.array(features_set), axis=0)
colors_min = np.zeros(len(features_set_min))
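# keep one colour per unique neighbourhood; if the same neighbourhood maps to different colours across examples, the last one seen wins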
for n, feature in enumerate(features_set):
if feature in features_set_min:
for i, feature_uniq in enumerate(features_set_min):
if str(feature_uniq) == str(feature):
break
colors_min[i] = colors_set[n]
return (colors_min, features_set_min)
colors_min, features_set_min = get_cf(train)
print(colors_min, '\n')
print(features_set_min) | code |
33118743/cell_8 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from tqdm import tqdm
import json
import numpy as np # linear algebra
import os
train_path = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluation_path = '/kaggle/input/abstraction-and-reasoning-challenge/evaluation/'
test_path = '/kaggle/input/abstraction-and-reasoning-challenge/test/'
same_shape = []
for ex in tqdm(os.listdir(evaluation_path)):
with open(evaluation_path + ex, 'r') as train_file:
all_im = json.load(train_file)
im_in = np.array(all_im['train'][0]['input'])
im_out = np.array(all_im['train'][0]['output'])
if im_in.shape == im_out.shape:
same_shape.append(ex)
def get_im_with_same_ioshape(file_path, name, show=False, mode='train'):
train = []
test = []
with open(file_path + name, 'r') as train_file:
all_im = json.load(train_file)
im_in = np.array(all_im['train'][0]['input'])
im_out = np.array(all_im['train'][0]['output'])
if im_in.shape != im_out.shape:
return None
for im in all_im['train']:
im_in = np.array(im['input'])
im_out = np.array(im['output'])
mask = np.asarray(np.nan_to_num((im_in - im_out) / (im_in - im_out), 0), 'int8')
train.append((im_in, im_out, mask))
if mode == 'train':
for im in all_im['test']:
im_in = np.array(im['input'])
im_out = np.array(im['output'])
test.append((im_in, im_out))
if mode == 'test':
for im in all_im['test']:
im_in = np.array(im['input'])
test.append(im_in)
return (train, test)
train, test = get_im_with_same_ioshape(evaluation_path, same_shape[1], False)
test[0][0] | code |
33118743/cell_3 | [
"text_plain_output_1.png"
] | from tqdm import tqdm
import json
import numpy as np # linear algebra
import os
train_path = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluation_path = '/kaggle/input/abstraction-and-reasoning-challenge/evaluation/'
test_path = '/kaggle/input/abstraction-and-reasoning-challenge/test/'
same_shape = []
for ex in tqdm(os.listdir(evaluation_path)):
with open(evaluation_path + ex, 'r') as train_file:
all_im = json.load(train_file)
im_in = np.array(all_im['train'][0]['input'])
im_out = np.array(all_im['train'][0]['output'])
if im_in.shape == im_out.shape:
same_shape.append(ex)
print('Same:', len(same_shape), 'All:', len(os.listdir(evaluation_path))) | code |
33118743/cell_10 | [
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from tqdm import tqdm
import json
import numpy as np # linear algebra
import os
train_path = '/kaggle/input/abstraction-and-reasoning-challenge/training/'
evaluation_path = '/kaggle/input/abstraction-and-reasoning-challenge/evaluation/'
test_path = '/kaggle/input/abstraction-and-reasoning-challenge/test/'
same_shape = []
for ex in tqdm(os.listdir(evaluation_path)):
with open(evaluation_path + ex, 'r') as train_file:
all_im = json.load(train_file)
im_in = np.array(all_im['train'][0]['input'])
im_out = np.array(all_im['train'][0]['output'])
if im_in.shape == im_out.shape:
same_shape.append(ex)
def get_im_with_same_ioshape(file_path, name, show=False, mode='train'):
train = []
test = []
with open(file_path + name, 'r') as train_file:
all_im = json.load(train_file)
im_in = np.array(all_im['train'][0]['input'])
im_out = np.array(all_im['train'][0]['output'])
if im_in.shape != im_out.shape:
return None
for im in all_im['train']:
im_in = np.array(im['input'])
im_out = np.array(im['output'])
mask = np.asarray(np.nan_to_num((im_in - im_out) / (im_in - im_out), 0), 'int8')
train.append((im_in, im_out, mask))
if mode == 'train':
for im in all_im['test']:
im_in = np.array(im['input'])
im_out = np.array(im['output'])
test.append((im_in, im_out))
if mode == 'test':
for im in all_im['test']:
im_in = np.array(im['input'])
test.append(im_in)
return (train, test)
train, test = get_im_with_same_ioshape(evaluation_path, same_shape[1], False)
def get_features(input_):
im_in, im_out, mask = input_
features = np.zeros((sum(sum(mask)), 8))
colors = np.zeros(sum(sum(mask)))
f = 0
for y in range(mask.shape[0]):
for x in range(mask.shape[1]):
if mask[y, x] == 1:
pix_exp = np.zeros(8)
n_p = 0
for dy in range(-1, 2):
for dx in range(-1, 2):
if dy != 0 or dx != 0:
if dx + x >= 0 and dy + y >= 0 and (dx + x < mask.shape[1]) and (dy + y < mask.shape[0]):
pix_exp[n_p] = im_in[y + dy, x + dx]
else:
pix_exp[n_p] = -1
n_p += 1
features[f] = pix_exp
colors[f] = im_out[y, x]
f += 1
return (features, colors)
def get_cf(train):
features_set = []
colors_set = []
for in_out_mask in train:
features, colors = get_features(in_out_mask)
features_set += list(features)
colors_set += list(colors)
features_set_min = np.unique(np.array(features_set), axis=0)
colors_min = np.zeros(len(features_set_min))
for n, feature in enumerate(features_set):
if feature in features_set_min:
for i, feature_uniq in enumerate(features_set_min):
if str(feature_uniq) == str(feature):
break
colors_min[i] = colors_set[n]
return (colors_min, features_set_min)
colors_min, features_set_min = get_cf(train)
def make_pred(im_in, features, colors):
im_out = im_in.copy()
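# scan every pixel of the input; wherever its 8-neighbourhood matches a learned feature vector, repaint that pixel with the colour learned for the neighbourhood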
f = 0
for y in range(im_in.shape[0]):
for x in range(im_in.shape[1]):
pix_exp = np.zeros(8)
n_p = 0
for dy in range(-1, 2):
for dx in range(-1, 2):
if dy != 0 or dx != 0:
if dx + x >= 0 and dy + y >= 0 and (dx + x < im_in.shape[1]) and (dy + y < im_in.shape[0]):
pix_exp[n_p] = im_in[y + dy, x + dx]
else:
pix_exp[n_p] = -1
n_p += 1
for n, f in enumerate(features):
if str(f) == str(pix_exp):
im_out[y, x] = colors[n]
return im_out
pred = make_pred(test[0][0], features_set_min, colors_min)
er = []
for N in tqdm(range(len(same_shape[:2]))):
data = get_im_with_same_ioshape(evaluation_path, same_shape[N])
if data is not None:
train, test = data
colors, features = get_cf(train)
pred = make_pred(test[0][0], features, colors)
d = np.sum(np.where(np.nan_to_num((pred - test[0][1]) / (pred - test[0][1]), 0) != 0, 1, 0))
er.append(d)
if d == 0:
print('Uhu!!!')
er.sort()
print(er) | code |
88095138/cell_9 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils import shuffle
import pandas as pd
test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
train = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
from sklearn.utils import shuffle
train = shuffle(train)
features = [c for c in train.columns if c not in ('target', 'row_id')]
target = 'target'
x_test = test[features]
y = train[target]
X = train[features]
numerical_features = X.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.preprocessing import OneHotEncoder
oe = OneHotEncoder()
target = oe.fit_transform(y.reshape(-1, 1))
target = target.toarray()
targetyDF = pd.DataFrame(target)
yDF = targetyDF
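# manual hold-out split: the first 87,500 shuffled rows are used for training, the remainder for validation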
X_train = X[:87500]
y_train = yDF[:87500]
X_valid = X[87500:]
y_valid = yDF[87500:]
X_train.shape[0] + X_valid.shape[0] == X.shape[0] | code |
88095138/cell_25 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from pyradox_tabular.data import DataLoader
from pyradox_tabular.data_config import DataConfig
from pyradox_tabular.model_config import TabTransformerConfig
from pyradox_tabular.nn import TabTransformer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
from sklearn.utils import shuffle
import pandas as pd
test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
train = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
from sklearn.utils import shuffle
train = shuffle(train)
features = [c for c in train.columns if c not in ('target', 'row_id')]
target = 'target'
x_test = test[features]
y = train[target]
X = train[features]
numerical_features = X.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.preprocessing import OneHotEncoder
oe = OneHotEncoder()
target = oe.fit_transform(y.reshape(-1, 1))
target = target.toarray()
targetyDF = pd.DataFrame(target)
yDF = targetyDF
X_train = X[:87500]
y_train = yDF[:87500]
X_valid = X[87500:]
y_valid = yDF[87500:]
X_train.shape[0] + X_valid.shape[0] == X.shape[0]
from sklearn.preprocessing import RobustScaler
RobustScaler_transformer = RobustScaler().fit(X_train.values)
X_trainRobustScaler = RobustScaler_transformer.transform(X_train.values)
X_validRobustScaler = RobustScaler_transformer.transform(X_valid.values)
X_testRobustScaler = RobustScaler_transformer.transform(x_test.values)
X_train = pd.DataFrame(X_trainRobustScaler, columns=list(numerical_features))
X_valid = pd.DataFrame(X_validRobustScaler, columns=list(numerical_features))
x_test = pd.DataFrame(X_testRobustScaler, columns=list(numerical_features))
data_config = DataConfig(numeric_feature_names=list(numerical_features), categorical_features_with_vocabulary={})
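# every feature in this table is treated as numeric, so the categorical vocabulary passed to the DataConfig is left empty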
data_train = DataLoader.from_df(X_train, y_train, batch_size=64 * 2)
data_valid = DataLoader.from_df(X_valid, y_valid, batch_size=64 * 2)
data_test = DataLoader.from_df(x_test, batch_size=64 * 2)
model_config = TabTransformerConfig(num_outputs=10, out_activation='softmax', num_transformer_blocks=10, num_heads=6, mlp_hidden_units_factors=[4, 2])
model = TabTransformer.from_config(data_config, model_config, name='tab_transformer')
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit(data_train, validation_data=data_valid, epochs=100, batch_size=64)
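# predict() returns class probabilities; inverting the one-hot encoding (effectively an argmax over the probability columns) and then the label encoding recovers the original target labels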
test_preds = model.predict(data_test, batch_size=64)
test_preds_decoded = oe.inverse_transform(test_preds)
test_preds_decoded_inversed = le.inverse_transform(test_preds_decoded)
test_preds_decoded_inversed | code |
88095138/cell_23 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_4.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | from pyradox_tabular.data import DataLoader
from pyradox_tabular.data_config import DataConfig
from pyradox_tabular.model_config import TabTransformerConfig
from pyradox_tabular.nn import TabTransformer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
from sklearn.utils import shuffle
import pandas as pd
test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
train = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
from sklearn.utils import shuffle
train = shuffle(train)
features = [c for c in train.columns if c not in ('target', 'row_id')]
target = 'target'
x_test = test[features]
y = train[target]
X = train[features]
numerical_features = X.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.preprocessing import OneHotEncoder
oe = OneHotEncoder()
target = oe.fit_transform(y.reshape(-1, 1))
target = target.toarray()
targetyDF = pd.DataFrame(target)
yDF = targetyDF
X_train = X[:87500]
y_train = yDF[:87500]
X_valid = X[87500:]
y_valid = yDF[87500:]
X_train.shape[0] + X_valid.shape[0] == X.shape[0]
from sklearn.preprocessing import RobustScaler
RobustScaler_transformer = RobustScaler().fit(X_train.values)
X_trainRobustScaler = RobustScaler_transformer.transform(X_train.values)
X_validRobustScaler = RobustScaler_transformer.transform(X_valid.values)
X_testRobustScaler = RobustScaler_transformer.transform(x_test.values)
X_train = pd.DataFrame(X_trainRobustScaler, columns=list(numerical_features))
X_valid = pd.DataFrame(X_validRobustScaler, columns=list(numerical_features))
x_test = pd.DataFrame(X_testRobustScaler, columns=list(numerical_features))
data_config = DataConfig(numeric_feature_names=list(numerical_features), categorical_features_with_vocabulary={})
data_train = DataLoader.from_df(X_train, y_train, batch_size=64 * 2)
data_valid = DataLoader.from_df(X_valid, y_valid, batch_size=64 * 2)
data_test = DataLoader.from_df(x_test, batch_size=64 * 2)
model_config = TabTransformerConfig(num_outputs=10, out_activation='softmax', num_transformer_blocks=10, num_heads=6, mlp_hidden_units_factors=[4, 2])
model = TabTransformer.from_config(data_config, model_config, name='tab_transformer')
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit(data_train, validation_data=data_valid, epochs=100, batch_size=64)
test_preds = model.predict(data_test, batch_size=64)
test_preds_decoded = oe.inverse_transform(test_preds)
test_preds_decoded | code |
88095138/cell_20 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from pyradox_tabular.data import DataLoader
from pyradox_tabular.data_config import DataConfig
from pyradox_tabular.model_config import TabTransformerConfig
from pyradox_tabular.nn import TabTransformer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
from sklearn.utils import shuffle
import pandas as pd
test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
train = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
from sklearn.utils import shuffle
train = shuffle(train)
features = [c for c in train.columns if c not in ('target', 'row_id')]
target = 'target'
x_test = test[features]
y = train[target]
X = train[features]
numerical_features = X.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.preprocessing import OneHotEncoder
oe = OneHotEncoder()
target = oe.fit_transform(y.reshape(-1, 1))
target = target.toarray()
targetyDF = pd.DataFrame(target)
yDF = targetyDF
X_train = X[:87500]
y_train = yDF[:87500]
X_valid = X[87500:]
y_valid = yDF[87500:]
X_train.shape[0] + X_valid.shape[0] == X.shape[0]
from sklearn.preprocessing import RobustScaler
RobustScaler_transformer = RobustScaler().fit(X_train.values)
X_trainRobustScaler = RobustScaler_transformer.transform(X_train.values)
X_validRobustScaler = RobustScaler_transformer.transform(X_valid.values)
X_testRobustScaler = RobustScaler_transformer.transform(x_test.values)
X_train = pd.DataFrame(X_trainRobustScaler, columns=list(numerical_features))
X_valid = pd.DataFrame(X_validRobustScaler, columns=list(numerical_features))
x_test = pd.DataFrame(X_testRobustScaler, columns=list(numerical_features))
data_config = DataConfig(numeric_feature_names=list(numerical_features), categorical_features_with_vocabulary={})
data_train = DataLoader.from_df(X_train, y_train, batch_size=64 * 2)
data_valid = DataLoader.from_df(X_valid, y_valid, batch_size=64 * 2)
data_test = DataLoader.from_df(x_test, batch_size=64 * 2)
model_config = TabTransformerConfig(num_outputs=10, out_activation='softmax', num_transformer_blocks=10, num_heads=6, mlp_hidden_units_factors=[4, 2])
model = TabTransformer.from_config(data_config, model_config, name='tab_transformer')
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit(data_train, validation_data=data_valid, epochs=100, batch_size=64) | code |
88095138/cell_1 | [
"text_plain_output_1.png"
] | !pip install pyradox-tabular -q
import pandas as pd
import numpy as np
import sklearn
from pyradox_tabular.data import DataLoader
from pyradox_tabular.data_config import DataConfig
from pyradox_tabular.model_config import TabTransformerConfig
from pyradox_tabular.nn import TabTransformer | code |
88095138/cell_28 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
from sklearn.utils import shuffle
import pandas as pd
test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
train = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
from sklearn.utils import shuffle
train = shuffle(train)
features = [c for c in train.columns if c not in ('target', 'row_id')]
target = 'target'
x_test = test[features]
y = train[target]
X = train[features]
numerical_features = X.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.preprocessing import OneHotEncoder
oe = OneHotEncoder()
target = oe.fit_transform(y.reshape(-1, 1))
target = target.toarray()
targetyDF = pd.DataFrame(target)
yDF = targetyDF
X_train = X[:87500]
y_train = yDF[:87500]
X_valid = X[87500:]
y_valid = yDF[87500:]
X_train.shape[0] + X_valid.shape[0] == X.shape[0]
from sklearn.preprocessing import RobustScaler
RobustScaler_transformer = RobustScaler().fit(X_train.values)
X_trainRobustScaler = RobustScaler_transformer.transform(X_train.values)
X_validRobustScaler = RobustScaler_transformer.transform(X_valid.values)
X_testRobustScaler = RobustScaler_transformer.transform(x_test.values)
X_train = pd.DataFrame(X_trainRobustScaler, columns=list(numerical_features))
X_valid = pd.DataFrame(X_validRobustScaler, columns=list(numerical_features))
x_test = pd.DataFrame(X_testRobustScaler, columns=list(numerical_features))
sub = pd.read_csv('../input/tabular-playground-series-feb-2022/sample_submission.csv')
sub.groupby(['target']).count() | code |
88095138/cell_16 | [
"text_plain_output_1.png"
] | from pyradox_tabular.data import DataLoader
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
from sklearn.utils import shuffle
import pandas as pd
test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
train = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
from sklearn.utils import shuffle
train = shuffle(train)
features = [c for c in train.columns if c not in ('target', 'row_id')]
target = 'target'
x_test = test[features]
y = train[target]
X = train[features]
numerical_features = X.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.preprocessing import OneHotEncoder
oe = OneHotEncoder()
target = oe.fit_transform(y.reshape(-1, 1))
target = target.toarray()
targetyDF = pd.DataFrame(target)
yDF = targetyDF
X_train = X[:87500]
y_train = yDF[:87500]
X_valid = X[87500:]
y_valid = yDF[87500:]
X_train.shape[0] + X_valid.shape[0] == X.shape[0]
from sklearn.preprocessing import RobustScaler
RobustScaler_transformer = RobustScaler().fit(X_train.values)
X_trainRobustScaler = RobustScaler_transformer.transform(X_train.values)
X_validRobustScaler = RobustScaler_transformer.transform(X_valid.values)
X_testRobustScaler = RobustScaler_transformer.transform(x_test.values)
X_train = pd.DataFrame(X_trainRobustScaler, columns=list(numerical_features))
X_valid = pd.DataFrame(X_validRobustScaler, columns=list(numerical_features))
x_test = pd.DataFrame(X_testRobustScaler, columns=list(numerical_features))
data_train = DataLoader.from_df(X_train, y_train, batch_size=64 * 2)
data_valid = DataLoader.from_df(X_valid, y_valid, batch_size=64 * 2)
data_test = DataLoader.from_df(x_test, batch_size=64 * 2) | code |
88095138/cell_24 | [
"text_plain_output_1.png"
] | from pyradox_tabular.data import DataLoader
from pyradox_tabular.data_config import DataConfig
from pyradox_tabular.model_config import TabTransformerConfig
from pyradox_tabular.nn import TabTransformer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
from sklearn.utils import shuffle
import pandas as pd
test = pd.read_csv('../input/tabular-playground-series-feb-2022/test.csv')
train = pd.read_csv('../input/tabular-playground-series-feb-2022/train.csv')
from sklearn.utils import shuffle
train = shuffle(train)
features = [c for c in train.columns if c not in ('target', 'row_id')]
target = 'target'
x_test = test[features]
y = train[target]
X = train[features]
numerical_features = X.columns
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
from sklearn.preprocessing import OneHotEncoder
oe = OneHotEncoder()
target = oe.fit_transform(y.reshape(-1, 1))
target = target.toarray()
targetyDF = pd.DataFrame(target)
yDF = targetyDF
X_train = X[:87500]
y_train = yDF[:87500]
X_valid = X[87500:]
y_valid = yDF[87500:]
X_train.shape[0] + X_valid.shape[0] == X.shape[0]
from sklearn.preprocessing import RobustScaler
RobustScaler_transformer = RobustScaler().fit(X_train.values)
X_trainRobustScaler = RobustScaler_transformer.transform(X_train.values)
X_validRobustScaler = RobustScaler_transformer.transform(X_valid.values)
X_testRobustScaler = RobustScaler_transformer.transform(x_test.values)
X_train = pd.DataFrame(X_trainRobustScaler, columns=list(numerical_features))
X_valid = pd.DataFrame(X_validRobustScaler, columns=list(numerical_features))
x_test = pd.DataFrame(X_testRobustScaler, columns=list(numerical_features))
data_config = DataConfig(numeric_feature_names=list(numerical_features), categorical_features_with_vocabulary={})
data_train = DataLoader.from_df(X_train, y_train, batch_size=64 * 2)
data_valid = DataLoader.from_df(X_valid, y_valid, batch_size=64 * 2)
data_test = DataLoader.from_df(x_test, batch_size=64 * 2)
model_config = TabTransformerConfig(num_outputs=10, out_activation='softmax', num_transformer_blocks=10, num_heads=6, mlp_hidden_units_factors=[4, 2])
model = TabTransformer.from_config(data_config, model_config, name='tab_transformer')
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit(data_train, validation_data=data_valid, epochs=100, batch_size=64)
test_preds = model.predict(data_test, batch_size=64)
test_preds_decoded = oe.inverse_transform(test_preds)
test_preds_decoded_inversed = le.inverse_transform(test_preds_decoded) | code |
106198657/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
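# note: the standard Pima diabetes CSV names this column 'SkinThickness'; if the spelling here does not match the file, pandas fills the whole column with NaN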
y = data.Outcome
X
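# append one hand-made patient record so it can be scored with a fitted model later on; the Outcome value of 0 is just a placeholder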
df2 = pd.DataFrame({'Pregnancies': [0], 'Glucose': [80], 'BloodPressure': [72], 'Skinthickness': [0], 'Insulin': [0], 'BMI': [23], 'DiabetesPedigreeFunction': [0.5], 'Age': [30], 'Outcome': [0]})
df2 = data.append(df2)
df2 | code |
106198657/cell_13 | [
"image_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
logreg = LogisticRegression(solver='liblinear')
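# X_train, X_test, y_train and y_test come from a train/test split made in an earlier notebook cell that is not captured in this code snapshot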
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
logreg.coef_
logreg.predict_proba(X) | code |
106198657/cell_9 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
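# recompute the ROC curve from predicted probabilities; the curve built from the hard 0/1 predictions above only gives a single operating point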
plt.plot(fpr, tpr, label='data 1')
plt.legend(loc=4)
plt.show() | code |
106198657/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X | code |
106198657/cell_26 | [
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
df2 = pd.DataFrame({'Pregnancies': [0], 'Glucose': [80], 'BloodPressure': [72], 'Skinthickness': [0], 'Insulin': [0], 'BMI': [23], 'DiabetesPedigreeFunction': [0.5], 'Age': [30], 'Outcome': [0]})
df2 = data.append(df2)
df2
X_test = df2[['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age']][768:]
model2 = LogisticRegression(solver='liblinear', C=10.0, random_state=0)
model2.fit(X, y)
y_pred = model2.predict(X_test)
y_pred | code |
106198657/cell_11 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_ | code |
106198657/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
model = LogisticRegression(solver='liblinear', C=10.0, random_state=0)
model.fit(X, y) | code |
106198657/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
106198657/cell_7 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
print('Accuracy:', metrics.accuracy_score(y_test, y_pred)) | code |
106198657/cell_18 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
logreg.coef_
logreg.predict_proba(X)
logreg.predict(X)
logreg.score(X, y)
confusion_matrix(y, logreg.predict(X))
cm = confusion_matrix(y, logreg.predict(X))
fig, ax = plt.subplots(figsize=(8, 8))
ax.imshow(cm)
ax.grid(False)
ax.xaxis.set(ticks=(0, 1), ticklabels=('Predicted 0s', 'Predicted 1s'))
ax.yaxis.set(ticks=(0, 1), ticklabels=('Predicted 0s', 'Predicted 1s'))
ax.set_ylim(1.5, -0.5)
for i in range(2):
for j in range(2):
ax.text(j, i, cm[i, j], ha='center', va='center', color='red')
plt.show()
print(classification_report(y, logreg.predict(X))) | code |
106198657/cell_8 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
plt.plot(fpr, tpr, label='data 1')
plt.legend(loc=4)
plt.show() | code |
106198657/cell_15 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
logreg.coef_
logreg.predict_proba(X)
logreg.predict(X)
logreg.score(X, y) | code |
106198657/cell_16 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
logreg.coef_
logreg.predict_proba(X)
logreg.predict(X)
logreg.score(X, y)
confusion_matrix(y, logreg.predict(X)) | code |
106198657/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data | code |
106198657/cell_17 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
logreg.coef_
logreg.predict_proba(X)
logreg.predict(X)
logreg.score(X, y)
confusion_matrix(y, logreg.predict(X))
cm = confusion_matrix(y, logreg.predict(X))
fig, ax = plt.subplots(figsize=(8, 8))
ax.imshow(cm)
ax.grid(False)
ax.xaxis.set(ticks=(0, 1), ticklabels=('Predicted 0s', 'Predicted 1s'))
ax.yaxis.set(ticks=(0, 1), ticklabels=('Predicted 0s', 'Predicted 1s'))
ax.set_ylim(1.5, -0.5)
for i in range(2):
for j in range(2):
ax.text(j, i, cm[i, j], ha='center', va='center', color='red')
plt.show() | code |
106198657/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
model2 = LogisticRegression(solver='liblinear', C=10.0, random_state=0)
model2.fit(X, y) | code |
106198657/cell_14 | [
"image_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/diabetes-dataset/diabetes.csv')
data
X = pd.DataFrame(data, columns=['Pregnancies', 'Glucose', 'BloodPressure', 'Skinthickness', 'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age'])
y = data.Outcome
X
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
logreg.coef_
logreg.predict_proba(X)
logreg.predict(X) | code |
106198657/cell_10 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_ | code |
106198657/cell_12 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
from sklearn.metrics import classification_report, confusion_matrix
logreg.classes_
logreg.intercept_
logreg.coef_ | code |
1008193/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
trn = pd.read_json(open('../input/train.json', 'r'))
tst = pd.read_json(open('../input/test.json', 'r'))
trn.head() | code |
1008193/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
1008193/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
trn = pd.read_json(open('../input/train.json', 'r'))
tst = pd.read_json(open('../input/test.json', 'r'))
print('Train set: ', trn.shape)
print('Test set: ', tst.shape) | code |
17111364/cell_13 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
data = data.rename({'approx_cost(for two people)': 'cost'}, axis=1)
data['cost'] = data['cost'].replace(',', '', regex=True)
data[['votes', 'cost']] = data[['votes', 'cost']].apply(pd.to_numeric)
grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata.index = newdata['name']
newdata.drop(['name', 'url', 'phone', 'listed_in(city)', 'listed_in(type)_x', 'address', 'dish_liked', 'listed_in(type)_y', 'menu_item', 'cuisines', 'reviews_list'], axis=1, inplace=True)
newdata['rating'] = newdata['rate'].str[:3]
newdata = newdata[newdata.rating != 'NEW']
newdata = newdata.dropna(subset=['rating'])
newdata['rating'] = pd.to_numeric(newdata['rating'])
np.unique(newdata['rating'], return_counts=True) | code |
17111364/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
data = data.rename({'approx_cost(for two people)': 'cost'}, axis=1)
data['cost'] = data['cost'].replace(',', '', regex=True)
data[['votes', 'cost']] = data[['votes', 'cost']].apply(pd.to_numeric)
grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list})
newdata = pd.merge(grouped, data, on=['name', 'address'])
newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str)
newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True)
newdata.index = newdata['name']
newdata.drop(['name', 'url', 'phone', 'listed_in(city)', 'listed_in(type)_x', 'address', 'dish_liked', 'listed_in(type)_y', 'menu_item', 'cuisines', 'reviews_list'], axis=1, inplace=True)
newdata['rating'] = newdata['rate'].str[:3]
newdata = newdata[newdata.rating != 'NEW']
newdata = newdata.dropna(subset=['rating'])
newdata['rating'] = pd.to_numeric(newdata['rating'])
newdata.drop('rate', axis=1, inplace=True)
newdata.describe(include='all') | code |
18155947/cell_9 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
def one_hot_encoder(df, nan_as_category=True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return (df, new_columns)
def train_test():
train = pd.read_csv('../input/application_train.csv')
test = pd.read_csv('../input/application_test.csv')
(print(train.shape), print(test.shape))
ind_train = train['SK_ID_CURR']
ind_test = test['SK_ID_CURR']
new_df = train.append(test).reset_index()
avg = [col for col in new_df.columns if 'AVG' in col]
new_df['AVG_MEAN'] = new_df[avg].mean(axis=1)
new_df['AVG_SUM'] = new_df[avg].sum(axis=1)
new_df['AVG_STD'] = new_df[avg].std(axis=1)
new_df['AVG_MEDIAN'] = new_df[avg].median(axis=1)
mode = [col for col in new_df.columns if 'MODE' in col]
new_df['MODE_MEAN'] = new_df[mode].mean(axis=1)
new_df['MODE_SUM'] = new_df[mode].sum(axis=1)
new_df['MODE_STD'] = new_df[mode].std(axis=1)
new_df['MODE_MEDIAN'] = new_df[mode].median(axis=1)
medi = [col for col in new_df.columns if 'MEDI' in col]
new_df['MEDI_MEAN'] = new_df[medi].mean(axis=1)
new_df['MEDI_SUM'] = new_df[medi].sum(axis=1)
new_df['MEDI_STD'] = new_df[medi].std(axis=1)
new_df['MEDI_MEDIAN'] = new_df[medi].median(axis=1)
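# 365243 is the Home Credit sentinel for DAYS_EMPLOYED (no employment record), so it is treated as missing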
new_df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
new_df['AGE_CLIENT'] = new_df['DAYS_BIRTH'] / -365
new_df['EMPLOYED_YEAR'] = new_df['DAYS_EMPLOYED'] / -365
new_df['_REGISTRATION_YEAR'] = new_df['DAYS_REGISTRATION'] / -365
new_df['ID_PUBLISH_YEAR'] = new_df['DAYS_ID_PUBLISH'] / -365
new_df['RATIO_CHILD_MEMBERS_FAM'] = new_df['CNT_CHILDREN'] / new_df['CNT_FAM_MEMBERS']
new_df['RATIO_INCOME_MEMBERS_FAM'] = new_df['AMT_INCOME_TOTAL'] / new_df['CNT_FAM_MEMBERS']
new_df['RATIO_INCOME_CREDIT'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_CREDIT']
new_df['RATIO_INCOME_ANNUITY'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_ANNUITY']
new_df['RATIO_PRICE_INCOME'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_INCOME_TOTAL']
new_df['RATIO_PRICE_CREDIT'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_CREDIT']
new_df['EXT_SCORE_SUM'] = new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']
new_df['EXT_SCORE_MEAN'] = (new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']) / 3
new_df['OBS_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']
new_df['OBS_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']) / 2
new_df['DEF_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']) / 2
new_df['DEF_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']
flag_doc_col = [col for col in new_df.columns if 'FLAG_DOCUMENT' in col]
new_df['FLAG_DOC_MEAN'] = train[flag_doc_col].mean(axis=1)
new_df['FLAG_DOC_SUM'] = train[flag_doc_col].sum(axis=1)
new_df, col = one_hot_encoder(new_df)
train = new_df.loc[new_df['SK_ID_CURR'].isin(ind_train)]
test = new_df.loc[new_df['SK_ID_CURR'].isin(ind_test)]
(print(train.shape), print(test.shape))
return (train, test)
def bureau_bb():
bureau_balance = pd.read_csv('../input/bureau_balance.csv')
bureau = pd.read_csv('../input/bureau.csv')
bureau_balance, cat_bb = one_hot_encoder(bureau_balance)
bb_agg = {'MONTHS_BALANCE': ['median', 'min', 'max'], 'STATUS_0': ['sum', 'mean'], 'STATUS_1': ['sum', 'mean'], 'STATUS_2': ['sum', 'mean'], 'STATUS_3': ['sum', 'mean'], 'STATUS_4': ['sum', 'mean'], 'STATUS_5': ['sum', 'mean'], 'STATUS_C': ['sum', 'mean'], 'STATUS_X': ['sum', 'mean'], 'STATUS_nan': ['sum', 'mean']}
bureau_balance_agg = bureau_balance.groupby('SK_ID_BUREAU').agg(bb_agg)
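# the aggregation yields a (column, statistic) MultiIndex; flatten it into single-level COLUMN_STAT names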
bureau_balance_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in bureau_balance_agg.columns.tolist()])
bureau_balance_agg = bureau_balance_agg.reset_index()
bureau = bureau.merge(bureau_balance_agg, how='left', on='SK_ID_BUREAU').drop(['SK_ID_BUREAU'], axis=1)
bureau, cat_b = one_hot_encoder(bureau)
b_agg = {'DAYS_CREDIT': ['median'], 'CREDIT_DAY_OVERDUE': ['median', 'min', 'max'], 'DAYS_CREDIT_ENDDATE': ['median'], 'DAYS_ENDDATE_FACT': ['median'], 'DAYS_CREDIT_UPDATE': ['median'], 'AMT_CREDIT_MAX_OVERDUE': ['min', 'max'], 'CNT_CREDIT_PROLONG': ['sum', 'mean', 'min', 'max'], 'AMT_CREDIT_SUM': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_DEBT': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_LIMIT': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_OVERDUE': ['min', 'mean', 'max'], 'MONTHS_BALANCE_MEDIAN': ['median'], 'MONTHS_BALANCE_MIN': ['min', 'median', 'max'], 'MONTHS_BALANCE_MAX': ['min', 'median', 'max'], 'AMT_ANNUITY': ['min', 'mean', 'max']}
cat_b_agg = {}
for col in cat_b:
cat_b_agg[col] = ['mean']
for col in cat_bb:
cat_b_agg[col + '_SUM'] = ['mean']
cat_b_agg[col + '_MEAN'] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR').agg({**b_agg, **cat_b_agg})
bureau_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in bureau_agg.columns.tolist()])
bureau_agg = bureau_agg.reset_index()
return bureau_agg
bureau = bureau_bb()
train = pd.merge(train, bureau, how='left', on='SK_ID_CURR')
test = pd.merge(test, bureau, how='left', on='SK_ID_CURR')
def previous_application():
previous_application = pd.read_csv('../input/previous_application.csv')
previous_application, cat_pr = one_hot_encoder(previous_application)
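# 365243 is the same 'missing date' sentinel as in the application table; negative down payments are treated as missing as well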
previous_application.loc[previous_application['RATE_DOWN_PAYMENT'] < 0, 'RATE_DOWN_PAYMENT'] = np.nan
previous_application.loc[previous_application['AMT_DOWN_PAYMENT'] < 0, 'AMT_DOWN_PAYMENT'] = np.nan
previous_application.loc[previous_application['DAYS_TERMINATION'] == 365243, 'DAYS_TERMINATION'] = np.nan
previous_application.loc[previous_application['DAYS_LAST_DUE'] == 365243, 'DAYS_LAST_DUE'] = np.nan
previous_application.loc[previous_application['DAYS_FIRST_DUE'] == 365243, 'DAYS_FIRST_DUE'] = np.nan
previous_application.loc[previous_application['DAYS_FIRST_DRAWING'] == 365243, 'DAYS_FIRST_DRAWING'] = np.nan
pa_agg = {'AMT_ANNUITY': ['median', 'min', 'max'], 'AMT_APPLICATION': ['median', 'min', 'max'], 'AMT_CREDIT': ['median', 'min', 'max'], 'AMT_DOWN_PAYMENT': ['median', 'min', 'max'], 'AMT_GOODS_PRICE': ['median', 'min', 'max'], 'HOUR_APPR_PROCESS_START': ['mean', 'min', 'max'], 'NFLAG_LAST_APPL_IN_DAY': ['sum'], 'RATE_DOWN_PAYMENT': ['mean', 'min', 'max', 'sum'], 'RATE_INTEREST_PRIMARY': ['mean', 'min', 'max', 'sum'], 'RATE_INTEREST_PRIVILEGED': ['mean', 'min', 'max', 'sum'], 'DAYS_DECISION': ['median', 'min', 'max'], 'CNT_PAYMENT': ['median', 'min', 'max'], 'DAYS_FIRST_DRAWING': ['median', 'min', 'max'], 'DAYS_FIRST_DUE': ['median', 'min', 'max'], 'DAYS_LAST_DUE': ['median', 'min', 'max'], 'DAYS_TERMINATION': ['median', 'min', 'max'], 'NFLAG_INSURED_ON_APPROVAL': ['sum']}
cat_agg = {}
for cat in cat_pr:
cat_agg[cat] = ['mean']
previous_application_agg = previous_application.groupby('SK_ID_CURR').agg({**pa_agg, **cat_agg})
previous_application_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in previous_application_agg.columns.tolist()])
previous_application_agg = previous_application_agg.reset_index()
return previous_application_agg
previous_application = previous_application() | code |
18155947/cell_4 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
def one_hot_encoder(df, nan_as_category=True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return (df, new_columns)
def train_test():
train = pd.read_csv('../input/application_train.csv')
test = pd.read_csv('../input/application_test.csv')
(print(train.shape), print(test.shape))
ind_train = train['SK_ID_CURR']
ind_test = test['SK_ID_CURR']
new_df = train.append(test).reset_index()
avg = [col for col in new_df.columns if 'AVG' in col]
new_df['AVG_MEAN'] = new_df[avg].mean(axis=1)
new_df['AVG_SUM'] = new_df[avg].sum(axis=1)
new_df['AVG_STD'] = new_df[avg].std(axis=1)
new_df['AVG_MEDIAN'] = new_df[avg].median(axis=1)
mode = [col for col in new_df.columns if 'MODE' in col]
new_df['MODE_MEAN'] = new_df[mode].mean(axis=1)
new_df['MODE_SUM'] = new_df[mode].sum(axis=1)
new_df['MODE_STD'] = new_df[mode].std(axis=1)
new_df['MODE_MEDIAN'] = new_df[mode].median(axis=1)
medi = [col for col in new_df.columns if 'MEDI' in col]
new_df['MEDI_MEAN'] = new_df[medi].mean(axis=1)
new_df['MEDI_SUM'] = new_df[medi].sum(axis=1)
new_df['MEDI_STD'] = new_df[medi].std(axis=1)
new_df['MEDI_MEDIAN'] = new_df[medi].median(axis=1)
new_df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
new_df['AGE_CLIENT'] = new_df['DAYS_BIRTH'] / -365
new_df['EMPLOYED_YEAR'] = new_df['DAYS_EMPLOYED'] / -365
new_df['_REGISTRATION_YEAR'] = new_df['DAYS_REGISTRATION'] / -365
new_df['ID_PUBLISH_YEAR'] = new_df['DAYS_ID_PUBLISH'] / -365
new_df['RATIO_CHILD_MEMBERS_FAM'] = new_df['CNT_CHILDREN'] / new_df['CNT_FAM_MEMBERS']
new_df['RATIO_INCOME_MEMBERS_FAM'] = new_df['AMT_INCOME_TOTAL'] / new_df['CNT_FAM_MEMBERS']
new_df['RATIO_INCOME_CREDIT'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_CREDIT']
new_df['RATIO_INCOME_ANNUITY'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_ANNUITY']
new_df['RATIO_PRICE_INCOME'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_INCOME_TOTAL']
new_df['RATIO_PRICE_CREDIT'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_CREDIT']
new_df['EXT_SCORE_SUM'] = new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']
new_df['EXT_SCORE_MEAN'] = (new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']) / 3
new_df['OBS_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']
new_df['OBS_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']) / 2
new_df['DEF_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']) / 2
new_df['DEF_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']
flag_doc_col = [col for col in new_df.columns if 'FLAG_DOCUMENT' in col]
new_df['FLAG_DOC_MEAN'] = train[flag_doc_col].mean(axis=1)
new_df['FLAG_DOC_SUM'] = train[flag_doc_col].sum(axis=1)
new_df, col = one_hot_encoder(new_df)
train = new_df.loc[new_df['SK_ID_CURR'].isin(ind_train)]
test = new_df.loc[new_df['SK_ID_CURR'].isin(ind_test)]
(print(train.shape), print(test.shape))
return (train, test)
train, test = train_test() | code |
18155947/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
def one_hot_encoder(df, nan_as_category=True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return (df, new_columns)
def train_test():
train = pd.read_csv('../input/application_train.csv')
test = pd.read_csv('../input/application_test.csv')
(print(train.shape), print(test.shape))
ind_train = train['SK_ID_CURR']
ind_test = test['SK_ID_CURR']
new_df = train.append(test).reset_index()
avg = [col for col in new_df.columns if 'AVG' in col]
new_df['AVG_MEAN'] = new_df[avg].mean(axis=1)
new_df['AVG_SUM'] = new_df[avg].sum(axis=1)
new_df['AVG_STD'] = new_df[avg].std(axis=1)
new_df['AVG_MEDIAN'] = new_df[avg].median(axis=1)
mode = [col for col in new_df.columns if 'MODE' in col]
new_df['MODE_MEAN'] = new_df[mode].mean(axis=1)
new_df['MODE_SUM'] = new_df[mode].sum(axis=1)
new_df['MODE_STD'] = new_df[mode].std(axis=1)
new_df['MODE_MEDIAN'] = new_df[mode].median(axis=1)
medi = [col for col in new_df.columns if 'MEDI' in col]
new_df['MEDI_MEAN'] = new_df[medi].mean(axis=1)
new_df['MEDI_SUM'] = new_df[medi].sum(axis=1)
new_df['MEDI_STD'] = new_df[medi].std(axis=1)
new_df['MEDI_MEDIAN'] = new_df[medi].median(axis=1)
new_df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
new_df['AGE_CLIENT'] = new_df['DAYS_BIRTH'] / -365
new_df['EMPLOYED_YEAR'] = new_df['DAYS_EMPLOYED'] / -365
new_df['_REGISTRATION_YEAR'] = new_df['DAYS_REGISTRATION'] / -365
new_df['ID_PUBLISH_YEAR'] = new_df['DAYS_ID_PUBLISH'] / -365
new_df['RATIO_CHILD_MEMBERS_FAM'] = new_df['CNT_CHILDREN'] / new_df['CNT_FAM_MEMBERS']
new_df['RATIO_INCOME_MEMBERS_FAM'] = new_df['AMT_INCOME_TOTAL'] / new_df['CNT_FAM_MEMBERS']
new_df['RATIO_INCOME_CREDIT'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_CREDIT']
new_df['RATIO_INCOME_ANNUITY'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_ANNUITY']
new_df['RATIO_PRICE_INCOME'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_INCOME_TOTAL']
new_df['RATIO_PRICE_CREDIT'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_CREDIT']
new_df['EXT_SCORE_SUM'] = new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']
new_df['EXT_SCORE_MEAN'] = (new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']) / 3
new_df['OBS_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']
new_df['OBS_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']) / 2
new_df['DEF_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']) / 2
new_df['DEF_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']
flag_doc_col = [col for col in new_df.columns if 'FLAG_DOCUMENT' in col]
new_df['FLAG_DOC_MEAN'] = train[flag_doc_col].mean(axis=1)
new_df['FLAG_DOC_SUM'] = train[flag_doc_col].sum(axis=1)
new_df, col = one_hot_encoder(new_df)
train = new_df.loc[new_df['SK_ID_CURR'].isin(ind_train)]
test = new_df.loc[new_df['SK_ID_CURR'].isin(ind_test)]
(print(train.shape), print(test.shape))
return (train, test)
def bureau_bb():
bureau_balance = pd.read_csv('../input/bureau_balance.csv')
bureau = pd.read_csv('../input/bureau.csv')
bureau_balance, cat_bb = one_hot_encoder(bureau_balance)
bb_agg = {'MONTHS_BALANCE': ['median', 'min', 'max'], 'STATUS_0': ['sum', 'mean'], 'STATUS_1': ['sum', 'mean'], 'STATUS_2': ['sum', 'mean'], 'STATUS_3': ['sum', 'mean'], 'STATUS_4': ['sum', 'mean'], 'STATUS_5': ['sum', 'mean'], 'STATUS_C': ['sum', 'mean'], 'STATUS_X': ['sum', 'mean'], 'STATUS_nan': ['sum', 'mean']}
bureau_balance_agg = bureau_balance.groupby('SK_ID_BUREAU').agg(bb_agg)
bureau_balance_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in bureau_balance_agg.columns.tolist()])
bureau_balance_agg = bureau_balance_agg.reset_index()
bureau = bureau.merge(bureau_balance_agg, how='left', on='SK_ID_BUREAU').drop(['SK_ID_BUREAU'], axis=1)
bureau, cat_b = one_hot_encoder(bureau)
b_agg = {'DAYS_CREDIT': ['median'], 'CREDIT_DAY_OVERDUE': ['median', 'min', 'max'], 'DAYS_CREDIT_ENDDATE': ['median'], 'DAYS_ENDDATE_FACT': ['median'], 'DAYS_CREDIT_UPDATE': ['median'], 'AMT_CREDIT_MAX_OVERDUE': ['min', 'max'], 'CNT_CREDIT_PROLONG': ['sum', 'mean', 'min', 'max'], 'AMT_CREDIT_SUM': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_DEBT': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_LIMIT': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_OVERDUE': ['min', 'mean', 'max'], 'MONTHS_BALANCE_MEDIAN': ['median'], 'MONTHS_BALANCE_MIN': ['min', 'median', 'max'], 'MONTHS_BALANCE_MAX': ['min', 'median', 'max'], 'AMT_ANNUITY': ['min', 'mean', 'max']}
cat_b_agg = {}
for col in cat_b:
cat_b_agg[col] = ['mean']
for col in cat_bb:
cat_b_agg[col + '_SUM'] = ['mean']
cat_b_agg[col + '_MEAN'] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR').agg({**b_agg, **cat_b_agg})
bureau_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in bureau_agg.columns.tolist()])
bureau_agg = bureau_agg.reset_index()
return bureau_agg
bureau = bureau_bb() | code |
18155947/cell_12 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
def one_hot_encoder(df, nan_as_category=True):
original_columns = list(df.columns)
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
new_columns = [c for c in df.columns if c not in original_columns]
return (df, new_columns)
def train_test():
train = pd.read_csv('../input/application_train.csv')
test = pd.read_csv('../input/application_test.csv')
    print(train.shape)
    print(test.shape)
ind_train = train['SK_ID_CURR']
ind_test = test['SK_ID_CURR']
new_df = train.append(test).reset_index()
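    # Stack train and test so engineered features and dummy columns stay aligned; the frames are split back apart below using SK_ID_CURR.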
avg = [col for col in new_df.columns if 'AVG' in col]
    new_df['AVG_MEAN'] = new_df[avg].mean(axis=1)
new_df['AVG_SUM'] = new_df[avg].sum(axis=1)
new_df['AVG_STD'] = new_df[avg].std(axis=1)
new_df['AVG_MEDIAN'] = new_df[avg].median(axis=1)
mode = [col for col in new_df.columns if 'MODE' in col]
    new_df['MODE_MEAN'] = new_df[mode].mean(axis=1)
new_df['MODE_SUM'] = new_df[mode].sum(axis=1)
new_df['MODE_STD'] = new_df[mode].std(axis=1)
new_df['MODE_MEDIAN'] = new_df[mode].median(axis=1)
medi = [col for col in new_df.columns if 'MEDI' in col]
    new_df['MEDI_MEAN'] = new_df[medi].mean(axis=1)
new_df['MEDI_SUM'] = new_df[medi].sum(axis=1)
new_df['MEDI_STD'] = new_df[medi].std(axis=1)
new_df['MEDI_MEDIAN'] = new_df[medi].median(axis=1)
new_df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
new_df['AGE_CLIENT'] = new_df['DAYS_BIRTH'] / -365
new_df['EMPLOYED_YEAR'] = new_df['DAYS_EMPLOYED'] / -365
    new_df['REGISTRATION_YEAR'] = new_df['DAYS_REGISTRATION'] / -365
new_df['ID_PUBLISH_YEAR'] = new_df['DAYS_ID_PUBLISH'] / -365
new_df['RATIO_CHILD_MEMBERS_FAM'] = new_df['CNT_CHILDREN'] / new_df['CNT_FAM_MEMBERS']
new_df['RATIO_INCOME_MEMBERS_FAM'] = new_df['AMT_INCOME_TOTAL'] / new_df['CNT_FAM_MEMBERS']
new_df['RATIO_INCOME_CREDIT'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_CREDIT']
new_df['RATIO_INCOME_ANNUITY'] = new_df['AMT_INCOME_TOTAL'] / new_df['AMT_ANNUITY']
new_df['RATIO_PRICE_INCOME'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_INCOME_TOTAL']
new_df['RATIO_PRICE_CREDIT'] = new_df['AMT_GOODS_PRICE'] / new_df['AMT_CREDIT']
new_df['EXT_SCORE_SUM'] = new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']
new_df['EXT_SCORE_MEAN'] = (new_df['EXT_SOURCE_1'] + new_df['EXT_SOURCE_2'] + new_df['EXT_SOURCE_3']) / 3
new_df['OBS_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']
new_df['OBS_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['OBS_30_CNT_SOCIAL_CIRCLE'] + new_df['OBS_60_CNT_SOCIAL_CIRCLE']) / 2
new_df['DEF_90_CNT_SOCIAL_CIRCLE_MEAN'] = (new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']) / 2
new_df['DEF_90_CNT_SOCIAL_CIRCLE_SUM'] = new_df['DEF_60_CNT_SOCIAL_CIRCLE'] + new_df['DEF_30_CNT_SOCIAL_CIRCLE']
flag_doc_col = [col for col in new_df.columns if 'FLAG_DOCUMENT' in col]
    new_df['FLAG_DOC_MEAN'] = new_df[flag_doc_col].mean(axis=1)
    new_df['FLAG_DOC_SUM'] = new_df[flag_doc_col].sum(axis=1)
new_df, col = one_hot_encoder(new_df)
train = new_df.loc[new_df['SK_ID_CURR'].isin(ind_train)]
test = new_df.loc[new_df['SK_ID_CURR'].isin(ind_test)]
    print(train.shape)
    print(test.shape)
return (train, test)
def bureau_bb():
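    # Aggregate the monthly bureau_balance rows to one row per SK_ID_BUREAU, merge them into bureau, then aggregate again to one row per client (SK_ID_CURR).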
bureau_balance = pd.read_csv('../input/bureau_balance.csv')
bureau = pd.read_csv('../input/bureau.csv')
bureau_balance, cat_bb = one_hot_encoder(bureau_balance)
bb_agg = {'MONTHS_BALANCE': ['median', 'min', 'max'], 'STATUS_0': ['sum', 'mean'], 'STATUS_1': ['sum', 'mean'], 'STATUS_2': ['sum', 'mean'], 'STATUS_3': ['sum', 'mean'], 'STATUS_4': ['sum', 'mean'], 'STATUS_5': ['sum', 'mean'], 'STATUS_C': ['sum', 'mean'], 'STATUS_X': ['sum', 'mean'], 'STATUS_nan': ['sum', 'mean']}
bureau_balance_agg = bureau_balance.groupby('SK_ID_BUREAU').agg(bb_agg)
bureau_balance_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in bureau_balance_agg.columns.tolist()])
bureau_balance_agg = bureau_balance_agg.reset_index()
bureau = bureau.merge(bureau_balance_agg, how='left', on='SK_ID_BUREAU').drop(['SK_ID_BUREAU'], axis=1)
bureau, cat_b = one_hot_encoder(bureau)
    b_agg = {'DAYS_CREDIT': ['median'], 'CREDIT_DAY_OVERDUE': ['median', 'min', 'max'], 'DAYS_CREDIT_ENDDATE': ['median'], 'DAYS_ENDDATE_FACT': ['median'], 'DAYS_CREDIT_UPDATE': ['median'], 'AMT_CREDIT_MAX_OVERDUE': ['min', 'max'], 'CNT_CREDIT_PROLONG': ['sum', 'mean', 'min', 'max'], 'AMT_CREDIT_SUM': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_DEBT': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_LIMIT': ['min', 'mean', 'max'], 'AMT_CREDIT_SUM_OVERDUE': ['min', 'mean', 'max'], 'MONTHS_BALANCE_MEDIAN': ['median'], 'MONTHS_BALANCE_MIN': ['min', 'median', 'max'], 'MONTHS_BALANCE_MAX': ['min', 'median', 'max'], 'AMT_ANNUITY': ['min', 'mean', 'max']}
cat_b_agg = {}
for col in cat_b:
cat_b_agg[col] = ['mean']
for col in cat_bb:
cat_b_agg[col + '_SUM'] = ['mean']
cat_b_agg[col + '_MEAN'] = ['mean']
bureau_agg = bureau.groupby('SK_ID_CURR').agg({**b_agg, **cat_b_agg})
bureau_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in bureau_agg.columns.tolist()])
bureau_agg = bureau_agg.reset_index()
return bureau_agg
bureau = bureau_bb()
train = pd.merge(train, bureau, how='left', on='SK_ID_CURR')
test = pd.merge(test, bureau, how='left', on='SK_ID_CURR')
def previous_application():
previous_application = pd.read_csv('../input/previous_application.csv')
previous_application, cat_pr = one_hot_encoder(previous_application)
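    # In this dataset 365243 acts as a placeholder for a missing date; convert it (and negative down-payment values) to NaN before aggregating.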
    previous_application.loc[previous_application['RATE_DOWN_PAYMENT'] < 0, 'RATE_DOWN_PAYMENT'] = np.nan
    previous_application.loc[previous_application['AMT_DOWN_PAYMENT'] < 0, 'AMT_DOWN_PAYMENT'] = np.nan
    previous_application.loc[previous_application['DAYS_TERMINATION'] == 365243, 'DAYS_TERMINATION'] = np.nan
    previous_application.loc[previous_application['DAYS_LAST_DUE'] == 365243, 'DAYS_LAST_DUE'] = np.nan
    previous_application.loc[previous_application['DAYS_FIRST_DUE'] == 365243, 'DAYS_FIRST_DUE'] = np.nan
    previous_application.loc[previous_application['DAYS_FIRST_DRAWING'] == 365243, 'DAYS_FIRST_DRAWING'] = np.nan
pa_agg = {'AMT_ANNUITY': ['median', 'min', 'max'], 'AMT_APPLICATION': ['median', 'min', 'max'], 'AMT_CREDIT': ['median', 'min', 'max'], 'AMT_DOWN_PAYMENT': ['median', 'min', 'max'], 'AMT_GOODS_PRICE': ['median', 'min', 'max'], 'HOUR_APPR_PROCESS_START': ['mean', 'min', 'max'], 'NFLAG_LAST_APPL_IN_DAY': ['sum'], 'RATE_DOWN_PAYMENT': ['mean', 'min', 'max', 'sum'], 'RATE_INTEREST_PRIMARY': ['mean', 'min', 'max', 'sum'], 'RATE_INTEREST_PRIVILEGED': ['mean', 'min', 'max', 'sum'], 'DAYS_DECISION': ['median', 'min', 'max'], 'CNT_PAYMENT': ['median', 'min', 'max'], 'DAYS_FIRST_DRAWING': ['median', 'min', 'max'], 'DAYS_FIRST_DUE': ['median', 'min', 'max'], 'DAYS_LAST_DUE': ['median', 'min', 'max'], 'DAYS_TERMINATION': ['median', 'min', 'max'], 'NFLAG_INSURED_ON_APPROVAL': ['sum']}
cat_agg = {}
for cat in cat_pr:
cat_agg[cat] = ['mean']
previous_application_agg = previous_application.groupby('SK_ID_CURR').agg({**pa_agg, **cat_agg})
previous_application_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in previous_application_agg.columns.tolist()])
previous_application_agg = previous_application_agg.reset_index()
return previous_application_agg
previous_application = previous_application()
train = pd.merge(train, previous_application, how='left', on='SK_ID_CURR')
test = pd.merge(test, previous_application, how='left', on='SK_ID_CURR')
def POS_CASH_balance():
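    # Collapse the monthly POS/cash balance history to one aggregated row per client (SK_ID_CURR).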
POS_CASH_balance = pd.read_csv('../input/POS_CASH_balance.csv')
POS_CASH_balance, cat_pc_b = one_hot_encoder(POS_CASH_balance)
pc_b_agg = {'MONTHS_BALANCE': ['median', 'min', 'max'], 'CNT_INSTALMENT': ['median', 'min', 'max'], 'CNT_INSTALMENT_FUTURE': ['median', 'min', 'max'], 'SK_DPD': ['median', 'min', 'max'], 'SK_DPD_DEF': ['median', 'min', 'max']}
cat_agg = {}
for cat in cat_pc_b:
cat_agg[cat] = ['mean']
POS_CASH_balance_agg = POS_CASH_balance.groupby(['SK_ID_CURR']).agg({**pc_b_agg, **cat_agg})
POS_CASH_balance_agg.columns = pd.Index([e[0] + '_' + e[1].upper() for e in POS_CASH_balance_agg.columns.tolist()])
POS_CASH_balance_agg = POS_CASH_balance_agg.reset_index()
return POS_CASH_balance_agg
POS_CASH_balance = POS_CASH_balance() | code |
128009699/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
file_path = '/kaggle/input/twitter-suicidal-data/twitter-suicidal_data.txt'
d = {'tweet': [], 'intention': []}
column_names = []
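# Each line is "tweet,intention": the first line carries the column names, and rows with an empty label are skipped.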
with open(file_path) as file:
    for f in file:
        content = f.split(',')
        if content[0] == 'tweet':
            column_names.append(content[0])
            column_names.append(content[1][:-1])
        elif content[1][:-1] != '':
            d['tweet'].append(content[0])
            d['intention'].append(int(content[1][:-1]))
db = pd.DataFrame(data=d, columns=column_names)
print('Number of records with suicidal intent: ', len([i for i in list(db['intention'].values) if i == 1])) | code |
128009699/cell_4 | [
"text_plain_output_1.png"
] | from datasets import load_dataset
from datasets import load_dataset
dataset_hugging = load_dataset('dannyvas23/notas_suicidios') | code |
128009699/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
file_path = '/kaggle/input/twitter-suicidal-data/twitter-suicidal_data.txt'
d = {'tweet': [], 'intention': []}
column_names = []
with open(file_path) as file:
    for f in file:
        content = f.split(',')
        if content[0] == 'tweet':
            column_names.append(content[0])
            column_names.append(content[1][:-1])
        elif content[1][:-1] != '':
            d['tweet'].append(content[0])
            d['intention'].append(int(content[1][:-1]))
db = pd.DataFrame(data=d, columns=column_names)
print('Number of records without suicidal intent: ', len([i for i in list(db['intention'].values) if i == 0])) | code |
2038426/cell_21 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
trainNum['GarageYrBlt'].fillna(trainNum['GarageYrBlt'].value_counts().idxmax(), inplace=True) | code |
2038426/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist()))) | code |
2038426/cell_34 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
np.array(list(zip(train.Id, trainCat.columns[trainCat.isnull().any()].tolist())))
trainCat1 = trainCat.drop(['Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
trainNum['MSSubClass'] = le.fit_transform(trainNum['MSSubClass'].values)
trainNum['OverallQual'] = le.fit_transform(trainNum['OverallQual'].values)
trainNum['OverallCond'] = le.fit_transform(trainNum['OverallCond'].values)
trainNum['YearBuilt'] = le.fit_transform(trainNum['YearBuilt'].values)
trainNum['YearRemodAdd'] = le.fit_transform(trainNum['YearRemodAdd'].values)
trainNum['GarageYrBlt'] = le.fit_transform(trainNum['GarageYrBlt'].values)
trainNum['YrSold'] = le.fit_transform(trainNum['YrSold'].values)
trainCatTransformed = trainCat1.apply(le.fit_transform)
trainFinal = pd.concat([trainNum, trainCatTransformed], axis=1)
trainFinal.head() | code |
2038426/cell_39 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
np.array(list(zip(train.Id, trainCat.columns[trainCat.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
np.array(list(zip(train.Id, testNum.columns[testNum.isnull().any()].tolist()))) | code |
2038426/cell_41 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
np.array(list(zip(train.Id, trainCat.columns[trainCat.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
np.array(list(zip(train.Id, testNum.columns[testNum.isnull().any()].tolist())))
np.array(list(zip(train.Id, testCat.columns[testCat.isnull().any()].tolist()))) | code |
2038426/cell_11 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist()))) | code |
2038426/cell_19 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
trainNum['LotFrontage'].fillna(trainNum['LotFrontage'].mean(), inplace=True)
trainNum['MasVnrArea'].fillna(trainNum['MasVnrArea'].mean(), inplace=True) | code |
2038426/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns))) | code |
2038426/cell_28 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist()
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
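# Re-encode the category-like numeric columns (class codes, quality ratings, years) as consecutive integer labels.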
trainNum['MSSubClass'] = le.fit_transform(trainNum['MSSubClass'].values)
trainNum['OverallQual'] = le.fit_transform(trainNum['OverallQual'].values)
trainNum['OverallCond'] = le.fit_transform(trainNum['OverallCond'].values)
trainNum['YearBuilt'] = le.fit_transform(trainNum['YearBuilt'].values)
trainNum['YearRemodAdd'] = le.fit_transform(trainNum['YearRemodAdd'].values)
trainNum['GarageYrBlt'] = le.fit_transform(trainNum['GarageYrBlt'].values)
trainNum['YrSold'] = le.fit_transform(trainNum['YrSold'].values) | code |
2038426/cell_15 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
trainNum.columns[trainNum.isnull().any()].tolist() | code |
2038426/cell_17 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
np.array(list(zip(train.Id, trainCat.columns[trainCat.isnull().any()].tolist()))) | code |
2038426/cell_43 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
np.array(list(zip(train.Id, trainCat.columns[trainCat.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
np.array(list(zip(train.Id, testNum.columns[testNum.isnull().any()].tolist())))
testNum['BsmtFinSF1'].fillna(testNum['BsmtFinSF1'].mean(), inplace=True)
testNum['BsmtFinSF2'].fillna(testNum['BsmtFinSF2'].mean(), inplace=True)
testNum['BsmtUnfSF'].fillna(testNum['BsmtUnfSF'].mean(), inplace=True)
testNum['TotalBsmtSF'].fillna(testNum['TotalBsmtSF'].mean(), inplace=True)
testNum['BsmtFullBath'].fillna(testNum['BsmtFullBath'].mean(), inplace=True)
testNum['BsmtHalfBath'].fillna(testNum['BsmtHalfBath'].mean(), inplace=True)
testNum['GarageCars'].fillna(testNum['GarageCars'].mean(), inplace=True)
testNum['GarageArea'].fillna(testNum['GarageArea'].mean(), inplace=True)
testNum['LotFrontage'].fillna(testNum['LotFrontage'].mean(), inplace=True)
testNum['MasVnrArea'].fillna(testNum['MasVnrArea'].mean(), inplace=True)
testNum['GarageYrBlt'].fillna(testNum['GarageYrBlt'].value_counts().idxmax(), inplace=True) | code |
2038426/cell_37 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
np.array(list(zip(train.Id, train.columns)))
np.array(list(zip(train.Id, train.columns[train.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist())))
trainNum = train.select_dtypes(include=[np.number])
trainCat = train.select_dtypes(include=[object])
testNum = test.select_dtypes(include=[np.number])
testCat = test.select_dtypes(include=[object])
np.array(list(zip(train.Id, trainCat.columns[trainCat.isnull().any()].tolist())))
np.array(list(zip(train.Id, test.columns[test.isnull().any()].tolist()))) | code |
2038426/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head() | code |
72086968/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/nlp-getting-started/train.csv')
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
train_df.head(5) | code |
72086968/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/nlp-getting-started/train.csv')
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
sub_df = pd.read_csv('../input/nlp-getting-started/sample_submission.csv')
sub_df.head() | code |
72086968/cell_20 | [
"text_plain_output_1.png"
] | from sklearn import feature_extraction
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import cross_val_score
import pandas as pd
train_df = pd.read_csv('../input/nlp-getting-started/train.csv')
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
count_vectorizer = feature_extraction.text.CountVectorizer()
example_train_vectors = count_vectorizer.fit_transform(train_df['text'][0:5])
train_vectors = count_vectorizer.fit_transform(train_df['text'])
test_vectors = count_vectorizer.transform(test_df['text'])
clf = RidgeClassifier()
scores = cross_val_score(clf, train_vectors, train_df['target'], cv=3, scoring='f1')
clf.fit(train_vectors, train_df['target']) | code |
72086968/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/nlp-getting-started/train.csv')
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
train_df[train_df['target'] == 1].text.values[1] | code |
72086968/cell_11 | [
"text_html_output_1.png"
] | from sklearn import feature_extraction
import pandas as pd
train_df = pd.read_csv('../input/nlp-getting-started/train.csv')
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
count_vectorizer = feature_extraction.text.CountVectorizer()
example_train_vectors = count_vectorizer.fit_transform(train_df['text'][0:5])
print(example_train_vectors.todense().shape)
print(example_train_vectors.todense()) | code |
72086968/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/nlp-getting-started/train.csv')
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
train_df[train_df['target'] == 0].text.values[1] | code |
72086968/cell_18 | [
"text_plain_output_1.png"
] | from sklearn import feature_extraction
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import cross_val_score
import pandas as pd
train_df = pd.read_csv('../input/nlp-getting-started/train.csv')
test_df = pd.read_csv('../input/nlp-getting-started/test.csv')
count_vectorizer = feature_extraction.text.CountVectorizer()
example_train_vectors = count_vectorizer.fit_transform(train_df['text'][0:5])
train_vectors = count_vectorizer.fit_transform(train_df['text'])
test_vectors = count_vectorizer.transform(test_df['text'])
clf = RidgeClassifier()
scores = cross_val_score(clf, train_vectors, train_df['target'], cv=3, scoring='f1')
scores | code |
72086968/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import cross_val_score
help(cross_val_score) | code |
90155596/cell_5 | [
"image_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
path = '../input/fzxdata2/imgs/longan2berry/TestB/3.jpg'
def img_read(path):
"""读取一张图片"""
img = cv2.imread(path)
img = img[..., ::-1]
img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_LINEAR)
return img
with tf.Session() as sess:
tf.global_variables_initializer().run()
output_graph_def = tf.GraphDef()
with open('../input/longan2blueberry/train_models/epoch_29/model.pb', 'rb') as f:
output_graph_def.ParseFromString(f.read())
tf.import_graph_def(output_graph_def, name='')
inputs = sess.graph.get_tensor_by_name('X_B:0')
label = sess.graph.get_tensor_by_name('gen_B2A/generator:0')
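    # Scale pixels to [-1, 1], run the frozen B-to-A generator, then map its output back to [0, 1] for display.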
img = img_read(path).astype(np.float32) / 127.5 - 1
y = sess.run(label, feed_dict={inputs: np.expand_dims(img, axis=0)})
y = 0.5 * y[0] + 0.5
cv2.imwrite('res0.jpg', y[..., ::-1] * 255)
plt.axis('off')
plt.imshow(y) | code |
129026803/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
columns_to_drop = ['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
train_df = train_df.drop(columns=columns_to_drop)
test_df = test_df.drop(columns=columns_to_drop)
features = train_df.columns.tolist()
# create subplots
fig, axes = plt.subplots(4, 3, figsize=(12, 12))
axes = axes.ravel()
# loop over features and plot scatter plot
for ax, feature in zip(axes, features):
ax.scatter(train_df[feature], train_df['yield'])
ax.set_xlabel(feature)
ax.set_ylabel('yield')
# adjust subplot spacing
# plt.subplots_adjust(hspace=0.5)
# display plot
plt.show()
train_df.columns | code |
129026803/cell_9 | [
"application_vnd.jupyter.stderr_output_766.png",
"application_vnd.jupyter.stderr_output_116.png",
"application_vnd.jupyter.stderr_output_74.png",
"application_vnd.jupyter.stderr_output_268.png",
"application_vnd.jupyter.stderr_output_145.png",
"application_vnd.jupyter.stderr_output_362.png",
"applicatio... | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
plt.figure(figsize=(12, 12))
sns.heatmap(train_df.corr(), cmap='coolwarm', annot=True, fmt='.2f') | code |
129026803/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape | code |
129026803/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df.head() | code |
129026803/cell_2 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns | code |
129026803/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
columns_to_drop = ['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
train_df = train_df.drop(columns=columns_to_drop)
test_df = test_df.drop(columns=columns_to_drop)
features = train_df.columns.tolist()
# create subplots
fig, axes = plt.subplots(4, 3, figsize=(12, 12))
axes = axes.ravel()
# loop over features and plot scatter plot
for ax, feature in zip(axes, features):
ax.scatter(train_df[feature], train_df['yield'])
ax.set_xlabel(feature)
ax.set_ylabel('yield')
# adjust subplot spacing
# plt.subplots_adjust(hspace=0.5)
# display plot
plt.show()
train_df.columns
train_df.describe().T
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
train_data, test_data = train_test_split(train_df, test_size=0.2, random_state=42)
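# Hold out 20% of the labelled rows to estimate MAE on unseen data before predicting on the competition test set.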
X_train = train_data.drop(columns=['yield'])
y_train = train_data['yield']
X_test = test_data.drop(columns=['yield'])
y_test = test_data['yield']
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
predictions_on_test_df = model.predict(test_df)
predictions_on_test_df | code |
129026803/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
129026803/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
train_df.hist(figsize=(14, 14), xrot=45)
plt.show() | code |
129026803/cell_17 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
columns_to_drop = ['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
train_df = train_df.drop(columns=columns_to_drop)
test_df = test_df.drop(columns=columns_to_drop)
features = train_df.columns.tolist()
# create subplots
fig, axes = plt.subplots(4, 3, figsize=(12, 12))
axes = axes.ravel()
# loop over features and plot scatter plot
for ax, feature in zip(axes, features):
ax.scatter(train_df[feature], train_df['yield'])
ax.set_xlabel(feature)
ax.set_ylabel('yield')
# adjust subplot spacing
# plt.subplots_adjust(hspace=0.5)
# display plot
plt.show()
train_df.columns
train_df.describe().T
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
train_data, test_data = train_test_split(train_df, test_size=0.2, random_state=42)
X_train = train_data.drop(columns=['yield'])
y_train = train_data['yield']
X_test = test_data.drop(columns=['yield'])
y_test = test_data['yield']
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
print('MAE:', mae) | code |
129026803/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
columns_to_drop = ['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
train_df = train_df.drop(columns=columns_to_drop)
test_df = test_df.drop(columns=columns_to_drop)
features = train_df.columns.tolist()
# create subplots
fig, axes = plt.subplots(4, 3, figsize=(12, 12))
axes = axes.ravel()
# loop over features and plot scatter plot
for ax, feature in zip(axes, features):
ax.scatter(train_df[feature], train_df['yield'])
ax.set_xlabel(feature)
ax.set_ylabel('yield')
# adjust subplot spacing
# plt.subplots_adjust(hspace=0.5)
# display plot
plt.show()
train_df.columns
train_df.describe().T | code |
129026803/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split, GridSearchCV
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
columns_to_drop = ['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
train_df = train_df.drop(columns=columns_to_drop)
test_df = test_df.drop(columns=columns_to_drop)
features = train_df.columns.tolist()
# create subplots
fig, axes = plt.subplots(4, 3, figsize=(12, 12))
axes = axes.ravel()
# loop over features and plot scatter plot
for ax, feature in zip(axes, features):
ax.scatter(train_df[feature], train_df['yield'])
ax.set_xlabel(feature)
ax.set_ylabel('yield')
# adjust subplot spacing
# plt.subplots_adjust(hspace=0.5)
# display plot
plt.show()
train_df.columns
train_df.describe().T
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
train_data, test_data = train_test_split(train_df, test_size=0.2, random_state=42)
X_train = train_data.drop(columns=['yield'])
y_train = train_data['yield']
X_test = test_data.drop(columns=['yield'])
y_test = test_data['yield']
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_absolute_error
train_data, test_data = train_test_split(train_df, test_size=0.2, random_state=42)
X_train = train_data.drop(columns=['yield'])
y_train = train_data['yield']
X_test = test_data.drop(columns=['yield'])
y_test = test_data['yield']
rf_model = RandomForestRegressor()
gb_model = GradientBoostingRegressor()
rf_param_grid = {'n_estimators': [5, 20, 50, 100], 'max_features': ['auto', 'sqrt'], 'max_depth': [int(x) for x in np.linspace(10, 120, num=12)], 'min_samples_split': [2, 6, 10], 'min_samples_leaf': [1, 3, 4], 'bootstrap': [True, False]}
gb_param_grid = {'learning_rate': [0.01, 0.1], 'n_estimators': [100, 500, 1000], 'max_depth': [3, 5, 7], 'min_samples_split': [2, 5, 10], 'min_samples_leaf': [1, 2, 4], 'subsample': [0.5, 0.8, 1.0], 'max_features': ['sqrt', 'log2', None]}
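# Run a 5-fold grid search over each parameter grid on all CPU cores, then compare the two models by hold-out MAE.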
rf_gs = GridSearchCV(rf_model, rf_param_grid, cv=5, n_jobs=-1)
gb_gs = GridSearchCV(gb_model, gb_param_grid, cv=5, n_jobs=-1)
rf_gs.fit(X_train, y_train)
gb_gs.fit(X_train, y_train)
rf_pred = rf_gs.predict(X_test)
gb_pred = gb_gs.predict(X_test)
rf_mae = mean_absolute_error(y_test, rf_pred)
gb_mae = mean_absolute_error(y_test, gb_pred)
print('Random Forest MAE: ', rf_mae)
print('Gradient Boosting MAE: ', gb_mae) | code |
129026803/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test_df = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
sample_submission_data = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv')
train_df.shape
train_df = train_df.drop(['id'], axis=1)
test_df = test_df.drop(['id'], axis=1)
columns_to_drop = ['MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']
train_df = train_df.drop(columns=columns_to_drop)
test_df = test_df.drop(columns=columns_to_drop)
features = train_df.columns.tolist()
fig, axes = plt.subplots(4, 3, figsize=(12, 12))
axes = axes.ravel()
for ax, feature in zip(axes, features):
ax.scatter(train_df[feature], train_df['yield'])
ax.set_xlabel(feature)
ax.set_ylabel('yield')
plt.show() | code |
104117646/cell_13 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
a = np.array([2, 3, 4, 5])
a
a | code |
104117646/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
a = np.array([2, 3, 4, 5])
a
b = np.array([1, 2, 3, 4.5])
b
c = np.arange(10)
c
d = np.arange(10, 20)
d
e = np.arange(10, 20, 2)
e | code |
104117646/cell_11 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
a = np.array([2, 3, 4, 5])
a
b = np.array([1, 2, 3, 4.5])
b
c = np.arange(10)
c
d = np.arange(10, 20)
d
e = np.arange(10, 20, 2)
e
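# Unlike arange, linspace includes the stop value: this gives 10 evenly spaced numbers from 1 to 10 inclusive.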
f = np.linspace(1, 10, 10)
f | code |
104117646/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
a = np.array([2, 3, 4, 5])
a
b = np.array([1, 2, 3, 4.5])
b
c = np.arange(10)
c | code |
104117646/cell_8 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
a = np.array([2, 3, 4, 5])
a
b = np.array([1, 2, 3, 4.5])
b
c = np.arange(10)
c
d = np.arange(10, 20)
d | code |
104117646/cell_15 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
a = np.array([2, 3, 4, 5])
a
a.ndim
a.shape | code |
104117646/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
a = np.array([2, 3, 4, 5])
a | code |
104117646/cell_14 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
a = np.array([2, 3, 4, 5])
a
a.ndim | code |
104117646/cell_5 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
a = np.array([2, 3, 4, 5])
a
b = np.array([1, 2, 3, 4.5])
b | code |
17115081/cell_21 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv
import os
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.info() | code |
17115081/cell_34 | [
"text_html_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv
import os
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train_df = train.drop(['Cabin', 'Ticket'], axis=1)
test_df = test.drop(['Cabin', 'Ticket'], axis=1)
train_df['Age'] = train_df['Age'].fillna(train['Age'].median())
test_df['Fare'] = test_df['Fare'].fillna(train['Fare'].median())
test_df['Age'] = test_df['Age'].fillna(train['Age'].median())
train_df['Embarked'] = train['Embarked'].fillna('S')
test_df.describe(include='all') | code |
17115081/cell_23 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv
import os
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum() | code |
17115081/cell_30 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv
import os
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train_df = train.drop(['Cabin', 'Ticket'], axis=1)
test_df = test.drop(['Cabin', 'Ticket'], axis=1)
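# Impute Age and Fare with the training-set medians, and missing Embarked with the most common port, 'S'.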
train_df['Age'] = train_df['Age'].fillna(train['Age'].median())
test_df['Fare'] = test_df['Fare'].fillna(train['Fare'].median())
test_df['Age'] = test_df['Age'].fillna(train['Age'].median())
train_df['Embarked'] = train['Embarked'].fillna('S')
print('Train Data:')
print(train_df.isnull().sum())
print('#' * 30)
print('\nTest Data:')
print(test_df.isnull().sum()) | code |
17115081/cell_33 | [
"text_html_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv
import os
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train_df = train.drop(['Cabin', 'Ticket'], axis=1)
test_df = test.drop(['Cabin', 'Ticket'], axis=1)
train_df['Age'] = train_df['Age'].fillna(train['Age'].median())
test_df['Fare'] = test_df['Fare'].fillna(train['Fare'].median())
test_df['Age'] = test_df['Age'].fillna(train['Age'].median())
train_df['Embarked'] = train['Embarked'].fillna('S')
train_df.describe(include='all') | code |
17115081/cell_20 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv
import os
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.info() | code |
17115081/cell_29 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv
import os
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train_df = train.drop(['Cabin', 'Ticket'], axis=1)
test_df = test.drop(['Cabin', 'Ticket'], axis=1)
print(train_df.shape)
print(test_df.shape) | code |
17115081/cell_26 | [
"text_html_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv
import os
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
train.describe(include='all') | code |
17115081/cell_18 | [
"text_html_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv
import os
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head(10) | code |
17115081/cell_24 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv
import os
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.isnull().sum() | code |
17115081/cell_27 | [
"text_html_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv
import os
os.listdir('../input')
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.isnull().sum()
test.describe(include='all') | code |
18124779/cell_9 | [
"image_output_1.png"
] | from pandas import DataFrame
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x="date", y="time", rot=0)
time_graph.set_xlabel("Date")
time_graph.set_ylabel("Time")
km_graph = df.plot.bar(x="date", y="km", rot=0)
km_graph.set_xlabel("Date")
km_graph.set_ylabel("Km")
rider_performance_graph = df.plot.bar(x="date", y="rider_performance", rot=0)
rider_performance_graph.set_xlabel("Date")
rider_performance_graph.set_ylabel("Rider performance")
horse_performance_graph = df.plot.bar(x="date", y="horse_performance", rot=0)
horse_performance_graph.set_xlabel("Date")
horse_performance_graph.set_ylabel("Horse performance")
avg_performance_graph = df.plot.bar(x="date", y="avg_performance", rot=0)
avg_performance_graph.set_xlabel("Date")
avg_performance_graph.set_ylabel("Average performance")
performance_df = pd.DataFrame({'Rider performance': df['rider_performance'], 'Horse performance': df['horse_performance']})
perfrormance_graph_comparison1 = performance_df.plot.bar(rot=0) | code |
18124779/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance': [3.5, 4.0, 4.5, 4.5]}
df = DataFrame(performance, columns=['id', 'date', 'time', 'km', 'rider_performance', 'horse_performance', 'avg_performance'])
df
time_graph = df.plot.bar(x='date', y='time', rot=0)
time_graph.set_xlabel('Date')
time_graph.set_ylabel('Time') | code |