| path (string, 13-17 chars) | screenshot_names (list, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class: "code") |
|---|---|---|---|
72100024/cell_10
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train-dataset/train.csv')
train.shape
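# Drop the identifier column, then count and rank columns by missing values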
train.drop(columns=['Id'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
train.drop(columns=['PoolQC', 'MiscFeature', 'Alley', 'Fence'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
print('No. of columns with missing values:', len(missing_cols))
missing_cols.sort_values(ascending=False)
|
code
|
72100024/cell_12
|
[
"text_plain_output_2.png",
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train-dataset/train.csv')
train.shape
train.drop(columns=['Id'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
train.drop(columns=['PoolQC', 'MiscFeature', 'Alley', 'Fence'], inplace=True)
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
missing_cols.sort_values(ascending=False)
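# Correlate the columns that still contain missing values (plus SalePrice)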
missing_cols = train.isna().sum()
missing_cols = missing_cols[missing_cols != 0]
newList = list(missing_cols.index)
newList.append('SalePrice')
train[newList].corr(numeric_only=True)
|
code
|
32062611/cell_9
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from matplotlib.ticker import MaxNLocator
from sklearn.mixture import GaussianMixture
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/Iris.csv')
data
features = ['PetalLengthCm', 'PetalWidthCm']
color_dict = {'Iris-setosa': 'darkred', 'Iris-versicolor': 'Yellow', 'Iris-virginica': 'Green'}
names_dict = {'Iris-setosa': 'Setosa', 'Iris-versicolor': 'Versicolor', 'Iris-virginica': 'Virginica'}
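# Flag a point as an outlier (1) when its log-likelihood under the mixture is at or below epsilon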
def get_outliers(log_prob, epsilon):
    outliers = np.where(log_prob <= epsilon, 1, 0)
    return outliers
def make_density_plot(data, features, model, outliers):
x = np.linspace(data[features[0]].min(), data[features[0]].max())
y = np.linspace(data[features[1]].min(), data[features[1]].max())
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = model.score_samples(XX)
Z = Z.reshape(X.shape)
levels = MaxNLocator(nbins=100).tick_values(Z.min(), Z.max())
cmap = plt.get_cmap('BuGn')
plt.figure(figsize=(10,10))
plt.contourf(X, Y, Z.reshape(X.shape), cmap=cmap, levels=levels)
plt.scatter(model.means_[:,0], model.means_[:,1], color="Blue")
g1 = plt.scatter(data[data.Outlier==0][features[0]].values,
data[data.Outlier==0][features[1]].values, label="Normal",s=4.0,c="Pink")
g2 = plt.scatter(data[data.Outlier==1][features[0]].values,
data[data.Outlier==1][features[1]].values, label="Abnormal",s=4.5,c="Black")
plt.legend(handles=[g1,g2])
plt.xlabel(features[0])
plt.ylabel(features[1])
return plt
X_train = data[features].values
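# Fit a 3-component, full-covariance Gaussian mixture to the two petal features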
model = GaussianMixture(n_components=3, covariance_type='full')
model.fit(X_train)
log_prob = model.score_samples(X_train)
outliers = get_outliers(log_prob, 0.15)
data['Outlier'] = outliers
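# Histogram of per-sample log-likelihoods, with quantile markers to help choose the threshold epsilon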
plt.figure(figsize=(20,5))
sns.distplot(log_prob, kde=False, bins=50, color="Green")
g1 = plt.axvline(np.quantile(log_prob, 0.25), color="Red", label="Q_25")
g2 = plt.axvline(np.quantile(log_prob, 0.5), color="Blue", label="Q_50 - Median")
g3 = plt.axvline(np.quantile(log_prob, 0.75), color="Violet", label="Q_75")
g4 = plt.axvline(np.quantile(log_prob, 0.05), color="Brown", label="Q_5")
handles = [g1, g2, g3, g4]
plt.xlabel("log-probabilities of the data spots")
plt.ylabel("frequency")
plt.legend(handles)
epsilon = np.quantile(log_prob, 0.05)
print('epsilon: %f' % epsilon)
|
code
|
32062611/cell_4
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/Iris.csv')
data
features = ['PetalLengthCm', 'PetalWidthCm']
color_dict = {'Iris-setosa': 'darkred', 'Iris-versicolor': 'Yellow', 'Iris-virginica': 'Green'}
names_dict = {'Iris-setosa': 'Setosa', 'Iris-versicolor': 'Versicolor', 'Iris-virginica': 'Virginica'}
for species in color_dict.keys():
plt.scatter(data[data.Species == species][features[0]], data[data.Species == species][features[1]], c=color_dict[species], label=names_dict[species])
plt.xlabel(features[0])
plt.ylabel(features[1])
plt.title('Scatterplot')
plt.legend()
|
code
|
32062611/cell_2
|
[
"text_plain_output_1.png"
] |
import pandas as pd
data = pd.read_csv('../input/Iris.csv')
data
|
code
|
32062611/cell_11
|
[
"text_html_output_1.png"
] |
from matplotlib.ticker import MaxNLocator
from sklearn.mixture import GaussianMixture
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/Iris.csv')
data
features = ['PetalLengthCm', 'PetalWidthCm']
color_dict = {'Iris-setosa': 'darkred', 'Iris-versicolor': 'Yellow', 'Iris-virginica': 'Green'}
names_dict = {'Iris-setosa': 'Setosa', 'Iris-versicolor': 'Versicolor', 'Iris-virginica': 'Virginica'}
def get_outliers(log_prob, epsilon):
    outliers = np.where(log_prob <= epsilon, 1, 0)
    return outliers
def make_density_plot(data, features, model, outliers):
x = np.linspace(data[features[0]].min(), data[features[0]].max())
y = np.linspace(data[features[1]].min(), data[features[1]].max())
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = model.score_samples(XX)
Z = Z.reshape(X.shape)
levels = MaxNLocator(nbins=100).tick_values(Z.min(), Z.max())
cmap = plt.get_cmap('BuGn')
plt.figure(figsize=(10,10))
plt.contourf(X, Y, Z.reshape(X.shape), cmap=cmap, levels=levels)
plt.scatter(model.means_[:,0], model.means_[:,1], color="Blue")
g1 = plt.scatter(data[data.Outlier==0][features[0]].values,
data[data.Outlier==0][features[1]].values, label="Normal",s=4.0,c="Pink")
g2 = plt.scatter(data[data.Outlier==1][features[0]].values,
data[data.Outlier==1][features[1]].values, label="Abnormal",s=4.5,c="Black")
plt.legend(handles=[g1,g2])
plt.xlabel(features[0])
plt.ylabel(features[1])
return plt
X_train = data[features].values
model = GaussianMixture(n_components=3, covariance_type='full')
model.fit(X_train)
log_prob = model.score_samples(X_train)
outliers = get_outliers(log_prob, 0.15)
data['Outlier'] = outliers
plt.figure(figsize=(20,5))
sns.distplot(log_prob, kde=False, bins=50, color="Green")
g1 = plt.axvline(np.quantile(log_prob, 0.25), color="Red", label="Q_25")
g2 = plt.axvline(np.quantile(log_prob, 0.5), color="Blue", label="Q_50 - Median")
g3 = plt.axvline(np.quantile(log_prob, 0.75), color="Violet", label="Q_75")
g4 = plt.axvline(np.quantile(log_prob, 0.05), color="Brown", label="Q_5")
handles = [g1, g2, g3, g4]
plt.xlabel("log-probabilities of the data spots")
plt.ylabel("frequency")
plt.legend(handles)
epsilon = np.quantile(log_prob, 0.05)
outliers = get_outliers(log_prob, epsilon)
data['Outlier'] = outliers
make_density_plot(data, features, model, outliers)
|
code
|
32062611/cell_8
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from matplotlib.ticker import MaxNLocator
from sklearn.mixture import GaussianMixture
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/Iris.csv')
data
features = ['PetalLengthCm', 'PetalWidthCm']
color_dict = {'Iris-setosa': 'darkred', 'Iris-versicolor': 'Yellow', 'Iris-virginica': 'Green'}
names_dict = {'Iris-setosa': 'Setosa', 'Iris-versicolor': 'Versicolor', 'Iris-virginica': 'Virginica'}
def get_outliers(log_prob, epsilon):
    outliers = np.where(log_prob <= epsilon, 1, 0)
    return outliers
def make_density_plot(data, features, model, outliers):
x = np.linspace(data[features[0]].min(), data[features[0]].max())
y = np.linspace(data[features[1]].min(), data[features[1]].max())
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = model.score_samples(XX)
Z = Z.reshape(X.shape)
levels = MaxNLocator(nbins=100).tick_values(Z.min(), Z.max())
cmap = plt.get_cmap('BuGn')
plt.figure(figsize=(10,10))
plt.contourf(X, Y, Z.reshape(X.shape), cmap=cmap, levels=levels)
plt.scatter(model.means_[:,0], model.means_[:,1], color="Blue")
g1 = plt.scatter(data[data.Outlier==0][features[0]].values,
data[data.Outlier==0][features[1]].values, label="Normal",s=4.0,c="Pink")
g2 = plt.scatter(data[data.Outlier==1][features[0]].values,
data[data.Outlier==1][features[1]].values, label="Abnormal",s=4.5,c="Black")
plt.legend(handles=[g1,g2])
plt.xlabel(features[0])
plt.ylabel(features[1])
return plt
X_train = data[features].values
model = GaussianMixture(n_components=3, covariance_type='full')
model.fit(X_train)
log_prob = model.score_samples(X_train)
outliers = get_outliers(log_prob, 0.15)
data['Outlier'] = outliers
plt.figure(figsize=(20, 5))
sns.distplot(log_prob, kde=False, bins=50, color='Green')
g1 = plt.axvline(np.quantile(log_prob, 0.25), color='Red', label='Q_25')
g2 = plt.axvline(np.quantile(log_prob, 0.5), color='Blue', label='Q_50 - Median')
g3 = plt.axvline(np.quantile(log_prob, 0.75), color='Violet', label='Q_75')
g4 = plt.axvline(np.quantile(log_prob, 0.05), color='Brown', label='Q_5')
handles = [g1, g2, g3, g4]
plt.xlabel('log-probabilities of the data spots')
plt.ylabel('frequency')
plt.legend(handles)
|
code
|
32062611/cell_17
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from matplotlib.ticker import MaxNLocator
from sklearn.mixture import GaussianMixture
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/Iris.csv')
data
features = ['PetalLengthCm', 'PetalWidthCm']
color_dict = {'Iris-setosa': 'darkred', 'Iris-versicolor': 'Yellow', 'Iris-virginica': 'Green'}
names_dict = {'Iris-setosa': 'Setosa', 'Iris-versicolor': 'Versicolor', 'Iris-virginica': 'Virginica'}
def get_outliers(log_prob, epsilon):
    outliers = np.where(log_prob <= epsilon, 1, 0)
    return outliers
def make_density_plot(data, features, model, outliers):
x = np.linspace(data[features[0]].min(), data[features[0]].max())
y = np.linspace(data[features[1]].min(), data[features[1]].max())
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = model.score_samples(XX)
Z = Z.reshape(X.shape)
levels = MaxNLocator(nbins=100).tick_values(Z.min(), Z.max())
cmap = plt.get_cmap('BuGn')
plt.figure(figsize=(10,10))
plt.contourf(X, Y, Z.reshape(X.shape), cmap=cmap, levels=levels)
plt.scatter(model.means_[:,0], model.means_[:,1], color="Blue")
g1 = plt.scatter(data[data.Outlier==0][features[0]].values,
data[data.Outlier==0][features[1]].values, label="Normal",s=4.0,c="Pink")
g2 = plt.scatter(data[data.Outlier==1][features[0]].values,
data[data.Outlier==1][features[1]].values, label="Abnormal",s=4.5,c="Black")
plt.legend(handles=[g1,g2])
plt.xlabel(features[0])
plt.ylabel(features[1])
return plt
X_train = data[features].values
model = GaussianMixture(n_components=3, covariance_type='full')
model.fit(X_train)
log_prob = model.score_samples(X_train)
outliers = get_outliers(log_prob, 0.15)
data['Outlier'] = outliers
plt.figure(figsize=(20,5))
sns.distplot(log_prob, kde=False, bins=50, color="Green")
g1 = plt.axvline(np.quantile(log_prob, 0.25), color="Red", label="Q_25")
g2 = plt.axvline(np.quantile(log_prob, 0.5), color="Blue", label="Q_50 - Median")
g3 = plt.axvline(np.quantile(log_prob, 0.75), color="Violet", label="Q_75")
g4 = plt.axvline(np.quantile(log_prob, 0.05), color="Brown", label="Q_5")
handles = [g1, g2, g3, g4]
plt.xlabel("log-probabilities of the data spots")
plt.ylabel("frequency")
plt.legend(handles)
epsilon = np.quantile(log_prob, 0.05)
data.Outlier.value_counts()
species_probas = np.round(model.predict_proba(X_train), 2)
best_species_idx = np.argmax(species_probas, axis=1)
data['Predicted'] = best_species_idx
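# Map each predicted component id to the species whose per-class feature mean is nearest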
def map_to_species(data, features):
grouped = data.groupby('Species')[features].mean()
feature_means = grouped.values
names_map = {}
for m in data.Predicted.unique():
predicted_mean = data[data.Predicted == m][features].mean().values
distances = np.zeros(feature_means.shape[0])
for f in range(feature_means.shape[0]):
distances[f] = np.linalg.norm(predicted_mean - feature_means[f])
name = grouped.index.values[np.argmin(distances)]
names_map[m] = name
return names_map
# Apply the component-to-species mapping so the predicted labels can be plotted by name
names_map = map_to_species(data, features)
data['Predicted_Species'] = data.Predicted.map(names_map)
fig, ax = plt.subplots(1, 2, figsize=(20, 5))
for species in color_dict.keys():
ax[0].scatter(data[data.Species == species][features[0]], data[data.Species == species][features[1]], c=color_dict[species], label=names_dict[species])
ax[1].scatter(data[data.Predicted_Species == species][features[0]], data[data.Predicted_Species == species][features[1]], c=color_dict[species], label=names_dict[species])
ax[0].set_xlabel(features[0])
ax[0].set_ylabel(features[1])
ax[0].set_title('True species')
ax[0].legend()
ax[1].set_xlabel(features[0])
ax[1].set_ylabel(features[1])
ax[1].set_title('Predicted species')
ax[1].legend()
|
code
|
32062611/cell_12
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from matplotlib.ticker import MaxNLocator
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv('../input/Iris.csv')
data
features = ['PetalLengthCm', 'PetalWidthCm']
color_dict = {'Iris-setosa': 'darkred', 'Iris-versicolor': 'Yellow', 'Iris-virginica': 'Green'}
names_dict = {'Iris-setosa': 'Setosa', 'Iris-versicolor': 'Versicolor', 'Iris-virginica': 'Virginica'}
def get_outliers(log_prob, epsilon):
    outliers = np.where(log_prob <= epsilon, 1, 0)
    return outliers
def make_density_plot(data, features, model, outliers):
x = np.linspace(data[features[0]].min(), data[features[0]].max())
y = np.linspace(data[features[1]].min(), data[features[1]].max())
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = model.score_samples(XX)
Z = Z.reshape(X.shape)
levels = MaxNLocator(nbins=100).tick_values(Z.min(), Z.max())
cmap = plt.get_cmap('BuGn')
plt.figure(figsize=(10,10))
plt.contourf(X, Y, Z.reshape(X.shape), cmap=cmap, levels=levels)
plt.scatter(model.means_[:,0], model.means_[:,1], color="Blue")
g1 = plt.scatter(data[data.Outlier==0][features[0]].values,
data[data.Outlier==0][features[1]].values, label="Normal",s=4.0,c="Pink")
g2 = plt.scatter(data[data.Outlier==1][features[0]].values,
data[data.Outlier==1][features[1]].values, label="Abnormal",s=4.5,c="Black")
plt.legend(handles=[g1,g2])
plt.xlabel(features[0])
plt.ylabel(features[1])
return plt
# The Outlier column is not created in this snippet; rebuild it with the same GMM and
# 5%-quantile threshold used in the earlier cells (an assumption based on those cells)
from sklearn.mixture import GaussianMixture
model = GaussianMixture(n_components=3, covariance_type='full')
log_prob = model.fit(data[features].values).score_samples(data[features].values)
data['Outlier'] = get_outliers(log_prob, np.quantile(log_prob, 0.05))
data.head()
data.Outlier.value_counts()
|
code
|
1007568/cell_13
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([train, test], join='outer')
full['family'] = full.Parch + full.SibSp
import numpy as np
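# Encode family size as indicator columns: singleton (1), small (2-4), large (5+)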
full['singleton'] = np.where(full.family == 1, 1, 0)
full['small'] = np.where(np.logical_and(full.family > 1, full.family < 5), 1, 0)
full['large'] = np.where(full.family > 4, 1, 0)
full['Fare'].fillna(full.Fare.mean(), inplace=True)
full.info()
|
code
|
1007568/cell_4
|
[
"text_html_output_1.png"
] |
import pandas as pd
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([train, test], join='outer')
full.info()
|
code
|
1007568/cell_23
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([train, test], join='outer')
full['family'] = full.Parch + full.SibSp
import numpy as np
full['singleton'] = np.where(full.family == 1, 1, 0)
full['small'] = np.where(np.logical_and(full.family > 1, full.family < 5), 1, 0)
full['large'] = np.where(full.family > 4, 1, 0)
full['Fare'].fillna(full.Fare.mean(), inplace=True)
full.Age.isnull().sum()
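# Impute missing ages with uniform random integers drawn from [mean - std, mean + std]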
rand = np.random.randint(full.Age.mean() - full.Age.std(), full.Age.mean() + full.Age.std(), full.Age.isnull().sum())
full.loc[full.Age.isnull(), 'Age'] = rand
full.drop(['Cabin', 'Name', 'Ticket', 'Sex'], axis=1, inplace=True)
full.head()
|
code
|
1007568/cell_11
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([train, test], join='outer')
full['family'] = full.Parch + full.SibSp
import numpy as np
full['singleton'] = np.where(full.family == 1, 1, 0)
full['small'] = np.where(np.logical_and(full.family > 1, full.family < 5), 1, 0)
full['large'] = np.where(full.family > 4, 1, 0)
full.info()
|
code
|
1007568/cell_7
|
[
"text_html_output_1.png"
] |
import pandas as pd
import seaborn as sns
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([train, test], join='outer')
full['family'] = full.Parch + full.SibSp
sns.countplot(x='family', hue='Survived', data=full)
|
code
|
1007568/cell_18
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import seaborn as sns
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([train, test], join='outer')
full['family'] = full.Parch + full.SibSp
import numpy as np
full['singleton'] = np.where(full.family == 1, 1, 0)
full['small'] = np.where(np.logical_and(full.family > 1, full.family < 5), 1, 0)
full['large'] = np.where(full.family > 4, 1, 0)
full['Fare'].fillna(full.Fare.mean(), inplace=True)
full.Age.isnull().sum()
rand = np.random.randint(full.Age.mean() - full.Age.std(), full.Age.mean() + full.Age.std(), full.Age.isnull().sum())
full.loc[full.Age.isnull(), 'Age'] = rand
sns.catplot(data=full, x='Age', hue='Survived', row='Sex', kind='count', aspect=5)
|
code
|
1007568/cell_3
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([train, test], join='outer')
full.head()
|
code
|
1007568/cell_17
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([train, test], join='outer')
full['family'] = full.Parch + full.SibSp
import numpy as np
full['singleton'] = np.where(full.family == 1, 1, 0)
full['small'] = np.where(np.logical_and(full.family > 1, full.family < 5), 1, 0)
full['large'] = np.where(full.family > 4, 1, 0)
full['Fare'].fillna(full.Fare.mean(), inplace=True)
full.Age.isnull().sum()
rand = np.random.randint(full.Age.mean() - full.Age.std(), full.Age.mean() + full.Age.std(), full.Age.isnull().sum())
full.loc[full.Age.isnull(), 'Age'] = rand
full.info()
|
code
|
1007568/cell_14
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import numpy as np
import pandas as pd
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full = pd.concat([train, test], join='outer')
full['family'] = full.Parch + full.SibSp
import numpy as np
full['singleton'] = np.where(full.family == 1, 1, 0)
full['small'] = np.where(np.logical_and(full.family > 1, full.family < 5), 1, 0)
full['large'] = np.where(full.family > 4, 1, 0)
full['Fare'].fillna(full.Fare.mean(), inplace=True)
full.Age.isnull().sum()
|
code
|
88082992/cell_11
|
[
"text_plain_output_1.png"
] |
from ast import literal_eval
from pathlib import Path
from tqdm.notebook import tqdm
import joblib
import json
import numpy as np
import pandas as pd
import pytorch_lightning as pl
SEED = 42
ROOT_DIR = '../input'
MEL_PATHS = sorted(Path(ROOT_DIR).glob('birdclef-2022-melspectrogram-compute/rich_train_metadata.csv'))
TRAIN_LABEL_PATHS = sorted(Path(ROOT_DIR).glob('birdclef-2022-melspectrogram-compute/LABEL_IDS.json'))
N_CLASSES = 21
SR = 32000
DURATION = 7
MAX_READ_SAMPLES = 5
USE_FOLD = 0
TRAIN_BATCH_SIZE = 32
TRAIN_NUM_WORKERS = 2
VAL_BATCH_SIZE = 32
VAL_NUM_WORKERS = 2
EPOCHS = 15
with open(Path(ROOT_DIR) / 'birdclef-2022/scored_birds.json') as json_file:
scored_birds = json.load(json_file)
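# Build a metadata dataframe (one row per clip, with the path to its precomputed mel-spectrogram .npy) plus the label-id mapping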
def get_df(mel_paths=MEL_PATHS, train_label_paths=TRAIN_LABEL_PATHS):
df = None
LABEL_IDS = {}
for file_path in mel_paths:
temp = pd.read_csv(str(file_path), index_col=0)
temp['impath'] = temp.apply(lambda row: file_path.parent / 'audio_images/{}.npy'.format(row.filename), axis=1)
        df = temp if df is None else pd.concat([df, temp])
df['secondary_labels'] = df['secondary_labels'].apply(literal_eval)
for file_path in train_label_paths:
with open(str(file_path)) as f:
LABEL_IDS.update(json.load(f))
return (LABEL_IDS, df)
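# Load up to MAX_READ_SAMPLES mel-spectrogram crops per clip in parallel, keyed by filename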
def load_data(df):
def load_row(row):
return (row.filename, np.load(str(row.impath))[:MAX_READ_SAMPLES])
pool = joblib.Parallel(4)
mapper = joblib.delayed(load_row)
tasks = [mapper(row) for row in df.itertuples(False)]
res = pool(tqdm(tasks))
res = dict(res)
return res
pl.seed_everything(SEED)
LABEL_IDS, df = get_df()
audio_image_store = load_data(df)
len(audio_image_store)
|
code
|
88082992/cell_15
|
[
"text_plain_output_1.png"
] |
from ast import literal_eval
from pathlib import Path
from torch.utils.data import Dataset, DataLoader
from tqdm.notebook import tqdm
import joblib
import json
import numpy as np
import pandas as pd
import pytorch_lightning as pl
SEED = 42
ROOT_DIR = '../input'
MEL_PATHS = sorted(Path(ROOT_DIR).glob('birdclef-2022-melspectrogram-compute/rich_train_metadata.csv'))
TRAIN_LABEL_PATHS = sorted(Path(ROOT_DIR).glob('birdclef-2022-melspectrogram-compute/LABEL_IDS.json'))
N_CLASSES = 21
SR = 32000
DURATION = 7
MAX_READ_SAMPLES = 5
USE_FOLD = 0
TRAIN_BATCH_SIZE = 32
TRAIN_NUM_WORKERS = 2
VAL_BATCH_SIZE = 32
VAL_NUM_WORKERS = 2
EPOCHS = 15
with open(Path(ROOT_DIR) / 'birdclef-2022/scored_birds.json') as json_file:
scored_birds = json.load(json_file)
def get_df(mel_paths=MEL_PATHS, train_label_paths=TRAIN_LABEL_PATHS):
df = None
LABEL_IDS = {}
for file_path in mel_paths:
temp = pd.read_csv(str(file_path), index_col=0)
temp['impath'] = temp.apply(lambda row: file_path.parent / 'audio_images/{}.npy'.format(row.filename), axis=1)
        df = temp if df is None else pd.concat([df, temp])
df['secondary_labels'] = df['secondary_labels'].apply(literal_eval)
for file_path in train_label_paths:
with open(str(file_path)) as f:
LABEL_IDS.update(json.load(f))
return (LABEL_IDS, df)
def load_data(df):
def load_row(row):
return (row.filename, np.load(str(row.impath))[:MAX_READ_SAMPLES])
pool = joblib.Parallel(4)
mapper = joblib.delayed(load_row)
tasks = [mapper(row) for row in df.itertuples(False)]
res = pool(tqdm(tasks))
res = dict(res)
return res
pl.seed_everything(SEED)
LABEL_IDS, df = get_df()
audio_image_store = load_data(df)
len(audio_image_store)
class BirdClefDataset(Dataset):
def __init__(self, audio_image_store, meta, sr=SR, is_train=True, num_classes=N_CLASSES, duration=DURATION):
self.audio_image_store = audio_image_store
self.meta = meta.copy().reset_index(drop=True)
self.sr = sr
self.is_train = is_train
self.num_classes = num_classes
self.duration = duration
self.audio_length = self.duration * self.sr
@staticmethod
def normalize(image):
image = image.astype('float32', copy=False) / 255.0
image = np.stack([image, image, image])
return image
def __len__(self):
return len(self.meta)
def __getitem__(self, idx):
row = self.meta.iloc[idx]
image = self.audio_image_store[row.filename]
image = image[np.random.choice(len(image))]
image = self.normalize(image)
t = row.label_id
return (image, t)
fold_bar = tqdm(df.reset_index().groupby('fold').index.apply(list).items(), total=df.fold.max() + 1)
for fold, val_set in fold_bar:
if fold != USE_FOLD:
continue
print(f'\n############################### [FOLD {fold}]')
fold_bar.set_description(f'[FOLD {fold}]')
train_set = np.setdiff1d(df.index, val_set)
|
code
|
88082992/cell_17
|
[
"text_plain_output_1.png"
] |
from ast import literal_eval
from pathlib import Path
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
from torchmetrics import F1
from tqdm.notebook import tqdm
import joblib
import json
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import resnest.torch as resnest_torch
import torch
SEED = 42
ROOT_DIR = '../input'
MEL_PATHS = sorted(Path(ROOT_DIR).glob('birdclef-2022-melspectrogram-compute/rich_train_metadata.csv'))
TRAIN_LABEL_PATHS = sorted(Path(ROOT_DIR).glob('birdclef-2022-melspectrogram-compute/LABEL_IDS.json'))
N_CLASSES = 21
SR = 32000
DURATION = 7
MAX_READ_SAMPLES = 5
USE_FOLD = 0
TRAIN_BATCH_SIZE = 32
TRAIN_NUM_WORKERS = 2
VAL_BATCH_SIZE = 32
VAL_NUM_WORKERS = 2
EPOCHS = 15
with open(Path(ROOT_DIR) / 'birdclef-2022/scored_birds.json') as json_file:
scored_birds = json.load(json_file)
def get_df(mel_paths=MEL_PATHS, train_label_paths=TRAIN_LABEL_PATHS):
df = None
LABEL_IDS = {}
for file_path in mel_paths:
temp = pd.read_csv(str(file_path), index_col=0)
temp['impath'] = temp.apply(lambda row: file_path.parent / 'audio_images/{}.npy'.format(row.filename), axis=1)
        df = temp if df is None else pd.concat([df, temp])
df['secondary_labels'] = df['secondary_labels'].apply(literal_eval)
for file_path in train_label_paths:
with open(str(file_path)) as f:
LABEL_IDS.update(json.load(f))
return (LABEL_IDS, df)
def get_model(name, num_classes=N_CLASSES):
"""
Loads a pretrained model.
Supports ResNest, ResNext-wsl, EfficientNet, ResNext and ResNet.
Arguments:
name {str} -- Name of the model to load
Keyword Arguments:
    num_classes {int} -- Number of classes to use (default: {N_CLASSES})
Returns:
torch model -- Pretrained model
"""
if 'resnest' in name:
model = getattr(resnest_torch, name)(pretrained=False)
model.load_state_dict(torch.load('../input/resnest50/resnest50-528c19ca.pth'))
elif 'wsl' in name:
model = torch.hub.load('facebookresearch/WSL-Images', name)
elif name.startswith('resnext') or name.startswith('resnet'):
model = torch.hub.load('pytorch/vision:v0.6.0', name, pretrained=True)
elif name.startswith('tf_efficientnet_b'):
model = getattr(timm.models.efficientnet, name)(pretrained=True)
elif 'efficientnet-b' in name:
model = EfficientNet.from_pretrained(name)
else:
model = pretrainedmodels.__dict__[name](pretrained='imagenet')
if hasattr(model, 'fc'):
nb_ft = model.fc.in_features
model.fc = nn.Linear(nb_ft, num_classes)
elif hasattr(model, '_fc'):
nb_ft = model._fc.in_features
model._fc = nn.Linear(nb_ft, num_classes)
elif hasattr(model, 'classifier'):
nb_ft = model.classifier.in_features
model.classifier = nn.Linear(nb_ft, num_classes)
elif hasattr(model, 'last_linear'):
nb_ft = model.last_linear.in_features
model.last_linear = nn.Linear(nb_ft, num_classes)
return model
def load_data(df):
def load_row(row):
return (row.filename, np.load(str(row.impath))[:MAX_READ_SAMPLES])
pool = joblib.Parallel(4)
mapper = joblib.delayed(load_row)
tasks = [mapper(row) for row in df.itertuples(False)]
res = pool(tqdm(tasks))
res = dict(res)
return res
pl.seed_everything(SEED)
LABEL_IDS, df = get_df()
audio_image_store = load_data(df)
len(audio_image_store)
class BirdClefDataset(Dataset):
def __init__(self, audio_image_store, meta, sr=SR, is_train=True, num_classes=N_CLASSES, duration=DURATION):
self.audio_image_store = audio_image_store
self.meta = meta.copy().reset_index(drop=True)
self.sr = sr
self.is_train = is_train
self.num_classes = num_classes
self.duration = duration
self.audio_length = self.duration * self.sr
@staticmethod
def normalize(image):
image = image.astype('float32', copy=False) / 255.0
image = np.stack([image, image, image])
return image
def __len__(self):
return len(self.meta)
def __getitem__(self, idx):
row = self.meta.iloc[idx]
image = self.audio_image_store[row.filename]
image = image[np.random.choice(len(image))]
image = self.normalize(image)
t = row.label_id
return (image, t)
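# LightningModule wrapping the backbone; trains with cross-entropy and reports macro F1 on validation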
class BirdClefModel(pl.LightningModule):
def __init__(self, name, n_classes):
super().__init__()
self.model = get_model(name, n_classes)
self.f1 = F1(num_classes=n_classes, average='macro')
def forward(self, x):
batch_size, channels, height, width = x.size()
x = self.model(x)
return x
def training_step(self, batch, batch_idx):
x, y = batch
logits = self(x)
loss = F.cross_entropy(logits, y)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
loss = F.cross_entropy(logits, y)
f1_score = self.f1(logits, y)
self.log('val_loss', loss, on_epoch=True, prog_bar=True)
self.log('val_f1_score', f1_score, on_epoch=True, prog_bar=True)
return {'val_loss': loss, 'val_f1_score': f1_score}
def configure_optimizers(self):
optimizer = optim.Adam(self.parameters(), lr=0.001)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, eta_min=1e-05, T_max=EPOCHS)
return {'optimizer': optimizer, 'lr_scheduler': {'scheduler': scheduler, 'monitor': 'val_loss'}}
fold_bar = tqdm(df.reset_index().groupby('fold').index.apply(list).items(), total=df.fold.max() + 1)
for fold, val_set in fold_bar:
if fold != USE_FOLD:
continue
fold_bar.set_description(f'[FOLD {fold}]')
train_set = np.setdiff1d(df.index, val_set)
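    # Build the fold's datasets and loaders, then fit with checkpointing on the best validation F1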
model = BirdClefModel('resnest50', N_CLASSES)
train_data = BirdClefDataset(audio_image_store, meta=df.iloc[train_set].reset_index(drop=True), sr=SR, duration=DURATION, is_train=True)
train_dataloader = DataLoader(train_data, batch_size=TRAIN_BATCH_SIZE, num_workers=TRAIN_NUM_WORKERS, shuffle=True, pin_memory=True)
val_data = BirdClefDataset(audio_image_store, meta=df.iloc[val_set].reset_index(drop=True), sr=SR, duration=DURATION, is_train=False)
val_dataloader = DataLoader(val_data, batch_size=VAL_BATCH_SIZE, num_workers=VAL_NUM_WORKERS, shuffle=False)
chk_callback = ModelCheckpoint(filename='best', monitor='val_f1_score', save_last=False, save_top_k=1, mode='max')
trainer = pl.Trainer(gpus=-1, max_epochs=EPOCHS, callbacks=[chk_callback], log_every_n_steps=10)
trainer.fit(model, train_dataloader, val_dataloader)
|
code
|
121149840/cell_21
|
[
"text_plain_output_1.png"
] |
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/housesalesprediction/kc_house_data.csv')
df.corr(numeric_only=True)['price'].sort_values()
X = df[['long']]
Y = df['price']
lm = LinearRegression()
lm.fit(X, Y)
lm.score(X, Y)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
X = df[['sqft_living']]
y = df['price']
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
features = ['floors', 'waterfront', 'lat', 'bedrooms', 'sqft_basement', 'view', 'bathrooms', 'sqft_living15', 'sqft_above', 'grade', 'sqft_living']
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
X = df[features]
y = df['price']
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
features = ['floors', 'waterfront', 'lat', 'bedrooms', 'sqft_basement', 'view', 'bathrooms', 'sqft_living15', 'sqft_above', 'grade', 'sqft_living']
model = Pipeline([('regressor', LinearRegression())])
X = df[features]
y = df['price']
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
features = ['floors', 'waterfront', 'lat', 'bedrooms', 'sqft_basement', 'view', 'bathrooms', 'sqft_living15', 'sqft_above', 'grade', 'sqft_living']
X = df[features]
Y = df['price']
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.15, random_state=1)
print('number of test samples:', x_test.shape[0])
print('number of training samples:', x_train.shape[0])
|
code
|
121149840/cell_9
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/housesalesprediction/kc_house_data.csv')
sns.regplot(x='sqft_above', y='price', data=df)
plt.title('Price vs. sqft_above')
plt.xlabel('sqft_above')
plt.ylabel('Price')
plt.show()
|
code
|
121149840/cell_23
|
[
"text_plain_output_1.png"
] |
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.metrics import r2_score
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/housesalesprediction/kc_house_data.csv')
df.corr(numeric_only=True)['price'].sort_values()
X = df[['long']]
Y = df['price']
lm = LinearRegression()
lm.fit(X, Y)
lm.score(X, Y)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
X = df[['sqft_living']]
y = df['price']
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
features = ['floors', 'waterfront', 'lat', 'bedrooms', 'sqft_basement', 'view', 'bathrooms', 'sqft_living15', 'sqft_above', 'grade', 'sqft_living']
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
X = df[features]
y = df['price']
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
features = ['floors', 'waterfront', 'lat', 'bedrooms', 'sqft_basement', 'view', 'bathrooms', 'sqft_living15', 'sqft_above', 'grade', 'sqft_living']
model = Pipeline([('regressor', LinearRegression())])
X = df[features]
y = df['price']
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
features = ['floors', 'waterfront', 'lat', 'bedrooms', 'sqft_basement', 'view', 'bathrooms', 'sqft_living15', 'sqft_above', 'grade', 'sqft_living']
X = df[features]
Y = df['price']
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.15, random_state=1)
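# Ridge regression (alpha=0.1) evaluated by R^2 on the 15% hold-out set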
ridge = Ridge(alpha=0.1)
ridge.fit(x_train, y_train)
y_pred = ridge.predict(x_test)
r2 = r2_score(y_test, y_pred)
print('R^2:', r2)
|
code
|
121149840/cell_6
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/housesalesprediction/kc_house_data.csv')
print(df.dtypes)
|
code
|
121149840/cell_7
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/housesalesprediction/kc_house_data.csv')
counts = df['floors'].value_counts().to_frame()
print(counts)
|
code
|
121149840/cell_18
|
[
"image_output_1.png"
] |
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import r2_score
from sklearn.pipeline import Pipeline
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/housesalesprediction/kc_house_data.csv')
df.corr(numeric_only=True)['price'].sort_values()
X = df[['long']]
Y = df['price']
lm = LinearRegression()
lm.fit(X, Y)
lm.score(X, Y)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
X = df[['sqft_living']]
y = df['price']
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
features = ['floors', 'waterfront', 'lat', 'bedrooms', 'sqft_basement', 'view', 'bathrooms', 'sqft_living15', 'sqft_above', 'grade', 'sqft_living']
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
X = df[features]
y = df['price']
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
features = ['floors', 'waterfront', 'lat', 'bedrooms', 'sqft_basement', 'view', 'bathrooms', 'sqft_living15', 'sqft_above', 'grade', 'sqft_living']
model = Pipeline([('regressor', LinearRegression())])
X = df[features]
y = df['price']
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
print('R^2:', r2)
|
code
|
121149840/cell_8
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/housesalesprediction/kc_house_data.csv')
sns.boxplot(x='waterfront', y='price', data=df)
plt.title('Price distribution by Waterfront View')
plt.xlabel('Waterfront View')
plt.ylabel('Price')
plt.show()
|
code
|
121149840/cell_16
|
[
"image_output_1.png"
] |
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import r2_score
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/housesalesprediction/kc_house_data.csv')
df.corr(numeric_only=True)['price'].sort_values()
X = df[['long']]
Y = df['price']
lm = LinearRegression()
lm.fit(X, Y)
lm.score(X, Y)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
X = df[['sqft_living']]
y = df['price']
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
features = ['floors', 'waterfront', 'lat', 'bedrooms', 'sqft_basement', 'view', 'bathrooms', 'sqft_living15', 'sqft_above', 'grade', 'sqft_living']
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
X = df[features]
y = df['price']
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
print('R^2: ', r2)
|
code
|
121149840/cell_24
|
[
"text_plain_output_1.png"
] |
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.metrics import r2_score
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler,PolynomialFeatures
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/housesalesprediction/kc_house_data.csv')
df.corr(numeric_only=True)['price'].sort_values()
X = df[['long']]
Y = df['price']
lm = LinearRegression()
lm.fit(X, Y)
lm.score(X, Y)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
X = df[['sqft_living']]
y = df['price']
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
features = ['floors', 'waterfront', 'lat', 'bedrooms', 'sqft_basement', 'view', 'bathrooms', 'sqft_living15', 'sqft_above', 'grade', 'sqft_living']
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
X = df[features]
y = df['price']
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
features = ['floors', 'waterfront', 'lat', 'bedrooms', 'sqft_basement', 'view', 'bathrooms', 'sqft_living15', 'sqft_above', 'grade', 'sqft_living']
model = Pipeline([('regressor', LinearRegression())])
X = df[features]
y = df['price']
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
features = ['floors', 'waterfront', 'lat', 'bedrooms', 'sqft_basement', 'view', 'bathrooms', 'sqft_living15', 'sqft_above', 'grade', 'sqft_living']
X = df[features]
Y = df['price']
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.15, random_state=1)
ridge = Ridge(alpha=0.1)
ridge.fit(x_train, y_train)
y_pred = ridge.predict(x_test)
r2 = r2_score(y_test, y_pred)
from sklearn.preprocessing import PolynomialFeatures
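# Expand the features to second-order polynomial terms before fitting ridge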
poly = PolynomialFeatures(degree=2)
x_train_poly = poly.fit_transform(x_train)
x_test_poly = poly.transform(x_test)
ridge = Ridge(alpha=0.1)
ridge.fit(x_train_poly, y_train)
y_pred = ridge.predict(x_test_poly)
r2 = r2_score(y_test, y_pred)
print('R^2:', r2)
|
code
|
121149840/cell_14
|
[
"text_plain_output_1.png"
] |
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/housesalesprediction/kc_house_data.csv')
df.corr(numeric_only=True)['price'].sort_values()
X = df[['long']]
Y = df['price']
lm = LinearRegression()
lm.fit(X, Y)
lm.score(X, Y)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
X = df[['sqft_living']]
y = df['price']
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
print('R^2:', r2)
|
code
|
121149840/cell_10
|
[
"text_html_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/housesalesprediction/kc_house_data.csv')
df.corr(numeric_only=True)['price'].sort_values()
|
code
|
121149840/cell_12
|
[
"text_plain_output_1.png"
] |
from sklearn.linear_model import LinearRegression
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/housesalesprediction/kc_house_data.csv')
df.corr(numeric_only=True)['price'].sort_values()
X = df[['long']]
Y = df['price']
lm = LinearRegression()
lm.fit(X, Y)
lm.score(X, Y)
|
code
|
121149840/cell_5
|
[
"text_plain_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/housesalesprediction/kc_house_data.csv')
df.head()
|
code
|
74060570/cell_42
|
[
"text_html_output_1.png"
] |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
class GetDfForPreprocessing:
"""
this function will prepare data form dataframe for preprocessing
Return
------
dataframe
"""
def __init__(self, df: pd.DataFrame):
self.df = df
def get_info(self):
missing = self.df.isnull().sum().sum()
        total_cells = np.prod(self.df.shape)
total_missing_count = self.df.isnull().sum().sum()
missing_columns = self.df.columns[self.df.isnull().any()]
def get_percentage_missing_columns(self):
"""a function to check for missing values count and percentage missing"""
count_missing = self.df.isnull().sum()
count_missing_percentage = round(self.df.isnull().sum() * 100 / len(self.df))
missing_column_name = self.df.columns
missing_df = pd.DataFrame(zip(count_missing, count_missing_percentage, missing_column_name), columns=['Missing Count', '%Missing', 'ColumnName'])
missing_df = missing_df.set_index('ColumnName')
return missing_df
def get_column_with_null(self):
"""
Return List of Columns which contain more than 30% of null values
"""
number_of_rows, number_of_columns = self.df.shape
df_size = number_of_rows * number_of_columns
df_size = self.df.shape[0]
columns_list = self.df.columns
bad_columns = []
for column in columns_list:
null_per_column = self.df[column].isnull().sum()
percentage = round(null_per_column / df_size * 100, 2)
if percentage > 30 or percentage == 30:
bad_columns.append(column)
return bad_columns
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df = districts_df.copy()
districts_df = districts_df[districts_df['state'].notna()]
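# Fill the remaining high-null columns with their most frequent value (mode)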
for col in district_null_columns:
freq = districts_df[col].mode()[0]
districts_df[col] = districts_df[col].fillna(freq)
PATH = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
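# Load each district's engagement file, tag rows with the district id, and keep only districts with a full 366 days of data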
temp = []
for district in districts_df.district_id.unique():
df = pd.read_csv(f'{PATH}/{district}.csv', index_col=None, header=0)
df['district_id'] = district
if df.time.nunique() == 366:
temp.append(df)
engagement_df = pd.concat(temp)
engagement_df = engagement_df.reset_index(drop=True)
districts_df.head()
|
code
|
74060570/cell_25
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
prep = GetDfForPreprocessing(products_df)
prep.get_percentage_missing_columns()
|
code
|
74060570/cell_57
|
[
"text_html_output_2.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
products_df = products_df[products_df['Sector(s)'].notna()]
products_df['primary_function_main'] = products_df['Primary Essential Function'].apply(lambda x: x.split(' - ')[0] if x == x else x)
products_df['primary_function_sub'] = products_df['Primary Essential Function'].apply(lambda x: x.split(' - ')[1] if x == x else x)
products_df.drop('Primary Essential Function', axis=1, inplace=True)
products_sect = products_df['Sector(s)'].value_counts().reset_index()
products_sect.columns = ['Sector(s)', 'percent']
products_sect['percent'] /= len(products_df)
fig = px.pie(products_sect, names='Sector(s)', values='percent', color_discrete_sequence=px.colors.qualitative.Set3, title='Distribution of Sectors', width=700, height=500)
fig.show()
|
code
|
74060570/cell_23
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
prep = GetDfForPreprocessing(products_df)
prep.get_info()
|
code
|
74060570/cell_30
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
products_df = products_df[products_df['Sector(s)'].notna()]
products_df['primary_function_main'] = products_df['Primary Essential Function'].apply(lambda x: x.split(' - ')[0] if x == x else x)
products_df['primary_function_sub'] = products_df['Primary Essential Function'].apply(lambda x: x.split(' - ')[1] if x == x else x)
products_df.drop('Primary Essential Function', axis=1, inplace=True)
products_df.head()
|
code
|
74060570/cell_55
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
class GetDfForPreprocessing:
"""
this function will prepare data form dataframe for preprocessing
Return
------
dataframe
"""
def __init__(self, df: pd.DataFrame):
self.df = df
def get_info(self):
missing = self.df.isnull().sum().sum()
        total_cells = np.prod(self.df.shape)
total_missing_count = self.df.isnull().sum().sum()
missing_columns = self.df.columns[self.df.isnull().any()]
def get_percentage_missing_columns(self):
"""a function to check for missing values count and percentage missing"""
count_missing = self.df.isnull().sum()
count_missing_percentage = round(self.df.isnull().sum() * 100 / len(self.df))
missing_column_name = self.df.columns
missing_df = pd.DataFrame(zip(count_missing, count_missing_percentage, missing_column_name), columns=['Missing Count', '%Missing', 'ColumnName'])
missing_df = missing_df.set_index('ColumnName')
return missing_df
def get_column_with_null(self):
"""
Return List of Columns which contain more than 30% of null values
"""
number_of_rows, number_of_columns = self.df.shape
df_size = number_of_rows * number_of_columns
df_size = self.df.shape[0]
columns_list = self.df.columns
bad_columns = []
for column in columns_list:
null_per_column = self.df[column].isnull().sum()
percentage = round(null_per_column / df_size * 100, 2)
if percentage > 30 or percentage == 30:
bad_columns.append(column)
return bad_columns
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df = districts_df.copy()
districts_df = districts_df[districts_df['state'].notna()]
for col in district_null_columns:
freq = districts_df[col].mode()[0]
districts_df[col] = districts_df[col].fillna(freq)
products_df = products_df[products_df['Sector(s)'].notna()]
products_df['primary_function_main'] = products_df['Primary Essential Function'].apply(lambda x: x.split(' - ')[0] if x == x else x)
products_df['primary_function_sub'] = products_df['Primary Essential Function'].apply(lambda x: x.split(' - ')[1] if x == x else x)
products_df.drop('Primary Essential Function', axis=1, inplace=True)
PATH = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
temp = []
for district in districts_df.district_id.unique():
df = pd.read_csv(f'{PATH}/{district}.csv', index_col=None, header=0)
df['district_id'] = district
if df.time.nunique() == 366:
temp.append(df)
engagement_df = pd.concat(temp)
engagement_df = engagement_df.reset_index(drop=True)
# Plotting functions
def bar_plot(df, x_col, y_col, title=''):
    plt.figure(figsize=(20, 7))
    sns.barplot(data=df, x=x_col, y=y_col, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def count_plot(df, col, title):
plt.figure(figsize=(20, 7))
    sns.countplot(data=df, y=col, order=df[col].value_counts().index, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def pie_plot(df, col, title=''):
count = df[col].value_counts()
plt.figure(figsize=(20, 7))
    plt.pie(list(count), labels=count.index, colors=sns.color_palette('cubehelix', len(count)), autopct='%1.2f%%')
plt.title(title, size=16)
plt.legend()
plt.show()
def count_plot2(df, col, hue, title):
    plt.figure(figsize=(20, 10))
    ax = sns.countplot(data=df, x=col, hue=hue, palette='Set3')
    plt.xticks(rotation=90)
    plt.title(title, size=16)
    plt.show()
def time_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
sns.cubehelix_palette(as_cmap=True)
    sns.lineplot(data=df, x=x_col, y=y_col)
    plt.title(title, size=16)
    plt.xticks(rotation=90, fontsize=14)
    plt.yticks(fontsize=14)
plt.xlabel(x_col, fontsize=16)
plt.ylabel(y_col, fontsize=16)
plt.show()
result = products_df['Provider/Company Name'].value_counts().head(15)
top_comp = pd.DataFrame({'Company': result.index, 'Count': result})
bar_plot(top_comp, 'Count', 'Company', title='Top 15 Companies/Providers')
|
code
|
74060570/cell_26
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
prep = GetDfForPreprocessing(products_df)
prep.get_percentage_missing_columns()
products_null_columns = prep.get_column_with_null()
print('Columns With Null Value more than 30% : ')
print(products_null_columns)
|
code
|
74060570/cell_65
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
class GetDfForPreprocessing:
"""
this function will prepare data form dataframe for preprocessing
Return
------
dataframe
"""
def __init__(self, df: pd.DataFrame):
self.df = df
def get_info(self):
missing = self.df.isnull().sum().sum()
        total_cells = np.prod(self.df.shape)
total_missing_count = self.df.isnull().sum().sum()
missing_columns = self.df.columns[self.df.isnull().any()]
def get_percentage_missing_columns(self):
"""a function to check for missing values count and percentage missing"""
count_missing = self.df.isnull().sum()
count_missing_percentage = round(self.df.isnull().sum() * 100 / len(self.df))
missing_column_name = self.df.columns
missing_df = pd.DataFrame(zip(count_missing, count_missing_percentage, missing_column_name), columns=['Missing Count', '%Missing', 'ColumnName'])
missing_df = missing_df.set_index('ColumnName')
return missing_df
def get_column_with_null(self):
"""
Return List of Columns which contain more than 30% of null values
"""
number_of_rows, number_of_columns = self.df.shape
df_size = number_of_rows * number_of_columns
df_size = self.df.shape[0]
columns_list = self.df.columns
bad_columns = []
for column in columns_list:
null_per_column = self.df[column].isnull().sum()
percentage = round(null_per_column / df_size * 100, 2)
if percentage > 30 or percentage == 30:
bad_columns.append(column)
return bad_columns
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df = districts_df.copy()
districts_df = districts_df[districts_df['state'].notna()]
for col in district_null_columns:
freq = districts_df[col].mode()[0]
districts_df[col] = districts_df[col].fillna(freq)
PATH = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
temp = []
for district in districts_df.district_id.unique():
df = pd.read_csv(f'{PATH}/{district}.csv', index_col=None, header=0)
df['district_id'] = district
if df.time.nunique() == 366:
temp.append(df)
engagement_df = pd.concat(temp)
engagement_df = engagement_df.reset_index(drop=True)
engagement_df.time = engagement_df.time.astype('datetime64[ns]')
# Plotting functions
def bar_plot(df, x_col, y_col, title=''):
    plt.figure(figsize=(20, 7))
    sns.barplot(data=df, x=x_col, y=y_col, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def count_plot(df, col, title):
plt.figure(figsize=(20, 7))
sns.countplot(data = df,y = col, order=df[col].value_counts().index, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def pie_plot(df, col, title=''):
count = df[col].value_counts()
plt.figure(figsize=(20, 7))
    plt.pie(list(count), labels=count.index, colors=sns.color_palette('cubehelix', len(count)), autopct='%1.2f%%')  # plt.pie has no palette kwarg; pass explicit colors
plt.title(title, size=16)
plt.legend()
plt.show()
def count_plot2(df,col,hue,title):
plt.figure(figsize=(20,10))
ax=sns.countplot(data=df,x=col,hue= hue,palette='Set3')
plt.xticks(rotation=90)
plt.title(title, size=16)
plt.show()
def time_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
    sns.lineplot(data=df, x=x_col, y=y_col)
plt.title(title, size=16)
plt.xticks(rotation=90, fontsize=14)
plt.yticks( fontsize=14)
plt.xlabel(x_col, fontsize=16)
plt.ylabel(y_col, fontsize=16)
plt.show()
plt.figure(figsize=(15, 11))
sns.lineplot(y=engagement_df['pct_access'], x=engagement_df['month'])  # palette would be ignored here because no hue is set
plt.title('Average access per month')
|
code
|
74060570/cell_61
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
class GetDfForPreprocessing:
    """Summarise missing values in a DataFrame ahead of preprocessing."""

    def __init__(self, df: pd.DataFrame):
        self.df = df

    def get_info(self):
        """Print a short summary of the missing values in the DataFrame."""
        total_cells = np.prod(self.df.shape)
        total_missing_count = self.df.isnull().sum().sum()
        missing_columns = self.df.columns[self.df.isnull().any()]
        print(f'Total cells: {total_cells}')
        print(f'Total missing values: {total_missing_count}')
        print(f'Columns with missing values: {list(missing_columns)}')

    def get_percentage_missing_columns(self):
        """Return the missing-value count and percentage for every column."""
        count_missing = self.df.isnull().sum()
        count_missing_percentage = round(self.df.isnull().sum() * 100 / len(self.df))
        missing_column_name = self.df.columns
        missing_df = pd.DataFrame(zip(count_missing, count_missing_percentage, missing_column_name), columns=['Missing Count', '%Missing', 'ColumnName'])
        missing_df = missing_df.set_index('ColumnName')
        return missing_df

    def get_column_with_null(self):
        """Return a list of columns where at least 30% of the values are null."""
        df_size = self.df.shape[0]  # percentages are relative to the row count
        bad_columns = []
        for column in self.df.columns:
            null_per_column = self.df[column].isnull().sum()
            percentage = round(null_per_column / df_size * 100, 2)
            if percentage >= 30:
                bad_columns.append(column)
        return bad_columns
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df = districts_df.copy()
districts_df = districts_df[districts_df['state'].notna()]
for col in district_null_columns:
freq = districts_df[col].mode()[0]
districts_df[col] = districts_df[col].fillna(freq)
products_df = products_df[products_df['Sector(s)'].notna()]
products_df['primary_function_main'] = products_df['Primary Essential Function'].apply(lambda x: x.split(' - ')[0] if x == x else x)  # x == x is False only for NaN, so missing values pass through
products_df['primary_function_sub'] = products_df['Primary Essential Function'].apply(lambda x: x.split(' - ')[1] if x == x else x)
products_df.drop('Primary Essential Function', axis=1, inplace=True)
PATH = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
temp = []
for district in districts_df.district_id.unique():
df = pd.read_csv(f'{PATH}/{district}.csv', index_col=None, header=0)
df['district_id'] = district
if df.time.nunique() == 366:
temp.append(df)
engagement_df = pd.concat(temp)
engagement_df = engagement_df.reset_index(drop=True)
# Plotting functions
def bar_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
sns.barplot(data = df, x=x_col, y=y_col, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def count_plot(df, col, title):
plt.figure(figsize=(20, 7))
sns.countplot(data = df,y = col, order=df[col].value_counts().index, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def pie_plot(df, col, title=''):
count = df[col].value_counts()
plt.figure(figsize=(20, 7))
    plt.pie(list(count), labels=count.index, colors=sns.color_palette('cubehelix', len(count)), autopct='%1.2f%%')  # plt.pie has no palette kwarg; pass explicit colors
plt.title(title, size=16)
plt.legend()
plt.show()
def count_plot2(df,col,hue,title):
plt.figure(figsize=(20,10))
ax=sns.countplot(data=df,x=col,hue= hue,palette='Set3')
plt.xticks(rotation=90)
plt.title(title, size=16)
plt.show()
def time_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
    sns.lineplot(data=df, x=x_col, y=y_col)
plt.title(title, size=16)
plt.xticks(rotation=90, fontsize=14)
plt.yticks( fontsize=14)
plt.xlabel(x_col, fontsize=16)
plt.ylabel(y_col, fontsize=16)
plt.show()
count_plot(products_df, 'primary_function_sub', 'Sub-categories in Primary Function')
|
code
|
74060570/cell_54
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
products_df = products_df[products_df['Sector(s)'].notna()]
products_df['primary_function_main'] = products_df['Primary Essential Function'].apply(lambda x: x.split(' - ')[0] if x == x else x)  # x == x is False only for NaN, so missing values pass through
products_df['primary_function_sub'] = products_df['Primary Essential Function'].apply(lambda x: x.split(' - ')[1] if x == x else x)
products_df.drop('Primary Essential Function', axis=1, inplace=True)
products_df.head()
|
code
|
74060570/cell_67
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
class GetDfForPreprocessing:
    """Summarise missing values in a DataFrame ahead of preprocessing."""

    def __init__(self, df: pd.DataFrame):
        self.df = df

    def get_info(self):
        """Print a short summary of the missing values in the DataFrame."""
        total_cells = np.prod(self.df.shape)
        total_missing_count = self.df.isnull().sum().sum()
        missing_columns = self.df.columns[self.df.isnull().any()]
        print(f'Total cells: {total_cells}')
        print(f'Total missing values: {total_missing_count}')
        print(f'Columns with missing values: {list(missing_columns)}')

    def get_percentage_missing_columns(self):
        """Return the missing-value count and percentage for every column."""
        count_missing = self.df.isnull().sum()
        count_missing_percentage = round(self.df.isnull().sum() * 100 / len(self.df))
        missing_column_name = self.df.columns
        missing_df = pd.DataFrame(zip(count_missing, count_missing_percentage, missing_column_name), columns=['Missing Count', '%Missing', 'ColumnName'])
        missing_df = missing_df.set_index('ColumnName')
        return missing_df

    def get_column_with_null(self):
        """Return a list of columns where at least 30% of the values are null."""
        df_size = self.df.shape[0]  # percentages are relative to the row count
        bad_columns = []
        for column in self.df.columns:
            null_per_column = self.df[column].isnull().sum()
            percentage = round(null_per_column / df_size * 100, 2)
            if percentage >= 30:
                bad_columns.append(column)
        return bad_columns
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df = districts_df.copy()
districts_df = districts_df[districts_df['state'].notna()]
for col in district_null_columns:
freq = districts_df[col].mode()[0]
districts_df[col] = districts_df[col].fillna(freq)
PATH = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
temp = []
for district in districts_df.district_id.unique():
df = pd.read_csv(f'{PATH}/{district}.csv', index_col=None, header=0)
df['district_id'] = district
if df.time.nunique() == 366:
temp.append(df)
engagement_df = pd.concat(temp)
engagement_df = engagement_df.reset_index(drop=True)
engagement_df.time = engagement_df.time.astype('datetime64[ns]')
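# Assumed derivation: 'month' and 'weekday' are used by the plot below but never
# created in this cell, so they are rebuilt here from the parsed timestamps.
engagement_df['month'] = engagement_df['time'].dt.month
engagement_df['weekday'] = engagement_df['time'].dt.day_name()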
# Plotting functions
def bar_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
sns.barplot(data = df, x=x_col, y=y_col, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def count_plot(df, col, title):
plt.figure(figsize=(20, 7))
sns.countplot(data = df,y = col, order=df[col].value_counts().index, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def pie_plot(df, col, title=''):
count = df[col].value_counts()
plt.figure(figsize=(20, 7))
    plt.pie(list(count), labels=count.index, colors=sns.color_palette('cubehelix', len(count)), autopct='%1.2f%%')  # plt.pie has no palette kwarg; pass explicit colors
plt.title(title, size=16)
plt.legend()
plt.show()
def count_plot2(df,col,hue,title):
plt.figure(figsize=(20,10))
ax=sns.countplot(data=df,x=col,hue= hue,palette='Set3')
plt.xticks(rotation=90)
plt.title(title, size=16)
plt.show()
def time_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
    sns.lineplot(data=df, x=x_col, y=y_col)
plt.title(title, size=16)
plt.xticks(rotation=90, fontsize=14)
plt.yticks( fontsize=14)
plt.xlabel(x_col, fontsize=16)
plt.ylabel(y_col, fontsize=16)
plt.show()
plt.figure(figsize=(15, 11))
sns.lineplot(hue=engagement_df['weekday'], y=engagement_df['pct_access'], x=engagement_df['month'], palette='rocket')
plt.title('Average access per month per day')
|
code
|
74060570/cell_11
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_df.head()
|
code
|
74060570/cell_19
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df.head()
|
code
|
74060570/cell_64
|
[
"text_html_output_1.png"
] |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
class GetDfForPreprocessing:
    """Summarise missing values in a DataFrame ahead of preprocessing."""

    def __init__(self, df: pd.DataFrame):
        self.df = df

    def get_info(self):
        """Print a short summary of the missing values in the DataFrame."""
        total_cells = np.prod(self.df.shape)
        total_missing_count = self.df.isnull().sum().sum()
        missing_columns = self.df.columns[self.df.isnull().any()]
        print(f'Total cells: {total_cells}')
        print(f'Total missing values: {total_missing_count}')
        print(f'Columns with missing values: {list(missing_columns)}')

    def get_percentage_missing_columns(self):
        """Return the missing-value count and percentage for every column."""
        count_missing = self.df.isnull().sum()
        count_missing_percentage = round(self.df.isnull().sum() * 100 / len(self.df))
        missing_column_name = self.df.columns
        missing_df = pd.DataFrame(zip(count_missing, count_missing_percentage, missing_column_name), columns=['Missing Count', '%Missing', 'ColumnName'])
        missing_df = missing_df.set_index('ColumnName')
        return missing_df

    def get_column_with_null(self):
        """Return a list of columns where at least 30% of the values are null."""
        df_size = self.df.shape[0]  # percentages are relative to the row count
        bad_columns = []
        for column in self.df.columns:
            null_per_column = self.df[column].isnull().sum()
            percentage = round(null_per_column / df_size * 100, 2)
            if percentage >= 30:
                bad_columns.append(column)
        return bad_columns
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df = districts_df.copy()
districts_df = districts_df[districts_df['state'].notna()]
for col in district_null_columns:
freq = districts_df[col].mode()[0]
districts_df[col] = districts_df[col].fillna(freq)
PATH = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
temp = []
for district in districts_df.district_id.unique():
df = pd.read_csv(f'{PATH}/{district}.csv', index_col=None, header=0)
df['district_id'] = district
if df.time.nunique() == 366:
temp.append(df)
engagement_df = pd.concat(temp)
engagement_df = engagement_df.reset_index(drop=True)
engagement_df.time = engagement_df.time.astype('datetime64[ns]')
engagement_df.head()
|
code
|
74060570/cell_45
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
class GetDfForPreprocessing:
    """Summarise missing values in a DataFrame ahead of preprocessing."""

    def __init__(self, df: pd.DataFrame):
        self.df = df

    def get_info(self):
        """Print a short summary of the missing values in the DataFrame."""
        total_cells = np.prod(self.df.shape)
        total_missing_count = self.df.isnull().sum().sum()
        missing_columns = self.df.columns[self.df.isnull().any()]
        print(f'Total cells: {total_cells}')
        print(f'Total missing values: {total_missing_count}')
        print(f'Columns with missing values: {list(missing_columns)}')

    def get_percentage_missing_columns(self):
        """Return the missing-value count and percentage for every column."""
        count_missing = self.df.isnull().sum()
        count_missing_percentage = round(self.df.isnull().sum() * 100 / len(self.df))
        missing_column_name = self.df.columns
        missing_df = pd.DataFrame(zip(count_missing, count_missing_percentage, missing_column_name), columns=['Missing Count', '%Missing', 'ColumnName'])
        missing_df = missing_df.set_index('ColumnName')
        return missing_df

    def get_column_with_null(self):
        """Return a list of columns where at least 30% of the values are null."""
        df_size = self.df.shape[0]  # percentages are relative to the row count
        bad_columns = []
        for column in self.df.columns:
            null_per_column = self.df[column].isnull().sum()
            percentage = round(null_per_column / df_size * 100, 2)
            if percentage >= 30:
                bad_columns.append(column)
        return bad_columns
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df = districts_df.copy()
districts_df = districts_df[districts_df['state'].notna()]
for col in district_null_columns:
freq = districts_df[col].mode()[0]
districts_df[col] = districts_df[col].fillna(freq)
PATH = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
temp = []
for district in districts_df.district_id.unique():
df = pd.read_csv(f'{PATH}/{district}.csv', index_col=None, header=0)
df['district_id'] = district
if df.time.nunique() == 366:
temp.append(df)
engagement_df = pd.concat(temp)
engagement_df = engagement_df.reset_index(drop=True)
# Plotting functions
def bar_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
sns.barplot(data = df, x=x_col, y=y_col, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def count_plot(df, col, title):
plt.figure(figsize=(20, 7))
sns.countplot(data = df,y = col, order=df[col].value_counts().index, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def pie_plot(df, col, title=''):
count = df[col].value_counts()
plt.figure(figsize=(20, 7))
    plt.pie(list(count), labels=count.index, colors=sns.color_palette('cubehelix', len(count)), autopct='%1.2f%%')  # plt.pie has no palette kwarg; pass explicit colors
plt.title(title, size=16)
plt.legend()
plt.show()
def count_plot2(df,col,hue,title):
plt.figure(figsize=(20,10))
ax=sns.countplot(data=df,x=col,hue= hue,palette='Set3')
plt.xticks(rotation=90)
plt.title(title, size=16)
plt.show()
def time_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
    sns.lineplot(data=df, x=x_col, y=y_col)
plt.title(title, size=16)
plt.xticks(rotation=90, fontsize=14)
plt.yticks( fontsize=14)
plt.xlabel(x_col, fontsize=16)
plt.ylabel(y_col, fontsize=16)
plt.show()
count_plot(districts_df, 'locale', 'Locale Distribution')
|
code
|
74060570/cell_49
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
class GetDfForPreprocessing:
    """Summarise missing values in a DataFrame ahead of preprocessing."""

    def __init__(self, df: pd.DataFrame):
        self.df = df

    def get_info(self):
        """Print a short summary of the missing values in the DataFrame."""
        total_cells = np.prod(self.df.shape)
        total_missing_count = self.df.isnull().sum().sum()
        missing_columns = self.df.columns[self.df.isnull().any()]
        print(f'Total cells: {total_cells}')
        print(f'Total missing values: {total_missing_count}')
        print(f'Columns with missing values: {list(missing_columns)}')

    def get_percentage_missing_columns(self):
        """Return the missing-value count and percentage for every column."""
        count_missing = self.df.isnull().sum()
        count_missing_percentage = round(self.df.isnull().sum() * 100 / len(self.df))
        missing_column_name = self.df.columns
        missing_df = pd.DataFrame(zip(count_missing, count_missing_percentage, missing_column_name), columns=['Missing Count', '%Missing', 'ColumnName'])
        missing_df = missing_df.set_index('ColumnName')
        return missing_df

    def get_column_with_null(self):
        """Return a list of columns where at least 30% of the values are null."""
        df_size = self.df.shape[0]  # percentages are relative to the row count
        bad_columns = []
        for column in self.df.columns:
            null_per_column = self.df[column].isnull().sum()
            percentage = round(null_per_column / df_size * 100, 2)
            if percentage >= 30:
                bad_columns.append(column)
        return bad_columns
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df = districts_df.copy()
districts_df = districts_df[districts_df['state'].notna()]
for col in district_null_columns:
freq = districts_df[col].mode()[0]
districts_df[col] = districts_df[col].fillna(freq)
PATH = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
temp = []
for district in districts_df.district_id.unique():
df = pd.read_csv(f'{PATH}/{district}.csv', index_col=None, header=0)
df['district_id'] = district
if df.time.nunique() == 366:
temp.append(df)
engagement_df = pd.concat(temp)
engagement_df = engagement_df.reset_index(drop=True)
# Plotting functions
def bar_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
sns.barplot(data = df, x=x_col, y=y_col, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def count_plot(df, col, title):
plt.figure(figsize=(20, 7))
sns.countplot(data = df,y = col, order=df[col].value_counts().index, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def pie_plot(df, col, title=''):
count = df[col].value_counts()
plt.figure(figsize=(20, 7))
    plt.pie(list(count), labels=count.index, colors=sns.color_palette('cubehelix', len(count)), autopct='%1.2f%%')  # plt.pie has no palette kwarg; pass explicit colors
plt.title(title, size=16)
plt.legend()
plt.show()
def count_plot2(df,col,hue,title):
plt.figure(figsize=(20,10))
ax=sns.countplot(data=df,x=col,hue= hue,palette='Set3')
plt.xticks(rotation=90)
plt.title(title, size=16)
plt.show()
def time_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
    sns.lineplot(data=df, x=x_col, y=y_col)
plt.title(title, size=16)
plt.xticks(rotation=90, fontsize=14)
plt.yticks( fontsize=14)
plt.xlabel(x_col, fontsize=16)
plt.ylabel(y_col, fontsize=16)
plt.show()
count_plot(districts_df, 'pp_total_raw', 'Total Expenditure Per Pupil')
|
code
|
74060570/cell_18
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
|
code
|
74060570/cell_51
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
class GetDfForPreprocessing:
    """Summarise missing values in a DataFrame ahead of preprocessing."""

    def __init__(self, df: pd.DataFrame):
        self.df = df

    def get_info(self):
        """Print a short summary of the missing values in the DataFrame."""
        total_cells = np.prod(self.df.shape)
        total_missing_count = self.df.isnull().sum().sum()
        missing_columns = self.df.columns[self.df.isnull().any()]
        print(f'Total cells: {total_cells}')
        print(f'Total missing values: {total_missing_count}')
        print(f'Columns with missing values: {list(missing_columns)}')

    def get_percentage_missing_columns(self):
        """Return the missing-value count and percentage for every column."""
        count_missing = self.df.isnull().sum()
        count_missing_percentage = round(self.df.isnull().sum() * 100 / len(self.df))
        missing_column_name = self.df.columns
        missing_df = pd.DataFrame(zip(count_missing, count_missing_percentage, missing_column_name), columns=['Missing Count', '%Missing', 'ColumnName'])
        missing_df = missing_df.set_index('ColumnName')
        return missing_df

    def get_column_with_null(self):
        """Return a list of columns where at least 30% of the values are null."""
        df_size = self.df.shape[0]  # percentages are relative to the row count
        bad_columns = []
        for column in self.df.columns:
            null_per_column = self.df[column].isnull().sum()
            percentage = round(null_per_column / df_size * 100, 2)
            if percentage >= 30:
                bad_columns.append(column)
        return bad_columns
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df = districts_df.copy()
districts_df = districts_df[districts_df['state'].notna()]
for col in district_null_columns:
freq = districts_df[col].mode()[0]
districts_df[col] = districts_df[col].fillna(freq)
PATH = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
temp = []
for district in districts_df.district_id.unique():
df = pd.read_csv(f'{PATH}/{district}.csv', index_col=None, header=0)
df['district_id'] = district
if df.time.nunique() == 366:
temp.append(df)
engagement_df = pd.concat(temp)
engagement_df = engagement_df.reset_index(drop=True)
# Plotting functions
def bar_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
sns.barplot(data = df, x=x_col, y=y_col, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def count_plot(df, col, title):
plt.figure(figsize=(20, 7))
sns.countplot(data = df,y = col, order=df[col].value_counts().index, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def pie_plot(df, col, title=''):
count = df[col].value_counts()
plt.figure(figsize=(20, 7))
    plt.pie(list(count), labels=count.index, colors=sns.color_palette('cubehelix', len(count)), autopct='%1.2f%%')  # plt.pie has no palette kwarg; pass explicit colors
plt.title(title, size=16)
plt.legend()
plt.show()
def count_plot2(df,col,hue,title):
plt.figure(figsize=(20,10))
ax=sns.countplot(data=df,x=col,hue= hue,palette='Set3')
plt.xticks(rotation=90)
plt.title(title, size=16)
plt.show()
def time_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
    sns.lineplot(data=df, x=x_col, y=y_col)
plt.title(title, size=16)
plt.xticks(rotation=90, fontsize=14)
plt.yticks( fontsize=14)
plt.xlabel(x_col, fontsize=16)
plt.ylabel(y_col, fontsize=16)
plt.show()
count_plot2(districts_df, 'state', 'locale', 'Locality in each State')
|
code
|
74060570/cell_59
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
class GetDfForPreprocessing:
    """Summarise missing values in a DataFrame ahead of preprocessing."""

    def __init__(self, df: pd.DataFrame):
        self.df = df

    def get_info(self):
        """Print a short summary of the missing values in the DataFrame."""
        total_cells = np.prod(self.df.shape)
        total_missing_count = self.df.isnull().sum().sum()
        missing_columns = self.df.columns[self.df.isnull().any()]
        print(f'Total cells: {total_cells}')
        print(f'Total missing values: {total_missing_count}')
        print(f'Columns with missing values: {list(missing_columns)}')

    def get_percentage_missing_columns(self):
        """Return the missing-value count and percentage for every column."""
        count_missing = self.df.isnull().sum()
        count_missing_percentage = round(self.df.isnull().sum() * 100 / len(self.df))
        missing_column_name = self.df.columns
        missing_df = pd.DataFrame(zip(count_missing, count_missing_percentage, missing_column_name), columns=['Missing Count', '%Missing', 'ColumnName'])
        missing_df = missing_df.set_index('ColumnName')
        return missing_df

    def get_column_with_null(self):
        """Return a list of columns where at least 30% of the values are null."""
        df_size = self.df.shape[0]  # percentages are relative to the row count
        bad_columns = []
        for column in self.df.columns:
            null_per_column = self.df[column].isnull().sum()
            percentage = round(null_per_column / df_size * 100, 2)
            if percentage >= 30:
                bad_columns.append(column)
        return bad_columns
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df = districts_df.copy()
districts_df = districts_df[districts_df['state'].notna()]
for col in district_null_columns:
freq = districts_df[col].mode()[0]
districts_df[col] = districts_df[col].fillna(freq)
products_df = products_df[products_df['Sector(s)'].notna()]
products_df['primary_function_main'] = products_df['Primary Essential Function'].apply(lambda x: x.split(' - ')[0] if x == x else x)  # x == x is False only for NaN, so missing values pass through
products_df['primary_function_sub'] = products_df['Primary Essential Function'].apply(lambda x: x.split(' - ')[1] if x == x else x)
products_df.drop('Primary Essential Function', axis=1, inplace=True)
PATH = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
temp = []
for district in districts_df.district_id.unique():
df = pd.read_csv(f'{PATH}/{district}.csv', index_col=None, header=0)
df['district_id'] = district
if df.time.nunique() == 366:
temp.append(df)
engagement_df = pd.concat(temp)
engagement_df = engagement_df.reset_index(drop=True)
# Plotting functions
def bar_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
sns.barplot(data = df, x=x_col, y=y_col, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def count_plot(df, col, title):
plt.figure(figsize=(20, 7))
sns.countplot(data = df,y = col, order=df[col].value_counts().index, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def pie_plot(df, col, title=''):
count = df[col].value_counts()
plt.figure(figsize=(20, 7))
    plt.pie(list(count), labels=count.index, colors=sns.color_palette('cubehelix', len(count)), autopct='%1.2f%%')  # plt.pie has no palette kwarg; pass explicit colors
plt.title(title, size=16)
plt.legend()
plt.show()
def count_plot2(df,col,hue,title):
plt.figure(figsize=(20,10))
ax=sns.countplot(data=df,x=col,hue= hue,palette='Set3')
plt.xticks(rotation=90)
plt.title(title, size=16)
plt.show()
def time_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
    sns.lineplot(data=df, x=x_col, y=y_col)
plt.title(title, size=16)
plt.xticks(rotation=90, fontsize=14)
plt.yticks( fontsize=14)
plt.xlabel(x_col, fontsize=16)
plt.ylabel(y_col, fontsize=16)
plt.show()
count_plot(products_df, 'primary_function_main', 'Function of the Products')
|
code
|
74060570/cell_28
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
products_df = products_df[products_df['Sector(s)'].notna()]
products_df.head()
|
code
|
74060570/cell_15
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
print('Columns with at least 30% null values:')
print(district_null_columns)
|
code
|
74060570/cell_47
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
class GetDfForPreprocessing:
    """Summarise missing values in a DataFrame ahead of preprocessing."""

    def __init__(self, df: pd.DataFrame):
        self.df = df

    def get_info(self):
        """Print a short summary of the missing values in the DataFrame."""
        total_cells = np.prod(self.df.shape)
        total_missing_count = self.df.isnull().sum().sum()
        missing_columns = self.df.columns[self.df.isnull().any()]
        print(f'Total cells: {total_cells}')
        print(f'Total missing values: {total_missing_count}')
        print(f'Columns with missing values: {list(missing_columns)}')

    def get_percentage_missing_columns(self):
        """Return the missing-value count and percentage for every column."""
        count_missing = self.df.isnull().sum()
        count_missing_percentage = round(self.df.isnull().sum() * 100 / len(self.df))
        missing_column_name = self.df.columns
        missing_df = pd.DataFrame(zip(count_missing, count_missing_percentage, missing_column_name), columns=['Missing Count', '%Missing', 'ColumnName'])
        missing_df = missing_df.set_index('ColumnName')
        return missing_df

    def get_column_with_null(self):
        """Return a list of columns where at least 30% of the values are null."""
        df_size = self.df.shape[0]  # percentages are relative to the row count
        bad_columns = []
        for column in self.df.columns:
            null_per_column = self.df[column].isnull().sum()
            percentage = round(null_per_column / df_size * 100, 2)
            if percentage >= 30:
                bad_columns.append(column)
        return bad_columns
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df = districts_df.copy()
districts_df = districts_df[districts_df['state'].notna()]
for col in district_null_columns:
freq = districts_df[col].mode()[0]
districts_df[col] = districts_df[col].fillna(freq)
PATH = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
temp = []
for district in districts_df.district_id.unique():
df = pd.read_csv(f'{PATH}/{district}.csv', index_col=None, header=0)
df['district_id'] = district
if df.time.nunique() == 366:
temp.append(df)
engagement_df = pd.concat(temp)
engagement_df = engagement_df.reset_index(drop=True)
# Plotting functions
def bar_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
sns.barplot(data = df, x=x_col, y=y_col, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def count_plot(df, col, title):
plt.figure(figsize=(20, 7))
sns.countplot(data = df,y = col, order=df[col].value_counts().index, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def pie_plot(df, col, title=''):
count = df[col].value_counts()
plt.figure(figsize=(20, 7))
    plt.pie(list(count), labels=count.index, colors=sns.color_palette('cubehelix', len(count)), autopct='%1.2f%%')  # plt.pie has no palette kwarg; pass explicit colors
plt.title(title, size=16)
plt.legend()
plt.show()
def count_plot2(df,col,hue,title):
plt.figure(figsize=(20,10))
ax=sns.countplot(data=df,x=col,hue= hue,palette='Set3')
plt.xticks(rotation=90)
plt.title(title, size=16)
plt.show()
def time_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
    sns.lineplot(data=df, x=x_col, y=y_col)
plt.title(title, size=16)
plt.xticks(rotation=90, fontsize=14)
plt.yticks( fontsize=14)
plt.xlabel(x_col, fontsize=16)
plt.ylabel(y_col, fontsize=16)
plt.show()
count_plot(districts_df, 'pct_free/reduced', 'Percentage Free or Reduced-price lunch Distribution')
|
code
|
74060570/cell_17
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
|
code
|
74060570/cell_35
|
[
"text_html_output_1.png"
] |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
class GetDfForPreprocessing:
    """Summarise missing values in a DataFrame ahead of preprocessing."""

    def __init__(self, df: pd.DataFrame):
        self.df = df

    def get_info(self):
        """Print a short summary of the missing values in the DataFrame."""
        total_cells = np.prod(self.df.shape)
        total_missing_count = self.df.isnull().sum().sum()
        missing_columns = self.df.columns[self.df.isnull().any()]
        print(f'Total cells: {total_cells}')
        print(f'Total missing values: {total_missing_count}')
        print(f'Columns with missing values: {list(missing_columns)}')

    def get_percentage_missing_columns(self):
        """Return the missing-value count and percentage for every column."""
        count_missing = self.df.isnull().sum()
        count_missing_percentage = round(self.df.isnull().sum() * 100 / len(self.df))
        missing_column_name = self.df.columns
        missing_df = pd.DataFrame(zip(count_missing, count_missing_percentage, missing_column_name), columns=['Missing Count', '%Missing', 'ColumnName'])
        missing_df = missing_df.set_index('ColumnName')
        return missing_df

    def get_column_with_null(self):
        """Return a list of columns where at least 30% of the values are null."""
        df_size = self.df.shape[0]  # percentages are relative to the row count
        bad_columns = []
        for column in self.df.columns:
            null_per_column = self.df[column].isnull().sum()
            percentage = round(null_per_column / df_size * 100, 2)
            if percentage >= 30:
                bad_columns.append(column)
        return bad_columns
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df = districts_df.copy()
districts_df = districts_df[districts_df['state'].notna()]
for col in district_null_columns:
freq = districts_df[col].mode()[0]
districts_df[col] = districts_df[col].fillna(freq)
PATH = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
temp = []
for district in districts_df.district_id.unique():
df = pd.read_csv(f'{PATH}/{district}.csv', index_col=None, header=0)
df['district_id'] = district
if df.time.nunique() == 366:
temp.append(df)
engagement_df = pd.concat(temp)
engagement_df = engagement_df.reset_index(drop=True)
engagement_df.head()
|
code
|
74060570/cell_43
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
class GetDfForPreprocessing:
    """Summarise missing values in a DataFrame ahead of preprocessing."""

    def __init__(self, df: pd.DataFrame):
        self.df = df

    def get_info(self):
        """Print a short summary of the missing values in the DataFrame."""
        total_cells = np.prod(self.df.shape)
        total_missing_count = self.df.isnull().sum().sum()
        missing_columns = self.df.columns[self.df.isnull().any()]
        print(f'Total cells: {total_cells}')
        print(f'Total missing values: {total_missing_count}')
        print(f'Columns with missing values: {list(missing_columns)}')

    def get_percentage_missing_columns(self):
        """Return the missing-value count and percentage for every column."""
        count_missing = self.df.isnull().sum()
        count_missing_percentage = round(self.df.isnull().sum() * 100 / len(self.df))
        missing_column_name = self.df.columns
        missing_df = pd.DataFrame(zip(count_missing, count_missing_percentage, missing_column_name), columns=['Missing Count', '%Missing', 'ColumnName'])
        missing_df = missing_df.set_index('ColumnName')
        return missing_df

    def get_column_with_null(self):
        """Return a list of columns where at least 30% of the values are null."""
        df_size = self.df.shape[0]  # percentages are relative to the row count
        bad_columns = []
        for column in self.df.columns:
            null_per_column = self.df[column].isnull().sum()
            percentage = round(null_per_column / df_size * 100, 2)
            if percentage >= 30:
                bad_columns.append(column)
        return bad_columns
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df = districts_df.copy()
districts_df = districts_df[districts_df['state'].notna()]
for col in district_null_columns:
freq = districts_df[col].mode()[0]
districts_df[col] = districts_df[col].fillna(freq)
PATH = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
temp = []
for district in districts_df.district_id.unique():
df = pd.read_csv(f'{PATH}/{district}.csv', index_col=None, header=0)
df['district_id'] = district
if df.time.nunique() == 366:
temp.append(df)
engagement_df = pd.concat(temp)
engagement_df = engagement_df.reset_index(drop=True)
# Plotting functions
def bar_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
sns.barplot(data = df, x=x_col, y=y_col, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def count_plot(df, col, title):
plt.figure(figsize=(20, 7))
sns.countplot(data = df,y = col, order=df[col].value_counts().index, palette="Set3")
plt.title(title, size=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
def pie_plot(df, col, title=''):
count = df[col].value_counts()
plt.figure(figsize=(20, 7))
    plt.pie(list(count), labels=count.index, colors=sns.color_palette('cubehelix', len(count)), autopct='%1.2f%%')  # plt.pie has no palette kwarg; pass explicit colors
plt.title(title, size=16)
plt.legend()
plt.show()
def count_plot2(df,col,hue,title):
plt.figure(figsize=(20,10))
ax=sns.countplot(data=df,x=col,hue= hue,palette='Set3')
plt.xticks(rotation=90)
plt.title(title, size=16)
plt.show()
def time_plot(df, x_col, y_col, title=''):
plt.figure(figsize=(20, 7))
    sns.lineplot(data=df, x=x_col, y=y_col)
plt.title(title, size=16)
plt.xticks(rotation=90, fontsize=14)
plt.yticks( fontsize=14)
plt.xlabel(x_col, fontsize=16)
plt.ylabel(y_col, fontsize=16)
plt.show()
count_plot(districts_df, 'state', 'Count of State Distribution')
|
code
|
74060570/cell_14
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
|
code
|
74060570/cell_22
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
products_df.head()
|
code
|
74060570/cell_12
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_info()
|
code
|
74060570/cell_36
|
[
"text_plain_output_1.png"
] |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
districts_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/districts_info.csv')
products_df = pd.read_csv('../input/learnplatform-covid19-impact-on-digital-learning/products_info.csv')
class GetDfForPreprocessing:
    """Summarise missing values in a DataFrame ahead of preprocessing."""

    def __init__(self, df: pd.DataFrame):
        self.df = df

    def get_info(self):
        """Print a short summary of the missing values in the DataFrame."""
        total_cells = np.prod(self.df.shape)
        total_missing_count = self.df.isnull().sum().sum()
        missing_columns = self.df.columns[self.df.isnull().any()]
        print(f'Total cells: {total_cells}')
        print(f'Total missing values: {total_missing_count}')
        print(f'Columns with missing values: {list(missing_columns)}')

    def get_percentage_missing_columns(self):
        """Return the missing-value count and percentage for every column."""
        count_missing = self.df.isnull().sum()
        count_missing_percentage = round(self.df.isnull().sum() * 100 / len(self.df))
        missing_column_name = self.df.columns
        missing_df = pd.DataFrame(zip(count_missing, count_missing_percentage, missing_column_name), columns=['Missing Count', '%Missing', 'ColumnName'])
        missing_df = missing_df.set_index('ColumnName')
        return missing_df

    def get_column_with_null(self):
        """Return a list of columns where at least 30% of the values are null."""
        df_size = self.df.shape[0]  # percentages are relative to the row count
        bad_columns = []
        for column in self.df.columns:
            null_per_column = self.df[column].isnull().sum()
            percentage = round(null_per_column / df_size * 100, 2)
            if percentage >= 30:
                bad_columns.append(column)
        return bad_columns
prep_dist = GetDfForPreprocessing(districts_df)
prep_dist.get_percentage_missing_columns()
district_null_columns = prep_dist.get_column_with_null()
districts_df = districts_df[districts_df.state.notna()].reset_index(drop=True)
districts_df.shape
districts_df.isnull().sum()
districts_df = districts_df.copy()
districts_df = districts_df[districts_df['state'].notna()]
for col in district_null_columns:
freq = districts_df[col].mode()[0]
districts_df[col] = districts_df[col].fillna(freq)
PATH = '../input/learnplatform-covid19-impact-on-digital-learning/engagement_data'
temp = []
for district in districts_df.district_id.unique():
df = pd.read_csv(f'{PATH}/{district}.csv', index_col=None, header=0)
df['district_id'] = district
if df.time.nunique() == 366:
temp.append(df)
engagement_df = pd.concat(temp)
engagement_df = engagement_df.reset_index(drop=True)
prep_eng = GetDfForPreprocessing(engagement_df)
prep_eng.get_info()
|
code
|
128034186/cell_13
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.isna().sum()
train.isna().sum()
train.head()
|
code
|
128034186/cell_4
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.head()
|
code
|
128034186/cell_20
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.isna().sum()
train.isna().sum()
def split_col(df, col_list, delimiter_list):
new_df = pd.DataFrame()
for i, col in enumerate(col_list):
new_cols = df[col].str.split(delimiter_list[i], expand=True)
new_cols.columns = [f'{col}_{j}' for j in range(new_cols.shape[1])]
new_df = pd.concat([new_df, new_cols], axis=1)
df = pd.concat([df, new_df], axis=1)
return df
train = split_col(train, ['PassengerId', 'Cabin'], ['_', '/'])
alone = train[train.groupby('PassengerId_0').transform(len) > 1]
alone[['Cabin_0']].isnull().apply(lambda x: all(x), axis=1)
alone['Alone'] = alone[['Cabin_0']].isnull().apply(lambda x: all(x), axis=1)
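# Note: indexing train with a boolean DataFrame above keeps the frame's shape and
# NaN-masks rows whose travel group has size <= 1, so 'Alone' ends up True for solo
# travellers (and also for rows where Cabin was already missing).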
train = pd.concat([train, alone['Alone']], axis=1)
train.nunique()
train.info()
|
code
|
128034186/cell_6
|
[
"text_html_output_1.png"
] |
import missingno as msno
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
msno.matrix(train)
|
code
|
128034186/cell_26
|
[
"text_html_output_1.png"
] |
from sklearn.preprocessing import LabelBinarizer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.isna().sum()
train.isna().sum()
def split_col(df, col_list, delimiter_list):
new_df = pd.DataFrame()
for i, col in enumerate(col_list):
new_cols = df[col].str.split(delimiter_list[i], expand=True)
new_cols.columns = [f'{col}_{j}' for j in range(new_cols.shape[1])]
new_df = pd.concat([new_df, new_cols], axis=1)
df = pd.concat([df, new_df], axis=1)
return df
train = split_col(train, ['PassengerId', 'Cabin'], ['_', '/'])
alone = train[train.groupby('PassengerId_0').transform(len) > 1]
alone[['Cabin_0']].isnull().apply(lambda x: all(x), axis=1)
alone['Alone'] = alone[['Cabin_0']].isnull().apply(lambda x: all(x), axis=1)
train = pd.concat([train, alone['Alone']], axis=1)
train.nunique()
lb = LabelBinarizer()
def label_numeric(df, columns):
df_lb = pd.DataFrame()
lb = LabelBinarizer()
for col in columns:
        if df[col].dtype == 'object':  # inspect the passed-in df, not the global train
lb_results = lb.fit_transform(df[col])
if len(df[col].value_counts()) == 2:
lb_classes = [col]
else:
lb_classes = [f'{col}_{class_}' for class_ in lb.classes_]
df_lb = pd.concat([df_lb, pd.DataFrame(lb_results, columns=lb_classes)], axis=1)
        elif df[col].dtype == 'float64':
lb_classes = [col]
df_lb = pd.concat([df_lb, pd.DataFrame(df[col], columns=lb_classes)], axis=1)
return df_lb
train.columns
train_features = label_numeric(train, ['HomePlanet', 'CryoSleep', 'Destination', 'VIP', 'Cabin_0', 'Cabin_2', 'Alone', 'Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck'])
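# Rough alternative sketch (not the notebook's method): pd.get_dummies one-hot
# encodes the object columns much like the LabelBinarizer loop above.
alt_features = pd.get_dummies(train[['HomePlanet', 'CryoSleep', 'Destination', 'VIP']].astype(str))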
train_features
|
code
|
128034186/cell_11
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.isna().sum()
train.isna().sum()
fig, ax = plt.subplots(1, 2)
fig.set_figheight(5)
fig.set_figwidth(12)
sns.countplot(train, x='CryoSleep', ax=ax[0])
sns.countplot(train, x='CryoSleep', hue='Transported', ax=ax[1])
|
code
|
128034186/cell_19
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.isna().sum()
train.isna().sum()
def split_col(df, col_list, delimiter_list):
new_df = pd.DataFrame()
for i, col in enumerate(col_list):
new_cols = df[col].str.split(delimiter_list[i], expand=True)
new_cols.columns = [f'{col}_{j}' for j in range(new_cols.shape[1])]
new_df = pd.concat([new_df, new_cols], axis=1)
df = pd.concat([df, new_df], axis=1)
return df
train = split_col(train, ['PassengerId', 'Cabin'], ['_', '/'])
alone = train[train.groupby('PassengerId_0').transform(len) > 1]
alone[['Cabin_0']].isnull().apply(lambda x: all(x), axis=1)
alone['Alone'] = alone[['Cabin_0']].isnull().apply(lambda x: all(x), axis=1)
train = pd.concat([train, alone['Alone']], axis=1)
train.nunique()
|
code
|
128034186/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import missingno as msno
from sklearn.preprocessing import label_binarize
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import KFold, cross_val_score
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.metrics import accuracy_score
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from catboost import CatBoostClassifier
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
128034186/cell_7
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.isna().sum()
|
code
|
128034186/cell_18
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.isna().sum()
train.isna().sum()
def split_col(df, col_list, delimiter_list):
new_df = pd.DataFrame()
for i, col in enumerate(col_list):
new_cols = df[col].str.split(delimiter_list[i], expand=True)
new_cols.columns = [f'{col}_{j}' for j in range(new_cols.shape[1])]
new_df = pd.concat([new_df, new_cols], axis=1)
df = pd.concat([df, new_df], axis=1)
return df
train = split_col(train, ['PassengerId', 'Cabin'], ['_', '/'])
alone = train[train.groupby('PassengerId_0').transform(len) > 1]
alone[['Cabin_0']].isnull().apply(lambda x: all(x), axis=1)
alone['Alone'] = alone[['Cabin_0']].isnull().apply(lambda x: all(x), axis=1)
train = pd.concat([train, alone['Alone']], axis=1)
train.head()
|
code
|
128034186/cell_8
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.isna().sum()
train.info()
|
code
|
128034186/cell_16
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.isna().sum()
train.isna().sum()
def split_col(df, col_list, delimiter_list):
new_df = pd.DataFrame()
for i, col in enumerate(col_list):
new_cols = df[col].str.split(delimiter_list[i], expand=True)
new_cols.columns = [f'{col}_{j}' for j in range(new_cols.shape[1])]
new_df = pd.concat([new_df, new_cols], axis=1)
df = pd.concat([df, new_df], axis=1)
return df
train = split_col(train, ['PassengerId', 'Cabin'], ['_', '/'])
train.info()
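# Minimal sketch of the str.split(expand=True) mechanics that split_col relies on:
# a hypothetical two-part id becomes two columns, which split_col then renames.
demo = pd.Series(['0001_01', '0002_03'])
print(demo.str.split('_', expand=True))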
|
code
|
128034186/cell_31
|
[
"text_plain_output_1.png"
] |
from catboost import CatBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold, cross_val_score
from sklearn.preprocessing import LabelBinarizer
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.isna().sum()
train.isna().sum()
def split_col(df, col_list, delimiter_list):
new_df = pd.DataFrame()
for i, col in enumerate(col_list):
new_cols = df[col].str.split(delimiter_list[i], expand=True)
new_cols.columns = [f'{col}_{j}' for j in range(new_cols.shape[1])]
new_df = pd.concat([new_df, new_cols], axis=1)
df = pd.concat([df, new_df], axis=1)
return df
train = split_col(train, ['PassengerId', 'Cabin'], ['_', '/'])
alone = train[train.groupby('PassengerId_0').transform(len) > 1]
alone[['Cabin_0']].isnull().apply(lambda x: all(x), axis=1)
alone['Alone'] = alone[['Cabin_0']].isnull().apply(lambda x: all(x), axis=1)
train = pd.concat([train, alone['Alone']], axis=1)
train.nunique()
lb = LabelBinarizer()
def label_numeric(df, columns):
df_lb = pd.DataFrame()
lb = LabelBinarizer()
for col in columns:
        if df[col].dtype == 'object':  # inspect the passed-in df, not the global train
lb_results = lb.fit_transform(df[col])
if len(df[col].value_counts()) == 2:
lb_classes = [col]
else:
lb_classes = [f'{col}_{class_}' for class_ in lb.classes_]
df_lb = pd.concat([df_lb, pd.DataFrame(lb_results, columns=lb_classes)], axis=1)
        elif df[col].dtype == 'float64':
lb_classes = [col]
df_lb = pd.concat([df_lb, pd.DataFrame(df[col], columns=lb_classes)], axis=1)
return df_lb
train.columns
train_features = label_numeric(train, ['HomePlanet', 'CryoSleep', 'Destination', 'VIP', 'Cabin_0', 'Cabin_2', 'Alone', 'Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck'])
train_features = train_features.to_numpy()
train_label = lb.fit_transform(train['Transported'])
kf = KFold(n_splits=10, shuffle=True, random_state=42)
scores = []
def predict(clf):
global scores
for i, (train_index, test_index) in enumerate(kf.split(train_features)):
X_train, X_test = (train_features[train_index], train_features[test_index])
y_train, y_test = (train_label[train_index], train_label[test_index])
        clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
y_pred = lb.fit_transform(y_pred)
acc = accuracy_score(y_test, y_pred)
scores.append(acc)
scores = []
predict(CatBoostClassifier(logging_level='Silent'))
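# Summarise the 10-fold run above (assumes predict() populated `scores`):
print(f'CatBoost mean CV accuracy: {np.mean(scores):.4f} +/- {np.std(scores):.4f}')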
|
code
|
128034186/cell_24
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.isna().sum()
train.isna().sum()
def split_col(df, col_list, delimiter_list):
new_df = pd.DataFrame()
for i, col in enumerate(col_list):
new_cols = df[col].str.split(delimiter_list[i], expand=True)
new_cols.columns = [f'{col}_{j}' for j in range(new_cols.shape[1])]
new_df = pd.concat([new_df, new_cols], axis=1)
df = pd.concat([df, new_df], axis=1)
return df
train = split_col(train, ['PassengerId', 'Cabin'], ['_', '/'])
alone = train[train.groupby('PassengerId_0').transform(len) > 1]
alone[['Cabin_0']].isnull().apply(lambda x: all(x), axis=1)
alone['Alone'] = alone[['Cabin_0']].isnull().apply(lambda x: all(x), axis=1)
train = pd.concat([train, alone['Alone']], axis=1)
train.nunique()
train.columns
|
code
|
128034186/cell_22
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.isna().sum()
train.isna().sum()
def split_col(df, col_list, delimiter_list):
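    # Split each listed string column on its matching delimiter and append
    # the resulting pieces as new '<col>_<j>' columns next to the originals.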
new_df = pd.DataFrame()
for i, col in enumerate(col_list):
new_cols = df[col].str.split(delimiter_list[i], expand=True)
new_cols.columns = [f'{col}_{j}' for j in range(new_cols.shape[1])]
new_df = pd.concat([new_df, new_cols], axis=1)
df = pd.concat([df, new_df], axis=1)
return df
train = split_col(train, ['PassengerId', 'Cabin'], ['_', '/'])
alone = train[train.groupby('PassengerId_0').transform(len) > 1]
alone[['Cabin_0']].isnull().apply(lambda x: all(x), axis=1)
alone['Alone'] = alone[['Cabin_0']].isnull().apply(lambda x: all(x), axis=1)
train = pd.concat([train, alone['Alone']], axis=1)
train.nunique()
train.head()
|
code
|
128034186/cell_10
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.isna().sum()
train.isna().sum()
|
code
|
128034186/cell_12
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
train.isna().sum()
train.isna().sum()
fig, ax = plt.subplots(1, 2)
fig.set_figheight(5)
fig.set_figwidth(12)
sns.countplot(train, x='CryoSleep', ax=ax[0])
sns.countplot(train, x='CryoSleep', hue="Transported", ax=ax[1])
fig, ax = plt.subplots(1, 3)
fig.set_figheight(5)
fig.set_figwidth(16)
sns.histplot(data=train, x='Age', kde=True, hue='Transported', ax=ax[0])
sns.kdeplot(data=train, x='Age', hue='Transported', ax=ax[1])
sns.boxplot(data=train, x='Transported', y='Age', ax=ax[2])
|
code
|
128034186/cell_5
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
for col in train:
print('===> {}, unique values: {}'.format(col, train[col].nunique()))
print(train[col].unique())
print('\n')
|
code
|
128011575/cell_4
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
imdb_all = pd.read_csv('/kaggle/input/imdb-dataset-of-top-1000-movies-and-tv-shows/imdb_top_1000.csv')
imdb_all['Gross'] = imdb_all['Gross'].str.replace(',', '')
imdb_all['Gross'] = pd.to_numeric(imdb_all['Gross'])
imdb_all['Gross'] = imdb_all['Gross'].fillna(imdb_all['Gross'].mean())
imdb_all['Certificate'] = imdb_all['Certificate'].fillna('No certificate')
imdb_all['Meta_score'] = imdb_all['Meta_score'].fillna(imdb_all['Meta_score'].mean())
imdb_all.info()
|
code
|
128011575/cell_2
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
imdb_all = pd.read_csv('/kaggle/input/imdb-dataset-of-top-1000-movies-and-tv-shows/imdb_top_1000.csv')
imdb_all.head()
|
code
|
128011575/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
128011575/cell_8
|
[
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
imdb_all = pd.read_csv('/kaggle/input/imdb-dataset-of-top-1000-movies-and-tv-shows/imdb_top_1000.csv')
imdb_all['Gross'] = imdb_all['Gross'].str.replace(',', '')
imdb_all['Gross'] = pd.to_numeric(imdb_all['Gross'])
imdb_all['Gross'] = imdb_all['Gross'].fillna(imdb_all['Gross'].mean())
imdb_all['Certificate'] = imdb_all['Certificate'].fillna('No certificate')
imdb_all['Meta_score'] = imdb_all['Meta_score'].fillna(imdb_all['Meta_score'].mean())
import seaborn as sns
import matplotlib.pyplot as plt
director_gross = imdb_all[['Director', 'Gross']]
director_gross = director_gross.groupby('Director').agg(['mean', 'count'])
df = director_gross.sort_values(('Gross', 'mean'), ascending=False)[:20]
df['Gross', 'mean'] = round(df['Gross', 'mean'] / 1000000, 2)
color_map = ['#F2DB83' for _ in range(20)]
color_map[0] = color_map[1] = color_map[2] = '#000000'
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
ax.bar(df.index, df['Gross', 'mean'], width=0.5, edgecolor='darkgray', linewidth=0.6, color=color_map)
ax.set_xticks(range(len(df.index)))
ax.set_xticklabels(df.index, fontfamily='serif', rotation=55)
fig.text(0.09, 0.95, 'Top 20 directors by mean gross', fontsize=13, fontweight='bold', fontfamily='serif')
fig.text(0.09, 0.9, '\nAs you can see the most profitable director is Anthony Russo and just behind him are Gareth Edwards and JJ. Abrams\n', fontsize=10, fontweight='light', fontfamily='serif')
grid_y_ticks = np.arange(0, 700, 100)
ax.set_yticks(grid_y_ticks)
plt.show()
|
code
|
128011575/cell_5
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import seaborn as sns
import seaborn as sns
import matplotlib.pyplot as plt
sns.palplot(['#DBA506', '#F2DB83', '#000000'])
plt.title('IMDB brand pallete', loc='left', fontfamily='serif', fontsize=15, y=1.2)
plt.show()
|
code
|
104130523/cell_13
|
[
"text_plain_output_1.png"
] |
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
all_train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
val_data = all_train_data[:int(0.2 * len(all_train_data))]
train_data = all_train_data[int(0.2 * len(all_train_data)):].reset_index()
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
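# Weight the positive (survived) class by the non-survivor/survivor ratio.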
survived_ratio = sum(train_data['Survived']) / len(train_data)
scale_pos_weight = (1 - survived_ratio) / survived_ratio
def todummy(x, categories=None):
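    # One-hot encode x against `categories`; the extra last column flags
    # values that match none of them (catch-all / NA).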
X = np.zeros((len(x), len(categories) + 1), dtype=int)
for i, cat in enumerate(categories):
X[:, i] = x == cat
X[:, -1] = 1 - X[:, :-1].sum(axis=1)
return X
def majorcategories(x: pd.Series, cutoff_categorynum=10, cutoff_percentile=0.95, verbose=True) -> list:
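    # Return the most frequent categories of x, capped at `cutoff_categorynum`
    # entries and at the cumulative frequency mass `cutoff_percentile`.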
xcopy = x[~x.isnull()]
nullpdf = sum(x.isnull()) / len(x)
category = np.unique(xcopy)
valuepdf = {}
for cat in category:
valuepdf[cat] = sum(xcopy == cat) / len(x)
sortvaluepdf = sorted(valuepdf.items(), key=lambda x: x[1], reverse=True)
cdf = np.cumsum([x[1] for x in sortvaluepdf])
if len(sortvaluepdf) <= cutoff_categorynum:
return [x[0] for x in sortvaluepdf]
else:
majorcategory = []
i = 0
while i < cutoff_categorynum and i < len(sortvaluepdf):
p = cdf[i]
if p < cutoff_percentile:
majorcategory.append(sortvaluepdf[i][0])
else:
break
i += 1
return majorcategory
trained_prefix = all_train_data['Name'].apply(lambda x: x.split(', ')[-1].split('. ')[0])
trained_prefix_category = majorcategories(trained_prefix)
trained_ticketstr = []
for i in range(len(all_train_data)):
ticket = all_train_data.loc[i, 'Ticket']
if ' ' in ticket:
rev_ticket = ticket[::-1]
num_ticket = rev_ticket[:rev_ticket.index(' ')]
string = ticket.replace(num_ticket[::-1], '')
elif any([x in [str(j) for j in range(10)] for x in ticket]):
string = ''
else:
string = ticket
string = ''.join([x for x in string if x not in ['.', '/', ' ']])
if len(string) > 0:
trained_ticketstr.append(string)
trained_ticketstr_category = majorcategories(pd.Series(trained_ticketstr), cutoff_categorynum=100)
trained_ticketstr_category += ['NA']
trained_cabinstr = []
for i in range(len(all_train_data)):
cabin = all_train_data.loc[i, 'Cabin']
if type(cabin) is str:
cabin = ''.join([x for x in cabin if x not in [str(j) for j in range(10)]])
cabin_split = np.array(cabin.split(' '))
_, idx = np.unique(cabin_split, return_index=True)
trained_cabinstr.append(' '.join(cabin_split[np.sort(idx)]))
trained_cabinstr_category = majorcategories(pd.Series(trained_cabinstr), cutoff_categorynum=100)
trained_cabinstr_category += ['NA']
trained_embarked_category = majorcategories(all_train_data['Embarked'])
def preprocessing(data):
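    # Build the feature matrix: one-hot encode name prefix, sex, ticket/cabin
    # strings and embarkation port; bucket and log-transform Age; extract
    # ticket/cabin numbers; return (feature DataFrame, labels or None).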
X = {}
if 'Survived' in data:
y = data['Survived']
else:
y = None
X['Pclass'] = data['Pclass']
prefix = data['Name'].apply(lambda x: x.split(', ')[-1].split('. ')[0])
dummy = todummy(prefix, trained_prefix_category)
for i, cat in enumerate(trained_prefix_category):
X['Prefix_' + cat] = dummy[:, i]
dummy = todummy(data['Sex'], ['male'])
for i, cat in enumerate(['male', 'female']):
X['Sex_' + cat] = dummy[:, i]
X['Age'] = [0.01 if np.isnan(x) else x for x in data['Age']]
X['Age0'] = [x > 0 and x <= 10 for x in X['Age']]
X['Age1'] = [x > 10 and x <= 20 for x in X['Age']]
X['Age2'] = [x > 20 and x <= 40 for x in X['Age']]
X['Age3'] = [x > 40 and x <= 60 for x in X['Age']]
X['Age4'] = [x > 60 for x in X['Age']]
X['Age'] = np.log(X['Age'])
X['SibSp'] = data['SibSp']
X['Parch'] = data['Parch']
X['Ticketnum'] = []
strings = []
for i in range(len(data)):
        ticket = data.loc[i, 'Ticket']
if ' ' in ticket:
rev_ticket = ticket[::-1]
num_ticket = rev_ticket[:rev_ticket.index(' ')]
string = ticket.replace(num_ticket[::-1], '')
string = ''.join([x for x in string if x not in ['.', '/', ' ']])
strings.append(string)
X['Ticketnum'].append(int(num_ticket[::-1]))
elif any([x in [str(j) for j in range(10)] for x in ticket]):
strings.append('NA')
X['Ticketnum'].append(np.log(int(ticket)))
else:
strings.append(ticket)
X['Ticketnum'].append(0)
dummy = todummy(strings, trained_ticketstr_category)
for i, cat in enumerate(trained_ticketstr_category):
X['Ticketstr_' + cat] = dummy[:, i]
X['Fare'] = data['Fare'].copy()
    X['Fare'].loc[data['Fare'].isnull()] = 0.0
X['Cabinnum'] = []
X['Travellernum'] = []
strings = []
for i in range(len(data)):
cabin = data.loc[i, 'Cabin']
if type(cabin) is float:
strings.append('NA')
if np.isnan(data.loc[i, 'Cabin']):
X['Cabinnum'].append(0.0)
else:
X['Cabinnum'].append(float(data.loc[i, 'Cabin']))
X['Travellernum'].append(1)
else:
cabinstr = ''.join([x for x in cabin if x not in [str(j) for j in range(10)]])
cabin_split = np.array(cabinstr.split(' '))
_, idx = np.unique(cabin_split, return_index=True)
string = ' '.join(cabin_split[np.sort(idx)])
if len(string) > 0:
strings.append(string)
else:
strings.append('NA')
cabinnum = cabin.replace(string, '')
if len(cabinnum) > 0:
cabinnum = [int(x) for x in cabin.replace(string, '').split(' ')]
else:
cabinnum = [0]
X['Travellernum'].append(len(cabinnum))
X['Cabinnum'].append(np.mean(cabinnum))
dummy = todummy(strings, trained_cabinstr_category)
for i, cat in enumerate(trained_cabinstr_category):
X['Cabinstr_' + cat] = dummy[:, i]
dummy = todummy(data['Embarked'], trained_embarked_category)
for i, cat in enumerate(trained_embarked_category):
X['Embarked_' + cat] = dummy[:, i]
return (pd.DataFrame(data=X), y)
X_train, y_train = preprocessing(train_data)
X_val, y_val = preprocessing(val_data)
X_test, _ = preprocessing(test_data)
from sklearn.metrics import confusion_matrix
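# Accuracy from the confusion matrix; precision, recall and F1 are also
# reported when verbose is set.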
def modelperformance(y_predict, y_true, verbose=True):
    cfmat = confusion_matrix(y_predict, y_true)
    score = cfmat.trace() / cfmat.sum()
    if verbose:
        precision = cfmat[1, 1] / sum(cfmat[1, :])
        recall = cfmat[1, 1] / sum(cfmat[:, 1])
        f1 = 2 / (1 / precision + 1 / recall)
        print(f'precision = {precision:.4f}, recall = {recall:.4f}, f1 = {f1:.4f}')
return score
lrc = LogisticRegression(penalty='l1', solver='liblinear', class_weight={1: scale_pos_weight, 0: 1.0})
lrc.fit(X_train, y_train)
modelperformance(lrc.predict(X_val), y_val)
|
code
|
104130523/cell_9
|
[
"text_plain_output_1.png"
] |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
all_train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
val_data = all_train_data[:int(0.2 * len(all_train_data))]
train_data = all_train_data[int(0.2 * len(all_train_data)):].reset_index()
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
def todummy(x, categories=None):
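    # One-hot encode x against `categories`; the extra last column flags
    # values that match none of them (catch-all / NA).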
X = np.zeros((len(x), len(categories) + 1), dtype=int)
for i, cat in enumerate(categories):
X[:, i] = x == cat
X[:, -1] = 1 - X[:, :-1].sum(axis=1)
return X
def majorcategories(x: pd.Series, cutoff_categorynum=10, cutoff_percentile=0.95, verbose=True) -> list:
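    # Return the most frequent categories of x, capped at `cutoff_categorynum`
    # entries and at the cumulative frequency mass `cutoff_percentile`.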
xcopy = x[~x.isnull()]
nullpdf = sum(x.isnull()) / len(x)
category = np.unique(xcopy)
valuepdf = {}
for cat in category:
valuepdf[cat] = sum(xcopy == cat) / len(x)
sortvaluepdf = sorted(valuepdf.items(), key=lambda x: x[1], reverse=True)
cdf = np.cumsum([x[1] for x in sortvaluepdf])
if len(sortvaluepdf) <= cutoff_categorynum:
return [x[0] for x in sortvaluepdf]
else:
majorcategory = []
i = 0
while i < cutoff_categorynum and i < len(sortvaluepdf):
p = cdf[i]
if p < cutoff_percentile:
majorcategory.append(sortvaluepdf[i][0])
else:
break
i += 1
return majorcategory
trained_prefix = all_train_data['Name'].apply(lambda x: x.split(', ')[-1].split('. ')[0])
trained_prefix_category = majorcategories(trained_prefix)
trained_ticketstr = []
for i in range(len(all_train_data)):
ticket = all_train_data.loc[i, 'Ticket']
if ' ' in ticket:
rev_ticket = ticket[::-1]
num_ticket = rev_ticket[:rev_ticket.index(' ')]
string = ticket.replace(num_ticket[::-1], '')
elif any([x in [str(j) for j in range(10)] for x in ticket]):
string = ''
else:
string = ticket
string = ''.join([x for x in string if x not in ['.', '/', ' ']])
if len(string) > 0:
trained_ticketstr.append(string)
trained_ticketstr_category = majorcategories(pd.Series(trained_ticketstr), cutoff_categorynum=100)
trained_ticketstr_category += ['NA']
trained_cabinstr = []
for i in range(len(all_train_data)):
cabin = all_train_data.loc[i, 'Cabin']
if type(cabin) is str:
cabin = ''.join([x for x in cabin if x not in [str(j) for j in range(10)]])
cabin_split = np.array(cabin.split(' '))
_, idx = np.unique(cabin_split, return_index=True)
trained_cabinstr.append(' '.join(cabin_split[np.sort(idx)]))
trained_cabinstr_category = majorcategories(pd.Series(trained_cabinstr), cutoff_categorynum=100)
trained_cabinstr_category += ['NA']
trained_embarked_category = majorcategories(all_train_data['Embarked'])
def preprocessing(data):
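    # Build the feature matrix: one-hot encode name prefix, sex, ticket/cabin
    # strings and embarkation port; bucket and log-transform Age; extract
    # ticket/cabin numbers; return (feature DataFrame, labels or None).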
X = {}
if 'Survived' in data:
y = data['Survived']
else:
y = None
X['Pclass'] = data['Pclass']
prefix = data['Name'].apply(lambda x: x.split(', ')[-1].split('. ')[0])
dummy = todummy(prefix, trained_prefix_category)
for i, cat in enumerate(trained_prefix_category):
X['Prefix_' + cat] = dummy[:, i]
dummy = todummy(data['Sex'], ['male'])
for i, cat in enumerate(['male', 'female']):
X['Sex_' + cat] = dummy[:, i]
X['Age'] = [0.01 if np.isnan(x) else x for x in data['Age']]
X['Age0'] = [x > 0 and x <= 10 for x in X['Age']]
X['Age1'] = [x > 10 and x <= 20 for x in X['Age']]
X['Age2'] = [x > 20 and x <= 40 for x in X['Age']]
X['Age3'] = [x > 40 and x <= 60 for x in X['Age']]
X['Age4'] = [x > 60 for x in X['Age']]
X['Age'] = np.log(X['Age'])
X['SibSp'] = data['SibSp']
X['Parch'] = data['Parch']
X['Ticketnum'] = []
strings = []
for i in range(len(data)):
        ticket = data.loc[i, 'Ticket']
if ' ' in ticket:
rev_ticket = ticket[::-1]
num_ticket = rev_ticket[:rev_ticket.index(' ')]
string = ticket.replace(num_ticket[::-1], '')
string = ''.join([x for x in string if x not in ['.', '/', ' ']])
strings.append(string)
X['Ticketnum'].append(int(num_ticket[::-1]))
elif any([x in [str(j) for j in range(10)] for x in ticket]):
strings.append('NA')
X['Ticketnum'].append(np.log(int(ticket)))
else:
strings.append(ticket)
X['Ticketnum'].append(0)
dummy = todummy(strings, trained_ticketstr_category)
for i, cat in enumerate(trained_ticketstr_category):
X['Ticketstr_' + cat] = dummy[:, i]
X['Fare'] = data['Fare'].copy()
    X['Fare'].loc[data['Fare'].isnull()] = 0.0
X['Cabinnum'] = []
X['Travellernum'] = []
strings = []
for i in range(len(data)):
cabin = data.loc[i, 'Cabin']
if type(cabin) is float:
strings.append('NA')
if np.isnan(data.loc[i, 'Cabin']):
X['Cabinnum'].append(0.0)
else:
X['Cabinnum'].append(float(data.loc[i, 'Cabin']))
X['Travellernum'].append(1)
else:
cabinstr = ''.join([x for x in cabin if x not in [str(j) for j in range(10)]])
cabin_split = np.array(cabinstr.split(' '))
_, idx = np.unique(cabin_split, return_index=True)
string = ' '.join(cabin_split[np.sort(idx)])
if len(string) > 0:
strings.append(string)
else:
strings.append('NA')
cabinnum = cabin.replace(string, '')
if len(cabinnum) > 0:
cabinnum = [int(x) for x in cabin.replace(string, '').split(' ')]
else:
cabinnum = [0]
X['Travellernum'].append(len(cabinnum))
X['Cabinnum'].append(np.mean(cabinnum))
dummy = todummy(strings, trained_cabinstr_category)
for i, cat in enumerate(trained_cabinstr_category):
X['Cabinstr_' + cat] = dummy[:, i]
dummy = todummy(data['Embarked'], trained_embarked_category)
for i, cat in enumerate(trained_embarked_category):
X['Embarked_' + cat] = dummy[:, i]
return (pd.DataFrame(data=X), y)
X_train, y_train = preprocessing(train_data)
X_val, y_val = preprocessing(val_data)
X_test, _ = preprocessing(test_data)
X_train.head()
|
code
|
104130523/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
104130523/cell_3
|
[
"text_plain_output_2.png",
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
all_train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
val_data = all_train_data[:int(0.2 * len(all_train_data))]
train_data = all_train_data[int(0.2 * len(all_train_data)):].reset_index()
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
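# Weight the positive (survived) class by the non-survivor/survivor ratio.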
survived_ratio = sum(train_data['Survived']) / len(train_data)
scale_pos_weight = (1 - survived_ratio) / survived_ratio
print(f'survival ratio = {survived_ratio}')
print(f'unbalance ratio = {scale_pos_weight}')
|
code
|
128016055/cell_13
|
[
"text_plain_output_1.png"
] |
from blip.models import blip
import importlib
import inspect
blip_path = inspect.getfile(blip)
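# Rewrite the installed blip source so the tokenizer loads from the local
# Kaggle dataset instead of downloading 'bert-base-uncased', then reload it.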
fin = open(blip_path, 'rt')
data = fin.read()
data = data.replace("BertTokenizer.from_pretrained('bert-base-uncased')", "BertTokenizer.from_pretrained('/kaggle/input/clip-interrogator-models-x/bert-base-uncased')")
fin.close()
fin = open(blip_path, 'wt')
fin.write(data)
fin.close()
importlib.reload(blip)
|
code
|
128016055/cell_9
|
[
"text_plain_output_1.png"
] |
!pip install --no-index --find-links $wheels_path $clip_interrogator_whl_path -q
|
code
|
128016055/cell_34
|
[
"text_plain_output_1.png"
] |
from blip.models import blip
from clip_interrogator import clip_interrogator
from pathlib import Path
from sentence_transformers import SentenceTransformer, models
import importlib
import inspect
import numpy as np
import numpy as np # linear algebra
import open_clip
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sys
blip_path = inspect.getfile(blip)
fin = open(blip_path, 'rt')
data = fin.read()
data = data.replace("BertTokenizer.from_pretrained('bert-base-uncased')", "BertTokenizer.from_pretrained('/kaggle/input/clip-interrogator-models-x/bert-base-uncased')")
fin.close()
fin = open(blip_path, 'wt')
fin.write(data)
fin.close()
importlib.reload(blip)
clip_interrogator_path = inspect.getfile(clip_interrogator.Interrogator)
fin = open(clip_interrogator_path, 'rt')
data = fin.read()
data = data.replace('open_clip.get_tokenizer(clip_model_name)', 'open_clip.get_tokenizer(config.clip_model_name.split("/", 2)[0])')
fin.close()
fin = open(clip_interrogator_path, 'wt')
fin.write(data)
fin.close()
importlib.reload(clip_interrogator)
import os
import sys
from PIL import Image
from pathlib import Path
import matplotlib.pyplot as plt
from transformers import AutoProcessor, BlipForConditionalGeneration
import numpy as np
import pandas as pd
import torch
import open_clip
sys.path.append('../input/sentence-transformers-222/sentence-transformers')
from sentence_transformers import SentenceTransformer, models
comp_path = Path('/kaggle/input/stable-diffusion-image-to-prompts/')
df_submission = pd.read_csv(comp_path / 'sample_submission.csv', index_col='imgId_eId')
images = os.listdir(comp_path / 'images')
imgIds = [i.split('.')[0] for i in images]
eIds = list(range(CFG.embedding_length))
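# One submission row per (image, embedding dimension) pair: repeat each image
# id embedding_length times and tile the dimension indices alongside.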
imgId_eId = ['_'.join(map(str, i)) for i in zip(np.repeat(imgIds, CFG.embedding_length), np.tile(range(CFG.embedding_length), len(imgIds)))]
assert sorted(imgId_eId) == sorted(df_submission.index)
st_model = SentenceTransformer(CFG.sentence_model_path)
model_config = clip_interrogator.Config(clip_model_name=CFG.ci_clip_model_name)
model_config.cache_path = CFG.cache_path
configs_path = os.path.join(os.path.dirname(os.path.dirname(blip_path)), 'configs')
med_config = os.path.join(configs_path, 'med_config.json')
blip_model = blip.blip_decoder(pretrained=CFG.blip_model_path, image_size=model_config.blip_image_eval_size, vit=model_config.blip_model_type, med_config=med_config)
blip_model.eval()
blip_model = blip_model.to(model_config.device)
model_config.blip_model = blip_model
clip_model = open_clip.create_model(CFG.clip_model_name, precision='fp16' if model_config.device == 'cuda' else 'fp32')
open_clip.load_checkpoint(clip_model, CFG.clip_model_path)
clip_model.to(model_config.device).eval()
model_config.clip_model = clip_model
clip_preprocess = open_clip.image_transform(clip_model.visual.image_size, is_train=False, mean=getattr(clip_model.visual, 'image_mean', None), std=getattr(clip_model.visual, 'image_std', None))
model_config.clip_preprocess = clip_preprocess
ci = clip_interrogator.Interrogator(model_config)
|
code
|
128016055/cell_20
|
[
"text_plain_output_1.png"
] |
from pathlib import Path
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sys
import os
import sys
from PIL import Image
from pathlib import Path
import matplotlib.pyplot as plt
from transformers import AutoProcessor, BlipForConditionalGeneration
import numpy as np
import pandas as pd
import torch
import open_clip
sys.path.append('../input/sentence-transformers-222/sentence-transformers')
from sentence_transformers import SentenceTransformer, models
comp_path = Path('/kaggle/input/stable-diffusion-image-to-prompts/')
df_submission = pd.read_csv(comp_path / 'sample_submission.csv', index_col='imgId_eId')
df_submission.head()
|
code
|
128016055/cell_29
|
[
"text_html_output_1.png"
] |
from blip.models import blip
from clip_interrogator import clip_interrogator
from pathlib import Path
from sentence_transformers import SentenceTransformer, models
import importlib
import inspect
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sys
blip_path = inspect.getfile(blip)
fin = open(blip_path, 'rt')
data = fin.read()
data = data.replace("BertTokenizer.from_pretrained('bert-base-uncased')", "BertTokenizer.from_pretrained('/kaggle/input/clip-interrogator-models-x/bert-base-uncased')")
fin.close()
fin = open(blip_path, 'wt')
fin.write(data)
fin.close()
importlib.reload(blip)
clip_interrogator_path = inspect.getfile(clip_interrogator.Interrogator)
fin = open(clip_interrogator_path, 'rt')
data = fin.read()
data = data.replace('open_clip.get_tokenizer(clip_model_name)', 'open_clip.get_tokenizer(config.clip_model_name.split("/", 2)[0])')
fin.close()
fin = open(clip_interrogator_path, 'wt')
fin.write(data)
fin.close()
importlib.reload(clip_interrogator)
import os
import sys
from PIL import Image
from pathlib import Path
import matplotlib.pyplot as plt
from transformers import AutoProcessor, BlipForConditionalGeneration
import numpy as np
import pandas as pd
import torch
import open_clip
sys.path.append('../input/sentence-transformers-222/sentence-transformers')
from sentence_transformers import SentenceTransformer, models
comp_path = Path('/kaggle/input/stable-diffusion-image-to-prompts/')
df_submission = pd.read_csv(comp_path / 'sample_submission.csv', index_col='imgId_eId')
images = os.listdir(comp_path / 'images')
imgIds = [i.split('.')[0] for i in images]
eIds = list(range(CFG.embedding_length))
imgId_eId = ['_'.join(map(str, i)) for i in zip(np.repeat(imgIds, CFG.embedding_length), np.tile(range(CFG.embedding_length), len(imgIds)))]
assert sorted(imgId_eId) == sorted(df_submission.index)
st_model = SentenceTransformer(CFG.sentence_model_path)
model_config = clip_interrogator.Config(clip_model_name=CFG.ci_clip_model_name)
model_config.cache_path = CFG.cache_path
configs_path = os.path.join(os.path.dirname(os.path.dirname(blip_path)), 'configs')
med_config = os.path.join(configs_path, 'med_config.json')
blip_model = blip.blip_decoder(pretrained=CFG.blip_model_path, image_size=model_config.blip_image_eval_size, vit=model_config.blip_model_type, med_config=med_config)
blip_model.eval()
blip_model = blip_model.to(model_config.device)
model_config.blip_model = blip_model
|
code
|
128016055/cell_14
|
[
"text_plain_output_1.png"
] |
from blip.models import blip
from clip_interrogator import clip_interrogator
import importlib
import inspect
blip_path = inspect.getfile(blip)
fin = open(blip_path, 'rt')
data = fin.read()
data = data.replace("BertTokenizer.from_pretrained('bert-base-uncased')", "BertTokenizer.from_pretrained('/kaggle/input/clip-interrogator-models-x/bert-base-uncased')")
fin.close()
fin = open(blip_path, 'wt')
fin.write(data)
fin.close()
importlib.reload(blip)
clip_interrogator_path = inspect.getfile(clip_interrogator.Interrogator)
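# Patch clip_interrogator so open_clip gets only the architecture name (the
# part before the first '/'), matching the offline checkpoint setup.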
fin = open(clip_interrogator_path, 'rt')
data = fin.read()
data = data.replace('open_clip.get_tokenizer(clip_model_name)', 'open_clip.get_tokenizer(config.clip_model_name.split("/", 2)[0])')
fin.close()
fin = open(clip_interrogator_path, 'wt')
fin.write(data)
fin.close()
importlib.reload(clip_interrogator)
|
code
|
128016055/cell_10
|
[
"text_plain_output_1.png"
] |
!pip install --no-index --no-deps /kaggle/input/lavis-pretrained/salesforce-lavis/transformers* -q
|
code
|
88102865/cell_4
|
[
"text_html_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/misoimprovedta/ta-misogyny-train (3).csv', header=None, sep='\t')
df_eval = pd.read_csv('../input/misoimprovedta/ta-misogyny-dev (2).csv', header=None, sep='\t')
df_test = pd.read_csv('../input/misoimprovedta/ta-misogyny-test (2).csv', header=None)
df_eval
|
code
|
88102865/cell_23
|
[
"text_html_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/misoimprovedta/ta-misogyny-train (3).csv', header=None, sep='\t')
df_eval = pd.read_csv('../input/misoimprovedta/ta-misogyny-dev (2).csv', header=None, sep='\t')
df_test = pd.read_csv('../input/misoimprovedta/ta-misogyny-test (2).csv', header=None)
def create_labels(sentence):
splits = sentence.split('\t')
return splits[0]
def change_sentence(sentence):
splits = sentence.split('\t')
return splits[1]
df_eval.rename(columns={0: 'Labels', 1: 'Text'}, inplace=True)
df_eval = df_eval[['Text', 'Labels']]
df.rename(columns={0: 'Labels', 1: 'Text'}, inplace=True)
df = df[['Text', 'Labels']]
df_test.rename(columns={0: 'Text'}, inplace=True)
df_test
|
code
|
88102865/cell_20
|
[
"text_html_output_1.png"
] |
from simpletransformers.classification import ClassificationModel, ClassificationArgs
from sklearn.model_selection import train_test_split
import pandas as pd
df = pd.read_csv('../input/misoimprovedta/ta-misogyny-train (3).csv', header=None, sep='\t')
df_eval = pd.read_csv('../input/misoimprovedta/ta-misogyny-dev (2).csv', header=None, sep='\t')
df_test = pd.read_csv('../input/misoimprovedta/ta-misogyny-test (2).csv', header=None)
def create_labels(sentence):
splits = sentence.split('\t')
return splits[0]
def change_sentence(sentence):
splits = sentence.split('\t')
return splits[1]
df_eval.rename(columns={0: 'Labels', 1: 'Text'}, inplace=True)
df_eval = df_eval[['Text', 'Labels']]
df.rename(columns={0: 'Labels', 1: 'Text'}, inplace=True)
df = df[['Text', 'Labels']]
df_test.rename(columns={0: 'Text'}, inplace=True)
from sklearn.model_selection import train_test_split
X_test, X_dev, y_test, y_dev = train_test_split(df_eval['Text'], df_eval['Labels'], random_state=0)
df_test_ = pd.concat([X_test, y_test], axis=1)
df_dev = pd.concat([X_dev, y_dev], axis=1)
df_dev
model_args = ClassificationArgs()
model_args.overwrite_output_dir = True
model_args.eval_batch_size = 8
model_args.train_batch_size = 8
model_args.learning_rate = 4e-05
model = ClassificationModel('bert', 'google/muril-base-cased', num_labels=9, args=model_args, tokenizer_type='bert', tokenizer_name='google/muril-base-cased')
predictions, raw_outputs = model.predict(df_test_['Text'].to_list())
|
code
|