Dataset schema (one record per Kaggle notebook cell):
path: string, 13 to 17 characters (e.g. "2014006/cell_9")
screenshot_names: list of strings, 1 to 873 items
code: string, 0 to 40.4k characters
cell_type: string, 1 distinct value ("code")
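The records below illustrate this schema. As a minimal usage sketch, here is how such a dataset could be consumed with the Hugging Face `datasets` library; the repository ID is a placeholder assumption, not the dataset's real name:

# Minimal usage sketch. Assumption: the records are published on the
# Hugging Face Hub; 'user/kaggle-cell-screenshots' is a hypothetical ID.
from collections import defaultdict
from datasets import load_dataset

ds = load_dataset('user/kaggle-cell-screenshots', split='train')  # hypothetical repo ID

# Group cell paths by notebook id, e.g. "2014006/cell_9" -> "2014006".
cells_by_notebook = defaultdict(list)
for record in ds:
    notebook_id, cell_name = record['path'].split('/')
    cells_by_notebook[notebook_id].append((cell_name, len(record['screenshot_names'])))

for notebook_id, cells in cells_by_notebook.items():
    print(notebook_id, '->', len(cells), 'cells')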
2014006/cell_9
[ "image_output_11.png", "image_output_17.png", "image_output_14.png", "image_output_13.png", "image_output_5.png", "image_output_18.png", "image_output_7.png", "image_output_20.png", "image_output_4.png", "image_output_8.png", "image_output_16.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_15.png", "image_output_9.png", "image_output_19.png" ]
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
input_path = Path('../input')
train_path = input_path / 'train'
test_path = input_path / 'test'
cameras = os.listdir(train_path)
train_images = []
for camera in cameras:
    for fname in sorted(os.listdir(train_path / camera)):
        train_images.append((camera, fname))
train = pd.DataFrame(train_images, columns=['camera', 'fname'])
test_images = []
for fname in sorted(os.listdir(test_path)):
    test_images.append(fname)
test = pd.DataFrame(test_images, columns=['fname'])
random_images = train.sample(100)
import matplotlib.pyplot as plt
for i, r in random_images[:6].iterrows():
    x = cv2.imread('../input/train/' + r['camera'] + '/' + r['fname'])
random_test_images = test.sample(100)
for i, r in random_test_images[:20].iterrows():
    x = cv2.imread('../input/test/' + r['fname'])
    plt.figure(figsize=(10, 10))
    plt.imshow(x)
    plt.title(r['fname'])
    plt.show()
code
2014006/cell_4
[ "text_plain_output_1.png" ]
from pathlib import Path
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
input_path = Path('../input')
train_path = input_path / 'train'
test_path = input_path / 'test'
cameras = os.listdir(train_path)
train_images = []
for camera in cameras:
    for fname in sorted(os.listdir(train_path / camera)):
        train_images.append((camera, fname))
train = pd.DataFrame(train_images, columns=['camera', 'fname'])
print(train.shape)
code
2014006/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output
import os
from pathlib import Path
import multiprocessing as mp
import numpy as np
import pandas as pd
from skimage.data import imread
from sklearn.ensemble import RandomForestClassifier
import time
import cv2
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
2014006/cell_7
[ "image_output_5.png", "image_output_4.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
input_path = Path('../input')
train_path = input_path / 'train'
test_path = input_path / 'test'
cameras = os.listdir(train_path)
train_images = []
for camera in cameras:
    for fname in sorted(os.listdir(train_path / camera)):
        train_images.append((camera, fname))
train = pd.DataFrame(train_images, columns=['camera', 'fname'])
random_images = train.sample(100)
import matplotlib.pyplot as plt
for i, r in random_images[:6].iterrows():
    x = cv2.imread('../input/train/' + r['camera'] + '/' + r['fname'])
    plt.figure(figsize=(16, 16))
    plt.imshow(x)
    plt.title(r['fname'])
    plt.show()
code
2014006/cell_5
[ "text_plain_output_1.png" ]
from pathlib import Path
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
input_path = Path('../input')
train_path = input_path / 'train'
test_path = input_path / 'test'
cameras = os.listdir(train_path)
train_images = []
for camera in cameras:
    for fname in sorted(os.listdir(train_path / camera)):
        train_images.append((camera, fname))
train = pd.DataFrame(train_images, columns=['camera', 'fname'])
test_images = []
for fname in sorted(os.listdir(test_path)):
    test_images.append(fname)
test = pd.DataFrame(test_images, columns=['fname'])
print(test.shape)
code
128020766/cell_21
[ "text_plain_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
from termcolor import colored
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
from plotly.subplots import make_subplots
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_auc_score
pd.set_option('display.width', 600)
sc_attributes = pd.read_csv('../input/scoutium/scoutium_attributes.csv', sep=';')
sc_potential_labels = pd.read_csv('../input/scoutium/scoutium_potential_labels.csv', sep=';')
df_ = pd.merge(sc_attributes, sc_potential_labels, how='right', on=['task_response_id', 'match_id', 'evaluator_id', 'player_id'])
df = df_.copy()
df.drop(df[df['position_id'] == 1].index, inplace=True)
df.drop(df[df['potential_label'] == 'below_average'].index, inplace=True)
mypivot = pd.pivot_table(data=df, index=['player_id', 'position_id', 'potential_label'], columns=['attribute_id'], values='attribute_value')
mypivot
code
128020766/cell_34
[ "text_plain_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from termcolor import colored
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
from termcolor import colored
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
from plotly.subplots import make_subplots
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_auc_score
pd.set_option('display.width', 600)
sc_attributes = pd.read_csv('../input/scoutium/scoutium_attributes.csv', sep=';')
sc_potential_labels = pd.read_csv('../input/scoutium/scoutium_potential_labels.csv', sep=';')
df_ = pd.merge(sc_attributes, sc_potential_labels, how='right', on=['task_response_id', 'match_id', 'evaluator_id', 'player_id'])
df = df_.copy()
df.drop(df[df['position_id'] == 1].index, inplace=True)
df.drop(df[df['potential_label'] == 'below_average'].index, inplace=True)
mypivot = pd.pivot_table(data=df, index=['player_id', 'position_id', 'potential_label'], columns=['attribute_id'], values='attribute_value')
mypivot
mypivot.reset_index(inplace=True)
mypivot = mypivot.astype(str)
mypivot

def label_encoder(df, column):
    labelencoder = LabelEncoder()
    df[column] = labelencoder.fit_transform(df[column])
    return df

mypivot = label_encoder(mypivot, 'potential_label')
mypivot.columns = mypivot.columns.astype(str)
mypivot.columns
num_cols = mypivot.columns[3:]
num_cols
y = mypivot['potential_label']
X = mypivot.drop(['potential_label', 'player_id'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=123, stratify=y, test_size=0.2, shuffle=True)
print(f"The shape of X_train is --> {colored(X_train.shape, 'red')}")
print(f"The shape of X_test is --> {colored(X_test.shape, 'red')}")
print(f"The shape of y_train is --> {colored(y_train.shape, 'red')}")
print(f"The shape of y_test is --> {colored(y_test.shape, 'red')}")
code
128020766/cell_6
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128020766/cell_29
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
from sklearn.preprocessing import LabelEncoder, StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
from termcolor import colored
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
from plotly.subplots import make_subplots
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_auc_score
pd.set_option('display.width', 600)
sc_attributes = pd.read_csv('../input/scoutium/scoutium_attributes.csv', sep=';')
sc_potential_labels = pd.read_csv('../input/scoutium/scoutium_potential_labels.csv', sep=';')
df_ = pd.merge(sc_attributes, sc_potential_labels, how='right', on=['task_response_id', 'match_id', 'evaluator_id', 'player_id'])
df = df_.copy()
df.drop(df[df['position_id'] == 1].index, inplace=True)
df.drop(df[df['potential_label'] == 'below_average'].index, inplace=True)
mypivot = pd.pivot_table(data=df, index=['player_id', 'position_id', 'potential_label'], columns=['attribute_id'], values='attribute_value')
mypivot
mypivot.reset_index(inplace=True)
mypivot = mypivot.astype(str)
mypivot

def label_encoder(df, column):
    labelencoder = LabelEncoder()
    df[column] = labelencoder.fit_transform(df[column])
    return df

mypivot = label_encoder(mypivot, 'potential_label')
mypivot.columns = mypivot.columns.astype(str)
mypivot.columns
num_cols = mypivot.columns[3:]
num_cols
code
128020766/cell_26
[ "text_plain_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
from sklearn.preprocessing import LabelEncoder, StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
from termcolor import colored
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
from plotly.subplots import make_subplots
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_auc_score
pd.set_option('display.width', 600)
sc_attributes = pd.read_csv('../input/scoutium/scoutium_attributes.csv', sep=';')
sc_potential_labels = pd.read_csv('../input/scoutium/scoutium_potential_labels.csv', sep=';')
df_ = pd.merge(sc_attributes, sc_potential_labels, how='right', on=['task_response_id', 'match_id', 'evaluator_id', 'player_id'])
df = df_.copy()
df.drop(df[df['position_id'] == 1].index, inplace=True)
df.drop(df[df['potential_label'] == 'below_average'].index, inplace=True)
mypivot = pd.pivot_table(data=df, index=['player_id', 'position_id', 'potential_label'], columns=['attribute_id'], values='attribute_value')
mypivot
mypivot.reset_index(inplace=True)
mypivot = mypivot.astype(str)
mypivot

def label_encoder(df, column):
    labelencoder = LabelEncoder()
    df[column] = labelencoder.fit_transform(df[column])
    return df

mypivot = label_encoder(mypivot, 'potential_label')
mypivot.head()
code
128020766/cell_7
[ "text_html_output_4.png", "text_html_output_6.png", "text_html_output_2.png", "text_html_output_5.png", "text_html_output_1.png", "text_html_output_3.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
from termcolor import colored
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
from plotly.subplots import make_subplots
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_auc_score
pd.set_option('display.width', 600)
code
128020766/cell_18
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
from termcolor import colored
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
from plotly.subplots import make_subplots
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_auc_score
pd.set_option('display.width', 600)
sc_attributes = pd.read_csv('../input/scoutium/scoutium_attributes.csv', sep=';')
sc_potential_labels = pd.read_csv('../input/scoutium/scoutium_potential_labels.csv', sep=';')
df_ = pd.merge(sc_attributes, sc_potential_labels, how='right', on=['task_response_id', 'match_id', 'evaluator_id', 'player_id'])
df = df_.copy()
df.drop(df[df['position_id'] == 1].index, inplace=True)
df.drop(df[df['potential_label'] == 'below_average'].index, inplace=True)
df[['potential_label']].value_counts()
code
128020766/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
from termcolor import colored
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
from plotly.subplots import make_subplots
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_auc_score
pd.set_option('display.width', 600)
sc_attributes = pd.read_csv('../input/scoutium/scoutium_attributes.csv', sep=';')
sc_potential_labels = pd.read_csv('../input/scoutium/scoutium_potential_labels.csv', sep=';')
df_ = pd.merge(sc_attributes, sc_potential_labels, how='right', on=['task_response_id', 'match_id', 'evaluator_id', 'player_id'])
df = df_.copy()
df.drop(df[df['position_id'] == 1].index, inplace=True)
df[['position_id']].value_counts()
code
128020766/cell_24
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
from termcolor import colored
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
from plotly.subplots import make_subplots
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_auc_score
pd.set_option('display.width', 600)
sc_attributes = pd.read_csv('../input/scoutium/scoutium_attributes.csv', sep=';')
sc_potential_labels = pd.read_csv('../input/scoutium/scoutium_potential_labels.csv', sep=';')
df_ = pd.merge(sc_attributes, sc_potential_labels, how='right', on=['task_response_id', 'match_id', 'evaluator_id', 'player_id'])
df = df_.copy()
df.drop(df[df['position_id'] == 1].index, inplace=True)
df.drop(df[df['potential_label'] == 'below_average'].index, inplace=True)
mypivot = pd.pivot_table(data=df, index=['player_id', 'position_id', 'potential_label'], columns=['attribute_id'], values='attribute_value')
mypivot
mypivot.reset_index(inplace=True)
mypivot = mypivot.astype(str)
mypivot
code
128020766/cell_22
[ "text_plain_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
from termcolor import colored
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
from plotly.subplots import make_subplots
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_auc_score
pd.set_option('display.width', 600)
sc_attributes = pd.read_csv('../input/scoutium/scoutium_attributes.csv', sep=';')
sc_potential_labels = pd.read_csv('../input/scoutium/scoutium_potential_labels.csv', sep=';')
df_ = pd.merge(sc_attributes, sc_potential_labels, how='right', on=['task_response_id', 'match_id', 'evaluator_id', 'player_id'])
df = df_.copy()
df.drop(df[df['position_id'] == 1].index, inplace=True)
df.drop(df[df['potential_label'] == 'below_average'].index, inplace=True)
mypivot = pd.pivot_table(data=df, index=['player_id', 'position_id', 'potential_label'], columns=['attribute_id'], values='attribute_value')
mypivot
mypivot.info()
code
128020766/cell_27
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
from sklearn.preprocessing import LabelEncoder, StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
from termcolor import colored
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
from plotly.subplots import make_subplots
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_auc_score
pd.set_option('display.width', 600)
sc_attributes = pd.read_csv('../input/scoutium/scoutium_attributes.csv', sep=';')
sc_potential_labels = pd.read_csv('../input/scoutium/scoutium_potential_labels.csv', sep=';')
df_ = pd.merge(sc_attributes, sc_potential_labels, how='right', on=['task_response_id', 'match_id', 'evaluator_id', 'player_id'])
df = df_.copy()
df.drop(df[df['position_id'] == 1].index, inplace=True)
df.drop(df[df['potential_label'] == 'below_average'].index, inplace=True)
mypivot = pd.pivot_table(data=df, index=['player_id', 'position_id', 'potential_label'], columns=['attribute_id'], values='attribute_value')
mypivot
mypivot.reset_index(inplace=True)
mypivot = mypivot.astype(str)
mypivot

def label_encoder(df, column):
    labelencoder = LabelEncoder()
    df[column] = labelencoder.fit_transform(df[column])
    return df

mypivot = label_encoder(mypivot, 'potential_label')
mypivot.columns = mypivot.columns.astype(str)
mypivot.columns
code
128020766/cell_12
[ "text_plain_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
import numpy as np
from termcolor import colored
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
from plotly.subplots import make_subplots
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_auc_score
pd.set_option('display.width', 600)
sc_attributes = pd.read_csv('../input/scoutium/scoutium_attributes.csv', sep=';')
sc_potential_labels = pd.read_csv('../input/scoutium/scoutium_potential_labels.csv', sep=';')
df_ = pd.merge(sc_attributes, sc_potential_labels, how='right', on=['task_response_id', 'match_id', 'evaluator_id', 'player_id'])
df = df_.copy()
df.head()
code
105200060/cell_13
[ "text_plain_output_1.png" ]
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error
import numpy as np
import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error
import os
bmi = pd.read_csv('../input/bmidataset/bmi.csv')
bmi_x = bmi.Weight[:, np.newaxis]
bmi_x_train = bmi_x[:-30]
bmi_x_test = bmi_x[-30:]
bmi_y_train = bmi.Index[:-30]
bmi_y_test = bmi.Index[-30:]
model = linear_model.LinearRegression()
model.fit(bmi_x_train, bmi_y_train)
bmi_y_predicted = model.predict(bmi_x_test)
print('The mean Squared error is ', mean_squared_error(bmi_y_test, bmi_y_predicted))
code
105200060/cell_6
[ "image_output_1.png" ]
import numpy as np
import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error
import os
bmi = pd.read_csv('../input/bmidataset/bmi.csv')
bmi_x = bmi.Weight[:, np.newaxis]
print(bmi_x)
code
105200060/cell_2
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error
import os
bmi = pd.read_csv('../input/bmidataset/bmi.csv')
bmi.head()
code
105200060/cell_11
[ "text_html_output_1.png" ]
from sklearn import datasets, linear_model
import numpy as np
import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error
import os
bmi = pd.read_csv('../input/bmidataset/bmi.csv')
bmi_x = bmi.Weight[:, np.newaxis]
bmi_x_train = bmi_x[:-30]
bmi_y_train = bmi.Index[:-30]
bmi_y_test = bmi.Index[-30:]
model = linear_model.LinearRegression()
model.fit(bmi_x_train, bmi_y_train)
code
105200060/cell_1
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
bmi = pd.read_csv('../input/bmidataset/bmi.csv')
code
105200060/cell_15
[ "text_plain_output_1.png" ]
from sklearn import datasets, linear_model
import numpy as np
import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error
import os
bmi = pd.read_csv('../input/bmidataset/bmi.csv')
bmi_x = bmi.Weight[:, np.newaxis]
bmi_x_train = bmi_x[:-30]
bmi_x_test = bmi_x[-30:]
bmi_y_train = bmi.Index[:-30]
bmi_y_test = bmi.Index[-30:]
model = linear_model.LinearRegression()
model.fit(bmi_x_train, bmi_y_train)
bmi_y_predicted = model.predict(bmi_x_test)
print('Intercepts: ', model.intercept_)
code
105200060/cell_16
[ "text_plain_output_1.png" ]
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error
import os
bmi = pd.read_csv('../input/bmidataset/bmi.csv')
bmi_x = bmi.Weight[:, np.newaxis]
bmi_x_train = bmi_x[:-30]
bmi_x_test = bmi_x[-30:]
bmi_y_train = bmi.Index[:-30]
bmi_y_test = bmi.Index[-30:]
model = linear_model.LinearRegression()
model.fit(bmi_x_train, bmi_y_train)
bmi_y_predicted = model.predict(bmi_x_test)
plt.scatter(bmi_x_test, bmi_y_test)
plt.plot(bmi_x_test, bmi_y_predicted)
plt.show()
code
105200060/cell_3
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error
import os
bmi = pd.read_csv('../input/bmidataset/bmi.csv')
print(bmi.keys())
code
105200060/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import datasets, linear_model
import numpy as np
import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error
import os
bmi = pd.read_csv('../input/bmidataset/bmi.csv')
bmi_x = bmi.Weight[:, np.newaxis]
bmi_x_train = bmi_x[:-30]
bmi_x_test = bmi_x[-30:]
bmi_y_train = bmi.Index[:-30]
bmi_y_test = bmi.Index[-30:]
model = linear_model.LinearRegression()
model.fit(bmi_x_train, bmi_y_train)
bmi_y_predicted = model.predict(bmi_x_test)
print('Weights: ', model.coef_)
code
105200060/cell_5
[ "text_plain_output_1.png" ]
import numpy as np
import os
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error
import os
bmi = pd.read_csv('../input/bmidataset/bmi.csv')
bmi_x = bmi.Weight[:, np.newaxis]
code
104127328/cell_13
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.tree import DecisionTreeRegressor as dtr  # assumed alias: dtr is used below but its import was not captured with the cell
demo_data = pd.read_csv('/kaggle/input/pac-regression-4/PAC1954_Demo_default_data.csv')
num_samples = demo_data.shape[0]
split = round(num_samples * 0.7)
train_demo_data_x = np.array(demo_data[' Vsense_Ch1'].iloc[0:split]).reshape(-1, 1)
test_demo_data_x = np.array(demo_data[' Vsense_Ch1'].iloc[split + 1:num_samples]).reshape(-1, 1)
train_demo_data_y = np.array(demo_data['Count'].iloc[0:split]).reshape(-1, 1)
test_demo_data_y = np.array(demo_data['Count'].iloc[split + 1:num_samples]).reshape(-1, 1)

fig, ax = plt.subplots(figsize=(10,10))
ax.plot(train_demo_data_y[0:80], train_demo_data_x[0:80], 'bo', label='Initial Vsense Data') #, c = 'blue')
#ax.plot(train_demo_data_y[200:], train_demo_data_x[200:], 'r+', label='Additional Vsense Data') #, c = 'blue')
#ax.plot(x_values[0:max_plot], y[0:max_plot], label='Decision Tree Regressor', c = 'green')
#ax.plot(x_values[0:max_plot], error_detection[0:max_plot], label='Error', c = 'red')
plt.xlabel("Window Position")
plt.ylabel("Vsense")
plt.title("Input Data")
plt.legend(loc='center right')
plt.show()

dtr_model_demo_data = dtr(max_depth=8)
dtr_model_demo_data.fit(train_demo_data_y[0:80], train_demo_data_x[0:80])
prediction_demo_data_y = dtr_model_demo_data.predict(test_demo_data_y)
dtr_model_demo_data_all = dtr(max_depth=8)
dtr_model_demo_data_all.fit(train_demo_data_y, train_demo_data_x)
prediction_demo_data_all_y = dtr_model_demo_data_all.predict(test_demo_data_y)

# look at the data a little
#max_plot = test_demo_data_y.shape[0]
max_plot = 160
x_values = [(x+10) for x in range(0,max_plot)]

fig, ax = plt.subplots(figsize=(20,10))
ax.plot(x_values[0:max_plot], test_demo_data_x[0:max_plot], label='PAC Vsense Training Data', c = 'blue')
ax.plot(x_values[0:max_plot], prediction_demo_data_y[0:max_plot], label='Model 80 samples Prediction', c = 'red')
#ax.plot(x_values[0:max_plot], prediction_demo_data_all_y[0:max_plot], label='Model All Data Prediction', c = 'orange')
#ax.plot(x_values[0:max_plot], y[0:max_plot], label='Decision Tree Regressor', c = 'red')
#ax.plot(x_values[0:max_plot], error_detection[0:max_plot], label='Error', c = 'red')
plt.xlabel("Sample")
plt.ylabel("Vsense")
plt.title("Training Data and Model Prediction")
plt.legend()
plt.show()

fig, ax = plt.subplots(figsize=(10,10))
ax.plot(train_demo_data_y[0:80], train_demo_data_x[0:80], 'bo', label='Initial Vsense Data') #, c = 'blue')
ax.plot(train_demo_data_y[200:], train_demo_data_x[200:], 'r+', label='Additional Vsense Data') #, c = 'blue')
#ax.plot(x_values[0:max_plot], y[0:max_plot], label='Decision Tree Regressor', c = 'green')
#ax.plot(x_values[0:max_plot], error_detection[0:max_plot], label='Error', c = 'red')
plt.xlabel("Window Position")
plt.ylabel("Vsense")
plt.title("Input Data")
plt.legend(loc='center right')
plt.show()

max_plot = 160
x_values = [x + 10 for x in range(0, max_plot)]
fig, ax = plt.subplots(figsize=(20, 10))
ax.plot(x_values[0:max_plot], test_demo_data_x[0:max_plot], label='PAC Vsense Training Data', c='blue')
ax.plot(x_values[0:max_plot], prediction_demo_data_y[0:max_plot], label='Model 80 samples Prediction', c='red')
ax.plot(x_values[0:max_plot], prediction_demo_data_all_y[0:max_plot], label='Model All Data Prediction', c='orange')
plt.xlabel('Sample')
plt.ylabel('Vsense')
plt.title('Training Data and Model Predictions')
plt.legend()
plt.show()

ch2_train_data4 = pd.read_csv('/kaggle/input/pac-regression-4/PAC_regression_training4.csv')
ch2_test_data4 = pd.read_csv('/kaggle/input/pac-regression-4/PAC_regression_test4.csv')
train_vsense_ch1_data4_x = np.array(ch2_train_data4[' Vsense_Ch1']).reshape(-1, 1)
test_vsense_ch1_data4_x = np.array(ch2_test_data4[' Vsense_Ch1'].iloc[3:]).reshape(-1, 1)
train_y = np.array(ch2_train_data4['Count']).reshape(-1, 1)
test_y = np.array(ch2_test_data4['Count']).reshape(-1, 1)
fig, ax = plt.subplots(figsize=(10, 10))
ax.plot(ch2_train_data4['Count'], ch2_train_data4[' Vsense_Ch1'], 'bo', label='PAC Vsense Data')
plt.xlabel('Window Position')
plt.ylabel('Vsense')
plt.title('Input Data')
plt.legend()
plt.show()
code
104127328/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.tree import DecisionTreeRegressor as dtr  # assumed alias: dtr is used below but its import was not captured with the cell
demo_data = pd.read_csv('/kaggle/input/pac-regression-4/PAC1954_Demo_default_data.csv')
num_samples = demo_data.shape[0]
split = round(num_samples * 0.7)
train_demo_data_x = np.array(demo_data[' Vsense_Ch1'].iloc[0:split]).reshape(-1, 1)
test_demo_data_x = np.array(demo_data[' Vsense_Ch1'].iloc[split + 1:num_samples]).reshape(-1, 1)
train_demo_data_y = np.array(demo_data['Count'].iloc[0:split]).reshape(-1, 1)
test_demo_data_y = np.array(demo_data['Count'].iloc[split + 1:num_samples]).reshape(-1, 1)

fig, ax = plt.subplots(figsize=(10,10))
ax.plot(train_demo_data_y[0:80], train_demo_data_x[0:80], 'bo', label='Initial Vsense Data') #, c = 'blue')
#ax.plot(train_demo_data_y[200:], train_demo_data_x[200:], 'r+', label='Additional Vsense Data') #, c = 'blue')
#ax.plot(x_values[0:max_plot], y[0:max_plot], label='Decision Tree Regressor', c = 'green')
#ax.plot(x_values[0:max_plot], error_detection[0:max_plot], label='Error', c = 'red')
plt.xlabel("Window Position")
plt.ylabel("Vsense")
plt.title("Input Data")
plt.legend(loc='center right')
plt.show()

dtr_model_demo_data = dtr(max_depth=8)
dtr_model_demo_data.fit(train_demo_data_y[0:80], train_demo_data_x[0:80])
prediction_demo_data_y = dtr_model_demo_data.predict(test_demo_data_y)
dtr_model_demo_data_all = dtr(max_depth=8)
dtr_model_demo_data_all.fit(train_demo_data_y, train_demo_data_x)
prediction_demo_data_all_y = dtr_model_demo_data_all.predict(test_demo_data_y)

# look at the data a little
#max_plot = test_demo_data_y.shape[0]
max_plot = 160
x_values = [(x+10) for x in range(0,max_plot)]

fig, ax = plt.subplots(figsize=(20,10))
ax.plot(x_values[0:max_plot], test_demo_data_x[0:max_plot], label='PAC Vsense Training Data', c = 'blue')
ax.plot(x_values[0:max_plot], prediction_demo_data_y[0:max_plot], label='Model 80 samples Prediction', c = 'red')
#ax.plot(x_values[0:max_plot], prediction_demo_data_all_y[0:max_plot], label='Model All Data Prediction', c = 'orange')
#ax.plot(x_values[0:max_plot], y[0:max_plot], label='Decision Tree Regressor', c = 'red')
#ax.plot(x_values[0:max_plot], error_detection[0:max_plot], label='Error', c = 'red')
plt.xlabel("Sample")
plt.ylabel("Vsense")
plt.title("Training Data and Model Prediction")
plt.legend()
plt.show()

fig, ax = plt.subplots(figsize=(10, 10))
ax.plot(train_demo_data_y[0:80], train_demo_data_x[0:80], 'bo', label='Initial Vsense Data')
ax.plot(train_demo_data_y[200:], train_demo_data_x[200:], 'r+', label='Additional Vsense Data')
plt.xlabel('Window Position')
plt.ylabel('Vsense')
plt.title('Input Data')
plt.legend(loc='center right')
plt.show()
code
104127328/cell_4
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
demo_data = pd.read_csv('/kaggle/input/pac-regression-4/PAC1954_Demo_default_data.csv')
num_samples = demo_data.shape[0]
split = round(num_samples * 0.7)
train_demo_data_x = np.array(demo_data[' Vsense_Ch1'].iloc[0:split]).reshape(-1, 1)
test_demo_data_x = np.array(demo_data[' Vsense_Ch1'].iloc[split + 1:num_samples]).reshape(-1, 1)
train_demo_data_y = np.array(demo_data['Count'].iloc[0:split]).reshape(-1, 1)
test_demo_data_y = np.array(demo_data['Count'].iloc[split + 1:num_samples]).reshape(-1, 1)
fig, ax = plt.subplots(figsize=(10, 10))
ax.plot(train_demo_data_y[0:80], train_demo_data_x[0:80], 'bo', label='Initial Vsense Data')
plt.xlabel('Window Position')
plt.ylabel('Vsense')
plt.title('Input Data')
plt.legend(loc='center right')
plt.show()
code
104127328/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.tree import DecisionTreeRegressor as dtr  # assumed alias: dtr is used below but its import was not captured with the cell
demo_data = pd.read_csv('/kaggle/input/pac-regression-4/PAC1954_Demo_default_data.csv')
num_samples = demo_data.shape[0]
split = round(num_samples * 0.7)
train_demo_data_x = np.array(demo_data[' Vsense_Ch1'].iloc[0:split]).reshape(-1, 1)
test_demo_data_x = np.array(demo_data[' Vsense_Ch1'].iloc[split + 1:num_samples]).reshape(-1, 1)
train_demo_data_y = np.array(demo_data['Count'].iloc[0:split]).reshape(-1, 1)
test_demo_data_y = np.array(demo_data['Count'].iloc[split + 1:num_samples]).reshape(-1, 1)

fig, ax = plt.subplots(figsize=(10,10))
ax.plot(train_demo_data_y[0:80], train_demo_data_x[0:80], 'bo', label='Initial Vsense Data') #, c = 'blue')
#ax.plot(train_demo_data_y[200:], train_demo_data_x[200:], 'r+', label='Additional Vsense Data') #, c = 'blue')
#ax.plot(x_values[0:max_plot], y[0:max_plot], label='Decision Tree Regressor', c = 'green')
#ax.plot(x_values[0:max_plot], error_detection[0:max_plot], label='Error', c = 'red')
plt.xlabel("Window Position")
plt.ylabel("Vsense")
plt.title("Input Data")
plt.legend(loc='center right')
plt.show()

dtr_model_demo_data = dtr(max_depth=8)
dtr_model_demo_data.fit(train_demo_data_y[0:80], train_demo_data_x[0:80])
prediction_demo_data_y = dtr_model_demo_data.predict(test_demo_data_y)
dtr_model_demo_data_all = dtr(max_depth=8)
dtr_model_demo_data_all.fit(train_demo_data_y, train_demo_data_x)
prediction_demo_data_all_y = dtr_model_demo_data_all.predict(test_demo_data_y)
max_plot = 160
x_values = [x + 10 for x in range(0, max_plot)]
fig, ax = plt.subplots(figsize=(20, 10))
ax.plot(x_values[0:max_plot], test_demo_data_x[0:max_plot], label='PAC Vsense Training Data', c='blue')
ax.plot(x_values[0:max_plot], prediction_demo_data_y[0:max_plot], label='Model 80 samples Prediction', c='red')
plt.xlabel('Sample')
plt.ylabel('Vsense')
plt.title('Training Data and Model Prediction')
plt.legend()
plt.show()
code
104127328/cell_10
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.tree import DecisionTreeRegressor as dtr  # assumed alias: dtr is used below but its import was not captured with the cell
demo_data = pd.read_csv('/kaggle/input/pac-regression-4/PAC1954_Demo_default_data.csv')
num_samples = demo_data.shape[0]
split = round(num_samples * 0.7)
train_demo_data_x = np.array(demo_data[' Vsense_Ch1'].iloc[0:split]).reshape(-1, 1)
test_demo_data_x = np.array(demo_data[' Vsense_Ch1'].iloc[split + 1:num_samples]).reshape(-1, 1)
train_demo_data_y = np.array(demo_data['Count'].iloc[0:split]).reshape(-1, 1)
test_demo_data_y = np.array(demo_data['Count'].iloc[split + 1:num_samples]).reshape(-1, 1)

fig, ax = plt.subplots(figsize=(10,10))
ax.plot(train_demo_data_y[0:80], train_demo_data_x[0:80], 'bo', label='Initial Vsense Data') #, c = 'blue')
#ax.plot(train_demo_data_y[200:], train_demo_data_x[200:], 'r+', label='Additional Vsense Data') #, c = 'blue')
#ax.plot(x_values[0:max_plot], y[0:max_plot], label='Decision Tree Regressor', c = 'green')
#ax.plot(x_values[0:max_plot], error_detection[0:max_plot], label='Error', c = 'red')
plt.xlabel("Window Position")
plt.ylabel("Vsense")
plt.title("Input Data")
plt.legend(loc='center right')
plt.show()

dtr_model_demo_data = dtr(max_depth=8)
dtr_model_demo_data.fit(train_demo_data_y[0:80], train_demo_data_x[0:80])
prediction_demo_data_y = dtr_model_demo_data.predict(test_demo_data_y)
dtr_model_demo_data_all = dtr(max_depth=8)
dtr_model_demo_data_all.fit(train_demo_data_y, train_demo_data_x)
prediction_demo_data_all_y = dtr_model_demo_data_all.predict(test_demo_data_y)

# look at the data a little
#max_plot = test_demo_data_y.shape[0]
max_plot = 160
x_values = [(x+10) for x in range(0,max_plot)]

fig, ax = plt.subplots(figsize=(20,10))
ax.plot(x_values[0:max_plot], test_demo_data_x[0:max_plot], label='PAC Vsense Training Data', c = 'blue')
ax.plot(x_values[0:max_plot], prediction_demo_data_y[0:max_plot], label='Model 80 samples Prediction', c = 'red')
#ax.plot(x_values[0:max_plot], prediction_demo_data_all_y[0:max_plot], label='Model All Data Prediction', c = 'orange')
#ax.plot(x_values[0:max_plot], y[0:max_plot], label='Decision Tree Regressor', c = 'red')
#ax.plot(x_values[0:max_plot], error_detection[0:max_plot], label='Error', c = 'red')
plt.xlabel("Sample")
plt.ylabel("Vsense")
plt.title("Training Data and Model Prediction")
plt.legend()
plt.show()

fig, ax = plt.subplots(figsize=(10,10))
ax.plot(train_demo_data_y[0:80], train_demo_data_x[0:80], 'bo', label='Initial Vsense Data') #, c = 'blue')
ax.plot(train_demo_data_y[200:], train_demo_data_x[200:], 'r+', label='Additional Vsense Data') #, c = 'blue')
#ax.plot(x_values[0:max_plot], y[0:max_plot], label='Decision Tree Regressor', c = 'green')
#ax.plot(x_values[0:max_plot], error_detection[0:max_plot], label='Error', c = 'red')
plt.xlabel("Window Position")
plt.ylabel("Vsense")
plt.title("Input Data")
plt.legend(loc='center right')
plt.show()

max_plot = 160
x_values = [x + 10 for x in range(0, max_plot)]
fig, ax = plt.subplots(figsize=(20, 10))
ax.plot(x_values[0:max_plot], test_demo_data_x[0:max_plot], label='PAC Vsense Training Data', c='blue')
ax.plot(x_values[0:max_plot], prediction_demo_data_y[0:max_plot], label='Model 80 samples Prediction', c='red')
ax.plot(x_values[0:max_plot], prediction_demo_data_all_y[0:max_plot], label='Model All Data Prediction', c='orange')
plt.xlabel('Sample')
plt.ylabel('Vsense')
plt.title('Training Data and Model Predictions')
plt.legend()
plt.show()
code
1008698/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from collections import OrderedDict
from collections import OrderedDict
import csv
import csv
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import csv
from collections import OrderedDict
import pandas as pd
with open('../input/documents_meta.csv', 'r') as f:
    r = csv.reader(f)
    dict2 = {row[0]: row[1:] for row in r}
with open('../input/promoted_content.csv', 'r') as f:
    r = csv.reader(f)
    dict1 = OrderedDict(((row[0], row[1:]) for row in r))
result = OrderedDict()
for d in (dict1, dict2):
    for key, value in d.items():
        result.setdefault(key, []).extend(value)
with open('ab_combined.csv', 'w') as f:
    w = csv.writer(f)
    for key, value in result.items():
        w.writerow([key] + value)
data = pd.read_csv('ab_combined.csv', low_memory=False)
data['ad_id']
import csv
from collections import OrderedDict
import pandas as pd
with open('../input/documents_meta.csv', 'r') as f:
    r = csv.reader(f)
    dict2 = {row[0]: row[1:] for row in r}
with open('../input/promoted_content.csv', 'r') as f:
    r = csv.reader(f)
    dict1 = OrderedDict(((row[0], row[1:]) for row in r))
result = OrderedDict()
for d in (dict1, dict2):
    for key, value in d.items():
        result.setdefault(key, []).extend(value)
with open('ab_combined.csv', 'w') as f:
    w = csv.writer(f)
    for key, value in result.items():
        w.writerow([key] + value)
data = pd.read_csv('ab_combined.csv', low_memory=False)
print(data[0])
code
1008698/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
1008698/cell_3
[ "text_plain_output_1.png" ]
from collections import OrderedDict
import csv
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import csv
from collections import OrderedDict
import pandas as pd
with open('../input/documents_meta.csv', 'r') as f:
    r = csv.reader(f)
    dict2 = {row[0]: row[1:] for row in r}
with open('../input/promoted_content.csv', 'r') as f:
    r = csv.reader(f)
    dict1 = OrderedDict(((row[0], row[1:]) for row in r))
result = OrderedDict()
for d in (dict1, dict2):
    for key, value in d.items():
        result.setdefault(key, []).extend(value)
with open('ab_combined.csv', 'w') as f:
    w = csv.writer(f)
    for key, value in result.items():
        w.writerow([key] + value)
data = pd.read_csv('ab_combined.csv', low_memory=False)
data['ad_id']
code
73073830/cell_21
[ "text_plain_output_1.png" ]
from scipy import stats
from scipy.stats import norm
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu']
train = train.drop(to_drop, axis=1)
test = test.drop(to_drop, axis=1)

#histogram and normal probability plot
from scipy.stats import norm
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
sns.distplot(train['SalePrice'], fit=norm);
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
quantitative = [f for f in train.columns if train.dtypes[f] != 'object']
quantitative.remove('SalePrice')
quantitative.remove('Id')
qualitative = [f for f in train.columns if train.dtypes[f] == 'object']
f = pd.melt(train, value_vars=quantitative)
g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False)
g = g.map(sns.distplot, 'value')
quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF']
qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure']
target = ['SalePrice']
need_id = ['Id']
train = train[quant_features + qual_features + target + need_id]
test = test[quant_features + qual_features + need_id]
train
GarageFinish1 = train['SalePrice'].groupby(train['GarageFinish']).mean()
[GarageFinish1.sort_values().index]
code
73073830/cell_25
[ "text_plain_output_1.png" ]
from scipy import stats
from scipy.stats import norm
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu']
train = train.drop(to_drop, axis=1)
test = test.drop(to_drop, axis=1)

#histogram and normal probability plot
from scipy.stats import norm
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
sns.distplot(train['SalePrice'], fit=norm);
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
quantitative = [f for f in train.columns if train.dtypes[f] != 'object']
quantitative.remove('SalePrice')
quantitative.remove('Id')
qualitative = [f for f in train.columns if train.dtypes[f] == 'object']
f = pd.melt(train, value_vars=quantitative)
g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False)
g = g.map(sns.distplot, 'value')
quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF']
qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure']
target = ['SalePrice']
need_id = ['Id']
train = train[quant_features + qual_features + target + need_id]
test = test[quant_features + qual_features + need_id]
train
HeatingQC1 = train['SalePrice'].groupby(train['HeatingQC']).mean()
[HeatingQC1.sort_values().index]
code
73073830/cell_23
[ "text_plain_output_1.png" ]
from scipy import stats
from scipy.stats import norm
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu']
train = train.drop(to_drop, axis=1)
test = test.drop(to_drop, axis=1)

#histogram and normal probability plot
from scipy.stats import norm
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
sns.distplot(train['SalePrice'], fit=norm);
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
quantitative = [f for f in train.columns if train.dtypes[f] != 'object']
quantitative.remove('SalePrice')
quantitative.remove('Id')
qualitative = [f for f in train.columns if train.dtypes[f] == 'object']
f = pd.melt(train, value_vars=quantitative)
g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False)
g = g.map(sns.distplot, 'value')
quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF']
qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure']
target = ['SalePrice']
need_id = ['Id']
train = train[quant_features + qual_features + target + need_id]
test = test[quant_features + qual_features + need_id]
train
GarageType1 = train['SalePrice'].groupby(train['GarageType']).mean()
[GarageType1.sort_values().index]
code
73073830/cell_33
[ "text_plain_output_1.png" ]
from scipy import stats
from scipy.stats import norm
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu']
train = train.drop(to_drop, axis=1)
test = test.drop(to_drop, axis=1)

#histogram and normal probability plot
from scipy.stats import norm
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
sns.distplot(train['SalePrice'], fit=norm);
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
quantitative = [f for f in train.columns if train.dtypes[f] != 'object']
quantitative.remove('SalePrice')
quantitative.remove('Id')
qualitative = [f for f in train.columns if train.dtypes[f] == 'object']
f = pd.melt(train, value_vars=quantitative)
g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False)
g = g.map(sns.distplot, 'value')
quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF']
qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure']
target = ['SalePrice']
need_id = ['Id']
train = train[quant_features + qual_features + target + need_id]
test = test[quant_features + qual_features + need_id]
train
(train.shape, test.shape)
train
code
73073830/cell_20
[ "text_html_output_1.png" ]
from scipy import stats
from scipy.stats import norm
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu']
train = train.drop(to_drop, axis=1)
test = test.drop(to_drop, axis=1)

#histogram and normal probability plot
from scipy.stats import norm
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
sns.distplot(train['SalePrice'], fit=norm);
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
quantitative = [f for f in train.columns if train.dtypes[f] != 'object']
quantitative.remove('SalePrice')
quantitative.remove('Id')
qualitative = [f for f in train.columns if train.dtypes[f] == 'object']
f = pd.melt(train, value_vars=quantitative)
g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False)
g = g.map(sns.distplot, 'value')
quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF']
qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure']
target = ['SalePrice']
need_id = ['Id']
train = train[quant_features + qual_features + target + need_id]
test = test[quant_features + qual_features + need_id]
train
BsmtQual1 = train['SalePrice'].groupby(train['BsmtQual']).mean()
[BsmtQual1.sort_values().index]
code
73073830/cell_6
[ "text_plain_output_1.png" ]
from scipy import stats
from scipy.stats import norm
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu']
train = train.drop(to_drop, axis=1)
test = test.drop(to_drop, axis=1)

#histogram and normal probability plot
from scipy.stats import norm
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
sns.distplot(train['SalePrice'], fit=norm);
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
quantitative = [f for f in train.columns if train.dtypes[f] != 'object']
quantitative.remove('SalePrice')
quantitative.remove('Id')
qualitative = [f for f in train.columns if train.dtypes[f] == 'object']
f = pd.melt(train, value_vars=quantitative)
g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False)
g = g.map(sns.distplot, 'value')
code
73073830/cell_29
[ "text_plain_output_1.png" ]
from scipy import stats
from scipy.stats import norm
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu']
train = train.drop(to_drop, axis=1)
test = test.drop(to_drop, axis=1)

#histogram and normal probability plot
from scipy.stats import norm
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
sns.distplot(train['SalePrice'], fit=norm);
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
quantitative = [f for f in train.columns if train.dtypes[f] != 'object']
quantitative.remove('SalePrice')
quantitative.remove('Id')
qualitative = [f for f in train.columns if train.dtypes[f] == 'object']
f = pd.melt(train, value_vars=quantitative)
g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False)
g = g.map(sns.distplot, 'value')
quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF']
qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure']
target = ['SalePrice']
need_id = ['Id']
train = train[quant_features + qual_features + target + need_id]
test = test[quant_features + qual_features + need_id]
train
(train[qual_features].shape, test[qual_features].shape)
code
73073830/cell_26
[ "text_plain_output_1.png" ]
from scipy import stats from scipy.stats import norm import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'] train = train.drop(to_drop, axis=1) test = test.drop(to_drop, axis=1) #histogram and normal probability plot from scipy.stats import norm import seaborn as sns import matplotlib.pyplot as plt from scipy import stats import warnings warnings.filterwarnings('ignore') sns.distplot(train['SalePrice'], fit=norm); fig = plt.figure() res = stats.probplot(train['SalePrice'], plot=plt) quantitative = [f for f in train.columns if train.dtypes[f] != 'object'] quantitative.remove('SalePrice') quantitative.remove('Id') qualitative = [f for f in train.columns if train.dtypes[f] == 'object'] f = pd.melt(train, value_vars=quantitative) g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False) g = g.map(sns.distplot, 'value') quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF'] qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure'] target = ['SalePrice'] need_id = ['Id'] train = train[quant_features + qual_features + target + need_id] test = test[quant_features + qual_features + need_id] train MasVnrType1 = train['SalePrice'].groupby(train['MasVnrType']).mean() [MasVnrType1.sort_values().index]
code
73073830/cell_19
[ "image_output_1.png" ]
from scipy import stats from scipy.stats import norm import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'] train = train.drop(to_drop, axis=1) test = test.drop(to_drop, axis=1) #histogram and normal probability plot from scipy.stats import norm import seaborn as sns import matplotlib.pyplot as plt from scipy import stats import warnings warnings.filterwarnings('ignore') sns.distplot(train['SalePrice'], fit=norm); fig = plt.figure() res = stats.probplot(train['SalePrice'], plot=plt) quantitative = [f for f in train.columns if train.dtypes[f] != 'object'] quantitative.remove('SalePrice') quantitative.remove('Id') qualitative = [f for f in train.columns if train.dtypes[f] == 'object'] f = pd.melt(train, value_vars=quantitative) g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False) g = g.map(sns.distplot, 'value') quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF'] qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure'] target = ['SalePrice'] need_id = ['Id'] train = train[quant_features + qual_features + target + need_id] test = test[quant_features + qual_features + need_id] train ExterQual1 = train['SalePrice'].groupby(train['ExterQual']).mean() [ExterQual1.sort_values().index]
code
73073830/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
73073830/cell_7
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'] train = train.drop(to_drop, axis=1) test = test.drop(to_drop, axis=1) test
code
73073830/cell_18
[ "image_output_1.png" ]
from scipy import stats from scipy.stats import norm import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'] train = train.drop(to_drop, axis=1) test = test.drop(to_drop, axis=1) #histogram and normal probability plot from scipy.stats import norm import seaborn as sns import matplotlib.pyplot as plt from scipy import stats import warnings warnings.filterwarnings('ignore') sns.distplot(train['SalePrice'], fit=norm); fig = plt.figure() res = stats.probplot(train['SalePrice'], plot=plt) quantitative = [f for f in train.columns if train.dtypes[f] != 'object'] quantitative.remove('SalePrice') quantitative.remove('Id') qualitative = [f for f in train.columns if train.dtypes[f] == 'object'] f = pd.melt(train, value_vars=quantitative) g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False) g = g.map(sns.distplot, 'value') def anova(frame): anv = pd.DataFrame() anv['feature'] = qualitative pvals = [] for c in qualitative: samples = [] for cls in frame[c].unique(): s = frame[frame[c] == cls]['SalePrice'].values samples.append(s) pval = stats.f_oneway(*samples)[1] pvals.append(pval) anv['pval'] = pvals return anv.sort_values('pval') a = anova(train) a['disparity'] = np.log(1.0 / a['pval'].values) x = plt.xticks(rotation=90) def nova(frame): anv = pd.DataFrame() anv['feature'] = quantitative pvals = [] for c in quantitative: samples = [] for cls in frame[c].unique(): s = frame[frame[c] == cls]['SalePrice'].values samples.append(s) pval = stats.f_oneway(*samples)[1] pvals.append(pval) anv['pval'] = pvals return anv.sort_values('pval') a = nova(train) a['disparity'] = np.log(1.0 / a['pval'].values) x = plt.xticks(rotation=90) quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF'] qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure'] target = ['SalePrice'] need_id = ['Id'] train = train[quant_features + qual_features + target + need_id] test = test[quant_features + qual_features + need_id] train sns.catplot(x='Neighborhood', y='SalePrice', kind='bar', data=train) x = plt.xticks(rotation=90)
code
73073830/cell_32
[ "text_plain_output_1.png" ]
from scipy import stats from scipy.stats import norm import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'] train = train.drop(to_drop, axis=1) test = test.drop(to_drop, axis=1) #histogram and normal probability plot from scipy.stats import norm import seaborn as sns import matplotlib.pyplot as plt from scipy import stats import warnings warnings.filterwarnings('ignore') sns.distplot(train['SalePrice'], fit=norm); fig = plt.figure() res = stats.probplot(train['SalePrice'], plot=plt) quantitative = [f for f in train.columns if train.dtypes[f] != 'object'] quantitative.remove('SalePrice') quantitative.remove('Id') qualitative = [f for f in train.columns if train.dtypes[f] == 'object'] f = pd.melt(train, value_vars=quantitative) g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False) g = g.map(sns.distplot, 'value') quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF'] qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure'] target = ['SalePrice'] need_id = ['Id'] train = train[quant_features + qual_features + target + need_id] test = test[quant_features + qual_features + need_id] train (train.shape, test.shape)
code
73073830/cell_16
[ "image_output_1.png" ]
from scipy import stats from scipy.stats import norm import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'] train = train.drop(to_drop, axis=1) test = test.drop(to_drop, axis=1) #histogram and normal probability plot from scipy.stats import norm import seaborn as sns import matplotlib.pyplot as plt from scipy import stats import warnings warnings.filterwarnings('ignore') sns.distplot(train['SalePrice'], fit=norm); fig = plt.figure() res = stats.probplot(train['SalePrice'], plot=plt) quantitative = [f for f in train.columns if train.dtypes[f] != 'object'] quantitative.remove('SalePrice') quantitative.remove('Id') qualitative = [f for f in train.columns if train.dtypes[f] == 'object'] f = pd.melt(train, value_vars=quantitative) g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False) g = g.map(sns.distplot, 'value') quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF'] qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure'] target = ['SalePrice'] need_id = ['Id'] train = train[quant_features + qual_features + target + need_id] test = test[quant_features + qual_features + need_id] train
code
73073830/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) missing_data.head(20)
code
73073830/cell_17
[ "text_html_output_1.png" ]
from scipy import stats from scipy.stats import norm import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'] train = train.drop(to_drop, axis=1) test = test.drop(to_drop, axis=1) #histogram and normal probability plot from scipy.stats import norm import seaborn as sns import matplotlib.pyplot as plt from scipy import stats import warnings warnings.filterwarnings('ignore') sns.distplot(train['SalePrice'], fit=norm); fig = plt.figure() res = stats.probplot(train['SalePrice'], plot=plt) quantitative = [f for f in train.columns if train.dtypes[f] != 'object'] quantitative.remove('SalePrice') quantitative.remove('Id') qualitative = [f for f in train.columns if train.dtypes[f] == 'object'] f = pd.melt(train, value_vars=quantitative) g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False) g = g.map(sns.distplot, 'value') quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF'] qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure'] target = ['SalePrice'] need_id = ['Id'] train = train[quant_features + qual_features + target + need_id] test = test[quant_features + qual_features + need_id] train neigh1 = train['SalePrice'].groupby(train['Neighborhood']).mean() [neigh1.sort_values().index]
code
73073830/cell_35
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.metrics import mean_squared_log_error from sklearn.linear_model import LogisticRegression from sklearn.metrics import mean_squared_log_error logreg = LogisticRegression(random_state=0, solver='liblinear').fit(X_train, y_train) y_pred = logreg.predict(X_test) mean_squared_log_error(y_test, y_pred)
code
73073830/cell_24
[ "text_plain_output_1.png" ]
from scipy import stats from scipy.stats import norm import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'] train = train.drop(to_drop, axis=1) test = test.drop(to_drop, axis=1) #histogram and normal probability plot from scipy.stats import norm import seaborn as sns import matplotlib.pyplot as plt from scipy import stats import warnings warnings.filterwarnings('ignore') sns.distplot(train['SalePrice'], fit=norm); fig = plt.figure() res = stats.probplot(train['SalePrice'], plot=plt) quantitative = [f for f in train.columns if train.dtypes[f] != 'object'] quantitative.remove('SalePrice') quantitative.remove('Id') qualitative = [f for f in train.columns if train.dtypes[f] == 'object'] f = pd.melt(train, value_vars=quantitative) g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False) g = g.map(sns.distplot, 'value') quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF'] qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure'] target = ['SalePrice'] need_id = ['Id'] train = train[quant_features + qual_features + target + need_id] test = test[quant_features + qual_features + need_id] train BsmtFinType11 = train['SalePrice'].groupby(train['BsmtFinType1']).mean() [BsmtFinType11.sort_values().index]
code
73073830/cell_14
[ "image_output_2.png", "image_output_1.png" ]
from scipy import stats from scipy.stats import norm import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'] train = train.drop(to_drop, axis=1) test = test.drop(to_drop, axis=1) #histogram and normal probability plot from scipy.stats import norm import seaborn as sns import matplotlib.pyplot as plt from scipy import stats import warnings warnings.filterwarnings('ignore') sns.distplot(train['SalePrice'], fit=norm); fig = plt.figure() res = stats.probplot(train['SalePrice'], plot=plt) quantitative = [f for f in train.columns if train.dtypes[f] != 'object'] quantitative.remove('SalePrice') quantitative.remove('Id') qualitative = [f for f in train.columns if train.dtypes[f] == 'object'] f = pd.melt(train, value_vars=quantitative) g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False) g = g.map(sns.distplot, 'value') def anova(frame): anv = pd.DataFrame() anv['feature'] = qualitative pvals = [] for c in qualitative: samples = [] for cls in frame[c].unique(): s = frame[frame[c] == cls]['SalePrice'].values samples.append(s) pval = stats.f_oneway(*samples)[1] pvals.append(pval) anv['pval'] = pvals return anv.sort_values('pval') a = anova(train) a['disparity'] = np.log(1.0 / a['pval'].values) x = plt.xticks(rotation=90) def nova(frame): anv = pd.DataFrame() anv['feature'] = quantitative pvals = [] for c in quantitative: samples = [] for cls in frame[c].unique(): s = frame[frame[c] == cls]['SalePrice'].values samples.append(s) pval = stats.f_oneway(*samples)[1] pvals.append(pval) anv['pval'] = pvals return anv.sort_values('pval') a = nova(train) a['disparity'] = np.log(1.0 / a['pval'].values) sns.barplot(data=a, x='feature', y='disparity') x = plt.xticks(rotation=90)
code
73073830/cell_22
[ "image_output_1.png" ]
from scipy import stats from scipy.stats import norm import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'] train = train.drop(to_drop, axis=1) test = test.drop(to_drop, axis=1) #histogram and normal probability plot from scipy.stats import norm import seaborn as sns import matplotlib.pyplot as plt from scipy import stats import warnings warnings.filterwarnings('ignore') sns.distplot(train['SalePrice'], fit=norm); fig = plt.figure() res = stats.probplot(train['SalePrice'], plot=plt) quantitative = [f for f in train.columns if train.dtypes[f] != 'object'] quantitative.remove('SalePrice') quantitative.remove('Id') qualitative = [f for f in train.columns if train.dtypes[f] == 'object'] f = pd.melt(train, value_vars=quantitative) g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False) g = g.map(sns.distplot, 'value') quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF'] qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure'] target = ['SalePrice'] need_id = ['Id'] train = train[quant_features + qual_features + target + need_id] test = test[quant_features + qual_features + need_id] train Foundation1 = train['SalePrice'].groupby(train['Foundation']).mean() [Foundation1.sort_values().index]
code
73073830/cell_27
[ "text_plain_output_1.png" ]
from scipy import stats from scipy.stats import norm import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'] train = train.drop(to_drop, axis=1) test = test.drop(to_drop, axis=1) #histogram and normal probability plot from scipy.stats import norm import seaborn as sns import matplotlib.pyplot as plt from scipy import stats import warnings warnings.filterwarnings('ignore') sns.distplot(train['SalePrice'], fit=norm); fig = plt.figure() res = stats.probplot(train['SalePrice'], plot=plt) quantitative = [f for f in train.columns if train.dtypes[f] != 'object'] quantitative.remove('SalePrice') quantitative.remove('Id') qualitative = [f for f in train.columns if train.dtypes[f] == 'object'] f = pd.melt(train, value_vars=quantitative) g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False) g = g.map(sns.distplot, 'value') quant_features = ['OverallQual', 'GarageCars', 'GarageArea', 'FullBath', 'YearBuilt', 'TotRmsAbvGrd', 'GarageYrBlt', 'GrLivArea', 'TotalBsmtSF', 'Fireplaces', 'MSSubClass', 'YearRemodAdd', 'LotFrontage', '1stFlrSF'] qual_features = ['Neighborhood', 'ExterQual', 'BsmtQual', 'GarageFinish', 'Foundation', 'GarageType', 'BsmtFinType1', 'HeatingQC', 'MasVnrType', 'BsmtExposure'] target = ['SalePrice'] need_id = ['Id'] train = train[quant_features + qual_features + target + need_id] test = test[quant_features + qual_features + need_id] train BsmtExposure1 = train['SalePrice'].groupby(train['BsmtExposure']).mean() [BsmtExposure1.sort_values().index]
code
73073830/cell_12
[ "text_html_output_1.png" ]
from scipy import stats from scipy.stats import norm import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'] train = train.drop(to_drop, axis=1) test = test.drop(to_drop, axis=1) #histogram and normal probability plot from scipy.stats import norm import seaborn as sns import matplotlib.pyplot as plt from scipy import stats import warnings warnings.filterwarnings('ignore') sns.distplot(train['SalePrice'], fit=norm); fig = plt.figure() res = stats.probplot(train['SalePrice'], plot=plt) quantitative = [f for f in train.columns if train.dtypes[f] != 'object'] quantitative.remove('SalePrice') quantitative.remove('Id') qualitative = [f for f in train.columns if train.dtypes[f] == 'object'] f = pd.melt(train, value_vars=quantitative) g = sns.FacetGrid(f, col='variable', col_wrap=2, sharex=False, sharey=False) g = g.map(sns.distplot, 'value') def anova(frame): anv = pd.DataFrame() anv['feature'] = qualitative pvals = [] for c in qualitative: samples = [] for cls in frame[c].unique(): s = frame[frame[c] == cls]['SalePrice'].values samples.append(s) pval = stats.f_oneway(*samples)[1] pvals.append(pval) anv['pval'] = pvals return anv.sort_values('pval') a = anova(train) a['disparity'] = np.log(1.0 / a['pval'].values) sns.barplot(data=a, x='feature', y='disparity') x = plt.xticks(rotation=90)
code
73073830/cell_5
[ "text_html_output_1.png" ]
from scipy import stats from scipy.stats import norm import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import warnings train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') total = train.isnull().sum().sort_values(ascending=False) percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) to_drop = ['PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'] train = train.drop(to_drop, axis=1) test = test.drop(to_drop, axis=1) from scipy.stats import norm import seaborn as sns import matplotlib.pyplot as plt from scipy import stats import warnings warnings.filterwarnings('ignore') sns.distplot(train['SalePrice'], fit=norm) fig = plt.figure() res = stats.probplot(train['SalePrice'], plot=plt)
code
73073830/cell_36
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_log_error from xgboost import XGBRegressor from xgboost import XGBRegressor cat1 = XGBRegressor(n_estimators=500, learning_rate=0.1, early_stopping_rounds=5, max_depth=3).fit(X_train, y_train) y_cat = cat1.predict(X_test) mean_squared_log_error(y_test, y_cat)
code
122256334/cell_13
[ "text_html_output_1.png" ]
import pandas as pd portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') data = pd.concat([portugese_student, mat_student], axis=0) data.shape data.duplicated().sum()
code
122256334/cell_9
[ "image_output_1.png" ]
import pandas as pd portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') data = pd.concat([portugese_student, mat_student], axis=0) data.head()
code
122256334/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') data = pd.concat([portugese_student, mat_student], axis=0) data.shape data.duplicated().sum() data.isnull().sum() data.nunique() keys = ['F', 'M'] sns.countplot(x='age', data=data) plt.show()
code
122256334/cell_6
[ "image_output_1.png" ]
import pandas as pd portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') mat_student.head()
code
122256334/cell_29
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') data = pd.concat([portugese_student, mat_student], axis=0) data.shape data.duplicated().sum() data.isnull().sum() data.nunique() keys = ['F', 'M'] sns.countplot(x='Pstatus', hue='sex', data=data) plt.show()
code
122256334/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') data = pd.concat([portugese_student, mat_student], axis=0) data.shape data.duplicated().sum() data.isnull().sum() data.nunique() keys = ['F', 'M'] sns.countplot(x='famsize', hue='sex', data=data) plt.show()
code
122256334/cell_11
[ "text_html_output_1.png" ]
import pandas as pd portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') data = pd.concat([portugese_student, mat_student], axis=0) data.shape data.info()
code
122256334/cell_7
[ "image_output_1.png" ]
import pandas as pd portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') print(portugese_student.columns) print(mat_student.columns)
code
122256334/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') data = pd.concat([portugese_student, mat_student], axis=0) data.shape data.duplicated().sum() data.isnull().sum() data.nunique() print(data['sex'].value_counts()) keys = ['F', 'M'] plt.pie(data['sex'].value_counts(), labels=keys) plt.legend() plt.show()
code
122256334/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') data = pd.concat([portugese_student, mat_student], axis=0) data.shape data.duplicated().sum() data.isnull().sum() data.nunique()
code
122256334/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') data = pd.concat([portugese_student, mat_student], axis=0) data.shape data.duplicated().sum() data.isnull().sum() data.nunique() keys = ['F', 'M'] sns.countplot(x='school', hue='sex', data=data) plt.show()
code
122256334/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') data = pd.concat([portugese_student, mat_student], axis=0) data.shape data.duplicated().sum() data.isnull().sum()
code
122256334/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') data = pd.concat([portugese_student, mat_student], axis=0) data.shape data.duplicated().sum() data.isnull().sum() data.nunique() keys = ['F', 'M'] sns.countplot(x='age', hue='sex', data=data) plt.show()
code
122256334/cell_10
[ "text_html_output_1.png" ]
import pandas as pd portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') data = pd.concat([portugese_student, mat_student], axis=0) data.shape
code
122256334/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') mat_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-mat.csv') data = pd.concat([portugese_student, mat_student], axis=0) data.shape data.describe().transpose()
code
122256334/cell_5
[ "image_output_1.png" ]
import pandas as pd portugese_student = pd.read_csv('/kaggle/input/student-alcohol-consumption/student-por.csv') portugese_student.head(5)
code
17130585/cell_21
[ "text_plain_output_1.png" ]
import shutil import shutil shutil.make_archive('images', 'zip', '../gen_images/')
code
17130585/cell_9
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import img_to_array, load_img temp_img = load_img('../input/all-dogs/all-dogs/n02085620_10074.jpg') temp_img_array = img_to_array(temp_img) temp_img
code
17130585/cell_20
[ "text_plain_output_1.png" ]
from PIL import Image from keras.layers import Dense, Activation, Reshape from keras.layers import Flatten, Dropout from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import UpSampling2D, Conv2D from keras.layers.normalization import BatchNormalization from keras.models import Sequential from keras.optimizers import Adam from keras.preprocessing import image from keras.preprocessing.image import img_to_array, load_img from tqdm import tqdm import math import numpy as np import numpy as np # linear algebra import os import os import numpy as np import pandas as pd import os img_list = os.listdir('../input/all-dogs/all-dogs/') def generator_model(): model = Sequential() model.add(Dense(input_dim=100, units=1024)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dense(32 * 32 * 128)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Reshape((32, 32, 128), input_shape=(32 * 32 * 128,))) model.add(UpSampling2D((2, 2))) model.add(Conv2D(64, (5, 5), padding='same')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(UpSampling2D((2, 2))) model.add(Conv2D(3, (5, 5), padding='same')) model.add(Activation('tanh')) return model def discriminator_model(): model = Sequential() model.add(Conv2D(64, (5, 5), strides=(2, 2), input_shape=(128, 128, 3), padding='same')) model.add(LeakyReLU(0.2)) model.add(Conv2D(128, (5, 5), strides=(2, 2))) model.add(LeakyReLU(0.2)) model.add(Flatten()) model.add(Dense(256)) model.add(LeakyReLU(0.2)) model.add(Dropout(0.5)) model.add(Dense(1)) model.add(Activation('sigmoid')) return model def combine_images(generated_images): total = generated_images.shape[0] cols = int(math.sqrt(total)) rows = math.ceil(float(total) / cols) width, height, ch = generated_images.shape[1:] output_shape = (height * rows, width * cols, ch) combined_image = np.zeros(output_shape) for index, image in enumerate(generated_images): i = int(index / cols) j = index % cols combined_image[width * i:width * (i + 1), height * j:height * (j + 1)] = image[:, :, :] return combined_image TRAIN_IMAGE_PATH = '../input/all-dogs/all-dogs/' GENERATED_IMAGE_PATH = '../working/images/' GEN_GENERATED_IMAGE_PATH = '../gen_images/' img_list = os.listdir(TRAIN_IMAGE_PATH) X_train = [] for img in img_list: img = img_to_array(load_img(TRAIN_IMAGE_PATH + img, target_size=(128, 128, 3))) img = (img.astype(np.float32) - 127.5) / 127.5 X_train.append(img) X_train = np.array(X_train) discriminator = discriminator_model() d_opt = Adam(lr=1e-05, beta_1=0.1) discriminator.compile(loss='binary_crossentropy', optimizer=d_opt) discriminator.trainable = False generator = generator_model() dcgan = Sequential([generator, discriminator]) g_opt = Adam(lr=0.0002, beta_1=0.5) dcgan.compile(loss='binary_crossentropy', optimizer=g_opt) BATCH_SIZE = 128 NUM_EPOCH = 200 num_batches = int(X_train.shape[0] / BATCH_SIZE) for epoch in tqdm(range(NUM_EPOCH)): for index in range(num_batches): noise = np.array([np.random.uniform(-1, 1, 100) for _ in range(BATCH_SIZE)]) image_batch = X_train[index * BATCH_SIZE:(index + 1) * BATCH_SIZE] generated_images = generator.predict(noise, verbose=0, batch_size=BATCH_SIZE) if not os.path.exists(GEN_GENERATED_IMAGE_PATH): os.mkdir(GEN_GENERATED_IMAGE_PATH) if epoch == 200 and index > 59: generated_images_v = generated_images * 127.5 + 127.5 for j in range(100): Image.fromarray((generated_images_v[j] * 127.5 + 127.5).astype(np.uint8)).save(GEN_GENERATED_IMAGE_PATH + '%04d_%04d_%04d.png' % (epoch, index, j)) X = np.concatenate((image_batch, generated_images)) y = [1] * BATCH_SIZE + [0] * BATCH_SIZE d_loss = discriminator.train_on_batch(X, y) noise = np.array([np.random.uniform(-1, 1, 100) for _ in range(BATCH_SIZE)]) g_loss = dcgan.train_on_batch(noise, [1] * BATCH_SIZE) print(os.listdir(GENERATED_IMAGE_PATH))
code
17130585/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os import os import numpy as np import pandas as pd import os img_list = os.listdir('../input/all-dogs/all-dogs/') len(img_list)
code
17130585/cell_2
[ "image_output_1.png" ]
import os import numpy as np import pandas as pd import os print(os.listdir('../input/all-dogs/'))
code
17130585/cell_19
[ "text_plain_output_1.png" ]
from PIL import Image from keras.layers import Dense, Activation, Reshape from keras.layers import Flatten, Dropout from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import UpSampling2D, Conv2D from keras.layers.normalization import BatchNormalization from keras.models import Sequential from keras.optimizers import Adam from keras.preprocessing import image from keras.preprocessing.image import img_to_array, load_img from tqdm import tqdm import math import numpy as np import numpy as np # linear algebra import os import os import numpy as np import pandas as pd import os img_list = os.listdir('../input/all-dogs/all-dogs/') def generator_model(): model = Sequential() model.add(Dense(input_dim=100, units=1024)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dense(32 * 32 * 128)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Reshape((32, 32, 128), input_shape=(32 * 32 * 128,))) model.add(UpSampling2D((2, 2))) model.add(Conv2D(64, (5, 5), padding='same')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(UpSampling2D((2, 2))) model.add(Conv2D(3, (5, 5), padding='same')) model.add(Activation('tanh')) return model def discriminator_model(): model = Sequential() model.add(Conv2D(64, (5, 5), strides=(2, 2), input_shape=(128, 128, 3), padding='same')) model.add(LeakyReLU(0.2)) model.add(Conv2D(128, (5, 5), strides=(2, 2))) model.add(LeakyReLU(0.2)) model.add(Flatten()) model.add(Dense(256)) model.add(LeakyReLU(0.2)) model.add(Dropout(0.5)) model.add(Dense(1)) model.add(Activation('sigmoid')) return model def combine_images(generated_images): total = generated_images.shape[0] cols = int(math.sqrt(total)) rows = math.ceil(float(total) / cols) width, height, ch = generated_images.shape[1:] output_shape = (height * rows, width * cols, ch) combined_image = np.zeros(output_shape) for index, image in enumerate(generated_images): i = int(index / cols) j = index % cols combined_image[width * i:width * (i + 1), height * j:height * (j + 1)] = image[:, :, :] return combined_image TRAIN_IMAGE_PATH = '../input/all-dogs/all-dogs/' GENERATED_IMAGE_PATH = '../working/images/' GEN_GENERATED_IMAGE_PATH = '../gen_images/' img_list = os.listdir(TRAIN_IMAGE_PATH) X_train = [] for img in img_list: img = img_to_array(load_img(TRAIN_IMAGE_PATH + img, target_size=(128, 128, 3))) img = (img.astype(np.float32) - 127.5) / 127.5 X_train.append(img) X_train = np.array(X_train) discriminator = discriminator_model() d_opt = Adam(lr=1e-05, beta_1=0.1) discriminator.compile(loss='binary_crossentropy', optimizer=d_opt) discriminator.trainable = False generator = generator_model() dcgan = Sequential([generator, discriminator]) g_opt = Adam(lr=0.0002, beta_1=0.5) dcgan.compile(loss='binary_crossentropy', optimizer=g_opt) BATCH_SIZE = 128 NUM_EPOCH = 200 num_batches = int(X_train.shape[0] / BATCH_SIZE) for epoch in tqdm(range(NUM_EPOCH)): for index in range(num_batches): noise = np.array([np.random.uniform(-1, 1, 100) for _ in range(BATCH_SIZE)]) image_batch = X_train[index * BATCH_SIZE:(index + 1) * BATCH_SIZE] generated_images = generator.predict(noise, verbose=0, batch_size=BATCH_SIZE) if not os.path.exists(GEN_GENERATED_IMAGE_PATH): os.mkdir(GEN_GENERATED_IMAGE_PATH) if epoch == 200 and index > 59: generated_images_v = generated_images * 127.5 + 127.5 for j in range(100): Image.fromarray((generated_images_v[j] * 127.5 + 127.5).astype(np.uint8)).save(GEN_GENERATED_IMAGE_PATH + '%04d_%04d_%04d.png' % (epoch, index, j)) X = np.concatenate((image_batch, generated_images)) y = [1] * BATCH_SIZE + [0] * BATCH_SIZE d_loss = discriminator.train_on_batch(X, y) noise = np.array([np.random.uniform(-1, 1, 100) for _ in range(BATCH_SIZE)]) g_loss = dcgan.train_on_batch(noise, [1] * BATCH_SIZE) print('epoch: %d, batch: %d, g_loss: %f, d_loss: %f' % (epoch, index, g_loss, d_loss))
code
17130585/cell_18
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Activation, Reshape from keras.layers import Flatten, Dropout from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import UpSampling2D, Conv2D from keras.layers.normalization import BatchNormalization from keras.models import Sequential from keras.optimizers import Adam from keras.preprocessing import image from keras.preprocessing.image import img_to_array, load_img import math import numpy as np import numpy as np # linear algebra import os import os import numpy as np import pandas as pd import os img_list = os.listdir('../input/all-dogs/all-dogs/') def generator_model(): model = Sequential() model.add(Dense(input_dim=100, units=1024)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dense(32 * 32 * 128)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Reshape((32, 32, 128), input_shape=(32 * 32 * 128,))) model.add(UpSampling2D((2, 2))) model.add(Conv2D(64, (5, 5), padding='same')) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(UpSampling2D((2, 2))) model.add(Conv2D(3, (5, 5), padding='same')) model.add(Activation('tanh')) return model def discriminator_model(): model = Sequential() model.add(Conv2D(64, (5, 5), strides=(2, 2), input_shape=(128, 128, 3), padding='same')) model.add(LeakyReLU(0.2)) model.add(Conv2D(128, (5, 5), strides=(2, 2))) model.add(LeakyReLU(0.2)) model.add(Flatten()) model.add(Dense(256)) model.add(LeakyReLU(0.2)) model.add(Dropout(0.5)) model.add(Dense(1)) model.add(Activation('sigmoid')) return model def combine_images(generated_images): total = generated_images.shape[0] cols = int(math.sqrt(total)) rows = math.ceil(float(total) / cols) width, height, ch = generated_images.shape[1:] output_shape = (height * rows, width * cols, ch) combined_image = np.zeros(output_shape) for index, image in enumerate(generated_images): i = int(index / cols) j = index % cols combined_image[width * i:width * (i + 1), height * j:height * (j + 1)] = image[:, :, :] return combined_image TRAIN_IMAGE_PATH = '../input/all-dogs/all-dogs/' GENERATED_IMAGE_PATH = '../working/images/' GEN_GENERATED_IMAGE_PATH = '../gen_images/' img_list = os.listdir(TRAIN_IMAGE_PATH) X_train = [] for img in img_list: img = img_to_array(load_img(TRAIN_IMAGE_PATH + img, target_size=(128, 128, 3))) img = (img.astype(np.float32) - 127.5) / 127.5 X_train.append(img) X_train = np.array(X_train) discriminator = discriminator_model() d_opt = Adam(lr=1e-05, beta_1=0.1) discriminator.compile(loss='binary_crossentropy', optimizer=d_opt) discriminator.trainable = False generator = generator_model() dcgan = Sequential([generator, discriminator]) g_opt = Adam(lr=0.0002, beta_1=0.5) dcgan.compile(loss='binary_crossentropy', optimizer=g_opt) BATCH_SIZE = 128 NUM_EPOCH = 200 num_batches = int(X_train.shape[0] / BATCH_SIZE) print('Number of batches:', num_batches)
code
17130585/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.preprocessing import image from keras.preprocessing.image import img_to_array, load_img import math import numpy as np import numpy as np # linear algebra import os import os import numpy as np import pandas as pd import os img_list = os.listdir('../input/all-dogs/all-dogs/') def combine_images(generated_images): total = generated_images.shape[0] cols = int(math.sqrt(total)) rows = math.ceil(float(total) / cols) width, height, ch = generated_images.shape[1:] output_shape = (height * rows, width * cols, ch) combined_image = np.zeros(output_shape) for index, image in enumerate(generated_images): i = int(index / cols) j = index % cols combined_image[width * i:width * (i + 1), height * j:height * (j + 1)] = image[:, :, :] return combined_image TRAIN_IMAGE_PATH = '../input/all-dogs/all-dogs/' GENERATED_IMAGE_PATH = '../working/images/' GEN_GENERATED_IMAGE_PATH = '../gen_images/' img_list = os.listdir(TRAIN_IMAGE_PATH) X_train = [] for img in img_list: img = img_to_array(load_img(TRAIN_IMAGE_PATH + img, target_size=(128, 128, 3))) img = (img.astype(np.float32) - 127.5) / 127.5 X_train.append(img) len(X_train)
code
17130585/cell_3
[ "text_plain_output_1.png" ]
from keras.models import Sequential from keras.layers import Dense, Activation, Reshape from keras.layers.normalization import BatchNormalization from keras.layers.convolutional import UpSampling2D, Conv2D from keras.layers.advanced_activations import LeakyReLU from keras.layers import Flatten, Dropout from keras.preprocessing.image import img_to_array, load_img from keras.optimizers import Adam import math import numpy as np import os from tqdm import tqdm from PIL import Image from keras.preprocessing import image
code
17130585/cell_10
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import img_to_array, load_img temp_img = load_img('../input/all-dogs/all-dogs/n02085620_10074.jpg') temp_img_array = img_to_array(temp_img) temp_img_array.shape
code
17130585/cell_5
[ "text_plain_output_100.png", "application_vnd.jupyter.stderr_output_145.png", "text_plain_output_84.png", "application_vnd.jupyter.stderr_output_27.png", "application_vnd.jupyter.stderr_output_115.png", "text_plain_output_56.png", "text_plain_output_158.png", "application_vnd.jupyter.stderr_output_35.png", "application_vnd.jupyter.stderr_output_77.png", "text_plain_output_130.png", "application_vnd.jupyter.stderr_output_185.png", "application_vnd.jupyter.stderr_output_9.png", "text_plain_output_98.png", "application_vnd.jupyter.stderr_output_81.png", "application_vnd.jupyter.stderr_output_111.png", "application_vnd.jupyter.stderr_output_53.png", "application_vnd.jupyter.stderr_output_131.png", "text_plain_output_78.png", "application_vnd.jupyter.stderr_output_99.png", "text_plain_output_106.png", "text_plain_output_138.png", "text_plain_output_192.png", "application_vnd.jupyter.stderr_output_183.png", "application_vnd.jupyter.stderr_output_181.png", "application_vnd.jupyter.stderr_output_141.png", "text_plain_output_184.png", "text_plain_output_172.png", "application_vnd.jupyter.stderr_output_93.png", "text_plain_output_90.png", "application_vnd.jupyter.stderr_output_123.png", "text_plain_output_48.png", "text_plain_output_116.png", "text_plain_output_128.png", "text_plain_output_30.png", "text_plain_output_126.png", "application_vnd.jupyter.stderr_output_73.png", "application_vnd.jupyter.stderr_output_137.png", "application_vnd.jupyter.stderr_output_133.png", "application_vnd.jupyter.stderr_output_165.png", "application_vnd.jupyter.stderr_output_75.png", "application_vnd.jupyter.stderr_output_7.png", "text_plain_output_178.png", "text_plain_output_154.png", "text_plain_output_114.png", "application_vnd.jupyter.stderr_output_11.png", "application_vnd.jupyter.stderr_output_155.png", "text_plain_output_70.png", "text_plain_output_44.png", "application_vnd.jupyter.stderr_output_65.png", "text_plain_output_86.png", "text_plain_output_118.png", "application_vnd.jupyter.stderr_output_179.png", "application_vnd.jupyter.stderr_output_143.png", "text_plain_output_40.png", "text_plain_output_74.png", "application_vnd.jupyter.stderr_output_171.png", "text_plain_output_190.png", "application_vnd.jupyter.stderr_output_105.png", "text_plain_output_20.png", "text_plain_output_102.png", "text_plain_output_144.png", "text_plain_output_132.png", "text_plain_output_60.png", "application_vnd.jupyter.stderr_output_31.png", "application_vnd.jupyter.stderr_output_125.png", "application_vnd.jupyter.stderr_output_113.png", "text_plain_output_68.png", "text_plain_output_4.png", "text_plain_output_64.png", "application_vnd.jupyter.stderr_output_33.png", "application_vnd.jupyter.stderr_output_25.png", "text_plain_output_52.png", "text_plain_output_66.png", "application_vnd.jupyter.stderr_output_135.png", "application_vnd.jupyter.stderr_output_177.png", "text_plain_output_14.png", "text_plain_output_32.png", "text_plain_output_88.png", "application_vnd.jupyter.stderr_output_89.png", "text_plain_output_140.png", "application_vnd.jupyter.stderr_output_189.png", "text_plain_output_160.png", "text_plain_output_58.png", "application_vnd.jupyter.stderr_output_149.png", "application_vnd.jupyter.stderr_output_91.png", "application_vnd.jupyter.stderr_output_95.png", "application_vnd.jupyter.stderr_output_67.png", "text_plain_output_76.png", "text_plain_output_108.png", "text_plain_output_54.png", "text_plain_output_142.png", "text_plain_output_10.png", "text_plain_output_6.png", 
"application_vnd.jupyter.stderr_output_71.png", "text_plain_output_170.png", "text_plain_output_92.png", "text_plain_output_120.png", "text_plain_output_24.png", "application_vnd.jupyter.stderr_output_23.png", "application_vnd.jupyter.stderr_output_159.png", "text_plain_output_104.png", "application_vnd.jupyter.stderr_output_59.png", "text_plain_output_134.png", "text_plain_output_18.png", "text_plain_output_50.png", "text_plain_output_36.png", "application_vnd.jupyter.stderr_output_83.png", "text_plain_output_96.png", "application_vnd.jupyter.stderr_output_19.png", "text_plain_output_180.png", "text_plain_output_112.png", "text_plain_output_152.png", "application_vnd.jupyter.stderr_output_13.png", "application_vnd.jupyter.stderr_output_127.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_22.png", "text_plain_output_188.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_146.png", "application_vnd.jupyter.stderr_output_163.png", "application_vnd.jupyter.stderr_output_151.png", "application_vnd.jupyter.stderr_output_103.png", "application_vnd.jupyter.stderr_output_109.png", "text_plain_output_38.png", "text_plain_output_166.png", "application_vnd.jupyter.stderr_output_97.png", "text_plain_output_16.png", "text_plain_output_174.png", "application_vnd.jupyter.stderr_output_15.png", "text_plain_output_8.png", "text_plain_output_122.png", "application_vnd.jupyter.stderr_output_193.png", "application_vnd.jupyter.stderr_output_17.png", "application_vnd.jupyter.stderr_output_87.png", "text_plain_output_182.png", "text_plain_output_26.png", "application_vnd.jupyter.stderr_output_187.png", "application_vnd.jupyter.stderr_output_117.png", "text_plain_output_34.png", "text_plain_output_168.png", "application_vnd.jupyter.stderr_output_69.png", "application_vnd.jupyter.stderr_output_41.png", "application_vnd.jupyter.stderr_output_157.png", "text_plain_output_42.png", "text_plain_output_110.png", "application_vnd.jupyter.stderr_output_167.png", "application_vnd.jupyter.stderr_output_79.png", "application_vnd.jupyter.stderr_output_49.png", "application_vnd.jupyter.stderr_output_63.png", "application_vnd.jupyter.stderr_output_47.png", "application_vnd.jupyter.stderr_output_57.png", "application_vnd.jupyter.stderr_output_169.png", "text_plain_output_28.png", "text_plain_output_72.png", "application_vnd.jupyter.stderr_output_173.png", "application_vnd.jupyter.stderr_output_191.png", "text_plain_output_162.png", "text_plain_output_136.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_29.png", "application_vnd.jupyter.stderr_output_101.png", "application_vnd.jupyter.stderr_output_139.png", "text_plain_output_150.png", "application_vnd.jupyter.stderr_output_1.png", "text_plain_output_176.png", "application_vnd.jupyter.stderr_output_61.png", "text_plain_output_186.png", "application_vnd.jupyter.stderr_output_51.png", "text_plain_output_82.png", "application_vnd.jupyter.stderr_output_161.png", "text_plain_output_80.png", "text_plain_output_94.png", "text_plain_output_164.png", "application_vnd.jupyter.stderr_output_153.png", "text_plain_output_124.png", "application_vnd.jupyter.stderr_output_45.png", "text_plain_output_148.png", "application_vnd.jupyter.stderr_output_175.png", "text_plain_output_12.png", "application_vnd.jupyter.stderr_output_39.png", "application_vnd.jupyter.stderr_output_119.png", "application_vnd.jupyter.stderr_output_107.png", "application_vnd.jupyter.stderr_output_21.png", "application_vnd.jupyter.stderr_output_43.png", 
"text_plain_output_194.png", "application_vnd.jupyter.stderr_output_85.png", "text_plain_output_62.png", "application_vnd.jupyter.stderr_output_55.png", "text_plain_output_156.png", "application_vnd.jupyter.stderr_output_147.png", "application_vnd.jupyter.stderr_output_121.png", "application_vnd.jupyter.stderr_output_129.png", "application_vnd.jupyter.stderr_output_37.png", "text_plain_output_46.png" ]
import os import os import numpy as np import pandas as pd import os img_list = os.listdir('../input/all-dogs/all-dogs/') print(os.listdir('../working'))
code
89142701/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv') train.shape train.isnull().sum() mask = np.triu(train.drop(['PassengerId'], axis=1).corr()) train.describe(exclude='object')
code
89142701/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv') test.info()
code
89142701/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv') train.describe()
code
89142701/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv') train.shape train.isnull().sum() mask = np.triu(train.drop(['PassengerId'], axis=1).corr()) fig = sns.catplot(x='Pclass', y='Age', data=train, hue='Survived', ci=None)
code
89142701/cell_29
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv') train.shape train.isnull().sum() mask = np.triu(train.drop(['PassengerId'], axis=1).corr()) fig = sns.catplot(x='Pclass',y='Age',data=train,hue='Survived',ci=None) fig = sns.barplot(x='Pclass',y='Survived',data=train, hue='Sex',ci=None) for container in fig.containers: fig.bar_label(container,label_type='center',fmt='%1.2f%%') fig = sns.barplot(x='Pclass', y='Survived', data=train) fig.bar_label(fig.containers[0], label_type='center', fmt='%1.1f%%')
code
89142701/cell_26
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.shape
train.isnull().sum()
mask = np.triu(train.drop(['PassengerId'], axis=1).corr())
fig = sns.catplot(x='Pclass', y='Age', data=train, hue='Survived', ci=None)
fig = sns.barplot(x='Pclass', y='Survived', data=train, hue='Sex', ci=None)
for container in fig.containers:
    fig.bar_label(container, label_type='center', fmt='%1.2f%%')
code
89142701/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.shape
code
89142701/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.shape
train.isnull().sum()
plt.figure(figsize=(16, 6))
mask = np.triu(train.drop(['PassengerId'], axis=1).corr())
sns.heatmap(train.drop(['PassengerId'], axis=1).corr(), annot=True, cmap='BrBG', mask=mask)
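# Editor's note: the np.triu mask hides the upper triangle of the correlation matrix
# (truthy wherever the correlation is nonzero), so each pairwise correlation is drawn
# only once, in the lower triangle of the heatmap.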
code
89142701/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
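# Standard Kaggle setup cell: walk the read-only /kaggle/input tree and print the
# path of every data file available to the notebook.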
code
89142701/cell_32
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.shape
train.isnull().sum()
mask = np.triu(train.drop(['PassengerId'], axis=1).corr())
fig = sns.catplot(x='Pclass', y='Age', data=train, hue='Survived', ci=None)
fig = sns.barplot(x='Pclass', y='Survived', data=train, hue='Sex', ci=None)
for container in fig.containers:
    fig.bar_label(container, label_type='center', fmt='%1.2f%%')
fig = sns.barplot(x='Pclass', y='Survived', data=train)
fig.bar_label(fig.containers[0], label_type='center', fmt='%1.1f%%')
fig = sns.barplot(x='Survived', y='Sex', data=train)
fig.bar_label(fig.containers[0], size=14, label_type='center', fmt='%1.2f%%')
code
89142701/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.info()
code
89142701/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test.isnull().sum()
test.head()
code
89142701/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test.isnull().sum()
test.shape
code
89142701/cell_35
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.shape
train.isnull().sum()
mask = np.triu(train.drop(['PassengerId'], axis=1).corr())
fig = sns.catplot(x='Pclass', y='Age', data=train, hue='Survived', ci=None)
fig = sns.barplot(x='Pclass', y='Survived', data=train, hue='Sex', ci=None)
for container in fig.containers:
    fig.bar_label(container, label_type='center', fmt='%1.2f%%')
fig = sns.barplot(x='Pclass', y='Survived', data=train)
fig.bar_label(fig.containers[0], label_type='center', fmt='%1.1f%%')
fig = sns.barplot(x='Survived', y='Sex', data=train)
fig.bar_label(fig.containers[0], size=14, label_type='center', fmt='%1.2f%%')
plt.figure(figsize=(15, 8))
g = sns.FacetGrid(train, col='Embarked', size=6)
g.map(sns.barplot, 'Pclass', 'Survived', hue=train.Sex)
g.add_legend()
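# Editor's note: FacetGrid's size= parameter was renamed to height= in seaborn 0.9,
# so on newer seaborn this cell would need the hypothetical equivalent:
# g = sns.FacetGrid(train, col='Embarked', height=6)
# Passing hue=train.Sex through g.map also relies on older seaborn accepting a full
# Series alongside the per-facet data subsets.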
code