path: string (lengths 13–17)
screenshot_names: list (lengths 1–873)
code: string (lengths 0–40.4k)
cell_type: string (1 distinct value: "code")
88102865/cell_26
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_4.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_7.png", "text_plain_output_8.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/misoimprovedta/ta-misogyny-train (3).csv', header=None, sep='\t')
df_eval = pd.read_csv('../input/misoimprovedta/ta-misogyny-dev (2).csv', header=None, sep='\t')
df_test = pd.read_csv('../input/misoimprovedta/ta-misogyny-test (2).csv', header=None)

def create_labels(sentence):
    splits = sentence.split('\t')
    return splits[0]

def change_sentence(sentence):
    splits = sentence.split('\t')
    return splits[1]

df_eval.rename(columns={0: 'Labels', 1: 'Text'}, inplace=True)
df_eval = df_eval[['Text', 'Labels']]
df.rename(columns={0: 'Labels', 1: 'Text'}, inplace=True)
df = df[['Text', 'Labels']]
df_test.rename(columns={0: 'Text'}, inplace=True)

# Build an integer encoding for the label strings and apply it to train and dev.
num_labels = len(df['Labels'].unique())
keys = list(df['Labels'].unique())
values = list(range(0, num_labels))
label_dict = dict(zip(keys, values))
df['Labels'] = df['Labels'].apply(lambda x: label_dict[x])
df_eval['Labels'] = df_eval['Labels'].apply(lambda x: label_dict[x])

df_final = df_test.copy()
reverse_label_dict = {v: u for u, v in label_dict.items()}
reverse_label_dict

# `predictions` comes from the model.predict(...) call in an earlier cell.
df_final['Predicted_Labels'] = predictions
df_final['Predicted_Labels'] = df_final['Predicted_Labels'].apply(lambda x: reverse_label_dict[x])
df_final['pid'] = df_final.index
df_final = df_final[['pid', 'Predicted_Labels']]
df_final
code
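The encode/decode pattern above (a dict(zip(...)) forward map plus its inversion) can be checked in isolation. A minimal sketch with made-up labels, not the dataset's actual label set:

import pandas as pd

toy = pd.DataFrame({'Text': ['a', 'b', 'c'], 'Labels': ['x-label', 'y-label', 'x-label']})
label_dict = dict(zip(toy['Labels'].unique(), range(toy['Labels'].nunique())))  # string -> int
reverse_label_dict = {v: k for k, v in label_dict.items()}                      # int -> string
encoded = toy['Labels'].map(label_dict)
assert encoded.map(reverse_label_dict).equals(toy['Labels'])                    # round trip is lossless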
88102865/cell_19
[ "text_html_output_1.png" ]
import sklearn.metrics

# Five train/evaluate rounds; `model`, `df`, `df_dev` and `df_test_` come from earlier cells.
for i in range(0, 5):
    !rm -rf /kaggle/working/outputs  # clear checkpoints between runs
    model.train_model(df, eval_data=df_dev, acc=sklearn.metrics.classification_report)
    result, model_outputs, preds_list = model.eval_model(df_test_, acc=sklearn.metrics.classification_report)
    for j in result.values():
        print(j)
code
88102865/cell_18
[ "text_html_output_1.png" ]
from simpletransformers.classification import ClassificationModel, ClassificationArgs

model_args = ClassificationArgs()
model_args.overwrite_output_dir = True
model_args.eval_batch_size = 8
model_args.train_batch_size = 8
model_args.learning_rate = 4e-05
model = ClassificationModel('bert', 'google/muril-base-cased', num_labels=9, args=model_args,
                            tokenizer_type='bert', tokenizer_name='google/muril-base-cased')
code
88102865/cell_8
[ "text_html_output_1.png" ]
!pip install simpletransformers
code
88102865/cell_24
[ "application_vnd.jupyter.stderr_output_1.png" ]
from simpletransformers.classification import ClassificationModel, ClassificationArgs
from sklearn.model_selection import train_test_split
import pandas as pd

df = pd.read_csv('../input/misoimprovedta/ta-misogyny-train (3).csv', header=None, sep='\t')
df_eval = pd.read_csv('../input/misoimprovedta/ta-misogyny-dev (2).csv', header=None, sep='\t')
df_test = pd.read_csv('../input/misoimprovedta/ta-misogyny-test (2).csv', header=None)

def create_labels(sentence):
    splits = sentence.split('\t')
    return splits[0]

def change_sentence(sentence):
    splits = sentence.split('\t')
    return splits[1]

df_eval.rename(columns={0: 'Labels', 1: 'Text'}, inplace=True)
df_eval = df_eval[['Text', 'Labels']]
df.rename(columns={0: 'Labels', 1: 'Text'}, inplace=True)
df = df[['Text', 'Labels']]
df_test.rename(columns={0: 'Text'}, inplace=True)

X_test, X_dev, y_test, y_dev = train_test_split(df_eval['Text'], df_eval['Labels'], random_state=0)
df_test_ = pd.concat([X_test, y_test], axis=1)
df_dev = pd.concat([X_dev, y_dev], axis=1)
df_dev

model_args = ClassificationArgs()
model_args.overwrite_output_dir = True
model_args.eval_batch_size = 8
model_args.train_batch_size = 8
model_args.learning_rate = 4e-05
model = ClassificationModel('bert', 'google/muril-base-cased', num_labels=9, args=model_args,
                            tokenizer_type='bert', tokenizer_name='google/muril-base-cased')

# Predict on the labeled slice of the dev file first, then overwrite `predictions`
# with the predictions for the unlabeled test file.
predictions, raw_outputs = model.predict(df_test_['Text'].to_list())
predictions, raw_outputs = model.predict(df_test['Text'].to_list())
code
88102865/cell_22
[ "text_plain_output_1.png" ]
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import pandas as pd

df = pd.read_csv('../input/misoimprovedta/ta-misogyny-train (3).csv', header=None, sep='\t')
df_eval = pd.read_csv('../input/misoimprovedta/ta-misogyny-dev (2).csv', header=None, sep='\t')
df_test = pd.read_csv('../input/misoimprovedta/ta-misogyny-test (2).csv', header=None)

def create_labels(sentence):
    splits = sentence.split('\t')
    return splits[0]

def change_sentence(sentence):
    splits = sentence.split('\t')
    return splits[1]

df_eval.rename(columns={0: 'Labels', 1: 'Text'}, inplace=True)
df_eval = df_eval[['Text', 'Labels']]
df.rename(columns={0: 'Labels', 1: 'Text'}, inplace=True)
df = df[['Text', 'Labels']]
df_test.rename(columns={0: 'Text'}, inplace=True)

X_test, X_dev, y_test, y_dev = train_test_split(df_eval['Text'], df_eval['Labels'], random_state=0)
df_test_ = pd.concat([X_test, y_test], axis=1)
df_dev = pd.concat([X_dev, y_dev], axis=1)
df_dev

def oversample(df):
    # Upsample every class except the largest one (classes_list[0], since
    # value_counts sorts descending) to the majority size, with replacement.
    classes = df['Labels'].value_counts().to_dict()
    most = max(classes.values())
    classes_list = []
    for key in classes:
        classes_list.append(df[df['Labels'] == key])
    classes_sample = []
    for i in range(1, len(classes_list)):
        classes_sample.append(classes_list[i].sample(most, replace=True))
    df_maybe = pd.concat(classes_sample)
    final_df = pd.concat([df_maybe, classes_list[0]], axis=0)
    final_df = final_df.reset_index(drop=True)
    return pd.DataFrame({'Text': final_df['Text'].tolist(), 'Labels': final_df['Labels'].tolist()})

def over_under_sample(df):
    # Truncate over-represented classes to the average class size, then oversample
    # the rest up to it. Neither helper is invoked in this cell.
    unq_labels = list(set(df['Labels'].tolist()))
    texts = df['Text'].tolist()
    labels = df['Labels'].tolist()
    data_dict = dict()
    for l in unq_labels:
        data_dict[l] = []
    for i in range(len(texts)):
        data_dict[labels[i]].append(texts[i])
    req_len = len(labels) // len(unq_labels)
    for label in data_dict:
        if len(data_dict[label]) > req_len:
            data_dict[label] = data_dict[label][:req_len]
    new_texts = []
    new_labels = []
    for l in data_dict:
        new_texts += data_dict[l]
        new_labels += [l] * len(data_dict[l])
    return oversample(pd.DataFrame({'Text': new_texts, 'Labels': new_labels}))

# `predictions` comes from model.predict(...) in an earlier cell.
result_dict = classification_report(df_test_['Labels'], predictions, output_dict=True)
report = pd.DataFrame(result_dict).transpose()
report
code
88102865/cell_10
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd

df = pd.read_csv('../input/misoimprovedta/ta-misogyny-train (3).csv', header=None, sep='\t')
df_eval = pd.read_csv('../input/misoimprovedta/ta-misogyny-dev (2).csv', header=None, sep='\t')
df_test = pd.read_csv('../input/misoimprovedta/ta-misogyny-test (2).csv', header=None)

def create_labels(sentence):
    splits = sentence.split('\t')
    return splits[0]

def change_sentence(sentence):
    splits = sentence.split('\t')
    return splits[1]

df_eval.rename(columns={0: 'Labels', 1: 'Text'}, inplace=True)
df_eval = df_eval[['Text', 'Labels']]
df.rename(columns={0: 'Labels', 1: 'Text'}, inplace=True)
df = df[['Text', 'Labels']]
df_test.rename(columns={0: 'Text'}, inplace=True)

X_test, X_dev, y_test, y_dev = train_test_split(df_eval['Text'], df_eval['Labels'], random_state=0)
df_test_ = pd.concat([X_test, y_test], axis=1)
df_dev = pd.concat([X_dev, y_dev], axis=1)
df_dev
code
88102865/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/misoimprovedta/ta-misogyny-train (3).csv', header=None, sep='\t')
df_eval = pd.read_csv('../input/misoimprovedta/ta-misogyny-dev (2).csv', header=None, sep='\t')
df_test = pd.read_csv('../input/misoimprovedta/ta-misogyny-test (2).csv', header=None)
df_test
code
88099646/cell_9
[ "text_plain_output_1.png" ]
from matplotlib.pyplot import figure
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
import numpy as np
import random

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
big_dance.columns
corrMatrix = big_dance.corr()

y = big_dance['Total_Score_March_Madness']
features = ['FG', 'FGA', 'Fgper', 'FT', 'FTA', 'PF', 'PTS']
X = big_dance[features]

bd_model = DecisionTreeRegressor(random_state=1)
bd_model.fit(X, y)

# Hold out a validation split and refit, then score on the held-out rows.
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
bd_model.fit(train_X, train_y)

val_predictions = bd_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print(val_mae)
code
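The held-out MAE printed above is the number to trust rather than any in-sample score. A small sketch on synthetic data (not the tournament CSV) showing how a fully grown decision tree nearly memorizes its training rows while erring on unseen ones:

import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error

rng = np.random.RandomState(1)
X = rng.rand(500, 7)
y = X @ rng.rand(7) * 100 + rng.randn(500) * 5            # linear signal + noise
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
tree = DecisionTreeRegressor(random_state=1).fit(train_X, train_y)
print('train MAE:', mean_absolute_error(train_y, tree.predict(train_X)))  # ~0 (memorized)
print('val MAE:  ', mean_absolute_error(val_y, tree.predict(val_X)))      # noticeably larger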
88099646/cell_4
[ "text_html_output_1.png" ]
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
import numpy as np
import random

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
big_dance.columns

figure(figsize=(8, 6), dpi=80)
corrMatrix = big_dance.corr()
sn.heatmap(corrMatrix, annot=False)
plt.show()
code
88099646/cell_6
[ "text_plain_output_1.png" ]
from matplotlib.pyplot import figure
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
import numpy as np
import random

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
big_dance.columns
corrMatrix = big_dance.corr()

y = big_dance['Total_Score_March_Madness']
features = ['FG', 'FGA', 'Fgper', 'FT', 'FTA', 'PF', 'PTS']
X = big_dance[features]

bd_model = DecisionTreeRegressor(random_state=1)
bd_model.fit(X, y)
code
88099646/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import random
import matplotlib.pyplot as plt

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
big_dance.head()
code
88099646/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
88099646/cell_7
[ "text_plain_output_1.png" ]
from matplotlib.pyplot import figure
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
import numpy as np
import random

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
big_dance.columns
corrMatrix = big_dance.corr()

y = big_dance['Total_Score_March_Madness']
features = ['FG', 'FGA', 'Fgper', 'FT', 'FTA', 'PF', 'PTS']
X = big_dance[features]

bd_model = DecisionTreeRegressor(random_state=1)
bd_model.fit(X, y)

print('Making predictions for the following 5 March Madness entries:')
print(X.head())
print('The predictions are')
print(bd_model.predict(X.head()))
code
88099646/cell_8
[ "text_plain_output_1.png" ]
from matplotlib.pyplot import figure
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
import numpy as np
import random

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
big_dance.columns
corrMatrix = big_dance.corr()

y = big_dance['Total_Score_March_Madness']
features = ['FG', 'FGA', 'Fgper', 'FT', 'FTA', 'PF', 'PTS']
X = big_dance[features]

bd_model = DecisionTreeRegressor(random_state=1)
bd_model.fit(X, y)

# Refit on a held-out split for validation.
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
bd_model.fit(train_X, train_y)
code
88099646/cell_3
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import random
import matplotlib.pyplot as plt

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
big_dance.columns
code
88099646/cell_10
[ "text_html_output_1.png" ]
from matplotlib.pyplot import figure
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
import numpy as np
import random

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
big_dance.columns
corrMatrix = big_dance.corr()

y = big_dance['Total_Score_March_Madness']
features = ['FG', 'FGA', 'Fgper', 'FT', 'FTA', 'PF', 'PTS']
X = big_dance[features]

bd_model = DecisionTreeRegressor(random_state=1)
bd_model.fit(X, y)

train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
bd_model.fit(train_X, train_y)

val_predictions = bd_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)

# Predict the total score for one hand-entered stat line (same order as `features`).
set_up = [[25.7, 58.3, 44.1, 12.5, 17.5, 16.7, 71.4]]
bd_model.predict(set_up)
code
88099646/cell_5
[ "text_plain_output_1.png" ]
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
import numpy as np
import random

big_dance = pd.read_csv('../input/mm-data-prediction/MM_score_predictionv2.csv')
big_dance.columns
corrMatrix = big_dance.corr()

y = big_dance['Total_Score_March_Madness']
features = ['FG', 'FGA', 'Fgper', 'FT', 'FTA', 'PF', 'PTS']
X = big_dance[features]
X.describe()
code
122260145/cell_42
[ "text_plain_output_1.png" ]
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating

average_rating = ratings_name.groupby('Book-Title').mean()['Book-Rating'].reset_index()
average_rating.rename(columns={'Book-Rating': 'avg_rating'}, inplace=True)
average_rating

# Keep only users with more than 300 rated titles...
a = ratings_name.groupby('User-ID').count()['Book-Title'] > 300
geninue_users = a[a].index
geninue_users
geniune_user = ratings_name[ratings_name['User-ID'].isin(geninue_users)]
geniune_user

# ...and only books with at least 50 ratings from those users.
b = geniune_user.groupby('Book-Title').count()['Book-Rating'] >= 50
filtered_rat = b[b].index
filtered_rat
final_df = geniune_user[geniune_user['Book-Title'].isin(filtered_rat)]
final_df

piv_tbl = final_df.pivot_table(index='Book-Title', columns='User-ID')
piv_tbl.fillna(0, inplace=True)
cos_simscore = cosine_similarity(piv_tbl)
cos_simscore

def recommend(books_name):
    index = np.where(piv_tbl.index == books_name)[0][0]
    similar_books = sorted(list(enumerate(cos_simscore[index])), key=lambda x: x[1], reverse=True)[1:6]
    # Print the five most similar titles (position 0 is the book's match with itself).
    for i, score in similar_books:
        print(piv_tbl.index[i])

recommend('The Fellowship of the Ring (The Lord of the Rings, Part 1)')
code
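To make the index handling in recommend() easier to follow, here is the same top-k pattern on a hand-made similarity row: pair each position with its score via enumerate, sort by score, and drop position 0, the self-match:

scores = [1.0, 0.2, 0.9, 0.4]  # toy similarity row; entry 0 is the self-match
top = sorted(enumerate(scores), key=lambda x: x[1], reverse=True)[1:3]
print(top)  # [(2, 0.9), (3, 0.4)] -> in the real cell these indices go back into piv_tbl.index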
122260145/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating
code
122260145/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
users.isnull().sum()
code
122260145/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
users.head(5)
code
122260145/cell_30
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating

average_rating = ratings_name.groupby('Book-Title').mean()['Book-Rating'].reset_index()
average_rating.rename(columns={'Book-Rating': 'avg_rating'}, inplace=True)
average_rating

a = ratings_name.groupby('User-ID').count()['Book-Title'] > 300
geninue_users = a[a].index
geninue_users
geniune_user = ratings_name[ratings_name['User-ID'].isin(geninue_users)]
geniune_user

b = geniune_user.groupby('Book-Title').count()['Book-Rating'] >= 50
filtered_rat = b[b].index
filtered_rat
final_df = geniune_user[geniune_user['Book-Title'].isin(filtered_rat)]
final_df
code
122260145/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating

average_rating = ratings_name.groupby('Book-Title').mean()['Book-Rating'].reset_index()
average_rating.rename(columns={'Book-Rating': 'avg_rating'}, inplace=True)
average_rating

popular_books = no_of_rating.merge(average_rating, on='Book-Title')
popular_books
popular_books_df = popular_books[popular_books['num_of_rating'] >= 500].sort_values('avg_rating', ascending=False)
popular_books_df

pop_books = popular_books_df.merge(books, on='Book-Title').drop_duplicates('Book-Title')[['Book-Title', 'Book-Author', 'Year-Of-Publication']]
pop_books.shape
pop_books
code
122260145/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
books.describe()
code
122260145/cell_29
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating

average_rating = ratings_name.groupby('Book-Title').mean()['Book-Rating'].reset_index()
average_rating.rename(columns={'Book-Rating': 'avg_rating'}, inplace=True)
average_rating

a = ratings_name.groupby('User-ID').count()['Book-Title'] > 300
geninue_users = a[a].index
geninue_users
geniune_user = ratings_name[ratings_name['User-ID'].isin(geninue_users)]
geniune_user

b = geniune_user.groupby('Book-Title').count()['Book-Rating'] >= 50
filtered_rat = b[b].index
filtered_rat
code
122260145/cell_39
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating

average_rating = ratings_name.groupby('Book-Title').mean()['Book-Rating'].reset_index()
average_rating.rename(columns={'Book-Rating': 'avg_rating'}, inplace=True)
average_rating

a = ratings_name.groupby('User-ID').count()['Book-Title'] > 300
geninue_users = a[a].index
geninue_users
geniune_user = ratings_name[ratings_name['User-ID'].isin(geninue_users)]
geniune_user

b = geniune_user.groupby('Book-Title').count()['Book-Rating'] >= 50
filtered_rat = b[b].index
filtered_rat
final_df = geniune_user[geniune_user['Book-Title'].isin(filtered_rat)]
final_df

piv_tbl = final_df.pivot_table(index='Book-Title', columns='User-ID')
piv_tbl.fillna(0, inplace=True)
cos_simscore = cosine_similarity(piv_tbl)
cos_simscore

def recommend(books_name):
    index = np.where(piv_tbl.index == books_name)[0][0]
    similar_books = sorted(list(enumerate(cos_simscore[index])), key=lambda x: x[1], reverse=True)[1:6]
    # Print the five most similar titles (position 0 is the book's match with itself).
    for i, score in similar_books:
        print(piv_tbl.index[i])

recommend('The Da Vinci Code')
code
122260145/cell_26
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating

average_rating = ratings_name.groupby('Book-Title').mean()['Book-Rating'].reset_index()
average_rating.rename(columns={'Book-Rating': 'avg_rating'}, inplace=True)
average_rating

ratings_name.head()
code
122260145/cell_7
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.describe()
code
122260145/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating

average_rating = ratings_name.groupby('Book-Title').mean()['Book-Rating'].reset_index()
average_rating.rename(columns={'Book-Rating': 'avg_rating'}, inplace=True)
average_rating

popular_books = no_of_rating.merge(average_rating, on='Book-Title')
popular_books
popular_books_df = popular_books[popular_books['num_of_rating'] >= 500].sort_values('avg_rating', ascending=False)
popular_books_df
code
122260145/cell_32
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating

average_rating = ratings_name.groupby('Book-Title').mean()['Book-Rating'].reset_index()
average_rating.rename(columns={'Book-Rating': 'avg_rating'}, inplace=True)
average_rating

a = ratings_name.groupby('User-ID').count()['Book-Title'] > 300
geninue_users = a[a].index
geninue_users
geniune_user = ratings_name[ratings_name['User-ID'].isin(geninue_users)]
geniune_user

b = geniune_user.groupby('Book-Title').count()['Book-Rating'] >= 50
filtered_rat = b[b].index
filtered_rat
final_df = geniune_user[geniune_user['Book-Title'].isin(filtered_rat)]
final_df

piv_tbl = final_df.pivot_table(index='Book-Title', columns='User-ID')
piv_tbl.fillna(0, inplace=True)
piv_tbl.head(500)
code
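A toy version of the pivot above, with values= passed explicitly (the cell relies on pivot_table's default aggregation over all numeric columns): rows become titles, columns users, and fillna(0) densifies the matrix so each title is a complete vector for cosine similarity.

import pandas as pd

toy = pd.DataFrame({'Book-Title': ['A', 'A', 'B'], 'User-ID': [1, 2, 1], 'Book-Rating': [5, 3, 4]})
mat = toy.pivot_table(index='Book-Title', columns='User-ID', values='Book-Rating').fillna(0)
print(mat)  # A -> [5, 3], B -> [4, 0]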
122260145/cell_28
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating

average_rating = ratings_name.groupby('Book-Title').mean()['Book-Rating'].reset_index()
average_rating.rename(columns={'Book-Rating': 'avg_rating'}, inplace=True)
average_rating

a = ratings_name.groupby('User-ID').count()['Book-Title'] > 300
geninue_users = a[a].index
geninue_users
geniune_user = ratings_name[ratings_name['User-ID'].isin(geninue_users)]
geniune_user
code
122260145/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
code
122260145/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating

average_rating = ratings_name.groupby('Book-Title').mean()['Book-Rating'].reset_index()
average_rating.rename(columns={'Book-Rating': 'avg_rating'}, inplace=True)
average_rating

popular_books = no_of_rating.merge(average_rating, on='Book-Title')
popular_books
code
122260145/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating

average_rating = ratings_name.groupby('Book-Title').mean()['Book-Rating'].reset_index()
average_rating.rename(columns={'Book-Rating': 'avg_rating'}, inplace=True)
average_rating

popular_books = no_of_rating.merge(average_rating, on='Book-Title')
popular_books
popular_books[popular_books['num_of_rating'] >= 500].sort_values('num_of_rating', ascending=False)
code
122260145/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
books.head(5)
code
122260145/cell_35
[ "text_html_output_1.png" ]
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating

average_rating = ratings_name.groupby('Book-Title').mean()['Book-Rating'].reset_index()
average_rating.rename(columns={'Book-Rating': 'avg_rating'}, inplace=True)
average_rating

a = ratings_name.groupby('User-ID').count()['Book-Title'] > 300
geninue_users = a[a].index
geninue_users
geniune_user = ratings_name[ratings_name['User-ID'].isin(geninue_users)]
geniune_user

b = geniune_user.groupby('Book-Title').count()['Book-Rating'] >= 50
filtered_rat = b[b].index
filtered_rat
final_df = geniune_user[geniune_user['Book-Title'].isin(filtered_rat)]
final_df

piv_tbl = final_df.pivot_table(index='Book-Title', columns='User-ID')
piv_tbl.fillna(0, inplace=True)
cos_simscore = cosine_similarity(piv_tbl)
print(cos_simscore.shape)
cos_simscore
code
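What the (n_titles, n_titles) array holds can be seen on a tiny matrix: entry [i, j] is the cosine between the rating vectors of items i and j, with 1.0 on the diagonal.

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

m = np.array([[5.0, 0.0, 3.0],
              [5.0, 0.0, 3.0],
              [0.0, 4.0, 0.0]])
print(cosine_similarity(m))  # rows 0 and 1 are identical -> 1.0; row 2 is orthogonal -> 0.0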
122260145/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating

average_rating = ratings_name.groupby('Book-Title').mean()['Book-Rating'].reset_index()
average_rating.rename(columns={'Book-Rating': 'avg_rating'}, inplace=True)
average_rating
code
122260145/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
books.isnull().sum()
code
122260145/cell_27
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name

no_of_rating = ratings_name.groupby('Book-Title').count()['Book-Rating'].reset_index()
no_of_rating.rename(columns={'Book-Rating': 'num_of_rating'}, inplace=True)
no_of_rating

average_rating = ratings_name.groupby('Book-Title').mean()['Book-Rating'].reset_index()
average_rating.rename(columns={'Book-Rating': 'avg_rating'}, inplace=True)
average_rating

a = ratings_name.groupby('User-ID').count()['Book-Title'] > 300
geninue_users = a[a].index
print(type(geninue_users))
geninue_users
code
122260145/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.isnull().sum()
books.isnull().sum()

ratings_name = ratings.merge(books, on='ISBN')
ratings_name
code
122260145/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

books = pd.read_csv('//kaggle/input/book-recommendation-dataset/Books.csv')
ratings = pd.read_csv('//kaggle/input/book-recommendation-dataset/Ratings.csv')
users = pd.read_csv('//kaggle/input/book-recommendation-dataset/Users.csv')
ratings.head(5)
code
129020570/cell_21
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_absolute_error, mean_squared_error
import math
code
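This cell only imports; presumably the intent is to score the baseline frames built in the surrounding cells. A sketch with toy numbers, where in the notebook `actual` would be test['value'] and `predicted` a baseline frame's 'value' column:

from sklearn.metrics import mean_absolute_error, mean_squared_error
import math

actual = [11.9, 12.1, 13.0, 9.9]
predicted = [10.0, 10.0, 10.0, 10.0]
mae = mean_absolute_error(actual, predicted)
rmse = math.sqrt(mean_squared_error(actual, predicted))  # RMSE via math.sqrt of the MSE
print(f'MAE={mae:.3f}, RMSE={rmse:.3f}')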
129020570/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/time-series/JohnsonJohnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
df.time.unique()
df.head()
code
129020570/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/time-series/JohnsonJohnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
df.time.unique()

train = df[df['time'] < 1980]
test = df[df['time'] >= 1980]

def arithmetic_mean(train, test):
    train_mean = train['value'].mean()
    test_df = test.copy()
    test_df['value'] = train_mean
    return test_df

arithmetic_mean_df = arithmetic_mean(train, test)
arithmetic_mean_df
code
129020570/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/time-series/JohnsonJohnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
df.time.unique()

train = df[df['time'] < 1980]
test = df[df['time'] >= 1980]

def arithmetic_mean(train, test):
    train_mean = train['value'].mean()
    test_df = test.copy()
    test_df['value'] = train_mean
    return test_df

def last_record(train, test):
    test_df = test.copy()
    test_df['value'] = train.tail(1)['value'].unique()[0]
    return test_df

def seasonal(train, test):
    # Repeats the last observed season; this relies on the final four quarterly
    # values being distinct and the test set being exactly one season (4 rows) long.
    test_df = test.copy()
    test_df['value'] = train.tail(4)['value'].unique()
    return test_df

seasonal_df = seasonal(train, test)
seasonal_df
code
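The seasonal() helper leans on train.tail(4)['value'].unique(), which breaks if two of the last four quarters happen to share a value or the horizon is longer than one season. A sketch of the same seasonal-naive idea for longer horizons, using np.tile (an alternative to the notebook's approach, assuming quarterly data):

import numpy as np

last_season = [4.0, 3.5, 3.8, 4.2]                  # last four observed quarters
horizon = 8                                         # forecast two years ahead
forecast = np.tile(last_season, horizon // 4 + 1)[:horizon]
print(forecast)                                     # the season repeated to length 8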
129020570/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129020570/cell_8
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/time-series/JohnsonJohnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
df.time.unique()

train = df[df['time'] < 1980]
test = df[df['time'] >= 1980]
print('train_time', train.time.unique())
print('test_time', test.time.unique())
code
129020570/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/time-series/JohnsonJohnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
df.time.unique()

train = df[df['time'] < 1980]
test = df[df['time'] >= 1980]

def arithmetic_mean(train, test):
    train_mean = train['value'].mean()
    test_df = test.copy()
    test_df['value'] = train_mean
    return test_df

def last_record(train, test):
    test_df = test.copy()
    test_df['value'] = train.tail(1)['value'].unique()[0]
    return test_df

last_record_df = last_record(train, test)
last_record_df
code
129020570/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/time-series/JohnsonJohnson.csv')
df.head()
code
129020570/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/time-series/JohnsonJohnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
df.time.unique()

train = df[df['time'] < 1980]
test = df[df['time'] >= 1980]

def arithmetic_mean(train, test):
    train_mean = train['value'].mean()
    test_df = test.copy()
    test_df['value'] = train_mean
    return test_df

test
code
129020570/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/time-series/JohnsonJohnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
df.time.unique()
code
1006988/cell_21
[ "text_html_output_1.png" ]
import pandas as pd  # for data manipulation/CSV I/O

deliveries = pd.read_csv('../input/deliveries.csv')
matches = pd.read_csv('../input/matches.csv')

Batsman_score = deliveries.groupby('batsman')['batsman_runs'].agg(sum).reset_index() \
    .sort_values(by='batsman_runs', ascending=False).reset_index(drop=True)
Top_batsman_score = Batsman_score.iloc[:15, :]
Top_batsman_score

Batsman_Ball_faced = deliveries.groupby(['batsman'])['ball'].count().reset_index() \
    .sort_values(by='ball', ascending=False).reset_index(drop=True)
Batsman_Ball_faced_Top = Batsman_Ball_faced.iloc[:15, :]

Batsman_strike_rate = pd.merge(Batsman_score, Batsman_Ball_faced, on='batsman', how='outer')
Batsman_strike_rate = Batsman_strike_rate[Batsman_strike_rate['batsman_runs'] >= 500]
Batsman_strike_rate['strike_rate'] = Batsman_strike_rate['batsman_runs'] / Batsman_strike_rate['ball'] * 100
Batsman_strike_rate = Batsman_strike_rate[['batsman', 'strike_rate']]
Batsman_strike_rate = Batsman_strike_rate.sort_values(by='strike_rate', ascending=False).reset_index(drop=True)
Batsman_strike_rate.iloc[:20, :]
code
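The strike-rate arithmetic above, on hand-checked toy numbers (runs / balls * 100, computed column-wise after an outer merge):

import pandas as pd

runs = pd.DataFrame({'batsman': ['A', 'B'], 'batsman_runs': [500, 900]})
balls = pd.DataFrame({'batsman': ['A', 'B'], 'ball': [400, 500]})
sr = pd.merge(runs, balls, on='batsman', how='outer')
sr['strike_rate'] = sr['batsman_runs'] / sr['ball'] * 100
print(sr)  # A -> 125.0, B -> 180.0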
1006988/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # for data manipulation/CSV I/O

deliveries = pd.read_csv('../input/deliveries.csv')
matches = pd.read_csv('../input/matches.csv')

Batsman_score = deliveries.groupby('batsman')['batsman_runs'].agg(sum).reset_index() \
    .sort_values(by='batsman_runs', ascending=False).reset_index(drop=True)
Top_batsman_score = Batsman_score.iloc[:15, :]
Top_batsman_score
code
1006988/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt  # for plotting graphs
import numpy as np  # for linear algebra
import pandas as pd  # for data manipulation/CSV I/O

deliveries = pd.read_csv('../input/deliveries.csv')
matches = pd.read_csv('../input/matches.csv')

def autolabel(rects):
    # Write each bar's height just above the bar (uses the `ax` in scope at call time).
    for rect in rects:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2.0, 1.02 * height, '%d' % int(height),
                ha='center', va='bottom')

Batsman_score = deliveries.groupby('batsman')['batsman_runs'].agg(sum).reset_index() \
    .sort_values(by='batsman_runs', ascending=False).reset_index(drop=True)
Top_batsman_score = Batsman_score.iloc[:15, :]
Top_batsman_score

# Top run scorers.
labels = np.array(Top_batsman_score['batsman'])  # x-axis labels
ind = np.arange(len(labels))                     # bar positions
width = 0.7                                      # bar width
fig, ax = plt.subplots()
rects = ax.bar(ind, np.array(Top_batsman_score['batsman_runs']), width=width, color='blue')
ax.set_xticks(ind + width / 2.0)                 # tick positions on the x axis
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Count')
ax.set_title('Top Scorer in IPL')
autolabel(rects)

# Balls faced.
Batsman_Ball_faced = deliveries.groupby(['batsman'])['ball'].count().reset_index() \
    .sort_values(by='ball', ascending=False).reset_index(drop=True)
Batsman_Ball_faced_Top = Batsman_Ball_faced.iloc[:15, :]
labels = np.array(Batsman_Ball_faced_Top['batsman'])
ind = np.arange(len(labels))
width = 0.7
fig, ax = plt.subplots()
rects = ax.bar(ind, np.array(Batsman_Ball_faced_Top['ball']), width=width, color='blue')
ax.set_xticks(ind + width / 2.0)
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Count')
ax.set_title('Ball faced by Batsman in IPL')
autolabel(rects)

# Strike rate for batsmen with at least 500 runs.
Batsman_strike_rate = pd.merge(Batsman_score, Batsman_Ball_faced, on='batsman', how='outer')
Batsman_strike_rate = Batsman_strike_rate[Batsman_strike_rate['batsman_runs'] >= 500]
Batsman_strike_rate['strike_rate'] = Batsman_strike_rate['batsman_runs'] / Batsman_strike_rate['ball'] * 100
Batsman_strike_rate = Batsman_strike_rate[['batsman', 'strike_rate']]
Batsman_strike_rate = Batsman_strike_rate.sort_values(by='strike_rate', ascending=False).reset_index(drop=True)
Batsman_strike_rate.iloc[:20, :]

Batsman_strike_rate_Top = Batsman_strike_rate.iloc[:15, :]
labels = np.array(Batsman_strike_rate_Top['batsman'])
ind = np.arange(len(labels))
width = 0.5
fig, ax = plt.subplots()
rects = ax.bar(ind, np.array(Batsman_strike_rate_Top['strike_rate']), width=width, color='blue')
ax.set_xticks(ind + width / 2.0)
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Strike Rate')
ax.set_title('Most Destructive Player in IPL')
autolabel(rects)
code
1006988/cell_30
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # for plotting graphs
import numpy as np  # for linear algebra
import pandas as pd  # for data manipulation/CSV I/O

deliveries = pd.read_csv('../input/deliveries.csv')
matches = pd.read_csv('../input/matches.csv')

def autolabel(rects):
    # Write each bar's height just above the bar (uses the `ax` in scope at call time).
    for rect in rects:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2.0, 1.02 * height, '%d' % int(height),
                ha='center', va='bottom')

Batsman_score = deliveries.groupby('batsman')['batsman_runs'].agg(sum).reset_index() \
    .sort_values(by='batsman_runs', ascending=False).reset_index(drop=True)
Top_batsman_score = Batsman_score.iloc[:15, :]
Top_batsman_score

# Top run scorers.
labels = np.array(Top_batsman_score['batsman'])
ind = np.arange(len(labels))
width = 0.7
fig, ax = plt.subplots()
rects = ax.bar(ind, np.array(Top_batsman_score['batsman_runs']), width=width, color='blue')
ax.set_xticks(ind + width / 2.0)
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Count')
ax.set_title('Top Scorer in IPL')
autolabel(rects)

# Balls faced.
Batsman_Ball_faced = deliveries.groupby(['batsman'])['ball'].count().reset_index() \
    .sort_values(by='ball', ascending=False).reset_index(drop=True)
Batsman_Ball_faced_Top = Batsman_Ball_faced.iloc[:15, :]
labels = np.array(Batsman_Ball_faced_Top['batsman'])
ind = np.arange(len(labels))
width = 0.7
fig, ax = plt.subplots()
rects = ax.bar(ind, np.array(Batsman_Ball_faced_Top['ball']), width=width, color='blue')
ax.set_xticks(ind + width / 2.0)
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Count')
ax.set_title('Ball faced by Batsman in IPL')
autolabel(rects)

# Strike rate for batsmen with at least 500 runs.
Batsman_strike_rate = pd.merge(Batsman_score, Batsman_Ball_faced, on='batsman', how='outer')
Batsman_strike_rate = Batsman_strike_rate[Batsman_strike_rate['batsman_runs'] >= 500]
Batsman_strike_rate['strike_rate'] = Batsman_strike_rate['batsman_runs'] / Batsman_strike_rate['ball'] * 100
Batsman_strike_rate = Batsman_strike_rate[['batsman', 'strike_rate']]
Batsman_strike_rate = Batsman_strike_rate.sort_values(by='strike_rate', ascending=False).reset_index(drop=True)
Batsman_strike_rate.iloc[:20, :]

Batsman_strike_rate_Top = Batsman_strike_rate.iloc[:15, :]
labels = np.array(Batsman_strike_rate_Top['batsman'])
ind = np.arange(len(labels))
width = 0.5
fig, ax = plt.subplots()
rects = ax.bar(ind, np.array(Batsman_strike_rate_Top['strike_rate']), width=width, color='blue')
ax.set_xticks(ind + width / 2.0)
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Strike Rate')
ax.set_title('Most Destructive Player in IPL')
autolabel(rects)

# Dot balls: deliveries with no extras where the batsman scored 0.
Batsman_dotballs = deliveries[deliveries['extra_runs'] == 0].groupby(['batsman'])['batsman_runs'] \
    .agg(lambda x: (x == 0).sum()).reset_index() \
    .sort_values(by='batsman_runs', ascending=False).reset_index(drop=True)
Batsman_dotballs.columns = ['batsman', 'No_of_Balls']
Batsman_dotballs.iloc[:20, :]

Batsman_dotballs_Top = Batsman_dotballs.iloc[:15, :]
labels = np.array(Batsman_dotballs_Top['batsman'])
ind = np.arange(len(labels))
width = 0.6
fig, ax = plt.subplots()
rects = ax.bar(ind, np.array(Batsman_dotballs_Top['No_of_Balls']), width=width, color='blue')
ax.set_xticks(ind + width / 2.0)
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Count')
ax.set_title('No. of Dot Balls')
autolabel(rects)

# Share of dot balls among batsmen who faced more than 300 deliveries.
Percentage_of_dot_balls = pd.merge(Batsman_Ball_faced, Batsman_dotballs, on='batsman', how='outer')
Percentage_of_dot_balls['% of dot balls'] = Percentage_of_dot_balls['No_of_Balls'] / Percentage_of_dot_balls['ball'] * 100
Percentage_of_dot_balls = Percentage_of_dot_balls[Percentage_of_dot_balls['ball'] > 300].reset_index(drop=True)
Percentage_of_dot_balls_top = Percentage_of_dot_balls.sort_values(by='% of dot balls', ascending=False).reset_index(drop=True).iloc[:15, :]
Percentage_of_dot_balls_top.iloc[:20, :]
code
1006988/cell_26
[ "image_output_1.png" ]
import pandas as pd  # for data manipulation/CSV I/O

deliveries = pd.read_csv('../input/deliveries.csv')
matches = pd.read_csv('../input/matches.csv')

Batsman_score = deliveries.groupby('batsman')['batsman_runs'].agg(sum).reset_index() \
    .sort_values(by='batsman_runs', ascending=False).reset_index(drop=True)
Top_batsman_score = Batsman_score.iloc[:15, :]
Top_batsman_score

Batsman_Ball_faced = deliveries.groupby(['batsman'])['ball'].count().reset_index() \
    .sort_values(by='ball', ascending=False).reset_index(drop=True)
Batsman_Ball_faced_Top = Batsman_Ball_faced.iloc[:15, :]

# Dot balls: deliveries with no extras where the batsman scored 0.
Batsman_dotballs = deliveries[deliveries['extra_runs'] == 0].groupby(['batsman'])['batsman_runs'] \
    .agg(lambda x: (x == 0).sum()).reset_index() \
    .sort_values(by='batsman_runs', ascending=False).reset_index(drop=True)
Batsman_dotballs.columns = ['batsman', 'No_of_Balls']
Batsman_dotballs.iloc[:20, :]
code
1006988/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # for data manipulation/CSV I/O

deliveries = pd.read_csv('../input/deliveries.csv')
matches = pd.read_csv('../input/matches.csv')
matches.head(2)
code
1006988/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # for plotting graphs
import numpy as np  # for linear algebra
import pandas as pd  # for data manipulation/CSV I/O

deliveries = pd.read_csv('../input/deliveries.csv')
matches = pd.read_csv('../input/matches.csv')

def autolabel(rects):
    # Write each bar's height just above the bar (uses the `ax` in scope at call time).
    for rect in rects:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2.0, 1.02 * height, '%d' % int(height),
                ha='center', va='bottom')

Batsman_score = deliveries.groupby('batsman')['batsman_runs'].agg(sum).reset_index() \
    .sort_values(by='batsman_runs', ascending=False).reset_index(drop=True)
Top_batsman_score = Batsman_score.iloc[:15, :]
Top_batsman_score

# Top run scorers.
labels = np.array(Top_batsman_score['batsman'])
ind = np.arange(len(labels))
width = 0.7
fig, ax = plt.subplots()
rects = ax.bar(ind, np.array(Top_batsman_score['batsman_runs']), width=width, color='blue')
ax.set_xticks(ind + width / 2.0)
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Count')
ax.set_title('Top Scorer in IPL')
autolabel(rects)

# Balls faced.
Batsman_Ball_faced = deliveries.groupby(['batsman'])['ball'].count().reset_index() \
    .sort_values(by='ball', ascending=False).reset_index(drop=True)
Batsman_Ball_faced_Top = Batsman_Ball_faced.iloc[:15, :]
labels = np.array(Batsman_Ball_faced_Top['batsman'])
ind = np.arange(len(labels))
width = 0.7
fig, ax = plt.subplots()
rects = ax.bar(ind, np.array(Batsman_Ball_faced_Top['ball']), width=width, color='blue')
ax.set_xticks(ind + width / 2.0)
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Count')
ax.set_title('Ball faced by Batsman in IPL')
autolabel(rects)
code
1006988/cell_28
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # for plotting graphs
import numpy as np  # for linear algebra
import pandas as pd  # for data manipulation/CSV I/O

deliveries = pd.read_csv('../input/deliveries.csv')
matches = pd.read_csv('../input/matches.csv')

def autolabel(rects):
    # Write each bar's height just above the bar (uses the `ax` in scope at call time).
    for rect in rects:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2.0, 1.02 * height, '%d' % int(height),
                ha='center', va='bottom')

Batsman_score = deliveries.groupby('batsman')['batsman_runs'].agg(sum).reset_index() \
    .sort_values(by='batsman_runs', ascending=False).reset_index(drop=True)
Top_batsman_score = Batsman_score.iloc[:15, :]
Top_batsman_score

# Top run scorers.
labels = np.array(Top_batsman_score['batsman'])
ind = np.arange(len(labels))
width = 0.7
fig, ax = plt.subplots()
rects = ax.bar(ind, np.array(Top_batsman_score['batsman_runs']), width=width, color='blue')
ax.set_xticks(ind + width / 2.0)
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Count')
ax.set_title('Top Scorer in IPL')
autolabel(rects)

# Balls faced.
Batsman_Ball_faced = deliveries.groupby(['batsman'])['ball'].count().reset_index() \
    .sort_values(by='ball', ascending=False).reset_index(drop=True)
Batsman_Ball_faced_Top = Batsman_Ball_faced.iloc[:15, :]
labels = np.array(Batsman_Ball_faced_Top['batsman'])
ind = np.arange(len(labels))
width = 0.7
fig, ax = plt.subplots()
rects = ax.bar(ind, np.array(Batsman_Ball_faced_Top['ball']), width=width, color='blue')
ax.set_xticks(ind + width / 2.0)
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Count')
ax.set_title('Ball faced by Batsman in IPL')
autolabel(rects)

# Strike rate for batsmen with at least 500 runs.
Batsman_strike_rate = pd.merge(Batsman_score, Batsman_Ball_faced, on='batsman', how='outer')
Batsman_strike_rate = Batsman_strike_rate[Batsman_strike_rate['batsman_runs'] >= 500]
Batsman_strike_rate['strike_rate'] = Batsman_strike_rate['batsman_runs'] / Batsman_strike_rate['ball'] * 100
Batsman_strike_rate = Batsman_strike_rate[['batsman', 'strike_rate']]
Batsman_strike_rate = Batsman_strike_rate.sort_values(by='strike_rate', ascending=False).reset_index(drop=True)
Batsman_strike_rate.iloc[:20, :]

Batsman_strike_rate_Top = Batsman_strike_rate.iloc[:15, :]
labels = np.array(Batsman_strike_rate_Top['batsman'])
ind = np.arange(len(labels))
width = 0.5
fig, ax = plt.subplots()
rects = ax.bar(ind, np.array(Batsman_strike_rate_Top['strike_rate']), width=width, color='blue')
ax.set_xticks(ind + width / 2.0)
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Strike Rate')
ax.set_title('Most Destructive Player in IPL')
autolabel(rects)

# Dot balls: deliveries with no extras where the batsman scored 0.
Batsman_dotballs = deliveries[deliveries['extra_runs'] == 0].groupby(['batsman'])['batsman_runs'] \
    .agg(lambda x: (x == 0).sum()).reset_index() \
    .sort_values(by='batsman_runs', ascending=False).reset_index(drop=True)
Batsman_dotballs.columns = ['batsman', 'No_of_Balls']
Batsman_dotballs.iloc[:20, :]

Batsman_dotballs_Top = Batsman_dotballs.iloc[:15, :]
labels = np.array(Batsman_dotballs_Top['batsman'])
ind = np.arange(len(labels))
width = 0.6
fig, ax = plt.subplots()
rects = ax.bar(ind, np.array(Batsman_dotballs_Top['No_of_Balls']), width=width, color='blue')
ax.set_xticks(ind + width / 2.0)
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Count')
ax.set_title('No. of Dot Balls')
autolabel(rects)
code
1006988/cell_8
[ "image_output_1.png" ]
import pandas as pd  # for data manipulation/CSV I/O

deliveries = pd.read_csv('../input/deliveries.csv')
matches = pd.read_csv('../input/matches.csv')
deliveries.head(2)
code
1006988/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # for plotting graphs
import numpy as np  # for linear algebra
import pandas as pd  # for data manipulation/CSV I/O

deliveries = pd.read_csv('../input/deliveries.csv')
matches = pd.read_csv('../input/matches.csv')

def autolabel(rects):
    # Write each bar's height just above the bar (uses the `ax` in scope at call time).
    for rect in rects:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2.0, 1.02 * height, '%d' % int(height),
                ha='center', va='bottom')

Batsman_score = deliveries.groupby('batsman')['batsman_runs'].agg(sum).reset_index() \
    .sort_values(by='batsman_runs', ascending=False).reset_index(drop=True)
Top_batsman_score = Batsman_score.iloc[:15, :]
Top_batsman_score

labels = np.array(Top_batsman_score['batsman'])
ind = np.arange(len(labels))
width = 0.7
fig, ax = plt.subplots()
rects = ax.bar(ind, np.array(Top_batsman_score['batsman_runs']), width=width, color='blue')
ax.set_xticks(ind + width / 2.0)
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Count')
ax.set_title('Top Scorer in IPL')
autolabel(rects)
code
1006988/cell_3
[ "image_output_1.png" ]
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns
code
128021213/cell_4
[ "text_plain_output_1.png" ]
! pip install -q kaggle
code
128021213/cell_2
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
!pip install scikit-optimize import numpy as np import pandas as pd from sklearn.naive_bayes import GaussianNB from sklearn.metrics import accuracy_score,classification_report from sklearn.model_selection import train_test_split import math import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import RandomizedSearchCV, cross_val_score from scipy.stats import randint import skopt from sklearn.decomposition import PCA from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA import torch import torch.nn as nn import torch.nn.functional as f import torch.optim as optim from torch.utils.data import DataLoader,random_split,TensorDataset import os import re import librosa from sklearn.ensemble import AdaBoostClassifier from scipy.fft import fft
code
128021213/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from google.colab import files files.upload()
code
323429/cell_4
[ "text_plain_output_1.png" ]
from subprocess import check_output import sqlite3 import numpy as np import pandas as pd import nltk from sklearn.feature_extraction.text import CountVectorizer import scipy con = sqlite3.connect('../input/database.sqlite') cur = con.cursor() sqlString = ' \n SELECT complaint_id, product, consumer_complaint_narrative, company\n FROM consumer_complaints\n WHERE product = "Mortgage" AND \n consumer_complaint_narrative != ""\n ' cur.execute(sqlString) complaints = cur.fetchall() con.close() complaint_list = [] for i in range(len(complaints)): complaint_list.append(complaints[i][2])
code
323429/cell_2
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from subprocess import check_output import sqlite3 import numpy as np import pandas as pd import nltk from sklearn.feature_extraction.text import CountVectorizer import scipy print(check_output(['ls', '../input']).decode('utf8')) con = sqlite3.connect('../input/database.sqlite') cur = con.cursor() sqlString = ' \n SELECT complaint_id, product, consumer_complaint_narrative, company\n FROM consumer_complaints\n WHERE product = "Mortgage" AND \n consumer_complaint_narrative != ""\n ' cur.execute(sqlString) complaints = cur.fetchall() con.close()
code
17112386/cell_13
[ "image_output_1.png" ]
from PIL import Image from torch.utils.data import Dataset, DataLoader from torchvision import datasets, transforms import matplotlib.pyplot as plt import os import torch batch_size = 32 latent_dim = 256 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class DogDataset(Dataset): def __init__(self, img_dir, transform1=None, transform2=None): self.img_dir = img_dir self.img_names = os.listdir(img_dir) self.transform1 = transform1 self.transform2 = transform2 self.imgs = [] for img_name in self.img_names: img = Image.open(os.path.join(img_dir, img_name)) if self.transform1 is not None: img = self.transform1(img) self.imgs.append(img) def __getitem__(self, index): img = self.imgs[index] if self.transform2 is not None: img = self.transform2(img) return img def __len__(self): return len(self.imgs) transform1 = transforms.Compose([transforms.Resize(64), transforms.CenterCrop(64)]) random_transforms = [transforms.RandomRotation(degrees=10)] transform2 = transforms.Compose([transforms.RandomHorizontalFlip(p=0.5), transforms.RandomApply(random_transforms, p=0.3), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) train_dataset = DogDataset(img_dir='../input/all-dogs/all-dogs/', transform1=transform1, transform2=transform2) train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) x = next(iter(train_loader)) fig = plt.figure(figsize=(25, 16)) for ii, img in enumerate(x): ax = fig.add_subplot(4, 8, ii + 1, xticks=[], yticks=[]) img = img.numpy().transpose(1, 2, 0) plt.imshow(img)
code
17112386/cell_20
[ "text_plain_output_5.png", "text_plain_output_15.png", "text_plain_output_9.png", "text_plain_output_13.png", "image_output_5.png", "image_output_7.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_7.png", "image_output_6.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "text_plain_output_11.png" ]
from PIL import Image from torch import nn, optim from torch.autograd import Variable from torch.utils.data import Dataset, DataLoader from torchvision import datasets, transforms from tqdm import tqdm import matplotlib.pyplot as plt import os import torch import torch.nn.functional as F batch_size = 32 latent_dim = 256 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class DogDataset(Dataset): def __init__(self, img_dir, transform1=None, transform2=None): self.img_dir = img_dir self.img_names = os.listdir(img_dir) self.transform1 = transform1 self.transform2 = transform2 self.imgs = [] for img_name in self.img_names: img = Image.open(os.path.join(img_dir, img_name)) if self.transform1 is not None: img = self.transform1(img) self.imgs.append(img) def __getitem__(self, index): img = self.imgs[index] if self.transform2 is not None: img = self.transform2(img) return img def __len__(self): return len(self.imgs) transform1 = transforms.Compose([transforms.Resize(64), transforms.CenterCrop(64)]) random_transforms = [transforms.RandomRotation(degrees=10)] transform2 = transforms.Compose([transforms.RandomHorizontalFlip(p=0.5), transforms.RandomApply(random_transforms, p=0.3), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) train_dataset = DogDataset(img_dir='../input/all-dogs/all-dogs/', transform1=transform1, transform2=transform2) train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) x = next(iter(train_loader)) fig = plt.figure(figsize=(25, 16)) for ii, img in enumerate(x): ax = fig.add_subplot(4, 8, ii + 1, xticks=[], yticks=[]) img = img.numpy().transpose(1, 2, 0) plt.imshow(img) class VAE(nn.Module): def __init__(self, latent_dim=128, no_of_sample=10, batch_size=32, channels=3): super(VAE, self).__init__() self.no_of_sample = no_of_sample self.batch_size = batch_size self.channels = channels self.latent_dim = latent_dim def convlayer_enc(n_input, n_output, k_size=4, stride=2, padding=1, bn=False): block = [nn.Conv2d(n_input, n_output, kernel_size=k_size, stride=stride, padding=padding, bias=False)] if bn: block.append(nn.BatchNorm2d(n_output)) block.append(nn.LeakyReLU(0.2, inplace=True)) return block self.encoder = nn.Sequential(*convlayer_enc(self.channels, 64, 4, 2, 2), *convlayer_enc(64, 128, 4, 2, 2), *convlayer_enc(128, 256, 4, 2, 2, bn=True), *convlayer_enc(256, 512, 4, 2, 2, bn=True), nn.Conv2d(512, self.latent_dim * 2, 4, 1, 1, bias=False), nn.LeakyReLU(0.2, inplace=True)) def convlayer_dec(n_input, n_output, k_size=4, stride=2, padding=0): block = [nn.ConvTranspose2d(n_input, n_output, kernel_size=k_size, stride=stride, padding=padding, bias=False), nn.BatchNorm2d(n_output), nn.ReLU(inplace=True)] return block self.decoder = nn.Sequential(*convlayer_dec(self.latent_dim, 512, 4, 2, 1), *convlayer_dec(512, 256, 4, 2, 1), *convlayer_dec(256, 128, 4, 2, 1), *convlayer_dec(128, 64, 4, 2, 1), nn.ConvTranspose2d(64, self.channels, 3, 1, 1), nn.Sigmoid()) def encode(self, x): """return mu_z and logvar_z""" x = self.encoder(x) return (x[:, :self.latent_dim, :, :], x[:, self.latent_dim:, :, :]) def decode(self, z): z = self.decoder(z) return z.view(-1, 3 * 64 * 64) def reparameterize(self, mu, logvar): if self.training: sample_z = [] for _ in range(self.no_of_sample): std = logvar.mul(0.5).exp_() eps = Variable(std.data.new(std.size()).normal_()) sample_z.append(eps.mul(std).add_(mu)) return sample_z else: return mu def forward(self, x): mu, logvar = self.encode(x) z = self.reparameterize(mu, logvar) if 
self.training: return ([self.decode(z) for z in z], mu, logvar) else: return (self.decode(z), mu, logvar) def loss_function(self, recon_x, x, mu, logvar): if self.training: BCE = 0 for recon_x_one in recon_x: BCE += F.binary_cross_entropy(recon_x_one, x.view(-1, 3 * 64 * 64)) BCE /= len(recon_x) else: BCE = F.binary_cross_entropy(recon_x, x.view(-1, 3 * 64 * 64)) KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) KLD /= self.batch_size * 3 * 64 * 64 return BCE + 1.5 * KLD lr = 0.0005 epochs = 30 model = VAE(latent_dim, batch_size=batch_size).to(device) optimizer = optim.Adam(model.parameters(), lr=lr) for epoch in range(1, epochs + 1): model.train() print(f'Epoch {epoch} start') for batch_idx, data in tqdm(enumerate(train_loader), total=len(train_loader)): data = data.to(device) optimizer.zero_grad() recon_batch, mu, logvar = model(data) loss = model.loss_function(recon_batch, data, mu, logvar) loss.backward() optimizer.step() model.eval() recon_img, _, _ = model(x[:1].to(device)) img = recon_img.view(3, 64, 64).detach().cpu().numpy().transpose(1, 2, 0) plt.imshow(img) plt.show()
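# For illustration (an aside, not part of the original cell): averaging binary cross-entropy over the no_of_sample latent draws is a Monte Carlo estimate of the expected reconstruction term, and the 1.5 weight on KLD makes this a beta-VAE-style objective with beta = 1.5.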
code
17112386/cell_18
[ "image_output_1.png" ]
from PIL import Image from torch import nn, optim from torch.autograd import Variable from torch.utils.data import Dataset, DataLoader from torchvision import datasets, transforms import matplotlib.pyplot as plt import os import torch import torch.nn.functional as F batch_size = 32 latent_dim = 256 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class DogDataset(Dataset): def __init__(self, img_dir, transform1=None, transform2=None): self.img_dir = img_dir self.img_names = os.listdir(img_dir) self.transform1 = transform1 self.transform2 = transform2 self.imgs = [] for img_name in self.img_names: img = Image.open(os.path.join(img_dir, img_name)) if self.transform1 is not None: img = self.transform1(img) self.imgs.append(img) def __getitem__(self, index): img = self.imgs[index] if self.transform2 is not None: img = self.transform2(img) return img def __len__(self): return len(self.imgs) transform1 = transforms.Compose([transforms.Resize(64), transforms.CenterCrop(64)]) random_transforms = [transforms.RandomRotation(degrees=10)] transform2 = transforms.Compose([transforms.RandomHorizontalFlip(p=0.5), transforms.RandomApply(random_transforms, p=0.3), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) train_dataset = DogDataset(img_dir='../input/all-dogs/all-dogs/', transform1=transform1, transform2=transform2) train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) x = next(iter(train_loader)) fig = plt.figure(figsize=(25, 16)) for ii, img in enumerate(x): ax = fig.add_subplot(4, 8, ii + 1, xticks=[], yticks=[]) img = img.numpy().transpose(1, 2, 0) plt.imshow(img) class VAE(nn.Module): def __init__(self, latent_dim=128, no_of_sample=10, batch_size=32, channels=3): super(VAE, self).__init__() self.no_of_sample = no_of_sample self.batch_size = batch_size self.channels = channels self.latent_dim = latent_dim def convlayer_enc(n_input, n_output, k_size=4, stride=2, padding=1, bn=False): block = [nn.Conv2d(n_input, n_output, kernel_size=k_size, stride=stride, padding=padding, bias=False)] if bn: block.append(nn.BatchNorm2d(n_output)) block.append(nn.LeakyReLU(0.2, inplace=True)) return block self.encoder = nn.Sequential(*convlayer_enc(self.channels, 64, 4, 2, 2), *convlayer_enc(64, 128, 4, 2, 2), *convlayer_enc(128, 256, 4, 2, 2, bn=True), *convlayer_enc(256, 512, 4, 2, 2, bn=True), nn.Conv2d(512, self.latent_dim * 2, 4, 1, 1, bias=False), nn.LeakyReLU(0.2, inplace=True)) def convlayer_dec(n_input, n_output, k_size=4, stride=2, padding=0): block = [nn.ConvTranspose2d(n_input, n_output, kernel_size=k_size, stride=stride, padding=padding, bias=False), nn.BatchNorm2d(n_output), nn.ReLU(inplace=True)] return block self.decoder = nn.Sequential(*convlayer_dec(self.latent_dim, 512, 4, 2, 1), *convlayer_dec(512, 256, 4, 2, 1), *convlayer_dec(256, 128, 4, 2, 1), *convlayer_dec(128, 64, 4, 2, 1), nn.ConvTranspose2d(64, self.channels, 3, 1, 1), nn.Sigmoid()) def encode(self, x): """return mu_z and logvar_z""" x = self.encoder(x) return (x[:, :self.latent_dim, :, :], x[:, self.latent_dim:, :, :]) def decode(self, z): z = self.decoder(z) return z.view(-1, 3 * 64 * 64) def reparameterize(self, mu, logvar): if self.training: sample_z = [] for _ in range(self.no_of_sample): std = logvar.mul(0.5).exp_() eps = Variable(std.data.new(std.size()).normal_()) sample_z.append(eps.mul(std).add_(mu)) return sample_z else: return mu def forward(self, x): mu, logvar = self.encode(x) z = self.reparameterize(mu, logvar) if 
self.training: return ([self.decode(z) for z in z], mu, logvar) else: return (self.decode(z), mu, logvar) def loss_function(self, recon_x, x, mu, logvar): if self.training: BCE = 0 for recon_x_one in recon_x: BCE += F.binary_cross_entropy(recon_x_one, x.view(-1, 3 * 64 * 64)) BCE /= len(recon_x) else: BCE = F.binary_cross_entropy(recon_x, x.view(-1, 3 * 64 * 64)) KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) KLD /= self.batch_size * 3 * 64 * 64 return BCE + 1.5 * KLD plt.imshow(x[0].numpy().transpose(1, 2, 0)) plt.show()
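# For illustration (an aside, not part of the original cell): reparameterize draws z = mu + std * eps with std = exp(0.5 * logvar) and eps ~ N(0, I), which keeps the sample differentiable w.r.t. mu and logvar; an equivalent one-liner would be z = mu + (0.5 * logvar).exp() * torch.randn_like(mu).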
code
17112386/cell_24
[ "image_output_1.png" ]
from PIL import Image from torch import nn, optim from torch.autograd import Variable from torch.utils.data import Dataset, DataLoader from torchvision import datasets, transforms from tqdm import tqdm import matplotlib.pyplot as plt import os import torch import torch.nn.functional as F batch_size = 32 latent_dim = 256 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class DogDataset(Dataset): def __init__(self, img_dir, transform1=None, transform2=None): self.img_dir = img_dir self.img_names = os.listdir(img_dir) self.transform1 = transform1 self.transform2 = transform2 self.imgs = [] for img_name in self.img_names: img = Image.open(os.path.join(img_dir, img_name)) if self.transform1 is not None: img = self.transform1(img) self.imgs.append(img) def __getitem__(self, index): img = self.imgs[index] if self.transform2 is not None: img = self.transform2(img) return img def __len__(self): return len(self.imgs) transform1 = transforms.Compose([transforms.Resize(64), transforms.CenterCrop(64)]) random_transforms = [transforms.RandomRotation(degrees=10)] transform2 = transforms.Compose([transforms.RandomHorizontalFlip(p=0.5), transforms.RandomApply(random_transforms, p=0.3), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) train_dataset = DogDataset(img_dir='../input/all-dogs/all-dogs/', transform1=transform1, transform2=transform2) train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) x = next(iter(train_loader)) fig = plt.figure(figsize=(25, 16)) for ii, img in enumerate(x): ax = fig.add_subplot(4, 8, ii + 1, xticks=[], yticks=[]) img = img.numpy().transpose(1, 2, 0) plt.imshow(img) class VAE(nn.Module): def __init__(self, latent_dim=128, no_of_sample=10, batch_size=32, channels=3): super(VAE, self).__init__() self.no_of_sample = no_of_sample self.batch_size = batch_size self.channels = channels self.latent_dim = latent_dim def convlayer_enc(n_input, n_output, k_size=4, stride=2, padding=1, bn=False): block = [nn.Conv2d(n_input, n_output, kernel_size=k_size, stride=stride, padding=padding, bias=False)] if bn: block.append(nn.BatchNorm2d(n_output)) block.append(nn.LeakyReLU(0.2, inplace=True)) return block self.encoder = nn.Sequential(*convlayer_enc(self.channels, 64, 4, 2, 2), *convlayer_enc(64, 128, 4, 2, 2), *convlayer_enc(128, 256, 4, 2, 2, bn=True), *convlayer_enc(256, 512, 4, 2, 2, bn=True), nn.Conv2d(512, self.latent_dim * 2, 4, 1, 1, bias=False), nn.LeakyReLU(0.2, inplace=True)) def convlayer_dec(n_input, n_output, k_size=4, stride=2, padding=0): block = [nn.ConvTranspose2d(n_input, n_output, kernel_size=k_size, stride=stride, padding=padding, bias=False), nn.BatchNorm2d(n_output), nn.ReLU(inplace=True)] return block self.decoder = nn.Sequential(*convlayer_dec(self.latent_dim, 512, 4, 2, 1), *convlayer_dec(512, 256, 4, 2, 1), *convlayer_dec(256, 128, 4, 2, 1), *convlayer_dec(128, 64, 4, 2, 1), nn.ConvTranspose2d(64, self.channels, 3, 1, 1), nn.Sigmoid()) def encode(self, x): """return mu_z and logvar_z""" x = self.encoder(x) return (x[:, :self.latent_dim, :, :], x[:, self.latent_dim:, :, :]) def decode(self, z): z = self.decoder(z) return z.view(-1, 3 * 64 * 64) def reparameterize(self, mu, logvar): if self.training: sample_z = [] for _ in range(self.no_of_sample): std = logvar.mul(0.5).exp_() eps = Variable(std.data.new(std.size()).normal_()) sample_z.append(eps.mul(std).add_(mu)) return sample_z else: return mu def forward(self, x): mu, logvar = self.encode(x) z = self.reparameterize(mu, logvar) if 
self.training: return ([self.decode(z) for z in z], mu, logvar) else: return (self.decode(z), mu, logvar) def loss_function(self, recon_x, x, mu, logvar): if self.training: BCE = 0 for recon_x_one in recon_x: BCE += F.binary_cross_entropy(recon_x_one, x.view(-1, 3 * 64 * 64)) BCE /= len(recon_x) else: BCE = F.binary_cross_entropy(recon_x, x.view(-1, 3 * 64 * 64)) KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) KLD /= self.batch_size * 3 * 64 * 64 return BCE + 1.5 * KLD lr = 0.0005 epochs = 30 model = VAE(latent_dim, batch_size=batch_size).to(device) optimizer = optim.Adam(model.parameters(), lr=lr) for epoch in range(1, epochs + 1): model.train() for batch_idx, data in tqdm(enumerate(train_loader), total=len(train_loader)): data = data.to(device) optimizer.zero_grad() recon_batch, mu, logvar = model(data) loss = model.loss_function(recon_batch, data, mu, logvar) loss.backward() optimizer.step() model.eval() recon_img, _, _ = model(x[:1].to(device)) img = recon_img.view(3, 64, 64).detach().cpu().numpy().transpose(1, 2, 0) reconstructed, _, _ = model(x.to(device)) reconstructed = reconstructed.view(-1, 3, 64, 64).detach().cpu().numpy().transpose(0, 2, 3, 1) fig = plt.figure(figsize=(25, 16)) for ii, img in enumerate(reconstructed): ax = fig.add_subplot(4, 8, ii + 1, xticks=[], yticks=[]) plt.imshow(img) samples = Variable(torch.randn(32, latent_dim, 4, 4)).to(device) samples = model.decoder(samples).detach().cpu().numpy().transpose(0, 2, 3, 1) fig = plt.figure(figsize=(25, 16)) for ii, img in enumerate(samples): ax = fig.add_subplot(4, 8, ii + 1, xticks=[], yticks=[]) plt.imshow(img)
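# For illustration (an aside, not part of the original cell): torch.randn(32, latent_dim, 4, 4) matches the decoder's expected input because the encoder reduces a 64x64 image to a 4x4 latent grid, and the four stride-2 transposed convolutions upsample 4x4 back to 64x64.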
code
17112386/cell_22
[ "image_output_1.png" ]
from PIL import Image from torch import nn, optim from torch.autograd import Variable from torch.utils.data import Dataset, DataLoader from torchvision import datasets, transforms from tqdm import tqdm import matplotlib.pyplot as plt import os import torch import torch.nn.functional as F batch_size = 32 latent_dim = 256 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class DogDataset(Dataset): def __init__(self, img_dir, transform1=None, transform2=None): self.img_dir = img_dir self.img_names = os.listdir(img_dir) self.transform1 = transform1 self.transform2 = transform2 self.imgs = [] for img_name in self.img_names: img = Image.open(os.path.join(img_dir, img_name)) if self.transform1 is not None: img = self.transform1(img) self.imgs.append(img) def __getitem__(self, index): img = self.imgs[index] if self.transform2 is not None: img = self.transform2(img) return img def __len__(self): return len(self.imgs) transform1 = transforms.Compose([transforms.Resize(64), transforms.CenterCrop(64)]) random_transforms = [transforms.RandomRotation(degrees=10)] transform2 = transforms.Compose([transforms.RandomHorizontalFlip(p=0.5), transforms.RandomApply(random_transforms, p=0.3), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) train_dataset = DogDataset(img_dir='../input/all-dogs/all-dogs/', transform1=transform1, transform2=transform2) train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) x = next(iter(train_loader)) fig = plt.figure(figsize=(25, 16)) for ii, img in enumerate(x): ax = fig.add_subplot(4, 8, ii + 1, xticks=[], yticks=[]) img = img.numpy().transpose(1, 2, 0) plt.imshow(img) class VAE(nn.Module): def __init__(self, latent_dim=128, no_of_sample=10, batch_size=32, channels=3): super(VAE, self).__init__() self.no_of_sample = no_of_sample self.batch_size = batch_size self.channels = channels self.latent_dim = latent_dim def convlayer_enc(n_input, n_output, k_size=4, stride=2, padding=1, bn=False): block = [nn.Conv2d(n_input, n_output, kernel_size=k_size, stride=stride, padding=padding, bias=False)] if bn: block.append(nn.BatchNorm2d(n_output)) block.append(nn.LeakyReLU(0.2, inplace=True)) return block self.encoder = nn.Sequential(*convlayer_enc(self.channels, 64, 4, 2, 2), *convlayer_enc(64, 128, 4, 2, 2), *convlayer_enc(128, 256, 4, 2, 2, bn=True), *convlayer_enc(256, 512, 4, 2, 2, bn=True), nn.Conv2d(512, self.latent_dim * 2, 4, 1, 1, bias=False), nn.LeakyReLU(0.2, inplace=True)) def convlayer_dec(n_input, n_output, k_size=4, stride=2, padding=0): block = [nn.ConvTranspose2d(n_input, n_output, kernel_size=k_size, stride=stride, padding=padding, bias=False), nn.BatchNorm2d(n_output), nn.ReLU(inplace=True)] return block self.decoder = nn.Sequential(*convlayer_dec(self.latent_dim, 512, 4, 2, 1), *convlayer_dec(512, 256, 4, 2, 1), *convlayer_dec(256, 128, 4, 2, 1), *convlayer_dec(128, 64, 4, 2, 1), nn.ConvTranspose2d(64, self.channels, 3, 1, 1), nn.Sigmoid()) def encode(self, x): """return mu_z and logvar_z""" x = self.encoder(x) return (x[:, :self.latent_dim, :, :], x[:, self.latent_dim:, :, :]) def decode(self, z): z = self.decoder(z) return z.view(-1, 3 * 64 * 64) def reparameterize(self, mu, logvar): if self.training: sample_z = [] for _ in range(self.no_of_sample): std = logvar.mul(0.5).exp_() eps = Variable(std.data.new(std.size()).normal_()) sample_z.append(eps.mul(std).add_(mu)) return sample_z else: return mu def forward(self, x): mu, logvar = self.encode(x) z = self.reparameterize(mu, logvar) if 
self.training: return ([self.decode(z) for z in z], mu, logvar) else: return (self.decode(z), mu, logvar) def loss_function(self, recon_x, x, mu, logvar): if self.training: BCE = 0 for recon_x_one in recon_x: BCE += F.binary_cross_entropy(recon_x_one, x.view(-1, 3 * 64 * 64)) BCE /= len(recon_x) else: BCE = F.binary_cross_entropy(recon_x, x.view(-1, 3 * 64 * 64)) KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) KLD /= self.batch_size * 3 * 64 * 64 return BCE + 1.5 * KLD lr = 0.0005 epochs = 30 model = VAE(latent_dim, batch_size=batch_size).to(device) optimizer = optim.Adam(model.parameters(), lr=lr) for epoch in range(1, epochs + 1): model.train() for batch_idx, data in tqdm(enumerate(train_loader), total=len(train_loader)): data = data.to(device) optimizer.zero_grad() recon_batch, mu, logvar = model(data) loss = model.loss_function(recon_batch, data, mu, logvar) loss.backward() optimizer.step() model.eval() recon_img, _, _ = model(x[:1].to(device)) img = recon_img.view(3, 64, 64).detach().cpu().numpy().transpose(1, 2, 0) reconstructed, _, _ = model(x.to(device)) reconstructed = reconstructed.view(-1, 3, 64, 64).detach().cpu().numpy().transpose(0, 2, 3, 1) fig = plt.figure(figsize=(25, 16)) for ii, img in enumerate(reconstructed): ax = fig.add_subplot(4, 8, ii + 1, xticks=[], yticks=[]) plt.imshow(img)
code
128024816/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_train.shape idsUnique = len(set(df_train.Id)) idsTotal = df_train.shape[0] idsDupli = idsTotal - idsUnique df_train.drop('Id', axis=1, inplace=True) df_train = df_train[df_train.GrLivArea < 4000] def find_cat(dataframe): cat_vars = [] for name in dataframe.columns: if dataframe[name].dtype.name == 'object': cat_vars.append(name) return cat_vars non_cat_vars = list(df_train.select_dtypes(exclude='object').columns) cat_vars = find_cat(df_train) print(cat_vars)
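# For illustration (an aside, not part of the original cell): find_cat is equivalent to pandas' built-in dtype filter, e.g. cat_vars = list(df_train.select_dtypes(include='object').columns).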
code
128024816/cell_4
[ "image_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_train.shape df_train.describe()
code
128024816/cell_6
[ "image_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_train.shape print(set(df_train.Id)) idsUnique = len(set(df_train.Id)) idsTotal = df_train.shape[0] idsDupli = idsTotal - idsUnique df_train.drop('Id', axis=1, inplace=True) df_train.head()
code
128024816/cell_2
[ "text_html_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_train.shape
code
128024816/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_train.shape idsUnique = len(set(df_train.Id)) idsTotal = df_train.shape[0] idsDupli = idsTotal - idsUnique df_train.drop('Id', axis=1, inplace=True) df_train = df_train[df_train.GrLivArea < 4000] def find_cat(dataframe): cat_vars = [] for name in dataframe.columns: if dataframe[name].dtype.name == 'object': cat_vars.append(name) return cat_vars non_cat_vars = list(df_train.select_dtypes(exclude='object').columns) df_train.drop([c for c in non_cat_vars if c != 'SalePrice'], axis=1, inplace=True) # keep the SalePrice target so it can still be plotted sns.distplot(df_train.SalePrice)
code
128024816/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_train.shape idsUnique = len(set(df_train.Id)) idsTotal = df_train.shape[0] idsDupli = idsTotal - idsUnique df_train.drop('Id', axis=1, inplace=True) plt.scatter(df_train.GrLivArea, df_train.SalePrice) plt.title('Looking for outliers') plt.xlabel('GrLivArea') plt.ylabel('SalePrice') plt.show() df_train = df_train[df_train.GrLivArea < 4000]
code
128024816/cell_5
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') df_train.shape df_train.hist(bins=200, figsize=(20, 15))
code
34127932/cell_42
[ "text_plain_output_1.png" ]
import pandas as pd #for structuring the data import seaborn as sns #for visualization train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) sns.countplot(x='parch', hue='survived', data=dataTrain)
code
34127932/cell_63
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler import pandas as pd #for structuring the data train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) dataTest = pd.get_dummies(dataTest, columns=['sex']) dataTrain = pd.get_dummies(dataTrain, columns=['embarked']) dataTest = pd.get_dummies(dataTest, columns=['embarked']) y = dataTrain['survived'] x = dataTrain.drop('survived', axis=1) (x.shape, y.shape) from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler stdscale = MinMaxScaler() x_new = stdscale.fit_transform(x) testd = stdscale.transform(dataTest) (x_new.shape, testd.shape) X = pd.DataFrame(x_new, columns=x.columns) testData = pd.DataFrame(testd, columns=dataTest.columns) from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) (x_train.shape, y_train.shape)
code
34127932/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd #for structuring the data import seaborn as sns #for visualization train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) sns.countplot(x='sex', hue='survived', data=dataTrain)
code
34127932/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd #for structuring the data import seaborn as sns #for visualization train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) sns.boxplot(x='age', orient='horizontal', data=dataTrain)
code
34127932/cell_57
[ "text_html_output_1.png" ]
from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler import pandas as pd #for structuring the data train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) dataTest = pd.get_dummies(dataTest, columns=['sex']) dataTrain = pd.get_dummies(dataTrain, columns=['embarked']) dataTest = pd.get_dummies(dataTest, columns=['embarked']) y = dataTrain['survived'] x = dataTrain.drop('survived', axis=1) (x.shape, y.shape) from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler stdscale = MinMaxScaler() x_new = stdscale.fit_transform(x) testd = stdscale.transform(dataTest) (x_new.shape, testd.shape) X = pd.DataFrame(x_new, columns=x.columns) testData = pd.DataFrame(testd, columns=dataTest.columns) testData.head()
code
34127932/cell_56
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler import pandas as pd #for structuring the data train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) dataTest = pd.get_dummies(dataTest, columns=['sex']) dataTrain = pd.get_dummies(dataTrain, columns=['embarked']) dataTest = pd.get_dummies(dataTest, columns=['embarked']) y = dataTrain['survived'] x = dataTrain.drop('survived', axis=1) (x.shape, y.shape) from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler stdscale = MinMaxScaler() x_new = stdscale.fit_transform(x) testd = stdscale.transform(dataTest) (x_new.shape, testd.shape) X = pd.DataFrame(x_new, columns=x.columns) testData = pd.DataFrame(testd, columns=dataTest.columns) X.head()
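# For illustration (an aside, not part of the original cell): MinMaxScaler maps each feature to [0, 1] via x' = (x - min) / (max - min), so a fare of 50 in a column spanning 0 to 500 becomes 0.1.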
code
34127932/cell_34
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd #for structuring the data import seaborn as sns #for visualization train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) sns.distplot(dataTrain['age'])
code
34127932/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd #for structuring the data import seaborn as sns #for visualization train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) sns.boxplot(x='age', orient='horizontal', data=dataTrain)
code
34127932/cell_33
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd #for structuring the data import seaborn as sns #for visualization train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) sns.boxplot(x='age', orient='horizontal', data=dataTrain)
code
34127932/cell_44
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd #for structuring the data import seaborn as sns #for visualization train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) sns.countplot(x='parch', hue='survived', data=dataTrain)
code
34127932/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd #for structuring the data train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain['sex'].value_counts()
code
34127932/cell_55
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler import pandas as pd #for structuring the data train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) dataTest = pd.get_dummies(dataTest, columns=['sex']) dataTrain = pd.get_dummies(dataTrain, columns=['embarked']) dataTest = pd.get_dummies(dataTest, columns=['embarked']) y = dataTrain['survived'] x = dataTrain.drop('survived', axis=1) (x.shape, y.shape) from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler stdscale = MinMaxScaler() x_new = stdscale.fit_transform(x) testd = stdscale.transform(dataTest) (x_new.shape, testd.shape)
code
34127932/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd #for structuring the data train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) dataTrain.info()
code
34127932/cell_39
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd #for structuring the data import seaborn as sns #for visualization train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) sns.countplot(x='sibsp', hue='survived', data=dataTrain)
code
34127932/cell_26
[ "text_html_output_1.png" ]
import pandas as pd #for structuring the data train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) agemean, agemedian, agemode = (dataTrain['age'].mean(), dataTrain['age'].median(), dataTrain['age'].mode()[0]) print(agemean, agemedian, agemode)
code
34127932/cell_65
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler import pandas as pd #for structuring the data train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) dataTest = pd.get_dummies(dataTest, columns=['sex']) dataTrain = pd.get_dummies(dataTrain, columns=['embarked']) dataTest = pd.get_dummies(dataTest, columns=['embarked']) y = dataTrain['survived'] x = dataTrain.drop('survived', axis=1) (x.shape, y.shape) from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler stdscale = MinMaxScaler() x_new = stdscale.fit_transform(x) testd = stdscale.transform(dataTest) (x_new.shape, testd.shape) X = pd.DataFrame(x_new, columns=x.columns) testData = pd.DataFrame(testd, columns=dataTest.columns) from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) (x_train.shape, y_train.shape) from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from xgboost import XGBClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.metrics import accuracy_score, f1_score, classification_report, precision_score, recall_score lr = LogisticRegression() rand = RandomForestClassifier() gbr = GradientBoostingClassifier() for i in x_train.columns: x_train[i].fillna(x_train[i].median(), inplace=True) for i in x_test.columns: x_test[i].fillna(x_test[i].median(), inplace=True) for i in testData.columns: testData[i].fillna(testData[i].median(), inplace=True) lr.fit(x_train, y_train) lr.score(x_train, y_train)
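# For illustration (an aside, not part of the original cell): for classifiers, sklearn's .score(X, y) returns mean accuracy, so lr.score(x_train, y_train) equals accuracy_score(y_train, lr.predict(x_train)).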
code
34127932/cell_48
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd #for structuring the data train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) dataTrain['embarked'].value_counts()
code
34127932/cell_41
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd #for structuring the data train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) dataTrain['parch'].value_counts()
code
34127932/cell_61
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler import pandas as pd #for structuring the data train_data = pd.read_csv('train.csv') test_data = pd.read_csv('test.csv') dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) passengerid = test_data['passenger_ID'] dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1) dataTrain = pd.get_dummies(dataTrain, columns=['sex']) dataTest = pd.get_dummies(dataTest, columns=['sex']) dataTrain = pd.get_dummies(dataTrain, columns=['embarked']) dataTest = pd.get_dummies(dataTest, columns=['embarked']) y = dataTrain['survived'] x = dataTrain.drop('survived', axis=1) (x.shape, y.shape) from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler stdscale = MinMaxScaler() x_new = stdscale.fit_transform(x) testd = stdscale.transform(dataTest) (x_new.shape, testd.shape) X = pd.DataFrame(x_new, columns=x.columns) testData = pd.DataFrame(testd, columns=dataTest.columns) from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) (x_train.shape, y_train.shape) x_train.describe()
code
34127932/cell_2
[ "text_html_output_1.png" ]
import os os.getcwd()
code