| path (string, 13-17 chars) | screenshot_names (list, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class) |
|---|---|---|---|
105200129/cell_13
|
[
"text_html_output_1.png"
] |
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.isnull().sum()
data['ingredients'].value_counts()
|
code
|
105200129/cell_30
|
[
"text_html_output_1.png"
] |
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.isnull().sum()
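# Repeated pattern below: LabelEncoder maps each text column to integer codes;
# the original column is dropped and re-added under the same name (which moves
# it to the end of the frame).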
En = LabelEncoder()
Enco = En.fit_transform(data['bean_origin'])
data.drop('bean_origin', axis=1, inplace=True)
data['bean_origin'] = Enco
data.isnull().sum()
En = LabelEncoder()
E = En.fit_transform(data['ingredients'])
data.drop('ingredients', axis=1, inplace=True)
data['ingredients'] = E
En = LabelEncoder()
lb = En.fit_transform(data['review'])
data.drop('review', axis=1, inplace=True)
data['review'] = lb
le = LabelEncoder()
l = le.fit_transform(data['manufacturer'])
data.drop('manufacturer', axis=1, inplace=True)
data['manufacturer'] = l
data.dtypes
data = data[['id', 'cocoa_percent', 'year_reviewed', 'num_ingredients', 'ingredients', 'review', 'rating']]
data.head()
|
code
|
105200129/cell_20
|
[
"text_plain_output_1.png"
] |
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.isnull().sum()
En = LabelEncoder()
Enco = En.fit_transform(data['bean_origin'])
data.drop('bean_origin', axis=1, inplace=True)
data['bean_origin'] = Enco
data.head()
|
code
|
105200129/cell_11
|
[
"text_html_output_1.png"
] |
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.isnull().sum()
data['num_ingredients'].value_counts()
|
code
|
105200129/cell_19
|
[
"text_plain_output_1.png"
] |
"""from sklearn.preprocessing import LabelEncoder
En = LabelEncoder()
Enco_lab = En.fit_transform(data['bar_name'])
data.drop("bar_name", axis=1, inplace=True)
data["bar_name"] = Enco_lab"""
|
code
|
105200129/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
105200129/cell_7
|
[
"text_html_output_1.png"
] |
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.head()
|
code
|
105200129/cell_28
|
[
"text_plain_output_1.png"
] |
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.isnull().sum()
En = LabelEncoder()
Enco = En.fit_transform(data['bean_origin'])
data.drop('bean_origin', axis=1, inplace=True)
data['bean_origin'] = Enco
data.isnull().sum()
En = LabelEncoder()
E = En.fit_transform(data['ingredients'])
data.drop('ingredients', axis=1, inplace=True)
data['ingredients'] = E
En = LabelEncoder()
lb = En.fit_transform(data['review'])
data.drop('review', axis=1, inplace=True)
data['review'] = lb
le = LabelEncoder()
l = le.fit_transform(data['manufacturer'])
data.drop('manufacturer', axis=1, inplace=True)
data['manufacturer'] = l
data.dtypes
|
code
|
105200129/cell_8
|
[
"text_html_output_1.png"
] |
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.isnull().sum()
|
code
|
105200129/cell_15
|
[
"text_plain_output_1.png"
] |
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.isnull().sum()
|
code
|
105200129/cell_16
|
[
"text_plain_output_1.png"
] |
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.isnull().sum()
data['bean_origin'].value_counts()
|
code
|
105200129/cell_3
|
[
"text_html_output_1.png"
] |
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.head()
|
code
|
105200129/cell_17
|
[
"text_plain_output_1.png"
] |
"""from sklearn.preprocessing import LabelEncoder
En = LabelEncoder()
Enco_com = En.fit_transform(data['company_location'])
data.drop("company_location", axis=1, inplace=True)
data["company_location"] = Enco_com"""
|
code
|
105200129/cell_31
|
[
"text_plain_output_1.png"
] |
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.isnull().sum()
En = LabelEncoder()
Enco = En.fit_transform(data['bean_origin'])
data.drop('bean_origin', axis=1, inplace=True)
data['bean_origin'] = Enco
data.isnull().sum()
En = LabelEncoder()
E = En.fit_transform(data['ingredients'])
data.drop('ingredients', axis=1, inplace=True)
data['ingredients'] = E
En = LabelEncoder()
lb = En.fit_transform(data['review'])
data.drop('review', axis=1, inplace=True)
data['review'] = lb
le = LabelEncoder()
l = le.fit_transform(data['manufacturer'])
data.drop('manufacturer', axis=1, inplace=True)
data['manufacturer'] = l
data.dtypes
data = data[['id', 'cocoa_percent', 'year_reviewed', 'num_ingredients', 'ingredients', 'review', 'rating']]
X = data.iloc[:, :-1]
X.head()
|
code
|
105200129/cell_24
|
[
"text_html_output_1.png"
] |
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.isnull().sum()
En = LabelEncoder()
Enco = En.fit_transform(data['bean_origin'])
data.drop('bean_origin', axis=1, inplace=True)
data['bean_origin'] = Enco
data.isnull().sum()
En = LabelEncoder()
E = En.fit_transform(data['ingredients'])
data.drop('ingredients', axis=1, inplace=True)
data['ingredients'] = E
data.head()
|
code
|
105200129/cell_22
|
[
"text_plain_output_1.png"
] |
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.isnull().sum()
En = LabelEncoder()
Enco = En.fit_transform(data['bean_origin'])
data.drop('bean_origin', axis=1, inplace=True)
data['bean_origin'] = Enco
data.isnull().sum()
data['ingredients'].value_counts()
|
code
|
105200129/cell_27
|
[
"text_plain_output_1.png"
] |
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.isnull().sum()
En = LabelEncoder()
Enco = En.fit_transform(data['bean_origin'])
data.drop('bean_origin', axis=1, inplace=True)
data['bean_origin'] = Enco
data.isnull().sum()
En = LabelEncoder()
E = En.fit_transform(data['ingredients'])
data.drop('ingredients', axis=1, inplace=True)
data['ingredients'] = E
En = LabelEncoder()
lb = En.fit_transform(data['review'])
data.drop('review', axis=1, inplace=True)
data['review'] = lb
le = LabelEncoder()
l = le.fit_transform(data['manufacturer'])
data.drop('manufacturer', axis=1, inplace=True)
data['manufacturer'] = l
data.head()
|
code
|
34149808/cell_4
|
[
"text_plain_output_1.png"
] |
from typing import List, Tuple
import numpy as np
def soft_accuracy(y_true: np.ndarray, y_pred: np.ndarray) -> Tuple[List[float], float]:
"""
Args:
y_true (np.ndarray): GT int indices of (N, K). N is number of samples, K is number of possible answers.
y_pred (np.ndarray): Predicted int indices of (N). N is number of samples.
    Returns:
        List of scalar accuracy values, one per GT-prediction pair.
        Mean of the values in that list.
"""
acc = []
for yt, yp in zip(y_true, y_pred):
ret = 0
for k in range(len(yt)):
res = 0
for j in range(len(yt)):
if k == j:
continue
res += 1 if yp == yt[j] else 0
ret += min(1, res / 3)
ret /= len(yt)
acc.append(ret)
return (acc, np.mean(acc))
y_true = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 2], [0, 0, 0, 1, 1, 1, 3, 3, 3, 3]])
y_pred_all_clear = np.array([0, 1, 3])
y_pred_vague_minor = np.array([0, 1, 1])
y_pred_vague_incorrect = np.array([0, 1, 2])
y_pred_normal_minor = np.array([0, 0, 3])
y_pred_normal_incorrect = np.array([0, 3, 3])
y_pred_precise_incorrect = np.array([1, 1, 3])
y_pred_vm_nm = np.array([0, 0, 1])
y_pred_vi_nm = np.array([0, 0, 2])
y_pred_vm_ni = np.array([0, 3, 1])
y_pred_vi_ni = np.array([0, 3, 2])
y_pred_vm_nm_pi = np.array([1, 0, 1])
y_pred_vi_nm_pi = np.array([1, 0, 2])
y_pred_vm_ni_pi = np.array([1, 3, 1])
y_pred_vi_ni_pi = np.array([1, 3, 2])
print('vm_nm : {}, {:.4f}'.format(*soft_accuracy(y_true, y_pred_vm_nm)))
print('vi_nm : {}, {:.4f}'.format(*soft_accuracy(y_true, y_pred_vi_nm)))
print('vm_ni : {}, {:.4f}'.format(*soft_accuracy(y_true, y_pred_vm_ni)))
print('vi_ni : {}, {:.4f}'.format(*soft_accuracy(y_true, y_pred_vi_ni)))
print('vm_nm_pi : {}, {:.4f}'.format(*soft_accuracy(y_true, y_pred_vm_nm_pi)))
print('vi_nm_pi : {}, {:.4f}'.format(*soft_accuracy(y_true, y_pred_vi_nm_pi)))
print('vm_ni_pi : {}, {:.4f}'.format(*soft_accuracy(y_true, y_pred_vm_ni_pi)))
print('vi_ni_pi : {}, {:.4f}'.format(*soft_accuracy(y_true, y_pred_vi_ni_pi)))
|
code
|
34149808/cell_3
|
[
"text_plain_output_1.png"
] |
from typing import List, Tuple
import numpy as np
def soft_accuracy(y_true: np.ndarray, y_pred: np.ndarray) -> Tuple[List[float], float]:
"""
Args:
y_true (np.ndarray): GT int indices of (N, K). N is number of samples, K is number of possible answers.
y_pred (np.ndarray): Predicted int indices of (N). N is number of samples.
    Returns:
        List of scalar accuracy values, one per GT-prediction pair.
        Mean of the values in that list.
"""
acc = []
for yt, yp in zip(y_true, y_pred):
ret = 0
for k in range(len(yt)):
res = 0
for j in range(len(yt)):
if k == j:
continue
res += 1 if yp == yt[j] else 0
ret += min(1, res / 3)
ret /= len(yt)
acc.append(ret)
return (acc, np.mean(acc))
y_true = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 2], [0, 0, 0, 1, 1, 1, 3, 3, 3, 3]])
y_pred_all_clear = np.array([0, 1, 3])
y_pred_vague_minor = np.array([0, 1, 1])
y_pred_vague_incorrect = np.array([0, 1, 2])
y_pred_normal_minor = np.array([0, 0, 3])
y_pred_normal_incorrect = np.array([0, 3, 3])
y_pred_precise_incorrect = np.array([1, 1, 3])
print('all_clear : {}, {:.4f}'.format(*soft_accuracy(y_true, y_pred_all_clear)))
print('vague_minor : {}, {:.4f}'.format(*soft_accuracy(y_true, y_pred_vague_minor)))
print('vague_incorrect : {}, {:.4f}'.format(*soft_accuracy(y_true, y_pred_vague_incorrect)))
print('normal_minor : {}, {:.4f}'.format(*soft_accuracy(y_true, y_pred_normal_minor)))
print('normal_incorrect : {}, {:.4f}'.format(*soft_accuracy(y_true, y_pred_normal_incorrect)))
print('precise_incorrect : {}, {:.4f}'.format(*soft_accuracy(y_true, y_pred_precise_incorrect)))
|
code
|
34149808/cell_5
|
[
"image_output_1.png"
] |
from typing import List, Tuple
import matplotlib.pyplot as plt
import numpy as np
def soft_accuracy(y_true: np.ndarray, y_pred: np.ndarray) -> Tuple[List[float], float]:
"""
Args:
y_true (np.ndarray): GT int indices of (N, K). N is number of samples, K is number of possible answers.
y_pred (np.ndarray): Predicted int indices of (N). N is number of samples.
    Returns:
        List of scalar accuracy values, one per GT-prediction pair.
        Mean of the values in that list.
"""
acc = []
for yt, yp in zip(y_true, y_pred):
ret = 0
for k in range(len(yt)):
res = 0
for j in range(len(yt)):
if k == j:
continue
res += 1 if yp == yt[j] else 0
ret += min(1, res / 3)
ret /= len(yt)
acc.append(ret)
return (acc, np.mean(acc))
y_true = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 2], [0, 0, 0, 1, 1, 1, 3, 3, 3, 3]])
y_pred_all_clear = np.array([0, 1, 3])
y_pred_vague_minor = np.array([0, 1, 1])
y_pred_vague_incorrect = np.array([0, 1, 2])
y_pred_normal_minor = np.array([0, 0, 3])
y_pred_normal_incorrect = np.array([0, 3, 3])
y_pred_precise_incorrect = np.array([1, 1, 3])
y_pred_vm_nm = np.array([0, 0, 1])
y_pred_vi_nm = np.array([0, 0, 2])
y_pred_vm_ni = np.array([0, 3, 1])
y_pred_vi_ni = np.array([0, 3, 2])
y_pred_vm_nm_pi = np.array([1, 0, 1])
y_pred_vi_nm_pi = np.array([1, 0, 2])
y_pred_vm_ni_pi = np.array([1, 3, 1])
y_pred_vi_ni_pi = np.array([1, 3, 2])
K = 10
scores = []
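# Sweep i = number of ground-truth answers equal to the prediction (0) to see
# how soft accuracy ramps up and saturates once 3+ answers match.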
for i in range(K + 1):
y_true_sim = np.array([[0] * i + [1] * (K - i)])
_, score = soft_accuracy(y_true_sim, np.array([0]))
scores.append(score)
plt.plot(scores)
plt.xlabel('number of answers which is the same as a prediction')
plt.ylabel('soft accuracy')
plt.show()
|
code
|
16155942/cell_9
|
[
"image_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/Shinzo Abe Tweet 20171024 - Tweet.csv')
df.describe()
|
code
|
16155942/cell_6
|
[
"text_html_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/Shinzo Abe Tweet 20171024 - Tweet.csv')
df.head(5)
|
code
|
16155942/cell_11
|
[
"text_html_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/Shinzo Abe Tweet 20171024 - Tweet.csv')
data = df[['Retweets', 'Likes']]
data.corr(method='pearson')
data = df[['Replies', 'Retweets']]
data.corr(method='pearson')
|
code
|
16155942/cell_19
|
[
"text_html_output_1.png"
] |
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/Shinzo Abe Tweet 20171024 - Tweet.csv')
like_mean = df['Likes'].mean()
df_popular = df.query('Likes > ' + str(like_mean))
df_unpopular = df.query('Likes <= ' + str(like_mean))
def add_words(word_set, text):
words = text.split(' ')
word_set = word_set | set(words)
return word_set
def delete_words(words, text):
for w in words:
text = text.replace(' ' + w + ' ', ' ')
text = text.replace('pictwittercom', '')
return text
stop = ENGLISH_STOP_WORDS
text_unpop = df_unpopular['English Translation'].replace('[.,!?]', '', regex=True)
text_pop = df_popular['English Translation'].replace('[.,!?]', '', regex=True)
words_unpop = set()
words_pop = set()
unpop_text = ''
pop_text = ''
for w in text_unpop:
words_unpop = add_words(words_unpop, w)
unpop_text = unpop_text + ' ' + w
for w in text_pop:
words_pop = add_words(words_pop, w)
pop_text = pop_text + ' ' + w
unpop_text = delete_words(words_pop, unpop_text)
unpop_text = delete_words(stop, unpop_text)
pop_text = delete_words(words_unpop, pop_text)
pop_text = delete_words(stop, pop_text)
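# After removing shared words and stop words, each cloud is left with the
# vocabulary unique to popular vs. unpopular tweets.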
wordcloud = WordCloud().generate(unpop_text)
wordcloud = WordCloud().generate(pop_text)
plt.figure(figsize=(15, 15), dpi=50)
plt.imshow(wordcloud, interpolation='bilinear')
plt.show()
|
code
|
16155942/cell_8
|
[
"image_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/Shinzo Abe Tweet 20171024 - Tweet.csv')
att = ['Replies', 'Retweets', 'Likes']
pd.plotting.scatter_matrix(df[att])
|
code
|
16155942/cell_3
|
[
"text_html_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/Shinzo Abe Tweet 20171024 - Tweet.csv')
df.head(5)
|
code
|
16155942/cell_17
|
[
"text_html_output_1.png"
] |
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/Shinzo Abe Tweet 20171024 - Tweet.csv')
like_mean = df['Likes'].mean()
df_popular = df.query('Likes > ' + str(like_mean))
df_unpopular = df.query('Likes <= ' + str(like_mean))
def add_words(word_set, text):
words = text.split(' ')
word_set = word_set | set(words)
return word_set
def delete_words(words, text):
for w in words:
text = text.replace(' ' + w + ' ', ' ')
text = text.replace('pictwittercom', '')
return text
stop = ENGLISH_STOP_WORDS
text_unpop = df_unpopular['English Translation'].replace('[.,!?]', '', regex=True)
text_pop = df_popular['English Translation'].replace('[.,!?]', '', regex=True)
words_unpop = set()
words_pop = set()
unpop_text = ''
pop_text = ''
for w in text_unpop:
words_unpop = add_words(words_unpop, w)
unpop_text = unpop_text + ' ' + w
for w in text_pop:
words_pop = add_words(words_pop, w)
pop_text = pop_text + ' ' + w
unpop_text = delete_words(words_pop, unpop_text)
unpop_text = delete_words(stop, unpop_text)
pop_text = delete_words(words_unpop, pop_text)
pop_text = delete_words(stop, pop_text)
wordcloud = WordCloud().generate(unpop_text)
plt.figure(figsize=(15, 15), dpi=50)
plt.imshow(wordcloud, interpolation='bilinear')
plt.show()
|
code
|
16155942/cell_10
|
[
"text_html_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/Shinzo Abe Tweet 20171024 - Tweet.csv')
data = df[['Retweets', 'Likes']]
data.corr(method='pearson')
|
code
|
16155942/cell_12
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd
df = pd.read_csv('../input/Shinzo Abe Tweet 20171024 - Tweet.csv')
data = df[['Retweets', 'Likes']]
data.corr(method='pearson')
data = df[['Replies', 'Retweets']]
data.corr(method='pearson')
data = df[['Likes', 'Replies']]
data.corr(method='pearson')
|
code
|
89132601/cell_25
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import seaborn as sb
from xgboost import XGBClassifier
train_df = pd.read_csv('../input/spaceship-titanic/train.csv')
test_df = pd.read_csv('../input/spaceship-titanic/test.csv')
X_train = train_df.iloc[:, :-1]
Y_train = train_df.iloc[:, -1]
X_test = test_df
X_train.drop(['Name'], axis=1, inplace=True)
X_test.drop(['Name'], axis=1, inplace=True)
max_age = X_train['Age'].max()
X_train['Age'] = X_train['Age'] / max_age
X_test['Age'] = X_test['Age'] / max_age
max_money = np.log(X_train['Summary'].max() + 1)
X_train['Summary'] = np.log(X_train['Summary'] + 1) / max_money
X_test['Summary'] = np.log(X_test['Summary'] + 1) / max_money
mean_age = X_train['Age'].mean(axis=0)
X_train['Age'].fillna(mean_age, axis=0, inplace=True)
X_test['Age'].fillna(mean_age, axis=0, inplace=True)
def id_parser(s):
if s is np.nan:
return np.nan
group, _ = s.split('_')
return group
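# PassengerId appears to follow a 'gggg_pp' format, so the prefix before '_'
# identifies the travel group.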
X_train['GroupId'] = X_train['PassengerId'].apply(id_parser)
X_train.drop(['PassengerId'], axis=1, inplace=True)
X_test['GroupId'] = X_test['PassengerId'].apply(id_parser)
X_test.drop(['PassengerId'], axis=1, inplace=True)
def cabin_parser(row):
s = row['Cabin']
if s is np.nan:
return [np.nan] * 3
deck, cabin, side = s.split('/')
return [deck, side, cabin]
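# Cabin appears to follow a 'deck/num/side' format; splitting on '/' yields
# three categorical features.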
X_train[['Deck', 'Side', 'GroupCabin']] = X_train.apply(cabin_parser, axis=1, result_type='expand')
X_test[['Deck', 'Side', 'GroupCabin']] = X_test.apply(cabin_parser, axis=1, result_type='expand')
X_train.drop(['Cabin'], axis=1, inplace=True)
X_test.drop(['Cabin'], axis=1, inplace=True)
def get_groups(group_column):
groups = group_column.value_counts()
groups = dict(groups)
return groups
groups = ['GroupId', 'GroupCabin']
united_df = pd.concat([X_train, X_test], ignore_index=True)
for group in groups:
group_count = get_groups(united_df[group])
X_train.replace(group_count, inplace=True)
X_test.replace(group_count, inplace=True)
m = (X_train[groups] - 1).max()
X_train[groups] = (X_train[groups] - 1) / m
X_test[groups] = (X_test[groups] - 1) / m
X_train[groups] = X_train[groups].fillna(0, axis=0)
X_test[groups] = X_test[groups].fillna(0, axis=0)
cols = ['CryoSleep', 'VIP', 'Side', 'Destination', 'HomePlanet', 'Deck']
X_train = pd.get_dummies(X_train, columns=cols, dtype='float64')
X_test = pd.get_dummies(X_test, columns=cols, dtype='float64')
X_train.describe()
|
code
|
89132601/cell_3
|
[
"text_html_output_1.png"
] |
import pandas as pd
import numpy as np
import seaborn as sb
from xgboost import XGBClassifier
train_df = pd.read_csv('../input/spaceship-titanic/train.csv')
test_df = pd.read_csv('../input/spaceship-titanic/test.csv')
X_train = train_df.iloc[:, :-1]
Y_train = train_df.iloc[:, -1]
X_test = test_df
train_df.info()
train_df.head(10)
|
code
|
89132601/cell_24
|
[
"text_html_output_1.png",
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import seaborn as sb
from xgboost import XGBClassifier
train_df = pd.read_csv('../input/spaceship-titanic/train.csv')
test_df = pd.read_csv('../input/spaceship-titanic/test.csv')
X_train = train_df.iloc[:, :-1]
Y_train = train_df.iloc[:, -1]
X_test = test_df
X_train.drop(['Name'], axis=1, inplace=True)
X_test.drop(['Name'], axis=1, inplace=True)
max_age = X_train['Age'].max()
X_train['Age'] = X_train['Age'] / max_age
X_test['Age'] = X_test['Age'] / max_age
max_money = np.log(X_train['Summary'].max() + 1)
X_train['Summary'] = np.log(X_train['Summary'] + 1) / max_money
X_test['Summary'] = np.log(X_test['Summary'] + 1) / max_money
mean_age = X_train['Age'].mean(axis=0)
X_train['Age'].fillna(mean_age, axis=0, inplace=True)
X_test['Age'].fillna(mean_age, axis=0, inplace=True)
def id_parser(s):
if s is np.nan:
return np.nan
group, _ = s.split('_')
return group
X_train['GroupId'] = X_train['PassengerId'].apply(id_parser)
X_train.drop(['PassengerId'], axis=1, inplace=True)
X_test['GroupId'] = X_test['PassengerId'].apply(id_parser)
X_test.drop(['PassengerId'], axis=1, inplace=True)
def cabin_parser(row):
s = row['Cabin']
if s is np.nan:
return [np.nan] * 3
deck, cabin, side = s.split('/')
return [deck, side, cabin]
X_train[['Deck', 'Side', 'GroupCabin']] = X_train.apply(cabin_parser, axis=1, result_type='expand')
X_test[['Deck', 'Side', 'GroupCabin']] = X_test.apply(cabin_parser, axis=1, result_type='expand')
X_train.drop(['Cabin'], axis=1, inplace=True)
X_test.drop(['Cabin'], axis=1, inplace=True)
def get_groups(group_column):
groups = group_column.value_counts()
groups = dict(groups)
return groups
groups = ['GroupId', 'GroupCabin']
united_df = pd.concat([X_train, X_test], ignore_index=True)
for group in groups:
group_count = get_groups(united_df[group])
X_train.replace(group_count, inplace=True)
X_test.replace(group_count, inplace=True)
m = (X_train[groups] - 1).max()
X_train[groups] = (X_train[groups] - 1) / m
X_test[groups] = (X_test[groups] - 1) / m
X_train[groups] = X_train[groups].fillna(0, axis=0)
X_test[groups] = X_test[groups].fillna(0, axis=0)
cols = ['CryoSleep', 'VIP', 'Side', 'Destination', 'HomePlanet', 'Deck']
X_train = pd.get_dummies(X_train, columns=cols, dtype='float64')
X_test = pd.get_dummies(X_test, columns=cols, dtype='float64')
X_train.info()
|
code
|
49127363/cell_9
|
[
"text_plain_output_100.png",
"text_plain_output_84.png",
"text_plain_output_56.png",
"text_plain_output_35.png",
"text_plain_output_130.png",
"text_plain_output_117.png",
"text_plain_output_98.png",
"text_plain_output_43.png",
"text_plain_output_78.png",
"text_plain_output_106.png",
"text_plain_output_37.png",
"text_plain_output_90.png",
"text_plain_output_79.png",
"text_plain_output_5.png",
"text_plain_output_75.png",
"text_plain_output_48.png",
"text_plain_output_116.png",
"text_plain_output_128.png",
"text_plain_output_30.png",
"text_plain_output_73.png",
"text_plain_output_126.png",
"text_plain_output_115.png",
"text_plain_output_15.png",
"text_plain_output_114.png",
"text_plain_output_70.png",
"text_plain_output_9.png",
"text_plain_output_44.png",
"text_plain_output_119.png",
"text_plain_output_86.png",
"text_plain_output_118.png",
"text_plain_output_131.png",
"text_plain_output_40.png",
"text_plain_output_123.png",
"text_plain_output_74.png",
"text_plain_output_31.png",
"text_plain_output_20.png",
"text_plain_output_102.png",
"text_plain_output_111.png",
"text_plain_output_101.png",
"text_plain_output_132.png",
"text_plain_output_60.png",
"text_plain_output_68.png",
"text_plain_output_4.png",
"text_plain_output_65.png",
"text_plain_output_64.png",
"text_plain_output_13.png",
"text_plain_output_107.png",
"text_plain_output_52.png",
"text_plain_output_66.png",
"text_plain_output_45.png",
"text_plain_output_14.png",
"text_plain_output_32.png",
"text_plain_output_88.png",
"text_plain_output_29.png",
"text_plain_output_129.png",
"text_plain_output_58.png",
"text_plain_output_49.png",
"text_plain_output_63.png",
"text_plain_output_27.png",
"text_plain_output_76.png",
"text_plain_output_108.png",
"text_plain_output_54.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"text_plain_output_92.png",
"text_plain_output_57.png",
"text_plain_output_120.png",
"text_plain_output_24.png",
"text_plain_output_21.png",
"text_plain_output_104.png",
"text_plain_output_47.png",
"text_plain_output_121.png",
"text_plain_output_25.png",
"text_plain_output_77.png",
"text_plain_output_18.png",
"text_plain_output_50.png",
"text_plain_output_36.png",
"text_plain_output_96.png",
"text_plain_output_87.png",
"text_plain_output_3.png",
"text_plain_output_112.png",
"text_plain_output_113.png",
"text_plain_output_22.png",
"text_plain_output_81.png",
"text_plain_output_69.png",
"text_plain_output_125.png",
"text_plain_output_38.png",
"text_plain_output_7.png",
"text_plain_output_91.png",
"text_plain_output_16.png",
"text_plain_output_59.png",
"text_plain_output_103.png",
"text_plain_output_71.png",
"text_plain_output_8.png",
"text_plain_output_122.png",
"text_plain_output_26.png",
"text_plain_output_109.png",
"text_plain_output_41.png",
"text_plain_output_34.png",
"text_plain_output_85.png",
"text_plain_output_42.png",
"text_plain_output_110.png",
"text_plain_output_67.png",
"text_plain_output_53.png",
"text_plain_output_23.png",
"text_plain_output_89.png",
"text_plain_output_51.png",
"text_plain_output_28.png",
"text_plain_output_72.png",
"text_plain_output_99.png",
"text_plain_output_2.png",
"text_plain_output_127.png",
"text_plain_output_97.png",
"text_plain_output_1.png",
"text_plain_output_33.png",
"text_plain_output_39.png",
"text_plain_output_55.png",
"text_plain_output_82.png",
"text_plain_output_93.png",
"text_plain_output_19.png",
"text_plain_output_105.png",
"text_plain_output_80.png",
"text_plain_output_94.png",
"text_plain_output_124.png",
"text_plain_output_17.png",
"text_plain_output_11.png",
"text_plain_output_12.png",
"text_plain_output_62.png",
"text_plain_output_95.png",
"text_plain_output_61.png",
"text_plain_output_83.png",
"text_plain_output_46.png"
] |
from collections import Counter
from time import perf_counter
import os
import numpy as np
import matplotlib.pyplot as plt
import soundfile as sf
import pywt
import tensorflow as tf
from tensorflow.python.client import device_lib
import keras
from keras.models import Sequential
from keras.layers import Conv1D, BatchNormalization, Dense, Flatten, Activation, Dropout
from keras.callbacks import History, EarlyStopping
from keras.utils import Sequence
from tensorflow.keras.layers.experimental import preprocessing
history = History()
generator_init = False
model_saved = False
checkpoint_saved = False
SAVED_MODEL_PATH = './'
MODEL_NAME = SAVED_MODEL_PATH + 'langid_model'
CHECKPOINT_FILEPATH = MODEL_NAME + '_CP'
USE_OVERFITTING_ORIGINAL_NW = 1
USE_DROPOUT_NO_REGULARIZATION_NW = 2
USE_DROPOUT_REGULARIZATION_NW = 3
nn_choice = USE_DROPOUT_REGULARIZATION_NW
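# Three interchangeable classifier heads are defined further down; the
# dropout + L1/L2 regularization variant is selected here.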
WAVELET = 'bior6.8'
train_path = '../input/spoken-language-identification/train/train/'
test_path = '../input/spoken-language-identification/test/test/'
class langidDataGenerator(Sequence):
"""Generates data for Keras"""
def __init__(self, list_IDs, labels, wavelet='rbio3.1', drop_levels=None, batch_size=32, n_channels=1, n_classes=6, shuffle=True):
"""Initialization"""
self.wvlt = wavelet
        # guard against drop_levels=None, which would otherwise break -drop_levels
        self.drop_lvls = slice(0, -drop_levels) if drop_levels else slice(None)
self.batch_size = batch_size
self.labels = labels
self.list_IDs = list_IDs
self.n_channels = n_channels
self.n_classes = n_classes
self.shuffle = shuffle
self.on_epoch_end()
X, y = self.__data_generation(self.list_IDs[:2])
X = np.expand_dims(X, 2)
self.dim = X.shape[1:]
def __len__(self):
"""Denotes the number of batches per epoch"""
return int(np.floor(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
"""Generate one batch of data"""
indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
list_IDs_temp = [self.list_IDs[k] for k in indexes]
X, y = self.__data_generation(list_IDs_temp)
X = tf.expand_dims(X, 2)
return (X, y)
def on_epoch_end(self):
"""Updates indexes after each epoch"""
        self.indexes = tf.range(len(self.list_IDs))
        if self.shuffle:
            # tf.random.shuffle returns a shuffled copy; reassign it or the
            # epoch order never actually changes
            self.indexes = tf.random.shuffle(self.indexes)
def wavelet_features(self, list_IDs_temp):
features = []
y = []
for ID in list_IDs_temp:
signal, fs = sf.read(ID)
list_coeff = pywt.wavedec(signal, self.wvlt, mode='per')
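            # pywt.wavedec returns [cA_n, cD_n, ..., cD_1]; self.drop_lvls trims
            # the last (finest) detail levels to shorten the feature vector.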
dwt_local_coeff = []
end_flag = 0
for coeff in list_coeff[self.drop_lvls]:
dwt_local_coeff.extend(coeff)
features.append(dwt_local_coeff)
y.append(self.labels[ID])
X = tf.convert_to_tensor(features)
return (X, y)
def __data_generation(self, list_IDs_temp):
"""Generates data containing batch_size samples"""
X, y = self.wavelet_features(list_IDs_temp)
return (X, keras.utils.to_categorical(y, num_classes=self.n_classes))
"""
DATA_FOLDER = '../datasets/langid/'
train_path = DATA_FOLDER+'test/'
test_path = DATA_FOLDER+'test/'
"""
train_labels = []
for filename in os.listdir(train_path):
train_labels.append(filename[:4])
test_labels = []
for filename in os.listdir(test_path):
test_labels.append(filename[:4])
lb = 0
labeld = {}
for k in Counter(train_labels).keys():
labeld[k] = lb
lb = lb + 1
num_classes = lb
train_files = []
train_labels2 = {}
for filename in os.listdir(train_path):
train_files.append(train_path + filename)
train_labels2[train_path + filename] = labeld[filename[:4]]
test_files = []
test_labels2 = {}
for filename in os.listdir(test_path):
test_files.append(test_path + filename)
test_labels2[test_path + filename] = labeld[filename[:4]]
drop_levels = 2
"\n#ss = np.random.random_sample(2**17)\nsig_dwt = pywt.wavedec(sig,WAVELET,mode='per')\nprint('# of levels decomposed {}'.format(dec_lvls))\n"
class CustomEarlyStopping(keras.callbacks.Callback):
def __init__(self, patience=0):
super(CustomEarlyStopping, self).__init__()
self.patience = patience
self.best_weights = None
def on_train_begin(self, logs=None):
self.wait = 0
self.stopped_epoch = 0
self.best_v_loss = np.Inf
self.best_v_accuracy = 0
def on_epoch_end(self, epoch, logs=None):
v_loss = logs.get('val_loss')
v_acc = logs.get('val_accuracy')
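        # Improvement on either validation loss or accuracy resets the patience
        # counter; both must stall for `patience` epochs before stopping.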
if np.less(v_loss, self.best_v_loss) or np.greater(v_acc, self.best_v_accuracy):
self.best_v_loss = v_loss
self.best_v_accuracy = v_acc
self.wait = 0
self.best_weights = self.model.get_weights()
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
self.model.set_weights(self.best_weights)
def on_train_end(self, logs=None):
pass
def dropout_block(model, no_nodes, dropout, reg=None, activation='relu'):
model.add(Dense(units=no_nodes, kernel_regularizer=reg))
model.add(Dropout(dropout))
model.add(BatchNormalization())
model.add(Activation(activation))
epochs = 128
no_train = 8000
no_test = None
params = {'wavelet': WAVELET, 'drop_levels': drop_levels, 'batch_size': 32, 'n_classes': 6, 'n_channels': 1, 'shuffle': True}
if not generator_init:
training_generator = langidDataGenerator(train_files[:no_train], train_labels2, **params)
validation_generator = langidDataGenerator(test_files[:no_test], test_labels2, **params)
# if not data_normalized:
#     training_generator.normalize_data()
if not model_saved:
model = Sequential()
model.add(BatchNormalization(input_shape=training_generator.dim))
model.add(Dropout(0.2))
model.add(Conv1D(32, kernel_size=9, strides=3))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv1D(16, kernel_size=5, strides=2))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv1D(8, kernel_size=3))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv1D(1, kernel_size=3))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Flatten())
if nn_choice == USE_OVERFITTING_ORIGINAL_NW:
model.add(Dense(1024, kernel_regularizer=keras.regularizers.l1_l2(l1=5e-05, l2=5e-05)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(256, kernel_regularizer=keras.regularizers.l1_l2(l1=0.0005, l2=0.0005)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(64, kernel_regularizer=keras.regularizers.l1_l2(l1=5e-05, l2=5e-05)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(16, kernel_regularizer=keras.regularizers.l1_l2(l1=1e-05, l2=1e-05)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(num_classes, kernel_regularizer=keras.regularizers.l1_l2(l1=5e-05, l2=5e-05), activation='softmax'))
elif nn_choice == USE_DROPOUT_REGULARIZATION_NW:
dropout_block(model, no_nodes=2 ** 13, dropout=0.3, reg=keras.regularizers.l1_l2(l1=5e-05, l2=5e-05))
dropout_block(model, no_nodes=2 ** 12, dropout=0.3, reg=keras.regularizers.l1_l2(l1=5e-05, l2=5e-05))
dropout_block(model, no_nodes=2048, dropout=0.3, reg=keras.regularizers.l1_l2(l1=5e-05, l2=5e-05))
dropout_block(model, no_nodes=1536, dropout=0.2, reg=keras.regularizers.l1_l2(l1=5e-05, l2=5e-05))
dropout_block(model, no_nodes=1024, dropout=0.2, reg=keras.regularizers.l1_l2(l1=5e-05, l2=5e-05))
dropout_block(model, no_nodes=784, dropout=0.2, reg=keras.regularizers.l1_l2(l1=0.0005, l2=0.0005))
dropout_block(model, no_nodes=512, dropout=0.2, reg=keras.regularizers.l1_l2(l1=0.0005, l2=0.0005))
dropout_block(model, no_nodes=256, dropout=0.15, reg=keras.regularizers.l1_l2(l1=0.0005, l2=0.0005))
dropout_block(model, no_nodes=128, dropout=0.15, reg=keras.regularizers.l1_l2(l1=0.0005, l2=0.0005))
dropout_block(model, no_nodes=64, dropout=0.1, reg=keras.regularizers.l1_l2(l1=5e-05, l2=5e-05))
dropout_block(model, no_nodes=16, dropout=0.1, reg=keras.regularizers.l1_l2(l1=1e-05, l2=1e-05))
model.add(Flatten())
model.add(Dense(num_classes, kernel_regularizer=keras.regularizers.l1_l2(l1=5e-05, l2=5e-05), activation='softmax'))
elif nn_choice == USE_DROPOUT_NO_REGULARIZATION_NW:
dropout_block(model, no_nodes=2048, dropout=0.4)
dropout_block(model, no_nodes=1536, dropout=0.3)
dropout_block(model, no_nodes=1024, dropout=0.3)
dropout_block(model, no_nodes=784, dropout=0.3)
dropout_block(model, no_nodes=512, dropout=0.3)
dropout_block(model, no_nodes=256, dropout=0.25)
dropout_block(model, no_nodes=128, dropout=0.25)
dropout_block(model, no_nodes=64, dropout=0.2)
dropout_block(model, no_nodes=16, dropout=0.1)
model.add(Dense(num_classes, kernel_regularizer=keras.regularizers.l1_l2(l1=5e-05, l2=5e-05), activation='softmax'))
model.summary()
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
else:
model = keras.models.load_model(MODEL_NAME)
es = CustomEarlyStopping(patience=4)
cp = tf.keras.callbacks.ModelCheckpoint(filepath=CHECKPOINT_FILEPATH, save_weights_only=True, save_freq=16)
if checkpoint_saved:
model.load_weights(CHECKPOINT_FILEPATH)
t_start = perf_counter()
model.fit(training_generator, validation_data=validation_generator, epochs=epochs, use_multiprocessing=False, verbose=1, callbacks=[history, es, cp])
# model.fit(training_generator, epochs=epochs, verbose=1,
#           callbacks=[history, es])
t_stop = perf_counter()
t_diff = t_stop - t_start
print('Time to train the network {} seconds'.format(t_diff))
train_score = model.evaluate(training_generator, verbose=0)
print('Train loss: {}, Train accuracy: {}'.format(train_score[0], train_score[1]))
test_score = model.evaluate(validation_generator, verbose=0)
print('Test loss: {}, Test accuracy: {}'.format(test_score[0], test_score[1]))
|
code
|
49127363/cell_4
|
[
"text_plain_output_1.png"
] |
from collections import Counter
from time import perf_counter
import os
import numpy as np
import matplotlib.pyplot as plt
import soundfile as sf
import pywt
import tensorflow as tf
from tensorflow.python.client import device_lib
import keras
from keras.models import Sequential
from keras.layers import Conv1D, BatchNormalization, Dense, Flatten, Activation, Dropout
from keras.callbacks import History, EarlyStopping
from keras.utils import Sequence
from tensorflow.keras.layers.experimental import preprocessing
history = History()
generator_init = False
model_saved = False
checkpoint_saved = False
SAVED_MODEL_PATH = './'
MODEL_NAME = SAVED_MODEL_PATH + 'langid_model'
CHECKPOINT_FILEPATH = MODEL_NAME + '_CP'
USE_OVERFITTING_ORIGINAL_NW = 1
USE_DROPOUT_NO_REGULARIZATION_NW = 2
USE_DROPOUT_REGULARIZATION_NW = 3
nn_choice = USE_DROPOUT_REGULARIZATION_NW
WAVELET = 'bior6.8'
train_path = '../input/spoken-language-identification/train/train/'
test_path = '../input/spoken-language-identification/test/test/'
class langidDataGenerator(Sequence):
"""Generates data for Keras"""
def __init__(self, list_IDs, labels, wavelet='rbio3.1', drop_levels=None, batch_size=32, n_channels=1, n_classes=6, shuffle=True):
"""Initialization"""
self.wvlt = wavelet
        # guard against drop_levels=None, which would otherwise break -drop_levels
        self.drop_lvls = slice(0, -drop_levels) if drop_levels else slice(None)
self.batch_size = batch_size
self.labels = labels
self.list_IDs = list_IDs
self.n_channels = n_channels
self.n_classes = n_classes
self.shuffle = shuffle
self.on_epoch_end()
X, y = self.__data_generation(self.list_IDs[:2])
X = np.expand_dims(X, 2)
self.dim = X.shape[1:]
def __len__(self):
"""Denotes the number of batches per epoch"""
return int(np.floor(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
"""Generate one batch of data"""
indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
list_IDs_temp = [self.list_IDs[k] for k in indexes]
X, y = self.__data_generation(list_IDs_temp)
X = tf.expand_dims(X, 2)
return (X, y)
def on_epoch_end(self):
"""Updates indexes after each epoch"""
        self.indexes = tf.range(len(self.list_IDs))
        if self.shuffle:
            # tf.random.shuffle returns a shuffled copy; reassign it or the
            # epoch order never actually changes
            self.indexes = tf.random.shuffle(self.indexes)
def wavelet_features(self, list_IDs_temp):
features = []
y = []
for ID in list_IDs_temp:
signal, fs = sf.read(ID)
list_coeff = pywt.wavedec(signal, self.wvlt, mode='per')
dwt_local_coeff = []
end_flag = 0
for coeff in list_coeff[self.drop_lvls]:
dwt_local_coeff.extend(coeff)
features.append(dwt_local_coeff)
y.append(self.labels[ID])
X = tf.convert_to_tensor(features)
return (X, y)
def __data_generation(self, list_IDs_temp):
"""Generates data containing batch_size samples"""
X, y = self.wavelet_features(list_IDs_temp)
return (X, keras.utils.to_categorical(y, num_classes=self.n_classes))
"""
DATA_FOLDER = '../datasets/langid/'
train_path = DATA_FOLDER+'test/'
test_path = DATA_FOLDER+'test/'
"""
train_labels = []
for filename in os.listdir(train_path):
train_labels.append(filename[:4])
test_labels = []
for filename in os.listdir(test_path):
test_labels.append(filename[:4])
lb = 0
labeld = {}
for k in Counter(train_labels).keys():
labeld[k] = lb
lb = lb + 1
num_classes = lb
train_files = []
train_labels2 = {}
for filename in os.listdir(train_path):
train_files.append(train_path + filename)
train_labels2[train_path + filename] = labeld[filename[:4]]
test_files = []
test_labels2 = {}
for filename in os.listdir(test_path):
test_files.append(test_path + filename)
test_labels2[test_path + filename] = labeld[filename[:4]]
sig, f = sf.read(train_files[0])
siglen = len(sig)
print('Signal length is {}, sampling frequency {}'.format(siglen, f))
|
code
|
49127363/cell_6
|
[
"text_plain_output_1.png"
] |
drop_levels = 2
"\n#ss = np.random.random_sample(2**17)\nsig_dwt = pywt.wavedec(sig,WAVELET,mode='per')\nprint('# of levels decomposed {}'.format(dec_lvls))\n"
|
code
|
49127363/cell_1
|
[
"text_plain_output_1.png"
] |
import os
from time import perf_counter
import numpy as np
import soundfile as sf
from collections import Counter
import matplotlib.pyplot as plt
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
import tensorflow as tf
import keras
from keras.layers import Conv1D, BatchNormalization, Dense, Flatten, Activation, Dropout
from tensorflow.keras.layers.experimental import preprocessing
from keras.models import Sequential
from keras.callbacks import History, EarlyStopping
history = History()
generator_init = False
model_saved = False
checkpoint_saved = False
SAVED_MODEL_PATH = './'
MODEL_NAME = SAVED_MODEL_PATH + 'langid_model'
CHECKPOINT_FILEPATH = MODEL_NAME + '_CP'
USE_OVERFITTING_ORIGINAL_NW = 1
USE_DROPOUT_NO_REGULARIZATION_NW = 2
USE_DROPOUT_REGULARIZATION_NW = 3
nn_choice = USE_DROPOUT_REGULARIZATION_NW
WAVELET = 'bior6.8'
train_path = '../input/spoken-language-identification/train/train/'
test_path = '../input/spoken-language-identification/test/test/'
|
code
|
72068883/cell_13
|
[
"text_plain_output_1.png"
] |
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score,precision_score,confusion_matrix,f1_score,classification_report
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X_train, y_train)
predicted = model.predict(X_test)
gb = GradientBoostingClassifier(n_estimators=100, max_depth=2, random_state=0)
gb.fit(X_train, y_train)
predicted = gb.predict(X_test)
print('Accuracy score: ', accuracy_score(y_test, predicted))
print('Precision score: ', precision_score(y_test, predicted))
print(classification_report(y_test, predicted))
|
code
|
72068883/cell_4
|
[
"text_plain_output_1.png"
] |
import pandas as pd
train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
train_df.isnull().sum()
|
code
|
72068883/cell_11
|
[
"text_plain_output_1.png"
] |
from sklearn import tree
from sklearn.metrics import accuracy_score,precision_score,confusion_matrix,f1_score,classification_report
classifier = tree.DecisionTreeClassifier(max_depth=2, random_state=0)
classifier.fit(X_train, y_train)
predictions = classifier.predict(X_test)
print(accuracy_score(y_test, predictions))
print(precision_score(y_test, predictions))
|
code
|
72068883/cell_19
|
[
"text_plain_output_1.png"
] |
from sklearn.metrics import accuracy_score,precision_score,confusion_matrix,f1_score,classification_report
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
svclassifier = SVC(C=1.0, kernel='linear')
svclassifier.fit(X_train, y_train)
y_pred = svclassifier.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print('Accuracy:', accuracy_score(y_test, y_pred))
print('Precision score: ', precision_score(y_test, y_pred))
|
code
|
72068883/cell_15
|
[
"text_plain_output_1.png"
] |
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score,precision_score,confusion_matrix,f1_score,classification_report
random_forest = RandomForestClassifier(n_estimators=180, max_depth=4, random_state=0)
random_forest.fit(X_train, y_train)
prediction = random_forest.predict(X_test)
print('Accuracy score: ', accuracy_score(y_test, prediction))
print('Precision score: ', precision_score(y_test, prediction))
print(classification_report(y_test, prediction))
|
code
|
72068883/cell_17
|
[
"text_plain_output_1.png"
] |
from sklearn.metrics import accuracy_score,precision_score,confusion_matrix,f1_score,classification_report
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
print('The accuracy of the Knn classifier on training data is {:.2f}'.format(knn.score(X_train, y_train)))
print('The accuracy of the Knn classifier on test data is {:.2f}'.format(knn.score(X_test, y_test)))
knnpre = knn.predict(X_test)
cm = confusion_matrix(y_test, knnpre)
print(cm)
|
code
|
72068883/cell_22
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
train_df.isnull().sum()
test_df.isnull().sum()
def train_preprocess(train_df):
train_df = train_df.fillna(train_df.groupby('Survived').transform('mean'))
train_df['Sex'] = pd.get_dummies(train_df['Sex'], drop_first=True)
train_df['Embarked'] = pd.get_dummies(train_df['Embarked'], drop_first=True)
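    # NOTE: get_dummies with drop_first on the 3-level Embarked column returns
    # two columns, so this single-column assignment would fail if the function
    # were actually called; an ordinal encoding would be needed here.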
X = np.asarray(train_df.drop(['Name', 'Survived', 'Cabin', 'Ticket'], axis=1))
y = np.asarray(train_df['Survived'])
return (X, y)
def test_preprocess(test_df):
for i in test_df.columns:
if test_df[i].isnull().sum() != 0:
if test_df[i].dtype == 'int64' or test_df[i].dtype == 'float64':
mean = test_df[str(i)].mean()
test_df[str(i)].replace(np.nan, mean, inplace=True)
test_df['Sex'] = pd.get_dummies(test_df['Sex'], drop_first=True)
test_df['Embarked'] = pd.get_dummies(test_df['Embarked'], drop_first=True)
test_df = test_df.drop(['Name', 'Cabin', 'Ticket'], axis=1)
X = np.asarray(test_df)
return X
submission_csv = pd.read_csv('./submission_gb.csv')
submission_csv
|
code
|
72068883/cell_12
|
[
"text_plain_output_1.png"
] |
from sklearn.metrics import accuracy_score,precision_score,confusion_matrix,f1_score,classification_report
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X_train, y_train)
predicted = model.predict(X_test)
print(accuracy_score(y_test, predicted))
print(precision_score(y_test, predicted, average='micro'))
print(classification_report(y_test, predicted))
|
code
|
72068883/cell_5
|
[
"text_html_output_1.png"
] |
import pandas as pd
train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
test_df.isnull().sum()
|
code
|
128040649/cell_9
|
[
"text_plain_output_1.png"
] |
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import os
import pandas as pd
covid_path = '/kaggle/input/covid19/COVID-19_Radiography_Dataset/COVID/images/'
normal_path = '/kaggle/input/covid19/COVID-19_Radiography_Dataset/Normal/images/'
base_dir = 'base_dir'
os.mkdir(base_dir)
train_dir = os.path.join(base_dir, 'train_dir')
os.mkdir(train_dir)
val_dir = os.path.join(base_dir, 'val_dir')
os.mkdir(val_dir)
test_dir = os.path.join(base_dir, 'test_dir')
os.mkdir(test_dir)
Normal = os.path.join(train_dir, 'Normal')
os.mkdir(Normal)
COVID = os.path.join(train_dir, 'COVID')
os.mkdir(COVID)
Normal = os.path.join(val_dir, 'Normal')
os.mkdir(Normal)
COVID = os.path.join(val_dir, 'COVID')
os.mkdir(COVID)
Normal = os.path.join(test_dir, 'Normal')
os.mkdir(Normal)
COVID = os.path.join(test_dir, 'COVID')
os.mkdir(COVID)
folder_1 = os.listdir(covid_path)
folder_1 = shuffle(folder_1)
folder_2 = os.listdir(normal_path)
folder_2 = shuffle(folder_2)
covid_data = pd.DataFrame(folder_1, columns=['FILE NAME'])
normal_data = pd.DataFrame(folder_2, columns=['FILE NAME'])
covid_data['Target'] = 'COVID'
normal_data['Target'] = 'Normal'
covid_data['Labels'] = '0'
normal_data['Labels'] = '1'
data = pd.concat([covid_data, normal_data], axis=0, sort=False)
data
y1 = data['Labels']
df_train, df_val_test = train_test_split(data, test_size=0.3, random_state=101, stratify=y1)
y2 = df_val_test['Labels']
df_val, df_test = train_test_split(df_val_test, test_size=0.5, random_state=101, stratify=y2)
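# 70/15/15 stratified split: 30% is held out first, then divided evenly into
# validation and test so class balance is preserved in all three sets.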
print(df_train.shape)
print(df_val.shape)
print(df_test.shape)
df_train[100:120]
|
code
|
128040649/cell_2
|
[
"text_html_output_1.png"
] |
import os
import cv2
import imageio
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras.metrics import binary_accuracy
from tensorflow.keras.layers import Activation
import shutil
import matplotlib.pyplot as plt
import plotly.offline as py
import plotly.figure_factory as ff
|
code
|
128040649/cell_8
|
[
"text_html_output_1.png"
] |
from sklearn.utils import shuffle
import os
import pandas as pd
covid_path = '/kaggle/input/covid19/COVID-19_Radiography_Dataset/COVID/images/'
normal_path = '/kaggle/input/covid19/COVID-19_Radiography_Dataset/Normal/images/'
base_dir = 'base_dir'
os.mkdir(base_dir)
train_dir = os.path.join(base_dir, 'train_dir')
os.mkdir(train_dir)
val_dir = os.path.join(base_dir, 'val_dir')
os.mkdir(val_dir)
test_dir = os.path.join(base_dir, 'test_dir')
os.mkdir(test_dir)
Normal = os.path.join(train_dir, 'Normal')
os.mkdir(Normal)
COVID = os.path.join(train_dir, 'COVID')
os.mkdir(COVID)
Normal = os.path.join(val_dir, 'Normal')
os.mkdir(Normal)
COVID = os.path.join(val_dir, 'COVID')
os.mkdir(COVID)
Normal = os.path.join(test_dir, 'Normal')
os.mkdir(Normal)
COVID = os.path.join(test_dir, 'COVID')
os.mkdir(COVID)
folder_1 = os.listdir(covid_path)
folder_1 = shuffle(folder_1)
folder_2 = os.listdir(normal_path)
folder_2 = shuffle(folder_2)
covid_data = pd.DataFrame(folder_1, columns=['FILE NAME'])
normal_data = pd.DataFrame(folder_2, columns=['FILE NAME'])
covid_data['Target'] = 'COVID'
normal_data['Target'] = 'Normal'
covid_data['Labels'] = '0'
normal_data['Labels'] = '1'
data = pd.concat([covid_data, normal_data], axis=0, sort=False)
data
|
code
|
128040649/cell_10
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
from sklearn.utils import shuffle
import os
import pandas as pd
covid_path = '/kaggle/input/covid19/COVID-19_Radiography_Dataset/COVID/images/'
normal_path = '/kaggle/input/covid19/COVID-19_Radiography_Dataset/Normal/images/'
base_dir = 'base_dir'
os.mkdir(base_dir)
train_dir = os.path.join(base_dir, 'train_dir')
os.mkdir(train_dir)
val_dir = os.path.join(base_dir, 'val_dir')
os.mkdir(val_dir)
test_dir = os.path.join(base_dir, 'test_dir')
os.mkdir(test_dir)
Normal = os.path.join(train_dir, 'Normal')
os.mkdir(Normal)
COVID = os.path.join(train_dir, 'COVID')
os.mkdir(COVID)
Normal = os.path.join(val_dir, 'Normal')
os.mkdir(Normal)
COVID = os.path.join(val_dir, 'COVID')
os.mkdir(COVID)
Normal = os.path.join(test_dir, 'Normal')
os.mkdir(Normal)
COVID = os.path.join(test_dir, 'COVID')
os.mkdir(COVID)
folder_1 = os.listdir(covid_path)
folder_1 = shuffle(folder_1)
folder_2 = os.listdir(normal_path)
folder_2 = shuffle(folder_2)
covid_data = pd.DataFrame(folder_1, columns=['FILE NAME'])
normal_data = pd.DataFrame(folder_2, columns=['FILE NAME'])
covid_data['Target'] = 'COVID'
normal_data['Target'] = 'Normal'
covid_data['Labels'] = '0'
normal_data['Labels'] = '1'
data = pd.concat([covid_data, normal_data], axis=0, sort=False)
data
data.set_index('FILE NAME', inplace=True)
data
|
code
|
128040649/cell_5
|
[
"text_html_output_1.png",
"text_plain_output_1.png"
] |
import cv2
import matplotlib.pyplot as plt
import numpy as np
covid_path = '/kaggle/input/covid19/COVID-19_Radiography_Dataset/COVID/images/'
normal_path = '/kaggle/input/covid19/COVID-19_Radiography_Dataset/Normal/images/'
def img_preprocessing(image_path):
img = cv2.imread(image_path, 0)
org_img = img.copy()
brightest = np.max(img)
darkest = np.min(img)
T = darkest + 0.9 * (brightest - darkest)
thre_img = cv2.threshold(img, T, 255, cv2.THRESH_BINARY)
thre_img = thre_img[1]
kernel = np.ones((5, 5), np.uint8)
cleaned = cv2.erode(thre_img, kernel, iterations=5)
cleaned = cv2.dilate(cleaned, kernel, iterations=5)
cleaned = cleaned // 255
img = img * cleaned
img = org_img - img
dim = (224, 224)
img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
B = cv2.bilateralFilter(img, 9, 75, 75)
R = cv2.equalizeHist(img)
new_img = cv2.merge((B, img, R))
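    # Stack bilateral-filtered, masked-original, and histogram-equalized views
    # as three channels, presumably so an RGB CNN can consume the X-ray.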
return new_img
img = img_preprocessing(covid_path + 'COVID-1.png')
plt.imshow(img)
|
code
|
128006817/cell_4
|
[
"text_html_output_1.png"
] |
import pandas as pd
df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.head()
|
code
|
128006817/cell_6
|
[
"text_html_output_1.png"
] |
import pandas as pd
df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
|
code
|
128006817/cell_2
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
import plotly.express as px
|
code
|
128006817/cell_18
|
[
"text_html_output_2.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
df_rating = df.groupby('City')['Rating'].mean().reset_index()
df_rating = df_rating.sort_values('Rating')
average_cost = df.groupby(['City'])['Cost'].mean().reset_index()
fig = px.bar(average_cost, x='City', y='Cost', labels={'City': 'City', 'Cost': 'Average Cost of Restaurants'},
             title='Average Cost in Each City', color='City')
fig.show()
avg_vote = df.groupby(['City'])['Votes'].mean().reset_index()
fig = px.bar(avg_vote, x='City', y='Votes', labels={'City': 'City', 'Votes': 'Average Number of Votes of Restaurants'},
             title='Average Votes in Each City', color='City')
fig.show()
max_votes = df.groupby(['City'])['Votes'].sum().reset_index()
fig = px.bar(max_votes, x='City', y='Votes', labels={'City': 'City', 'Votes': 'Number of Votes of Restaurants'},
             title='Top Votes in Each City', color='City')
fig.show()
df_cuisine = df.groupby(['City', 'Cuisine'])['Name'].count().reset_index()
df_top_cuisine = df_cuisine.loc[df_cuisine.groupby('City')['Name'].idxmax()]
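# idxmax over the per-city counts keeps, for each city, the single cuisine row
# with the most restaurants.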
fig = px.bar(df_top_cuisine, x='City', y='Name', color='Cuisine', labels={'City': 'City', 'Name': 'Number of Restaurants'}, title='Top Cuisine in Each City')
fig.show()
|
code
|
128006817/cell_8
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
df_rating = df.groupby('City')['Rating'].mean().reset_index()
df_rating = df_rating.sort_values('Rating')
plt.figure(figsize=(16, 10))
plt.pie(df_rating['Rating'], labels=df_rating['City'], autopct='%1.2f%%')
plt.title('Comparison of Average Ratings Across Cities')
plt.show()
|
code
|
128006817/cell_16
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
df_rating = df.groupby('City')['Rating'].mean().reset_index()
df_rating = df_rating.sort_values('Rating')
average_cost = df.groupby(['City'])['Cost'].mean().reset_index()
fig = px.bar(average_cost, x='City', y='Cost', labels={'City': 'City', 'Cost': 'Average Cost of Restaurants'}, title='Average Cost in Each City', color='City')
fig.show()
avg_vote = df.groupby(['City'])['Votes'].mean().reset_index()
fig = px.bar(avg_vote, x='City', y='Votes', labels={'City': 'City', 'Votes': 'Average Number of Votes of Restaurants'}, title='Average Votes in Each City', color='City')
fig.show()
max_votes = df.groupby(['City'])['Votes'].sum().reset_index()
fig = px.bar(max_votes, x='City', y='Votes', labels={'City': 'City', 'Votes': 'Number of Votes of Restaurants'}, title='Top Votes in Each City', color='City')
fig.show()
|
code
|
128006817/cell_14
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
df_rating = df.groupby('City')['Rating'].mean().reset_index()
df_rating = df_rating.sort_values('Rating')
average_cost = df.groupby(['City'])['Cost'].mean().reset_index()
fig = px.bar(average_cost, x='City', y='Cost', labels={'City': 'City', 'Cost': 'Average Cost of Restaurants'}, title='Average Cost in Each City', color='City')
fig.show()
avg_vote = df.groupby(['City'])['Votes'].mean().reset_index()
fig = px.bar(avg_vote, x='City', y='Votes', labels={'City': 'City', 'Votes': 'Average Number of Votes of Restaurants'}, title='Average Votes in Each City', color='City')
fig.show()
|
code
|
128006817/cell_10
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
df_rating = df.groupby('City')['Rating'].mean().reset_index()
df_rating = df_rating.sort_values('Rating')
average_cost = df.groupby(['City'])['Cost'].mean().reset_index()
fig = px.bar(average_cost, x='City', y='Cost', labels={'City': 'City', 'Cost': 'Average Cost of Restaurants'}, title='Average Cost in Each City', color='City')
fig.show()
|
code
|
128006817/cell_12
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
import seaborn as sns
df = pd.read_csv('/kaggle/input/indian-restaurants-2023/restaurants.csv')
df.isnull().sum()
df_rating = df.groupby('City')['Rating'].mean().reset_index()
df_rating = df_rating.sort_values('Rating')
average_cost = df.groupby(['City'])['Cost'].mean().reset_index()
fig = px.bar(average_cost, x='City', y='Cost', labels={'City': 'City', 'Cost': 'Average Cost of Restaurants'}, title='Average Cost in Each City', color='City')
fig.show()
sns.countplot(x='City', data=df)
plt.xticks(rotation=90, fontsize=12)
plt.ylabel('Count of Restaurants')
|
code
|
90139661/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
90139661/cell_5
|
[
"text_plain_output_2.png",
"text_plain_output_1.png"
] |
import gc
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/challenges-in-representation-learning-facial-expression-recognition-challenge/icml_face_data.csv')
dataset.columns = ['emotion', 'Usage', 'pixels']
test_dataset = dataset.loc[dataset['Usage'] == 'PublicTest', ['emotion', 'pixels']]
train_dataset = dataset.loc[dataset['Usage'] == 'Training', ['emotion', 'pixels']]
validation_dataset = dataset.loc[dataset['Usage'] == 'PrivateTest', ['emotion', 'pixels']]
def pixels_to_array(pixels):
array = np.array(pixels.split(), 'uint8')
return array
def image_reshape(data):
image = np.reshape(data['pixels'].to_list(), (data.shape[0], 48, 48, 1))
image = np.repeat(image, 3, -1)
return image
train_dataset['pixels'] = train_dataset['pixels'].apply(pixels_to_array)
test_dataset['pixels'] = test_dataset['pixels'].apply(pixels_to_array)
validation_dataset['pixels'] = validation_dataset['pixels'].apply(pixels_to_array)
print('Train:')
print(type(train_dataset['pixels']))
print(train_dataset.shape)
print('Validation:')
print(type(validation_dataset['pixels']))
print(validation_dataset.shape)
print('Test:')
print(type(test_dataset['pixels']))
print(test_dataset.shape)
X_train = image_reshape(train_dataset)
y_train = train_dataset['emotion']
print(X_train.shape)
X_test = image_reshape(test_dataset)
y_test = test_dataset['emotion']
print(X_test.shape)
X_val = image_reshape(validation_dataset)
y_val = validation_dataset['emotion']
print(X_val.shape)
del dataset
gc.collect()
|
code
|
128030655/cell_21
|
[
"text_html_output_1.png"
] |
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
def clean_text(txt):
txt = ''.join((v for v in txt if v not in string.punctuation)).lower()
txt = txt.encode('utf8').decode('ascii', 'ignore')
return txt
print('BEFORE .. \n', headlines[0])
corpus = [clean_text(x) for x in headlines]
print('\n AFTER .. \n ', corpus[0])
|
code
|
128030655/cell_13
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
full_txt = ' '.join(map(str, headlines))
nlp = spacy.load('en_core_web_sm')
stopword = nltk.corpus.stopwords.words('english')
def text_cleaning(text):
text = re.sub('[^\\w\\s]', '', str(text))
text = re.split('\\W+', text)
text = [word for word in text if word not in stopword]
text = ' '.join(text)
return text
def frequent_of_words(string):
clean_string = text_cleaning(string)
split_string = pd.DataFrame(clean_string.split(), columns=['Words'])
    split_string = split_string.value_counts()[:1000].reset_index(drop=False)
split_string.columns = ['Words', 'Count']
return split_string
frequent_words = frequent_of_words(full_txt)
frequent_words[:15].style.background_gradient(cmap='Blues')
|
code
|
128030655/cell_9
|
[
"text_plain_output_1.png"
] |
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
headlines[:10]
|
code
|
128030655/cell_4
|
[
"image_output_1.png"
] |
import tensorflow as tf
import pandas as pd
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import numpy as np
import matplotlib.pyplot as plt
from spacy import displacy
import plotly.express as px
from tensorflow import keras
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
|
code
|
128030655/cell_33
|
[
"text_plain_output_1.png"
] |
from tensorflow import keras
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
import tensorflow as tf
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
def clean_text(txt):
txt = ''.join((v for v in txt if v not in string.punctuation)).lower()
txt = txt.encode('utf8').decode('ascii', 'ignore')
return txt
corpus = [clean_text(x) for x in headlines]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
input_sequences = []
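# build n-gram prefixes: every prefix of length >= 2 of each tokenized headline becomes one training sequence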
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i + 1]
input_sequences.append(n_gram_sequence)
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
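# all tokens except the last form the input; the final token of each padded sequence is the target word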
xs, labels = (input_sequences[:, :-1], input_sequences[:, -1])
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
embedding_dim = 100
lstm_units = 150
learning_rate = 0.01
model = Sequential([Embedding(total_words, embedding_dim, input_length=max_sequence_len - 1), Bidirectional(LSTM(lstm_units)), Dense(total_words, activation='softmax')])
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), metrics=['accuracy'])
keras.utils.plot_model(model, show_shapes=True)
|
code
|
128030655/cell_29
|
[
"text_plain_output_1.png"
] |
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
import tensorflow as tf
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
def clean_text(txt):
txt = ''.join((v for v in txt if v not in string.punctuation)).lower()
txt = txt.encode('utf8').decode('ascii', 'ignore')
return txt
corpus = [clean_text(x) for x in headlines]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
input_sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i + 1]
input_sequences.append(n_gram_sequence)
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
xs, labels = (input_sequences[:, :-1], input_sequences[:, -1])
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
sentence = corpus[0].split()
token_list = []
for word in sentence:
token_list.append(tokenizer.word_index[word])
elem_number = 7
print(f'token list: {xs[elem_number]}')
print(f'decoded to text: {tokenizer.sequences_to_texts([xs[elem_number]])}')
|
code
|
128030655/cell_7
|
[
"text_plain_output_1.png"
] |
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
len(headlines)
|
code
|
128030655/cell_38
|
[
"text_plain_output_1.png"
] |
from tensorflow import keras
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import matplotlib.pyplot as plt
import numpy as np
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
import tensorflow as tf
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
full_txt = ' '.join(map(str, headlines))
nlp = spacy.load('en_core_web_sm')
stopword = nltk.corpus.stopwords.words('english')
def text_cleaning(text):
text = re.sub('[^\\w\\s]', '', str(text))
text = re.split('\\W+', text)
text = [word for word in text if word not in stopword]
text = ' '.join(text)
return text
def frequent_of_words(string):
clean_string = text_cleaning(string)
split_string = pd.DataFrame(clean_string.split(), columns=['Words'])
    split_string = split_string.value_counts()[:1000].reset_index(drop=False)
split_string.columns = ['Words', 'Count']
return split_string
frequent_words = frequent_of_words(full_txt)
frequent_words[:15].style.background_gradient(cmap='Blues')
name_list = ['Trump', 'Obama']
scripts = []
split_string = full_txt.split()
for name in name_list:
scripts.append((name, split_string.count(name)))
colors = ['#2F86A6', '#F2F013']
sections = [scripts[0][1], scripts[1][1]]
plt.axis('equal')
def clean_text(txt):
txt = ''.join((v for v in txt if v not in string.punctuation)).lower()
txt = txt.encode('utf8').decode('ascii', 'ignore')
return txt
corpus = [clean_text(x) for x in headlines]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
input_sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i + 1]
input_sequences.append(n_gram_sequence)
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
xs, labels = (input_sequences[:, :-1], input_sequences[:, -1])
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
embedding_dim = 100
lstm_units = 150
learning_rate = 0.01
model = Sequential([Embedding(total_words, embedding_dim, input_length=max_sequence_len - 1), Bidirectional(LSTM(lstm_units)), Dense(total_words, activation='softmax')])
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), metrics=['accuracy'])
history = model.fit(xs, ys, epochs=100)
def plot_loss_curves(history):
"""
returns seperate loss curves for training and validation metrics
"""
train_loss = history.history['loss']
train_accuracy = history.history['accuracy']
epochs = range(1, len(history.history['loss']) + 1)
plt.figure(figsize=(8, 3))
plt.subplot(1, 2, 2)
plt.plot(epochs, train_accuracy, label='training_acc')
plt.title('Accuracy curves', size=5)
plt.xlabel('epochs', size=5)
plt.ylabel('Accuracy', size=5)
plt.tight_layout()
plt.legend(fontsize=10)
plt.subplot(1, 2, 1)
plt.plot(epochs, train_loss, label='training_loss')
plt.title('Loss curves', size=5)
plt.xlabel('epochs', size=5)
plt.ylabel('loss', size=5)
plt.legend(fontsize=10)
plt.title('Model Performance Curves')
plot_loss_curves(history)
|
code
|
128030655/cell_17
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
full_txt = ' '.join(map(str, headlines))
nlp = spacy.load('en_core_web_sm')
stopword = nltk.corpus.stopwords.words('english')
def text_cleaning(text):
text = re.sub('[^\\w\\s]', '', str(text))
text = re.split('\\W+', text)
text = [word for word in text if word not in stopword]
text = ' '.join(text)
return text
def frequent_of_words(string):
clean_string = text_cleaning(string)
split_string = pd.DataFrame(clean_string.split(), columns=['Words'])
    split_string = split_string.value_counts()[:1000].reset_index(drop=False)
split_string.columns = ['Words', 'Count']
return split_string
frequent_words = frequent_of_words(full_txt)
frequent_words[:15].style.background_gradient(cmap='Blues')
name_list = ['Trump', 'Obama']
scripts = []
split_string = full_txt.split()
for name in name_list:
scripts.append((name, split_string.count(name)))
colors = ['#2F86A6', '#F2F013']
sections = [scripts[0][1], scripts[1][1]]
plt.figure(figsize=(6, 6), dpi=75)
plt.pie(sections, labels=name_list, colors=colors, wedgeprops=dict(alpha=1), startangle=90, autopct='%0.1f%%', textprops={'fontsize': 15, 'fontweight': 'normal'})
plt.axis('equal')
plt.title('Script Count', fontsize=20)
plt.show()
|
code
|
128030655/cell_35
|
[
"text_plain_output_1.png"
] |
from tensorflow import keras
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
import tensorflow as tf
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
def clean_text(txt):
txt = ''.join((v for v in txt if v not in string.punctuation)).lower()
txt = txt.encode('utf8').decode('ascii', 'ignore')
return txt
corpus = [clean_text(x) for x in headlines]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
input_sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i + 1]
input_sequences.append(n_gram_sequence)
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
xs, labels = (input_sequences[:, :-1], input_sequences[:, -1])
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
embedding_dim = 100
lstm_units = 150
learning_rate = 0.01
model = Sequential([Embedding(total_words, embedding_dim, input_length=max_sequence_len - 1), Bidirectional(LSTM(lstm_units)), Dense(total_words, activation='softmax')])
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), metrics=['accuracy'])
history = model.fit(xs, ys, epochs=100)
|
code
|
128030655/cell_43
|
[
"text_plain_output_1.png"
] |
from tensorflow import keras
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
import tensorflow as tf
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
def clean_text(txt):
txt = ''.join((v for v in txt if v not in string.punctuation)).lower()
txt = txt.encode('utf8').decode('ascii', 'ignore')
return txt
corpus = [clean_text(x) for x in headlines]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
input_sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i + 1]
input_sequences.append(n_gram_sequence)
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
xs, labels = (input_sequences[:, :-1], input_sequences[:, -1])
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
sentence = corpus[0].split()
token_list = []
for word in sentence:
token_list.append(tokenizer.word_index[word])
elem_number = 7
embedding_dim = 100
lstm_units = 150
learning_rate = 0.01
model = Sequential([Embedding(total_words, embedding_dim, input_length=max_sequence_len - 1), Bidirectional(LSTM(lstm_units)), Dense(total_words, activation='softmax')])
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), metrics=['accuracy'])
history = model.fit(xs, ys, epochs=100)
def expect_next_sequence(seed_text, next_words):
for _ in range(next_words):
token_list = tokenizer.texts_to_sequences([seed_text])[0]
token_list = pad_sequences([token_list], maxlen=max_sequence_len - 1, padding='pre')
probabilities = model.predict(token_list)
predicted = np.argmax(probabilities, axis=-1)[0]
if predicted != 0:
output_word = tokenizer.index_word[predicted]
            seed_text += ' ' + output_word
    # return the generated text so the notebook can display it
    return seed_text
seed_text = 'White House Will'
next_words = 5
expect_next_sequence(seed_text, next_words)
|
code
|
128030655/cell_31
|
[
"text_plain_output_1.png"
] |
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
import tensorflow as tf
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
def clean_text(txt):
txt = ''.join((v for v in txt if v not in string.punctuation)).lower()
txt = txt.encode('utf8').decode('ascii', 'ignore')
return txt
corpus = [clean_text(x) for x in headlines]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
input_sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i + 1]
input_sequences.append(n_gram_sequence)
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
xs, labels = (input_sequences[:, :-1], input_sequences[:, -1])
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
sentence = corpus[0].split()
token_list = []
for word in sentence:
token_list.append(tokenizer.word_index[word])
elem_number = 7
numpy.set_printoptions(threshold=sys.maxsize)
print(f'one-hot label: {ys[elem_number]}')
print(f'index of label: {np.argmax(ys[elem_number])}')
|
code
|
128030655/cell_46
|
[
"image_output_1.png"
] |
from tensorflow import keras
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
import tensorflow as tf
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
def clean_text(txt):
txt = ''.join((v for v in txt if v not in string.punctuation)).lower()
txt = txt.encode('utf8').decode('ascii', 'ignore')
return txt
corpus = [clean_text(x) for x in headlines]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
input_sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i + 1]
input_sequences.append(n_gram_sequence)
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
xs, labels = (input_sequences[:, :-1], input_sequences[:, -1])
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
sentence = corpus[0].split()
token_list = []
for word in sentence:
token_list.append(tokenizer.word_index[word])
elem_number = 7
embedding_dim = 100
lstm_units = 150
learning_rate = 0.01
model = Sequential([Embedding(total_words, embedding_dim, input_length=max_sequence_len - 1), Bidirectional(LSTM(lstm_units)), Dense(total_words, activation='softmax')])
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), metrics=['accuracy'])
history = model.fit(xs, ys, epochs=100)
def expect_next_sequence(seed_text, next_words):
for _ in range(next_words):
token_list = tokenizer.texts_to_sequences([seed_text])[0]
token_list = pad_sequences([token_list], maxlen=max_sequence_len - 1, padding='pre')
probabilities = model.predict(token_list)
predicted = np.argmax(probabilities, axis=-1)[0]
if predicted != 0:
output_word = tokenizer.index_word[predicted]
            seed_text += ' ' + output_word
    # return the generated text so the notebook can display it
    return seed_text
seed_text = 'White House Will'
next_words = 5
expect_next_sequence(seed_text, next_words)
def expect_next_sequence_max_probability(seed_text, next_words):
for _ in range(next_words):
token_list = tokenizer.texts_to_sequences([seed_text])[0]
token_list = pad_sequences([token_list], maxlen=max_sequence_len - 1, padding='pre')
probabilities = model.predict(token_list)
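        # sample one of the three most probable tokens so the generated text is less repetitive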
choice = np.random.choice([1, 2, 3])
predicted = np.argsort(probabilities)[0][-choice]
if predicted != 0:
output_word = tokenizer.index_word[predicted]
            seed_text += ' ' + output_word
    # return the generated text so the notebook can display it
    return seed_text
expect_next_sequence_max_probability(seed_text, next_words)
|
code
|
128030655/cell_24
|
[
"text_html_output_2.png"
] |
from tensorflow.keras.preprocessing.text import Tokenizer
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
def clean_text(txt):
txt = ''.join((v for v in txt if v not in string.punctuation)).lower()
txt = txt.encode('utf8').decode('ascii', 'ignore')
return txt
corpus = [clean_text(x) for x in headlines]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
print(f'total words: {total_words}')
|
code
|
128030655/cell_14
|
[
"text_plain_output_1.png"
] |
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
import plotly.express as px
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
full_txt = ' '.join(map(str, headlines))
nlp = spacy.load('en_core_web_sm')
stopword = nltk.corpus.stopwords.words('english')
def text_cleaning(text):
text = re.sub('[^\\w\\s]', '', str(text))
text = re.split('\\W+', text)
text = [word for word in text if word not in stopword]
text = ' '.join(text)
return text
def frequent_of_words(string):
clean_string = text_cleaning(string)
split_string = pd.DataFrame(clean_string.split(), columns=['Words'])
    split_string = split_string.value_counts()[:1000].reset_index(drop=False)
split_string.columns = ['Words', 'Count']
return split_string
frequent_words = frequent_of_words(full_txt)
frequent_words[:15].style.background_gradient(cmap='Blues')
fig = px.funnel(frequent_words[:15], x='Count', y='Words')
fig.show()
|
code
|
128030655/cell_27
|
[
"image_output_1.png"
] |
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np
import os, string, sys, numpy, spacy, nltk, re, random, timeit
import pandas as pd
import tensorflow as tf
working_dir = '../input/nyt-comments/'
headlines = []
for filename in os.listdir(working_dir):
if 'Articles' in filename:
article_df = pd.read_csv(working_dir + filename)
headlines.extend(list(article_df.headline.values))
all_headlines = [x for x in headlines if x != 'Unknown']
def clean_text(txt):
txt = ''.join((v for v in txt if v not in string.punctuation)).lower()
txt = txt.encode('utf8').decode('ascii', 'ignore')
return txt
corpus = [clean_text(x) for x in headlines]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
input_sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i + 1]
input_sequences.append(n_gram_sequence)
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
xs, labels = (input_sequences[:, :-1], input_sequences[:, -1])
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
sentence = corpus[0].split()
print(f'sample sentence: {sentence}')
token_list = []
for word in sentence:
print(word)
token_list.append(tokenizer.word_index[word])
print(token_list)
|
code
|
122257913/cell_21
|
[
"text_html_output_1.png"
] |
from sklearn import linear_model
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.sample(5)
train = train.dropna()
slr = linear_model.LinearRegression()
X_train = np.array(train.iloc[:, :-1].values)
y_train = np.array(train.iloc[:, 1].values)
X_test = np.array(test.iloc[:, :-1].values)
y_test = np.array(test.iloc[:, 1].values)
slr.fit(X_train, y_train)
|
code
|
122257913/cell_13
|
[
"text_plain_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.sample(5)
import matplotlib.pyplot as plt
plt.scatter('x', 'y', data=train)
|
code
|
122257913/cell_9
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.head()
|
code
|
122257913/cell_34
|
[
"image_output_1.png"
] |
from sklearn import linear_model
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.sample(5)
train = train.dropna()
slr = linear_model.LinearRegression()
X_train = np.array(train.iloc[:, :-1].values)
y_train = np.array(train.iloc[:, 1].values)
X_test = np.array(test.iloc[:, :-1].values)
y_test = np.array(test.iloc[:, 1].values)
slr.fit(X_train, y_train)
slr.coef_
slr.intercept_
y_pred = slr.predict(X_test)
r2_score(y_test, y_pred)
|
code
|
122257913/cell_23
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from sklearn import linear_model
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.sample(5)
train = train.dropna()
slr = linear_model.LinearRegression()
X_train = np.array(train.iloc[:, :-1].values)
y_train = np.array(train.iloc[:, 1].values)
X_test = np.array(test.iloc[:, :-1].values)
y_test = np.array(test.iloc[:, 1].values)
slr.fit(X_train, y_train)
slr.coef_
slr.intercept_
|
code
|
122257913/cell_30
|
[
"text_plain_output_1.png"
] |
from sklearn import linear_model
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.sample(5)
import matplotlib.pyplot as plt
train = train.dropna()
slr = linear_model.LinearRegression()
X_train = np.array(train.iloc[:, :-1].values)
y_train = np.array(train.iloc[:, 1].values)
X_test = np.array(test.iloc[:, :-1].values)
y_test = np.array(test.iloc[:, 1].values)
slr.fit(X_train, y_train)
slr.coef_
slr.intercept_
y_pred = slr.predict(X_test)
plt.scatter('x', 'y', data=test)
plt.plot(X_test, y_pred, color='red')
plt.show()
|
code
|
122257913/cell_33
|
[
"text_plain_output_1.png"
] |
from sklearn import linear_model
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.sample(5)
train = train.dropna()
slr = linear_model.LinearRegression()
X_train = np.array(train.iloc[:, :-1].values)
y_train = np.array(train.iloc[:, 1].values)
X_test = np.array(test.iloc[:, :-1].values)
y_test = np.array(test.iloc[:, 1].values)
slr.fit(X_train, y_train)
slr.coef_
slr.intercept_
y_pred = slr.predict(X_test)
mean_squared_error(y_test, y_pred)
|
code
|
122257913/cell_6
|
[
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
|
code
|
122257913/cell_29
|
[
"text_plain_output_1.png"
] |
from sklearn import linear_model
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.sample(5)
import matplotlib.pyplot as plt
train = train.dropna()
slr = linear_model.LinearRegression()
X_train = np.array(train.iloc[:, :-1].values)
y_train = np.array(train.iloc[:, 1].values)
X_test = np.array(test.iloc[:, :-1].values)
y_test = np.array(test.iloc[:, 1].values)
slr.fit(X_train, y_train)
slr.coef_
slr.intercept_
y_pred = slr.predict(X_test)
plt.plot(X_test, y_pred, color='red')
plt.show()
|
code
|
122257913/cell_11
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.sample(5)
|
code
|
122257913/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
122257913/cell_7
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.info()
|
code
|
122257913/cell_32
|
[
"text_plain_output_1.png"
] |
from sklearn import linear_model
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.sample(5)
train = train.dropna()
slr = linear_model.LinearRegression()
X_train = np.array(train.iloc[:, :-1].values)
y_train = np.array(train.iloc[:, 1].values)
X_test = np.array(test.iloc[:, :-1].values)
y_test = np.array(test.iloc[:, 1].values)
slr.fit(X_train, y_train)
slr.coef_
slr.intercept_
y_pred = slr.predict(X_test)
mean_absolute_error(y_test, y_pred)
|
code
|
122257913/cell_28
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from sklearn import linear_model
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.sample(5)
train = train.dropna()
slr = linear_model.LinearRegression()
X_train = np.array(train.iloc[:, :-1].values)
y_train = np.array(train.iloc[:, 1].values)
X_test = np.array(test.iloc[:, :-1].values)
y_test = np.array(test.iloc[:, 1].values)
slr.fit(X_train, y_train)
slr.coef_
slr.intercept_
y_pred = slr.predict(X_test)
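# LinearRegression.score returns the R^2 coefficient of determination on the test data, not a classification accuracy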
accuracy = slr.score(X_test, y_test)
print(accuracy)
|
code
|
122257913/cell_8
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
test.info()
|
code
|
122257913/cell_15
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.sample(5)
import matplotlib.pyplot as plt
train = train.dropna()
plt.boxplot('x', data=train)
|
code
|
122257913/cell_16
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.sample(5)
import matplotlib.pyplot as plt
train = train.dropna()
plt.boxplot(train['y'])
|
code
|
122257913/cell_22
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from sklearn import linear_model
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.sample(5)
train = train.dropna()
slr = linear_model.LinearRegression()
X_train = np.array(train.iloc[:, :-1].values)
y_train = np.array(train.iloc[:, 1].values)
X_test = np.array(test.iloc[:, :-1].values)
y_test = np.array(test.iloc[:, 1].values)
slr.fit(X_train, y_train)
slr.coef_
|
code
|
122257913/cell_10
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
(train.shape, test.shape)
train.tail()
|
code
|
32065703/cell_21
|
[
"image_output_1.png"
] |
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
root_path = '/kaggle/input/CORD-19-research-challenge/'
metadata_path = root_path + 'metadata.csv'
metadata_df = pd.read_csv(metadata_path, dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str})
metadata_df = metadata_df.fillna(0)
metadata_df_wt_abs = metadata_df[metadata_df['abstract'] != 0]
metadata_df_wt_abs.shape
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english') + [i for i in string.punctuation] + ['may', 'also', 'used'])
key_words = []
for word in metadata_df_wt_abs['abstract']:
temp = word_tokenize(word.lower())
for txt in temp:
if txt not in stop_words:
key_words.append(txt)
def transformations(sentences):
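    # tokenize, lowercase, lemmatize, and drop stop words/punctuation, returning the remaining key words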
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english') + [i for i in string.punctuation] + ['may', 'also', 'used'])
key_words = []
for word in sentences.split():
temp = word_tokenize(word.lower())
for txt in temp:
txt = lemmatizer.lemmatize(txt)
if txt not in stop_words:
key_words.append(txt)
return key_words
def get_breaks(content, length):
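    # insert an HTML <br> roughly every 'length' characters so long strings wrap, e.g. in plot hover text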
data = ''
words = content.split(' ')
total_chars = 0
for i in range(len(words)):
total_chars += len(words[i])
if total_chars > length:
data = data + '<br>' + words[i]
total_chars = 0
else:
data = data + ' ' + words[i]
return data
Label_df = pd.DataFrame(columns=['Task_text'], data=['What has been published about medical care?', ' What has been published concerning surge capacity and nursing homes?', 'What has been published concerning efforts to inform allocation of scarce resources?', 'What do we know about personal protective equipment?', 'What has been published concerning alternative methods to advise on disease management?', 'What has been published concerning processes of care?', 'What do we know about the clinical characterization and management of the virus?', 'Resources to support skilled nursing facilities and long term care facilities.', 'Mobilization of surge medical staff to address shortages in overwhelmed communities Age-adjusted mortality data for Acute Respiratory Distress Syndrome (ARDS) with/without other organ failure – particularly for viral etiologies', 'Extracorporeal membrane oxygenation (ECMO) outcomes data of COVID-19 patients Outcomes data for COVID-19 after mechanical ventilation adjusted for age.', 'Knowledge of the frequency, manifestations, and course of extrapulmonary manifestations of COVID-19, including, but not limited to, possible cardiomyopathy and cardiac arrest.', 'Application of regulatory standards (e.g., EUA, CLIA) and ability to adapt care to crisis standards of care level.', 'Approaches for encouraging and facilitating the production of elastomeric respirators, which can save thousands of N95 masks. Best telemedicine practices, barriers and faciitators, and specific actions to remove/expand them within and across state boundaries. Guidance on the simple things people can do at home to take care of sick people and manage disease. Oral medications that might potentially work.', 'Use of AI in real-time health care delivery to evaluate interventions, risk factors, and outcomes in a way that could not be done manually. Best practices and critical challenges and innovative solutions and technologies in hospital flow and organization, workforce protection, workforce allocation, community-based support resources, payment, and supply chain management to enhance capacity, efficiency, and outcomes. Efforts to define the natural history of disease to inform clinical care, public health interventions, infection prevention control, transmission, and clinical trials Efforts to develop a core clinical outcome set to maximize usability of data across a range of trials Efforts to determine adjunctive and supportive interventions that can improve the clinical outcomes of infected patients (e.g. steroids, high flow oxygen)'])
Label_df['Bag_of_words'] = Label_df['Task_text'].apply(lambda x: transformations(x))
Label_df
root_path = '/kaggle/input/CORD-19-research-challenge/'
metadata_path = root_path + 'metadata.csv'
metadata_with_pid = pd.read_csv(metadata_path, dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str})
metadata_with_pid.drop_duplicates(['abstract'], inplace=True)
metadata_with_pid.dropna(subset=['abstract'], inplace=True)
metadata_with_pid.drop(columns=['WHO #Covidence', 'journal', 'authors', 'full_text_file', 'license'])  # NOTE: result is not assigned, so no columns are actually dropped ('authors' and 'journal' are still used below)
metadata_with_pid.shape
for pid in range(metadata_with_pid.shape[0]):
    try:
        # prefer 'sha' as the paper id and fall back to 'pmcid' when the sha is missing
        if pd.notna(metadata_with_pid.loc[pid, 'sha']):
            metadata_with_pid.loc[pid, 'paper_id'] = metadata_with_pid.loc[pid, 'sha']
        elif pd.notna(metadata_with_pid.loc[pid, 'pmcid']):
            metadata_with_pid.loc[pid, 'paper_id'] = metadata_with_pid.loc[pid, 'pmcid']
    except KeyError:
        # index labels removed by drop_duplicates/dropna above fall through to an empty id
        metadata_with_pid.loc[pid, 'paper_id'] = ''
metadata_with_pid
metadata_with_pid.dropna(subset=['sha', 'pmcid'], how='all')  # NOTE: result is not assigned, so rows missing both ids are not actually removed
metadata_with_pid[:200]
dict_ = {'paper_id': [], 'doi': [], 'abstract': [], 'body_text': [], 'authors': [], 'title': [], 'journal': [], 'abstract_summary': []}
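# NOTE: all_json (the list of JSON paper paths) and FileReader are assumed to be defined in earlier notebook cells not shown here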
for idx, entry in enumerate(all_json):
try:
content = FileReader(entry)
except Exception as e:
continue
meta_data = metadata_with_pid.loc[metadata_with_pid['sha'] == content.paper_id]
if len(meta_data) == 0:
continue
dict_['abstract'].append(content.abstract)
dict_['paper_id'].append(content.paper_id)
dict_['body_text'].append(content.body_text)
if len(content.abstract) == 0:
dict_['abstract_summary'].append('Not provided.')
elif len(content.abstract.split(' ')) > 100:
info = content.abstract.split(' ')[:100]
summary = get_breaks(' '.join(info), 40)
dict_['abstract_summary'].append(summary + '...')
else:
summary = get_breaks(content.abstract, 40)
dict_['abstract_summary'].append(summary)
meta_data = metadata_with_pid.loc[metadata_with_pid['sha'] == content.paper_id]
try:
authors = meta_data['authors'].values[0].split(';')
if len(authors) > 2:
dict_['authors'].append(get_breaks('. '.join(authors), 40))
else:
dict_['authors'].append('. '.join(authors))
except Exception as e:
dict_['authors'].append(meta_data['authors'].values[0])
try:
title = get_breaks(meta_data['title'].values[0], 40)
dict_['title'].append(title)
except Exception as e:
dict_['title'].append(meta_data['title'].values[0])
dict_['journal'].append(meta_data['journal'].values[0])
dict_['doi'].append(meta_data['doi'].values[0])
df_covid = pd.DataFrame(dict_, columns=['paper_id', 'doi', 'abstract', 'body_text', 'authors', 'title', 'journal', 'abstract_summary'])
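# the parsed dataframe is immediately replaced by a precomputed one that already holds the per-question cosine-similarity columns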
df_covid = pd.read_csv('/kaggle/input/cosine-df/cosine_df.csv', index_col=0)
sort_by_q1 = df_covid.sort_values('Q1cosine_similarity', ascending=False)
sort_by_q1.loc[:, ['paper_id', 'abstract', 'body_text', 'authors', 'title', 'journal', 'abstract_summary', 'Q1cosine_similarity']].head(n=10)
|
code
|
32065703/cell_13
|
[
"text_plain_output_1.png"
] |
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
root_path = '/kaggle/input/CORD-19-research-challenge/'
metadata_path = root_path + 'metadata.csv'
metadata_df = pd.read_csv(metadata_path, dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str})
metadata_df = metadata_df.fillna(0)
metadata_df_wt_abs = metadata_df[metadata_df['abstract'] != 0]
metadata_df_wt_abs.shape
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english') + [i for i in string.punctuation] + ['may', 'also', 'used'])
key_words = []
for word in metadata_df_wt_abs['abstract']:
temp = word_tokenize(word.lower())
for txt in temp:
if txt not in stop_words:
key_words.append(txt)
def transformations(sentences):
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english') + [i for i in string.punctuation] + ['may', 'also', 'used'])
key_words = []
for word in sentences.split():
temp = word_tokenize(word.lower())
for txt in temp:
txt = lemmatizer.lemmatize(txt)
if txt not in stop_words:
key_words.append(txt)
return key_words
Label_df = pd.DataFrame(columns=['Task_text'], data=['What has been published about medical care?', ' What has been published concerning surge capacity and nursing homes?', 'What has been published concerning efforts to inform allocation of scarce resources?', 'What do we know about personal protective equipment?', 'What has been published concerning alternative methods to advise on disease management?', 'What has been published concerning processes of care?', 'What do we know about the clinical characterization and management of the virus?', 'Resources to support skilled nursing facilities and long term care facilities.', 'Mobilization of surge medical staff to address shortages in overwhelmed communities Age-adjusted mortality data for Acute Respiratory Distress Syndrome (ARDS) with/without other organ failure – particularly for viral etiologies', 'Extracorporeal membrane oxygenation (ECMO) outcomes data of COVID-19 patients Outcomes data for COVID-19 after mechanical ventilation adjusted for age.', 'Knowledge of the frequency, manifestations, and course of extrapulmonary manifestations of COVID-19, including, but not limited to, possible cardiomyopathy and cardiac arrest.', 'Application of regulatory standards (e.g., EUA, CLIA) and ability to adapt care to crisis standards of care level.', 'Approaches for encouraging and facilitating the production of elastomeric respirators, which can save thousands of N95 masks. Best telemedicine practices, barriers and faciitators, and specific actions to remove/expand them within and across state boundaries. Guidance on the simple things people can do at home to take care of sick people and manage disease. Oral medications that might potentially work.', 'Use of AI in real-time health care delivery to evaluate interventions, risk factors, and outcomes in a way that could not be done manually. Best practices and critical challenges and innovative solutions and technologies in hospital flow and organization, workforce protection, workforce allocation, community-based support resources, payment, and supply chain management to enhance capacity, efficiency, and outcomes. Efforts to define the natural history of disease to inform clinical care, public health interventions, infection prevention control, transmission, and clinical trials Efforts to develop a core clinical outcome set to maximize usability of data across a range of trials Efforts to determine adjunctive and supportive interventions that can improve the clinical outcomes of infected patients (e.g. steroids, high flow oxygen)'])
Label_df['Bag_of_words'] = Label_df['Task_text'].apply(lambda x: transformations(x))
Label_df
|
code
|
32065703/cell_4
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
root_path = '/kaggle/input/CORD-19-research-challenge/'
metadata_path = root_path + 'metadata.csv'
metadata_df = pd.read_csv(metadata_path, dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str})
metadata_df = metadata_df.fillna(0)
metadate_wto_abs = metadata_df[metadata_df['abstract'] == 0]
metadate_wto_abs.shape
|
code
|
32065703/cell_23
|
[
"text_plain_output_1.png"
] |
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
root_path = '/kaggle/input/CORD-19-research-challenge/'
metadata_path = root_path + 'metadata.csv'
metadata_df = pd.read_csv(metadata_path, dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str})
metadata_df = metadata_df.fillna(0)
metadata_df_wt_abs = metadata_df[metadata_df['abstract'] != 0]
metadata_df_wt_abs.shape
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english') + [i for i in string.punctuation] + ['may', 'also', 'used'])
key_words = []
for word in metadata_df_wt_abs['abstract']:
temp = word_tokenize(word.lower())
for txt in temp:
if txt not in stop_words:
key_words.append(txt)
def transformations(sentences):
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english') + [i for i in string.punctuation] + ['may', 'also', 'used'])
key_words = []
for word in sentences.split():
temp = word_tokenize(word.lower())
for txt in temp:
txt = lemmatizer.lemmatize(txt)
if txt not in stop_words:
key_words.append(txt)
return key_words
def get_breaks(content, length):
data = ''
words = content.split(' ')
total_chars = 0
for i in range(len(words)):
total_chars += len(words[i])
if total_chars > length:
data = data + '<br>' + words[i]
total_chars = 0
else:
data = data + ' ' + words[i]
return data
Label_df = pd.DataFrame(columns=['Task_text'], data=['What has been published about medical care?', ' What has been published concerning surge capacity and nursing homes?', 'What has been published concerning efforts to inform allocation of scarce resources?', 'What do we know about personal protective equipment?', 'What has been published concerning alternative methods to advise on disease management?', 'What has been published concerning processes of care?', 'What do we know about the clinical characterization and management of the virus?', 'Resources to support skilled nursing facilities and long term care facilities.', 'Mobilization of surge medical staff to address shortages in overwhelmed communities Age-adjusted mortality data for Acute Respiratory Distress Syndrome (ARDS) with/without other organ failure – particularly for viral etiologies', 'Extracorporeal membrane oxygenation (ECMO) outcomes data of COVID-19 patients Outcomes data for COVID-19 after mechanical ventilation adjusted for age.', 'Knowledge of the frequency, manifestations, and course of extrapulmonary manifestations of COVID-19, including, but not limited to, possible cardiomyopathy and cardiac arrest.', 'Application of regulatory standards (e.g., EUA, CLIA) and ability to adapt care to crisis standards of care level.', 'Approaches for encouraging and facilitating the production of elastomeric respirators, which can save thousands of N95 masks. Best telemedicine practices, barriers and faciitators, and specific actions to remove/expand them within and across state boundaries. Guidance on the simple things people can do at home to take care of sick people and manage disease. Oral medications that might potentially work.', 'Use of AI in real-time health care delivery to evaluate interventions, risk factors, and outcomes in a way that could not be done manually. Best practices and critical challenges and innovative solutions and technologies in hospital flow and organization, workforce protection, workforce allocation, community-based support resources, payment, and supply chain management to enhance capacity, efficiency, and outcomes. Efforts to define the natural history of disease to inform clinical care, public health interventions, infection prevention control, transmission, and clinical trials Efforts to develop a core clinical outcome set to maximize usability of data across a range of trials Efforts to determine adjunctive and supportive interventions that can improve the clinical outcomes of infected patients (e.g. steroids, high flow oxygen)'])
Label_df['Bag_of_words'] = Label_df['Task_text'].apply(lambda x: transformations(x))
Label_df
root_path = '/kaggle/input/CORD-19-research-challenge/'
metadata_path = root_path + 'metadata.csv'
metadata_with_pid = pd.read_csv(metadata_path, dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str})
metadata_with_pid.drop_duplicates(['abstract'], inplace=True)
metadata_with_pid.dropna(subset=['abstract'], inplace=True)
metadata_with_pid.drop(columns=['WHO #Covidence', 'journal', 'authors', 'full_text_file', 'license'])  # NOTE: result is not assigned, so no columns are actually dropped ('authors' and 'journal' are still used below)
metadata_with_pid.shape
for pid in range(metadata_with_pid.shape[0]):
    try:
        # prefer 'sha' as the paper id and fall back to 'pmcid' when the sha is missing
        if pd.notna(metadata_with_pid.loc[pid, 'sha']):
            metadata_with_pid.loc[pid, 'paper_id'] = metadata_with_pid.loc[pid, 'sha']
        elif pd.notna(metadata_with_pid.loc[pid, 'pmcid']):
            metadata_with_pid.loc[pid, 'paper_id'] = metadata_with_pid.loc[pid, 'pmcid']
    except KeyError:
        # index labels removed by drop_duplicates/dropna above fall through to an empty id
        metadata_with_pid.loc[pid, 'paper_id'] = ''
metadata_with_pid
metadata_with_pid.dropna(subset=['sha', 'pmcid'], how='all')  # NOTE: result is not assigned, so rows missing both ids are not actually removed
metadata_with_pid[:200]
dict_ = {'paper_id': [], 'doi': [], 'abstract': [], 'body_text': [], 'authors': [], 'title': [], 'journal': [], 'abstract_summary': []}
for idx, entry in enumerate(all_json):
try:
content = FileReader(entry)
except Exception as e:
continue
meta_data = metadata_with_pid.loc[metadata_with_pid['sha'] == content.paper_id]
if len(meta_data) == 0:
continue
dict_['abstract'].append(content.abstract)
dict_['paper_id'].append(content.paper_id)
dict_['body_text'].append(content.body_text)
if len(content.abstract) == 0:
dict_['abstract_summary'].append('Not provided.')
elif len(content.abstract.split(' ')) > 100:
info = content.abstract.split(' ')[:100]
summary = get_breaks(' '.join(info), 40)
dict_['abstract_summary'].append(summary + '...')
else:
summary = get_breaks(content.abstract, 40)
dict_['abstract_summary'].append(summary)
meta_data = metadata_with_pid.loc[metadata_with_pid['sha'] == content.paper_id]
try:
authors = meta_data['authors'].values[0].split(';')
if len(authors) > 2:
dict_['authors'].append(get_breaks('. '.join(authors), 40))
else:
dict_['authors'].append('. '.join(authors))
except Exception as e:
dict_['authors'].append(meta_data['authors'].values[0])
try:
title = get_breaks(meta_data['title'].values[0], 40)
dict_['title'].append(title)
except Exception as e:
dict_['title'].append(meta_data['title'].values[0])
dict_['journal'].append(meta_data['journal'].values[0])
dict_['doi'].append(meta_data['doi'].values[0])
df_covid = pd.DataFrame(dict_, columns=['paper_id', 'doi', 'abstract', 'body_text', 'authors', 'title', 'journal', 'abstract_summary'])
df_covid = pd.read_csv('/kaggle/input/cosine-df/cosine_df.csv', index_col=0)
sort_by_q1 = df_covid.sort_values('Q1cosine_similarity', ascending=False)
sort_by_q2 = df_covid.sort_values('Q2cosine_similarity', ascending=False)
sort_by_q3 = df_covid.sort_values('Q3cosine_similarity', ascending=False)
sort_by_q3.loc[:, ['paper_id', 'abstract', 'body_text', 'authors', 'title', 'journal', 'abstract_summary', 'Q3cosine_similarity']].head(n=10)
|
code
|