path stringlengths 8 399 | content_id stringlengths 40 40 | detected_licenses list | license_type stringclasses 2 values | repo_name stringlengths 6 109 | repo_url stringlengths 25 128 | star_events_count int64 0 52.9k | fork_events_count int64 0 7.07k | gha_license_id stringclasses 9 values | gha_event_created_at timestamp[us] | gha_updated_at timestamp[us] | gha_language stringclasses 28 values | language stringclasses 1 value | is_generated bool 1 class | is_vendor bool 1 class | conversion_extension stringclasses 17 values | size int64 317 10.5M | script stringlengths 245 9.7M | script_size int64 245 9.7M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
/notebooks/KG - Storages.ipynb | c7d45998146cf96c74f5e55421b80d18263158ff | [
"Apache-2.0"
] | permissive | naranil/nexus-python-sdk | https://github.com/naranil/nexus-python-sdk | 1 | 0 | Apache-2.0 | 2019-08-06T08:56:12 | 2019-08-02T18:08:01 | null | Jupyter Notebook | false | false | .py | 9,950 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ### 协作型过滤(Collaborative Filtering)
# >*从一大群人中进行搜索, 找出与我们口味相近的人。* 时下,有很多网站都采用这样活着那样的协作型过滤的算法。主要涉及到电影、音乐、书籍、交友、购物等。
# ---
# ### 搜集偏好(Collecting Preferences)
# 一个影评者对几部电影的评分情况的字典
# A dictionary of movie critics and their ratings of a small set of movies.
# Keys are critic names; values map movie title -> rating (0.0-5.0).
critics={'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
 'The Night Listener': 3.0},
'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
 'You, Me and Dupree': 3.5},
'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
 'Superman Returns': 3.5, 'The Night Listener': 4.0},
'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
 'The Night Listener': 4.5, 'Superman Returns': 4.0,
 'You, Me and Dupree': 2.5},
'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
 'You, Me and Dupree': 2.0},
'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
 'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}}
critics['Lisa Rose']['Lady in the Water']
# Re-assign Toby's rating. Fixed the key typo 'Snakes on Plane' ->
# 'Snakes on a Plane': the misspelled key silently added a spurious
# fourth movie entry to Toby's ratings instead of updating the real one.
critics['Toby']['Snakes on a Plane'] = 4.5
critics['Toby']
# ---
# ### 寻找相近的用户(Finding Similar Users)
# - 欧几里得距离
# - 皮尔逊相关度
# +
# 欧几里得距离
from math import sqrt
def sim_distance(prefs, person1, person2):
    """Return a Euclidean-distance-based similarity score for person1 and person2.

    prefs maps person -> {item: rating}. The score is 1/(1 + sum of squared
    rating differences) over the items both people rated, so it lies in (0, 1]
    and is 1 for identical ratings. Returns 0 when they share no items.
    """
    # Items rated by both people; computed once and reused below
    # (the original rebuilt this intersection a second time in the sum).
    shared = [item for item in prefs[person1] if item in prefs[person2]]
    if not shared:
        return 0
    # Sum of squared rating differences over the shared items.
    sum_of_squares = sum(pow(prefs[person1][item] - prefs[person2][item], 2)
                         for item in shared)
    return 1/(1+sum_of_squares)
# -
sim_distance(critics, 'Lisa Rose', 'Gene Seymour')
# 皮尔逊系数
def sim_pearson(prefs, p1, p2):
    """Return the Pearson correlation coefficient for critics p1 and p2.

    The coefficient is computed over the items both critics rated and lies
    in [-1, 1]. Returns 0 when they share no items or when either critic's
    ratings have zero variance over the shared items.
    """
    # Items rated by both critics.
    shared_items = {item: 1 for item in prefs[p1] if item in prefs[p2]}
    if len(shared_items) == 0:
        return 0
    n = len(shared_items)
    # Sums of ratings over the shared items.
    sum1 = sum(prefs[p1][it] for it in shared_items)
    sum2 = sum(prefs[p2][it] for it in shared_items)
    # Sums of squared ratings.
    sum1_sq = sum(prefs[p1][it] ** 2 for it in shared_items)
    sum2_sq = sum(prefs[p2][it] ** 2 for it in shared_items)
    # Sum of the rating products.
    p_sum = sum(prefs[p1][it] * prefs[p2][it] for it in shared_items)
    # Pearson score: covariance over the product of standard deviations.
    numerator = p_sum - (sum1 * sum2 / n)
    denominator = sqrt((sum1_sq - sum1 ** 2 / n) * (sum2_sq - sum2 ** 2 / n))
    if denominator == 0:
        return 0
    return numerator / denominator
sim_pearson(critics, 'Lisa Rose', 'Gene Seymour')
# ---
# ### 为评论者打分(Ranking the Critics)
def top_mathches(prefs, person, n=5, similarity=sim_pearson):
    """Return the best n matches for person as (score, name) pairs, best first."""
    scores = []
    for other in prefs:
        # Don't compare the person to themselves.
        if other == person:
            continue
        scores.append((similarity(prefs, person, other), other))
    return sorted(scores, reverse=True)[:n]
top_mathches(critics, 'Toby', n=3)
# ---
# ### 推荐物品
# 利用所有他人的评价值加权平均, 为某人提供建议
def get_recommendations(prefs, person, similarity=sim_pearson):
    """Recommend items for person as (estimated score, item) pairs, best first.

    Each unseen item's score is the average of every other critic's rating,
    weighted by that critic's similarity to person.
    """
    weighted_totals = {}
    similarity_sums = {}
    for critic in prefs:
        # Don't compare the person to themselves.
        if critic == person:
            continue
        score = similarity(prefs, person, critic)
        # Ignore critics with zero or negative similarity.
        if score <= 0:
            continue
        for item, rating in prefs[critic].items():
            # Only score items the person hasn't rated yet.
            if item not in prefs[person] or prefs[person][item] == 0:
                # similarity * rating, accumulated per item
                weighted_totals[item] = weighted_totals.get(item, 0) + rating * score
                # running sum of similarities, for normalisation
                similarity_sums[item] = similarity_sums.get(item, 0) + score
    # Normalise each weighted total and return the list best-first.
    rankings = [(total / similarity_sums[item], item)
                for item, total in weighted_totals.items()]
    return sorted(rankings, reverse=True)
get_recommendations(critics, 'Toby')
# ---
# ### 匹配商品
#
# 首先将字典转换,将商品作为键
def transformPerfs(prefs):
    """Swap people and items: {person: {item: rating}} -> {item: {person: rating}}."""
    flipped = {}
    for person, ratings in prefs.items():
        for item, rating in ratings.items():
            flipped.setdefault(item, {})[person] = rating
    return flipped
movies = transformPerfs(critics)
# ---
# ### 构建一个基于del.icio.us的链接推荐系统
import pydelicious
out_rate)) #prevents any one neuron from becoming too weighted
model.add(Dense(4, input_dim = 8, kernel_initializer='normal', activation='relu'))
model.add(Dropout(dropout_rate))
model.add(Dense(1, activation ='sigmoid'))
#compile the model
adam = Adam(lr = learn_rate) #learning rate as a variable
model.compile(loss = 'binary_crossentropy', optimizer = adam, metrics = ['accuracy'])
return model
#create the model
#use the best epochs and batch size found above
model = KerasClassifier(build_fn = create_model, epochs = 50, batch_size = 20, verbose = 0) #Using the best epochs and batch size
#define the grid search parameters
#plays with the learning and dropout rate
learn_rate = [0.001, 0.01, 0.1]
dropout_rate = [0.0, 0.1, 0.2]
#make a dictionary of the grid search parameters
param_grid = dict(learn_rate=learn_rate, dropout_rate=dropout_rate)
#build and fit the grid search
grid = GridSearchCV(estimator = model, param_grid = param_grid, cv = KFold(random_state=seed), verbose = 10)
grid_results = grid.fit(X_standardized, Y)
#summarize the results
print("Best:{0}, using {1}".format(grid_results.best_score_,grid_results.best_params_))
mean = grid_results.cv_results_['mean_test_score']
stds = grid_results.cv_results_['std_test_score']
params = grid_results.cv_results_['params']
for mean, stdev, param in zip(mean, stds, params):
print('{0} ({1}) with: {2}'.format(mean, stdev, param))
# +
#weight Initializing, activations, and neurons
#Now we know the best learn_rate and dropout_rates so make changes to that
# +
#activation and initialization
#Change around the parameters we want to use just a little bit dropout rate and learning rate
seed = 6
np.random.seed(seed)
#start defining the model
def create_model(activation, init):
    """Build and compile an 8-4-1 binary classifier.

    activation: activation function name for the two hidden layers
    init: kernel initializer name for the two hidden layers
    """
    model = Sequential([
        Dense(8, input_dim=8, kernel_initializer=init, activation=activation),
        Dense(4, input_dim=8, kernel_initializer=init, activation=activation),
        Dense(1, activation='sigmoid'),
    ])
    # Learning rate fixed to the value found by the earlier grid search.
    model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.001), metrics=['accuracy'])
    return model
# create the model
# use the best epochs and batch size found above
model = KerasClassifier(build_fn = create_model, epochs = 100, batch_size = 20, verbose = 0)
# define the grid search parameters: activation functions and weight initializers
activation = ['softmax', 'relu', 'tanh', 'linear']
# NOTE: fixed ' zero' -> 'zero'; the leading space is not a valid Keras
# initializer name and would make every fit with that setting fail.
init = ['uniform', 'normal', 'zero']
# make a dictionary of the grid search parameters
param_grid = dict(activation = activation, init = init)
# build and fit the grid search
grid = GridSearchCV(estimator = model, param_grid = param_grid, cv = KFold(random_state=seed), verbose = 10)
grid_results = grid.fit(X_standardized, Y)
# summarize the results
print("Best:{0}, using {1}".format(grid_results.best_score_,grid_results.best_params_))
# renamed to 'means' so the loop variable below no longer shadows the list
means = grid_results.cv_results_['mean_test_score']
stds = grid_results.cv_results_['std_test_score']
params = grid_results.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print('{0} ({1}) with: {2}'.format(mean, stdev, param))
# +
#Number of neurons
# +
#Number of neurons in each layer
#Change around the parameters we want to use just a little bit dropout rate and learning rate
seed = 6
np.random.seed(seed)
#start defining the model
def create_model(neuron1, neuron2):
    """Build and compile a neuron1-neuron2-1 binary classifier.

    neuron1: units in the first hidden layer
    neuron2: units in the second hidden layer (its input_dim must match neuron1)
    """
    model = Sequential([
        Dense(neuron1, input_dim=8, kernel_initializer='uniform', activation='linear'),
        Dense(neuron2, input_dim=neuron1, kernel_initializer='uniform', activation='linear'),
        Dense(1, activation='sigmoid'),
    ])
    # Learning rate fixed to the value found by the earlier grid search.
    model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.001), metrics=['accuracy'])
    return model
#create the model
#use the best epochs and batch size found above
model = KerasClassifier(build_fn = create_model, epochs = 100, batch_size = 20, verbose = 0) #Using the best epochs and batch size
#define the grid search parameters
#plays with the activation and init
neuron1 = [4, 8, 16, 24]
neuron2 = [2, 4, 8, 16]
#make a dictionary of the grid search parameters
param_grid = dict(neuron1 = neuron1, neuron2 = neuron2)
#build and fit the grid search
#refit parameter added retrains the models with the new best parameters
grid = GridSearchCV(estimator = model, param_grid = param_grid, cv = KFold(random_state=seed), refit = True, verbose = 10)
grid_results = grid.fit(X_standardized, Y)
#summarize the results
print("Best:{0}, using {1}".format(grid_results.best_score_,grid_results.best_params_))
mean = grid_results.cv_results_['mean_test_score']
stds = grid_results.cv_results_['std_test_score']
params = grid_results.cv_results_['params']
for mean, stdev, param in zip(mean, stds, params):
print('{0} ({1}) with: {2}'.format(mean, stdev, param))
# -
# generate predictions with optimal hyperparameters
y_pred = grid.predict(X_standardized)
print(y_pred.shape)
print(y_pred[:5])
# +
#Generate a classification report
from sklearn.metrics import classification_report, accuracy_score
print(accuracy_score(Y, y_pred))
print(classification_report(Y, y_pred))
# -
#example datapoint
example = df.iloc[1]
print(example)
#make a prediction using our optimized deep neural network
prediction = grid.predict(X_standardized[1].reshape(1, -1)) #turns from column vector to row vector
print(prediction)
# +
#Could do the whole grid at the same time
#maybe 8 and 4 neurons could be better for a combination of other parameters and ultimately be the best
#more parameters being optimized at once would add time exponentially
#start with a very coarse grid then narrow in i.e. neurons (1, 10, 50, 100) then (15,16,17)
#these parameters will not work for all datasets and the parameters are problem specific
| 10,527 |
/lab3/lab3.ipynb | 7616c9037d0cb2bd4f357774304326b088163325 | [] | no_license | th3rring/comp330 | https://github.com/th3rring/comp330 | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 3,383 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import time;
# +
# there are 2000 words in the corpus
alpha = np.full (2000, .1)
# there are 100 topics
beta = np.full (100, .1)
# this gets us the probabilty of each word happening in each of the 100 topics
wordsInTopic = np.random.dirichlet (alpha, 100)
# produced [doc, topic, word] gives us the number of times that the given word was
# produced by the given topic in the given doc
produced = np.zeros ((50, 100, 2000))
# -
# Generative LDA loop: for each of the 50 documents, draw its topic mixture,
# split the 2000 words among topics, then draw the words topic by topic.
for doc in range (0, 50):
    #
    # get the topic probabilities for this doc (Dirichlet prior over the 100 topics)
    topicsInDoc = np.random.dirichlet (beta)
    #
    # assign each of the 2000 words in this doc to a topic
    wordsToTopic = np.random.multinomial (2000, topicsInDoc)
    #
    # and generate each of the 2000 words from its topic's word distribution,
    # recording per-(doc, topic) word counts in `produced`
    for topic in range (0, 100):
        produced[doc, topic] = np.random.multinomial (wordsToTopic[topic], wordsInTopic[topic])
print(produced[18,np.arange(17,46)].sum())
print(produced.sum())
print(produced[:,17].sum())
print(produced[:,15].sum(0))
print(produced.sum(0).argmax(0))
| 1,371 |
/Machine Learning Basics/6.Classification (Sınıflandırma)/Untitled.ipynb | df2ee16c34fd47e8d5f428092388b6e29f75fa29 | [] | no_license | onurkulat/machine_learning | https://github.com/onurkulat/machine_learning | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 5,433 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sınıflandırma (Classification)
# ## Lojistik Regresyon(Logistic)
# + active=""
# Lojistik regresyon, bir sonucu belirleyen bir veya daha fazla bağımsız değişken bulunan bir veri kümesini analiz etmek için kullanılan istatistiksel bir yöntemdir. Sonuç, ikili bir değişkenle ölçülür (yalnızca iki olası sonuç vardır).
# +
# Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Load the data
veriler=pd.read_csv("veriler.csv")
x=veriler.iloc[:,1:4].values # independent variables (height, weight and age)
y=veriler.iloc[:,-1:].values # dependent variable (gender)
# Program that tries to predict gender from height, weight and age
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.33,random_state=0)
# Standardize features: fit the scaler on the training split only and
# reuse its statistics on the test split.
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(x_train)
X_test=sc.transform(x_test)
from sklearn.linear_model import LogisticRegression
logr= LogisticRegression(random_state=0)
logr.fit(X_train,y_train)
y_pred=logr.predict(X_test)
print("tahminler:",y_pred)
print("sonuclar:",y_test)
# -
# ## Confusion Matrix (Karmaşıklık Matrisi)
# + active=""
# Bir karışıklık matrisi, gerçek değerlerin bilinmekte olduğu bir dizi test verisi üzerinde, bir sınıflandırma modelinin performansını tanımlamak için sıklıkla kullanılan bir matristir.
# +
#Kütüphaneler
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#veri yükleme
veriler=pd.read_csv("veriler.csv")
x=veriler.iloc[:,1:4].values #bağımsız değişkenler(boy,kilo ve yaş)
y=veriler.iloc[:,-1:].values #bağımlı değişkenler(cinsiyet)
#boy kilo ve yaştan cinsiyet bulmaya çalışan program
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.33,random_state=0)
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(x_train)
X_test=sc.transform(x_test)
from sklearn.linear_model import LogisticRegression
logr= LogisticRegression(random_state=0)
logr.fit(X_train,y_train)
y_pred=logr.predict(X_test)
print("tahminler:",y_pred)
print("sonuclar:",y_test)
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(y_test,y_pred)
print("confusion matrix:\n",cm)
# -
| 2,552 |
/homework/Day_099_data_augmentation.ipynb | 3fdf3851756611513dd4e3c52867456ad8916886 | [] | no_license | greamown/2nd-ML100Days | https://github.com/greamown/2nd-ML100Days | 2 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 385,177 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
# %matplotlib inline
# 此函數會幫我們把多張影像畫成一張多宮格圖
def img_combine(img, ncols=8, size=1, path=False):
    """Display a sequence of images as one multi-cell grid figure.

    img: sequence of image arrays (pixel values scaled into [0, 1] by vmin/vmax)
    ncols: number of grid columns; rows are derived from len(img)
    size: scale factor (inches) per grid cell
    path: unused in this body — presumably reserved for saving the figure; TODO confirm
    """
    from math import ceil
    import matplotlib.pyplot as plt
    import numpy as np
    nimg = len(img)
    # Enough rows to fit every image at ncols images per row.
    nrows = int(ceil(nimg/ncols))
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, sharey=True, figsize=(ncols*size,nrows*size))
    # plt.subplots returns axes of different dimensionality depending on
    # nrows/ncols, hence the three iteration branches below.
    if nrows == 0:
        return
    elif ncols == 1:
        # Single column: axes is a 1-D array, one axis per row.
        for r, ax in zip(np.arange(nrows), axes):
            nth=r
            if nth < nimg:
                ax.imshow(img[nth], cmap='rainbow', vmin=0, vmax=1)
                ax.set_axis_off()
    elif nrows == 1:
        # Single row: axes is a 1-D array, one axis per column.
        for c, ax in zip(np.arange(ncols), axes):
            nth=c
            if nth < nimg:
                ax.imshow(img[nth], cmap='rainbow', vmin=0, vmax=1)
                ax.set_axis_off()
    else:
        # General case: axes is a 2-D array indexed [row][col].
        for r, row in zip(np.arange(nrows), axes):
            for c, ax in zip(np.arange(ncols), row):
                nth=r*ncols+c
                if nth < nimg:
                    ax.imshow(img[nth], cmap='rainbow', vmin=0, vmax=1)
                # Axis is switched off even for empty trailing cells here.
                ax.set_axis_off()
    plt.show()
# 讀取 Cifar-10 資料集
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# 取前 32 張圖片做視覺化
images = x_train[:32]
img_combine(images)
# 建立 ImageDataGenerator,並指定我們要做資料增強的數值範圍
data_generator = ImageDataGenerator(
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# 注意!! ImageDataGenerator 是一個 Generator (生成器)! 對 Generator 不熟悉的同學請回到 Day098 做複習。
# 使用 .flow 後,就會對我們的影像進行增強,再 call next 取出 generator 的圖像。(shuffle=False 因為我們希望圖像的順序不要改變,方便觀察。實際訓練時預設是 shuffle=True)
augmented_iamges = next(data_generator.flow(images, shuffle=False))
img_combine(augmented_iamges.astype("int")) # 注意在訓練時神經網路時,圖像資料必須要是 float32,但在做視覺化時要轉為 int 才能順利畫圖。所以為了畫圖才把資料轉為 int
# 因為隨機性的關係,所以一樣的圖像再經過一次 generator 後的結果不一定相同
augmented_iamges = next(data_generator.flow(images, shuffle=False))
img_combine(augmented_iamges.astype("int"))
# ## 作業
# 請使用 ImageDataGenerator 來進行 Cifar-10 資料集的訓練,並觀察不同的圖像增強方法是否會顯著影響訓練結果
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import RMSprop, Adam
import os
# +
batch_size = 128
num_classes = 10
epochs = 10
data_augmentation = True
num_predictions =20
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'keras_cifar10_trained_model.h5'
# +
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# +
model = Sequential()
model.add(Conv2D(32,(3,3), padding="same",
input_shape =x_train.shape[1:]))
model.add(Activation("relu"))
model.add(Conv2D(32,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(64,(3,3), padding="same"))
model.add(Activation("relu"))
model.add(Conv2D(64,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation("softmax"))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
# +
data_generator.fit(x_train)
model.fit_generator(data_generator.flow(x_train, y_train,
batch_size=batch_size),
epochs=epochs,
validation_data=(x_test, y_test),
steps_per_epoch=10,
workers=4)
history = model.fit_generator(data_generator.flow(x_train, y_train,
batch_size=batch_size),
epochs=epochs,
validation_data=(x_test, y_test),
steps_per_epoch=10,
workers=4)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# -
# Use ImageDataGenerator
data_generator = ImageDataGenerator(
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
rescale=1./255)
# +
num_classes = 10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
data_generator.fit(x_train)
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# +
data_generator.fit(x_train)
model.fit_generator(data_generator.flow(x_train, y_train,
batch_size=batch_size),
epochs=epochs,
validation_data=(x_test, y_test),
steps_per_epoch=10,
workers=4)
history = model.fit_generator(data_generator.flow(x_train, y_train,
batch_size=batch_size),
epochs=epochs,
validation_data=(x_test, y_test),
steps_per_epoch=10,
workers=4)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# -
| 6,176 |
/SESSION_13-Intro_to_ML/SESSION13b (per Milo) - Data Cleaning part/.ipynb_checkpoints/UntidyToTidy_last-checkpoint.ipynb | 9dbea809f92f2e88c25a5866ef9d83fc550a522d | [] | no_license | maklaskarlos/Cogingwaves-Fuerte-2019 | https://github.com/maklaskarlos/Cogingwaves-Fuerte-2019 | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 27,371 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Untidy dataset
#
# The dataset manipulation we will see are quite complicated, so we will just have a brief look at them, without going in a deep dive.
# lets look at the dataset
import pandas as pd
import numpy as np
df=pd.read_csv('untidy_airquality.csv')
print(df.head())
print(df.columns)
# +
df=df.pivot_table(index=['Month', 'Day'], columns='measurement', values='reading')
#del df.index.name
df=df.reset_index()
df.head()
# +
# Data set esploration deep dive removed
# -
# # Missing values
# Now we will see some methods on how to deal with missing values
#
# ## Dropping NaN
df = pd.read_csv('airquality.csv')
print(df.head())
print(df.info())
# +
# Drop all the rows containing NaN
df_no_na=df.dropna()
print(df_no_na.head())
print(df_no_na.info())
#What is the problem here? Now we have a very small dataset. The humidity featura has a lot of NA values. So we
# drop the humidity column
# -
new_df=df.drop(['Humidity'], axis=1)
new_df.head()
# using subset you can decide to eliminate rows containing NaN on some columns.
# you should use this for eliminate rows where the target value is omissing.
df_clean_target=new_df.dropna(subset=['Temp'])
df_clean_target.info()
# +
# you can decide to mantain the rows where percentage of NaN is low enough
# For example if we decide to maintain only rows with only at least 5 non NaN
#(so only one NaN for row)
your_threshold=5
final_dropped=df_clean_target.dropna(thresh=your_threshold)
final_dropped.info()
# -
# ## Imputing missing values
# In case you mantain some missing values, they should be filled
# Now we will see some of the simplest methods.
# +
# NEVER, NEVER infer the target values in this way.
# What I just said? NEVER!
#Let's use simple imputer
df = final_dropped
import numpy as np
from sklearn.impute import SimpleImputer
#print the head of the original dataset
print(df.head())
# create an imputation method
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
# create a new dataframe with the imputed values
imputed=imp.fit_transform(df)
print('\n\n Transformed dataset with the imputed values')
# print the head of the new dataframe
print(imputed[:5])
# +
#Obviously I can use also the median.
#Try to do it by yourself! (google if necessary)
# NEVER, NEVER infer the target values in this way.
# What I just said? NEVER!
#Let's use simple imputer
df = final_dropped
print(df.head())
# create an imputation method
imp = SimpleImputer(missing_values=np.nan, strategy='median')
# create a new dataframe with the imputed values
imputed=imp.fit_transform(df)
print('\n\n Transformed dataset with the imputed values')
# print the head of the new dataframe
print(imputed[:5])
# -
# ### Advanced
# Do not execute next cell.
# +
# Home works: As said the best way is to infer the missing values using some better algorythms than
# the mean and the median.
# For example IterativeImputer (that will be in the next release of sklearn) use multiregression.
# At the end of this course you will be able to use even more sofisticated methods!
# For this we need last versions of
# %pip install --pre -f https://sklearn-nightly.scdn8.secure.raxcdn.com scikit-learn
import sklearn
import pandas as pd
print(sklearn.__version__)
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
df = pd.read_csv('airquality.csv')
imp = IterativeImputer(max_iter=4, random_state=0)
imputed=imp.fit_transform(df)
imputed[:5]
#df.columns
# -
# # Use a pipeline
# As told the best practice is to use a pipeline.
#
# And automate all the process as much as you can.
#
# +
# Example on how to create a pipeline for the numerical columns
#split the dataset in tran and test
df_train=new_df.sample(frac=0.8,random_state=200) #random state is a seed value
df_test=new_df.drop(df_train.index)
# seed value--> to make sure ramdomnes selection remains the same wheneverwe run this code
# -
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
# simple imputer step
num_si_step = ('si', SimpleImputer(strategy='mean'))
# standard scaler step.... I am presenting here just to see how everything can be combined
num_ss_step = ('ss', StandardScaler())
# combine all the numerical steps
num_steps = [num_si_step, num_ss_step]
# create the pipeline for the numerical steps
num_pipe = Pipeline(num_steps)
# define the numerical columns that you want to process with the pipeline
num_cols=['Ozone', 'Solar.R', 'Temp', 'Wind']
# create the parameter list for the function
num_transformers = [('num', num_pipe, num_cols)]
# create the processing function
ct = ColumnTransformer(transformers=num_transformers)
# impute and transform the numerical columns you defined and store in X_num_transformed
X_num_transformed = ct.fit_transform(df_train)
# X_num_transformed is now a matrix, no more a dataframe
# Now we have to substitute the transformed column into the original dataset
df_train_clean=df_train
df_train_clean[num_cols]=X_num_transformed
df_train_clean.info()
X_num_transformed=ct.transform(df_test)
print(X_num_transformed[:5])
df_test_clean=df_test
df_test_clean[num_cols]=X_num_transformed
df_test_clean.info()
# ### Categorical variables
# +
#Let's create an other toy dataset
df = pd.DataFrame({'Martial art': ['Kung_Fu', 'Box', 'Tai_chi', 'Karate', 'Wing-Chu','Capoeria'],
'Kicks': ['Yes','No' , 'NA', 'Yes', 'NA','Yes'],
'Aerial': ['Yes', 'No','No', 'Few', 'Few', 'Yes']})
df
# +
# Impute missing values with constant
imp = SimpleImputer(missing_values='NA', strategy='constant',
fill_value='MISSING')
imputed=imp.fit_transform(df)
imputed
# -
# imputing missing values with most frequent values
imp = SimpleImputer(missing_values='NA', strategy="most_frequent")
imputed=imp.fit_transform(df)
imputed
# +
# HOME WORK/COMPETITION:
# I will provide you a dataset with missing values both
# numerical and categorical.
# You should create a data cleaning pipeline and a ML algorithm.
# I will write a "certificate" for all the people that are able to do it.
# I will write a special one for the best result.
# PS: all the pipeline should run on a normal PC, not a giant cluster :P
| 6,579 |
/ETL_proj - Copy.ipynb | f823eb4f9a2ebe0e7e797f35f66f8d7905730b09 | [] | no_license | arinmuk/ETL_Challenge | https://github.com/arinmuk/ETL_Challenge | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 1,547,132 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# ## GroupBy
# +
## Mechanics:
df = pd.DataFrame({'key1' : ['a', 'a', 'b', 'b', 'a'],
'key2' : ['one', 'two', 'one', 'two', 'one'],
'data1' : np.random.randn(5),
'data2' : np.random.randn(5)})
## Group by:
grouped = df['data1'].groupby(df['key1'])
grouped.mean() # Df now indexed by the group key
## What if don't provide column:
df.groupby(df.key1).mean() # Will return mean for all appropriate values (no col returned for categorical / strings)
## Don't need to write df.groupby(df[<col name>]) -> if col in df, just pass col name
df.groupby('key1').mean()
## Iteration in groupby
## pd's groupby supports iteration, will generate sequence of 2 tuples w/ group name and actual group:
for name, group in df.groupby('key1'):
print("Name: ",name)
print("Group:\n", group)
list(df.groupby('key1')) ## Returns a list of tuples (with key and group) --> can transform to dict easily
## Selecting cols or subsets of cols
## What's the diff between following:
df.groupby('key1')['data1']
df['data1'].groupby(df['key1']) # Same, but because did subsetting before and type(df['data1']) is Series ..
# need to specify df in groupby argument.
## Using dicts and series for mappings
## Quick way to map and group in one step:
ex = pd.DataFrame(np.random.randn(5,3), columns = ['a', 'b','c'])
mapping = {'a':'Ar', 'b':'Blue', 'c':'CA'}
ex.groupby(mapping, axis = 1).sum()
# -
# ## Apply: General split-apply-combine
# +
##
o create a new Music Library of albums
#
# ### Here is the information asked of the data:
# #### 1. Give every album in the music library that was created by a given artist
# `select * from music_library WHERE artist_name="The Beatles"`
#
# ### Here is the collection of data
# <img src="images/table3.png" width="650" height="350">
# #### Practice by making the PRIMARY KEY only 1 Column (not 2 or more)
query = "CREATE TABLE IF NOT EXISTS music_library "
query = query + "(year int, artist_name text, album_name text, city text, PRIMARY KEY (artist_name))"
try:
session.execute(query)
except Exception as e:
print(e)
# ### Let's insert the data into the table
# +
query = "INSERT INTO music_library (year, artist_name, album_name, city)"
query = query + " VALUES (%s, %s, %s, %s)"
try:
session.execute(query, (1970, "The Beatles", "Let it Be", "Liverpool"))
except Exception as e:
print(e)
try:
session.execute(query, (1965, "The Beatles", "Rubber Soul", "Oxford"))
except Exception as e:
print(e)
try:
session.execute(query, (1965, "The Who", "My Generation", "London"))
except Exception as e:
print(e)
try:
session.execute(query, (1966, "The Monkees", "The Monkees", "Los Angeles"))
except Exception as e:
print(e)
try:
session.execute(query, (1970, "The Carpenters", "Close To You", "San Diego"))
except Exception as e:
print(e)
# -
# ### Validate the Data Model -- Does it give you two rows?
# +
query = "select * from music_library WHERE artist_name='The Beatles'"
try:
rows = session.execute(query)
except Exception as e:
print(e)
for row in rows:
print (row.year, row.artist_name, row.album_name, row.city)
# -
# ### If you used just one column as your PRIMARY KEY, your output should be:
# 1965 The Beatles Rubber Soul Oxford
#
#
# ### That didn't work out as planned! Why is that? Did you create a unique primary key?
# ### Try again - Create a new table with a composite key this time
query = "CREATE TABLE IF NOT EXISTS music_library1 "
query = query + "(artist_name text, album_name text, year int, city text, PRIMARY KEY (artist_name, album_name))"
try:
session.execute(query)
except Exception as e:
print(e)
# +
## You can opt to change the sequence of columns to match your composite key. \
## Make sure to match the values in the INSERT statement
query = "INSERT INTO music_library1 (year, artist_name, album_name, city)"
query = query + " VALUES (%s, %s, %s, %s)"
try:
session.execute(query, (1970, "The Beatles", "Let it Be", "Liverpool"))
except Exception as e:
print(e)
try:
session.execute(query, (1965, "The Beatles", "Rubber Soul", "Oxford"))
except Exception as e:
print(e)
try:
session.execute(query, (1965, "The Who", "My Generation", "London"))
except Exception as e:
print(e)
try:
session.execute(query, (1966, "The Monkees", "The Monkees", "Los Angeles"))
except Exception as e:
print(e)
try:
session.execute(query, (1970, "The Carpenters", "Close To You", "San Diego"))
except Exception as e:
print(e)
# -
# ### Validate the Data Model -- Did it work?
# +
query = "select * from music_library1 WHERE artist_name='The Beatles'"
try:
rows = session.execute(query)
except Exception as e:
print(e)
for row in rows:
print (row.year, row.artist_name, row.album_name, row.city)
# -
# ### Your output should be:
# 1970 The Beatles Let it Be Liverpool<br>
# 1965 The Beatles Rubber Soul Oxford
# ### Drop the tables
# +
query = "drop table music_library"
try:
rows = session.execute(query)
except Exception as e:
print(e)
query = "#####"
try:
rows = session.execute(query)
except Exception as e:
print(e)
# -
# ### Close the session and cluster connection
session.shutdown()
cluster.shutdown()
"Total_Gross"]=movie_df["Total_Gross"].replace({'\$': '',',': ''}, regex=True)
#movie_df["imdbVotes"]=movie_df["imdbVotes"].replace(',', '')
movie_df["Total_Gross"].astype(float)
#movie_df["Rating"]=movie_df["Rating"].astype(float)
#df[col] = df[col].replace('$', '') '\$': '',
#df[col] = df[col].replace(',', '') # assuming ',' is the thousand's separator in your locale
#df[col] = df[col].replace('%', '')
# -
movie_df.head()
movie_df.to_excel("boxOfficeNumbersaws.xlsx",index=False,header=True)
# ### 1.4 Encoding categorical and numerical features
# ### 1.4.1 encoding categorical features: clean_categories
# +
# Bag-of-words one-hot encoding of the cleaned project category labels.
vectorizer_cat = CountVectorizer()
vectorizer_cat.fit(X_train['clean_categories'].values) # fit has to happen only on train data
# Transform both splits with the vocabulary learned from train only,
# avoiding information leakage from the test split.
X_train_cc_ohe = vectorizer_cat.transform(X_train['clean_categories'].values)
X_test_cc_ohe = vectorizer_cat.transform(X_test['clean_categories'].values)
print("After vectorizations")
print(X_train_cc_ohe.shape, y_train.shape)
print(X_test_cc_ohe.shape, y_test.shape)
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2;
# newer versions need get_feature_names_out() -- confirm the pinned version.
print(vectorizer_cat.get_feature_names())
# -
# ### 1.4.2 encoding categorical features: clean_subcategories
# +
vectorizer_subcat = CountVectorizer()
vectorizer_subcat.fit(X_train['clean_subcategories'].values) # fit has to happen only on train data
X_train_csc_ohe = vectorizer_subcat.transform(X_train['clean_subcategories'].values)
X_test_csc_ohe = vectorizer_subcat.transform(X_test['clean_subcategories'].values)
print("After vectorizations")
print(X_train_csc_ohe.shape, y_train.shape)
print(X_test_csc_ohe.shape, y_test.shape)
print(vectorizer_subcat.get_feature_names())
# -
# ### 1.4.3 encoding categorical features: school_state
# +
vectorizer_school_state = CountVectorizer()
vectorizer_school_state.fit(X_train['school_state'].values)
X_train_state_ohe = vectorizer_school_state.transform(X_train['school_state'].values)
X_test_state_ohe = vectorizer_school_state.transform(X_test['school_state'].values)
print("After vectorizations")
print(X_train_state_ohe.shape, y_train.shape)
print(X_test_state_ohe.shape, y_test.shape)
print(vectorizer_school_state.get_feature_names())
# -
# ### 1.4.4 encoding categorical features: teacher_prefix
# +
vectorizer_prefix = CountVectorizer()
vectorizer_prefix.fit(X_train['teacher_prefix'].values)
X_train_teacher_ohe = vectorizer_prefix.transform(X_train['teacher_prefix'].values)
X_test_teacher_ohe = vectorizer_prefix.transform(X_test['teacher_prefix'].values)
print("After vectorizations")
print(X_train_teacher_ohe.shape, y_train.shape)
print(X_test_teacher_ohe.shape, y_test.shape)
print(vectorizer_prefix.get_feature_names())
# -
# ### 1.4.5 encoding categorical features: project_grade_category
# +
vectorizer_grade = CountVectorizer()
vectorizer_grade.fit(X_train['project_grade_category'].values)
X_train_grade_ohe = vectorizer_grade.transform(X_train['project_grade_category'].values)
X_test_grade_ohe = vectorizer_grade.transform(X_test['project_grade_category'].values)
print("After vectorizations")
print(X_train_grade_ohe.shape, y_train.shape)
print(X_test_grade_ohe.shape, y_test.shape)
print(vectorizer_grade.get_feature_names())
# -
# ### 1.4.6 encoding numerical features: price
# +
from sklearn.preprocessing import Normalizer
normalizer = Normalizer()
# NOTE(review): Normalizer is a stateless *per-sample* transformer; with
# reshape(1,-1) the whole price column is treated as a single sample, so
# each split is divided by its own L2 norm and fit() learns nothing from
# train. MinMaxScaler/StandardScaler on reshape(-1,1) is the usual intent.
# Left unchanged here to preserve the notebook's reported results.
normalizer.fit(X_train['price'].values.reshape(1,-1))
X_train_price_norm = normalizer.transform(X_train['price'].values.reshape(1,-1)).reshape(-1,1)
X_test_price_norm = normalizer.transform(X_test['price'].values.reshape(1,-1)).reshape(-1,1)
print("After vectorizations")
print(X_train_price_norm.shape, y_train.shape)
print(X_test_price_norm.shape, y_test.shape)
print(X_train_price_norm)
print(X_test_price_norm)
# -
# ### 1.4.7 encoding numerical features: teacher_number_of_previously_posted_projects
# +
from sklearn.preprocessing import Normalizer
normalizer = Normalizer()
# normalizer.fit(X_train['price'].values)
# this will rise an error Expected 2D array, got 1D array instead:
# array=[105.22 215.96 96.01 ... 368.98 80.53 709.67].
# Reshape your data either using
# array.reshape(-1, 1) if your data has a single feature
# array.reshape(1, -1) if it contains a single sample.
normalizer.fit(X_train['teacher_number_of_previously_posted_projects'].values.reshape(1,-1))
X_train_ppp_norm = normalizer.transform(X_train['teacher_number_of_previously_posted_projects'].values.reshape(1,-1)).reshape(-1,1)
X_test_ppp_norm = normalizer.transform(X_test['teacher_number_of_previously_posted_projects'].values.reshape(1,-1)).reshape(-1,1)
print("After vectorizations")
print(X_train_ppp_norm.shape, y_train.shape)
print(X_test_ppp_norm.shape, y_test.shape)
print(X_train_ppp_norm)
print(X_test_ppp_norm)
# -
# ### 1.4.8 encoding numerical features: quantity
# +
from sklearn.preprocessing import Normalizer
normalizer = Normalizer()
# normalizer.fit(X_train['price'].values)
# this will rise an error Expected 2D array, got 1D array instead:
# array=[105.22 215.96 96.01 ... 368.98 80.53 709.67].
# Reshape your data either using
# array.reshape(-1, 1) if your data has a single feature
# array.reshape(1, -1) if it contains a single sample.
normalizer.fit(X_train['quantity'].values.reshape(1,-1))
X_train_quantity_norm = normalizer.transform(X_train['quantity'].values.reshape(1,-1)).reshape(-1,1)
X_test_quantity_norm = normalizer.transform(X_test['quantity'].values.reshape(1,-1)).reshape(-1,1)
print("After vectorizations")
print(X_train_quantity_norm.shape, y_train.shape)
print(X_test_quantity_norm.shape, y_test.shape)
print(X_train_quantity_norm)
print(X_test_quantity_norm)
# -
# ### 1.4.9 encoding numerical features: sentiment score's of each of the essay
# +
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# import nltk
# nltk.download('vader_lexicon')
sid = SentimentIntensityAnalyzer()
ss_train = []
ss_test = []
for essay in X_train['essay']:
ss_train.append(sid.polarity_scores(essay)['pos'])
for essay in X_test['essay']:
ss_test.append(sid.polarity_scores(essay)['pos'])
# we can use these 4 things as features/attributes (neg, neu, pos, compound)
# neg: 0.0, neu: 0.753, pos: 0.247, compound: 0.93
print(len(ss_train))
print(len(ss_test))
print(ss_train[7])
print(ss_test[7])
ss_train_array = np.array(ss_train)
ss_test_array = np.array(ss_test)
print(ss_train_array.shape)
print(ss_test_array.shape)
# +
from sklearn.preprocessing import Normalizer
normalizer = Normalizer()
normalizer.fit(ss_train_array.reshape(1,-1))
X_train_ss_norm = normalizer.transform(ss_train_array.reshape(1,-1)).reshape(-1,1)
X_test_ss_norm = normalizer.transform(ss_test_array.reshape(1,-1)).reshape(-1,1)
print("After vectorizations")
print(X_train_ss_norm.shape, y_train.shape)
print(X_test_ss_norm.shape, y_test.shape)
print(X_train_ss_norm)
print(X_test_ss_norm)
# -
# ### 1.4.10 encoding numerical features: number of words in the title
# +
title_word_count_train = []
title_word_count_test = []
for i in X_train['project_title']:
title_word_count_train.append(len(i.split()))
for i in X_test['project_title']:
title_word_count_test.append(len(i.split()))
print(len(title_word_count_train))
print(len(title_word_count_test))
print(title_word_count_train[7])
print(title_word_count_train[7])
title_word_count_train_array = np.array(title_word_count_train)
title_word_count_test_array = np.array(title_word_count_test)
print(title_word_count_train_array.shape)
print(title_word_count_test_array.shape)
# +
from sklearn.preprocessing import Normalizer
normalizer = Normalizer()
normalizer.fit(title_word_count_train_array.reshape(1,-1))
X_train_twc_norm = normalizer.transform(title_word_count_train_array.reshape(1,-1)).reshape(-1,1)
X_test_twc_norm = normalizer.transform(title_word_count_test_array.reshape(1,-1)).reshape(-1,1)
print("After vectorizations")
print(X_train_twc_norm.shape, y_train.shape)
print(X_test_twc_norm.shape, y_test.shape)
print(X_train_twc_norm)
print(X_test_twc_norm)
# -
# ### 1.4.11 encoding numerical features: number of words in the combine essays
# +
essay_word_count_train = []
essay_word_count_test = []
for i in X_train['essay']:
essay_word_count_train.append(len(i.split()))
for i in X_test['essay']:
essay_word_count_test.append(len(i.split()))
print(len(essay_word_count_train))
print(len(essay_word_count_test))
print(essay_word_count_train[7])
print(essay_word_count_test[7])
essay_word_count_train_array = np.array(essay_word_count_train)
essay_word_count_test_array = np.array(essay_word_count_test)
print(essay_word_count_train_array.shape)
print(essay_word_count_test_array.shape)
# +
from sklearn.preprocessing import Normalizer
normalizer = Normalizer()
normalizer.fit(essay_word_count_train_array.reshape(1,-1))
X_train_ewc_norm = normalizer.transform(essay_word_count_train_array.reshape(1,-1)).reshape(-1,1)
X_test_ewc_norm = normalizer.transform(essay_word_count_test_array.reshape(1,-1)).reshape(-1,1)
print("After vectorizations")
print(X_train_ewc_norm.shape, y_train.shape)
print(X_test_ewc_norm.shape, y_test.shape)
print(X_train_ewc_norm)
print(X_test_ewc_norm)
# -
# ### Merging all the categorical and numerical features with variations of text features
# +
# merge two sparse matrices: https://stackoverflow.com/a/19710648/4084039
from scipy.sparse import hstack
X_train_matrix = hstack((X_train_cc_ohe, X_train_csc_ohe, X_train_grade_ohe, X_train_state_ohe,
X_train_teacher_ohe, X_train_price_norm, X_train_ppp_norm,
X_train_ewc_norm, X_train_twc_norm, X_train_ss_norm, X_train_quantity_norm,
train_vec)).tocsr()
X_test_matrix = hstack((X_test_cc_ohe, X_test_csc_ohe, X_test_grade_ohe, X_test_state_ohe,
X_test_teacher_ohe, X_test_price_norm, X_test_ppp_norm,
X_test_ewc_norm, X_test_twc_norm, X_test_ss_norm, X_test_quantity_norm,
test_vec)).tocsr()
print("Final Data matrix")
print(X_train_matrix.shape, y_train.shape)
print(X_test_matrix.shape, y_test.shape)
# -
# ### Finding Best Hyper parameters using K-Fold CV
# +
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
parameters = {'max_depth' : [1, 2, 3, 4, 5], 'n_estimators' : [100, 200, 250, 300]}
xgbdt = xgb.XGBClassifier()
clf = GridSearchCV(xgbdt, parameters, cv=5, scoring='roc_auc', return_train_score=True)
clf.fit(X_train_matrix, y_train)
results = pd.DataFrame.from_dict(clf.cv_results_)
results = results.sort_values(['param_max_depth'])
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
K = results['param_max_depth']
M = results['param_n_estimators']
# +
trace1 = go.Scatter3d(x = K, y = M, z = train_auc, name = 'Train')
trace2 = go.Scatter3d(x = K, y = M, z = cv_auc, name = 'Cross Validation')
data = [trace1, trace2]
layout = go.Layout(scene = dict(xaxis = dict(title = 'max_depth'), yaxis = dict(title = 'n_estimators'),
zaxis = dict(title = 'AUC'),))
fig = go.Figure(data = data, layout = layout)
offline.iplot(fig, filename='3d-scatter-colorscale')
# +
import seaborn as sns; sns.set()
max_scores1 = pd.DataFrame(clf.cv_results_).groupby(['param_n_estimators', 'param_max_depth']).max().unstack()[['mean_test_score', 'mean_train_score']]
plt.figure(figsize=(10,6))
plt.title('Train Set')
sns.heatmap(max_scores1.mean_train_score, annot = True, fmt='.4g')
plt.show()
plt.figure(figsize=(10,6))
plt.title('CV Set')
sns.heatmap(max_scores1.mean_test_score, annot = True, fmt='.4g')
plt.show()
# -
best_max_depth = clf.best_params_['max_depth']
best_n_estimators = clf.best_params_['n_estimators']
print('best value for max depth is {} and best value for n_estimators is {}'.format(best_max_depth,best_n_estimators))
def batch_predict(clf, data, batch_size=1000):
    """Predict positive-class probabilities for `data` in fixed-size batches.

    roc_auc_score(y_true, y_score) needs probability estimates of the
    positive class, not the predicted labels, so predict_proba(...)[:, 1]
    is collected for every row.

    Parameters
    ----------
    clf : fitted classifier exposing predict_proba
    data : row-sliceable matrix of samples (csr_matrix / ndarray)
    batch_size : int, rows predicted per call (default 1000, matching
        the previously hard-coded chunk size -- backward compatible)

    Returns
    -------
    list of float : P(class=1) for each row of `data`, in input order.
    """
    y_data_pred = []
    # Largest multiple of batch_size that fits, e.g. with 49041 rows and
    # batch_size 1000 the loop covers the first 49000 rows.
    tr_loop = data.shape[0] - data.shape[0] % batch_size
    for i in range(0, tr_loop, batch_size):
        y_data_pred.extend(clf.predict_proba(data[i:i + batch_size])[:, 1])
    # Predict the remaining tail that did not fill a whole batch.
    if data.shape[0] % batch_size != 0:
        y_data_pred.extend(clf.predict_proba(data[tr_loop:])[:, 1])
    return y_data_pred
# +
# we are writing our own function for predict, with defined thresould
# we will pick a threshold that will give the least fpr
def find_best_threshold(threshould, fpr, tpr):
    """Return the ROC threshold maximising tpr*(1-fpr).

    The product is largest where the TPR is high while the FPR is low,
    so its argmax picks the threshold with the best trade-off.
    """
    quality = tpr * (1 - fpr)
    best_idx = np.argmax(quality)
    t = threshould[best_idx]
    print("the maximum value of tpr*(1-fpr)", quality.max(), "for threshold", np.round(t, 3))
    return t
def predict_with_best_t(proba, threshould):
    """Binarise probabilities: 1 where proba >= threshold, else 0."""
    return [1 if p >= threshould else 0 for p in proba]
# -
# ## Applying GBDT with obtained best Hyper parameters
# +
xgbdt = xgb.XGBClassifier(max_depth = best_max_depth, n_estimators = best_n_estimators)
xgbdt.fit(X_train_matrix, y_train)
# roc_auc_score(y_true, y_score) the 2nd parameter should be probability estimates of the positive class
# not the predicted outputs
y_train_pred = batch_predict(xgbdt, X_train_matrix)
y_test_pred = batch_predict(xgbdt, X_test_matrix)
train_fpr, train_tpr, tr_thresholds = roc_curve(y_train, y_train_pred)
test_fpr, test_tpr, te_thresholds = roc_curve(y_test, y_test_pred)
plt.plot(train_fpr, train_tpr, label="train AUC = "+str(auc(train_fpr, train_tpr)))
plt.plot(test_fpr, test_tpr, label="test AUC = "+str(auc(test_fpr, test_tpr)))
plt.legend()
plt.xlabel("TPR")
plt.ylabel("FPR")
plt.title("ERROR PLOTS")
plt.grid()
plt.show()
# +
from sklearn.metrics import confusion_matrix
best_t = find_best_threshold(tr_thresholds, train_fpr, train_tpr)
train = confusion_matrix(y_train, predict_with_best_t(y_train_pred, best_t))
test = confusion_matrix(y_test, predict_with_best_t(y_test_pred, best_t))
#https://stackoverflow.com/a/35572247dt
df_cm = pd.DataFrame(train, index = [i for i in range(2)], columns = [i for i in range(2)])
plt.figure(figsize = (10,7))
plt.title('Train confusion Matrix')
sns.heatmap(train, annot=True, fmt="d")
plt.show()
df_cm = pd.DataFrame(test, index = [i for i in range(2)], columns = [i for i in range(2)])
plt.figure(figsize = (10,7))
plt.title('Test confusion Matrix')
sns.heatmap(test, annot=True, fmt="d")
plt.show()
# -
# ## Conclusion
# +
## http://zetcode.com/python/prettytable/
from prettytable import PrettyTable
table = PrettyTable()
table.field_names = ["Vectorizer", "Model", "Hyper Parameters", "AUC"]
table.add_row(['AVG W2V', 'GBDT', ('Max Depth = '+str(best_max_depth) + ', n_estimators = '+str(best_n_estimators)), 0.7279])
print(table)
# -
# # Summary
#
# * Concatenated titles and essays.
# * Selected top 2k words based on their IDF values from the concatenated text.
# * Used Truncated SVD to reduce dimensions to 100 which explains more than 95% variance.
# * Calculated AVG W2V with the dictionary made from top 2000 words with 100 dimensions.
# * The obtained Vectorizer gave AUC 0.7279 with Max Depth = 1, n_estimators = 300
| 21,092 |
/tarea1/.ipynb_checkpoints/Untitled-checkpoint.ipynb | 70c21123539269056d9f9ef97a572bb412208d46 | [] | no_license | emenesesayc/INFO257_2020 | https://github.com/emenesesayc/INFO257_2020 | 0 | 0 | null | 2020-05-08T14:24:01 | 2020-05-04T18:23:09 | null | Jupyter Notebook | false | false | .py | 105,871 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Dependencies
# + _kg_hide-input=true
import json, warnings, shutil, glob
from jigsaw_utility_scripts import *
from scripts_step_lr_schedulers import *
from transformers import TFXLMRobertaModel, XLMRobertaConfig
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers, metrics, losses, layers
SEED = 0
seed_everything(SEED)
warnings.filterwarnings("ignore")
# -
# ## TPU configuration
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _kg_hide-input=true _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
strategy, tpu = set_up_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
AUTO = tf.data.experimental.AUTOTUNE
# -
# # Load data
# + _kg_hide-input=true
database_base_path = '/kaggle/input/jigsaw-data-split-roberta-192-ratio-2-clean-tail6/'
k_fold = pd.read_csv(database_base_path + '5-fold.csv')
valid_df = pd.read_csv("/kaggle/input/jigsaw-multilingual-toxic-comment-classification/validation.csv",
usecols=['comment_text', 'toxic', 'lang'])
print('Train samples: %d' % len(k_fold))
display(k_fold.head())
print('Validation samples: %d' % len(valid_df))
display(valid_df.head())
base_data_path = 'fold_1/'
fold_n = 1
# Unzip files
# !tar -xf /kaggle/input/jigsaw-data-split-roberta-192-ratio-2-clean-tail6/fold_1.tar.gz
# -
# # Model parameters
# + _kg_hide-input=true
base_path = '/kaggle/input/jigsaw-transformers/XLM-RoBERTa/'
# Training hyper-parameters and pretrained-model file locations.
config = {
    "MAX_LEN": 192,
    "BATCH_SIZE": 128,
    "EPOCHS": 4,
    "LEARNING_RATE": 1e-5,
    "ES_PATIENCE": None,  # None disables early stopping
    "base_model_path": base_path + 'tf-xlm-roberta-large-tf_model.h5',
    "config_path": base_path + 'xlm-roberta-large-config.json'
}
# Persist the run configuration next to the outputs. `config` is already a
# plain JSON-serialisable dict, so the original json.loads(json.dumps(...))
# round-trip was a no-op and has been dropped.
with open('config.json', 'w') as json_file:
    json.dump(config, json_file)
config
# -
# ## Learning rate schedule
# + _kg_hide-input=true
lr_min = 1e-7
lr_start = 0
lr_max = config['LEARNING_RATE']
step_size = len(k_fold[k_fold[f'fold_{fold_n}'] == 'train']) // config['BATCH_SIZE']
total_steps = config['EPOCHS'] * step_size
warmup_steps = step_size * 1
num_cycles = 1
rng = [i for i in range(0, total_steps, config['BATCH_SIZE'])]
y = [cosine_with_hard_restarts_schedule_with_warmup(tf.cast(x, tf.float32), total_steps=total_steps,
warmup_steps=warmup_steps, lr_start=lr_start,
lr_max=lr_max, lr_min=lr_min, num_cycles=num_cycles) for x in rng]
sns.set(style="whitegrid")
fig, ax = plt.subplots(figsize=(20, 6))
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
# -
# # Model
# +
module_config = XLMRobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)
def model_fn(MAX_LEN):
    """Build the XLM-R toxicity classifier: first-token state -> sigmoid head."""
    ids_in = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    mask_in = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
    backbone = TFXLMRobertaModel.from_pretrained(config['base_model_path'], config=module_config)
    hidden_states, _ = backbone({'input_ids': ids_in, 'attention_mask': mask_in})
    # Sentence representation: the hidden state of the first (<s>) token.
    sentence_repr = hidden_states[:, 0, :]
    toxic_prob = layers.Dense(1, activation='sigmoid', name='output')(sentence_repr)
    return Model(inputs=[ids_in, mask_in], outputs=toxic_prob)
# -
# # Train
# + _kg_hide-input=true
# Load data
x_train = np.load(base_data_path + 'x_train.npy')
y_train = np.load(base_data_path + 'y_train_int.npy').reshape(x_train.shape[1], 1).astype(np.float32)
x_valid_ml = np.load(database_base_path + 'x_valid.npy')
y_valid_ml = np.load(database_base_path + 'y_valid.npy').reshape(x_valid_ml.shape[1], 1).astype(np.float32)
#################### ADD TAIL ####################
x_train_tail = np.load(base_data_path + 'x_train_tail.npy')
y_train_tail = np.load(base_data_path + 'y_train_int_tail.npy').reshape(x_train_tail.shape[1], 1).astype(np.float32)
x_train = np.hstack([x_train, x_train_tail])
y_train = np.vstack([y_train, y_train_tail])
step_size = x_train.shape[1] // config['BATCH_SIZE']
valid_step_size = x_valid_ml.shape[1] // config['BATCH_SIZE']
# Build TF datasets
train_dist_ds = strategy.experimental_distribute_dataset(get_training_dataset(x_train, y_train, config['BATCH_SIZE'], AUTO, seed=SEED))
valid_dist_ds = strategy.experimental_distribute_dataset(get_validation_dataset(x_valid_ml, y_valid_ml, config['BATCH_SIZE'], AUTO, repeated=True, seed=SEED))
train_data_iter = iter(train_dist_ds)
valid_data_iter = iter(valid_dist_ds)
# Step functions
@tf.function
def train_step(data_iter):
    """Run `step_size` distributed optimisation steps on the replicas.

    Closes over the module-level `model`, `loss_fn`, `optimizer`, the
    train metric objects, `step_size` and `strategy` defined in this
    training cell; metrics accumulate until reset by the caller.
    """
    def train_step_fn(x, y):
        # Forward pass under a tape so the loss can be backpropagated.
        with tf.GradientTape() as tape:
            probabilities = model(x, training=True)
            loss = loss_fn(y, probabilities)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_auc.update_state(y, probabilities)
        train_loss.update_state(loss)
    # tf.range keeps the whole loop inside the compiled graph.
    # NOTE(review): experimental_run_v2 was renamed strategy.run in TF 2.2+.
    for _ in tf.range(step_size):
        strategy.experimental_run_v2(train_step_fn, next(data_iter))
@tf.function
def valid_step(data_iter):
    """Evaluate `valid_step_size` distributed batches without weight updates.

    Closes over the module-level `model`, `loss_fn`, the validation
    metric objects, `valid_step_size` and `strategy` from this cell.
    """
    def valid_step_fn(x, y):
        probabilities = model(x, training=False)
        loss = loss_fn(y, probabilities)
        valid_auc.update_state(y, probabilities)
        valid_loss.update_state(loss)
    # tf.range keeps the whole loop inside the compiled graph.
    for _ in tf.range(valid_step_size):
        strategy.experimental_run_v2(valid_step_fn, next(data_iter))
# Train model
with strategy.scope():
model = model_fn(config['MAX_LEN'])
lr = lambda: cosine_with_hard_restarts_schedule_with_warmup(tf.cast(optimizer.iterations, tf.float32),
total_steps=total_steps, warmup_steps=warmup_steps,
lr_start=lr_start, lr_max=lr_max, lr_min=lr_min,
num_cycles=num_cycles)
optimizer = optimizers.Adam(learning_rate=lr)
loss_fn = losses.binary_crossentropy
train_auc = metrics.AUC()
valid_auc = metrics.AUC()
train_loss = metrics.Sum()
valid_loss = metrics.Sum()
metrics_dict = {'loss': train_loss, 'auc': train_auc,
'val_loss': valid_loss, 'val_auc': valid_auc}
history = custom_fit(model, metrics_dict, train_step, valid_step, train_data_iter, valid_data_iter,
step_size, valid_step_size, config['BATCH_SIZE'], config['EPOCHS'],
config['ES_PATIENCE'], save_last=False)
# model.save_weights('model.h5')
# Make predictions
# x_train = np.load(base_data_path + 'x_train.npy')
# x_valid = np.load(base_data_path + 'x_valid.npy')
x_valid_ml_eval = np.load(database_base_path + 'x_valid.npy')
# train_preds = model.predict(get_test_dataset(x_train, config['BATCH_SIZE'], AUTO))
# valid_preds = model.predict(get_test_dataset(x_valid, config['BATCH_SIZE'], AUTO))
valid_ml_preds = model.predict(get_test_dataset(x_valid_ml_eval, config['BATCH_SIZE'], AUTO))
# k_fold.loc[k_fold[f'fold_{fold_n}'] == 'train', f'pred_{fold_n}'] = np.round(train_preds)
# k_fold.loc[k_fold[f'fold_{fold_n}'] == 'validation', f'pred_{fold_n}'] = np.round(valid_preds)
valid_df[f'pred_{fold_n}'] = valid_ml_preds
# Fine-tune on validation set
#################### ADD TAIL ####################
x_valid_ml_tail = np.hstack([x_valid_ml, np.load(database_base_path + 'x_valid_tail.npy')])
y_valid_ml_tail = np.vstack([y_valid_ml, y_valid_ml])
valid_step_size_tail = x_valid_ml_tail.shape[1] // config['BATCH_SIZE']
# Build TF datasets
train_ml_dist_ds = strategy.experimental_distribute_dataset(get_training_dataset(x_valid_ml_tail, y_valid_ml_tail,
config['BATCH_SIZE'], AUTO, seed=SEED))
train_ml_data_iter = iter(train_ml_dist_ds)
history_ml = custom_fit(model, metrr in top_node.children:
top_node = top_node.children[letter]
else:
# Prefix not in tree, go no further
return words
# Get words under prefix
if top_node == self.head:
queue = [node for key, node in top_node.children.iteritems()]
else:
queue = [top_node]
# Perform a breadth first search under the prefix
# A cool effect of using BFS as opposed to DFS is that BFS will return
# a list of words ordered by increasing length
while queue:
current_node = queue.pop()
if current_node.data != None:
# Isn't it nice to not have to go back up the tree?
words.append(current_node.data)
queue = [node for key,node in current_node.children.items()] + queue
return words
def getData(self, word):
""" This returns the 'data' of the node identified by the given word """
if not self.has_word(word):
raise ValueError('{} not found in trie'.format(word))
# Race to the bottom, get data
current_node = self.head
for letter in word:
current_node = current_node[letter]
return current_node.data
if __name__ == '__main__':
    """ Example use """
    trie = Trie()
    words = 'hello goodbye help gerald gold tea ted team to too tom stan standard money'
    for word in words.split():
        trie.add(word)
    # BUG FIX: the original print ended with a dangling comma and never
    # showed the membership result it announced.
    print("'goodbye' in trie: ", trie.has_word('goodbye'))
    print(trie.start_with_prefix('g'))
    print(trie.start_with_prefix('to'))
# -
# %timeit trie.has_word('goodbye')
s = set('hello goodbye help gerald gold tea ted team to too tom stan standard money'.split(' '))
# %timeit 'goodbye' in s
# + _kg_hide-input=true
x_test = np.load(database_base_path + 'x_test.npy')
test_preds = model.predict(get_test_dataset(x_test, config['BATCH_SIZE'], AUTO))
# + _kg_hide-input=true
submission = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/sample_submission.csv')
submission['toxic'] = test_preds
submission.to_csv('submission.csv', index=False)
display(submission.describe())
display(submission.head(10))
| 10,443 |
/Notas 04B - Simulated Annealing.ipynb | 31247f818cb90f87dd878aa2a0fba0d9dfce0217 | [] | no_license | leotalorac/20152.ai.uis | https://github.com/leotalorac/20152.ai.uis | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 222,156 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Refs:
#
# - Original paper: Kirkpatrick, S., Gelatt, C.D., and Vecchi, M.P., “Optimization by Simulated
# Annealing,” Science, Volume 220, Number 4598, 13 May 1983, pp. 671-
# 680
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
w_size = 100
n_cities = 20
cities = (np.random.random((n_cities,2))*w_size).astype(int)
cities = np.array([[ 2,88],[87,84],[84,6],[99,37], [60, 87], [ 8, 83], [43, 33], [45, 66], [28, 94], [ 3, 56], [14, 92], [88, 10], [33, 12], [33, 85], [69, 60], [67, 58], [80, 19], [81, 30], [69, 21], [78, 35]])
plt.scatter(cities[:,0], cities[:,1])
def TSP_cost(cities, solution):
    """Return the total Euclidean length of the closed tour `solution`."""
    ordered = cities[solution]
    # Distance from each city to the next; np.roll wraps the last city
    # back to the first one, closing the cycle.
    leg_lengths = np.linalg.norm(ordered - np.roll(ordered, -1, axis=0), axis=1)
    return leg_lengths.sum()
sol = np.random.permutation(len(cities))
print sol
# ## Creamos una función que para obtener un _vecino_ de cualquier solución
def TSP_neighbour(solution):
    """Return a copy of `solution` with one random adjacent pair swapped.

    The pair is circular: position n-1 is swapped with position 0.
    The input array is left untouched.
    """
    n = len(solution)
    idx = np.random.randint(n)      # single RNG draw, as before
    nxt = (idx + 1) % n             # wraps the last position to 0
    neighbour = solution.copy()
    neighbour[idx], neighbour[nxt] = solution[nxt], solution[idx]
    return neighbour
# ## Usamos las mismas funciones para el TSP de las notas anteriores
# +
def TSP_initialize_population(n_individuals, n_cities):
    """Return an (n_individuals, n_cities) array of random tours.

    Each row is an independent random permutation of the city indices.
    PORTABILITY FIX: `range` replaces the Python-2-only `xrange`; the
    behaviour is identical under Python 2 and the function now also runs
    under Python 3.
    """
    r = []
    for i in range(n_individuals):
        r.append(np.random.permutation(n_cities))
    return np.array(r)
def TSP_cost(cities, solution):
    """Total length of the closed tour visiting `cities` in `solution` order."""
    tour = cities[solution]
    # Vector from each city to its successor (wrapping back to the start).
    legs = np.roll(tour, -1, axis=0) - tour
    return np.sum(np.sqrt((legs ** 2).sum(axis=1)))
def TSP_plot_solution(cities, solution):
    """Draw all cities, the tour through them, and mark the starting city."""
    start_x = cities[solution[0], 0]
    start_y = cities[solution[0], 1]
    plt.scatter(cities[:, 0], cities[:, 1])
    # Append the first city again so the plotted path closes the loop.
    plt.plot(cities[solution, 0].tolist() + [start_x],
             cities[solution, 1].tolist() + [start_y])
    plt.scatter(start_x, start_y, marker="x", s=60, c="red", lw="5")
    plt.title("cost %.3f" % (TSP_cost(cities, solution)))
def TSP_plot_result(best, bests, means, stds):
    """Side-by-side view: cost evolution (left) and the best tour (right)."""
    figure = plt.figure(figsize=(12, 4))
    figure.add_subplot(1, 2, 1)
    plot_evolution(bests, means, stds)
    figure.add_subplot(1, 2, 2)
    # NOTE: relies on the module-level `cities` array, not a parameter.
    TSP_plot_solution(cities, best)
# -
# ## Hacemos un bucle bajando la temperatura
# +
# %%writefile code/sa.py
import numpy as np
import matplotlib.pyplot as plt
def plot_evolution(bests, means, stds):
    """Plot mean and best cost per cooling step, with a +/- std band."""
    steps = range(len(means))
    plt.plot(means, label="means")
    plt.plot(bests, label="bests")
    # Shaded band: one standard deviation around the population mean.
    plt.fill_between(steps, means - stds, means + stds, color="yellow", alpha=0.2)
    plt.legend()
def run_sa(n_individuals, n_cooling_steps, init_population_function, cost_function, generate_neighbor_function):
    """Population-based simulated annealing loop.

    Temperature T drops linearly from 1 to 0 across `n_cooling_steps`.
    At each step every individual is offered one random neighbour and
    moves there if the neighbour is cheaper, or unconditionally with
    probability T (a simplified acceptance rule that, unlike the classic
    Metropolis criterion exp(-delta/T), ignores the cost difference).

    Returns (min_sol, best_costs, mean_costs, std_costs): the overall
    best solution seen plus per-cooling-step population cost statistics.
    """
    pop = init_population_function(n_individuals)
    mean_costs = []
    std_costs = []
    best_costs = []
    best_sols = []  # NOTE(review): never appended to nor returned
    min_cost = np.inf
    min_sol = None
    for T in np.linspace(1,0,n_cooling_steps):
        costs = []
        for i in range(len(pop)):
            sol = pop[i]
            cost_sol = cost_function(sol)
            # generate a neighbour
            nbr = generate_neighbor_function(sol)
            cost_nbr = cost_function(nbr)
            # accept if the neighbour is better -- or, with probability T,
            # even when it is worse (exploration at high temperature)
            if cost_nbr<cost_sol or np.random.random()<T:
                sol = nbr
                cost_sol = cost_nbr
            pop[i] = sol
            costs.append(cost_sol)
            # Track the best solution ever seen across all individuals.
            if cost_sol < min_cost:
                min_sol = np.copy(pop[i])
                min_cost = cost_function(pop[i])
        best_costs.append(np.min(costs))
        mean_costs.append(np.mean(costs))
        std_costs.append(np.std(costs))
    mean_costs = np.array(mean_costs)
    std_costs = np.array(std_costs)
    best_costs = np.array(best_costs)
    return min_sol, best_costs, mean_costs, std_costs
# -
# ## Con pocos individuos y pocos pasos de enfriamiento
# +
# %run code/sa.py
n_individuals = 10
n_cooling_steps = 50
bestsol, bests, means, stds = run_sa(n_individuals = n_individuals,
n_cooling_steps = n_cooling_steps,
init_population_function = lambda x: TSP_initialize_population(x, n_cities),
cost_function = lambda x: TSP_cost(cities, x),
generate_neighbor_function = TSP_neighbour)
TSP_plot_result(bestsol, bests, means, stds)
# -
# ## Con pocos individuos y muchos pasos de enfriamiento
# +
# %run code/sa.py
n_individuals = 20
n_cooling_steps = 10000
bestsol, bests, means, stds = run_sa(n_individuals = n_individuals,
n_cooling_steps = n_cooling_steps,
init_population_function = lambda x: TSP_initialize_population(x, n_cities),
cost_function = lambda x: TSP_cost(cities, x),
generate_neighbor_function = TSP_neighbour)
TSP_plot_result(bestsol, bests, means, stds)
# -
# ## Con muchos individuos y no tantos pasos de enfriamiento
# (puede demorar uno o dos minutos)
# +
# %run code/sa.py
n_individuals = 100
n_cooling_steps = 5000
bestsol, bests, means, stds = run_sa(n_individuals = n_individuals,
n_cooling_steps = n_cooling_steps,
init_population_function = lambda x: TSP_initialize_population(x, n_cities),
cost_function = lambda x: TSP_cost(cities, x),
generate_neighbor_function = TSP_neighbour)
TSP_plot_result(bestsol, bests, means, stds)
# -
| 5,883 |
/jupyter/Untitled.ipynb | ed852c43c3dd1185c94ddd19c9078e7839776d6b | [] | no_license | justinphan3110/spert | https://github.com/justinphan3110/spert | 0 | 1 | null | 2020-07-29T08:27:29 | 2020-07-28T08:49:21 | null | Jupyter Notebook | false | false | .py | 48,103 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Estructuras de control
# for(i=0; i<100; i=i+1) { ... }
for i in range(0, 100):
print(i)
# ## MINI RETO: Imprimir la lista de todos los número impares en el rango de 0 a 99
for i in range(1,100,2):
print(i) # Solución Pythonesca
for i in range(0, 100):
print(i, i*i, i**3)
b = 1
while b <= 10:
print("Estoy en el ciclo while", b)
b += 1 # b = b + 1, b++
n = "10m"
if n es un entero:
n = int(n)
else:
# omitir el valor o marcarlo como valor inválido
print(n, "no es un entero")
n = "10m"
n = int(n)
type(n)
n = "10"
if n.isdigit():
n = int(n)
print(n, "es entero")
else:
print(n, "no es un entero")
n = "10"
n.isdigit()
# ## __RETO:__ Realizar un código en Python para solicitar un número entero al usuario, si el número es un entero, entonces imprimir la lista de los número enteros desde el 1 hasta el número N indicado por el usuario. Si N no es entero entonces solicitar nuevamente el valor al usuario y así hasta infinito.
enize_person) + 1
artwork_start, artwork_end = find_head_idx(tokens, artwork.split(" "))
person_start, person_end = find_head_idx(tokens, person.split(" "))
item = dict()
item.setdefault('tokens', tokens)
entities = list()
entities.append({'type': 'artwork', 'start': artwork_start, 'end': artwork_end})
entities.append({'type': 'person', 'start': person_start, 'end': person_end})
item.setdefault('entities', entities)
item.setdefault('relations', [{'type': 'artwork', 'head': 1, 'tail': 0}])
data.append(item)
find_head_idx(tokens, person.split(" "))
print(tokens)
print(person.split(" "))
# Split the assembled examples 85/15 into train/dev sets, preserving order:
# the first ~85% of `data` go to train, the remainder to dev (no shuffling).
train_data = list()
dev_data = list()
limit = len(data) * 0.85  # float threshold, compared against the train count below
print(limit)
for d in data:
    if len(train_data) <= limit:
        train_data.append(d)
    else:
        dev_data.append(d)
dev_data
import json
# Persist both splits as JSON files (spERT-style input format).
with open('data/data_train.json', 'w', encoding='utf-8') as f:
    json.dump(train_data, f)
with open('data/data_dev.json', 'w', encoding='utf-8') as f:
    json.dump(dev_data, f)
# +
types = dict()
types.setdefault("entities",{"artwork": {"short": "artwork", "verbose": "artwork"},
"person": {"short":"person", "verbose": "person"}})
types.setdefault("relations", {"artwork": {"short": "artwork",
"verbose": "artwork",
"symmetric": False}})
# -
with open('data/data_types.json', 'w', encoding='utf-8') as f:
json.dump(types, f)
| 2,783 |
/jupyter_notebooks/.ipynb_checkpoints/predict_problem-checkpoint.ipynb | 597160b8ed6f4db3b17187f9248d310a99b8d8a7 | [
"MIT"
] | permissive | jirifilip/pyIDS | https://github.com/jirifilip/pyIDS | 22 | 13 | MIT | 2020-12-06T14:13:39 | 2020-12-06T14:11:24 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 38,378 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import pyarc
from pyids import IDS
from pyids.data_structures.ids_classifier import mine_CARs
import scipy
from pyids.model_selection import mode
import numpy as np
from sklearn.metrics import f1_score, accuracy_score, roc_auc_score
# +
from pyarc.qcba.transformation import QCBATransformation
from pyarc import CBA
from pyarc.data_structures import TransactionDB
from pyarc.qcba.data_structures import QuantitativeDataFrame
import pandas as pd
from pyarc.qcba.data_structures import (
IntervalReader,
Interval,
QuantitativeDataFrame,
QuantitativeCAR
)
from pyarc.qcba.classifier import QuantitativeClassifier
from sklearn.metrics import accuracy_score
interval_reader = IntervalReader()
interval_reader.closed_bracket = "", "NULL"
interval_reader.open_bracket = "NULL", ""
interval_reader.infinity_symbol = "inf", "inf"
interval_reader.members_separator = "_to_"
interval_reader.compile_reader()
QuantitativeCAR.interval_reader = interval_reader
# -
data_train_disc = pd.read_csv("C:/code/python/machine_learning/assoc_rules/train/iris0.csv")
data_test_disc = pd.read_csv("C:/code/python/machine_learning/assoc_rules/train/iris1.csv")
quant_dataframe_train_disc = QuantitativeDataFrame(data_train_disc)
quant_dataframe_test_disc = QuantitativeDataFrame(data_test_disc)
cars = mine_CARs(data_train_disc, 10, sample=True)
ids = IDS()
ids.fit(class_association_rules=cars, quant_dataframe=quant_dataframe_train_disc, debug=False)
ids.clf.rules
print("Přednost IDS (train):", ids.score(quant_dataframe_train_disc))
print("Přednost IDS (test):", ids.score(quant_dataframe_test_disc))
ids.predict(quant_dataframe_train_disc)
# +
#iris test dataset obsahuje 1/3 instancí každé třídy, tedy výsledek test "Přednost IDS (test): 0.3333333333333333" odpovídá situaci, kdy se použil pouze modus.
# -
#dataframe
quant_dataframe_test_disc.dataframe.iloc[40:100, :]
#pro instanci 6 je predikována třída "setosa" ačkoliv žádné z pravidel ve výpisu výše predikující setosa nelze použít (jestli jsem se dobře díval)
# lze ale použít pravidlo predikující Iris-versicolor (id 476)
# +
# výpis níže ukazuje, že všechny instance jsou skutečně predikovány jako setosa -stejně jako výchozí třída.
# +
#PREDICT FUNCTION FOR DEBUGGING
quant_dataframe = quant_dataframe_test_disc
if type(quant_dataframe) != QuantitativeDataFrame:
print("Type of quant_dataframe must be QuantitativeDataFrame")
Y = quant_dataframe.dataframe.iloc[:,-1]
y_pred_dict = dict()
for rule in ids.clf.rules:
conf = rule.car.confidence
sup = rule.car.support
y_pred_per_rule = rule.predict(quant_dataframe)
rule_f1_score = scipy.stats.hmean([conf, sup])
y_pred_dict.update({rule_f1_score: y_pred_per_rule})
# rules in rows, instances in columns
y_pred_array = np.array(list(y_pred_dict.values()))
y_pred = []
minority_classes = []
if y_pred_dict:
for i in range(len(Y)):
all_NA = np.all(y_pred_array[:,i] == IDSRule.DUMMY_LABEL)
if all_NA:
minority_classes.append(Y[i])
# if the ruleset covers all instances
default_class = Y[0]
if minority_classes:
default_class = mode(minority_classes)
for i in range(len(Y)):
y_pred_array_datacase = y_pred_array[:,i]
non_na_mask = y_pred_array_datacase != IDSRule.DUMMY_LABEL
y_pred_array_datacase_non_na = y_pred_array_datacase[non_na_mask]
if len(y_pred_array_datacase_non_na) > 0:
y_pred.append(y_pred_array_datacase_non_na[0])
else:
y_pred.append(default_class)
else:
y_pred = len(Y) * [mode(Y)]
print(y_pred)
print(default_class)
| 3,929 |
/점프 투 파이썬/2.자료형/.ipynb_checkpoints/7.불-checkpoint.ipynb | 497acc897f0706800d8bf601054f066f64fef219 | [] | no_license | cheolhyunlee/self-studying | https://github.com/cheolhyunlee/self-studying | 2 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 2,989 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 불
# - 불(bool) 자료형이란 참(True)과 거짓(False)을 나타내는 자료형
# - True 또는 False 첫 문자를 항상 대문자로 사용해야 한다.
print(1 == 1)
print(2>1)
print(2<1)
# ### 자료형의 참과 거짓
# 값 참 or 거짓
# "python" 참
# "" 거짓
# [1, 2, 3] 참
# [] 거짓
# () 거짓
# {} 거짓
# 1 참
# 0 거짓
# None 거짓
# - 문자열, 리스트, 튜플, 딕셔너리 등의 값이 비어 있으면(" ", [ ], ( ), { }) 거짓이 된다.
# 당연히 비어있지 않으면 참이 된다.
a = [1, 2, 3, 4]
while a : # while 조건문:
print(a.pop()) # 수행할 문장
#a .pop()이라는 함수는 리스트 a의 마지막 요소를 끄집어내는 함수
#a가 빈 리스트([ ])가 되어 거짓이 된다.
#따라서 while문에서 조건이 거짓이 되므로 중지된다.
# +
if []:
print("참")
else:
print("거짓")
# 만약 [1, 2, 3]이 참이면 "참"이라는 문자열을 출력하고 그렇지 않으면 "거짓"이라는 문자열을 출력하라.
if [1, 2, 3]:
print("참")
else:
print("거짓")
# +
# 불 연산
a = bool("python")
print(a)
b = bool('')
print(b)
x = bool(())
print(x)
c = bool(0)
print(c)
d = bool(6)
print(d)
ax'}),
pd.DataFrame(x_train.groupby(key)[target].std()).rename(columns={target:f'te_{key}_{target}_std'}),
pd.DataFrame(x_train.groupby(key)[target].mean()).rename(columns={target:f'te_{key}_{target}_mean'}),
pd.DataFrame(x_train.groupby(key)[target].median()).rename(columns={target:f'te_{key}_{target}_median'}),
], axis = 1)
x_train = pd.merge(x_train, target_encoding, on=key, how='left')
x_valid = pd.merge(x_valid, target_encoding, on=key, how='left')
x_test = pd.merge(x_test, target_encoding, on=key, how='left')
x_train[f'diff_doi_cites_{key}'] = x_train['doi_cites'] - x_train[f'te_{key}_{target}_mean']
x_train[f'rate_doi_cites_{key}'] = x_train['doi_cites'] / x_train[f'te_{key}_{target}_mean']
x_valid[f'diff_doi_cites_{key}'] = x_valid['doi_cites'] - x_valid[f'te_{key}_{target}_mean']
x_valid[f'rate_doi_cites_{key}'] = x_valid['doi_cites'] / x_valid[f'te_{key}_{target}_mean']
x_test[f'diff_doi_cites_{key}'] = x_test['doi_cites'] - x_test[f'te_{key}_{target}_mean']
x_test[f'rate_doi_cites_{key}'] = x_test['doi_cites'] / x_test[f'te_{key}_{target}_mean']
return x_train, x_valid, x_test
# +
SEED = 777
# --------------------------------------
# パラメータ定義
# --------------------------------------
lgb_params = {
'objective': 'root_mean_squared_error',
'boosting_type': 'gbdt',
'n_estimators': 50000,
'learning_rate': 0.1,
}
result_y = []
result_proba = []
for fold_no in range(NFOLDS):
test_fold_no = fold_no
valid_fold_no = fold_no + 1
if valid_fold_no == NFOLDS:
valid_fold_no = 0
# train
train = df_train.copy()
y_train = train[~train['fold_no'].isin([test_fold_no, valid_fold_no])]['cites'].values
y_valid = train[train['fold_no'] == valid_fold_no]['cites'].values
y_test = train[train['fold_no'] == test_fold_no]['cites'].values
train['doi_cites'] = train['doi_cites'].astype('int')
train = train.drop(
['id', 'authors', 'title', 'comments',
'journal-ref', 'doi', 'report-no', 'categories', 'license',
'abstract', 'versions', 'update_date_x', 'authors_parsed', 'pub_publisher',
'update_date_y', 'first_created_date', 'last_created_date', 'doi_id', 'submitter', 'author_first']
, axis=1
)
x_train = train[~train['fold_no'].isin([test_fold_no, valid_fold_no])]
x_valid = train[train['fold_no'] == valid_fold_no]
x_test = train[train['fold_no'] == test_fold_no]
# target encoding
target = 'cites'
key = 'update_ym'
x_train, x_valid, x_test = make_statics_table(target, key, x_train, x_valid, x_test)
# drop
x_train = x_train.drop([target], axis=1)
x_valid = x_valid.drop([target], axis=1)
x_test = x_test.drop([target], axis=1)
# LightGBM
model = lgb.LGBMRegressor(**lgb_params)
model.fit(x_train, y_train,
eval_set=(x_valid, y_valid),
eval_metric='mse',
verbose=100,
early_stopping_rounds=50,
categorical_feature=['submitter_label', 'doi_id_label', 'author_first_label', 'pub_publisher_label', 'license_label']
)
fold_result = model.predict(x_test)
result_y.extend(y_test)
result_proba.extend(fold_result)
rmsle = mean_squared_error(y_test, fold_result, squared=False)
print(f"fold {fold_no} lgb score: {rmsle}")
rmsle = mean_squared_error(result_y, result_proba, squared=False)
print("+-" * 40)
print(f"score: {rmsle}")
# -
0.5060531287008133
x_train.shape
feature = x_train.copy()
df_feature = pd.DataFrame(model.booster_.feature_importance(importance_type='gain'), index=feature.columns, columns=['importance']).sort_values('importance', ascending=False)
df_feature.head(50)
df_feature.tail(50)
| 4,985 |
/BACK_ZELD.ipynb | 70fcb21082d0c82cbad092a73d0d4819f6970daf | [] | no_license | skariel/VELREC_T3 | https://github.com/skariel/VELREC_T3 | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .jl | 38,098 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Julia 0.4.1
# language: julia
# name: julia-0.4
# ---
# Zel'dovich velocity-reconstruction experiment (script-style notebook).
# All helpers (get_kd, pop_realization, to_cic!, ...) are defined in the
# included project files, so the notes below are inferred from their names —
# verify against jl/all.jl before relying on them.
include("jl/init_default.jl");
kd = get_kd(pos);  # neighbour-lookup structure over particle positions (presumably a k-d tree — confirm)
include("jl/all.jl")
pop_realization()
rho_to_1st_order_vel_pot!(rho);  # in place: density field -> 1st-order velocity potential
# reconstructed comoving velocity components, one row per spatial dimension
v = zeros(Float32, (3,PARTICLE_NUM_SMALL_REALIZATION))
v_original = Any[vx,vy,vz]  # reference velocity components from the realization
for dim in 1:3
get_1st_order_comoving_vel!(c1, 1.0, dim, pos, rho)
v[dim,:] = real(c1)
# compare reconstructed vs. reference component via a smoothed slope/std statistic
println(get_slope_std_smoothed(kd, v[dim,:], v_original[dim], pos, 10000.0, 100000))
end
# correlation function of the unmodified realization
pop_realization()
to_delta!(rho);
corr = get_correlation(rho);
# correlation after pushing particles along v with factor -5.0
pop_realization()
move_periodic_all_dims!(pos, v[1,:], v[2,:], v[3,:], -5.0);
println(mean_std_dx_vs_pushed_pos(pos))
to_cic!(pos, m, rho)  # cloud-in-cell mass assignment onto the density grid — confirm in jl/all.jl
to_delta!(rho)
_corr = get_correlation(rho);
# same again with factor -6.0
pop_realization()
move_periodic_all_dims!(pos, v[1,:], v[2,:], v[3,:], -6.0);
println(mean_std_dx_vs_pushed_pos(pos))
to_cic!(pos, m, rho)
to_delta!(rho)
_corr2 = get_correlation(rho);
# overlay the three correlation functions; the 1.32/1.35 factors rescale the
# pushed results for comparison — the origin of these constants is not shown here
hold(false)
plot(xl/1000, corr)
hold(true)
plot(xl/1000, _corr*1.32, "red")
plot(xl/1000, _corr2*1.35, "blue")
hold(false)
xlim(50,180)
ylim(0.0027,0.009)
grid(true)
xlabel("Mpc/h")
ylabel("\\xi")
| 1,299 |
/module_5/archives/Module_5.ipynb | 00a78c85489fc29ef5666676ed42be60e5df7706 | [] | no_license | R-ichardBall/mit_bda2 | https://github.com/R-ichardBall/mit_bda2 | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 167,385 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Question 1
# +
# Landing-control exercise: read the current altitude (an integer, from
# stdin) and print the appropriate instruction for the pilot.
Altitude = input("")
Altitude = int(Altitude)  # raises ValueError on non-numeric input
if Altitude <= 1000:
    # at or below 1000: cleared to land
    print("Safe to Land")
elif Altitude > 1000 and Altitude <= 5000:
    # between 1000 and 5000: descend to landing altitude first
    print("Bring down to 1000")
else:
    # above 5000: abort the approach
    # (fixed message typo: "trun around" -> "turn around")
    print("turn around")
# -
# # Question 2
# +
# Print every Armstrong (narcissistic) number between `lower` and `upper`
# inclusive: numbers equal to the sum of their own digits, each raised to
# the power of the digit count (e.g. 153 = 1**3 + 5**3 + 3**3).
lower = 1
upper = 200
for num in range(lower, upper + 1):
    order = len(str(num))      # number of digits in num
    digit_power_sum = 0        # renamed from `sum` to stop shadowing the builtin
    temp = num
    while temp > 0:
        digit = temp % 10
        digit_power_sum += digit ** order
        temp //= 10
    if num == digit_power_sum:
        print(num)
shed /Neoproterozoic_Palaeozoic_Shapes_20170508.gpml'
input_COBs = '/Users/Andrew/Documents/PhD/Models/Rodinia_Models/Palaeozoic_Bridge/Artificial_COBs.gpml'
topology_features = ['/Users/Andrew/Documents/PhD/Models/Rodinia_Models/Palaeozoic_Bridge/Convergence_20170424.gpml',
'/Users/Andrew/Documents/PhD/Models/Rodinia_Models/Palaeozoic_Bridge/Divergence_20170424.gpml',
'/Users/Andrew/Documents/PhD/Models/Rodinia_Models/Palaeozoic_Bridge/Transfoms_201704124.gpml',
'/Users/Andrew/Documents/PhD/Models/Rodinia_Models/Palaeozoic_Bridge/Topologies_20170424.gpml']
rotation_model = pygplates.RotationModel(input_rotation_model)
shapes = pygplates.FeatureCollection(input_shapes)
COBs = pygplates.FeatureCollection(input_COBs)
# -
#some parameters
num_of_time_steps = 1000
# +
#for time in range(520, num_of_time_steps + 1, 1):
time =750
resolved_topologies = []
shared_boundary_sections = []
all_tesselated_shared_sub_segment = []
continental_arcs = []
reconstructed_COBs = []
pygplates.resolve_topologies(topology_features, rotation_model, resolved_topologies, time, shared_boundary_sections)
pygplates.reconstruct(COBs, rotation_model, reconstructed_COBs, time)
for shared_boundary_section in shared_boundary_sections:
if shared_boundary_section.get_feature().get_feature_type() != pygplates.FeatureType.gpml_subduction_zone:
continue
for shared_sub_segment in shared_boundary_section.get_shared_sub_segments():
tmp = shared_sub_segment.get_resolved_geometry()
tesselated_shared_sub_segment = tmp.to_tessellated(np.radians(.2))
all_tesselated_shared_sub_segment.append(tesselated_shared_sub_segment)
for vertex in tesselated_shared_sub_segment:
#print vertex
for COB in reconstructed_COBs:
#print COB.get_reconstructed_geometry()
distance_radians = pygplates.GeometryOnSphere.distance(vertex, COB.get_reconstructed_geometry())*pygplates.Earth.mean_radius_in_kms
print np.average(distance_radians)
# -
len(tesselated_shared_sub_segment)
n. To compute the average number of calls per week we need only the total number of weeks and total number of calls. To compute the standard deviation we need the actual numbers in bins.
average_no_of_calls = len(user_calls) / float(no_weeks )
calls_per_week = [len(calls) for calls in weekly_bins]
# +
from numpy import std
calls_per_week_standard_deviation = std(calls_per_week)
print 'Users {} call data show {:.2f} calls per week on average with standard deviation of {:.2f}'.format(
uid, average_no_of_calls, calls_per_week_standard_deviation)
# -
# ## EX2 - Bandicoot intro
import bandicoot as bc
# We are now going to load bandicoot formated data for the same user we used before
B = bc.read_csv(uid, '../dateset/bandicootCall/data/records/', '../dataset/bandicootCall/data/antennas.csv')
# ### Now we can compute the number of call (only interactions we have included so far) with just one function call. Notice the groupby parameter.
bc.individual.number_of_interactions(B, groupby='week', interaction='call')
# We could do the same on a daily basis
bc.individual.number_of_interactions(B, groupby='day', interaction='call')
# Or even per month
bc.individual.number_of_interactions(B, groupby='month', interaction='call')
# We can further split the bins in times of day or parts of the week (weekday vs weekend). For instance for weekly indicators
bc.individual.number_of_interactions(B, groupby='week', interaction='call', split_day=True)
# Here we can see as one would expect a much higher number of interactions during the day vs the night
# ### Examples of other interesting indicators
bc.individual.active_days(B)
# Notice that bandicoot defaults to grouping by week if grouping to specified explicitly
bc.individual.response_rate_text(B, split_day=True)
# ## EX3:
# ## EX4:
# ## EX5:
# ## EX6: Loading a full dataset
# We will now load the full Friends and Family Reality commons dataset and compute a metric for all the users. We need to specify a flat records directory, with each file corresponding to a single user. It is crucial that the record file naming convention is being observed i.e. names of the files are the user ids.like in the example below:
# !ls -1 ../dateset/bandicootCall/data/records/ | head -n 6
# Now we are
# +
import glob, os
records_path = '../dateset/bandicootCall/data/records/'
antenna_file = '../dateset/bandicootCall/data/antennas.csv'
interactions = {}
for f in glob.glob(records_path + '*.csv'):
user_id = os.path.basename(f)[:-4]
try:
B = bc.read_csv(user_id, records_path, antenna_file, describe=False)
metrics_dict = bc.individual.number_of_interactions(B)
except Exception as e:
metrics_dict = {'name': user_id, 'error': str(e)}
interactions[user_id] = metrics_dict
# -
interactions
| 5,730 |
/sw.ipynb | 17c341376f979a33d084b616ba0d01d357aa3249 | [
"MIT"
] | permissive | shuklakirti/datascience | https://github.com/shuklakirti/datascience | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 1,403,493 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
pd.set_option('max_colwidth', 200)
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
f= r'SW_EpisodeIVLocal.txt'
data1 = pd.read_csv(f, delimiter='|')
data1 = data1.drop('Sr', axis=1)
data1.head(10)
f= r'SW_EpisodeVLocal.txt'
data2 = pd.read_csv(f, delimiter='|')
data2 = data2.drop('Sr', axis=1)
data2.head(10)
f= r'SW_EpisodeVILocal.txt'
data3 = pd.read_csv(f, delimiter='|')
data3 = data3.drop('Sr', axis=1)
data3.head(10)
# +
def cleancorpus(data):
    """Normalise the `dialogue` column of *data* in place and return it.

    Steps: strip punctuation, lowercase, drop digits, then remove English
    stopwords (NLTK's list plus extra contraction forms).  NOTE: mutates
    the DataFrame that is passed in as well as returning it.
    """
    stop_words = set(stopwords.words('english'))
    # add words that aren't in the NLTK stopwords list
    # (contractions appear without apostrophes because punctuation is
    # stripped before stopword removal below)
    new_stopwords = ['thats','weve','hes','theres','ive','im','will','can','cant','dont','youve','us'
                 ,'youre','youll','theyre','whats','didnt']
    new_stopwords_list = stop_words.union(new_stopwords)
    # drop everything that is not a word character or whitespace
    # (relies on the old pandas default of regex=True for str.replace)
    data["dialogue"] = data["dialogue"].str.replace('[^\w\s]','')
    data.dialogue = data.dialogue.apply(lambda x: x.lower())
    data.dialogue = data.dialogue.str.replace('\d+', '')  # remove digit runs
    # keep only non-stopword tokens, re-joined with single spaces
    data.dialogue = data.dialogue.str.split().apply\
    (lambda x: ' '.join(item for item in x if item not in new_stopwords_list))
    # NOTE(review): this replace targets space characters; if the pattern is a
    # single space it would glue all words together, yet the downstream word
    # counts work — the intent was probably collapsing doubled spaces.
    # Confirm the exact pattern against the original notebook.
    data.dialogue = data.dialogue.str.replace(' ', '')
    #print(data1.dialogue.head(120))
    #print(data.dialogue)
    return data
def get_top_n_words(corpus):
    """Count word occurrences across the `dialogue` column of *corpus*.

    Each dialogue string is split on whitespace; the counts of all
    resulting tokens are returned as a pandas Series sorted from most to
    least frequent (the order value_counts() produces).
    """
    all_words = [word for line in corpus['dialogue'] for word in line.split()]
    return pd.Series(all_words).value_counts()
def bigrams_calculate(bigramfile):
    """Build every adjacent word pair ("bigram") from the `dialogue` column.

    Each dialogue is tokenised on whitespace and consecutive tokens are
    joined with a single space.  The token stream is continuous across
    rows, so the last word of one dialogue is also paired with the first
    word of the next (same as the original stack/shift approach).
    Returns a pandas Series of bigram strings with a fresh 0..n-1 index.
    """
    tokens = bigramfile.dialogue.str.split(expand=True).stack()
    paired = tokens + ' ' + tokens.shift(-1)  # NaN marks the final token
    return paired.dropna().reset_index(drop=True)
def most_frequent_bigrams(freq_bigrams):
    """Return the 20 most frequent values in *freq_bigrams* as a Series.

    value_counts() already sorts by descending frequency, so the first 20
    rows are the top-20.  Uses head(20) instead of the original
    trailing-comma slice ``[0:20,]``: pandas interpreted that tuple as
    multi-dimensional indexing on a Series, which was deprecated in
    pandas 1.0 and later removed.
    """
    return freq_bigrams.value_counts().head(20)
# -
data1=cleancorpus(data1)
data1
#Dialogues Episodes
len(data1.dialogue)
#Characters Episodes
len(data1.character.unique())
#Most Frequent words
get_top_n_words(data1)
top_char = data1.character.value_counts()[0:20,]
top_char
mfb = most_frequent_bigrams(bigrams_calculate(data1))
mfb
a = mfb.to_frame()
a = a.reset_index()
a
# +
def ggplt_bg(df_ep_bigram):
    """Draw a bar chart of bigram frequencies.

    Parameters:
        df_ep_bigram: pandas Series of bigram -> count (e.g. the output of
            most_frequent_bigrams); plotted as-is with Series.plot.

    Returns:
        The result of plt.show() (None); the chart is the side effect.
    """
    plt.style.use('ggplot')
    # (removed two dead locals: a to_frame()/reset_index() copy that the
    # plot below never used)
    ax = df_ep_bigram.plot(kind='bar', title="Most Frequent Bigrams(Top 20)", figsize=(15, 10),
                           legend=True, fontsize=12)
    ax.set_xlabel("BIGRAM", fontsize=12)
    ax.set_ylabel("FREQUENCY", fontsize=12)
    return plt.show()
ggplt_bg(mfb)
# +
def ggplt(df_ep):
    """Bar-chart the per-character dialogue counts in *df_ep*.

    *df_ep* is a Series (character -> count); it is converted to a
    two-column frame ('index', 'character') and plotted.  Shows the chart
    and returns plt.show()'s result (None).
    """
    plt.style.use('ggplot')
    frame = df_ep.to_frame().reset_index()
    axes = frame[['index', 'character']].plot(
        kind='bar',
        title="Dialogues by a character(Top 20)",
        figsize=(15, 10),
        legend=True,
        fontsize=12,
    )
    axes.set_xlabel("CHARACTER", fontsize=12)
    axes.set_ylabel("DIALOGUE", fontsize=12)
    return plt.show()
ggplt(top_char)
# +
import wordcloud
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
data1['dialogue']=data1['dialogue'].astype('str')
dialogue_corpus=' '.join(data1['dialogue'])
dialogue_wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', height=2000, width=4000).generate(dialogue_corpus)
plt.figure(figsize=(16,8))
plt.imshow(dialogue_wordcloud)
plt.axis('off')
plt.show()
# -
data2=cleancorpus(data2)
data2
#Dialogues Episodes
len(data2.dialogue)
#Characters Episodes
len(data2.character.unique())
#Most Frequent words
get_top_n_words(data2)
top_char = data2.character.value_counts()[0:20,]
top_char
mfb = most_frequent_bigrams(bigrams_calculate(data2))
mfb
ggplt_bg(mfb)
# +
def ggplt(df_ep):
    """Bar-chart per-character dialogue counts (redefinition of the earlier ggplt)."""
    plt.style.use('ggplot')
    # value_counts Series -> two-column frame: 'index' (character name)
    # and 'character' (dialogue count)
    a = df_ep.to_frame()
    a = a.reset_index()
    ax = a[['index', 'character']].plot(kind='bar', title="Dialogues by a character(Top 20)", figsize=(15, 10),
                                     legend=True, fontsize=12)
    ax.set_xlabel("CHARACTER", fontsize=12)
    ax.set_ylabel("DIALOGUE", fontsize=12)
    return plt.show()
ggplt(top_char)
import wordcloud
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
data2['dialogue']=data2['dialogue'].astype('str')
dialogue_corpus=' '.join(data2['dialogue'])
dialogue_wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', height=2000, width=4000).generate(dialogue_corpus)
plt.figure(figsize=(16,8))
plt.imshow(dialogue_wordcloud)
plt.axis('off')
plt.show()
# -
data3=cleancorpus(data3)
data3
#Dialogues Episodes
len(data3.dialogue)
#Characters Episodes
len(data3.character.unique())
#Most Frequent words
get_top_n_words(data3)
top_char = data3.character.value_counts()[0:20,]
top_char
mfb = most_frequent_bigrams(bigrams_calculate(data3))
mfb
ggplt_bg(mfb)
# +
def ggplt(df_ep):
    """Bar-chart per-character dialogue counts (identical redefinition, repeated per notebook section)."""
    plt.style.use('ggplot')
    # Series -> frame with columns 'index' (character) and 'character' (count)
    a = df_ep.to_frame()
    a = a.reset_index()
    ax = a[['index', 'character']].plot(kind='bar', title="Dialogues by a character(Top 20)", figsize=(15, 10),
                                     legend=True, fontsize=12)
    ax.set_xlabel("CHARACTER", fontsize=12)
    ax.set_ylabel("DIALOGUE", fontsize=12)
    return plt.show()
ggplt(top_char)
import wordcloud
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
data3['dialogue']=data3['dialogue'].astype('str')
dialogue_corpus=' '.join(data3['dialogue'])
dialogue_wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', height=2000, width=4000).generate(dialogue_corpus)
plt.figure(figsize=(16,8))
plt.imshow(dialogue_wordcloud)
plt.axis('off')
plt.show()
# -
frames = [data1, data2, data3]
result = pd.concat(frames)
result
result=cleancorpus(result)
result
#Dialogues Episodes
len(result.dialogue)
#Characters Episodes
len(result.character.unique())
#Most Frequent words
get_top_n_words(result)
top_char = result.character.value_counts()[0:20,]
top_char
mfb = most_frequent_bigrams(bigrams_calculate(result))
mfb
ggplt_bg(mfb)
# +
def ggplt(df_ep):
    """Bar-chart per-character dialogue counts (fourth identical redefinition)."""
    plt.style.use('ggplot')
    # Series -> frame with columns 'index' (character) and 'character' (count)
    a = df_ep.to_frame()
    a = a.reset_index()
    ax = a[['index', 'character']].plot(kind='bar', title="Dialogues by a character(Top 20)", figsize=(15, 10),
                                     legend=True, fontsize=12)
    ax.set_xlabel("CHARACTER", fontsize=12)
    ax.set_ylabel("DIALOGUE", fontsize=12)
    return plt.show()
ggplt(top_char)
# +
import wordcloud
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
result['dialogue']=result['dialogue'].astype('str')
dialogue_corpus=' '.join(result['dialogue'])
dialogue_wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', height=2000, width=4000).generate(dialogue_corpus)
plt.figure(figsize=(16,8))
plt.imshow(dialogue_wordcloud)
plt.axis('off')
plt.show()
# +
#Sentiment Analysis
import nltk
import itertools
import collections
from collections import defaultdict, Counter
from nltk import word_tokenize
import csv
result['dialogue1'] = result['dialogue'].apply(word_tokenize)
dialogue_list = list(result.dialogue)
dialogue_merged = list(itertools.chain.from_iterable(dialogue_list))
wordList = defaultdict(list)
emotionList = defaultdict(list)
with open('NRC-Emotion-Lexicon-Wordlevel-v0.92.txt', 'r') as f:
reader = csv.reader(f, delimiter='\t')
headerRows = [i for i in range(0, 46)]
for row in headerRows:
next(reader)
for word, emotion, present in reader:
if int(present) == 1:
print(emotion)
wordList[word].append(emotion)
emotionList[emotion].append(word)
def generate_emotion_count(string, wt):
    """Count NRC emotion labels over a token sequence.

    Bug fix: the original ignored both of its parameters and iterated the
    module-level `dialogue_merged` list.  It now iterates the tokens passed
    in as *wt*; the caller builds *wt* with the exact same
    `itertools.chain.from_iterable(dialogue_list)` expression, so results
    are unchanged while the function finally honours its arguments.

    Parameters:
        string: unused (kept so existing call sites keep working).
        wt: iterable of word tokens.

    Returns:
        collections.Counter mapping emotion name -> occurrence count, via
        the module-level `wordList` (word -> list of emotion labels).
    """
    emoCount = Counter()
    for token in wt:
        # wordList is a defaultdict(list); unknown words contribute nothing
        emoCount += Counter(wordList[token])
    return emoCount
wt = list(itertools.chain.from_iterable(dialogue_list))
emotionCounts = [generate_emotion_count(result.dialogue, wt)]
dialogues = result['dialogue']
emotion_df = pd.DataFrame(emotionCounts)
emotion_df = emotion_df.fillna(0)
plt.style.use('ggplot')
axs = emotion_df.plot(kind='bar', title="Dialogues by a character(Top 20)", figsize=(15, 10), legend=True, fontsize=12)
axs.set_xlabel("CHARACTER", fontsize=12)
axs.set_ylabel("DIALOGUE", fontsize=12)
plt.show()
# emotion_df
# -
| 8,610 |
/NYC_Citibike_Challenge.ipynb | af4b580f94dd57c1686795391498f64f4873172b | [] | no_license | su-linterhell/bikesharing | https://github.com/su-linterhell/bikesharing | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 31,732 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: PythonData
# language: python
# name: pythondata
# ---
import pandas as pd
# +
# 1. Create a DataFrame for the 201908-citibike-tripdata data.
# NOTE(review): absolute local path — only runs on the author's machine.
df = pd.read_csv('/Users/replate-blueberry/Desktop/Data Analysis/bikeshariing/201908-citibike-tripdata.csv')
df
# -
# 2. Check the datatypes of your columns.
df.dtypes
# 3. Convert the 'tripduration' column to datetime datatype.
# unit='m' makes to_datetime treat each value as minutes since the Unix
# epoch, yielding datetime64 values as the exercise asks.
df['tripduration'] = pd.to_datetime(df['tripduration'], unit = 'm')
df
# 4. Check the datatypes of your columns.
df.dtypes
# 5. Export the Dataframe as a new CSV file without the index.
df.to_csv('citibike_data.csv', header = True, index= False)
e>1</code> y <code>8</code>, inclusive.
# - Si el usuario no proporciona un número entero positivo no mayor que <code>8</code>, debe volver a solicitar el mismo.
# - Luego, genere (con la ayuda de <code>print</code> uno o más bucles) la media pirámide deseada.
# - Tenga cuidado de alinear la esquina inferior izquierda de su media pirámide con el borde izquierdo de la ventana de su terminal.
# ## Uso
# Su programa debería comportarse según el ejemplo siguiente.
# <code>$ ./mario
# Height: 4
# #
# ##
# ###
# ####</code>
# ## Pruebas
# - Ejecute su programa como <code>python mario.py</code> y espere una solicitud de entrada. Escribe <code>-1</code> y presiona enter. Su programa debe rechazar esta entrada como inválida, por ejemplo, volviendo a solicitar al usuario que escriba otro número.
# - Ejecute su programa como <code>python mario.py</code> y espere una solicitud de entrada. Escribe <code>0</code> y presiona enter. Su programa debe rechazar esta entrada como inválida, por ejemplo, volviendo a solicitar al usuario que escriba otro número.
# - Ejecute su programa como <code>python mario.py</code> y espere una solicitud de entrada. Escribe <code>1</code> y presiona enter. Su programa debería generar la siguiente salida. Asegúrese de que la pirámide esté alineada con la esquina inferior izquierda de su terminal y de que no haya espacios adicionales al final de cada línea.
# <code>#</code>
# Ejecute su programa como <code>python mario.py</code> y espere una solicitud de entrada. Escribe <code>2</code> y presiona enter. Su programa debería generar la siguiente salida. Asegúrese de que la pirámide esté alineada con la esquina inferior izquierda de su terminal y de que no haya espacios adicionales al final de cada línea.
#
# <code> #
# ##</code>
# Ejecute su programa como <code>python mario.py</code> y espere una solicitud de entrada. Escribe <code>8</code> y presiona enter. Su programa debería generar la siguiente salida. Asegúrese de que la pirámide esté alineada con la esquina inferior izquierda de su terminal y de que no haya espacios adicionales al final de cada línea.
#
# <code> #
# ##
# ###
# ####
# #####
# ######
# #######
# ########</code>
# Ejecute su programa como <code>python mario.py</code> y espere una solicitud de entrada. Escribe <code>9</code> y presiona enter. Su programa debe rechazar esta entrada como inválida, por ejemplo, volviendo a solicitar al usuario que escriba otro número. Luego, escribe <code>2</code> y presiona enter. Su programa debería generar la siguiente salida. Asegúrese de que la pirámide esté alineada con la esquina inferior izquierda de su terminal y de que no haya espacios adicionales al final de cada línea.
#
# <code> #
# ##</code>
#
#
# - Ejecute su programa como <code>python mario.py</code> y espere una solicitud de entrada. Escribe fooy presiona enter. Su programa debe rechazar esta entrada como inválida, por ejemplo, volviendo a solicitar al usuario que escriba otro número.
# - Ejecute su programa como <code>python mario.py</code> y espere una solicitud de entrada. No escriba nada y presione enter. Su programa debe rechazar esta entrada como inválida, por ejemplo, volviendo a solicitar al usuario que escriba otro número.
# Mario half-pyramid: keep prompting until the user types an integer
# between 1 and 8, then print a right-aligned pyramid of '#' of that height.
while True:
    n = input("\nIngrese una altura entre 1 y 8: ")
    if n.isdigit():  # rejects negatives, floats and non-numeric input
        n = int(n)
        if n in range(1, 9) :
            # row i has (n - i) leading spaces and i hash marks, so the
            # pyramid's lower-left corner sits flush with the margin
            for i in range(1, n + 1):
                print(" " * (n - i) + '#' * i)
            break  # valid height handled — leave the prompt loop
        else:
            # integer but out of range: re-prompt
            print("\nERROR!! vuelva a ingresar la altura")
    else:
        # not a digit string: re-prompt
        print("\nComando INVALIDO... vuelva a ingresar la altura")
, example
#bigram = this is, is a, test example
#trigram = this is a, is a test, a test example
# -
# #UNIGRAM
def get_top_n_words(x, n):
    """Return the *n* most frequent unigrams in the text series *x*.

    Uses sklearn's CountVectorizer to build a sparse document-term matrix
    and sums it column-wise to obtain corpus-wide counts.
    Returns a list of (word, count) tuples sorted by descending count.
    """
    vec = CountVectorizer().fit(x)
    bow = vec.transform(x)  # sparse docs-by-terms count matrix
    sum_words = bow.sum(axis = 0)  # 1 x vocab matrix of per-word totals
    words_freq = [(word, sum_words[0, idx])for word, idx in vec.vocabulary_.items()]
    # NOTE: the lambda's x shadows the function argument x (harmless here)
    words_freq = sorted(words_freq, key = lambda x: x[1], reverse = True)
    return words_freq[:n]
words = get_top_n_words(df['text'], 20)
print(words)
df1 = pd.DataFrame(words, columns = ['Unigram', 'Frequency'])
df1 = df1.set_index('Unigram')
df1.iplot(kind = 'bar', xTitle = 'Unigram', yTitle = 'Counts', title = 'Top 20 Unigram Words')
# #BIGRAM
def get_top_n_words(x, n):
    """Return the *n* most frequent bigrams in the text series *x*.

    Same as the unigram version above but with ngram_range=(2, 2), so the
    vocabulary consists of adjacent word pairs.
    Returns a list of (bigram, count) tuples sorted by descending count.
    """
    vec = CountVectorizer(ngram_range=(2,2)).fit(x)
    bow = vec.transform(x)  # sparse docs-by-bigrams count matrix
    sum_words = bow.sum(axis = 0)  # 1 x vocab matrix of per-bigram totals
    words_freq = [(word, sum_words[0, idx])for word, idx in vec.vocabulary_.items()]
    words_freq = sorted(words_freq, key = lambda x: x[1], reverse = True)
    return words_freq[:n]
words = get_top_n_words(df['text'], 20)
print(words)
df1 = pd.DataFrame(words, columns = ['Bigram', 'Frequency'])
df1 = df1.set_index('Bigram')
df1.iplot(kind = 'bar', xTitle = 'Bigram', yTitle = 'Counts', title = 'Top 20 Bigram Words')
# #TRIGRAM
def get_top_n_words(x, n):
    """Return the *n* most frequent trigrams in the text series *x*.

    Same as the unigram version above but with ngram_range=(3, 3), so the
    vocabulary consists of three-word sequences.
    Returns a list of (trigram, count) tuples sorted by descending count.
    """
    vec = CountVectorizer(ngram_range=(3,3)).fit(x)
    bow = vec.transform(x)  # sparse docs-by-trigrams count matrix
    sum_words = bow.sum(axis = 0)  # 1 x vocab matrix of per-trigram totals
    words_freq = [(word, sum_words[0, idx])for word, idx in vec.vocabulary_.items()]
    words_freq = sorted(words_freq, key = lambda x: x[1], reverse = True)
    return words_freq[:n]
words = get_top_n_words(df['text'], 20)
df1 = pd.DataFrame(words, columns = ['Trigram', 'Frequency'])
df1 = df1.set_index('Trigram')
df1.iplot(kind = 'bar', xTitle = 'Trigram', yTitle = 'Counts', title = 'Top 20 Trigram Words')
# + active=""
# #DISTRIBUTION OF TOP 20 PART-OF-SPEECH POS TAGS
# -
# !pip install nltk
import nltk
print(str(df['text']))
# #BIVARIATE ANALYSIS
df.head(10)
sns.catplot(x = 'category', y = 'polarity', data = df)
sns.catplot(x = 'category', y = 'polarity', data = df, kind = 'box')
sns.catplot(x = 'category', y = 'review_len', data=df)
# #DISTRIBUTION OF RSENTIMENT POLARITY BASED ON RECOMMENDATION
import plotly.express as px
import plotly.graph_objects as go
sns.jointplot(x = 'polarity', y = 'review_len', data=df, kind = 'scatter')
df.head()
df = df.drop_duplicates()
df = df.reset_index()
df.drop('index',inplace=True,axis=1)
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
df.sample(5)
corpus = []
for i in range(0, len(df)):
text = re.sub('[^a-zA-Z]', ' ', df['text'][i])
text = text.lower()
text = ' '.join((word) for word in text.split() if word not in stopwords.words('english'))
corpus.append(text)
type(corpus)
# !pip install gensim
import gensim
X = [text.split() for text in corpus]
print(X[0])
word_dim = 100
w2v_model = gensim.models.Word2Vec(sentences=X, size=word_dim, window=10, min_count=1)
len(w2v_model.wv.vocab)
w2v_model.wv['tv']
w2v_model.wv.most_similar('tvs')
# !pip install keras
# !pip install Tensorflow
from keras.preprocessing.text import Tokenizer
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
# Hoist the stop-word list into a set built once: calling
# stopwords.words('english') inside the filter rebuilds the list for every
# token and tests membership in O(n), which made this loop quadratic.
stop_words = set(stopwords.words('english'))
corpus = []
for i in range(0, len(df)):
    # Keep letters only, lowercase, then drop English stop words.
    text = re.sub('[^a-zA-Z]', ' ', df['text'][i])
    text = text.lower()
    text = ' '.join(word for word in text.split() if word not in stop_words)
    corpus.append(text)
# + active=""
# CountVectorizer(Bag of Words)
# +
# Bag-of-words features: keep the 2000 most frequent terms.
from sklearn.feature_extraction.text import CountVectorizer
bow = CountVectorizer(max_features=2000)
X_bow = bow.fit_transform(corpus).toarray()
# -
X_bow[4:9]
# Target labels.
y=df.category
# 80/20 train/test split with a fixed seed for reproducibility.
from sklearn.model_selection import train_test_split
X_train_bow,X_test_bow,y_train,y_test = train_test_split(X_bow,y,test_size=0.2,random_state=42)
# +
#LogisticRegression
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# saga solver with a raised iteration cap to help convergence on 2000 features.
lr = LogisticRegression(solver='saga',max_iter=500)
lr.fit(X_train_bow,y_train)
y_pred1 = lr.predict(X_test_bow)
print("******** LogisticRegression ********")
print(f'Accuracy is : {accuracy_score(y_pred1,y_test)}')
# +
# Multinomial naive Bayes -- a standard baseline for count features.
from sklearn.naive_bayes import MultinomialNB
mnb = MultinomialNB()
mnb.fit(X_train_bow,y_train)
y_pred2 = mnb.predict(X_test_bow)
print("******** MultinomialNB ********")
print(f'Accuracy is : {accuracy_score(y_pred2,y_test)}')
# +
# Random forest with default hyperparameters.
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
rf.fit(X_train_bow,y_train)
y_pred3 = rf.predict(X_test_bow)
print("******** RandomForestClassifier ********")
print(f'Accuracy is : {accuracy_score(y_pred3,y_test)}')
# +
# Gradient-boosted trees (XGBoost) with default hyperparameters.
import xgboost
from xgboost import XGBClassifier
xgb = XGBClassifier()
xgb.fit(X_train_bow,y_train)
y_pred4 = xgb.predict(X_test_bow)
print("******** XGBClassifier ********")
print(f'Accuracy is : {accuracy_score(y_pred4,y_test)}')
# +
# Support vector classifier (RBF kernel by default).
from sklearn.svm import SVC
svc = SVC()
svc.fit(X_train_bow,y_train)
y_pred5 = svc.predict(X_test_bow)
print("*******SVC*******")
print(f'Accuracy is: {accuracy_score(y_pred5,y_test)}')
# + active=""
# Tf-Idf Vectorizer
# +
# Tf-idf features over unigrams and bigrams, top 2000 terms.
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(max_features=2000,ngram_range=(1,2))
X_tfidf = tfidf.fit_transform(corpus).toarray()
# -
X_tfidf[4:9]
X_tfidf.shape
y = df.category
y
# Same 80/20 split and seed as the bag-of-words experiment, so the
# accuracy figures are comparable.
from sklearn.model_selection import train_test_split
X_train_tfidf, X_test_tfidf, y_train, y_test = train_test_split(X_tfidf,y,test_size = 0.2, random_state = 42)
X_train_tfidf.shape , X_test_tfidf.shape
# +
#LogisticRegression
# Same five models as above, retrained on the tf-idf representation.
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
lr = LogisticRegression()
lr.fit(X_train_tfidf,y_train)
y_pred1 = lr.predict(X_test_tfidf)
print("******** LogisticRegression after tfidf********")
print(f'Accuracy is : {accuracy_score(y_pred1,y_test)}')
# +
from sklearn.naive_bayes import MultinomialNB
mnb = MultinomialNB()
mnb.fit(X_train_tfidf,y_train)
y_pred2 = mnb.predict(X_test_tfidf)
print("******** MultinomialNB after tfidf********")
print(f'Accuracy is : {accuracy_score(y_pred2,y_test)}')
# +
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
rf.fit(X_train_tfidf,y_train)
y_pred3 = rf.predict(X_test_tfidf)
print("******** RandomForestClassifier after tfidf ********")
print(f'Accuracy is : {accuracy_score(y_pred3,y_test)}')
# +
from xgboost import XGBClassifier
xgb = XGBClassifier()
xgb.fit(X_train_tfidf,y_train)
y_pred4 = xgb.predict(X_test_tfidf)
print("******** XGBClassifier after tfidf********")
print(f'Accuracy is : {accuracy_score(y_pred4,y_test)}')
# +
from sklearn.svm import SVC
svc = SVC()
svc.fit(X_train_tfidf,y_train)
y_pred5 = svc.predict(X_test_tfidf)
print("*******SVC after tfidf*******")
print(f'Accuracy is: {accuracy_score(y_pred5,y_test)}')
# -
| 11,407 |
/MSOR411/Homework/1/energy.ipynb | d7bf7de7b593d39518ee9bf8cb90d93f9dc184c7 | [] | no_license | Mestrace/learn-linear-programming | https://github.com/Mestrace/learn-linear-programming | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 3,815 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import cvxpy as cp
# +
# Decision variable: energy generated by each source, in MW-hr, ordered as
# [coal, natural gas, nuclear, hydroelectric, petroleum].
energy_output = cp.Variable(5, integer=True)
# Unit cost of generation
# dollar / MW-hr
unit_cost = np.array([6.0, 5.5, 4.5, 5.0, 7.0])
# Maximum capacity of each source, MW-hr.
total_capacity = np.array([45000, 15000, 45000, 24000, 48000])
# Rates of Pollutant Emissions
# g / MW-hr; transposed so rows = pollutants, columns = energy sources.
pollutant_emissions = np.array([
    [1.5, 1.2, 0.7, 0.4],
    [0.2, 0.5, 0, 0],
    [0.5, 0.2, 0.4, 0.5],
    [0, 0, 0, 0],
    [0.4, 0.8, 0.5, 0.1]
]).T
# Allowable emission of each pollutant, g.
limits_on_pollutant_emissions = np.array([75000, 60000, 30000, 25000])
# +
# Objective: minimize total generation cost.
objective_function = cp.Minimize(unit_cost.T @ energy_output)
# resource1 = energy_output >= 0
# Coal must supply at least 36000 MW-hr; no source may exceed its capacity.
resource2 = energy_output >= np.array([36000, 0, 0, 0, 0])
resource3 = energy_output <= total_capacity
# regulation 1: nuclear material energy should not exceed 20% of total energy
# E_nuclear <= 0.2 * sum(E)
regulation1 = np.array([-0.2, -0.2, 0.8, -0.2, -0.2]) @ energy_output <= 0
# regulation 2: at least 80% capacity of coal plant is used
# 1.25 * E_coal >= coal_capacity  <=>  E_coal >= 0.8 * coal_capacity
regulation2 = np.array([1.25, 0, 0, 0, 0]) @ energy_output >= total_capacity[0]
# regulation 3: energy from natural gas should be at least 30% of that from petroleum
# E_gas >= 0.3 * E_petro
regulation3 = np.array([0, 1, 0,0, -0.3]) @ energy_output >= 0
# regulation 4: total emission of each pollutant must stay within its limit.
regulation4 = pollutant_emissions @ energy_output <= limits_on_pollutant_emissions
# need 1: total demand constraint.
# NOTE(review): an earlier comment said 100000 MW but the constraint enforces
# 125000 -- confirm which demand figure is intended.
need1 = sum(energy_output) >= 125000
# +
constraints = [
    regulation1,
    regulation2,
    regulation3,
    regulation4,
    need1,
    resource2,
    resource3
]
prob = cp.Problem(objective_function, constraints)
# -
# Solve the mixed-integer LP and inspect the optimal generation plan.
prob.solve()
energy_output.value
# **Exercise**: Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$:
# $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{1}$$
# $$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{2}$$
#
# where L is the number of layers and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
# +
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
    """
    Update parameters using one step of gradient descent

    Arguments:
    parameters -- python dictionary containing your parameters to be updated:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients to update each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    learning_rate -- the learning rate, scalar.

    Returns:
    parameters -- python dictionary containing your updated parameters
    """

    # The dictionary holds one W and one b per layer.
    num_layers = len(parameters) // 2

    # Plain gradient step for every layer: theta <- theta - alpha * d(theta).
    for layer in range(1, num_layers + 1):
        w_key = "W" + str(layer)
        b_key = "b" + str(layer)
        parameters[w_key] = parameters[w_key] - learning_rate * grads["dW" + str(layer)]
        parameters[b_key] = parameters[b_key] - learning_rate * grads["db" + str(layer)]

    return parameters
# +
# Smoke test using the course-provided fixture (defined in a companion
# testCases module, not visible in this file).
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# -
# **Expected Output**:
#
# <table>
# <tr>
# <td > **W1** </td>
# <td > [[ 1.63535156 -0.62320365 -0.53718766]
# [-1.07799357 0.85639907 -2.29470142]] </td>
# </tr>
#
# <tr>
# <td > **b1** </td>
# <td > [[ 1.74604067]
# [-0.75184921]] </td>
# </tr>
#
# <tr>
# <td > **W2** </td>
# <td > [[ 0.32171798 -0.25467393 1.46902454]
# [-2.05617317 -0.31554548 -0.3756023 ]
# [ 1.1404819 -1.09976462 -0.1612551 ]] </td>
# </tr>
#
# <tr>
# <td > **b2** </td>
# <td > [[-0.88020257]
# [ 0.02561572]
# [ 0.57539477]] </td>
# </tr>
# </table>
#
# A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent.
#
# - **(Batch) Gradient Descent**:
#
# ``` python
# X = data_input
# Y = labels
# parameters = initialize_parameters(layers_dims)
# for i in range(0, num_iterations):
# # Forward propagation
# a, caches = forward_propagation(X, parameters)
# # Compute cost.
# cost = compute_cost(a, Y)
# # Backward propagation.
# grads = backward_propagation(a, caches, parameters)
# # Update parameters.
# parameters = update_parameters(parameters, grads)
#
# ```
#
# - **Stochastic Gradient Descent**:
#
# ```python
# X = data_input
# Y = labels
# parameters = initialize_parameters(layers_dims)
# for i in range(0, num_iterations):
# for j in range(0, m):
# # Forward propagation
# a, caches = forward_propagation(X[:,j], parameters)
# # Compute cost
# cost = compute_cost(a, Y[:,j])
# # Backward propagation
# grads = backward_propagation(a, caches, parameters)
# # Update parameters.
# parameters = update_parameters(parameters, grads)
# ```
#
# In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will "oscillate" toward the minimum rather than converge smoothly. Here is an illustration of this:
#
# <img src="images/kiank_sgd.png" style="width:750px;height:250px;">
# <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **SGD vs GD**<br> "+" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>
#
# **Note** also that implementing SGD requires 3 for-loops in total:
# 1. Over the number of iterations
# 2. Over the $m$ training examples
# 3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)
#
# In practice, you'll often get faster results if you do not use neither the whole training set, nor only one training example, to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples.
#
# <img src="images/kiank_minibatch.png" style="width:750px;height:250px;">
# <caption><center> <u> <font color='purple'> **Figure 2** </u>: <font color='purple'> **SGD vs Mini-Batch GD**<br> "+" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>
#
# <font color='blue'>
# **What you should remember**:
# - The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.
# - You have to tune a learning rate hyperparameter $\alpha$.
# - With a well-tuned mini-batch size, usually it outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large).
# ## 2 - Mini-Batch Gradient descent
#
# Let's learn how to build mini-batches from the training set (X, Y).
#
# There are two steps:
# - **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y. Such that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches.
#
# <img src="images/kiank_shuffle.png" style="width:550px;height:300px;">
#
# - **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this:
#
# <img src="images/kiank_partition.png" style="width:550px;height:300px;">
#
# **Exercise**: Implement `random_mini_batches`. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:
# ```python
# first_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]
# second_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]
# ...
# ```
#
# Note that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\lfloor s \rfloor$ represents $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64` then there will be $\lfloor \frac{m}{mini\_batch\_size}\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be ($m-mini_\_batch_\_size \times \lfloor \frac{m}{mini\_batch\_size}\rfloor$).
# +
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
    """
    Creates a list of random minibatches from (X, Y)

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    mini_batch_size -- size of the mini-batches, integer

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """

    np.random.seed(seed)            # make the shuffle reproducible
    m = X.shape[1]                  # number of training examples

    # Step 1: shuffle the columns of X and Y with one shared permutation so
    # example i stays aligned with label i after shuffling.
    order = list(np.random.permutation(m))
    X_shuffled = X[:, order]
    Y_shuffled = Y[:, order].reshape((1, m))

    # Step 2: slice the shuffled set into consecutive full-size batches.
    batches = []
    num_full = m // mini_batch_size
    for b in range(num_full):
        lo = b * mini_batch_size
        hi = lo + mini_batch_size
        batches.append((X_shuffled[:, lo:hi], Y_shuffled[:, lo:hi]))

    # Trailing partial batch when m is not a multiple of mini_batch_size.
    if m % mini_batch_size != 0:
        lo = num_full * mini_batch_size
        batches.append((X_shuffled[:, lo:], Y_shuffled[:, lo:]))

    return batches
# +
# Sanity check shapes using the course-provided fixture (defined in a
# companion testCases module, not visible in this file).
X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)

print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
# -
# **Expected Output**:
#
# <table style="width:50%">
# <tr>
# <td > **shape of the 1st mini_batch_X** </td>
# <td > (12288, 64) </td>
# </tr>
#
# <tr>
# <td > **shape of the 2nd mini_batch_X** </td>
# <td > (12288, 64) </td>
# </tr>
#
# <tr>
# <td > **shape of the 3rd mini_batch_X** </td>
# <td > (12288, 20) </td>
# </tr>
# <tr>
# <td > **shape of the 1st mini_batch_Y** </td>
# <td > (1, 64) </td>
# </tr>
# <tr>
# <td > **shape of the 2nd mini_batch_Y** </td>
# <td > (1, 64) </td>
# </tr>
# <tr>
# <td > **shape of the 3rd mini_batch_Y** </td>
# <td > (1, 20) </td>
# </tr>
# <tr>
# <td > **mini batch sanity check** </td>
# <td > [ 0.90085595 -0.7612069 0.2344157 ] </td>
# </tr>
#
# </table>
# <font color='blue'>
# **What you should remember**:
# - Shuffling and Partitioning are the two steps required to build mini-batches
# - Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.
# ## 3 - Momentum
#
# Because mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will "oscillate" toward convergence. Using momentum can reduce these oscillations.
#
# Momentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the "velocity" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill.
#
# <img src="images/opt_momentum.png" style="width:400px;height:250px;">
# <caption><center> <u><font color='purple'>**Figure 3**</u><font color='purple'>: The red arrows shows the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>
#
#
# **Exercise**: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is:
# for $l =1,...,L$:
# ```python
# v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
# v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
# ```
# **Note** that the iterator l starts at 0 in the for loop while the first parameters are v["dW1"] and v["db1"] (that's a "one" on the superscript). This is why we are shifting l to l+1 in the `for` loop.
# +
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
    """
    Initializes the velocity as a python dictionary with:
                - keys: "dW1", "db1", ..., "dWL", "dbL"
                - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
    Arguments:
    parameters -- python dictionary containing your parameters.
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl

    Returns:
    v -- python dictionary containing the current velocity.
                    v['dW' + str(l)] = velocity of dWl
                    v['db' + str(l)] = velocity of dbl
    """

    num_layers = len(parameters) // 2  # two entries (W, b) per layer
    v = {}

    # One zero-filled array per gradient, shaped like its parameter.
    for layer in range(1, num_layers + 1):
        v["dW" + str(layer)] = np.zeros_like(parameters["W" + str(layer)])
        v["db" + str(layer)] = np.zeros_like(parameters["b" + str(layer)])

    return v
# +
# Check the velocity dictionary is all zeros (fixture from the companion
# testCases module, not visible in this file).
parameters = initialize_velocity_test_case()

v = initialize_velocity(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
# -
# **Expected Output**:
#
# <table style="width:40%">
# <tr>
# <td > **v["dW1"]** </td>
# <td > [[ 0. 0. 0.]
# [ 0. 0. 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["db1"]** </td>
# <td > [[ 0.]
# [ 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["dW2"]** </td>
# <td > [[ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["db2"]** </td>
# <td > [[ 0.]
# [ 0.]
# [ 0.]] </td>
# </tr>
# </table>
#
# **Exercise**: Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$:
#
# $$ \begin{cases}
# v_{dW^{[l]}} = \beta v_{dW^{[l]}} + (1 - \beta) dW^{[l]} \\
# W^{[l]} = W^{[l]} - \alpha v_{dW^{[l]}}
# \end{cases}\tag{3}$$
#
# $$\begin{cases}
# v_{db^{[l]}} = \beta v_{db^{[l]}} + (1 - \beta) db^{[l]} \\
# b^{[l]} = b^{[l]} - \alpha v_{db^{[l]}}
# \end{cases}\tag{4}$$
#
# where L is the number of layers, $\beta$ is the momentum and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a "one" on the superscript). So you will need to shift `l` to `l+1` when coding.
# +
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
    """
    Update parameters using Momentum

    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- python dictionary containing the current velocity:
                    v['dW' + str(l)] = ...
                    v['db' + str(l)] = ...
    beta -- the momentum hyperparameter, scalar
    learning_rate -- the learning rate, scalar

    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- python dictionary containing your updated velocities
    """

    num_layers = len(parameters) // 2  # two entries (W, b) per layer

    for layer in range(1, num_layers + 1):
        dw_key = "dW" + str(layer)
        db_key = "db" + str(layer)
        # Exponentially weighted average of past gradients (the "velocity").
        v[dw_key] = beta * v[dw_key] + (1 - beta) * grads[dw_key]
        v[db_key] = beta * v[db_key] + (1 - beta) * grads[db_key]
        # Step in the direction of the smoothed gradient.
        parameters["W" + str(layer)] = parameters["W" + str(layer)] - learning_rate * v[dw_key]
        parameters["b" + str(layer)] = parameters["b" + str(layer)] - learning_rate * v[db_key]

    return parameters, v
# +
# Run one momentum step on the course-provided fixture (defined in a
# companion testCases module, not visible in this file).
parameters, grads, v = update_parameters_with_momentum_test_case()

parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
# -
# **Expected Output**:
#
# <table style="width:90%">
# <tr>
# <td > **W1** </td>
# <td > [[ 1.62544598 -0.61290114 -0.52907334]
# [-1.07347112 0.86450677 -2.30085497]] </td>
# </tr>
#
# <tr>
# <td > **b1** </td>
# <td > [[ 1.74493465]
# [-0.76027113]] </td>
# </tr>
#
# <tr>
# <td > **W2** </td>
# <td > [[ 0.31930698 -0.24990073 1.4627996 ]
# [-2.05974396 -0.32173003 -0.38320915]
# [ 1.13444069 -1.0998786 -0.1713109 ]] </td>
# </tr>
#
# <tr>
# <td > **b2** </td>
# <td > [[-0.87809283]
# [ 0.04055394]
# [ 0.58207317]] </td>
# </tr>
#
# <tr>
# <td > **v["dW1"]** </td>
# <td > [[-0.11006192 0.11447237 0.09015907]
# [ 0.05024943 0.09008559 -0.06837279]] </td>
# </tr>
#
# <tr>
# <td > **v["db1"]** </td>
# <td > [[-0.01228902]
# [-0.09357694]] </td>
# </tr>
#
# <tr>
# <td > **v["dW2"]** </td>
# <td > [[-0.02678881 0.05303555 -0.06916608]
# [-0.03967535 -0.06871727 -0.08452056]
# [-0.06712461 -0.00126646 -0.11173103]] </td>
# </tr>
#
# <tr>
# <td > **v["db2"]** </td>
# <td > [[ 0.02344157]
# [ 0.16598022]
# [ 0.07420442]]</td>
# </tr>
# </table>
#
#
# **Note** that:
# - The velocity is initialized with zeros. So the algorithm will take a few iterations to "build up" velocity and start to take bigger steps.
# - If $\beta = 0$, then this just becomes standard gradient descent without momentum.
#
# **How do you choose $\beta$?**
#
# - The larger the momentum $\beta$ is, the smoother the update because the more we take the past gradients into account. But if $\beta$ is too big, it could also smooth out the updates too much.
# - Common values for $\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\beta = 0.9$ is often a reasonable default.
# - Tuning the optimal $\beta$ for your model might need trying several values to see what works best in term of reducing the value of the cost function $J$.
# <font color='blue'>
# **What you should remember**:
# - Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.
# - You have to tune a momentum hyperparameter $\beta$ and a learning rate $\alpha$.
# ## 4 - Adam
#
# Adam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum.
#
# **How does Adam work?**
# 1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction).
# 2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction).
# 3. It updates parameters in a direction based on combining information from "1" and "2".
#
# The update rule is, for $l = 1, ..., L$:
#
# $$\begin{cases}
# v_{dW^{[l]}} = \beta_1 v_{dW^{[l]}} + (1 - \beta_1) \frac{\partial \mathcal{J} }{ \partial W^{[l]} } \\
# v^{corrected}_{dW^{[l]}} = \frac{v_{dW^{[l]}}}{1 - (\beta_1)^t} \\
# s_{dW^{[l]}} = \beta_2 s_{dW^{[l]}} + (1 - \beta_2) (\frac{\partial \mathcal{J} }{\partial W^{[l]} })^2 \\
# s^{corrected}_{dW^{[l]}} = \frac{s_{dW^{[l]}}}{1 - (\beta_1)^t} \\
# W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{dW^{[l]}}}{\sqrt{s^{corrected}_{dW^{[l]}}} + \varepsilon}
# \end{cases}$$
# where:
# - t counts the number of steps taken of Adam
# - L is the number of layers
# - $\beta_1$ and $\beta_2$ are hyperparameters that control the two exponentially weighted averages.
# - $\alpha$ is the learning rate
# - $\varepsilon$ is a very small number to avoid dividing by zero
#
# As usual, we will store all parameters in the `parameters` dictionary
# **Exercise**: Initialize the Adam variables $v, s$ which keep track of the past information.
#
# **Instruction**: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for `grads`, that is:
# for $l = 1, ..., L$:
# ```python
# v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
# v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
# s["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
# s["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
#
# ```
# +
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters) :
    """
    Initializes v and s as two python dictionaries with:
                - keys: "dW1", "db1", ..., "dWL", "dbL"
                - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.

    Arguments:
    parameters -- python dictionary containing your parameters.
                    parameters["W" + str(l)] = Wl
                    parameters["b" + str(l)] = bl

    Returns:
    v -- python dictionary that will contain the exponentially weighted average of the gradient.
                    v["dW" + str(l)] = ...
                    v["db" + str(l)] = ...
    s -- python dictionary that will contain the exponentially weighted average of the squared gradient.
                    s["dW" + str(l)] = ...
                    s["db" + str(l)] = ...

    """

    L = len(parameters) // 2 # number of layers in the neural networks
    v = {}
    s = {}

    # Initialize v, s. Input: "parameters". Outputs: "v, s".
    # Fix: the graded placeholders previously left every entry as None, so
    # the returned dictionaries were unusable by the Adam update. Each moment
    # estimate starts as a zero array shaped like its parameter, mirroring
    # initialize_velocity.
    for l in range(L):
        v["dW" + str(l+1)] = np.zeros_like(parameters["W" + str(l+1)])
        v["db" + str(l+1)] = np.zeros_like(parameters["b" + str(l+1)])
        s["dW" + str(l+1)] = np.zeros_like(parameters["W" + str(l+1)])
        s["db" + str(l+1)] = np.zeros_like(parameters["b" + str(l+1)])

    return v, s
# +
# Check both Adam moment dictionaries are all zeros (fixture from the
# companion testCases module, not visible in this file).
parameters = initialize_adam_test_case()

v, s = initialize_adam(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
print("s[\"dW1\"] = " + str(s["dW1"]))
print("s[\"db1\"] = " + str(s["db1"]))
print("s[\"dW2\"] = " + str(s["dW2"]))
print("s[\"db2\"] = " + str(s["db2"]))
# -
# **Expected Output**:
#
# <table style="width:40%">
# <tr>
# <td > **v["dW1"]** </td>
# <td > [[ 0. 0. 0.]
# [ 0. 0. 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["db1"]** </td>
# <td > [[ 0.]
# [ 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["dW2"]** </td>
# <td > [[ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["db2"]** </td>
# <td > [[ 0.]
# [ 0.]
# [ 0.]] </td>
# </tr>
# <tr>
# <td > **s["dW1"]** </td>
# <td > [[ 0. 0. 0.]
# [ 0. 0. 0.]] </td>
# </tr>
#
# <tr>
# <td > **s["db1"]** </td>
# <td > [[ 0.]
# [ 0.]] </td>
# </tr>
#
# <tr>
# <td > **s["dW2"]** </td>
# <td > [[ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]] </td>
# </tr>
#
# <tr>
# <td > **s["db2"]** </td>
# <td > [[ 0.]
# [ 0.]
# [ 0.]] </td>
# </tr>
#
# </table>
#
# **Exercise**: Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$:
#
# $$\begin{cases}
# v_{W^{[l]}} = \beta_1 v_{W^{[l]}} + (1 - \beta_1) \frac{\partial J }{ \partial W^{[l]} } \\
# v^{corrected}_{W^{[l]}} = \frac{v_{W^{[l]}}}{1 - (\beta_1)^t} \\
# s_{W^{[l]}} = \beta_2 s_{W^{[l]}} + (1 - \beta_2) (\frac{\partial J }{\partial W^{[l]} })^2 \\
# s^{corrected}_{W^{[l]}} = \frac{s_{W^{[l]}}}{1 - (\beta_2)^t} \\
# W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{W^{[l]}}}{\sqrt{s^{corrected}_{W^{[l]}}}+\varepsilon}
# \end{cases}$$
#
#
# **Note** that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
# +
# GRADED FUNCTION: update_parameters_with_adam
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
                                beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
    """
    Update parameters using Adam

    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    t -- Adam step counter (>= 1), used for the bias-correction terms
    learning_rate -- the learning rate, scalar.
    beta1 -- Exponential decay hyperparameter for the first moment estimates
    beta2 -- Exponential decay hyperparameter for the second moment estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates

    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    """

    L = len(parameters) // 2                 # number of layers in the neural networks
    v_corrected = {}                         # Initializing first moment estimate, python dictionary
    s_corrected = {}                         # Initializing second moment estimate, python dictionary

    # Perform Adam update on all parameters
    for l in range(L):
        # Moving average of the gradients (first moment).
        v["dW" + str(l+1)] = beta1 * v["dW" + str(l+1)] + (1 - beta1) * grads["dW" + str(l+1)]
        v["db" + str(l+1)] = beta1 * v["db" + str(l+1)] + (1 - beta1) * grads["db" + str(l+1)]

        # Bias-corrected first moment estimate; the 1/(1 - beta1^t) factor
        # compensates for the zero initialization of v in early steps.
        v_corrected["dW" + str(l+1)] = v["dW" + str(l+1)] / (1 - beta1 ** t)
        v_corrected["db" + str(l+1)] = v["db" + str(l+1)] / (1 - beta1 ** t)

        # Moving average of the squared gradients (second moment).
        s["dW" + str(l+1)] = beta2 * s["dW" + str(l+1)] + (1 - beta2) * (grads["dW" + str(l+1)] ** 2)
        s["db" + str(l+1)] = beta2 * s["db" + str(l+1)] + (1 - beta2) * (grads["db" + str(l+1)] ** 2)

        # Bias-corrected second raw moment estimate.
        s_corrected["dW" + str(l+1)] = s["dW" + str(l+1)] / (1 - beta2 ** t)
        s_corrected["db" + str(l+1)] = s["db" + str(l+1)] / (1 - beta2 ** t)

        # Parameter update; epsilon keeps the denominator away from zero.
        parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * v_corrected["dW" + str(l+1)] / (s_corrected["dW" + str(l+1)] ** 0.5 + epsilon)
        parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * v_corrected["db" + str(l+1)] / (s_corrected["db" + str(l+1)] ** 0.5 + epsilon)

    return parameters, v, s
# +
# Run one Adam step (t = 2) on the grading fixture and display the
# updated parameters and both moment caches.
parameters, grads, v, s = update_parameters_with_adam_test_case()
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)
for label in ("W1", "b1", "W2", "b2"):
    print(label + " = " + str(parameters[label]))
for moment_name, moment in (("v", v), ("s", s)):
    for key in ("dW1", "db1", "dW2", "db2"):
        print(moment_name + '["' + key + '"] = ' + str(moment[key]))
# -
# **Expected Output**:
#
# <table>
# <tr>
# <td > **W1** </td>
# <td > [[ 1.63178673 -0.61919778 -0.53561312]
# [-1.08040999 0.85796626 -2.29409733]] </td>
# </tr>
#
# <tr>
# <td > **b1** </td>
# <td > [[ 1.75225313]
# [-0.75376553]] </td>
# </tr>
#
# <tr>
# <td > **W2** </td>
# <td > [[ 0.32648046 -0.25681174 1.46954931]
# [-2.05269934 -0.31497584 -0.37661299]
# [ 1.14121081 -1.09245036 -0.16498684]] </td>
# </tr>
#
# <tr>
# <td > **b2** </td>
# <td > [[-0.88529978]
# [ 0.03477238]
# [ 0.57537385]] </td>
# </tr>
# <tr>
# <td > **v["dW1"]** </td>
# <td > [[-0.11006192 0.11447237 0.09015907]
# [ 0.05024943 0.09008559 -0.06837279]] </td>
# </tr>
#
# <tr>
# <td > **v["db1"]** </td>
# <td > [[-0.01228902]
# [-0.09357694]] </td>
# </tr>
#
# <tr>
# <td > **v["dW2"]** </td>
# <td > [[-0.02678881 0.05303555 -0.06916608]
# [-0.03967535 -0.06871727 -0.08452056]
# [-0.06712461 -0.00126646 -0.11173103]] </td>
# </tr>
#
# <tr>
# <td > **v["db2"]** </td>
# <td > [[ 0.02344157]
# [ 0.16598022]
# [ 0.07420442]] </td>
# </tr>
# <tr>
# <td > **s["dW1"]** </td>
# <td > [[ 0.00121136 0.00131039 0.00081287]
# [ 0.0002525 0.00081154 0.00046748]] </td>
# </tr>
#
# <tr>
# <td > **s["db1"]** </td>
# <td > [[ 1.51020075e-05]
# [ 8.75664434e-04]] </td>
# </tr>
#
# <tr>
# <td > **s["dW2"]** </td>
# <td > [[ 7.17640232e-05 2.81276921e-04 4.78394595e-04]
# [ 1.57413361e-04 4.72206320e-04 7.14372576e-04]
# [ 4.50571368e-04 1.60392066e-07 1.24838242e-03]] </td>
# </tr>
#
# <tr>
# <td > **s["db2"]** </td>
# <td > [[ 5.49507194e-05]
# [ 2.75494327e-03]
# [ 5.50629536e-04]] </td>
# </tr>
# </table>
#
# You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference.
# ## 5 - Model with different optimization algorithms
#
# Lets use the following "moons" dataset to test the different optimization methods. (The dataset is named "moons" because the data from each of the two classes looks a bit like a crescent-shaped moon.)
train_X, train_Y = load_dataset()  # "moons" toy set; per model()'s contract X is (2, m), Y is (1, m) — TODO confirm load_dataset's shapes
# We have already implemented a 3-layer neural network. You will train it with:
# - Mini-batch **Gradient Descent**: it will call your function:
# - `update_parameters_with_gd()`
# - Mini-batch **Momentum**: it will call your functions:
# - `initialize_velocity()` and `update_parameters_with_momentum()`
# - Mini-batch **Adam**: it will call your functions:
# - `initialize_adam()` and `update_parameters_with_adam()`
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
          beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):
    """
    3-layer neural network trained with a choice of mini-batch optimizers.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    layers_dims -- python list, containing the size of each layer
    optimizer -- one of "gd", "momentum" or "adam"
    learning_rate -- the learning rate, scalar.
    mini_batch_size -- the size of a mini batch
    beta -- Momentum hyperparameter
    beta1 -- Exponential decay hyperparameter for the past gradients estimates
    beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates
    num_epochs -- number of epochs
    print_cost -- True to print the cost every 1000 epochs

    Returns:
    parameters -- python dictionary containing your updated parameters
    """

    costs = []      # cost history, sampled every 100 epochs for the final plot
    t = 0           # Adam step counter, drives the bias correction
    seed = 10       # fixed so the "random" minibatches are reproducible for grading

    parameters = initialize_parameters(layers_dims)

    # Optimizer-specific state: plain gradient descent needs none,
    # momentum tracks v, Adam tracks both v and s.
    if optimizer == "momentum":
        v = initialize_velocity(parameters)
    elif optimizer == "adam":
        v, s = initialize_adam(parameters)

    for epoch in range(num_epochs):
        # Bump the seed so every epoch reshuffles the data differently.
        seed = seed + 1
        for minibatch_X, minibatch_Y in random_mini_batches(X, Y, mini_batch_size, seed):
            # Forward pass, cost and backward pass on this minibatch.
            a3, caches = forward_propagation(minibatch_X, parameters)
            cost = compute_cost(a3, minibatch_Y)
            grads = backward_propagation(minibatch_X, minibatch_Y, caches)
            # Apply the selected update rule.
            if optimizer == "gd":
                parameters = update_parameters_with_gd(parameters, grads, learning_rate)
            elif optimizer == "momentum":
                parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
            elif optimizer == "adam":
                t = t + 1  # Adam counter
                parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
                                                               t, learning_rate, beta1, beta2, epsilon)
        # Report / record the cost of the last minibatch of this epoch.
        if print_cost and epoch % 1000 == 0:
            print("Cost after epoch %i: %f" % (epoch, cost))
        if print_cost and epoch % 100 == 0:
            costs.append(cost)

    # plot the cost
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('epochs (per 100)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
# You will now run this 3 layer neural network with each of the 3 optimization methods.
#
# ### 5.1 - Mini-batch Gradient descent
#
# Run the following code to see how the model does with mini-batch gradient descent.
# +
# train 3-layer model with plain mini-batch gradient descent
layers_dims = [train_X.shape[0], 5, 2, 1]  # input dim, two hidden layers (5 and 2 units), 1 output
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])  # fixed viewport so all three optimizer plots are comparable
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# -
# ### 5.2 - Mini-batch gradient descent with momentum
#
# Run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momemtum are small; but for more complex problems you might see bigger gains.
# +
# train 3-layer model with mini-batch gradient descent plus momentum
layers_dims = [train_X.shape[0], 5, 2, 1]  # same architecture as the plain-GD run, for a fair comparison
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")  # beta is the momentum coefficient
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])  # fixed viewport so all three optimizer plots are comparable
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# -
# ### 5.3 - Mini-batch with Adam mode
#
# Run the following code to see how the model does with Adam.
# +
# train 3-layer model with mini-batch Adam (default beta1/beta2/epsilon)
layers_dims = [train_X.shape[0], 5, 2, 1]  # same architecture as the other runs, for a fair comparison
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])  # fixed viewport so all three optimizer plots are comparable
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# -
# ### 5.4 - Summary
#
# <table>
# <tr>
# <td>
# **optimization method**
# </td>
# <td>
# **accuracy**
# </td>
# <td>
# **cost shape**
# </td>
#
# </tr>
# <td>
# Gradient descent
# </td>
# <td>
# 79.7%
# </td>
# <td>
# oscillations
# </td>
# <tr>
# <td>
# Momentum
# </td>
# <td>
# 79.7%
# </td>
# <td>
# oscillations
# </td>
# </tr>
# <tr>
# <td>
# Adam
# </td>
# <td>
# 94%
# </td>
# <td>
# smoother
# </td>
# </tr>
# </table>
#
# Momentum usually helps, but given the small learning rate and the simplistic dataset, its impact is almost negligible. Also, the huge oscillations you see in the cost come from the fact that some minibatches are more difficult than others for the optimization algorithm.
#
# Adam on the other hand, clearly outperforms mini-batch gradient descent and Momentum. If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster.
#
# Some advantages of Adam include:
# - Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum)
# - Usually works well even with little tuning of hyperparameters (except $\alpha$)
# **References**:
#
# - Adam paper: https://arxiv.org/pdf/1412.6980.pdf
| 42,368 |
/notebooks/41 - Validating machine learning models.ipynb | ee48c1d64e116c0abf229ea337f6b9caa2fe6ba5 | [
"CC0-1.0"
] | permissive | JesperDramsch/skillshare-data-science | https://github.com/JesperDramsch/skillshare-data-science | 26 | 11 | null | null | null | null | Jupyter Notebook | false | false | .py | 14,342 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Validating machine learning models
#
# Once we built a machine learning model, we need to validate that this model learnt something meaningful from our training. This part is machine learning validation.
#
# Validating a machine learning model is essential in developing any data-driven solution.
#
# It ensures that the model performs as intended and has learned relevant patterns from the data. Validation involves assessing a model's accuracy, reliability, and generalization performance. Machine learning validation is crucial because models can easily overfit the training data, making them unreliable in real-world scenarios.
#
# This process involves splitting the data into training and validation sets, evaluating the model's performance on the validation set, and tuning the model parameters until an acceptable level of performance is achieved.
# ## How To
# +
from sklearn.model_selection import train_test_split
import pandas as pd
df = pd.read_csv("data/housing.csv")
df.head()
# -
df = df.dropna()
# +
x_train, x_, y_train, y_ = train_test_split(df.drop(["longitude","latitude", "ocean_proximity", "median_house_value"], axis=1),
df.median_house_value, test_size=.5, stratify=df.ocean_proximity)
x_val, x_test, y_val, y_test = train_test_split(x_, y_, test_size=.5)
# -
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor().fit(x_train, y_train)
model.score(x_val, y_val)
# ## Cross-validation
from sklearn.model_selection import cross_val_score, cross_val_predict
cross_val_score(model, x_val, y_val)
cross_val_predict(model, x_test, y_test)
# ## Dummy Models
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.ensemble import RandomForestClassifier
dummy = DummyRegressor(strategy="mean")
dummy.fit(x_train, y_train)
dummy.score(x_val, y_val)
cross_val_predict(dummy, x_test, y_test)
# +
x_train, x_, y_train, y_ = train_test_split(df.drop(["longitude","latitude", "ocean_proximity", "median_house_value"], axis=1),
df.ocean_proximity, test_size=.5)
x_val, x_test, y_val, y_test = train_test_split(x_, y_, test_size=.5)
# -
dummy = DummyClassifier(strategy="prior")
dummy.fit(x_train, y_train)
dummy.score(x_val, y_val)
model = RandomForestClassifier().fit(x_train, y_train)
model.score(x_val, y_val)
cross_val_score(model, x_test, y_test)
cross_val_score(dummy, x_test, y_test)
# ## Exercise
#
# Try different dummy strategies and how they compare.
dummy = DummyClassifier(strategy=...)
# ## Additional Resources
# - [ELI5](https://eli5.readthedocs.io/)
# - [Dummy Models](https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyClassifier.html)
# - [ML Fairness](https://en.wikipedia.org/wiki/Fairness_(machine_learning))
would like to print out every element in a list.
# Let's try to use a <code>for</code> loop to print all the years presented in the list <code>dates</code>:
# + [markdown] colab_type="text" id="0kC8QFTA-bdy"
# This can be done as follows:
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="eKXQ2f1j-bd0" outputId="43357fe5-679a-4df0-a08d-31dd9390e9c2"
# For loop example
# For loop example: visit each position of the list by index
dates = [1982,1980,1973]
N = len(dates)
for position in range(N):
    print(dates[position])
# + [markdown] colab_type="text" id="2MoRDBDI-bd4"
# The code in the indent is executed <code>N</code> times, each time the value of <code>i</code> is increased by 1 for every execution. The statement executed is to <code>print</code> out the value in the list at index <code>i</code> as shown here:
# + [markdown] colab_type="text" id="XAtsP5hG-bd6"
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/LoopsForRange.gif" width="800" />
# + [markdown] colab_type="text" id="cq1rNhuY-bd7"
# In this example we can print out a sequence of numbers from 0 to 7:
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="Lb2IB9Ww-bd8" outputId="f6302acf-0d03-4d5e-8137-64eeacb68691"
# Example of for loop: print the integers 0 through 7
for i in range(8):
    print(i)
# + [markdown] colab_type="text" id="-hB0EBAL-beB"
# In Python we can directly access the elements in the list as follows:
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="92-Tv3mi-beC" outputId="34d05435-d09b-4973-b60c-8b565c223318"
# Example of a for loop that iterates directly over the list elements
# (no index needed); `dates` is the list defined in the earlier cell.
for year in dates:
    print(year)
# + [markdown] colab_type="text" id="0sRhUy89-beI"
# For each iteration, the value of the variable <code>year</code> behaves like the value of <code>dates[i]</code> in the first example:
# + [markdown] colab_type="text" id="kN2yzubu-beJ"
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/LoopsForList.gif" width="800">
# + [markdown] colab_type="text" id="VRtigeHy-beK"
# We can change the elements in a list:
# + colab={"base_uri": "https://localhost:8080/", "height": 187} colab_type="code" id="IiJ1R258-beL" outputId="128aa06f-2374-4fe6-bfc8-5362a7aa2217"
# Use a for loop to overwrite every element of the list in place
squares = ['red', 'yellow', 'green', 'purple', 'blue']
for index, colour in enumerate(squares):
    print("Before square ", index, 'is', colour)
    squares[index] = 'weight'
    print("After square ", index, 'is', squares[index])
# + [markdown] colab_type="text" id="PXCYWOVX-beQ"
# We can access the index and the elements of a list as follows:
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="gkDz_jXV-beQ" outputId="319df571-4ccf-4e0f-b7ed-5e7ce3fb7660"
# Loop through the list and iterate on both index and element value
squares = ['red', 'yellow', 'green', 'purple', 'blue']
for position, square in zip(range(len(squares)), squares):
    print(position, square)
# + [markdown] colab_type="text" id="G1dNibif-beT"
# ## <h3 id="while">What is <code>while</code> loop?</h3>
# + [markdown] colab_type="text" id="aQ9cr9lF-beU"
# As you can see, the <code>for</code> loop is used for a controlled flow of repetition. However, what if we don't know when we want to stop the loop? What if we want to keep executing a code block until a certain condition is met? The <code>while</code> loop exists as a tool for repeated execution based on a condition. The code block will keep being executed until the given logical condition returns a **False** boolean value.
#
# + [markdown] colab_type="text" id="WgXUBnoy-beW"
# Let’s say we would like to iterate through list <code>dates</code> and stop at the year 1973, then print out the number of iterations. This can be done with the following block of code:
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="dOENLySm-beW" outputId="02045ca0-e486-495c-c3e4-fb819d7e8159"
# While Loop Example: keep scanning the list until the year 1973 shows up
dates = [1982, 1980, 1973, 2000]
position = 0
year = 0
while year != 1973:
    year = dates[position]
    position += 1
    print(year)
print("It took ", position, "repetitions to get out of loop.")
# + [markdown] colab_type="text" id="xeRnp6LL-bea"
# A while loop keeps iterating as long as the condition in its argument is met, and stops as soon as it is not, as shown in the following figure:
# + [markdown] colab_type="text" id="26Yh5Np4-beb"
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/LoopsWhile.gif" width="650" />
# + [markdown] colab_type="text" id="ZL3qDOCy-bec"
# <hr>
# + [markdown] colab_type="text" id="LFh63Y4x-bed"
# # <h2 id="quiz">Quiz on Loops</h2>
# + [markdown] colab_type="text" id="_BqYdLzV-bed"
# Write a <code>for</code> loop that prints out all the elements between <b>-5</b> and <b>5</b> using the range function.
# + colab={"base_uri": "https://localhost:8080/", "height": 187} colab_type="code" id="GFedB0h_-bef" outputId="ebfe14de-3e13-4016-985b-2af3d83c2b68"
# Write your code below and press Shift+Enter to execute
# range() excludes its stop value, so the stop must be 6 for the loop to
# reach 5 as the exercise ("between -5 and 5") requires.
for i in range(-5, 6):
    print(i)
# + [markdown] colab_type="text" id="3OKc5mln-bei"
# Double-click __here__ for the solution.
# <!--
# for i in range(-5, 6):
# print(i)
# -->
# + [markdown] colab_type="text" id="tEdLdn-W-bej"
# Print the elements of the following list:
# <code>Genres=[ 'rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop']</code>
# Make sure you follow Python conventions.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="3EhPEc-K-bek" outputId="26f2b23c-4cfc-4ce1-c47d-51947f5ac8a1"
# Write your code below and press Shift+Enter to execute
Genres = ['rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop']
# The exercise asks for each element to be printed; a bare `Genres`
# expression only echoes the list in an interactive cell and is a no-op
# when the file runs as a script.
for genre in Genres:
    print(genre)
# + [markdown] colab_type="text" id="fAnIhn4b-beo"
# Double-click __here__ for the solution.
# <!--
# Genres = ['rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop']
# for Genre in Genres:
# print(Genre)
# -->
# + [markdown] colab_type="text" id="w1vfBKag-bep"
# <hr>
# + [markdown] colab_type="text" id="xBMppTr1-beq"
# Write a for loop that prints out the following list: <code>squares=['red', 'yellow', 'green', 'purple', 'blue']</code>
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="RRqmWLLY-ber" outputId="8ec3d101-65b5-47da-ab18-ca7d0ead1fe0"
# Write your code below and press Shift+Enter to execute
# Print each colour with a 1-based position number.
squares = ['red', 'yellow', 'green', 'purple', 'blue']
position = 1
for name in squares:
    print(position, name)
    position += 1
# + [markdown] colab_type="text" id="7ZCTRC4m-bev"
# Double-click __here__ for the solution.
# <!--
# squares=['red', 'yellow', 'green', 'purple', 'blue']
# for square in squares:
# print(square)
# -->
# + [markdown] colab_type="text" id="y2Lehi88-bex"
# <hr>
# + [markdown] colab_type="text" id="i1qqoGK6-bey"
# Write a while loop to display the values of the Rating of an album playlist stored in the list <code>PlayListRatings</code>. If the score is less than 6, exit the loop. The list <code>PlayListRatings</code> is given by: <code>PlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10]</code>
# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="wCEuqZSZ-bez" outputId="6a1becff-dd47-4659-db66-def26ad57f2e"
# Write your code below and press Shift+Enter to execute
# Show ratings from the start of the playlist, stopping at the first one
# below 6 (which is not printed).
playListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10]
i = 0
while playListRatings[i] >= 6:
    print(playListRatings[i])
    i += 1
# + [markdown] colab_type="text" id="8nyJULFK-be2"
# Double-click __here__ for the solution.
# <!--
# PlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10]
# i = 1
# Rating = PlayListRatings[0]
# while(Rating >= 6):
# print(Rating)
# Rating = PlayListRatings[i]
# i = i + 1
# -->
# + [markdown] colab_type="text" id="NONa3Kvz-be3"
# <hr>
# + [markdown] colab_type="text" id="BqJUHrET-be3"
# Write a while loop to copy the strings <code>'orange'</code> of the list <code>squares</code> to the list <code>new_squares</code>. Stop and exit the loop if the value on the list is not <code>'orange'</code>:
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="X2VAg3qD-be4" outputId="da79f07d-0202-4c7e-c82f-34e6a65c644e"
# Write your code below and press Shift+Enter to execute
squares = ['orange', 'orange', 'purple', 'blue ', 'orange']
new_squares = []
i = 0
# Copy the leading 'orange' entries into new_squares and stop at the first
# non-orange element.  The original code did `new_squares = squares[i]`,
# which replaced the list with a single string instead of appending to it.
while squares[i] == 'orange':
    new_squares.append(squares[i])
    i += 1
    print(new_squares)
print(new_squares, 'the loop is repeat : ', i)
# + [markdown] colab_type="text" id="tKCS24V3-be8"
# Double-click __here__ for the solution.
# <!--
# squares = ['orange', 'orange', 'purple', 'blue ', 'orange']
# new_squares = []
# i = 0
# while(squares[i] == 'orange'):
# new_squares.append(squares[i])
# i = i + 1
# print (new_squares)
# -->
# + [markdown] colab_type="text" id="zCs8cDzi-be9"
# <hr>
# <h2>The last exercise!</h2>
# <p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href="https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/" target="_blank">this article</a> to learn how to share your work.
# <hr>
# + [markdown] colab_type="text" id="3X34mHe6-bfA"
# <p>Copyright © 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
"base_uri": "https://localhost:8080/", "height": 202}
nba.head()
# + id="2rUpPFBJrEvu" colab_type="code" colab={}
| 13,118 |
/hw8.ipynb | 16bfdf8de6d6d18dd4832376054318256cdba793 | [] | no_license | shunkeai/computatinal-phys | https://github.com/shunkeai/computatinal-phys | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 111,401 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homework
#
# Merging compact objects produces a chirp signal in the gravitational wave detector.
# Assume the chirp signal is
# \begin{equation}
# y(t)=\begin{cases} sin(t^2) \quad 0<t<15 \\
# 0 \quad else\\
# \end{cases}
# \end{equation}
#
# Please do wavelet transform to show the chirp signal is similar to the one you have seen
# online. (Notice that the figure online has time as x-axis and frequency as the y-axis)
# (You are welcome to use any wavelet you prefer.)
#
#
# +
from numpy import *

# Grid definitions for the continuous wavelet transform of the chirp.
ns=100  # number of scale samples
ntau=300  # number of time-shift (tau) samples
dstep=2.9/ns  # scale step
sar=arange(0.101,3.001,dstep)  # scale axis, roughly (0.1, 3.0)
dtaustep=16./ntau  # tau step
tauar=arange(0.,16.,dtaustep)  # time-shift axis, [0, 16)
ystau=zeros((ntau, ns),complex)  # CWT coefficients, indexed [tau, scale]
nt=600  # number of signal time samples
dtstep=15./nt  # signal sampling step
tar=arange(0.,15.,dtstep)  # signal time axis, [0, 15)
def func(t):
    """Chirp test signal: sin(t**2) on the open interval (0, 15), zero outside."""
    if 0 < t < 15:
        return sin(t ** 2)
    return 0
def morlet(t, sigma):
    """Complex Morlet mother wavelet: a unit-frequency complex carrier
    exp(2*pi*i*t) under a Gaussian envelope of width sigma."""
    envelope = exp(-t * t / (2. * sigma * sigma))
    carrier = exp(2. * pi * t * 1j)
    return carrier * envelope
yarr=zeros(nt)  # sampled chirp signal y(t) on the grid `tar`
def wavelet():
    """Fill the global `ystau` with the continuous wavelet transform of the chirp.

    Mutates module-level state: `yarr` (the sampled signal) and `ystau`
    (the coefficients, indexed [tau, scale]).  Uses the Morlet wavelet
    with sigma = 1 and the usual 1/sqrt(s) scale normalisation.
    """
    # Sample y(t) = sin(t^2) onto the time grid.
    for idext in range(nt):
        yarr[idext]=func(tar[idext])
    # Riemann-sum approximation of
    #   Y(tau, s) = (1/sqrt(s)) * Integral conj(psi((t - tau)/s)) y(t) dt
    # (y is real, so conjugating the whole product conjugates only psi).
    for idextau in range(ntau):
        for idexs in range(ns):
            ystau[idextau,idexs]= sum(conjugate((1/sar[idexs]**(1/2))*morlet((tar-tauar[idextau])/sar[idexs],1)*yarr*dtstep))
wavelet()
far=1./sar  # map each scale to its (approximate) frequency: f = 1/s
# +
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pylab as p;
# Figure 1: the raw chirp signal y(t).
fig = p.figure()
p.plot(tar,yarr)
# Figure 2: |Y(tau, f)| as a 3-D surface plus a contour view.
fig = p.figure()
ax = fig.add_subplot(2,2,1,projection='3d')
X, Y = p.meshgrid(tauar, far)
ax.plot_surface(X, Y, abs(transpose(ystau)))
ax.plot_wireframe(X, Y, abs(transpose(ystau)), color = 'r')
ax.set_xlabel('tau')
ax.set_ylabel('f')
ax.set_zlabel('Y')
ax = fig.add_subplot(2,2,3)
# 100 evenly spaced contour levels spanning the magnitude range.
levels=p.arange(abs(transpose(ystau)).min(),abs(transpose(ystau)).max(),(abs(transpose(ystau)).max() - abs(transpose(ystau)).min())/100.)
# NOTE(review): the levels come from |Y| but the contour plots Y.imag —
# these ranges differ; abs(...) was probably intended here.  Confirm.
CS=p.contour(tauar,far,transpose(ystau).imag, levels=levels)
p.xlabel('T')
p.ylabel('Frequency')
p.show()
p.show()
| 2,174 |
/docs/notebooks/pipeline_report.ipynb | 47f91bc3d110a9cc1a27a697a8b6dfcbc7bdc5f7 | [
"MIT"
] | permissive | gaborsomogyi/ldssa-capstone | https://github.com/gaborsomogyi/ldssa-capstone | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 3,455 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Imports
# +
import pickle
import json
import pandas as pd
import numpy as np
import category_encoders
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.externals import joblib
from sklearn.model_selection import cross_validate
from pipeline.custom_transformers import NAEncoder, ColumnDropper
# -
# ## Data handling
# Features and target live in separate CSVs; in the feature file both
# 'N/A or Unknown' and 'unknown' are parsed as missing values.
X_train = pd.read_csv('data/X_train.csv', na_values=['N/A or Unknown', 'unknown'])
y_train = pd.read_csv('data/y_train.csv', names=['injury'])
# +
# Persist the training schema (column order and dtypes) so the serving
# side can rebuild incoming payloads in exactly the same layout.
with open('pipeline/columns.json', 'w') as fh:
    json.dump(X_train.columns.tolist(), fh)

with open('pipeline/dtypes.pickle', 'wb') as fh:
    pickle.dump(X_train.dtypes, fh)
# -
# ## Baseline
# +
# Baseline: one-hot encode everything and fit a plain logistic regression.
pipeline = make_pipeline(
    category_encoders.OneHotEncoder(),
    LogisticRegression(),
)

pipeline.fit(X_train, y_train.values.ravel())  # ravel: sklearn expects a 1-D target vector
joblib.dump(pipeline, 'pipeline/pipeline.pickle')
# -
# ## Final pipeline
# +
# Final pipeline: custom drop/NA-encode steps (from pipeline.custom_transformers),
# one-hot encoding, then an XGBoost classifier with pre-tuned hyperparameters.
pipeline = make_pipeline(
    ColumnDropper('age_in_years'),
    NAEncoder(['other_person_location']),
    NAEncoder(['other_factor_1', 'other_factor_2', 'other_factor_3']),
    category_encoders.OneHotEncoder(),
    # base_score is initialised to the empirical mean of the target.
    XGBClassifier(base_score=np.mean(y_train.values), booster='dart',
       colsample_bylevel=1, colsample_bytree=0.55, gamma=1,
       learning_rate=0.1, max_delta_step=0, max_depth=7,
       min_child_weight=3, missing=None, n_estimators=100, n_jobs=1,
       nthread=1, objective='binary:logistic', random_state=0, reg_alpha=0,
       reg_lambda=1, scale_pos_weight=1, silent=True,
       subsample=1
    )
)

pipeline.fit(X_train, y_train.values.ravel())  # ravel: sklearn expects a 1-D target vector
joblib.dump(pipeline, 'pipeline/pipeline.pickle')  # NOTE: overwrites the baseline artifact at the same path
| 1,998 |
/notebooks/mytestnotebook.ipynb | 8aad45109b7b09ab15989888d3b04aa6667d2cc2 | [] | no_license | ityulnev/fft_in_jupyter | https://github.com/ityulnev/fft_in_jupyter | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 72,047 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <a id=notebook_start></a>
# There is an infinte amount of resources out there, for instance [here](https://www.dataquest.io/blog/jupyter-notebook-tips-tricks-shortcuts/).
#
#
# # Notebook intro
#
#
# ## navigating the notebook
#
# There are three types of cells:
# 1. input cells - contain the actual code
# 2. output cell - display the results of the computation
# 3. markdown cells - provide documentation and instructions
#
#
# The Jupyter Notebook has two different keyboard input modes.
# 1. Edit mode allows you to type code or text into a cell and is indicated by a green cell border.
# 2. Command mode binds the keyboard to notebook level commands and is indicated by a grey cell border with a blue left margin.
#
#
# Command mode is activated by hitting `ESC`
# You can swtich back to edit mode by hitting `ENTER`
#
#
#
# Some useful shortcuts are
# - `ESC`+`dd` - delete cell
# - `ESC`+`a` - add cell above
# - `ESC`+`b` - add cell below
# - `ESC`+`l` - toggle line numbers
# - `SHIFT`+`ENTER` - execute cell
# - `ENTER` - enter edit mode
#
# To get more help, open the shortcut by hitting `ESC` followed by `h`
#
#
#
# ## imports, packages and magic commands
#
# In almost any case you will use existing packages. A common good practice is to load them at the beginning of the notebook using the `import` command.
#
import numpy as np # widely used python library for data manipulation, the 'as' allows you to rename the package on import
from scipy import constants # this is how you just get specific subpackages or functions
import sys # get some information about the current OS
sys.version # show the python version (a bare expression is displayed as the cell's output)
sys.executable # show the path to the python executable - very useful to check when things don't work as expected!
# [magic commands](https://ipython.readthedocs.io/en/stable/interactive/magics.html) are special commands that give some extended functionality to the notebook.
# +
# show figures inline in the notebook
# %matplotlib inline
# the following command reloads external packages that have been changed externally without the need to restart the kernel
# %load_ext autoreload
# %autoreload 2
# -
# This will list all magic commands
# %lsmagic
# +
a = 'this is a string'
b = 1 # this is an integer
# list variables of a given type with the %who magic
# %who str
# %who int
# -
# The `!` allows you to execute shell commands directly from the notebook. You can also [exectute different kernels](https://www.dataquest.io/blog/jupyter-notebook-tips-tricks-shortcuts/) in a single notebook!
# ! ls ../../images/nbextensions.png
# ## extensions
#
# You can install notebook extensions by installing the `jupyter_contrib_nbextensions` package (for example via `pip install jupyter_contrib_nbextensions`).
#
# If everything is installed, you should get the following tab
# 
#
# One extension that I find particularly useful is the Table of Contents, which provides a TOC based on the titles and subtitles in the notebook.
# ## Latex, Markdown and HTML
#
# You can write beatiful notebooks using LaTex and [Markdown](https://www.markdownguide.org/cheat-sheet/). Just open any of the cells in this notebook to see the underlying markdown code.
#
# Latex is rendered as expected, for example: $\alpha$.
#
# You can also have inline equations:
#
# $$
# \alpha = \int_0^\infty sin(x) dx
# $$
#
# and numbered equations
#
#
# \begin{align}
# \alpha &= \int_0^\infty \sin(x) dx \\
# \beta &= \frac{\partial}{\partial y} \cos(y)
# \end{align}
#
#
# To navigate the notebook you can also create internal links within the notebook using regular HTML code:
#
# - `<a href='#my_label'>some text</a>` - links to `#my_label`
# - `<a id=my_label></a>` - defines the target of the link.
#
# For examle, this <a href='#notebook_start'>link</a> brings you back to the beginning of the notebook.
# Note that links "don't work" when the cell is still in edit mode!
#
#
# <p style="color: red;">
# Note: HTML also allows you to add some color to you notebooks.
# </p>
# # Python intro
#
#
# There are more the enough resources on the internet. Here, I will just remind some specifically pythonic code snippets that I find useful.
# ## functions
#
# Are defined as follows. You can also create classes and more complicated objects.
def simple_function(my_number, my_string='there is nothing'):
    """Concatenate *my_string* with the string form of *my_number*.

    This triple-quoted docstring is what `?simple_function` displays.
    """
    result = my_string + str(my_number)
    return result
# ?simple_function
# a short version of writing the above is using a lambda function
# NOTE: assigning a lambda to a name is discouraged by PEP 8 (E731) - it is shown here only as a compact alternative to the def above
simple_function = lambda my_number, my_string='there is nothing' : my_string + str(my_number)
# ## dictionaries
#
# Are very usefull for writing human friendly code:
# +
simple_dict = {'my_number': 1, 'my_string': 'hello'}
# an alternative way to define the same dictionary (this one is useful when you want to turn a bunch of definitions into a dict;
# note that with dict(...) the keys must be valid Python identifiers)
simple_dict = dict(
    my_number = 1,
    my_string = 'hello'
)
simple_dict
# -
# you can use dicts to pass many arguments in a compact way
# this unpacks the dictionary, the two stars mean that the unpacking is as key:value pairs (keyword arguments)
# check what happens if you only have one star (a single star unpacks only the keys, positionally)
simple_function(**simple_dict)
# ## loops
for k,v in simple_dict.items():
    print('this is the value:',v, ' and this is the key:', k)
# zip allows you to bundle different data quickly together
for k,v in zip(['a', 'b'], [1,2]):
    print('this is the value:',v, ' and this is the key:', k)
{k:v for k,v in zip(['a', 'b'], [1,2])} # you can also use a dict comprehension to build dictionaries from loops
# ## paths
#
# python provides a convinient pathlib library
from pathlib import Path # working with path objects - useful for OS independent code
image_path = Path('../../images/') # define the path - useful to define a global path at the beginning of the notebook
[f for f in image_path.glob('*')] # glob allows you to search the path
image_path/'motivation.svg' # appending to a path with the '/' operator
image_path.name # the final path component (here the directory name)
image_path.exists() # check if file exists
image_path.is_dir() # check if is directory
(image_path/'motivation.svg').exists() # check if file exists
(image_path/'motivation.svg').is_dir() # False - a regular file is not a directory
image_path.absolute() # get the absolute path
image_path.parent # get the parent
# ## strings
#
# you can easily format string useing the format function
'this is a bare string, that takes a float here: {:0.3f}'.format(0.2) # str.format fills the {...} placeholder
s = 'thies is ...'
s.replace('thies', 'this') # returns a NEW string; s itself is unchanged (strings are immutable)
s.split(' ') # breaking up string creates a list
# +
# Useful trick: pack experiment metadata into a filename, then recover it later.
filename = 'run_{:02d}_pressure_{:0.2e}_frequency_{:0.2f}Hz'.format(1, 1.2e-4, 102024)
print(filename)
# +
# Decode: split once on '_' and convert each field back to a number.
fields = filename.split('_')
run = int(fields[1])
pressure = float(fields[3])
frequency = float(fields[5].split('Hz')[0])
run, pressure, frequency
# -
# # Working with paths, functions and ploting
# +
import numpy as np # widely used python library for data manipulation, the 'as' allows you to rename the package on import
import pandas as pd
import json, yaml
import sys # get some information about the current OS
import matplotlib.pyplot as plt
# show figures inline in the notebook
# %matplotlib inline
from pathlib import Path # working with path objects - useful for OS independent code
# NOTE(review): path_data is commented out below, yet a later cell references it
# (and the commented mkdir line mixes up 'path_data' vs 'data_path') - the later
# cell raises a NameError unless path_data is defined somewhere.
#path_data = Path('./data_tmp')
# create the folder if it doesn't exist
#if not data_path.exists(): data_path.mkdir()
# -
# Gaussian of width ~2c evaluated at x. NOTE(review): parameters a and b are unused -
# presumably they were meant as amplitude/offset; confirm the intended formula.
my_function = lambda x, a, b, c : np.exp(-(x/(2*c))**2)
# +
# Build the x grid and evaluate my_function; ** unpacks the dict as keyword arguments.
x = np.linspace(-200,200,250)
# NOTE(review): my_function only depends on x and c - the a and b entries are ignored.
parameters = dict(
    a = 1,
    b = 0.5,
    c = 2
)
y = my_function(x, **parameters)
# plot the data
plt.plot(x,y)
plt.xlabel('x')
plt.ylabel('y')
# -
# Thin wrapper around numpy's FFT (the lambda adds nothing over np.fft.fft itself).
my_fft=lambda x : np.fft.fft(x)
yf=my_fft(y)
# plot the magnitude spectrum; fftshift moves the zero-frequency bin to the center.
# NOTE(review): the horizontal axis here is still the spatial grid x, not frequency -
# use np.fft.fftfreq(len(x), d=x[1]-x[0]) for a proper frequency axis.
plt.plot(x,np.abs(np.fft.fftshift(yf)))
# Trapezoidal-rule integral of the curve y(x).
# FIX: np.trapz's signature is trapz(y, x) - the original call np.trapz(x, y)
# integrated x against y as the sample points, which is almost certainly not intended.
np.trapz(y, x)
# make a pandas dataframe and round-trip it through a CSV file
df = pd.DataFrame.from_dict({'my_x':x, 'my_y':y})
df
# FIX: path_data was never defined (its definition in the imports cell above is
# commented out, and that commented code also mixed up 'data_path'/'path_data'),
# so this cell raised a NameError. Define the output directory here.
path_data = Path('./data_tmp')
if not path_data.exists(): path_data.mkdir()
df.to_csv(path_data/'thismydata.csv', index=None)
dfloaded = pd.read_csv(path_data/'thismydata.csv')
dfloaded.plot('my_x','my_y') # pandas also allows you to directly plot the data
# Code to install a package from GIT: { !pip install git+https://github.com/JanGieseler/edaipynb.git }
# <br> (! used to run external code from notebook)
# <br>
# <br> another new line appears!
# !pip install git+https://github.com/JanGieseler/edaipynb.git
| 8,582 |
/PS2/PS2.ipynb | 40b48521d76a5a5989a483af4f03b78b701aeacb | [
"MIT"
] | permissive | ChampionApe/FinancialFrictions2019 | https://github.com/ChampionApe/FinancialFrictions2019 | 1 | 0 | MIT | 2019-08-05T12:49:23 | 2019-08-05T12:46:45 | HTML | Jupyter Notebook | false | false | .py | 38,539 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import math
from scipy import optimize
import numba as nb
# Project-local helper modules (must sit next to this notebook on the path):
import cap_equi as func
import interactive_plots as int_plot
import scipy.stats as stats
# For plots:
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
# NOTE(review): the 'seaborn-whitegrid'/'seaborn' style names were renamed in
# matplotlib >= 3.6 (e.g. 'seaborn-v0_8-whitegrid') - confirm the installed version.
plt.style.use('seaborn-whitegrid')
mpl.style.use('seaborn')
# Grab the default color cycle so individual colors can be reused explicitly.
prop_cycle = plt.rcParams["axes.prop_cycle"]
colors = prop_cycle.by_key()["color"]
import ipywidgets as widgets
from ipywidgets import interact, interact_manual
# -
# # PS2, part 1: Welfare effects of capital injection
# The exercise consists of the core setup:
# * Two periods, $t=0,1$,
# * Two types of agents: Consumers and entrepreneurs.
# * **Consumers:**
# * Linear utility $u_c = c_0^c+c_1^c$.
# * Large endowment of consumption goods in each period: $e$.
# * Labor endowment of $e_l=1$. Inelastically supplied in a competitive labor market at the price $w_1$.
# * **Entrepreneurs:**
# * Linear utility $u_e = c_0^e+c_1^e$.
# * Small endowment $n_0$ in period 0.
# * Borrow $b_1$, invest in capital $k_1$.
# * Period 1 they hire labor $l_1$ at wage $w_1$.
# * Entrepreneurs utilize capital and labor in $t=1$ with Cobb-Douglas production function
# $$ \begin{align}
# y_1 = k_1^{\alpha}l_1^{1-\alpha} \notag
# \end{align} $$
# * **Entrepreneurs are borrowing constrained:**
# $$\begin{align}
# b_1\leq \lambda \left(y_1-w_1l_1\right), && \lambda\in (0,1) \notag
# \end{align} $$
# * Assume that endowment of consumption goods $(e)$ is *sufficiently* large, for the interest rate to be 1 in equilibrium. (We will show what *sufficiently* means here).
# ## Q1: Three parts. (i) State the entrepreneur's problem. (ii) Argue that entrepreneurs will choose to maximize profits in period 1, a linear function of $k_1$, and (3) restate entrepreneur's problem as a simple linear problem.
# ### (i) Entrepreneur problem:
# Interpret bullets from **entrepreneurs** above as a problem:
#
# $$
# \begin{align}
# \max_{c_0,c_1,k_1,l_1,y_1,b_1} \mbox{ } c_0+c_1,& \notag \\
# \text{s.t. }\mbox{ }c_0+k_1 &= n_0+b_1 \notag \\
# c_1+b_1 &= y_1-w_1l_1 \tag{Problem 1}\\
# y_1 &= k_1^{\alpha}l_1^{1-\alpha} \notag \\
# b_1\leq &\lambda \left(y_1-w_1l_1\right) \notag
# \end{align}$$
# States that:
# 1. Maximize consumption (risk-neutrality).
# 2. Budget $t=0$: Consumption $(c_0)$ and investment $(k_1)$ equals income from endowment $(n_0)$ and borrowing $(b_1)$.
# 3. Budget $t=1$: Consumption $(c_1)$ and repayment of debt $(b_1)$ equals income from production $(y_1)$ minues wages $(w_1l_1)$.
# 4. Production is Cobb-Douglas.
# 5. Entrepreneur is credit-constrained, with $\lambda$ denoting the degree of future profits that can be used as collateral.
#
# Substitute for $c_0$ and $c_1$ into the utility function, using the budgets for $t=0,1$:
#
# $$\begin{align}
# c_0+c_1 &= n_0+b_1-k_1+y_1-w_1l_1-b_1 \\
# &= n_0+y_1-k_1-w_1l_1
# \end{align}$$
#
# Substitue for $y_1$ using the Cobb-Douglas function (remember you should substitute for $y_1$ in both the object function **and** the constraints). The problem is then reduced to:
#
# $$\begin{align}
# \max_{k_1,l_1,b_1} n_0+k_1^{\alpha}l_1^{1-\alpha}-k_1-w_1l_1, && s.t. b_1\leq \lambda \left(k_1^{\alpha}l_1^{1-\alpha}-w_1l_1\right). \tag{Problem 2}
# \end{align}$$
#
# The Lagrangian function corresponding to this problem is then given by:
#
# $$\begin{align}
# \mathcal{L}_2 = n_0+k_1^{\alpha}l_1^{1-\alpha}-k_1-w_1l_1+\mu\left[\lambda\left(k_1^{\alpha}l_1^{1-\alpha}-w_1l_1\right)-b_1\right],
# \end{align}$$
#
# where $\mu\geq0$ is the shadow-variable related to the credit-constraint. As in problem set, exercise 3.5 in (JT), this problem is characterized by a set of first order conditions with Karush-Kuhn-Tucker conditions for $\mu$ and the credit constraint. This is because of the inequality constraint (instead of standard Lagrangian approach when the constraint is with "=").
#
# ### (ii) Optimal choice of labor:
# Note that profits in $t=1$ is given by:
#
# $$\begin{align}
# \pi_1 = k_1^{\alpha}l_1^{1-\alpha}-w_1l_1.
# \end{align}$$
#
# Optimal choice of labor $(l^*)$ is then characterized by:
#
# $$\begin{align}
# \dfrac{\partial \mathcal{L}}{\partial l_1} = (1-\alpha)k_1^{\alpha}l_1^{-\alpha} -w_1 + \mu\lambda\left[(1-\alpha)k_1^{\alpha}l_1^{-\alpha}-w_1\right] = 0
# \end{align}$$
#
# We can rewrite this condition as:
#
# $$\begin{align}
# \left[(1-\alpha)k_1^{\alpha}l_1^{-\alpha}-w_1\right](1+\mu\lambda)=0 && \Rightarrow && w_1 = (1-\alpha)k_1^{\alpha}l_1^{-\alpha}
# \end{align}$$
#
# Note that this corresponds to maximizing $\pi_1$ wrt. labor. Note further that we can use this to write up two other terms:
#
# $$\begin{align}
# w_1l_1^* = (1-\alpha)k_1^{\alpha}(l_1^*)^{1-\alpha}, \tag{Labor Share}
# \end{align}$$
#
# and by isolating for $l_1$:
#
# $$\begin{align}
# l_1^* = \dfrac{(1-\alpha)^{1/\alpha}k_1}{w_1^{1/\alpha}}. \tag{$l^*$}
# \end{align}$$
#
# To show that the profit function is linear in $k_1$, when labor is chosen optimally, start by using (Labor Share):
#
# $$\begin{align}
# \pi_1(l=l^*) &= k_1^{\alpha}(l_1^*)^{1-\alpha}-w_1l_1^* \notag \\
# &= \alpha k_1^{\alpha}(l_1^*)^{1-\alpha}
# \end{align}$$
#
# Now substitute for $(l^*)$:
#
# $$\begin{align}
# \pi_1(l=l^*) &= \alpha k_1^{\alpha}\dfrac{(1-\alpha)^{(1-\alpha)/\alpha}k_1^{1-\alpha}}{w_1^{(1-\alpha)/\alpha}} \notag \\
# &= \underbrace{\dfrac{\alpha (1-\alpha)^{(1-\alpha)/\alpha}}{w_1^{(1-\alpha)/\alpha}}}_{\equiv R(w_1)}k_1 \tag{Profit}
# \end{align}$$
#
#
#
#
#
#
# ### (iii) Restate as linear problem:
# Using (Profit) in (Problem 2):
#
# $$\begin{align}
# \max_{k_1,b_1}\mbox{ }n_0-k_1+R(w_1)k_1, && \text{s.t. } b_1\leq \lambda R(w_1)k_1 \tag{Problem 3}.
# \end{align} $$
# ## Q2: Derive entrepreneur's demand for capital, and argue how the solution depends on $\lambda$ and $R(w_1)$.
# Problem $3$ is linear in $k_1$. Thus capital demand depends on:
#
# * If $R(w_1)<1$: Investment in capital is not profitable, $k_1^*=0$. (it is implicitly assumed that $k\geq0$)
# * If $\lambda R(w_1)>1$: Investment in capital is so profitable that borrowing constraint never binds, $k_1^*=\infty$.
# * If $\lambda R(w_1)<1<R(w_1)$: Capital is postponed as much as possible, but borrowing constraint will bind. Solution is then to set $c_0=0$ in the budget at $t=0$, and use that $b_1$ is determined by the borrowing constraint:
#
# $$\begin{align}
# k_1^* &= n_0 +b_1 \\
# &= n_0+\lambda R(w_1)k_1^* \\
# \Rightarrow k_1^* &= \dfrac{n_0}{1-\lambda R(w_1)}
# \end{align}$$
#
# * Finally, if $R(w_1)=1$ the entrepreneur is indifferent between consumption and investment. In this case capital demand is indeterminate:
# $$\begin{align}
# k_1^*\in\left[0,\mbox{ }\dfrac{n_0}{1-\lambda}\right].
# \end{align}$$
# **Illustrating demand for capital, and equilibrium capital**:
#
# Before we proceed to the next questions, let us briefly illustrate how capital demand looks like, and how it behaves in equilibrium. *(NB: You don't need to change anything in the cell with code below (you can though), just use the sliders in the graph.)*
#
# The first part here plots the capital demand and the interest rate function $(R=\alpha k^{\alpha-1})$.
# +
# Baseline parameters:
# NOTE(review): Inf = 10**3 is a large finite stand-in for "infinite" capital
# demand (the lambda*R(w) > 1 case) - confirm against interactive_plots.
n_0base, lambda_base, alpha_base, Inf = 0.1, 0.55, 0.9, 10**3
# Exogenous grid of R (interest rate):
R_min, R_max, N_R= 0.1, 2, 100
R_grid = np.linspace(R_min, R_max, N_R)
# Upper and lower limit of sliders to operate below:
n_0_lower, n_0_upper, n_0_step = 0, 0.5, 0.05
lambda_lower, lambda_upper, lambda_step = 0, 0.9, 0.05
# Plot capital demand / interest-rate curves with interactive sliders:
int_plot.interactive_cap(n_0base, lambda_base, alpha_base, Inf, N_R, R_grid, n_0_lower, n_0_upper, n_0_step, lambda_lower, lambda_upper, lambda_step)
# -
# The second part here plots the equilibrium value across $(\lambda,n)$.
# Grids over initial wealth n_0 and the pledgeability parameter lambda:
n_0_lower, n_0_upper, N_n = 0, 0.5, 51
lambda_lower, lambda_upper, N_lambda = 0, 0.9, 91
# FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; it was always
# just an alias of the builtin float, so using float is behaviorally identical.
n_grid = np.linspace(n_0_lower, n_0_upper,N_n, dtype=float)
lambda_grid, lambda_step = np.linspace(lambda_lower, lambda_upper, N_lambda, retstep=True, dtype=float)
# baseline:
n_0base, lambda_base, alpha = 0.1, 0.5, 0.9
# Plot: interactive equilibrium-capital figure across the (lambda, n) grids.
# NOTE(review): math.ceil(N_lambda/2) looks like the initial slider index
# (middle of the lambda grid) - confirm against interactive_equicap's signature.
int_plot.interactive_equicap(N_n,N_lambda,alpha,n_grid,lambda_grid,math.ceil(N_lambda/2),lambda_lower, lambda_upper,lambda_step)
# ## Q3: Show the cutoff $n^*$, where $n_0 \geq n^*$ implies that the first-best level of capital can be financed, and the borrowing constraint is not binding.
# Let us define the first-best level of capital. Recall that in equilibrium $l_1=1$ (as this is the endowment supplied by consumers). Define first-best as **the level of capital that maximizes net output**:
#
# $$\begin{align}
# \max_{k_1} k_1^{\alpha}-k_1 && \Rightarrow && k_1^{fb} = \alpha^{1/(1-\alpha)}.
# \end{align}$$
#
# In equilibrium the interest rate $R(w_1)$ is defined by the marginal product of capital:
#
# $$\begin{align}
# R(w_1) = \alpha k_1^{\alpha-1}, && \Rightarrow && k_1 = \left(\dfrac{\alpha}{R(w_1)}\right)^{1/(1-\alpha)}
# \end{align}$$
#
# Thus for $k_1=k_1^{fb}$ we need $R(w_1)=1$. When $R(w_1)=1$ the capital demand was indeterminate, i.e. it covered a range of possible solutions. The optimal level $k_1^{fb}$ is one of the feasible capital demand levels as long as:
#
# $$\begin{align}
# k_1^{fb}\equiv \alpha^{1/(1-\alpha)}\in\left[0,\mbox{ }\dfrac{n_0}{1-\lambda}\right].
# \end{align}$$
#
# As long as $n_0$ is sufficiently large the first best is thus an equilibrium. The threshold $n^*$ where the first best is an equilibrium for $n_0\geq n^*$ is then given by the condition above with equality:
#
# $$\begin{align}
# \alpha^{1/(1-\alpha)} = \dfrac{n^*}{1-\lambda} && \Rightarrow && n^* = (1-\lambda)\alpha^{1/(1-\alpha)}
# \end{align}$$
#
#
#
# **Addtional remark:** Give another explanation for why we need R=1 for optimality:
#
# Optimality requires that the $\color{blue}{\text{marginal rate of transformation}}$ equals $\color{blue}{\text{the marginal rate of substitution}}$
# ## Q4: Show that if n<n* there is an equilibrium where the entrepreneurs are constrained and $k_1$ becomes an increasing function of $n_0$.
# * **Capital demand:** Recall that when $n_0<n^*$ the entrepreneurs do not have sufficient equity; thus they are credit constrained.
# * In the credit constrained case recall that capital was given by the budget constraint for $t=0$ with $c_0=0$ imposed, i.e.:
#
# $$\begin{align}
# k_1 = n_0+\lambda R(w_1) k_1.
# \end{align}$$
#
# Substituting for $R(w_1)$ this yields:
#
# $$\begin{align}
# k_1 - \lambda\alpha k_1^{\alpha}=n_0. \tag{$k_1^*$, credit-constrained}
# \end{align}$$
#
# As long as $n_0<n^*$ this equations determines $k_1$. Differentiation wrt. $n_0$ then yields
#
# $$\begin{align}
# \dfrac{\partial k_1}{\partial n_0}-\lambda \alpha^2k_1^{\alpha-1}\dfrac{\partial k_1}{\partial n_0}=1 && \Rightarrow && \dfrac{\partial k_1}{\partial n_0} = \left(1-\lambda \alpha k_1^{\alpha}\dfrac{\alpha}{k_1}\right)^{-1}.
# \end{align}$$
#
# Using the expression above for the credit constrained $k_1$ we can rearrange slightly to show that
#
# $$\begin{align}
# \dfrac{\partial k_1}{\partial n_0} = \dfrac{1}{1-\alpha(k_1-n_0)/k_1}>1
# \end{align}$$
#
# another neat way of writing this is:
#
# $$\begin{align}
# \dfrac{\partial k_1}{\partial n_0} = \dfrac{1}{1-\alpha \lambda R(w_1)}>1.
# \end{align}$$
#
# Capital thus increases more than 1-to-1 with the entrepreneur's equity here. The reason for this is the following:
#
# In the credit constrained case the entrepreneur prefers to allocate **all** of his consumption to $t=1$: Thus when $n_0$ increases marginally so does $k_1$, initially 1-to-1. When $k_1$ increases the entrepreneur increases his future profits $(y_1-w_1l_1)$; this relaxes the credit-constraint, allowing him to borrow more against future profits. This increases investments further.
# ## Q5: Introduce a lump-sum tax on consumers in period 0 that transfers wealth to entrepreneurs. Derive expected utility as a function of the lump sum tax rate.
# Let us start with the consumers' utility. This is straightforward:
#
# $$\begin{align}
# U_C(\tau) = 2e-\tau + \overbrace{(1-\alpha)k_1^{\alpha}}^{\equiv w_1(\tau)}
# \end{align}$$
#
# **The entrepreneur part:**
#
# Setup is as defined in (Problem 3) earlier, simply with an added $\tau$:
#
# $$\begin{align}
# U_E(\tau) = n_0+\tau +k_1\left[R(w_1)-1\right]
# \end{align}$$
#
# Substitute for the equilibrium interest rate we then have
#
# $$\begin{align}
# U_E(\tau) = n_0+\tau + \alpha k_1^{\alpha}-k_1
# \end{align}$$
#
# Now we have two cases:
#
# ### The credit-constrained case
#
# If $n_0+\tau<n^*$: The entrepreneur is **credit-constrained**, and capital is defined as earlier by the equality
#
# $$\begin{align}
# k_1-\lambda \alpha k_1^{\alpha} = n_0+\tau.
# \end{align}$$
#
# Substituting for this we get
#
# $$\begin{align}
# U_E(\tau) = \alpha k_1^{\alpha}(1-\lambda)
# \end{align}$$
#
#
# Increasing the tax $\tau$ corresponds to increasing the available endowment for entrepreneurs at $t=0$. As we confirmed above, capital is increasing in $n_0$ and thus in $\tau$, when the credit constraint is binding. Thus $\partial U_E/\partial\tau>0$ when the entrepreneur is constrained. Technically we can confirm this:
#
#
# $$\begin{align}
# \dfrac{\partial U_E}{\partial \tau} &= \alpha^2k_1^{\alpha-1}(1-\lambda)\dfrac{\partial k_1}{\partial n_0} \\
# &= \alpha R(w_1)(1-\lambda)\dfrac{\partial k_1}{\partial n_0}>0
# \end{align}$$
#
# Using the derivative $\partial k_1/\partial n_0$, we can (if we prefer) simplify this further as
#
# $$\begin{align}
# \dfrac{\partial U_E}{\partial \tau} &= \dfrac{\alpha R(w_1)(1-\lambda)}{1-\alpha \lambda R(w_1)}>0, && n_0+\tau<n^*
# \end{align}$$
#
#
# ### The non-constrained case:
#
# If $n_0+\tau\geq n^*$ the capital equilibrium is unaffected. In this case the tax simply transfers wealth from consumers to entrepreneurs, who then immediately consume it. In other words we sum up:
#
# $$\begin{align}
# \dfrac{\partial U_e}{\partial \tau} = \left\lbrace \begin{array}{ll} 1 & n_0+\tau\geq n^* \\
# \dfrac{\alpha R(w_1)(1-\lambda)}{1-\alpha \lambda R(w_1)} & n_0+\tau<n^* \end{array}\right.
# \end{align}$$
# ## Q6: Show that the relationship between consumer utility and the lump-sum tax is non-monotone. Show that for sufficiently small $n_0$ the tax can be pareto-improving.
# Entrepreneurs always benefit from the tax. For consumers the tax includes three potential effects:
#
# $$\begin{align}
# \dfrac{\partial U_c}{\partial \tau} = \left\lbrace \begin{array}{ll} \color{red}{-1}, & n_0+\tau\geq n^* \\ \color{red}{-1} +\color{blue}{(1-\alpha)R(w_1)}\color{blueviolet}{\dfrac{\partial k_1}{\partial \tau}}, & n_0+\tau<n^*\end{array}\right.
# \end{align}$$
#
# When $n_0+\tau\geq n^*$ the effect is clearly negative. However, when entrepreneurs are credit-constrained:
#
# 1. Negative wealth transfer effect: $\color{red}{-1}$.
# 2. Entrepreneurs increase capital investment, which increases labor wages: $\color{blue}{(1-\alpha)R(w_1)}$.
# 3. **(Credit multiplier)** The transfer loosens the credit constraint, inducing further investment: $\color{blueviolet}{\partial k_1/\partial \tau}>1$.
#
# If effects 2-3. dominate effect 1., the tax has a pareto-improving effect. Effects 2-3. are largest for small values of $k_1$ (thus $n_0$).
#
#
# ## Q7: Discuss how this justifies government's capital injection during the financial crisis. What features might the model be missing?
# The role for capital injections in this model:
#
# * A capital injection relaxes the banks' borrowing constraint, increases lending to the real sector and thus investments. This increases employment (not in our model though) and wages, which can offset the effects of the recession.
#
# * *Caveats (a few of potentially many):*
# 1. Moral hazard (reputation effect): If financial institutions expect bailouts $\Rightarrow$ increase risky investments.
# 2. Endogenous uncertainty: A feature of the financial crisis was precautionary behavior in light of increased uncertainty. The capital injection reduces uncertainty of asset prices.
# ## Q8: How does key findings in Q6 depend on the type of borrowing constraint?
# The short answer: The borrowing constraint is quite standard. There are microfoundations that lead to this type of constraint.
#
# In this model future profits can (partly) be collateralized. There are several potential explanations why future profits cannot entirely be collateralized.
#
# * **Informational/enforcement frictions** are at the heart of any borrowing constraint (to the best of my knowledge at least).
# * **Moral Hazard**: Behave/shirk.
# * **Collateralized assets** as Kiyotaki and Moore (1997) leads to similar results.
# # PS2, part 2: Costly state verification and optimality of debt
# The core of the model setup:
#
# * **Utility** is linear: $U_E=c_0+c_1$.
# * Entrepreneur invests at time $0$, cost $I$. Return at time $t=1$ is random, distributed according to $g(s)$:
#
# $$\begin{align}
# Return = Rs, && s\sim g(s), && \text{Support on }S\equiv \left[s^{min}, s^{max}\right]
# \end{align}$$
# Also assumed that
# $$\begin{align}
# \mathbb{E}[s]=1, && R>I.
# \end{align}$$
#
# * **Asymmetric information:** State $s$ is unknown at time $t=0$. At time $t=1$ the entrepreneur (E) observes the true value of $s$ costlessly. The financier (F) can choose to audit and observe it, but at the cost $\kappa>0$.
# * **Assumption:** The financier chooses a *deterministic* audit rule. I.e. no *random/stochastic* audit rule. By the *revelation principle:* Only focus on contracts in which E has no incentive to lie.
# * **Contracts:** Specified by the triplet
#
# $$\begin{align}
# Contract = \left( R^E(s), \mbox{ }R^F(s), \mbox{ }S_A\right), && S_A\subset S
# \end{align}$$
# where $R^E(s)$ and $R^F(s)$ denotes the payoffs for E,F (net of audit cost). $S_A$ is the set of states $S$, where $F$ chooses to audit.
#
# * **Resource constraint:**
#
# $$\begin{align}
# R^E(s)+R^F(s)+\kappa \mathbf{1}_{s\in S_A} = Rs, && R^E(s)\geq 0.
# \end{align}$$
#
# where $\mathbf{1}_{s\in S_A}$ indicates auditing, and the inequality captures *limited liability* of the entrepreneur.
#
# * **Participation constraint:** (for F)
#
# $$\begin{align}
# \int_S R^F(s)g(s)\mbox{ }ds\geq I.
# \end{align}$$
#
# States that expected revenue covers investment cost.
# ### Q1: Write down the IC constraint for E. What does it imply for F's net payoff?
# **IC constraint** with a continuous set of possible outcomes: *In any given state $s$ it must not be profitable to deviate to *any* state $s'$.*
#
# Another useful definition is **the revelation principle**. The principle roughly states that every potential equilibrium outcome, can be achieved by an incentive-compatible strategy.
#
# The IC constraint then states that
#
# $$\begin{align}
# Rs-R^F(s)-\kappa \mathbf{1}_{s\in S_A}\geq Rs-R^F(s'), && \forall s'\in\left(S\backslash S_A\right)
# \end{align}$$
#
# The left-hand side (LHS) is the profit from E telling the true $s$. The RHS is the outcome if E is in state $s$, but signal to F that he is in state $s'$. Note that we only consider deviations to $s'\in (S\backslash S_A)$. There is no incentive to lie about a state $s'$, where E knows he will be audited.
#
# **What does it imply for F's net payoff?**
#
# * Consider two states, $s,s'$, both in the no-audit region. The IC constraint for state $s$ states $R^F(s)\geq R^F(s')$. The IC constraint for state $s$ states $R^F(s')\geq R^F(s)$.
#
# * For them both to hold simultaneously, there must be exactly one payoff level for all $s\notin S_A$. Denote this $R^F(\bar{s})$. If there was a state with higher payoffs in the region $S\backslash S_A$, where E is not audited, he would always report this.
#
#
# * Now consider the case where $s\in S_A$ (recall that alternative $s'\notin S_A$): The $(IC)$ then states that
#
# $$\begin{align}
# Rs-R^F(s)-\kappa \geq Rs-R^F(\bar{s}) && \Rightarrow && R^F(s)\leq R^F(\bar{s}), && \forall s\in S_A.
# \end{align}$$
#
# * This states that in the audit region, the financier (F) has to accept lower returns $R^F(s)$ than in the non-audit region. If this was not the case, E would always report a state in the no-audit region, and thus get higher profits.
# ### Q2: Define a standard debt contract:
#
# $$\begin{align}
# R^F(s) = \left\lbrace \begin{array}{ll} R\bar{s}, & s\geq \bar{s} \\ Rs-\kappa, & s\leq \bar{s} \end{array}\right., && S_A=\left[s^{min},\mbox{ }\bar{s}\right].
# \end{align}$$
#
# ### Check that any level $\bar{s}$ satisfies both the resource constraint and the (IC).
# * **(IC) constraint:**
# * When $s\geq \bar{s}:$ The entrepreneur receives $R(s-\bar{s})$ by telling the truth. The same if he over-reports. If he under-reports in the audit-region he receives 0.
# * When $s<\bar{s}:$ If E over-reports (in the no-audit region) he receives $R(s-\bar{s})<0$. By true reporting he receives 0.
#
#
# * **Resource constraint:**
# * If $s\geq \bar{s}$ the resource constraint reads:
# $$\begin{align}
# \overbrace{R(s-\bar{s})}^{R^E}+\overbrace{R\bar{s}}^{R^F} + \overbrace{0}^{audit}= Rs
# \end{align}$$
# * If $s<\bar{s}$ the resource constraint reads:
#
# $$\begin{align}
# \overbrace{0}^{R^E} +\overbrace{Rs-\kappa}^{R^F} + \overbrace{\kappa}^{audit} = Rs
# \end{align}$$
#
#
#
#
#
# ### Q3: Prove that the standard contract is optimal.
# Consider a non-standard contract:
#
# $$\begin{align}
# (\tilde{R}^E, \tilde{R}^F, \tilde{S}_A)
# \end{align}$$
#
# * The (IC) constraint still needs to hold. Thus for $s\notin \tilde{S}_A$ the non-standard contract still has to offer the same return. Denote this constant $\tilde{R}^F$.
# * Note furthermore that $\tilde{S}_A$ has to be a closed interval on the form $\tilde{S}_A = [s^{min},\tilde{s}]$: Assume there is a level $s$ that is not audited, but $s'>s$ is audited. If E is in state s', he can report $s$ without being audited and profit from it.
# * This implies that $\tilde{R}^F=R\tilde{s}$.
#
# Consider now the various regions of $s$:
#
# * For $s\in\left(\tilde{S}_A \cap S_A\right):$ F receives $Rs-\kappa$ in both cases.
# * For $s\in\left(\tilde{S}_A \cap (S\backslash S_A)\right):$ F receives higher payoff in standard debt contract, where he does not pay the audit cost.
# * For $s$ in non-audit region: Let the standard contract offer the level $\bar{R}^F= \tilde{R}^F$. Thus F is indifferent in these cases.
#
# For entrepreneurs: The standard debt contract minimizes audit-costs (given IC and participation constraints). Given that they receive the residual income F's payment, E prefers a standard contract as well.
#
#
# ### Q4: Assume that $s$ is uniform. Characterizes the riskiness of the optimal debt contract.
# Assume that $s$ is uniform on $[1-\Delta, 1+\Delta]$. In other words the pdf (f) and cdf (F) are given by:
#
# $$\begin{align}
# f(s) = \dfrac{1}{2\Delta}, && F(s) = \dfrac{s-(1-\Delta)}{2\Delta}, && s\in [1-\Delta, 1+\Delta]
# \end{align}$$
#
# The entrepreneur then solves:
#
# $$\begin{align}
# \max_{\bar{s}} \dfrac{1}{2\Delta} \int_{\bar{s}}^{1+\Delta}(Rs-R\bar{s})\mbox{ }ds
# \end{align}$$
#
# subject to F's participation constraint
#
# $$\begin{align}
# \dfrac{1}{2\Delta} \int_{\bar{s}}^{1+\Delta} R\bar{s}\mbox{ }ds+\dfrac{1}{2\Delta}\int_{1-\Delta}^{\bar{s}}(Rs-\kappa)\mbox{ }ds\geq I
# \end{align}$$
#
# Integrating out the expression we write this as:
#
# $$\begin{align}
# \left[1+\Delta-\bar{s}\right]R\bar{s}+\left[\dfrac{R}{2}\bar{s}^2-\bar{s}\kappa\right]-\left[\dfrac{R}{2}(1-\Delta)^2-(1-\Delta)\kappa\right]\geq 2\Delta I,
# \end{align}$$
#
# or rewritten as
#
# $$\begin{align}
# -\dfrac{R}{2}\bar{s}^2+\left[R(1+\Delta)-\kappa\right]\bar{s}+(1-\Delta)\kappa-\dfrac{R}{2}(1-\Delta)^2\geq 2\Delta I. \tag{PC}
# \end{align}$$
#
#
#
# We note here that:
# * For $s<\bar{s}:$ the entrepreneur receives 0. Thus the object function integrates from $\bar{s}$ to $1+\Delta$.
# * For $\bar{s}\leq s \leq 1+\Delta$, the financier receives $R\bar{s}$. In the non-audit region he receives $Rs-\kappa$.
#
# It is straightforward to see that for the entrepreneur, the lowest possible $\bar{s}$ is preferred, where the participation constraint still holds.
# To do this proceed as follows:
#
# * If $I$ is sufficiently small, the obvious solution is to choose $\bar{s}=1-\Delta$. This is feasible (participation constraint holds) if:
#
# $$\begin{align}
# R(1-\Delta)\geq I
# \end{align}$$
#
# * There are two options now: Either there is **no** solution within $[1-\Delta, 1+\Delta]$. Otherwise the solution is given in the case where $(PC)$ holds with equality.
#
# Solving for $(PC)$ with equality, we have:
#
# $$\begin{align}
# \bar{s}^* = 1+\Delta-\dfrac{\kappa}{R}-\dfrac{1}{R}\sqrt{(R(1+\Delta)-\kappa)^2+2R\left[(1-\Delta)\kappa-\dfrac{R}{2}(1-\Delta)^2-2\Delta I\right]}, \tag{$s^*$}
# \end{align}$$
#
# Note that there might not be any solution at all, if the term in the $\sqrt{\cdot}$ is negative.
#
#
# Parameter values for the numerical illustration:
R, I, Delta, kappa = 2, 1.5, 0.5, 0.1
# Grid of candidate repayment thresholds sbar on the support [1-Delta, 1+Delta] of s:
sbar = np.linspace(1 - Delta, 1 + Delta, 101)
# PC constraint: plot the financier's participation constraint over the sbar grid
# (the contract is feasible where the constraint holds - see discussion below).
int_plot.interactive_pc(R,I,Delta,kappa,sbar)
s_star = 1+Delta-kappa/R-(1/R)*math.sqrt((R*(1+Delta)-kappa)**2+2*R*((1-Delta)*kappa-(R/2)*(1-Delta)**2-2*Delta*I))
print(s_star)
# In the figure above (with values $R=2,I=1.5,\Delta=0.5,\kappa=0.1$) note that the PC constraint holds for values of $\bar{s}$ slightly larger 0.8.
# ### Q5: Find the threshold cost $I$, where the project is profitable.
# Using the result from Q4 the threshold $I^{max}$, where the project is financed as long as $I\leq I^{\max}$ is defined as follows:
#
# * If $R(1-\Delta)\geq I$ then $s^*=1-\Delta$. In this case the largest cost $I$ that is financed, is given by $R(1-\Delta)$.
# * If $R(1-\Delta)<I$ then there exists a solution, recall that there exists a solution as long as the term $\sqrt{\cdot}$ in $s^*$ is non-negative.
# * Note furthermore, that $I$ enters negatively in that expression; thus the higher $I$, the closer we get to a scenario, where there is no $\bar{s}^*$.
# * In other words, the largest $I$ that allows a feasible solution is defined by the condition:
#
# $$\begin{align}
# (R(1+\Delta)-\kappa)^2+2R\left[(1-\Delta)\kappa-\dfrac{R}{2}(1-\Delta)^2-2\Delta I\right] = 0 && \Rightarrow && I = R-\kappa+\dfrac{\kappa^2}{4\Delta R}.
# \end{align}$$
#
# Combining the two potential scenarios the highest possible $I$ that can be financed is thus given by
#
# $$\begin{align}
# I^{max} = \max \Bigg\lbrace \underbrace{R-\kappa+\dfrac{\kappa^2}{4\Delta R}}_{\text{Scenario 1}},\mbox{ }\mbox{ }\underbrace{R(1-\Delta)}_{\text{Scenario 2}}\Bigg\rbrace.
# \end{align}$$
#
# A few comments on this solution:
#
# * **When $\kappa<2R\Delta$:** The max-operator returns *scenario 1*. In this case $I^{max}$ is decreasing in both $\kappa$ and $\Delta$. Why?
#
# * When $\kappa$ increases the cost of auditing goes up. When $\kappa<2\Delta$ the $(PC)$ constraint is binding implying that $s^*>1-\Delta$; in other words, the optimal debt contract involves some costly auditing. In this scenario a larger audit-cost makes investment projects less profitable over all $\Rightarrow I^{max}$ decreases.
#
# * An increase in $\Delta$ increases the uncertainty of the outcome. You may verify (either from just adjusting the graph above, or solving for the derivative) that $s^*$ in this case is increasing in $\Delta$. In other words, the increase in $\Delta$ increases the region of $s$, where the financier has to audit, in order to ensure zero expected profits. This lowers the profitability of the project for F, which lowers $I^{max}$.
#
# * **When $\kappa\geq 2R\Delta$:** The max operator returns *scenario 2*.
#
# * In this case $I^{max}$ is simply given by $R(1-\Delta)$: When there is no auditing at all, the project is still profitable for F. Thus any change in auditing-costs $(\kappa)$ does not affect $I^{max}$.
#
# * An increase in $\Delta$ clearly lowers $s^*$ proportionally. As the financier in this case always receive the simple payoff $R^F=Rs^* = R(1-\Delta)$, an increase in $\Delta$ lowers the profits from the project, thus $I$ decreases.
#
#
| 28,817 |
/notebooks/moseymo/hotel-demand-eda-business-perspective.ipynb | 48a84887a1a30c3e405eeea652812b96d1b3552f | [] | no_license | Sayem-Mohammad-Imtiaz/kaggle-notebooks | https://github.com/Sayem-Mohammad-Imtiaz/kaggle-notebooks | 5 | 6 | null | null | null | null | Jupyter Notebook | false | false | .py | 17,457 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Overview
#
# This notebook investigates hotel booking data to find trends and determine relationships between variables of interest. The data was obtained from the following link - https://www.kaggle.com/jessemostipak/hotel-booking-demand
#
# Goal - use the data to find trends and make business decisions about where to invest more and what the patrons of the hotel services want more of, and also to expose areas where money is being lost or profitability is not being maximized.
#
# Due to the somewhat cyclical nature of the vacation industry, it is important to find ways to maximize income during the high seasons and minimize losses during the low seasons.
# ### Imports and Settings
# +
import numpy as np
import pandas as pd
pd.set_option("display.max_columns",500)
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -
# ### Get the data
# load data: the Kaggle "Hotel booking demand" CSV from the local input directory
file_path = "../input/hotel-booking-demand/hotel_bookings.csv"
data = pd.read_csv(file_path)
#take a look at the first 5 rows of the data
data.head(5)
#describe the numerical data and see basic stats
data.describe()
#describe the categorical (object-dtype) data and see basic stats
data.describe(include="O")
#get some basic info about the data contained (dtypes, non-null counts)
data.info()
#find all the null values for each column
#looks like all null values are in country, agent, and company columns
data.isnull().sum()
# ### Visualize the data with some plots
# **Let's investigate the finances of the hotel**
#
# We will use columns such as average daily rate and create a new column called revenue as well
# **ADR Analysis**
#For further analysis we will split the data into city and resort hotel
city_data = data[data["hotel"]=="City Hotel"]
resort_data = data[data["hotel"]=="Resort Hotel"]
#ADR distribution for each hotel type (rows with adr > 2000 excluded as outliers)
sns.distplot(city_data[city_data["adr"]<=2000]["adr"],bins=30)
plt.show()
sns.distplot(resort_data[resort_data["adr"]<=2000]["adr"],bins=30)
plt.show()
#monthly ADR summary statistics for the city hotel, reindexed into calendar order
monc_adr = city_data.groupby("arrival_date_month")["adr"].describe()
monc_adr = monc_adr.reindex(["January","February","March","April","May","June","July","August","September","October",\
                            "November","December"])
monc_adr
#repeat for resort data
monr_adr = resort_data.groupby("arrival_date_month")["adr"].describe()
monr_adr = monr_adr.reindex(["January","February","March","April","May","June","July","August","September","October",\
                            "November","December"])
monr_adr
#and for both hotels combined
mon_adr = data.groupby("arrival_date_month")["adr"].describe()
mon_adr = mon_adr.reindex(["January","February","March","April","May","June","July","August","September","October",\
                           "November","December"])
mon_adr
#higher variability with min just under $50
ax1=sns.barplot(x=monc_adr["mean"],y=monc_adr.index,palette='muted')
ax1.set_xlabel("ADR")
ax1.set_ylabel("Month")
plt.show()
#lower variability with min just over $80
ax2=sns.barplot(x=monr_adr["mean"],y=monr_adr.index,palette='muted')
ax2.set_xlabel("ADR")
ax2.set_ylabel("Month")
plt.show()
# *Here we see the resort hotel charges much higher rates during the months of July and August, which may present opportunities to try and maximize revenue. As we will see later, the cancellation rate is high and they may not be capturing as much revenue as desired.*
# ### Cancellations
# **Below we will take a brief look at cancellations**<br/>
# When we look at revenue below and visitors we will need to modify the dataset to reflect the actual visitors <br/>
# and the revenue obtained from them
#
# Count non-cancelled (0) vs. cancelled (1) bookings: just over a third of
# all entries are cancellations.
data.is_canceled.value_counts()
# *We see here that just over one third of our entries resulted in cancellations. If this is not taken into account when we look at revenue and visitor data, it will greatly skew any insights. When looking at other items, such as ADR trends and popular packages, the cancellations aren't as big a factor.* <br/>
#
# *This is different from the no-show case, where it is assumed the full payment for the visit has been remitted and not refunded. With cancellations, on the other hand, the revenue value depends on the cancellation policy, under which a full or partial refund may be given.*
#create ndata variable for new data not including the cancelled bookings
ndata = data[data.is_canceled == 0].copy()
ndata.head()
# **Revenue Analysis** <br/>
#
# Revenue will be estimated by multiplying adr by duration, revenue and duration column will be created
#create duration column (total nights = weekend nights + week nights)
ndata["duration"] = ndata['stays_in_weekend_nights'] + ndata['stays_in_week_nights']
#create revenue column (estimated as average daily rate * nights stayed)
ndata["revenue"] = ndata["adr"]*ndata["duration"]
#we split the data again to look at each individually
#NOTE(review): this rebinds city_data/resort_data to the cancellation-free
#subset, shadowing the earlier full-data splits.
city_data = ndata[ndata["hotel"]=="City Hotel"]
resort_data = ndata[ndata["hotel"]=="Resort Hotel"]
#revenue data and distribution
city_data["revenue"].describe()
city_data["revenue"].sum()
resort_data["revenue"].describe()
resort_data["revenue"].sum()
#monthly realised revenue for the city hotel (sums the revenue column),
#reindexed into calendar order
monc_rev = city_data.groupby("arrival_date_month").sum()["revenue"]
monc_rev = monc_rev.reindex(["January","February","March","April","May","June","July","August","September","October",\
                            "November","December"])
monc_rev
#NOTE(review): the extra "ax =" below is a stray second assignment (alias).
ax3=ax = monc_rev.plot.bar(rot=50)
ax3.set_xlabel("Month")
ax3.set_ylabel("Revenue")
plt.show()
#monthly realised revenue for the resort hotel
monr_rev = resort_data.groupby("arrival_date_month").sum()["revenue"]
monr_rev = monr_rev.reindex(["January","February","March","April","May","June","July","August","September","October",\
                            "November","December"])
monr_rev
ax4 = monr_rev.plot.bar(rot=50)
ax4.set_xlabel("Month")
ax4.set_ylabel("Revenue")
plt.show()
#revenue by distribution channel
city_roi = city_data.groupby("distribution_channel").sum()["revenue"]
city_roi
city_roi.plot(kind="bar")
plt.show()
resort_roi = resort_data.groupby("distribution_channel").sum()["revenue"]
resort_roi
resort_roi.plot(kind="bar")
plt.show()
# **Investigating Most Popular Packages, Room Types and Special Requests**
#distribution of the number of special requests per booking
sns.countplot(data["total_of_special_requests"])
plt.show()
#most popular meal package
sns.countplot(data["meal"])
plt.show()
#Most popular booking channel
#Travel agents and tour operators bring in most of the visitors
sns.countplot(data["distribution_channel"])
plt.show()
#Most popular market segment
#travel agents are most represented among our visitors as well
sns.countplot(data["market_segment"])
plt.xticks(rotation=50)
plt.show()
# **Investigating the distribution and behaviour of people**
#top 10 countries of origin for each hotel (non-cancelled stays)
top10c = city_data.country.value_counts().nlargest(10).to_frame().reset_index()
top10c.rename(columns={'index': 'Country', 'country': 'Visitors'}, inplace=True)
top10c
top10r = resort_data.country.value_counts().nlargest(10).to_frame().reset_index()
top10r.rename(columns={'index': 'Country', 'country': 'Visitors'}, inplace=True)
top10r
#Average duration of stay per month
av_dur = ndata.groupby("arrival_date_month").mean()["duration"]
av_dur = av_dur.reindex(["January","February","March","April","May","June","July","August","September","October",\
                           "November","December"])
av_dur
ax5 = av_dur.plot.bar(rot=50)
ax5.set_xlabel("Month")
ax5.set_ylabel("Duration")
plt.show()
#lead time (days between booking and arrival) distribution
sns.distplot(data['lead_time'],bins=30)
plt.show()
#amount of cancellations per month (computed on the full dataset)
df_can = data[data["reservation_status"]=="Canceled"]
mon_can = df_can.groupby("arrival_date_month").sum()["is_canceled"]
mon_can = mon_can.reindex(["January","February","March","April","May","June","July","August","September","October",\
                           "November","December"])
mon_can
ax6 = mon_can.plot.bar(rot=50)
ax6.set_xlabel("Month")
ax6.set_ylabel("cancellations")
plt.show()
# ### Investigating correlations in the data
#categorical columns, dropped below so only numeric columns enter the
#correlation matrix
data.describe(include="O").columns
corr_data = data.drop(['hotel', 'arrival_date_month', 'meal', 'country', 'market_segment',
       'distribution_channel', 'reserved_room_type', 'assigned_room_type',
       'deposit_type', 'customer_type', 'reservation_status',
       'reservation_status_date'],axis=1)
corr_data.corr()
plt.figure(figsize=(12,6))
sns.heatmap(corr_data.corr())
#correlation of every numeric feature with cancellation
corr_data.corr()["is_canceled"].sort_values(ascending=False).to_frame()
#hypothesis - city has higher lead times but also high cancellations, would reducing the lead times
#lead to reduced cancellations and high revenues
city_data["lead_time"].mean()
#o-prefixed frames are the ORIGINAL (cancellations included) splits
ocity_data = data[data["hotel"]=="City Hotel"]
oresort_data = data[data["hotel"]=="Resort Hotel"]
ocity_data["is_canceled"].sum()
#cancellation rate for the city hotel
city_can = ocity_data["is_canceled"].sum()/ocity_data.shape[0]
city_can
#which channel are we seeing the most cancellations
ocity_data.groupby("distribution_channel")["is_canceled"].sum().to_frame()
oresort_data["lead_time"].mean()
oresort_data["is_canceled"].sum()
#cancellation rate for the resort hotel
resort_can = oresort_data["is_canceled"].sum()/oresort_data.shape[0]
resort_can
#which channel are we seeing the most cancellations
oresort_data.groupby("distribution_channel")["is_canceled"].sum().to_frame()
# ### Recommendations and Future Work
# **These are some suggestions based on the information provided. Additional work will need to be done to get more actionable insights**
#
# <ol>
# <li>A cost benefit analysis should be done to determine if an appropriate return is being made on all the distribution channels being utilized</li>
# <li>Cancellations during peak season may be more detrimental to the resort's revenue, as that period contributes the most to revenue. A recommendation would be to further investigate the connection between lead time and cancellation, and other factors in the causal chain, in order to minimize this. The resort could also run promotions and provide incentives to minimize cancellations. </li>
# <li>It seems that BB, HB, and SC meal packages are the most popular. To save on costs, it may be good to phase out the other meal packages and focus on the best performing. Further analysis should be done to determine what makes these three so popular.</li>
# </ol>
| 10,465 |
/Q1/MSAI 339 - Data Science Seminar/Project/Checkpoints/CP_3/src/.ipynb_checkpoints/ch3_code-checkpoint.ipynb | 12dc5eec8dfc03a6a068989305014c12abd1c5ef | [] | no_license | Ikhlas-Attarwala/MSAI | https://github.com/Ikhlas-Attarwala/MSAI | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 442,923 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
#import numpy as np
from autograd.util import flatten_func
from autograd import grad as compute_grad
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from sklearn import linear_model
from sklearn.cluster import KMeans
import math
import autograd.numpy as np
import custom_utilities as util
get_ipython().run_line_magic('matplotlib', 'inline')
# + deletable=true editable=true
# Load the raw LiDAR point cloud; each line is "lat lon altitude intensity".
# Use a context manager so the file handle is closed even if parsing fails,
# and avoid shadowing the stdlib ``csv`` module name with the file object.
data = []
with open('final_project_data/final_project_point_cloud.fuse', 'r') as fuse_file:
    for line in fuse_file:
        ls = line.split(" ")
        data.append([float(ls[0]), float(ls[1]), float(ls[2]), float(ls[3])])
data = np.array(data)
#print(data)
num = len(data)  # number of points read (was hand-counted in the loop)
print(num)
lat, long, height, i = data.T
# + deletable=true editable=true
# Use the first point as the origin of the local planar projection below.
print(data[0])
lat0 = data[0][0]
lon0 = data[0][1]
# + deletable=true editable=true
"""
def latlontoxy(lat1, lon1, lat0,lon0):
R = 6378.137 # Radius of earth in KM
dLat = lat2 * math.pi / 180 - lat1 * math.pi / 180
dLon = lon2 * math.pi / 180 - lon1 * math.pi / 180
a = math.sin(dLat/2) * math.sin(dLat/2) + math.cos(lat1 * math.pi / 180) * math.cos(lat2 * math.pi / 180) * math.sin(dLon/2) * math.sin(dLon/2);
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a));
d = R * c
return d * 1000 # meters
"""
# + deletable=true editable=true
def rot(x, y, co):
    """Rotate/shear the point (x, y) by the slope parameter ``co``.

    Uses cos = 1/(1+co**2) and sin = co/(1+co**2); the transformed
    x-coordinate is additionally scaled by 0.002 (plot-compression factor).
    Returns the transformed pair (x_rot, y_rot).
    """
    denom = 1 + co ** 2
    cos_f, sin_f = 1 / denom, co / denom
    x_rot = (x * cos_f + y * sin_f) * 0.002
    y_rot = x * (-sin_f) + y * cos_f
    return x_rot, y_rot
# + deletable=true editable=true
def latlontoxy(lat1, lon1, lat0, lon0):
    """Project (lat1, lon1) to local planar metres relative to origin (lat0, lon0).

    Uses the standard series expansion for the length of one degree of
    latitude/longitude.  All inputs are in degrees; returns (x, y) where x is
    the latitude offset and y the longitude offset, both in metres.
    """
    # BUG FIX: the series expansion takes the reference latitude in RADIANS;
    # the original passed degrees straight into cos(), skewing both scales.
    lat0_rad = math.radians(lat0)
    m_per_deg_lat = 111132.954 - 559.822 * math.cos(2.0 * lat0_rad) + 1.175 * math.cos(4.0 * lat0_rad)
    m_per_deg_lon = (math.pi / 180) * 6367449 * math.cos(lat0_rad)
    x = (lat1 - lat0) * m_per_deg_lat
    y = (lon1 - lon0) * m_per_deg_lon
    return x, y
# + deletable=true editable=true
# gradient descent (with optional momentum) for an autograd-differentiable cost
def gradient_descent(g,w,alpha,max_its,beta,version):
    """Minimize cost ``g`` from initial weights ``w``; return the weight history.

    g        : cost function of the weight object ``w`` (autograd-compatible)
    alpha    : step length
    max_its  : number of iterations
    beta     : momentum coefficient (0 = plain gradient descent)
    version  : 'normalized' rescales the gradient to unit norm each step;
               any other value uses the raw gradient
    Returns a list of ``max_its + 1`` weight snapshots, unflattened back to
    the original nested shape.
    """
    # flatten the input function, create gradient based on flat function
    g_flat, unflatten, w = flatten_func(g, w)
    grad = compute_grad(g_flat)
    # record history
    w_hist = []
    w_hist.append(unflatten(w))
    # start gradient descent loop
    z = np.zeros((np.shape(w))) # momentum term
    # over the line
    for k in range(max_its):
        # plug in value into func and derivative
        grad_eval = grad(w)
        grad_eval.shape = np.shape(w)
        ### normalized or unnormalized descent step? ###
        if version == 'normalized':
            grad_norm = np.linalg.norm(grad_eval)
            if grad_norm == 0:
                # avoid division by zero at an exactly-flat point by
                # substituting a tiny randomly-signed norm
                grad_norm += 10**-6*np.sign(2*np.random.rand(1) - 1)
            grad_eval /= grad_norm
        # take descent step with momentum
        z = beta*z + grad_eval
        w = w - alpha*z
        # record weight update
        w_hist.append(unflatten(w))
    return w_hist
# + deletable=true editable=true
def normalize(data, data_mean, data_std):
    """Return ``data`` standardised to zero mean / unit std (z-score)."""
    return (data - data_mean) / data_std
# + deletable=true editable=true
# Convert every (lat, lon) point to local planar metres relative to the
# first point; columns become [x, y, altitude, intensity].
model = []
for d in data:
    h = np.zeros(4)
    h[0],h[1]=latlontoxy(d[0],d[1],lat0,lon0)
    h[2]=d[2]
    h[3]=d[3]
    model.append(h)
model=np.array(model)
# + deletable=true editable=true
np.savetxt('b.csv',model, delimiter=' ')
# + deletable=true editable=true
x,y,z,i=model.T
# + deletable=true editable=true
#randomly select all points for the linear regression
#sample = np.array(random.sample(list(model), 4300))
sample=model
#print(np.ones(len(sample))[:, np.newaxis])
# Design matrix: a leading column of ones (bias) followed by x and y.
sample_pos=np.concatenate((np.ones(len(sample))[:, np.newaxis],sample[:,:-1]),axis=1)
#print(sample_pos)
sample_pos_means = np.mean(sample_pos,axis = 0)
sample_pos_stds = np.std(sample_pos,axis = 0)
# Standardise every column except the bias, then re-attach the bias column.
sample_pos_norm = normalize(sample_pos[:,1:],sample_pos_means[1:],sample_pos_stds[1:])
sample_pos_norm = np.concatenate((np.ones(len(sample_pos))[:, np.newaxis],sample_pos_norm),axis=1)
#print(sample_pos_norm)
# Target: normalised altitude; features: bias + normalised x, y.
z_norm = sample_pos_norm[:,-1][:,np.newaxis]
#z_norm.shape(len(z_norm,1))
data_norm = sample_pos_norm[:,:-1]
# + deletable=true editable=true
# NOTE(review): this cell is an exact duplicate of the normalisation above.
sample_pos_norm = normalize(sample_pos[:,1:],sample_pos_means[1:],sample_pos_stds[1:])
sample_pos_norm = np.concatenate((np.ones(len(sample_pos))[:, np.newaxis],sample_pos_norm),axis=1)
#print(sample_pos_norm)
z_norm = sample_pos_norm[:,-1][:,np.newaxis]
#z_norm.shape(len(z_norm,1))
data_norm = sample_pos_norm[:,:-1]
# + deletable=true editable=true
# initial plane parameters [bias, x-slope, y-slope]
w = np.random.randn(3,1)*0.01
print(w)
# least squares MSE regression cost
least_squares = lambda w: np.sum((np.dot(data_norm,w) - z_norm)**2)
alpha = 10**(-3)
max_its = 800
# + deletable=true editable=true
# run gradient descent
weight_history = gradient_descent(least_squares,w,alpha,max_its,beta = 0,version = 'normalized')
# plot cost function history
cost_history = [least_squares(v) for v in weight_history]
histories = [cost_history]
# + deletable=true editable=true
# plot everything
demo = util.Visualizer()
demo.compare_regression_histories(histories)
# + deletable=true editable=true
#print(weight_history[-1])
final_w_norm = weight_history[-1]
#squared residual of every point from the fitted plane (normalised units)
thresholds = (z_norm-np.dot(data_norm,final_w_norm))**2
#print(thresholds.shape)
#print(thresholds)
# + deletable=true editable=true
# keep only points close to the fitted ground plane -> candidate road surface
data_filtered_z = []
for d in range(len(model)):
    if thresholds[d] < 0.035:
        data_filtered_z.append(model[d])
road_data = np.array(data_filtered_z)
rx, ry, rz, ri = road_data.T
# + deletable=true editable=true
plt.hist(z,255)
plt.show()
# + deletable=true editable=true
plt.hist(i)
plt.show()
# + deletable=true editable=true
'''
zrange=max(z)-min(z)
med=np.median(z)
print(med)
minimum=np.median(z)-0.005*zrange
maximum=np.median(z)+0.02*zrange
#mean=np.array(z).mean()
#std=np.array(z).std()
road_data=[ ]
k=0
for d in model:
if d[2]>=minimum and d[2]<=maximum:
road_data.append(d)
k=k+1
road_data=np.array(road_data)
rx, ry, rz, ri = road_data.T
counts=plt.hist(rz, 14,color='black',alpha=0.5)
#x=[mean+2*std]*100
#x=np.array(x)
#plt.plot(x)
plt.show()
print(k)
'''
# + deletable=true editable=true
np.savetxt('c.csv',road_data, delimiter=' ')
# -
# Cluster road points into two groups by LiDAR intensity -- presumably to
# separate bright lane markings from darker asphalt (TODO confirm).
iteration=1000000
# ,random_state=0
iclass = KMeans(max_iter=iteration,n_clusters=2).fit(ri.reshape(-1,1))
label = iclass.labels_
# NOTE(review): with n_clusters=2 labels are only 0/1, so `other` always
# stays empty; `w` also shadows the regression weight vector defined earlier.
b,w,other=[],[],[]
for q in range(len(label)):
    lane=[road_data[q][0],road_data[q][1],road_data[q][2],road_data[q][3]]
    if (label[q]==0):
        b.append(lane)
    elif (label[q]==1):
        w.append(lane)
    else:
        other.append(lane)
w=np.asarray(w)
print(len(b))
print(len(w))
print(len(other))
print(np.shape(w))
wx,wy,wz, wi = w.T
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(rx, ry, ',')
ax.plot(wx, wy, ',', color='red')
plt.savefig('lane_rough.png',dpi=400,bbox_inches='tight')
plt.show()
# candidate lane-marking points used downstream
original = w
print(np.shape(original))
# # Here we get original
# + deletable=true editable=true
"""
# draw pixel picture
fig=plt.figure()
road=fig.add_subplot(111)
road.plot(rx,ry,',')# draw pixel
plt.show()
original=[]
#intensity
si= sorted(i)[int(0.997*len(i)):int(0.9995*len(i))]
minl=min(si)
maxl=max(si)
print("{} {}".format(minl, maxl))
for e in road_data:
if e[3]>=minl and e[3]<=maxl:
original.append(e[:-1])
original= np.array(original)
lx, ly, lz = original.T
plt.savefig('original.png',dpi=400,bbox_inches='tight')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(rx, ry, ',')
ax.plot(lx, ly, ',', color='red')
plt.savefig('lane_rough.png',dpi=400,bbox_inches='tight')
plt.show()
#bnear(e,distance=0.3) > 5
"""
# + deletable=true editable=true
# Fit a straight line through the lane candidates to estimate the dominant
# road direction.
ox=original[:,0][:,np.newaxis]
oy=original[:,1]
model_all=linear_model.LinearRegression()
model_all.fit(ox,oy)
# + deletable=true editable=true
np.savetxt('d.csv',original, delimiter=' ')
# + deletable=true editable=true
model_ransac_all=linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac_all.fit(ox,oy)
inlier_mask_all=model_ransac_all.inlier_mask_
outlier_mask_all=np.logical_not(inlier_mask_all)
linex = ox
liney = model_all.predict(ox)
#alpha = model_all.coef_
# NOTE(review): this prints the OLD alpha (the gradient-descent step size)
# before overwriting it with a hard-coded road slope; the commented line
# above suggests it was meant to come from model_all.coef_.
print('para',alpha)
alpha = -0.252
plt.plot(rx, ry, ',')
plt.plot(linex, liney, color='yellow', linestyle='-', linewidth=2, label='lane1')
# + deletable=true editable=true
# Rotate the lane candidates by the road slope so the road axis is roughly
# horizontal, then band-limit along the rotated y-axis.
rotate=[]
for t in original:
    first,second = rot(t[0],t[1],alpha)
    rotate.append([first,second,t[2],t[3]])
rotate=np.asarray(rotate)
#print(rotate)
rotate=rotate.reshape(np.shape(rotate)[0],4)
#print(np.shape(rotate))
# NOTE(review): `i` here shadows the intensity column unpacked earlier.
rotx,roty,rotz,i=rotate.T
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(rotx, roty, ',', color='red')
# +
# keep only points whose rotated y lies strictly inside (lower, upper)
upper=9
lower=-1
limit=[]
limitr=[]
for p in range(len(rotate)):
    if roty[p]<upper and roty[p]>lower:
        limit.append(original[p])
        limitr.append([rotate[p][0],rotate[p][1]])
limit=np.asarray(limit)
print(np.shape(limit))
limx,limy,limz,limi=limit.T
limitr=np.asarray(limitr)
limrx,limry=limitr.T
print(np.shape(limitr))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(limx, limy, ',', color='red')
fig = plt.figure()
ax = fig.add_subplot(111)
plt.axis([-5, 5, -6, 18])
ax.plot(limrx, limry, ',', color='red')
# + deletable=true editable=true
plt.hist(limrx,255)
plt.show()
plt.hist(limry,255)
plt.show()
# + deletable=true editable=true
# + deletable=true editable=true
# In practice, the k-means algorithm is very fast (one of the fastest clustering algorithms available)
# But it falls in local minima. That’s why it can be useful to restart it several times.
# Cluster the rotated (x, y) lane candidates into 4 groups -- one per lane.
# NOTE(review): this rebinds `data`, clobbering the raw point-cloud array
# loaded at the top; k-means label numbering is arbitrary, so the
# lane1..lane4 identities can change between runs.
iteration=1000000
# ,random_state=0
data = KMeans(max_iter=iteration,n_clusters=4).fit(limitr)
label = data.labels_
lane1,lane2,lane3,lane4,lane=[],[],[],[],[]
for q in range(len(label)):
    lane=[limit[q][0],limit[q][1],limit[q][2]]
    if (label[q]==0):
        lane1.append(lane)
    elif (label[q]==1):
        lane2.append(lane)
    elif (label[q]==2):
        lane3.append(lane)
    else:
        lane4.append(lane)
print(len(lane1))
lane1=np.array(lane1)
if(len(lane1)!=0):
    x1,y1,z1=lane1.T
    X1=lane1[:,0][:,np.newaxis]
    Y1=lane1[:,1]
lane2=np.array(lane2)
if(len(lane2)!=0):
    x2,y2,z2=lane2.T
    X2=lane2[:,0][:,np.newaxis]
    Y2=lane2[:,1]
lane3=np.array(lane3)
if(len(lane3)!=0):
    x3,y3,z3=lane3.T
    X3=lane3[:,0][:,np.newaxis]
    Y3=lane3[:,1]
print(X1.shape,Y1.shape)
lane4=np.array(lane4)
if(len(lane4)!=0):
    x4,y4,z4=lane4.T
    X4=lane4[:,0][:,np.newaxis]
    Y4=lane4[:,1]
# overlay each cluster (red) on the road points
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(rx, ry, ',')
ax.plot(X1, Y1, ',', color='red')
plt.savefig('lane_rough.png',dpi=400,bbox_inches='tight')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(rx, ry, ',')
ax.plot(X2, Y2, ',', color='red')
plt.savefig('lane_rough.png',dpi=400,bbox_inches='tight')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(rx, ry, ',')
ax.plot(X3, Y3, ',', color='red')
plt.savefig('lane_rough.png',dpi=400,bbox_inches='tight')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(rx, ry, ',')
ax.plot(X4, Y4, ',', color='red')
plt.savefig('lane_rough.png',dpi=400,bbox_inches='tight')
plt.show()
ox=original[:,0][:,np.newaxis]
oy=original[:,1]
# + deletable=true editable=true
# RANSAC fit over ALL lane candidates, plus the plain least-squares line
# (the plotted line below uses model_all, not the RANSAC estimator).
model_ransac_all=linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac_all.fit(ox,oy)
inlier_mask_all=model_ransac_all.inlier_mask_
outlier_mask_all=np.logical_not(inlier_mask_all)
linex = ox
liney = model_all.predict(ox)
alpha = model_all.coef_
print('para',alpha)
plt.plot(rx, ry, ',')
plt.plot(linex, liney, color='yellow', linestyle='-', linewidth=2, label='lane1')
# + deletable=true editable=true
# Per-lane line fits: ordinary least squares and RANSAC for each of the four
# clusters; the final figure draws the four RANSAC lines over the road points.
model=linear_model.LinearRegression()
model.fit(X1,Y1)
model2=linear_model.LinearRegression()
model3=linear_model.LinearRegression()
model2.fit(X2,Y2)
model3.fit(X3,Y3)
model4=linear_model.LinearRegression()
model4.fit(X4,Y4)
model_all=linear_model.LinearRegression()
model_all.fit(ox,oy)
model_ransac=linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X1,Y1)
inlier_mask=model_ransac.inlier_mask_
outlier_mask=np.logical_not(inlier_mask)
model_ransac2=linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac2.fit(X2,Y2)
inlier_mask2=model_ransac2.inlier_mask_
outlier_mask2=np.logical_not(inlier_mask2)
model_ransac3=linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac3.fit(X3,Y3)
inlier_mask3=model_ransac3.inlier_mask_
outlier_mask3=np.logical_not(inlier_mask3)
model_ransac4=linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac4.fit(X4,Y4)
inlier_mask4=model_ransac4.inlier_mask_
outlier_mask4=np.logical_not(inlier_mask4)
line_X=X1
line_y=model.predict(X1)
line_y_ransac=model_ransac.predict(X1)
line_X2=X2
line_y2=model2.predict(X2)
line_y_ransac2=model_ransac2.predict(X2)
line_X3=X3
line_y3=model3.predict(X3)
line_y_ransac3=model_ransac3.predict(X3)
line_X4=X4
line_y4=model4.predict(X4)
line_y_ransac4=model_ransac4.predict(X4)
plt.plot(rx, ry, ',')
plt.plot(line_X, line_y_ransac, color='yellow', linestyle='-', linewidth=2, label='lane1')
plt.plot(line_X2, line_y_ransac2, color='red', linestyle='-',linewidth=2, label='lane2')
plt.plot(line_X3, line_y_ransac3, color='green', linestyle='-',linewidth=2, label='lane3')
plt.plot(line_X4, line_y_ransac4, color='black', linestyle='-',linewidth=2, label='lane4')
plt.legend(loc='lower right')
plt.show()
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
| 13,826 |
/homework/Alex-赵鑫荣/1220_Exercises.ipynb | 3607969fff6a2ca4b26b94ea05a69b112cf12933 | [] | no_license | north-jewel/data_analysis | https://github.com/north-jewel/data_analysis | 8 | 6 | null | 2018-11-20T09:13:38 | 2018-11-20T09:11:40 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 145,738 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visualizing Chipotle's Data
# This time we are going to pull data directly from the internet.
# Special thanks to: https://github.com/justmarkham for sharing the dataset and materials.
#
# ### Step 1. Import the necessary libraries
# +
import pandas as pd
import collections
from collections import Counter
import matplotlib.pyplot as plt
# set this so the graphs open internally
# %matplotlib inline
# -
# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv).
# ### Step 3. Assign it to a variable called chipo.
url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv'
# NOTE(review): `url` is unused -- the data is actually read from a local
# copy of the same tab-separated file.
location = r'E:\python4\1211OfDataFrame\chipotle.tsv'
chipo = pd.read_csv(location,sep = '\t')
# ### Step 4. See the first 10 entries
chipo.head(10)
# ### Step 5. Create a histogram of the top 5 items bought
x = chipo.item_name
letter_counts = Counter(x)
letter_counts
pd.DataFrame.from_dict(letter_counts,orient = 'index')[0].sort_values(ascending = False).head(5).plot(kind = 'bar')
# same top-5 chart built directly with value_counts
s = chipo.item_name.value_counts().head(5)
s.plot(kind = 'bar')
plt.xlabel('item_name')
# NOTE(review): the y-axis is an order count, not a price.
plt.ylabel('price')
plt.rcParams['font.sans-serif'] = 'SimHei'
plt.title('订单排名前五柱状图')
plt.show()
# ### Step 6. Create a scatterplot with the number of items orderered per order price
# #### Hint: Price should be in the X-axis and Items ordered in the Y-axis
# +
# Strip the leading '$' and convert item_price to float, then aggregate
# quantity and price per order for the scatter plot.
chipo.item_price = [float(x[1:]) for x in chipo.item_price]
s2 = chipo.groupby('order_id').sum()
# -
plt.scatter(s2.item_price,s2.quantity,s = 50,c = 'g')
plt.xlabel('item_price')
plt.ylabel('quantity')
plt.title('每个订单下的价格、数量')
plt.show()
# ### Step 7. BONUS: Create a question and a graph to answer your own question.
# top 5 items by total revenue
chipo.groupby('item_name')['item_price'].sum().sort_values(ascending = False).head().plot(kind = 'bar')
# +
# five most expensive individual order lines
s = chipo.sort_values('item_price',ascending = False).head(5)
plt.bar(s.item_name,s.item_price)
plt.xticks(s.item_name,rotation = 'vertical')
plt.title('价钱较贵的几个item')
plt.show()
# # plt.xlabel?
# -
# per-item maximum price, top 5
ss = chipo.groupby('item_name').max().sort_values('item_price',ascending = False).head()
plt.bar(ss.index,ss.item_price)
plt.xticks(ss.index,rotation = 'vertical')
plt.xlabel('item_name')
plt.ylabel('item_price',rotation = 'horizontal',horizontalalignment = 'right')
plt.title('通过item_name分组后,价钱较贵的5个item与price')
plt.show()
# +
# help(plt.bar)
# -
chipo.head()
# most common price points
chipo.item_price.value_counts().head().plot(kind = 'bar')
plt.show()
# items whose name contains 'and', with their (deduplicated) prices
chipo_and = chipo[chipo.item_name.str.contains('and')]
ss = chipo_and[['item_name','item_price']].drop_duplicates()
plt.barh(ss.item_name,ss.item_price)
plt.xlabel('item_price')
plt.ylabel('item_name')
plt.title('item_name中有and的item与price')
plt.show()
chipo.groupby(['item_name']).apply(lambda x:x.sort_index())
| 3,073 |
/CRISPR-scripts/Fig. S2f.ipynb | e5a769ff45716a23800017407f806c367d692c99 | [] | no_license | noireauxlab/CRISPR-Proofreading-Paper-AK | https://github.com/noireauxlab/CRISPR-Proofreading-Paper-AK | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 111,833 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import scipy as sp
import pandas as pd
import numpy.random as random
import matplotlib.pyplot as plt
import matplotlib as mpl
# %matplotlib inline
# %precision 3
# Basic array creation: integers 0..9.
sample_array = np.arange(10)
print('sample_array : ', sample_array)
# +
print(sample_array)
# A slice of a numpy array is a *view*, not a copy.
sample_array_slice = sample_array[0:5]
print(sample_array_slice)
# -
# Writing through the view also mutates the original array.
sample_array_slice[0:3] = 10
print(sample_array_slice)
print(sample_array)
# +
# np.copy produces an independent array: mutating it leaves the original alone.
sample_array_copy = np.copy(sample_array)
print(sample_array_copy)
sample_array_copy[0:3] = 20
print(sample_array_copy)
print(sample_array)
# +
# Boolean-mask indexing: rows of `data` where the matching name equals 'a'.
sample_names = np.array(['a', 'b', 'c', 'd', 'e'])
random.seed(0)
data = random.randn(5, 5)
print(sample_names)
print(data)
# -
sample_names == 'a'
data[sample_names =='a']
# np.where selects elementwise from x_array where the condition holds,
# otherwise from y_array.
cond_data = np.array([True, True, False, False, True])
x_array = np.array([1,2,3,4,5])
y_array = np.array([100,200,300,400,500])
print(np.where(cond_data, x_array, y_array))
# +
sample_names = np.array(['a', 'b', 'c', 'd', 'a'])
random.seed(0)
data = random.randn(5,5)
print(sample_names)
print(data)
# -
data[sample_names == 'b']
data[sample_names != 'c']
cond_data = np.array([False, False, True, True, False])
x_array = np.array([1,2,3,4,5])
y_array = np.array([6,7,8,9,10])
print(np.where(cond_data, x_array, y_array))
# np.unique returns the distinct values (here the two booleans).
cond_data = np.array([True, True, False, False, True])
print(np.unique(cond_data))
# Universal functions applied elementwise (the prints are Korean:
# "original data", "square root of every element", "exp of every element").
sample_data = np.arange(10)
print('원본 데이터 : ', sample_data)
print('모든 원소의 제곱근 : ', np.sqrt(sample_data))
print('모든 원소의 자연상수 지수함수 : ', np.exp(sample_data))
# Aggregations on a 3x3 matrix (min / max / mean / sum).
sample_multi_array_data1 = np.arange(9).reshape(3, 3)
print(sample_multi_array_data1)
print('최솟값 : ', sample_multi_array_data1.min())
print('최댓값 : ', sample_multi_array_data1.max())
print('평균 : ', sample_multi_array_data1.mean())
print('합계 : ', sample_multi_array_data1.sum())
# any() / all() on a boolean array.
cond_data = np.array([True, True, False, False, True])
print('True가 하나라도 있는지', cond_data.any())
print('모두 True인가', cond_data.all())
print(sample_multi_array_data1)
# Booleans are summed as 0/1, so this counts elements greater than 5.
print('5보다 큰 숫자가 몇 개인가 : ', (sample_multi_array_data1>5).sum())
# Diagonal elements and their sum (the trace).
print('대각성분 : ', np.diag(sample_multi_array_data1))
print('대각성분의 합 : ',np.trace(sample_multi_array_data1))
sample_multi_array_data2 = np.arange(16).reshape(4, 4)
sample_multi_array_data2
print('대각성분의 합 : ', np.trace(sample_multi_array_data2))
# Stacking and splitting: concatenate stacks rows; np.split cuts at the
# given row indices ([1,3] -> rows [0:1], [1:3], [3:]).
sample_array3 = np.array([[1,2,3], [4,5,6]])
sample_array4 = np.array([[7,8,9],[10,11,12]])
sample_array_vstack = np.concatenate([sample_array3, sample_array4])
first, second, third = np.split(sample_array_vstack, [1,3])
print(first, third)
sample_array5 = np.array([[13,14,15], [16,17,18], [19,20,21]])
sample_array_vstack2 = np.concatenate([sample_array3,sample_array4,sample_array5])
print(sample_array_vstack2)
first, second, third, fourth = np.split(sample_array_vstack2, [1,2,3])
print('-첫 번째 : \n', first, '\n')
print('-두 번째 : \n', second, '\n')
print('-세 번째 : \n', third, '\n')
print('-네 번째 : \n', fourth, '\n')
# +
# Broadcasting: the scalar 3 is added to every element.
sample_array = np.arange(10)
print(sample_array + 3)
sample_array1 = np.arange(12).reshape(3,4)
sample_array2 = np.arange(12,24).reshape(3,4)
# axis=0 stacks the two 3x4 blocks vertically into a 6x4 array.
concat_array = np.concatenate([sample_array1, sample_array2], axis=0)
concat_array
# -
# Broadcasting also works on a plain list once it is converted to an array.
sample_list = [1,2,3,4,5]
sample_list = np.array(sample_list) + 3
sample_list = list(sample_list)
| 3,497 |
/Data Visualization/Challenge - Data Visualization.ipynb | 668ae0663f6f45247f8edc98df3506ea02bf0c63 | [
"Apache-2.0"
] | permissive | foxan/dataquest | https://github.com/foxan/dataquest | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 104,828 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# https://www.dataquest.io/mission/185/challenge-data-visualization/
# NOTE(review): this notebook is Python 2 (bare `print` statements) and uses
# APIs removed from modern pandas (`pandas.tools.plotting`, `DataFrame.sort`);
# it will not run unmodified under Python 3 / current pandas.
# ## 1: Introduction To The Data
#
# In this challenge, you'll practice creating data visualizations using data on Hollywood movies that were released between 2007 to 2011. The goal is to better understand the underlying economics of Hollywood and explore the outlier nature of success of movies. The dataset was compiled by [David McCandless](http://www.informationisbeautiful.net/) and you can read about how the data was compiled here. You'll use a version of this dataset that was compiled by [John Goodall](http://jgoodall.me/), which can be downloaded from his Github repo [here](https://github.com/jgoodall/cinevis/blob/master/data/csvs/moviedata.csv).
# +
# # %sh
# wget https://raw.githubusercontent.com/jgoodall/cinevis/master/data/csvs/moviedata.csv
# # ls -l
# +
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# Load the movie dataset downloaded above.
hollywood_movies = pd.read_csv('moviedata.csv')
print hollywood_movies.head()
# -
# The `exclude` column carries no useful data, so drop it.
print hollywood_movies['exclude'].value_counts()
hollywood_movies = hollywood_movies.drop('exclude', axis=1)
# ## 2: Scatter Plots - Profitability And Audience Ratings
#
# Let's generate 2 scatter plots to better understand the relationship between the profitability of a movie and how an audience rated it.
# +
# Two stacked scatter plots: profitability vs. rating, and the transpose.
fig = plt.figure(figsize=(6, 10))
ax1 = fig.add_subplot(2, 1, 1)
ax1.scatter(hollywood_movies['Profitability'], hollywood_movies['Audience Rating'])
ax1.set(xlabel='Profitability', ylabel='Audience Rating', title='Hollywood Movies, 2007-2011')
ax2 = fig.add_subplot(2, 1, 2)
ax2.scatter(hollywood_movies['Audience Rating'], hollywood_movies['Profitability'])
ax2.set(xlabel='Audience Rating', ylabel='Profitability', title='Hollywood Movies, 2007-2011')
plt.show()
# -
# ## 3: Scatter Matrix - Profitability And Critic Ratings
#
# Both scatter plots in the previous step contained 1 outlier data point, which caused the scale of both plots to be incredibly lopsided to accomodate for this one outlier. The movie in question is [Paranormal Activity](https://en.wikipedia.org/wiki/Paranormal_Activity), and is widely known as the [most profitable movie ever](http://www.thewrap.com/paranormal-now-most-profitable-film-ever-9335/). The movie brought in *$193.4 million in revenue with a budget of only $15,000*. Let's filter out this movie so you can create useful visualizations with the rest of the data.
# +
# NOTE(review): pandas.tools.plotting was removed; the modern import is
# `from pandas.plotting import scatter_matrix`.
from pandas.tools.plotting import scatter_matrix
# Drop the extreme outlier before plotting.
normal_movies = hollywood_movies[hollywood_movies['Film'] != 'Paranormal Activity']
scatter_matrix(normal_movies[['Profitability', 'Audience Rating']], figsize=(6,6))
plt.show()
# -
# ## 4: Box Plot - Audience And Critic Ratings
#
# Let's use box plots to better understand the distributions of ratings by critics versus ratings by the audience.
#
# Use the Pandas Dataframe method plot to generate boxplots for the `Critic Rating` and `Audience Rating` columns.
fig = plt.figure()
normal_movies.boxplot(['Critic Rating', 'Audience Rating'])
plt.show()
# ## 5: Box Plot - Critic Vs Audience Ratings Per Year
#
# Now that you've visualized the total distribution of both the ratings columns, visualize how this distribution changed year to year.
# NOTE(review): DataFrame.sort was removed; use sort_values('Year') in
# modern pandas.
normal_movies = normal_movies.sort(columns='Year')
fig = plt.figure(figsize=(8,4))
ax1 = fig.add_subplot(1, 2, 1)
sns.boxplot(x=normal_movies['Year'], y=normal_movies['Critic Rating'], ax=ax1)
ax2 = fig.add_subplot(1, 2, 2)
sns.boxplot(x=normal_movies['Year'], y=normal_movies['Audience Rating'], ax=ax2)
plt.show()
# ## 6: Box Plots - Profitable Vs Unprofitable Movies
#
# Many Hollywood movies aren't profitable and it's interesting to understand the role of ratings in a movie's profitability. You first need to separate the movies into those were that profitable and those that weren't.
#
# We've created a new Boolean column called `Profitable` with the following specification:
#
# `False` if the value for `Profitability` is less than or equal to `1.0`. <br/>
# `True` if the value for `Profitability` is greater than or equal to `1.0`.
# +
def is_profitable(row):
    """Return True when the row's Profitability value exceeds 1.0.

    Used with DataFrame.apply(axis=1) to label each movie as
    profitable (True) or not (False).
    """
    return bool(row["Profitability"] > 1.0)
# Label every movie, then compare rating distributions for the two groups
# side by side (audience ratings left, critic ratings right).
normal_movies["Profitable"] = normal_movies.apply(is_profitable, axis=1)
fig = plt.figure(figsize=(8,6))
ax1 = fig.add_subplot(1, 2, 1)
sns.boxplot(x=normal_movies['Profitable'], y=normal_movies['Audience Rating'], ax=ax1)
ax2 = fig.add_subplot(1, 2, 2)
sns.boxplot(x=normal_movies['Profitable'], y=normal_movies['Critic Rating'], ax=ax2)
plt.show()
# -
| 4,824 |
/.ipynb_checkpoints/9-19-2019 - Lecture Notebook-checkpoint.ipynb | 0e4470e024aff72991063e316a0ec79462874cdf | [
"MIT"
] | permissive | sju-chem264-2019/9-19-2019-lecture-justyn-cespedes | https://github.com/sju-chem264-2019/9-19-2019-lecture-justyn-cespedes | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 134,603 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plotting and Functions
# This notebook will work trough how to plot data and how to define functions. Throughout the lecture we will take a few moments to plot different functions and see how they depend on their parameters
# ## Plotting in Python: Matplot
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
# Pyplot is a powerful plotting library that can be used to make publication quality plots. It is also useful for quickly plotting the results of a calculation.
#
# This is a quick demonstration of its use
#
# Note: when you call a library `import matplotlib.pyplot as plt` the way that you use it is to do the following `plt.function()` where `function()` is whatever you are trying to call from the library
# Define x and y values for some function
x = [i for i in range(20)]
y1 = [i**2 for i in x]
y2 = [i**3 for i in x]
# The methods used above to make the lists is considered very *pythonic*. It works the same as a loop, but outputs all the results into a list. The left-hand most argument is what the list elements will be and the right hand side is the way the loop will work.
# When you use pyplot to make a plot, you can add more than one data set to the figure until you render the plot. Once you render the plot it resets
# Quadratic and cubic curves on one figure, with labels and a legend.
plt.plot(x,y1)
plt.plot(x,y2)
plt.xlabel('X', fontsize=24)
plt.ylabel('Y', fontsize=24)
plt.legend(['Quadratic', 'Cubic'], loc=0)
plt.show()
# We can also use numpy functions to make our plots. Numpy is a very powerful math library
# linspace will make a list of values from initial to final with however many increments you want
# this example goes from 0-1.0 with 20 increments
# FIX: was `numpy.linspace(...)`; this notebook only imports numpy under the
# alias `np`, so the bare name `numpy` raised a NameError.
x = np.linspace(0, 1.0, 20)
print(x)
# Exponential decay exp(-2*pi*x), drawn in black.
exp_func=np.exp(-2*np.pi*x)
print(exp_func)
plt.plot(x,exp_func, color="black")
plt.xlabel('x', fontsize=24)
plt.ylabel("y(x)", fontsize=24)
plt.show()
# All aspects of the plot can be changed. The best way to figure out what you want to do is to go to the Matplotlib gallery and choose an image that looks like what you are trying to do.
#
# https://matplotlib.org/gallery/index.html
# ### Example: Scatter plot with histograms
# +
import numpy as np
#Fixing random state for reproducibility
np.random.seed(19680801)
# the random data
x = np.random.randn(1000)
y = np.random.randn(1000)
# definitions for the axes
# (fractions of the figure: a square scatter axes with a histogram strip
# along the top and another along the right edge)
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
spacing = 0.005
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom + height + spacing, width, 0.2]
rect_histy = [left + width + spacing, bottom, 0.2, height]
# start with a rectangular Figure
plt.figure(figsize=(8, 8))
ax_scatter = plt.axes(rect_scatter)
ax_scatter.tick_params(direction='in', top=True, right=True)
ax_histx = plt.axes(rect_histx)
ax_histx.tick_params(direction='in', labelbottom=False)
ax_histy = plt.axes(rect_histy)
ax_histy.tick_params(direction='in', labelleft=False)
# the scatter plot:
ax_scatter.scatter(x, y)
# now determine nice limits by hand:
# (round the largest |value| up to a multiple of the bin width so the
# histogram bins align with the scatter limits)
binwidth = 0.25
lim = np.ceil(np.abs([x, y]).max() / binwidth) * binwidth
ax_scatter.set_xlim((-lim, lim))
ax_scatter.set_ylim((-lim, lim))
bins = np.arange(-lim, lim + binwidth, binwidth)
ax_histx.hist(x, bins=bins)
ax_histy.hist(y, bins=bins, orientation='horizontal')
ax_histx.set_xlim(ax_scatter.get_xlim())
ax_histy.set_ylim(ax_scatter.get_ylim())
plt.show()
# -
# I don't have to be an expert in making that kind of plot. I just have to understand and guess enough to figure out. I also google things I don't know
#
# https://www.google.com/search?client=firefox-b-1-d&q=pyplot+histogram+change+color
#
# https://stackoverflow.com/questions/42172440/python-matplotlib-histogram-color?rq=1
#
# https://matplotlib.org/examples/color/named_colors.html
#
# Then I can make small changes to have the plot look how I want it to look
#
# Notice below I changed
#
# `ax_scatter.scatter(x, y, color="purple")`,
#
# `ax_histx.hist(x, bins=bins, color = "skyblue")`,
#
# `ax_histy.hist(y, bins=bins, orientation='horizontal', color="salmon")`
# +
# Same figure as above, with custom colors on the three axes.
#Fixing random state for reproducibility
np.random.seed(19680801)
# the random data
x = np.random.randn(1000)
y = np.random.randn(1000)
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
spacing = 0.005
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom + height + spacing, width, 0.2]
rect_histy = [left + width + spacing, bottom, 0.2, height]
# start with a rectangular Figure
plt.figure(figsize=(8, 8))
ax_scatter = plt.axes(rect_scatter)
ax_scatter.tick_params(direction='in', top=True, right=True)
ax_histx = plt.axes(rect_histx)
ax_histx.tick_params(direction='in', labelbottom=False)
ax_histy = plt.axes(rect_histy)
ax_histy.tick_params(direction='in', labelleft=False)
# the scatter plot:
ax_scatter.scatter(x, y, color="purple")
# now determine nice limits by hand:
binwidth = 0.25
lim = np.ceil(np.abs([x, y]).max() / binwidth) * binwidth
ax_scatter.set_xlim((-lim, lim))
ax_scatter.set_ylim((-lim, lim))
bins = np.arange(-lim, lim + binwidth, binwidth)
ax_histx.hist(x, bins=bins, color = "skyblue")
ax_histy.hist(y, bins=bins, orientation='horizontal', color="salmon")
ax_histx.set_xlim(ax_scatter.get_xlim())
ax_histy.set_ylim(ax_scatter.get_ylim())
plt.show()
# -
# Notice how I changed the colors on the plot based off of what I found on the stack exchange. The way to solve issues in the course and computational work is to google them.
# ## Plotting Exersice 1
# Find a plot from the gallery that you like. Then make some sort of noticable change to it.
# +
# orginal plot here
# +
# your new plot here
# -
# ## Plotting Exersice 2
# Plot a the following functions on the same plot from $ -2\pi $ to $2\pi$
#
# $$ \sin(2\pi x+\pi)$$
# $$ \cos(2\pi x+\pi)$$
# $$\sin(2\pi x+\pi)+\cos(2\pi x+\pi)$$
# This might be useful:
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.sin.html
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.cos.html#numpy.cos
# +
# Your code here
# -
# # Lecture plots
# Periodically during lecture we will take a pause to plot some of the interesting functions that we use in class.
# ## Classical wavefunctions
#
# The following plot shows the the spacial component of the standard wavefunction with a wavelength of $\lambda=\text{1.45 m}$ and a relative amplitude of $A=1$ when the time, $t=0$ and the phase $\phi=1.0$.
# Spatial part of the classical wavefunction at t = 0: sin(2*pi*x + phi)
# with relative amplitude 1 and phase phi = 1 (see the markdown cell above).
# FIX: was `numpy.linspace(...)`; this notebook only imports numpy under the
# alias `np`, so the bare name `numpy` raised a NameError.
x = np.linspace(0, 3.0, 100)
sinx=np.sin(2*np.pi*x+0+1)
plt.plot(x,sinx, color="black")
plt.xlabel('x', fontsize=24)
plt.ylabel("y(x)", fontsize=24)
plt.show()
# Make a new figure where you plot the same wave function at three time points in the future. Assume the frequency is $\nu=.1 \text{ ms / s} $ Use a different color for each plot
# +
# Your code here
# -
# ## Orthogonality
# Graphically show that the following two functions are orthogonal on the interval $-3\pi$ to $3\pi$
# $$ \sin(x) \text{ and } \cos(3x)$$
#
# Plot both functions together, then plot the product of both functions and explain why it is orthogonal
# +
# Your plots here
# +
# NOTE(review): `cos3x` is not defined anywhere in this notebook -- it is
# expected to be created in the "Your plots here" cell above, so this line
# raises a NameError until the exercise is completed.
prod=sinx*cos3x
# -
# Use the numpy trapezoid rule integrator to show that the two functions are orthogonal
# `np.trapz(y,x)`
#
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.trapz.html
# Example code
# FIX: was `numpy.linspace(...)` -- same NameError as above.
x = np.linspace(0, 1.0, 20)
exp_func=np.exp(-2*np.pi*x)
np.trapz(exp_func,x)
# +
# Your code here
# -
| 7,633 |
/L05-Logistic-Regression/cancer-detection.ipynb | b97e3483071374e535f194da24c2a34fe29f77dd | [] | no_license | macabdul9/ml-cb | https://github.com/macabdul9/ml-cb | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 216,246 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import load_breast_cancer
# Load the Wisconsin breast-cancer dataset bundled with scikit-learn.
df = load_breast_cancer()
print(type(df))
features = df.feature_names
target = df.target_names
print(features)
print(target)
X = df.data
Y = df.target
# Quick look at two of the feature columns, colored by class label.
plt.scatter(X[:, 3], X[:, 5], c = Y);
# Keep only feature columns 3 and 5 as an (n_samples, 2) matrix.
X = np.vstack((X[:, 3],X[:, 5])).T
print(X.shape)
plt.scatter(X[:, 0], X[:, 1], c = Y);
# ### Normalizing the data
# NOTE(review): only column 0 is standardized (zero mean, unit variance);
# column 1 is left on its original scale -- confirm this is intentional.
mean = np.mean(X[:, 0])
sigma = np.std(X[:, 0])
X[:, 0] = (X[:, 0] - mean)/sigma
plt.scatter(X[:, 0], X[:, 1], c = Y);
# ### splitting the data
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.3)
print(X_train.shape, X_test.shape)
print(Y_train.shape, Y_test.shape)
# Prepend a bias column of ones to the *full* X.  NOTE(review): this happens
# after the split, so X_train/X_test do not receive the bias column here.
ones = np.ones((X.shape[0], 1))
X = np.hstack((ones, X))
print(X[:4])
# ### Model
# +
def sigmoid(z):
    """Logistic function: map any real z (scalar or array) into (0, 1)."""
    denominator = 1 + np.exp(-z)
    return 1 / denominator
def hypothesis(theta, x):
    """Predicted probability of class 1 for features x: sigmoid(theta . x)."""
    linear_score = np.dot(theta.T, x)
    return sigmoid(linear_score)
def gradient(x,y, theta):
    """Batch gradient of the logistic log-likelihood: sum_i (y_i - p_i) * x_i.

    NOTE(review): despite taking x and y, the loop body reads the *global*
    bias-augmented arrays X and Y, so the arguments only determine the sample
    count m.  The parameter x is also shadowed by the reshaped sample inside
    the loop.  Verify against the training cell before refactoring.
    """
    grad = 0.0
    m = x.shape[0]
    for i in range(m):
        # hypothesis is declared hypothesis(theta, x); the arguments are
        # passed swapped here, which still yields the same scalar because
        # the dot product of the two vectors is symmetric.
        y_hat = hypothesis(X[i],theta)
        x = X[i].reshape((-1,1))
        grad += (Y[i] - y_hat)*x
    return grad
def getTheta(x, y, learning_rate = 0.0001):
    """Fit logistic-regression weights by 200 steps of batch gradient ascent.

    NOTE(review): theta is sized from the *global* X (bias-augmented, 3
    columns), not from the x argument, and gradient() likewise reads the
    globals -- so the x/y passed in are effectively ignored.
    """
    theta = np.zeros((X.shape[1],1))
    for i in range(200):
        theta = theta + learning_rate*gradient(x, y, theta)
    return theta
# -
# Train the model.  (As noted above, getTheta/gradient actually optimize over
# the global bias-augmented X and labels Y, not X_train/Y_train.)
theta = getTheta(X_train, Y_train)
theta
def predict(xtest,theta):
    """Hard 0/1 class label for one sample, thresholding the probability at 0.5."""
    probability = hypothesis(xtest, theta)
    return 1 if probability > 0.5 else 0
# Add the bias column to the test matrix (now 3 columns, matching theta).
ones = np.ones((X_test.shape[0], 1))
X_test = np.hstack((ones, X_test))
plt.scatter(X[:, 1], X[:, 2], c = Y);
# +
def drawLine():
    # Plot the data and the (intended) decision boundary.  Reads the globals
    # theta, X and Y rather than taking them as parameters.
    # plt.figure(figsize=(5,5))
    x = np.linspace(-1,2,10)
    print(x.shape)
    # Boundary from theta0 + theta1*x + theta2*y = 0, solved for y.
    y = -(theta[0,0]*1 + theta[1,0]*x)/theta[2,0]
    plt.scatter(X[:,1],X[:,2],c=Y)
    # NOTE(review): the boundary line is never drawn -- the plot call below
    # is commented out, so only the scatter appears.
    # plt.plot(x,y)
drawLine()
# -
| 2,184 |
/homework/Day_031_HW.ipynb | d1e3bddad8186062463446ae79356c7ae8d0e6b6 | [] | no_license | yehchitsai/2nd-ML100Days | https://github.com/yehchitsai/2nd-ML100Days | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 21,870 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Priority Queue - Nirali
class PriorityQueue():
    '''
    A binary-heap based (minimum) priority queue.

    Items pushed onto the queue must be mutually comparable with <;
    wrap them as (priority, payload) tuples when necessary.  The class
    also instruments itself: it counts bubble-up and sift-down activity,
    which is handy for empirical complexity experiments.
    '''
    def __init__(self):
        # Implicit binary tree stored in a flat list, plus the two
        # instrumentation counters.
        self._array = []
        self._counter_bubble = 0
        self._counter_sift = 0

    def push(self, obj):
        """Insert obj, restoring the heap property by bubbling it up."""
        self._array.append(obj)
        self._bubble_up(len(self._array) - 1)

    def pop(self):
        """Remove and return the smallest item, or None when empty."""
        heap = self._array
        if not heap:
            return None
        if len(heap) == 1:
            return heap.pop()
        # Move the last leaf to the root and sift it down.
        smallest = heap[0]
        heap[0] = heap.pop()
        self._sift_down(0)
        return smallest

    def _parent(self, n):
        # Index of node n's parent in the implicit tree.
        return (n - 1) // 2

    def _left_child(self, n):
        return 2 * n + 1

    def _right_child(self, n):
        return 2 * n + 2

    def _bubble_up(self, index):
        """Swap the item at index with its ancestors until heap order holds."""
        self._counter_bubble += 1
        while index > 0:
            above = self._parent(index)
            if self._array[index] < self._array[above]:
                # Item is smaller than its parent: swap and keep climbing.
                self._array[index], self._array[above] = (
                    self._array[above], self._array[index])
                index = above
            else:
                break

    def count_bubble_operations(self):
        return self._counter_bubble

    def _sift_down(self, index):
        """Swap the item at index with its smallest child until heap order holds."""
        self._counter_sift += 1
        n = len(self._array)
        while index < n:
            lc = self._left_child(index)
            self._counter_sift += 1
            if lc >= n:
                break  # no children: the item is a leaf
            # Pick the smaller of the one or two children.
            best = lc
            rc = self._right_child(index)
            if rc < n and self._array[rc] < self._array[lc]:
                best = rc
            # Done once the item is no larger than its smallest child.
            if self._array[index] <= self._array[best]:
                break
            self._array[index], self._array[best] = (
                self._array[best], self._array[index])
            # Continue from the child we swapped into.
            index = best

    def count_sift_operations(self):
        return self._counter_sift

    def size(self):
        return len(self._array)

    def is_empty(self):
        return len(self._array) == 0

    def show(self, index=0, level=0):
        """Print the heap as an indented tree, one node per line."""
        print("\t"*level, self._array[index])
        n = len(self._array)
        for child in (self._left_child(index), self._right_child(index)):
            if child < n:
                self.show(child, level + 1)

    def heapify(self, items):
        """ Take an array of unsorted items and replace the contents
        of this priority queue by them. """
        self._array = []
        for item in items:
            self.push(item)

    def decrease_priority(self, old, new):
        """ replace the item old (assumed in the priority queue)
        by the item new, which is assumed to have a smaller value """
        assert(new <= old)
        for position, current in enumerate(self._array):
            if current == old:
                self._array[position] = new
                self._bubble_up(position)
                break
# +
#COMPLEXITY TESTS by COUNTING OPERATIONS - NIRALI
PQ = PriorityQueue()
import random
# %matplotlib inline
import matplotlib.pyplot as plt
from pylab import *
from scipy import stats
class Count_Complexity():
def __init__(self):
self._siftArray = []
self._bubbleArray = []
def bubbleup_ops(self):
t1 = [32]
t5 = [941, 101, 149, 857, 616]
t10 = [989, 721, 588, 411, 284, 860, 988, 594, 27, 548]
t20 = [475, 808, 691, 902, 521, 507, 703, 256, 301, 792, 52, 244, 893, 764, 692, 926, 866, 42, 587, 15]
t50 = [34, 685, 698, 403, 440, 688, 573, 90, 312, 682, 925, 598, 304, 629, 635, 947, 184, 784, 523, 365, 656, 146, 189, 310, 522, 128, 800, 94, 474, 180, 789, 386, 321, 773, 987, 166, 214, 650, 550, 531, 255, 552, 724, 696, 526, 133, 383, 884, 410, 360]
t100 = [909, 648, 4, 882, 278, 370, 189, 691, 994, 629, 107, 131, 468, 168, 910, 590, 712, 75, 830, 697, 159, 63, 46, 733, 441, 898, 800, 927, 233, 382, 255, 414, 748, 188, 407, 146, 473, 343, 144, 628, 174, 48, 71, 586, 54, 716, 607, 595, 25, 211, 595, 670, 534, 597, 483, 502, 205, 984, 167, 92, 667, 971, 226, 848, 59, 10, 166, 187, 981, 890, 314, 529, 667, 157, 78, 480, 994, 876, 912, 738, 428, 978, 55, 275, 873, 345, 67, 935, 572, 578, 379, 666, 876, 309, 453, 866, 44, 139, 208, 996]
t200 = [742, 511, 796, 303, 330, 841, 236, 900, 619, 653, 113, 440, 212, 567, 965, 839, 677, 997, 310, 80, 631, 74, 27, 885, 762, 886, 298, 225, 325, 482, 504, 502, 912, 225, 379, 704, 857, 618, 448, 233, 196, 986, 3, 913, 183, 167, 327, 477, 542, 572, 882, 421, 480, 721, 701, 294, 407, 277, 324, 748, 973, 754, 293, 95, 743, 293, 479, 169, 142, 440, 457, 399, 776, 856, 736, 300, 382, 658, 733, 853, 18, 554, 763, 982, 924, 579, 151, 894, 56, 954, 525, 746, 965, 77, 841, 820, 252, 290, 88, 532, 597, 687, 744, 59, 679, 566, 772, 841, 171, 739, 187, 512, 348, 49, 713, 366, 217, 272, 368, 247, 313, 662, 952, 870, 528, 209, 300, 860, 955, 806, 671, 937, 171, 653, 950, 176, 602, 12, 520, 492, 379, 908, 243, 547, 578, 562, 460, 654, 391, 686, 758, 995, 518, 595, 264, 343, 601, 911, 149, 345, 650, 362, 510, 967, 128, 53, 551, 902, 71, 318, 592, 180, 50, 113, 435, 768, 37, 423, 405, 199, 583, 149, 24, 587, 825, 256, 368, 316, 748, 63, 262, 128, 730, 735, 11, 53, 125, 607, 122, 361]
t500 = [528, 412, 858, 626, 833, 972, 393, 8, 830, 784, 985, 659, 501, 575, 263, 697, 230, 314, 715, 885, 199, 140, 593, 154, 564, 748, 763, 197, 486, 805, 822, 423, 804, 908, 147, 744, 260, 583, 918, 374, 161, 471, 677, 713, 963, 99, 315, 557, 528, 463, 334, 15, 666, 93, 409, 506, 169, 988, 54, 551, 618, 952, 193, 109, 814, 520, 850, 515, 778, 846, 248, 839, 301, 319, 480, 927, 482, 387, 692, 73, 700, 61, 133, 920, 242, 265, 292, 368, 434, 525, 952, 965, 351, 57, 290, 321, 434, 153, 16, 131, 496, 194, 39, 29, 907, 800, 961, 634, 163, 798, 702, 645, 556, 948, 379, 213, 299, 803, 255, 76, 848, 307, 319, 182, 822, 123, 17, 851, 801, 707, 957, 386, 682, 775, 471, 406, 673, 654, 266, 107, 240, 738, 428, 594, 262, 278, 837, 16, 836, 596, 376, 698, 367, 544, 875, 925, 438, 655, 274, 162, 670, 466, 751, 233, 318, 314, 67, 375, 577, 416, 138, 650, 549, 914, 767, 80, 18, 86, 702, 527, 901, 528, 863, 472, 216, 100, 62, 867, 853, 694, 812, 587, 517, 845, 324, 556, 636, 269, 211, 339, 238, 139, 418, 929, 917, 689, 194, 425, 701, 70, 266, 593, 497, 865, 1, 815, 229, 214, 669, 896, 107, 879, 855, 909, 714, 696, 400, 793, 846, 568, 638, 935, 957, 801, 360, 825, 205, 314, 599, 490, 898, 711, 437, 871, 29, 26, 317, 185, 792, 563, 44, 944, 810, 236, 634, 247, 211, 235, 312, 793, 93, 339, 657, 55, 906, 719, 675, 182, 21, 24, 486, 658, 219, 40, 410, 57, 60, 97, 557, 208, 450, 931, 77, 152, 691, 234, 830, 783, 447, 295, 78, 74, 795, 952, 846, 560, 716, 348, 228, 284, 1, 937, 351, 217, 564, 732, 132, 673, 515, 280, 890, 465, 197, 254, 309, 308, 607, 917, 785, 687, 985, 164, 896, 296, 567, 577, 714, 985, 433, 341, 639, 320, 360, 486, 727, 964, 735, 421, 61, 115, 734, 981, 294, 279, 156, 120, 363, 343, 610, 248, 791, 397, 767, 892, 266, 410, 463, 611, 835, 315, 850, 23, 372, 239, 854, 588, 960, 521, 151, 699, 651, 775, 381, 116, 114, 115, 999, 111, 517, 713, 820, 722, 251, 725, 265, 686, 323, 232, 473, 80, 285, 698, 972, 848, 612, 583, 119, 328, 502, 661, 628, 430, 734, 239, 950, 325, 
584, 245, 443, 535, 994, 363, 670, 149, 938, 395, 184, 146, 326, 95, 392, 194, 802, 649, 455, 75, 377, 547, 225, 321, 47, 513, 803, 87, 823, 86, 448, 928, 867, 341, 931, 101, 796, 261, 800, 787, 745, 880, 757, 613, 852, 400, 165, 914, 660, 256, 732, 831, 124, 117, 316, 460, 273, 449, 993, 977, 650, 80, 85, 882, 110, 861, 52, 386, 161, 787, 38, 297, 799, 11, 945, 885, 727, 338, 332, 657, 182, 287, 494, 373, 616, 400, 315, 836, 231, 960, 432, 977, 337, 458]
t1000 = [5, 130, 297, 91, 254, 978, 950, 46, 147, 448, 574, 191, 979, 69, 368, 706, 952, 914, 801, 451, 89, 139, 243, 52, 329, 698, 455, 323, 66, 201, 664, 580, 584, 152, 764, 174, 986, 148, 644, 947, 291, 632, 53, 90, 528, 112, 58, 977, 222, 62, 775, 805, 19, 991, 779, 986, 468, 960, 97, 443, 426, 309, 865, 188, 206, 346, 798, 998, 495, 221, 810, 95, 368, 111, 431, 361, 187, 137, 915, 135, 596, 176, 296, 600, 757, 415, 92, 640, 735, 266, 232, 535, 683, 694, 799, 973, 226, 954, 834, 261, 115, 109, 648, 422, 541, 4, 236, 827, 816, 268, 692, 297, 758, 820, 696, 741, 179, 56, 328, 814, 101, 316, 530, 365, 215, 288, 59, 675, 718, 695, 953, 242, 728, 404, 760, 807, 998, 118, 922, 395, 366, 325, 882, 949, 521, 697, 247, 217, 115, 340, 3, 165, 949, 396, 3, 463, 263, 915, 345, 570, 316, 192, 36, 560, 648, 948, 609, 467, 442, 956, 326, 943, 753, 998, 577, 222, 835, 562, 847, 753, 905, 64, 349, 879, 393, 773, 698, 117, 335, 611, 396, 409, 334, 201, 820, 90, 956, 708, 569, 938, 132, 768, 930, 698, 52, 632, 290, 609, 854, 925, 158, 224, 860, 812, 107, 198, 560, 702, 360, 503, 239, 845, 539, 107, 697, 400, 459, 671, 617, 651, 855, 899, 862, 999, 563, 616, 867, 567, 956, 394, 303, 878, 994, 546, 638, 883, 913, 329, 609, 294, 801, 609, 244, 651, 26, 799, 441, 826, 276, 834, 78, 174, 901, 837, 549, 401, 922, 178, 445, 624, 402, 509, 985, 890, 169, 919, 183, 33, 815, 222, 713, 54, 163, 555, 892, 971, 26, 289, 669, 952, 114, 469, 202, 210, 353, 705, 988, 270, 589, 968, 7, 702, 182, 43, 289, 394, 972, 23, 459, 281, 568, 196, 669, 411, 19, 763, 346, 169, 911, 605, 851, 715, 624, 888, 911, 442, 62, 105, 540, 603, 211, 159, 628, 143, 435, 976, 918, 211, 830, 864, 56, 707, 732, 55, 5, 672, 700, 425, 67, 628, 84, 365, 117, 355, 370, 219, 805, 889, 694, 516, 486, 135, 907, 552, 841, 48, 728, 456, 306, 377, 58, 334, 26, 421, 85, 815, 876, 345, 57, 654, 140, 660, 720, 913, 257, 573, 364, 732, 192, 491, 793, 682, 510, 902, 812, 387, 94, 761, 86, 974, 28, 616, 522, 380, 286, 678, 733, 994, 
325, 84, 783, 40, 782, 461, 963, 684, 629, 294, 470, 528, 179, 846, 808, 866, 673, 541, 332, 243, 470, 141, 401, 188, 142, 491, 676, 981, 345, 36, 900, 27, 637, 201, 431, 78, 829, 493, 847, 73, 528, 957, 25, 733, 824, 195, 89, 914, 556, 163, 815, 45, 974, 637, 310, 455, 916, 222, 918, 638, 696, 239, 966, 510, 377, 934, 698, 23, 625, 698, 109, 142, 367, 406, 981, 856, 55, 188, 531, 761, 549, 338, 300, 33, 153, 642, 158, 606, 472, 632, 594, 187, 512, 591, 278, 997, 520, 629, 600, 610, 988, 652, 989, 600, 54, 613, 795, 517, 964, 727, 54, 674, 280, 492, 495, 141, 248, 568, 723, 608, 882, 565, 168, 196, 989, 537, 917, 47, 742, 929, 829, 931, 406, 160, 18, 214, 151, 273, 335, 742, 779, 166, 492, 429, 444, 417, 399, 459, 113, 552, 103, 162, 238, 803, 725, 957, 67, 328, 365, 647, 774, 991, 40, 81, 612, 992, 772, 259, 701, 844, 769, 438, 397, 749, 37, 69, 196, 257, 903, 342, 823, 793, 719, 50, 76, 747, 966, 727, 466, 618, 778, 167, 179, 19, 138, 999, 676, 25, 336, 347, 456, 544, 526, 174, 943, 44 | 12,288 |
/LabML04.ipynb | 9ab23d51f70abe7d641fa4fc8ef2982fcc9918b1 | [] | no_license | l1quid17/Notebook-PCD | https://github.com/l1quid17/Notebook-PCD | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 6,528 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **LabML04**
#
# Explain the initial dataset
#
# What type of transformation was performed?
#
# What is the meaning of this transformation?
#
# What other type of processing may be performed?
# Import the sklearn PCA class and pandas
from sklearn.decomposition import PCA
import pandas as pd
# Fetch the world data CSV from GitHub
df=pd.read_csv("https://raw.githubusercontent.com/masterfloss/data/main/worlddata.csv")
# Inspect the column dtypes
df.dtypes
# Coerce GDP to float (non-numeric entries become NaN)
df['GDP']=pd.to_numeric(df['GDP'], downcast='float', errors='coerce')
# Drop the Country column (a non-numeric identifier)
df1=df.drop(['Country'], axis=1)
# Drop every row that has any missing data
X=df1.dropna()
# +
# Build the PCA model keeping 4 principal components
PCAModel = PCA(n_components=4)
# Fit the model, then project the data onto the components
XPCA = PCAModel.fit(X).transform(X)
# -
# Show the number of features: from the original columns down to 4
print('Original number of features:', X.shape[1])
print('Reduced number of features:', XPCA.shape[1])
# Fraction of total variance explained by each retained component
PCAModel.explained_variance_ratio_
| 1,293 |
/LU_Day6_PythonAssignment.ipynb | 3bb17fcf87dc437cb259f359cb1a65a69e743482 | [] | no_license | Akshatha5/LetsUpgrade-Python-Essentials | https://github.com/Akshatha5/LetsUpgrade-Python-Essentials | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 1,122 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
'''
Q1) Convert to a dictionary in one line using list comprehension(without using zip method)
'''
# Pair the two lists positionally, truncating to the shorter one, and build
# the mapping with a dict comprehension (no zip, per the exercise statement).
List1=[1,2,3,4,5,6,7,8]
List2=["a","b","c","d","e"]
len1=min(len(List1),len(List2))
print({List1[i]: List2[i] for i in range(len1)})
tanford CS231N materials: http://cs231n.stanford.edu/
"""
# note to properly run this lab, you should execute all code blocks sequentially
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
from collections import namedtuple, defaultdict, deque
import numpy as np
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
# -
# ## Introduction to Chainer
#
# Chainer can be understood as Numpy plus the ability to record the computation graph of numerical operations to enable Automatic Differentiation. (Chainer actually also offers many other things; for example, a Numpy equivalent library that runs on GPU, but we will ignore them for now)
# Let's illustrate how Chainer works by a simple 1D regression task.
#
# Suppose we have observations from the following model $y = w x + b + \epsilon$ where $\epsilon \sim \mathcal{N}(0, 0.1)$ and the task is to estimate the linear model parameters $w, b$ from data.
# first generate some observations
# data_x: 100 evenly spaced points spanning [-0.5, 0.5]
true_a = 1.3
true_b = 0.4
data_x = (np.arange(100) / 99.0 - .5).astype(np.float32) # Chainer assumes all the cpu computation is done in float32
# data_y: the linear model plus Gaussian noise with std 0.1
data_y = (data_x * true_a + true_b + np.random.randn(*data_x.shape) * 0.1).astype(np.float32)
_ = plt.scatter(data_x, data_y, c='b')
# Chainer provides an abstraction called `Link` that describe some computation and keeps track of parameters for it. For instance, a `Linear` link describes a linear map on input and keeps track of `w` and bias `b`.
# +
model = L.Linear(in_size=1, out_size=1) # input is 1D data and output is also 1D data
# Chainer will randomly initialize `w` and `b` for us.
# we can take a look at their values
print("w:", model.W)
print("b:", model.b)
# model.W and model.b have type `chainer.Variable`,
# which is a wrapper around Numpy array
assert isinstance(model.W, chainer.Variable)
# operations that involve `chainer.Variable` will produce
# `chainer.Variable` and this records the computation graph
var_result = model.W + 123 # some random computation
print("Operations on chainer.Variable: %s, type: %s" % (var_result, type(var_result)))
# the underlying numpy array can be accessed by `data` attribute
print("numpy arrays:", model.W.data, var_result.data)
# +
# A chainer link is a callable object. calling it performs the
# forward computation. (in this case, it performs Wx + b)
# data_x[:, None] turns the 1-D array into shape (100, 1): one row per sample
model_y = model(data_x[:, None]) # chainer's link usually assumes input is [Batch Size, Input Dimension]
# `model_y` is a chainer variable so we use `.data` to access its numpy array for plotting
# we can plot the model's current fit in red. it should be terrible because we haven't trained it yet
_ = plt.plot(data_x, model_y.data[:,0], c='r')
_ = plt.scatter(data_x, data_y, c='b')
_ = plt.title("Initial model")
# +
# now let's walk through how to perform forward computation
# and use AD to get gradients
# first we clear the gradients that are stored in the model
model.cleargrads()
# as we have seen we can perform forward computation by calling the link
model_y = model(data_x[:, None])
# remember that `model_y` is a chainer variable. to operate on chainer variable
# we will use functions from chainer.functions to operate on those objects.
# this is a mean-squared-error loss between predictions and observations
loss = F.mean(F.square(model_y - data_y[:, None]))
# `loss` is a scalar chainer variable
assert isinstance(loss, chainer.Variable)
print("loss", loss)
# calculating gradients d loss /d params is as simple as
loss.backward()
# we can inspect the gradient of loss with respect to W
print("dloss/dW", model.W.grad)
# -
# Now that we know how to calculate gradients, we can code up a simple loop to perform gradient descent to train this model:
#
# (Hint: if you run into weird problems, maybe the state has been messed up and you can try re-runing all the code blocks from the beginning)
# +
# now we can perform gradient descent to improve this model
model = L.Linear(in_size=1, out_size=1)
losses = []
# alpha is the step size (learning rate) for plain full-batch gradient descent
alpha = 5e-1
for i in range(100):
    model.cleargrads()
    loss = F.mean(F.square(model(data_x[:, None]) - data_y[:, None]))
    losses.append(float(loss.data))
    ## *** YOUR CODE HERE TO PERFORM GRADIENT DESCENT ***
    ## Hint: you could access gradients with model.W.grad, model.b.grad
    ## Hint2: you could write data into a parameter with model.W.data[:] = some_numpy_array
    ## Hint3: if your model doesn't learn, remember to try different learning rates
    # Solution: backprop, then step each parameter against its gradient.
    loss.backward()
    model.W.data[:] = model.W.data[:] - alpha * model.W.grad
    model.b.data[:] = model.b.data[:] - alpha * model.b.grad
    if i % 25 == 0:
        print("Itr", i, "loss:", loss)
plt.plot(np.array(losses))
plt.title("Learning curve")
plt.figure()
# Overlay the learned line (red) on the noisy observations (blue).
plt.plot(data_x, model(data_x[:, None])[:,0].data, c='r')
plt.scatter(data_x, data_y, c='b')
_ = plt.title("Trained model fitness")
# -
# ## Train your first deep model
#
# Now we have learned the basics of Chainer. We can use it to train a deep model to classify MNIST digits. We will train a model on the MNIST dataset because the dataset is small.
#
# First we load the data and see what the images look like:
# Each dataset entry is a (flattened 28x28 image, integer label) pair.
train, test = chainer.datasets.get_mnist()
# use train[data_point_index] to access data
print("train[i][0] is the ith image that's flattened, and has shape:", train[12][0].shape)
print("train[i][1] is the ith image's label, such as:", train[12][1])
# here we visualize two of them
plt.imshow(train[12][0].reshape([28, 28,]))
plt.title("Label: %s" % train[12][1])
plt.figure()
plt.imshow(train[42][0].reshape([28, 28,]))
_ = plt.title("Label: %s" % train[42][1])
# Next we will provide some boilerplate code and train a linear classifier as an example:
# +
def run(model, batchsize=16, num_epochs=2):
    """Train `model` on MNIST with softmax cross-entropy, printing running stats.

    Parameters:
        model: a chainer Link mapping a batch of flattened images to 10 logits.
        batchsize: number of images per optimization step.
        num_epochs: number of full passes over the training set.
    """
    optimizer = chainer.optimizers.Adam() # we will use chainer's Adam implementation instead of writing our own gradient based optimization
    optimizer.setup(model)
    # Rolling window: each statistic keeps only its 25 most recent values.
    stats = defaultdict(lambda: deque(maxlen=25))
    for epoch in range(num_epochs):
        # repeat=False: the iterator is exhausted after one pass (one epoch).
        train_iter = chainer.iterators.SerialIterator(train, batchsize, repeat=False, shuffle=True)
        for itr, batch in enumerate(train_iter):
            # batch is a list of (image, label) pairs; stack them into arrays.
            xs = np.concatenate([datum[0][None, :] for datum in batch])
            ys = np.array([datum[1] for datum in batch])
            logits = model(xs)
            loss = F.softmax_cross_entropy(logits, ys)
            model.cleargrads()
            loss.backward()
            optimizer.update()
            # calculate stats; divide by len(ys), not batchsize, so the final
            # (possibly smaller) batch of the epoch is scored correctly
            stats["loss"].append(float(loss.data))
            stats["accuracy"].append(float((logits.data.argmax(1) == ys).sum() / len(ys)))
            if itr % 300 == 0:
                print("; ".join("%s: %s" % (k, np.mean(vs)) for k, vs in stats.items()))
# try a simple linear model
run(L.Linear(None, 10))
# -
# Next we will try to improve performance by training an MLP instead. A partial implementation is provided for you to fill in:
# +
class MLP(chainer.Chain):
    """Fully connected net: two hidden ReLU layers followed by a linear output."""

    def __init__(self, n_units, n_out):
        super(MLP, self).__init__()
        with self.init_scope():
            # the size of the inputs to each layer will be inferred
            self.l1 = L.Linear(None, n_units)  # n_in -> n_units
            self.l2 = L.Linear(None, n_units)  # n_units -> n_units
            self.l3 = L.Linear(None, n_out)  # n_units -> n_out

    def __call__(self, x):
        # Forward pass: affine -> ReLU -> affine -> ReLU -> affine (raw logits).
        hidden_one = F.relu(self.l1(x))
        hidden_two = F.relu(self.l2(hidden_one))
        return self.l3(hidden_two)

run(MLP(200, 10))
# -
# Next you should try to implement logging test loss and see if the model is overfitting.
# +
def better_run(model, batchsize=16, num_epochs=2):
    """Train `model` on MNIST, periodically logging accuracy on the test set.

    Same training loop as `run`, but every 300 iterations the model is also
    evaluated on the held-out test set so train/test accuracy can be compared
    (a large gap indicates overfitting).
    """
    optimizer = chainer.optimizers.Adam() # we will use chainer's Adam implementation instead of writing our own gradient based optimization
    optimizer.setup(model)
    # Rolling window: each statistic keeps only its 25 most recent values.
    stats = defaultdict(lambda: deque(maxlen=25))
    for epoch in range(num_epochs):
        train_iter = chainer.iterators.SerialIterator(train, batchsize, repeat=False, shuffle=True)
        for itr, batch in enumerate(train_iter):
            xs = np.concatenate([datum[0][None, :] for datum in batch])
            ys = np.array([datum[1] for datum in batch])
            logits = model(xs)
            loss = F.softmax_cross_entropy(logits, ys)
            model.cleargrads()
            loss.backward()
            optimizer.update()
            # calculate stats; len(ys) (not batchsize) handles the final short batch
            stats["loss"].append(float(loss.data))
            stats["train_accuracy"].append(float((logits.data.argmax(1) == ys).sum() / len(ys)))
            if itr % 300 == 0:
                # Evaluate on the whole test set in large chunks.
                test_iter = chainer.iterators.SerialIterator(test, 10000, repeat=False, shuffle=False)
                for test_itr, test_batch in enumerate(test_iter):
                    xs_test = np.concatenate([datum[0][None, :] for datum in test_batch])
                    ys_test = np.array([datum[1] for datum in test_batch])
                    y_hat_test = model(xs_test)
                    # divide by the actual batch length rather than a hard-coded
                    # 10000 so the stat stays correct if the test set size changes
                    stats["test_accuracy"].append(float((y_hat_test.data.argmax(1) == ys_test).sum() / len(ys_test)))
                print("; \t".join("%s: %s" % (k, np.mean(vs)) for k, vs in stats.items()))
better_run(MLP(200, 10))
# -
# Try different variants!
#
# - Does using a ConvNet improve performance (reduce overfitting?)
# - Try changing the learning rate and observe the effect
# - Does the model train if you give it correlated gradients? (consecutively sample many batches of "1", then many batches of "2", ... etc
| 10,534 |
/dev/_downloads/d12911920e4d160c9fd8c97cffdda6b7/time_frequency_erds.ipynb | 43405ca3f50f66aabb3f19734442e203be8931a7 | [] | permissive | mne-tools/mne-tools.github.io | https://github.com/mne-tools/mne-tools.github.io | 12 | 16 | BSD-3-Clause | 2023-05-05T19:04:32 | 2023-01-27T08:59:05 | HTML | Jupyter Notebook | false | false | .py | 11,245 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
#
# # Compute and visualize ERDS maps
#
# This example calculates and displays ERDS maps of event-related EEG data.
# ERDS (sometimes also written as ERD/ERS) is short for event-related
# desynchronization (ERD) and event-related synchronization (ERS)
# :footcite:`PfurtschellerLopesdaSilva1999`. Conceptually, ERD corresponds to a
# decrease in power in a specific frequency band relative to a baseline.
# Similarly, ERS corresponds to an increase in power. An ERDS map is a
# time/frequency representation of ERD/ERS over a range of frequencies
# :footcite:`GraimannEtAl2002`. ERDS maps are also known as ERSP (event-related
# spectral perturbation) :footcite:`Makeig1993`.
#
# In this example, we use an EEG BCI data set containing two different motor
# imagery tasks (imagined hand and feet movement). Our goal is to generate ERDS
# maps for each of the two tasks.
#
# First, we load the data and create epochs of 5s length. The data set contains
# multiple channels, but we will only consider C3, Cz, and C4. We compute maps
# containing frequencies ranging from 2 to 35Hz. We map ERD to red color and ERS
# to blue color, which is customary in many ERDS publications. Finally, we
# perform cluster-based permutation tests to estimate significant ERDS values
# (corrected for multiple comparisons within channels).
#
# +
# Authors: Clemens Brunner <clemens.brunner@gmail.com>
# Felix Klotzsche <klotzsche@cbs.mpg.de>
#
# License: BSD-3-Clause
# -
# As usual, we import everything we need.
#
#
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import TwoSlopeNorm
import pandas as pd
import seaborn as sns
import mne
from mne.datasets import eegbci
from mne.io import concatenate_raws, read_raw_edf
from mne.time_frequency import tfr_multitaper
from mne.stats import permutation_cluster_1samp_test as pcluster_test
# First, we load and preprocess the data. We use runs 6, 10, and 14 from
# subject 1 (these runs contains hand and feet motor imagery).
#
#
# +
# Runs 6, 10 and 14 of subject 1 contain the hand/feet motor-imagery tasks.
fnames = eegbci.load_data(subject=1, runs=(6, 10, 14))
raw = concatenate_raws([read_raw_edf(f, preload=True) for f in fnames])
raw.rename_channels(lambda x: x.strip("."))  # remove dots from channel names
# Map annotation labels T1/T2 to numeric event codes 2/3 (hands/feet).
events, _ = mne.events_from_annotations(raw, event_id=dict(T1=2, T2=3))
# -
# Now we can create 5-second epochs around events of interest.
#
#
# +
# Analysis window: 1 s before to 4 s after each event.
tmin, tmax = -1, 4
event_ids = dict(hands=2, feet=3)  # map event IDs to tasks
# Epochs extend 0.5 s beyond the analysis window on each side -- presumably to
# absorb time-frequency edge effects before cropping below; TODO confirm.
epochs = mne.Epochs(
    raw,
    events,
    event_ids,
    tmin - 0.5,
    tmax + 0.5,
    picks=("C3", "Cz", "C4"),
    baseline=None,
    preload=True,
)
# -
#
# Here we set suitable values for computing ERDS maps. Note especially the
# ``cnorm`` variable, which sets up an *asymmetric* colormap where the middle
# color is mapped to zero, even though zero is not the middle *value* of the
# colormap range. This does two things: it ensures that zero values will be
# plotted in white (given that below we select the ``RdBu`` colormap), and it
# makes synchronization and desynchronization look equally prominent in the
# plots, even though their extreme values are of different magnitudes.
#
#
# +
freqs = np.arange(2, 36)  # frequencies from 2-35Hz
vmin, vmax = -1, 1.5  # set min and max ERDS values in plot
baseline = (-1, 0)  # baseline interval (in s)
cnorm = TwoSlopeNorm(vmin=vmin, vcenter=0, vmax=vmax)  # min, center & max ERDS
# Shared keyword arguments for the permutation cluster tests below.
kwargs = dict(
    n_permutations=100, step_down_p=0.05, seed=1, buffer_size=None, out_type="mask"
)  # for cluster test
# -
# Finally, we perform time/frequency decomposition over all epochs.
#
#
# +
# Per-epoch (average=False) multitaper time-frequency decomposition;
# decim=2 halves the time resolution to save memory.
tfr = tfr_multitaper(
    epochs,
    freqs=freqs,
    n_cycles=freqs,
    use_fft=True,
    return_itc=False,
    average=False,
    decim=2,
)
# Crop off the 0.5 s padding and express power as percent change from baseline.
tfr.crop(tmin, tmax).apply_baseline(baseline, mode="percent")
for event in event_ids:
    # select desired epochs for visualization
    tfr_ev = tfr[event]
    fig, axes = plt.subplots(
        1, 4, figsize=(12, 4), gridspec_kw={"width_ratios": [10, 10, 10, 1]}
    )
    for ch, ax in enumerate(axes[:-1]):  # for each channel
        # tfr_ev.data[:, ch] selects one channel across epochs -- channel is
        # presumably the second axis of EpochsTFR data; confirm with mne docs.
        # positive clusters
        _, c1, p1, _ = pcluster_test(tfr_ev.data[:, ch], tail=1, **kwargs)
        # negative clusters
        _, c2, p2, _ = pcluster_test(tfr_ev.data[:, ch], tail=-1, **kwargs)
        # note that we keep clusters with p <= 0.05 from the combined clusters
        # of two independent tests; in this example, we do not correct for
        # these two comparisons
        c = np.stack(c1 + c2, axis=2)  # combined clusters
        p = np.concatenate((p1, p2))  # combined p-values
        mask = c[..., p <= 0.05].any(axis=-1)
        # plot TFR (ERDS map with masking)
        tfr_ev.average().plot(
            [ch],
            cmap="RdBu",
            cnorm=cnorm,
            axes=ax,
            colorbar=False,
            show=False,
            mask=mask,
            mask_style="mask",
        )
        ax.set_title(epochs.ch_names[ch], fontsize=10)
        ax.axvline(0, linewidth=1, color="black", linestyle=":")  # event
        if ch != 0:
            ax.set_ylabel("")
            ax.set_yticklabels("")
    fig.colorbar(axes[0].images[-1], cax=axes[-1]).ax.set_yscale("linear")
    fig.suptitle(f"ERDS ({event})")
    plt.show()
# -
# Similar to `~mne.Epochs` objects, we can also export data from
# `~mne.time_frequency.EpochsTFR` and `~mne.time_frequency.AverageTFR` objects
# to a :class:`Pandas DataFrame <pandas.DataFrame>`. By default, the `time`
# column of the exported data frame is in milliseconds. Here, to be consistent
# with the time-frequency plots, we want to keep it in seconds, which we can
# achieve by setting ``time_format=None``:
#
#
# Export to a pandas DataFrame; time_format=None keeps the time column in
# seconds, consistent with the time-frequency plots above.
df = tfr.to_data_frame(time_format=None)
df.head()
# This allows us to use additional plotting functions like
# :func:`seaborn.lineplot` to plot confidence bands:
#
#
# +
df = tfr.to_data_frame(time_format=None, long_format=True)
# Map to frequency bands:
# The "_" entry only supplies the lower edge (0 Hz) for the first bin.
freq_bounds = {"_": 0, "delta": 3, "theta": 7, "alpha": 13, "beta": 35, "gamma": 140}
df["band"] = pd.cut(
    df["freq"], list(freq_bounds.values()), labels=list(freq_bounds)[1:]
)
# Filter to retain only relevant frequency bands:
freq_bands_of_interest = ["delta", "theta", "alpha", "beta"]
df = df[df.band.isin(freq_bands_of_interest)]
df["band"] = df["band"].cat.remove_unused_categories()
# Order channels for plotting:
df["channel"] = df["channel"].cat.reorder_categories(("C3", "Cz", "C4"), ordered=True)
# One subplot per (band, channel); seaborn draws confidence bands per condition.
g = sns.FacetGrid(df, row="band", col="channel", margin_titles=True)
g.map(sns.lineplot, "time", "value", "condition", n_boot=10)
axline_kw = dict(color="black", linestyle="dashed", linewidth=0.5, alpha=0.5)
g.map(plt.axhline, y=0, **axline_kw)
g.map(plt.axvline, x=0, **axline_kw)
g.set(ylim=(None, 1.5))
g.set_axis_labels("Time (s)", "ERDS")
g.set_titles(col_template="{col_name}", row_template="{row_name}")
g.add_legend(ncol=2, loc="lower center")
g.fig.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.08)
# -
# Having the data as a DataFrame also facilitates subsetting,
# grouping, and other transforms.
# Here, we use seaborn to plot the average ERDS in the motor imagery interval
# as a function of frequency band and imagery condition:
#
#
# +
# Average ERDS within the motor-imagery interval (time > 1 s) per
# condition/epoch/band/channel combination.
df_mean = (
    df.query("time > 1")
    .groupby(["condition", "epoch", "band", "channel"])[["value"]]
    .mean()
    .reset_index()
)
g = sns.FacetGrid(
    df_mean, col="condition", col_order=["hands", "feet"], margin_titles=True
)
g = g.map(
    sns.violinplot,
    "channel",
    "value",
    "band",
    n_boot=10,
    cut=0,
    palette="deep",
    order=["C3", "Cz", "C4"],
    hue_order=freq_bands_of_interest,
    linewidth=0.5,
).add_legend(ncol=4, loc="lower center")
g.map(plt.axhline, **axline_kw)
g.set_axis_labels("", "ERDS")
g.set_titles(col_template="{col_name}", row_template="{row_name}")
g.fig.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.3)
# -
# ## References
# .. footbibliography::
#
#
| 8,196 |
/실습과제/5day/5day_SoftmaxRegression_answer.ipynb | 471397c9388dab7a555d76134ecc10235cfad5de | [] | no_license | dlsdndia/MLDL- | https://github.com/dlsdndia/MLDL- | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 125,278 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 문제) 제공 되는 'auto-mpg.csv' 파일에 대하여 EDA를 수행 후 소프트맥스 회귀 모델을 구현
# - ‘horsepower’를 저출력, 보통출력, 고출력을 갖는 범주형을 추가한 후 소프트맥스 회귀 모델을 작성
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras import optimizers
# -
# IPython display setting - raise the limit on the number of columns shown
pd.set_option( 'display.max_columns', 10 )
# # Softmax regression model
# ## 1. Data preparation
# Load the csv file into a DataFrame (file has no header row)
raw_df = pd.read_csv( 'auto-mpg.csv', header = None, encoding = 'utf-8' )
# Assign column names
raw_df.columns = [ 'mpg', 'cylinders', 'displacement', 'horsepower', 'weight',
                   'acceleration', 'model', 'origin', 'name' ]
raw_df.head()
# Work on a copy so raw_df stays untouched
df = raw_df.copy()
df.head()
# ### Remove the 'name' variable (free text, not usable as a feature)
df.pop( 'name' )
df.head()
# ## 2. Data exploration
df.info()
df.describe()
# #### 'horsepower' (engine power) is not numeric yet, so convert its dtype;
# '?' placeholders become NaN and those rows are dropped
df[ 'horsepower' ].unique()
df[ 'horsepower' ].replace( '?', np.nan, inplace = True )
df.dropna( subset = [ 'horsepower' ], axis = 0, inplace = True )
df[ 'horsepower' ].unique()
df[ 'horsepower' ] = df[ 'horsepower' ].astype( 'float' )
# One-hot encode the categorical 'origin' column (1=USA, 2=Europe, 3=Japan)
origin = df.pop( 'origin' )
df['USA'] = ( origin == 1 ) * 1.0
df['Europe'] = ( origin == 2 ) * 1.0
df['Japan'] = ( origin == 3 ) * 1.0
# np.histogram: obtain the boundary values that split horsepower into 3 bins
count, bin_dividers = np.histogram( df[ 'horsepower' ], bins = 3 )
# Bin labels: low / medium / high output (kept in Korean -- they are data values)
bin_names = [ '저출력', '보통출력', '고출력' ]
# pd.cut: assign each observation to one of the 3 bins
df[ 'hp_bin' ] = pd.cut( x = df[ 'horsepower' ],
                         bins = bin_dividers,
                         labels = bin_names,
                         include_lowest = True )
# Recode the bin labels as ordinal integers 0/1/2 for the classifier
df[ 'hp_bin' ] = df[ 'hp_bin' ].replace(['저출력','보통출력', '고출력'], [ 0, 1, 2 ] )
df.head()
df.tail()
df.info()
df.describe()
# ### Split into training and test data sets (80/20, fixed seed)
train_df = df.sample( frac = 0.8, random_state = 0 )
test_df = df.drop( train_df.index )
print( 'train data 개수 : {:5d}'.format( len( train_df ) ) )
print( 'test data 개수 : {:5d}'.format( len( test_df ) ) )
# Training data
train_df.head()
# Test data
test_df.head()
# ## 3. Feature selection
#
# - Select the candidate variables for the softmax regression model.
# - 'hp_bin' is the prediction target ( y ); the candidate independent
#   variables ( X ) are 'cylinders', 'displacement', 'horsepower', 'weight'
df = train_df[ [ 'hp_bin', 'cylinders', 'displacement', 'horsepower', 'weight' ] ]
df.head()
df.describe()
# ### Normalization: keep per-column mean/std of the training features only
df_stats = df.describe()
df_stats.pop( "hp_bin" )
df_stats = df_stats.transpose()
df_stats
# ### Separate y ( the label )
y_train = np.asarray( train_df.pop( 'hp_bin' ) )
y_test = np.asarray( test_df.pop( 'hp_bin' ) )
# One-hot encode y (3 classes)
y_train = to_categorical( y_train )
y_test = to_categorical( y_test )
print( len( y_train ), len( y_test ) )
# +
def normalization(x):
    """Z-score each column of x using the training-set statistics in df_stats."""
    mean = df_stats['mean']
    std = df_stats['std']
    return x.sub(mean).div(std)
# Apply the training-set statistics to both splits. Columns absent from
# df_stats (e.g. the origin dummies) presumably become NaN via pandas column
# alignment, but only the four modeled features are selected later -- confirm.
normed_train_df = normalization( train_df )
normed_test_df = normalization( test_df )
# -
# ## 4. Model training
# ### Convert train / test data to NumPy arrays
# # Softmax regression model
# ### Convert train / test data to NumPy arrays
X_train = np.asarray( normed_train_df[ [ 'cylinders', 'displacement', 'horsepower', 'weight' ] ] )
X_test = np.asarray( normed_test_df[ [ 'cylinders', 'displacement', 'horsepower', 'weight' ] ] )
print( len( X_train ), len( X_test ) )
print( len( y_train ), len( y_test ) )
# ### Build the Keras model: a single Dense layer with softmax = softmax regression
X_train = np.array( X_train )
X_test = np.array( X_test )
model = Sequential()
model.add( Dense( 3, input_dim = 4, activation = 'softmax' ) )
# NOTE(review): this SGD optimizer is created but never used -- compile()
# below passes 'adam'; remove one or the other after confirming intent.
sgd = optimizers.SGD( lr = 0.0001 )
model.compile( optimizer = 'adam' ,loss = 'categorical_crossentropy', metrics = [ 'accuracy' ] )
# 20% of the training data is held out for validation during fitting
history = model.fit( X_train, y_train, batch_size = 1, epochs = 200, validation_split = 0.2 )
# +
# Pull the per-epoch loss curves recorded by model.fit().
loss = history.history[ 'loss' ]
val_loss = history.history[ 'val_loss' ]
epochs = range( 1, len( history.history[ 'accuracy' ] ) + 1 )
# +
# Plot training vs. validation loss. The red (validation) curve was
# previously mislabeled 'Training loss'; fixed to 'Validation loss'.
plt.plot( epochs, loss, 'b', label = 'Training loss' )
plt.plot( epochs, val_loss, 'r', label = 'Validation loss' )
plt.title( 'Training and validation loss' )
plt.xlabel( 'Epochs' )
plt.ylabel( 'Loss' )
plt.legend()
plt.show()
# -
# Tabulate the training history and show the last epochs.
hist = pd.DataFrame(history.history)
hist[ 'epoch' ] = history.epoch
hist.tail()
# # 5. Evaluation
# evaluate() returns [loss, accuracy]; index 1 is the test accuracy
print( '테스트 정확도 : {:.4f}'.format( model.evaluate( X_test, y_test )[ 1 ] ) )
| 4,598 |
/file_analysis/ngram_from_json_II.ipynb | 72af694686905a02bfd2353d85fa521b4c124ce8 | [] | no_license | karthikaS03/IoT-Project | https://github.com/karthikaS03/IoT-Project | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 402,668 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !start .
import tensorflow as tf
import tensorflow_datasets
from transformers import *
# Load dataset, tokenizer, model from pretrained model/vocabulary
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
model = TFBertForSequenceClassification.from_pretrained('bert-base-cased')
data = tensorflow_datasets.load('glue/mrpc')
# NOTE(review): the next two lines reference `train` before it is read from
# csv below -- notebook cells appear to have been executed out of order, and
# `dataset` is never used afterwards. Confirm the intended execution order.
target = train.pop('label')
dataset = tf.data.Dataset.from_tensor_slices((train.values, target.values))
import pandas as pd
train = pd.read_csv('glue/train.txt', sep='\t')
test = pd.read_csv('glue/test.txt', sep='\t')
train.columns = ['label', 'idx', 'na', 'sentence1', 'sentence2']
test.columns = ['label', 'idx', 'na', 'sentence1', 'sentence2']
# Prepare dataset for GLUE as a tf.data.Dataset instance
train_dataset = glue_convert_examples_to_features(data['train'], tokenizer, max_length=128, task='mrpc')
# NOTE(review): the validation data is built from data['test']; confirm
# whether the 'validation' split was intended here.
valid_dataset = glue_convert_examples_to_features(data['test'], tokenizer, max_length=128, task='mrpc')
train_dataset = train_dataset.shuffle(100).batch(32).repeat(2)
valid_dataset = valid_dataset.batch(64)
train_dataset
# +
# Prepare training: Compile tf.keras model with optimizer, loss and learning rate schedule
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
# from_logits=True: the loss consumes raw (unnormalized) scores, which is
# presumably what the TFBert classification head emits -- confirm.
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
# Train and evaluate using tf.keras.Model.fit()
history = model.fit(train_dataset, epochs=2, steps_per_epoch=115,
                    validation_data=valid_dataset, validation_steps=7)
# Load the TensorFlow model in PyTorch for inspection
model.save_pretrained('./save/')
pytorch_model = BertForSequenceClassification.from_pretrained('./save/', from_tf=True)
# Quickly test a few predictions - MRPC is a paraphrasing task, let's see if our model learned the task
sentence_0 = "This research was consistent with his findings."
sentence_1 = "His findings were compatible with this research."
sentence_2 = "His findings were not compatible with this research."
inputs_1 = tokenizer.encode_plus(sentence_0, sentence_1, add_special_tokens=True, return_tensors='pt')
inputs_2 = tokenizer.encode_plus(sentence_0, sentence_2, add_special_tokens=True, return_tensors='pt')
pred_1 = pytorch_model(inputs_1['input_ids'], token_type_ids=inputs_1['token_type_ids'])[0].argmax().item()
pred_2 = pytorch_model(inputs_2['input_ids'], token_type_ids=inputs_2['token_type_ids'])[0].argmax().item()
print("sentence_1 is", "a paraphrase" if pred_1 else "not a paraphrase", "of sentence_0")
print("sentence_2 is", "a paraphrase" if pred_2 else "not a paraphrase", "of sentence_0")
_grammed]
return {
'f' : featurres,
'nf' : scaled_features,
'rf' : complete_grammed,
'df' : dirty_data,
'drf': complete_dirty,
'sl' : seqList
}
# -
# # load the files from json
#
#
# +
# Build feature sets from the device-traffic json logs via makeFeatures
# (defined earlier in this notebook); 'nf' holds the scaled feature rows.
file_to_load = 'long_running_multip_light_key.json'
training_set = makeFeatures(os.path.join( '../files/json/', file_to_load ),clean_by_time=False)
test_data = makeFeatures ( os.path.join( '../files/json/','bulb_OnOff_30sgap.json' ) ,clean_by_time=False)
# -
plt.hist( [x[1] for x in training_set['f']] )
# # PCA
from sklearn.decomposition import PCA
# Project the scaled features onto their first 3 principal components.
pca = PCA(n_components=3)
reduced_features = pca.fit_transform(training_set['nf'])
# +
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
ax.scatter( xs=[x[0] for x in reduced_features] , ys=[x[1] for x in reduced_features], zs=[x[2] for x in reduced_features] )
# plt.scatter( [ x[0] for x in reduced_features],[ x[1] for x in reduced_features] )
# -
from sklearn.cluster import KMeans
# Cluster the (un-reduced) scaled features into 10 groups.
classifier = KMeans(n_clusters=10)
classifier.fit(training_set['nf'])
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
# `colors` is defined elsewhere in the notebook -- presumably one color per
# cluster index; confirm it has at least n_clusters entries.
for x in range(len(classifier.cluster_centers_)):
    ax.scatter( xs=classifier.cluster_centers_[x][0] , ys=classifier.cluster_centers_[x][1], zs=classifier.cluster_centers_[x][2], color=colors[x] )
y = classifier.predict(training_set['nf'])
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
# NOTE(review): the z axis here uses raw feature 3 while the cluster-center
# plot above uses feature 2 -- confirm which axes are intended to match.
for x in range(len(y)):
    ax.scatter( xs=training_set['nf'][x][0] , ys=training_set['nf'][x][1], zs=training_set['nf'][x][3], color=colors[y[x]] )
# # test
#
#
len(test_data['nf'])
# Assign each test row to its nearest training cluster.
y2= classifier.predict( test_data['nf'])
y2
# fig = plt.figure(figsize=(8,8))
# ax = fig.add_subplot(111, projection='3d')
# for x in range(len(y2)):
#     ax.scatter( xs=test_data[x][0] , ys=test_data[x][1], zs=test_data[x][3], color=colors[y2[x]] )
# Cluster-label frequency tables for test (y2) and training (y) predictions.
np.array(np.unique( y2, return_counts=True )).T
np.array(np.unique( y, return_counts=True )).T
# Print each training sequence alongside its assigned cluster.
for x in range( len( training_set['nf'] ) ):
    print( "%d - %s " % ( y[x], training_set['sl' ][x] ) )
re_at_50(df, 1, 2)
p2_g1 = get_score(df, 2, 1)
p2_g2 = get_score(df, 2, 2)
p2_score_diff = p2_g2 - p2_g1
p2_g1_50 = get_score_at_50(df, 2, 1)
p2_g2_50 = get_score_at_50(df, 2, 2)
return p1_g1, p1_g2, p2_g1, p2_g2, p1_score_diff, p2_score_diff, p1_g1_50,p1_g2_50, p2_g1_50, p2_g2_50
''''if version == 'm':
p1_g1 = get_score(df, 1, 1)
p1_g2 = get_score(df, 1, 2)
p1_score_diff = p1_g2 - p1_g1
p2_g1 = get_score(df, 2, 1)
p2_g2 = get_score(df, 2, 2)
p2_score_diff = p2_g2 - p2_g1
return p1_g1, p1_g2, p2_g1, p2_g2, p1_score_diff, p2_score_diff'''
return p1_g1, p1_g2, p1_score_diff, p1_g1_50, p1_g2_50
# In[9]:
#separate data frames for p1 and p2
def separate_df(df, version):
    """Split trial data by player and game number.

    Parameters:
        df: DataFrame with at least 'player' and 'game number' columns.
        version: study version code; for 's' and 'c' there is no second
            human player, so only the player-1 frames are returned.

    Returns:
        (p1_g1, p1_g2) when version is 's' or 'c', otherwise
        (p1_g1, p1_g2, p2_g1, p2_g2).
    """
    df_p1 = df[df['player'] == 1]
    # Filter on the already-subset frame's own column. The previous code
    # masked df_p1 with a boolean Series built from the full df, which relies
    # on pandas silently reindexing the key (UserWarning) and is fragile.
    df_p1_g1 = df_p1[df_p1['game number'] == 1]
    df_p1_g2 = df_p1[df_p1['game number'] == 2]
    # (optional per-game trial windows, e.g. [20:40], were applied here in
    # earlier analyses and can be reinstated on these frames if needed)
    if version == 's' or version == 'c':
        return df_p1_g1, df_p1_g2
    df_p2 = df[df['player'] == 2]
    df_p2_g1 = df_p2[df_p2['game number'] == 1]
    df_p2_g2 = df_p2[df_p2['game number'] == 2]
    return df_p1_g1, df_p1_g2, df_p2_g1, df_p2_g2
# In[10]:
#notes from Binghong:
#see reshape to restrict csv file size
#https://urldefense.com/v3/__https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.reshape.html__;!!Mih3wA!SwZbl3yG75UWaB_c9Pq_T5wxVHgFZMbUZ5HHf5pZDf119g1JHaZr-dX9KXz57to$
#https://urldefense.com/v3/__https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html__;!!Mih3wA!SwZbl3yG75UWaB_c9Pq_T5wxVHgFZMbUZ5HHf5pZDf119g1JHaZr-dX9PslIsfw$
#https://urldefense.com/v3/__https://www.w3schools.com/python/numpy_array_shape.asp__;!!Mih3wA!SwZbl3yG75UWaB_c9Pq_T5wxVHgFZMbUZ5HHf5pZDf119g1JHaZr-dX9xnFVd7o$
#https://urldefense.com/v3/__https://stackoverflow.com/__;!!Mih3wA!SwZbl3yG75UWaB_c9Pq_T5wxVHgFZMbUZ5HHf5pZDf119g1JHaZr-dX9koWV6hk$
#use drop to filter data frames or cut
#Df=df[20:]
#https://urldefense.com/v3/__https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop.html__;!!Mih3wA!SwZbl3yG75UWaB_c9Pq_T5wxVHgFZMbUZ5HHf5pZDf119g1JHaZr-dX9a46rcc8$
#drop columns, python, df
# # ANOVA
# In[11]:
#Note from Bar 7/23/20: Not sure what pingouin is?
#import pingouin as pg
# -
# # SMAB4 Hand Coding
# NOTE(review): the frame built from the raw csv below is immediately
# discarded -- df is reassigned from organize_trial_by_trial_hand_code two
# lines later, so the reader/drop steps only matter if inspected interactively.
# Also, open() here has no matching close(), leaking a file handle.
reader = csv.reader( open('data/export_test.csv'))
game_data = [row for row in reader]
df = pd.DataFrame(np.array(game_data))
# Drop the first 9 rows (header/preamble lines of the export).
df = df.drop(range(0,9))
df = organize_trial_by_trial_hand_code('data/export_test.csv')
df
# # SMAB3 Single Player Analysis
# +
'''
Here, we load all of the csv files into pandas dataframes using organize_trial_by_trial and get the game 1 and 2 scores using
call_get_score.'''
# NOTE(review): three parser variants are used below (organize_trial_by_trial,
# organize_trial_by_trial_floatclick, organize_trial_by_trial_300) --
# presumably matching per-session log formats; confirm which sessions need
# which parser. Each 5-line stanza loads one participant and unpacks the
# five values returned by call_get_score(..., 's').
#in the s files, p2 is the confederate
df = organize_trial_by_trial('data/s01_gamedata.csv')
s01 = df
s01_p1_g1_score,s01_p1_g2_score,s01_score_diff,s01_p1_g1_50,s01_p1_g2_50 = call_get_score(s01, 's')
s01.head()
#in the s files, p2 is the confederate
# (s02 excluded from the analysis -- kept here commented out)
#df = organize_trial_by_trial('data/s02_gamedata.csv')
#s02 = df
#s02_p1_g1_score = get_score(s02, 1, 1)
#s02.head()
#in the s files, p2 is the confederate
df = organize_trial_by_trial('data/s03_gamedata.csv')
s03 = df
s03_p1_g1_score,s03_p1_g2_score,s03_score_diff,s03_p1_g1_50,s03_p1_g2_50 = call_get_score(s03, 's')
s03.head()
#in the s files, p2 is the confederate
df = organize_trial_by_trial('data/s05_gamedata.csv')
s05 = df
s05_p1_g1_score,s05_p1_g2_score,s05_score_diff, s05_p1_g1_50,s05_p1_g2_50 = call_get_score(s05, 's')
s05.head()
#in the s files, p2 is the confederate
df = organize_trial_by_trial('data/s06_gamedata.csv')
s06 = df
s06_p1_g1_score,s06_p1_g2_score,s06_score_diff, s06_p1_g1_50,s06_p1_g2_50 = call_get_score(s06, 's')
s06.head()
#in the s files, p2 is the confederate
df = organize_trial_by_trial('data/s07_gamedata.csv')
s07 = df
s07_p1_g1_score,s07_p1_g2_score,s07_score_diff, s07_p1_g1_50,s07_p1_g2_50 = call_get_score(s07, 's')
s07.head()
pd.set_option('display.max_columns', None)
#in the s files, p2 is the confederate
df = organize_trial_by_trial('data/s08_gamedata.csv')
s08 = df
s08_p1_g1_score,s08_p1_g2_score,s08_score_diff, s08_p1_g1_50,s08_p1_g2_50 = call_get_score(s08, 's')
s08.head()
#in the s files, p2 is the confederate
df = organize_trial_by_trial('data/s09_gamedata.csv')
s09 = df
s09_p1_g1_score,s09_p1_g2_score,s09_score_diff, s09_p1_g1_50,s09_p1_g2_50 = call_get_score(s09, 's')
s09.head()
#in the s files, p2 is the confederate
df = organize_trial_by_trial_floatclick('data/s11_gamedata.csv')
s11 = df
s11_p1_g1_score,s11_p1_g2_score,s11_score_diff,s11_p1_g1_50,s11_p1_g2_50 = call_get_score(s11, 's')
s11.head()
#in the s files, p2 is the confederate
df = organize_trial_by_trial_300('data/s12_gamedata.csv')
s12 = df
s12_p1_g1_score,s12_p1_g2_score,s12_score_diff, s12_p1_g1_50,s12_p1_g2_50 = call_get_score(s12, 's')
s12.head()
#in the s files, p2 is the confederate
df = organize_trial_by_trial_floatclick('data/s15_gamedata.csv')
s15 = df
s15_p1_g1_score,s15_p1_g2_score,s15_score_diff,s15_p1_g1_50,s15_p1_g2_50 = call_get_score(s15, 's')
s15.head()
#in the s files, p2 is the confederate
df = organize_trial_by_trial_floatclick('data/s16_gamedata.csv')
s16 = df
s16_p1_g1_score,s16_p1_g2_score,s16_score_diff,s16_p1_g1_50,s16_p1_g2_50 = call_get_score(s16, 's')
s16.head()
#in the s files, p2 is the confederate
df = organize_trial_by_trial_300('data/s19_gamedata.csv')
s19 = df
s19_p1_g1_score,s19_p1_g2_score,s19_score_diff,s19_p1_g1_50,s19_p1_g2_50 = call_get_score(s19, 's')
s19.head()
#in the s files, p2 is the confederate
df = organize_trial_by_trial_floatclick('data/s17_gamedata.csv')
s17 = df
s17_p1_g1_score,s17_p1_g2_score,s17_score_diff,s17_p1_g1_50,s17_p1_g2_50 = call_get_score(s17, 's')
s17.head()
#in the s files, p2 is the confederate
df = organize_trial_by_trial_floatclick('data/s20_gamedata.csv')
s20 = df
s20_p1_g1_score,s20_p1_g2_score,s20_score_diff, s20_p1_g1_50,s20_p1_g2_50= call_get_score(s20, 's')
s20.head()
# -
# Ad-hoc inspection of one raw session csv: load every row and view the table
# transposed (one column per trial).
# Bug fix: the file handle from open() was never closed; use a context manager
# so the handle is released as soon as the rows have been read.
with open('data/c06_gamedata.csv') as game_file:
    game_data = [row for row in csv.reader(game_file)]
df = pd.DataFrame(np.array(game_data))
df = df.T
df
# +
#organize_trial_by_trial_floatclick is missing 'time'
# +
'''
Here, we create a dataframe of all of the scores and score differences between game 1 and 2'''
# NOTE(review): s01 and s03 scores are referenced below but are loaded above
# this section of the file.
#list out all the scores in game 1 by participant
s_scores_1 = pd.DataFrame({'participant':['s01','s03', 's05',
's06', 's07', 's08', 's09', 's11', 's12', 's15', 's16', 's17', 's19', 's20'], 'score_1':
[s01_p1_g1_score, s03_p1_g1_score, s05_p1_g1_score,
s06_p1_g1_score, s07_p1_g1_score, s08_p1_g1_score, s09_p1_g1_score, s11_p1_g1_score, s12_p1_g1_score, s15_p1_g1_score, s16_p1_g1_score, s17_p1_g1_score, s19_p1_g1_score, s20_p1_g1_score]})
#to prevent an extra column that numbers each row:
s_scores_1.set_index('participant', inplace=True, drop=True)
#list out all the scores in game 2 by participant
s_scores_2 = pd.DataFrame({'participant':['s01', 's03', 's05',
's06', 's07', 's08', 's09', 's11', 's12', 's15', 's16', 's17', 's19', 's20'], 'score_2':
[s01_p1_g2_score, s03_p1_g2_score, s05_p1_g2_score,
s06_p1_g2_score, s07_p1_g2_score, s08_p1_g2_score, s09_p1_g2_score, s11_p1_g2_score, s12_p1_g2_score, s15_p1_g2_score, s16_p1_g2_score, s17_p1_g2_score, s19_p1_g2_score, s20_p1_g2_score]})
#to prevent an extra column that numbers each row:
s_scores_2.set_index('participant', inplace=True, drop=True)
#list out all the scores in game 1 by participant
# "_50" frames hold the half-game scores (at trial 50) per participant
s_scores_1_50 = pd.DataFrame({'participant':['s01','s03', 's05',
's06', 's07', 's08', 's09', 's11', 's12', 's15', 's16', 's17', 's19', 's20'], 'score_1_50':
[s01_p1_g1_50, s03_p1_g1_50, s05_p1_g1_50,
s06_p1_g1_50, s07_p1_g1_50, s08_p1_g1_50, s09_p1_g1_50, s11_p1_g1_50, s12_p1_g1_50, s15_p1_g1_50, s16_p1_g1_50, s17_p1_g1_50, s19_p1_g1_50, s20_p1_g1_50]})
#to prevent an extra column that numbers each row:
s_scores_1_50.set_index('participant', inplace=True, drop=True)
#list out all the scores in game 2 by participant
s_scores_2_50 = pd.DataFrame({'participant':['s01', 's03', 's05',
's06', 's07', 's08', 's09', 's11', 's12', 's15', 's16', 's17', 's19', 's20'], 'score_2_50':
[s01_p1_g2_50, s03_p1_g2_50, s05_p1_g2_50,
s06_p1_g2_50, s07_p1_g2_50, s08_p1_g2_50, s09_p1_g2_50, s11_p1_g2_50, s12_p1_g2_50, s15_p1_g2_50, s16_p1_g2_50, s17_p1_g2_50, s19_p1_g2_50, s20_p1_g2_50]})
s_scores_2_50.set_index('participant', inplace=True, drop=True)
# collected here but unused below — index merges are used instead of pd.concat(frames)
frames = [s_scores_1_50,s_scores_2_50,s_scores_1,s_scores_2]
# -
# combine the four score frames on the shared participant index
s_scores_50 = s_scores_1_50.merge(s_scores_2_50,right_index=True,left_index=True)
s_scores = s_scores_1.merge(s_scores_2,right_index=True,left_index=True)
s_combo = s_scores_50.merge(s_scores,right_index=True,left_index=True)
s_combo
# +
#unnecessary graph
#need to make g1 and g2 bars diff colors
colors = list(islice(cycle(['red','blue','red', 'red','blue','red','blue','red','blue','red','blue','red','blue','red','blue']),None,len(s_combo)))
#my_colors = list(islice(cycle(['b', 'r', 'g', 'y', 'k']), None, len(df)))
s_combo_bar = s_combo.plot(kind='bar',color=colors)
# commented-out earlier attempt, kept for reference
'''s_combo = pd.DataFrame({'participant':['s01', 's01', 's03', 's03','s05', 's05'
's06', 's06','s07','s07','s08','s08','s09','s09', 's11', 's11', 's12', 's12', 's15', 's15', 's16', 's16', 's17', 's17', 's19', 's19', 's20', 's20'], 'score':
[s01_p1_g1_score, s01_p1_g2_score, s02_p1_g1_score, s03_p1_g1_score, s03_p1_g2_score, s05_p1_g1_score,
s05_p1_g2_score,s06_p1_g1_score,s06_p1_g2_score, s07_p1_g1_score,s07_p1_g2_score, s08_p1_g1_score,s08_p1_g2_score,
s09_p1_g1_score, s09_p1_g2_score, s11_p1_g1_score, s11_p1_g2_score, s12_p1_g1_score, s12_p1_g2_score, s15_p1_g1_score, s15_p1_g2_score, s16_p1_g1_score, s16_p1_g2_score, s17_p1_g1_score, s17_p1_g2_score, s19_p1_g1_score, s19_p1_g2_score, s20_p1_g1_score, s20_p1_g2_score]})
s_combo.set_index('participant', inplace=True, drop=True)'''
# +
'''Here we plot the scores.'''
s_scores_1_bar = s_scores_1.plot.bar()
s_scores_2_bar = s_scores_2.plot.bar()
#merge both games by participant:
# 'participant' is the index name of both frames, so merge joins on the indexes
s_scores = pd.merge(s_scores_1, s_scores_2,on='participant')
#label columns
s_scores.columns=['Game 1','Game 2']
s_scores
# +
#make a boxplot of the scores in game 1 and game 2
#s_scores_box = s_scores.boxplot(labels=['Game 1','Game 2'])
#plt.grid(b=None)
#plt.yticks(np.arange(50, 105, step=10))
# seaborn boxplot of half-game and full-game scores for both games
s_scores_box = sns.boxplot(
data=s_combo, order = ['score_1_50','score_2_50','score_1','score_2'],
width=0.5,
palette="pastel")
s_scores_box.axes.set_title("SMAB3 Control Scores",
fontsize=16)
plot_file_name="s_score_box.jpg"
plt.ylim(0, 100)
s_scores_box.axes.set(ylabel='Score')
# save as jpeg
s_scores_box.figure.savefig(plot_file_name,
format='jpeg')
# +
#get a p value comparing scores in game 1 to scores in game 2
# paired t-test: each participant contributes a score to both games
s_scores_p = sp.stats.ttest_rel(s_scores['Game 1'],s_scores['Game 2'])
s_scores_p
s_scores_2_bar = s_scores_2.plot.bar()
#new_index = ('s01_g1','s01_g2','s02_g1','s03_g1','s03_g2','s05_g1','s05_g2',
#'s06_g1','s06_g2','s07_g1','s07_g2','s08_g1','s08_g2','s09_g1','s09_g2')
#s_combo.index = new_index
# +
'''Here we look at the means, standard deviations, and medians of the scores in games 1 and 2.'''
s_scores_1_mean = s_scores_1.mean()
s_scores_1_mean
# +
s_scores_1_std = s_scores_1.std()
# In[52]:
s_scores_1_std
# In[53]:
# +
s_scores_2_mean = s_scores_2.mean()
# In[54]:
s_scores_2_mean
# In[55]:
# +
s_scores_2_std = s_scores_2.std()
# In[56]:
s_scores_2_std
# In[57]:
# -
s_scores_med = s_scores.median()
s_scores_med
# +
'''Here we separate the dataframes of game 1 and game 2 for each subject.'''
# separate_df (defined earlier in the file) splits a subject's trial-by-trial
# dataframe into its game-1 and game-2 halves.
s01_p1_g1, s01_p1_g2 = separate_df(s01, 's')
s01_p1_g2
# Fix: a second, identical `s01_p1_g1, s01_p1_g2 = separate_df(s01, 's')` call
# followed here; it recomputed the same split to no effect and was removed
# (assumes separate_df has no side effects — TODO confirm).
s03_p1_g1, s03_p1_g2 = separate_df(s03, 's')
s05_p1_g1, s05_p1_g2 = separate_df(s05, 's')
s06_p1_g1, s06_p1_g2 = separate_df(s06, 's')
s07_p1_g1, s07_p1_g2 = separate_df(s07, 's')
s08_p1_g1, s08_p1_g2 = separate_df(s08, 's')
s09_p1_g1, s09_p1_g2 = separate_df(s09, 's')
s11_p1_g1, s11_p1_g2 = separate_df(s11, 's')
s12_p1_g1, s12_p1_g2 = separate_df(s12, 's')
s15_p1_g1, s15_p1_g2 = separate_df(s15, 's')
s16_p1_g1, s16_p1_g2 = separate_df(s16, 's')
s17_p1_g1, s17_p1_g2 = separate_df(s17, 's')
s19_p1_g1, s19_p1_g2 = separate_df(s19, 's')
s20_p1_g1, s20_p1_g2 = separate_df(s20, 's')
# +
'''Here we count the number of times that each hole was selected by each subject in each game by calling value_counts'''
# value_counts(sort=False) tallies selections per reward probability; the
# resulting category order is treated as low/med/high (30/60/90) by the
# positional .iloc code below — TODO confirm the ordering holds for this data.
s01_p1_g1_count = s01_p1_g1['probability'].value_counts(sort=False)
s01_p1_g2_count = s01_p1_g2['probability'].value_counts(sort=False)
s03_p1_g1_count = s03_p1_g1['probability'].value_counts(sort=False)
s03_p1_g2_count = s03_p1_g2['probability'].value_counts(sort=False)
s05_p1_g1_count = s05_p1_g1['probability'].value_counts(sort=False)
s05_p1_g2_count = s05_p1_g2['probability'].value_counts(sort=False)
s06_p1_g1_count = s06_p1_g1['probability'].value_counts(sort=False)
s06_p1_g2_count = s06_p1_g2['probability'].value_counts(sort=False)
s07_p1_g1_count = s07_p1_g1['probability'].value_counts(sort=False)
s07_p1_g2_count = s07_p1_g2['probability'].value_counts(sort=False)
s08_p1_g1_count = s08_p1_g1['probability'].value_counts(sort=False)
s08_p1_g2_count = s08_p1_g2['probability'].value_counts(sort=False)
s09_p1_g1_count = s09_p1_g1['probability'].value_counts(sort=False)
s09_p1_g2_count = s09_p1_g2['probability'].value_counts(sort=False)
s11_p1_g1_count = s11_p1_g1['probability'].value_counts(sort=False)
s11_p1_g2_count = s11_p1_g2['probability'].value_counts(sort=False)
s12_p1_g1_count = s12_p1_g1['probability'].value_counts(sort=False)
s12_p1_g2_count = s12_p1_g2['probability'].value_counts(sort=False)
s15_p1_g1_count = s15_p1_g1['probability'].value_counts(sort=False)
s15_p1_g2_count = s15_p1_g2['probability'].value_counts(sort=False)
s16_p1_g1_count = s16_p1_g1['probability'].value_counts(sort=False)
s16_p1_g2_count = s16_p1_g2['probability'].value_counts(sort=False)
s17_p1_g1_count = s17_p1_g1['probability'].value_counts(sort=False)
s17_p1_g2_count = s17_p1_g2['probability'].value_counts(sort=False)
s19_p1_g1_count = s19_p1_g1['probability'].value_counts(sort=False)
s19_p1_g2_count = s19_p1_g2['probability'].value_counts(sort=False)
s20_p1_g1_count = s20_p1_g1['probability'].value_counts(sort=False)
s20_p1_g2_count = s20_p1_g2['probability'].value_counts(sort=False)
# +
'''Here we create dataframes with the counts for game 1.'''
# one row per subject, one column per arm/hole
s_g1_counts = pd.DataFrame([s01_p1_g1_count, s03_p1_g1_count, s05_p1_g1_count, s06_p1_g1_count, s07_p1_g1_count,
s08_p1_g1_count, s09_p1_g1_count, s11_p1_g1_count, s12_p1_g1_count, s15_p1_g1_count, s16_p1_g1_count,
s17_p1_g1_count, s19_p1_g1_count, s20_p1_g1_count],
index=['s01_p1_g1_count', 's03_p1_g1_count',
's05_p1_g1_count', 's06_p1_g1_count', 's07_p1_g1_count', 's08_p1_g1_count',
's09_p1_g1_count', 's11_p1_g1_count', 's12_p1_g1_count', 's15_p1_g1_count',
's16_p1_g1_count','s17_p1_g1_count', 's19_p1_g1_count', 's20_p1_g1_count'])
#How to prevent NaN: df = df.fillna(0). This makes the value 0 if a hole was never selected by a subject.
s_g1_counts = s_g1_counts.fillna(0)
#print
s_g1_counts
# -
'''Here we plot the value counts
s_g1_counts.sum(axis=1)
s_g1_counts_bar = s_g1_counts.plot.bar()
# save as jpeg
s_g1_counts_bar.figure.savefig(s_g1_counts_bar,
format='jpeg',
dpi=100)'''
# +
# boxplot of per-arm selection counts across subjects, game 1
s_g1_counts_box = sns.boxplot(
data=s_g1_counts,
width=0.5,
palette="pastel")
s_g1_counts_box.axes.set_title("SMAB3 Control Choice Distributions in Game 1",
fontsize=16)
plot_file_name="s_g1_counts_box.jpg"
plt.ylim(0, 100)
s_g1_counts_box.axes.set(xlabel='Arm',ylabel='Frequency')
# save as jpeg
s_g1_counts_box.figure.savefig(plot_file_name,
format='jpeg')
# +
'''Here we get the mean and standard deviation of the number of selections of each hole where low is the 0th column and the 30% hole,
med is the 1st column and the 60% hole, and high is the 2nd column and the 90% hole.'''
s_g1_low = np.mean(s_g1_counts.iloc[:,0])
s_g1_med = np.mean(s_g1_counts.iloc[:,1])
s_g1_high = np.mean(s_g1_counts.iloc[:,2])
s_g1_low_std = np.std(s_g1_counts.iloc[:,0])
s_g1_med_std = np.std(s_g1_counts.iloc[:,1])
s_g1_high_std = np.std(s_g1_counts.iloc[:,2])
# +
'''Here we create dataframes with the counts for game 2.'''
s_g2_counts = pd.DataFrame([s01_p1_g2_count, s03_p1_g2_count,
s05_p1_g2_count, s06_p1_g2_count, s07_p1_g2_count, s08_p1_g2_count, s09_p1_g2_count,
s11_p1_g2_count, s12_p1_g2_count, s15_p1_g2_count, s16_p1_g2_count,
s17_p1_g2_count, s19_p1_g2_count, s20_p1_g2_count],
index= ['s01_p1_g2_count', 's03_p1_g2_count', 's05_p1_g2_count', 's06_p1_g2_count',
's07_p1_g2_count', 's08_p1_g2_count', 's09_p1_g2_count', 's11_p1_g2_count',
's12_p1_g2_count', 's15_p1_g2_count', 's16_p1_g2_count','s17_p1_g2_count',
's19_p1_g2_count', 's20_p1_g2_count'])
#How to prevent NaN: df = df.fillna(0)
s_g2_counts = s_g2_counts.fillna(0)
#print
s_g2_counts
s_g2_counts.sum(axis=1)
# +
'''Here we plot the counts in game 2.'''
s_g2_counts_bar = s_g2_counts.plot.bar()
# Bug fix: plot_file_name still held "s_g1_counts_box.jpg" from the game-1 box
# plot above, so this save silently overwrote that figure. Give the bar chart
# its own file name before saving.
plot_file_name = "s_g2_counts_bar.jpg"
# save as jpeg
s_g2_counts_bar.figure.savefig(plot_file_name,
                               format='jpeg',
                               dpi=100)
# +
# boxplot of per-arm selection counts across subjects, game 2
s_g2_counts_box = sns.boxplot(
data=s_g2_counts,
width=0.5,
palette="pastel")
s_g2_counts_box.axes.set_title("SMAB3 Control Choice Distributions in Game 2",
fontsize=16)
plot_file_name="s_g2_counts_box.jpg"
plt.ylim(0, 100)
s_g2_counts_box.axes.set(xlabel='Arm',ylabel='Frequency')
# save as jpeg
s_g2_counts_box.figure.savefig(plot_file_name,
format='jpeg')
# +
'''Here we get the mean and standard deviation of the number of times participants chose each hole.'''
s_g2_low = np.mean(s_g2_counts.iloc[:,0])
s_g2_med = np.mean(s_g2_counts.iloc[:,1])
s_g2_high = np.mean(s_g2_counts.iloc[:,2])
# +
s_g2_low_std = np.std(s_g2_counts.iloc[:,0])
s_g2_med_std = np.std(s_g2_counts.iloc[:,1])
s_g2_high_std = np.std(s_g2_counts.iloc[:,2])
# only the last bare expression is rendered by the notebook
s_g2_low_std
s_g2_med_std
s_g2_high_std
# +
'''Here we place the value counts for each hole for each game into separate numy arrays and compare them statistically.'''
import researchpy as rp
s_90_1 = np.array(s_g1_counts.iloc[:,2])
s_90_2 = np.array(s_g2_counts.iloc[:,2])
s_60_1 = np.array(s_g1_counts.iloc[:,1])
s_60_2 = np.array(s_g2_counts.iloc[:,1])
s_30_1 = np.array(s_g1_counts.iloc[:,0])
s_30_2 = np.array(s_g2_counts.iloc[:,0])
# paired t-tests: game-1 vs game-2 selection counts per arm
s_90_p = sp.stats.ttest_rel(s_90_1,s_90_2)
s_60_p = sp.stats.ttest_rel(s_60_1,s_60_2)
s_30_p = sp.stats.ttest_rel(s_30_1,s_30_2)
# -
# # SMAB4 Single Player Analysis
# +
'''Here we load the csv files into pandas dataframes by calling organize_trial_by_trial and get the scores in games
1 and 2 by calling call_get_score.'''
# --- SMAB4 control ("c") sessions: same pipeline as the s-files above. The
# "#in the s files, p2 is the confederate" comments pasted through this section
# referred to the s-files and were removed.
df = organize_trial_by_trial('data/c02_gamedata.csv')
c02 = df
c02_p1_g1_score,c02_p1_g2_score,c02_score_diff, c02_p1_g1_50,c02_p1_g2_50 = call_get_score(c02, 'c')
c02.head()
df = organize_trial_by_trial('data/c03_gamedata.csv')
c03 = df
c03_p1_g1_score,c03_p1_g2_score,c03_score_diff, c03_p1_g1_50,c03_p1_g2_50 = call_get_score(c03, 'c')
c03.head()
# c04 is excluded from the analysis (kept commented out)
#df = organize_trial_by_trial('data/c04_gamedata.csv')
#c04 = df
#c04_p1_g1_score,c04_p1_g2_score,c04_score_diff = call_get_score(c04, 'c')
#c04.head()
df = organize_trial_by_trial('data/c05_gamedata.csv')
c05 = df
c05_p1_g1_score,c05_p1_g2_score,c05_score_diff, c05_p1_g1_50,c05_p1_g2_50 = call_get_score(c05, 'c')
c05.head()
df = organize_trial_by_trial('data/c06_gamedata.csv')
c06 = df
c06_p1_g1_score,c06_p1_g2_score,c06_score_diff, c06_p1_g1_50,c06_p1_g2_50 = call_get_score(c06, 'c')
c06.head()
df = organize_trial_by_trial('data/c09_gamedata.csv')
c09 = df
c09_p1_g1_score,c09_p1_g2_score,c09_score_diff, c09_p1_g1_50,c09_p1_g2_50 = call_get_score(c09, 'c')
c09.head()
df = organize_trial_by_trial('data/c10_gamedata.csv')
c10 = df
c10_p1_g1_score,c10_p1_g2_score,c10_score_diff, c10_p1_g1_50,c10_p1_g2_50 = call_get_score(c10, 'c')
c10.head()
df = organize_trial_by_trial('data/c11_gamedata.csv')
c11 = df
c11_p1_g1_score,c11_p1_g2_score,c11_score_diff, c11_p1_g1_50,c11_p1_g2_50 = call_get_score(c11, 'c')
c11.head()
# show every column when displaying dataframes in the notebook
pd.set_option('display.max_columns', None)
df = organize_trial_by_trial_floatclick('data/c12_gamedata.csv')
c12 = df
c12_p1_g1_score,c12_p1_g2_score,c12_score_diff, c12_p1_g1_50,c12_p1_g2_50 = call_get_score(c12, 'c')
c12.head()
df = organize_trial_by_trial_floatclick('data/c13_gamedata.csv')
c13 = df
c13_p1_g1_score,c13_p1_g2_score,c13_score_diff, c13_p1_g1_50,c13_p1_g2_50 = call_get_score(c13, 'c')
c13.head()
df = organize_trial_by_trial_floatclick('data/c14_gamedata.csv')
c14 = df
c14_p1_g1_score,c14_p1_g2_score,c14_score_diff, c14_p1_g1_50,c14_p1_g2_50 = call_get_score(c14, 'c')
c14.head()
df = organize_trial_by_trial_floatclick('data/c16_gamedata.csv')
c16 = df
c16_p1_g1_score,c16_p1_g2_score,c16_score_diff, c16_p1_g1_50,c16_p1_g2_50 = call_get_score(c16, 'c')
# Bug fix: this cell previewed c12.head() instead of the just-loaded c16.
c16.head()
df = organize_trial_by_trial_floatclick('data/c17_gamedata.csv')
c17 = df
c17_p1_g1_score,c17_p1_g2_score,c17_score_diff, c17_p1_g1_50,c17_p1_g2_50 = call_get_score(c17, 'c')
c17
# +
'''Here we place the score differences between games 1 and 2 in pandas dataframes and get the mean.'''
# In[119]:
score_diff_confederate = pd.DataFrame({'participant':['c02', 'c03',
'c05', 'c06', 'c09', 'c10', 'c11', 'c12', 'c13', 'c14', 'c16', 'c17'], 'score_diff_confederate':
[c02_score_diff, c03_score_diff,
c05_score_diff, c06_score_diff, c09_score_diff, c10_score_diff, c11_score_diff, c12_score_diff, c13_score_diff, c14_score_diff, c16_score_diff, c17_score_diff ]})
score_diff_confederate
# mean game1-to-game2 score change across SMAB4 participants
score_diff_confederate_mean = score_diff_confederate['score_diff_confederate'].mean()
score_diff_confederate_mean
# +
'''Here we place the scores in games 1 and 2 into dataframes.'''
#list out all the scores in game 1 by participant
c_scores_1 = pd.DataFrame({'participant':['c02', 'c03',
'c05', 'c06', 'c09', 'c10', 'c11', 'c12', 'c13', 'c14', 'c16', 'c17'], 'confederate_score_1':
[c02_p1_g1_score, c03_p1_g1_score,
c05_p1_g1_score, c06_p1_g1_score, c09_p1_g1_score, c10_p1_g1_score, c11_p1_g1_score, c12_p1_g1_score, c13_p1_g1_score, c14_p1_g1_score, c16_p1_g1_score, c17_p1_g1_score]})
#to prevent an extra column that numbers each row:
c_scores_1.set_index('participant', inplace=True, drop=True)
#print
c_scores_1
#list out all the scores in game 2 by participant
c_scores_2 = pd.DataFrame({'participant':['c02', 'c03',
'c05', 'c06', 'c09', 'c10', 'c11', 'c12', 'c13', 'c14', 'c16', 'c17'], 'confederate_score_2':[c02_p1_g2_score, c03_p1_g2_score, c05_p1_g2_score, c06_p1_g2_score, c09_p1_g2_score, c10_p1_g2_score, c11_p1_g2_score, c12_p1_g2_score, c13_p1_g2_score, c14_p1_g2_score, c16_p1_g2_score, c17_p1_g2_score]})
#to prevent an extra column that numbers each row:
c_scores_2.set_index('participant', inplace=True, drop=True)
#s_scores_1 = pd.DataFrame(data = d1)
#s_scores_2 = pd.DataFrame(data = d2)
#print
c_scores_2
#list out all the scores in game 1 by participant
# half-game (trial 50) scores
c_scores_1_50 = pd.DataFrame({'participant':['c02', 'c03',
'c05', 'c06', 'c09', 'c10', 'c11', 'c12', 'c13', 'c14', 'c16', 'c17'], 'score_1_50':
[c02_p1_g1_50, c03_p1_g1_50, c05_p1_g1_50,
c06_p1_g1_50, c09_p1_g1_50, c10_p1_g1_50, c11_p1_g1_50, c12_p1_g1_50, c13_p1_g1_50, c14_p1_g1_50, c16_p1_g1_50, c17_p1_g1_50]})
#to prevent an extra column that numbers each row:
c_scores_1_50.set_index('participant', inplace=True, drop=True)
#list out all the scores in game 2 by participant
# Bug fix: this column was mislabeled 'score_1_50' (copy-paste), which forced
# the later merge to emit suffixed duplicate column names; label it 'score_2_50'.
# (The later rename of c_combo's columns is positional, so it still works.)
c_scores_2_50 = pd.DataFrame({'participant':['c02', 'c03',
'c05', 'c06', 'c09', 'c10', 'c11', 'c12', 'c13', 'c14', 'c16', 'c17'], 'score_2_50':
[c02_p1_g2_50, c03_p1_g2_50, c05_p1_g2_50,
c06_p1_g2_50, c09_p1_g2_50, c10_p1_g2_50, c11_p1_g2_50, c12_p1_g2_50, c13_p1_g2_50, c14_p1_g2_50, c16_p1_g2_50, c17_p1_g2_50]})
c_scores_2_50.set_index('participant', inplace=True, drop=True)
# +
'''Here we combine the dataframes with the scores from the 2 games.'''
c_scores_50 = c_scores_1_50.merge(c_scores_2_50,right_index=True,left_index=True)
c_scores = c_scores_1.merge(c_scores_2,right_index=True,left_index=True)
c_combo = c_scores_50.merge(c_scores,right_index=True,left_index=True)
#rename columns
# positional rename: normalizes the half-game/full-game column labels
c_combo.columns=["score_1_50", "score_2_50", "score_1", "score_2"]
# -
'''Here we plot the scores in games 1 and 2.
c_scores_1_bar = c_scores_1.plot.bar()
c_scores_2_bar = c_scores_2.plot.bar()
#label columns
#c_scores.columns=['Game 1','Game 2']
c_scores'''
#need to fix rewards for SMAB4
c_combo
# +
# NOTE(review): DataFrame.boxplot passes `labels` through to matplotlib —
# confirm this renders as intended on the installed pandas/matplotlib versions.
c_scores_box = c_scores.boxplot(labels=['Game 1','Game 2'])
# NOTE(review): the `b=` keyword of plt.grid is deprecated in newer matplotlib
plt.grid(b=None)
#plt.yticks(np.arange(50, 105, step=10))
c_scores_box = sns.boxplot(
data=c_scores,
width=0.5,
palette="pastel")
c_scores_box.axes.set_title("SMAB4 Control Scores",
fontsize=16)
plot_file_name="c_score_box.jpg"
plt.ylim(0, 100)
c_scores_box.axes.set(ylabel='Score')
# save as jpeg
c_scores_box.figure.savefig(plot_file_name,
format='jpeg')
# +
'''Here we make a boxplot with the scores in the first and second half of the games included'''
c_combo_box = sns.boxplot(
data=c_combo, order = ['score_1_50','score_2_50','score_1','score_2'],
width=0.5,
palette="pastel")
c_combo_box.axes.set_title("SMAB4 Control Scores",
fontsize=16)
plot_file_name="c_score_box.jpg"
plt.ylim(0, 100)
c_combo_box.axes.set(ylabel='Score')
# save as jpeg
# NOTE(review): this reuses "c_score_box.jpg" and overwrites the figure saved above
c_combo_box.figure.savefig(plot_file_name,
format='jpeg')
# -
'''Get the p value between the scores in games 1 and 2.
c_scores_p = sp.stats.ttest_rel(c_scores['Game 1'],c_scores['Game 2'])
c_scores_p
#plot bar graph of scores
c_scores_2_bar = c_scores_2.plot.bar()
#combine g1 and g2 to one dataframe
c_combo = c_scores_1.append(c_scores_2)
c_combo = pd.DataFrame({'participant':['c02', 'c02', 'c03', 'c03',
'c06','c06','c09','c09','c10','c10','c11','c11','c12','c12','c13','c13','c14','c14','c16','c16','c17','c17'], 'score':
[c02_p1_g1_score, c02_p1_g2_score, c03_p1_g1_score, c03_p1_g2_score, c05_p1_g1_score, c05_p1_g2_score, c06_p1_g1_score, c06_p1_g2_score, c09_p1_g1_score, c09_p1_g2_score, c10_p1_g1_score, c10_p1_g2_score, c11_p1_g1_score, c11_p1_g2_score, c12_p1_g1_score, c12_p1_g2_score, c13_p1_g1_score, c13_p1_g2_score, c14_p1_g1_score, c14_p1_g2_score, c16_p1_g1_score, c16_p1_g2_score, c17_p1_g1_score, c17_p1_g2_score]})
c_combo.set_index('participant', inplace=True, drop=True)
#new_index = ('c02_g1','c02_g2','c03_g1','c03_g2','c04_g1','c04_g2','c05_g1','c05_g2','c06_g1','c06_g2','c09_g1','c09_g2','c10_g1','c10_g2','c11_g1','c11_g2','c12_g1','c12_g2','c13_g1','c13_g2','c14_g1','c14_g2','c16_g1','c16_g2','c17_g1','c17_g2')
#c_combo.index = new_index
c_combo = c_combo.sort_values(by=['participant'])'''
#unnecessary graph
#need to make g1 and g2 bars diff colors
colors = list(islice(cycle(['red','blue','green', 'yellow']),None,len(c_combo)))
#blue','red','blue','red','blue','red','blue','red','blue','red','blue']),None,len(c_combo)))
#my_colors = list(islice(cycle(['b', 'r', 'g', 'y', 'k']), None, len(df)))
c_combo_bar = c_combo.plot(kind='bar',color=colors)
# # Checking for outliars
#scatter plot
# score at trial 50 vs final score, one point per participant
fig, ax = plt.subplots(figsize=(16,8))
ax.scatter(c_combo['score_2_50'], c_combo['score_2'])
ax.set_xlabel('Score at trial 50')
ax.set_ylabel('Score at trial 100')
plt.show()
#calculate z score
# NOTE(review): bare `stats` is used here while the rest of the file uses
# `sp.stats` — this assumes `from scipy import stats` (or similar) earlier; verify.
z = np.abs(stats.zscore(c_combo['score_2']))
print(z)
z = np.abs(stats.zscore(c_combo['score_2_50']))
print(z)
# +
'''Here we get the mean, median, and standard deviation of the scores in games 1 and 2.'''
c_scores_1_mean = c_scores_1.mean()
c_scores_1_mean
# +
c_scores_1_std = c_scores_1.std()
c_scores_1_std
# +
# In[146]:
c_scores_2_mean = c_scores_2.mean()
# In[147]:
c_scores_2_mean
# +
# In[148]:
c_scores_2_std = c_scores_2.std()
# In[149]:
c_scores_2_std
# +
# In[150]:
c_scores_med = c_scores.median()
c_scores_med
# +
'''Here we separate the dataframes into games 1 and 2.'''
#s01_p1_g1, s01_p1_g2 = separate_df(s01, 's')
c02_p1_g1, c02_p1_g2 = separate_df(c02, 'c')
c03_p1_g1, c03_p1_g2 = separate_df(c03, 'c')
c05_p1_g1, c05_p1_g2 = separate_df(c05, 'c')
c06_p1_g1, c06_p1_g2 = separate_df(c06, 'c')
c09_p1_g1, c09_p1_g2 = separate_df(c09, 'c')
c10_p1_g1, c10_p1_g2 = separate_df(c10, 'c')
c11_p1_g1, c11_p1_g2 = separate_df(c11, 'c')
c12_p1_g1, c12_p1_g2 = separate_df(c12, 'c')
c13_p1_g1, c13_p1_g2 = separate_df(c13, 'c')
c14_p1_g1, c14_p1_g2 = separate_df(c14, 'c')
c16_p1_g1, c16_p1_g2 = separate_df(c16, 'c')
c17_p1_g1, c17_p1_g2 = separate_df(c17, 'c')
# +
'''Here we count the number of times each hole was chosen in each game and place it in an array.'''
# per-arm selection tallies, as in the SMAB3 section above
c02_p1_g1_count = c02_p1_g1['probability'].value_counts(sort=False)
c02_p1_g2_count = c02_p1_g2['probability'].value_counts(sort=False)
c03_p1_g1_count = c03_p1_g1['probability'].value_counts(sort=False)
c03_p1_g2_count = c03_p1_g2['probability'].value_counts(sort=False)
c05_p1_g1_count = c05_p1_g1['probability'].value_counts(sort=False)
c05_p1_g2_count = c05_p1_g2['probability'].value_counts(sort=False)
c06_p1_g1_count = c06_p1_g1['probability'].value_counts(sort=False)
c06_p1_g2_count = c06_p1_g2['probability'].value_counts(sort=False)
c09_p1_g1_count = c09_p1_g1['probability'].value_counts(sort=False)
c09_p1_g2_count = c09_p1_g2['probability'].value_counts(sort=False)
c10_p1_g1_count = c10_p1_g1['probability'].value_counts(sort=False)
c10_p1_g2_count = c10_p1_g2['probability'].value_counts(sort=False)
c11_p1_g1_count = c11_p1_g1['probability'].value_counts(sort=False)
c11_p1_g2_count = c11_p1_g2['probability'].value_counts(sort=False)
c12_p1_g1_count = c12_p1_g1['probability'].value_counts(sort=False)
c12_p1_g2_count = c12_p1_g2['probability'].value_counts(sort=False)
c13_p1_g1_count = c13_p1_g1['probability'].value_counts(sort=False)
c13_p1_g2_count = c13_p1_g2['probability'].value_counts(sort=False)
c14_p1_g1_count = c14_p1_g1['probability'].value_counts(sort=False)
c14_p1_g2_count = c14_p1_g2['probability'].value_counts(sort=False)
c16_p1_g1_count = c16_p1_g1['probability'].value_counts(sort=False)
c16_p1_g2_count = c16_p1_g2['probability'].value_counts(sort=False)
c17_p1_g1_count = c17_p1_g1['probability'].value_counts(sort=False)
c17_p1_g2_count = c17_p1_g2['probability'].value_counts(sort=False)
# -
c09_p1_g2_count
# +
'''Here we place the number of times each hole was chosen by each subject in game 1 and into a dataframe.
We also plot this data and get the means and standard deviations of it.'''
c_g1_counts = pd.DataFrame([c02_p1_g1_count, c03_p1_g1_count,
c05_p1_g1_count, c06_p1_g1_count, c09_p1_g1_count, c10_p1_g1_count, c11_p1_g1_count, c12_p1_g1_count, c13_p1_g1_count, c14_p1_g1_count, c16_p1_g1_count, c17_p1_g1_count],
index=['c02_p1_g1_count', 'c03_p1_g1_count', 'c05_p1_g1_count', 'c06_p1_g1_count',
'c09_p1_g1_count', 'c10_p1_g1_count', 'c11_p1_g1_count', 'c12_p1_g1_count', 'c13_p1_g1_count', 'c14_p1_g1_count',
'c16_p1_g1_count', 'c17_p1_g1_count'])
#How to prevent NaN: df = df.fillna(0)
c_g1_counts = c_g1_counts.fillna(0)
#print
c_g1_counts
c_g1_counts.sum(axis=1)
c_g1_counts_bar = c_g1_counts.plot.bar()
# Bug fix: plot_file_name still held "c_score_box.jpg" from the score box plot
# above, so saving here clobbered that figure. Use a dedicated file name.
plot_file_name = "c_g1_counts_bar.jpg"
# save as jpeg
c_g1_counts_bar.figure.savefig(plot_file_name,
                               format='jpeg',
                               dpi=100)
# +
# boxplot of per-arm selection counts across subjects, game 1
c_g1_counts_box = sns.boxplot(
data=c_g1_counts,
width=0.5,
palette="pastel")
c_g1_counts_box.axes.set_title("SMAB4 Control Choice Distributions in Game 1",
fontsize=16)
plot_file_name="c_g1_counts_box.jpg"
plt.ylim(0, 100)
c_g1_counts_box.axes.set(xlabel='Arm',ylabel='Frequency')
# save as jpeg
c_g1_counts_box.figure.savefig(plot_file_name,
format='jpeg')
# +
# In[158]:
#0 is low (30), 1 is med (60), 2 is high (90)
c_g1_low = np.mean(c_g1_counts.iloc[:,0])
c_g1_med = np.mean(c_g1_counts.iloc[:,1])
c_g1_high = np.mean(c_g1_counts.iloc[:,2])
# only the last bare expression is rendered by the notebook
c_g1_low
c_g1_med
c_g1_high
# +
c_g1_low_std = np.std(c_g1_counts.iloc[:,0])
c_g1_med_std = np.std(c_g1_counts.iloc[:,1])
c_g1_high_std = np.std(c_g1_counts.iloc[:,2])
c_g1_low_std
c_g1_med_std
c_g1_high_std
# +
'''Here we place the number of times each hole was chosen by each subject in game 2 and into a dataframe.
We also plot this data and get the means and standard deviations of it.'''
c_g2_counts = pd.DataFrame([c02_p1_g2_count, c03_p1_g2_count,
c05_p1_g2_count, c06_p1_g2_count, c09_p1_g2_count, c10_p1_g2_count, c11_p1_g2_count, c12_p1_g2_count, c13_p1_g2_count, c14_p1_g2_count, c16_p1_g2_count, c17_p1_g2_count],
index=['c02_p1_g2_count', 'c03_p1_g2_count', 'c05_p1_g2_count', 'c06_p1_g2_count',
'c09_p1_g2_count', 'c10_p1_g2_count', 'c11_p1_g2_count', 'c12_p1_g2_count', 'c13_p1_g2_count', 'c14_p1_g2_count',
'c16_p1_g2_count', 'c17_p1_g2_count'])
#How to prevent NaN: df = df.fillna(0)
c_g2_counts = c_g2_counts.fillna(0)
#print
c_g2_counts
c_g2_counts.sum(axis=1)
c_g2_counts_bar = c_g2_counts.plot.bar()
# Bug fix: plot_file_name still held "c_g1_counts_box.jpg" from the game-1 box
# plot above, so this save silently overwrote that figure. Use a dedicated name.
plot_file_name = "c_g2_counts_bar.jpg"
# save as jpeg
c_g2_counts_bar.figure.savefig(plot_file_name,
                               format='jpeg',
                               dpi=100)
# +
# boxplot of per-arm selection counts across subjects, game 2
c_g2_counts_box = sns.boxplot(
data=c_g2_counts,
width=0.5,
palette="pastel")
c_g2_counts_box.axes.set_title("SMAB4 Control Choice Distributions in Game 2",
fontsize=16)
plot_file_name="c_g2_counts_box.jpg"
plt.ylim(0, 100)
c_g2_counts_box.axes.set(xlabel='Arm',ylabel='Frequency')
# save as jpeg
c_g2_counts_box.figure.savefig(plot_file_name,
format='jpeg')
# +
# per-arm means across subjects, game 2 (column 0 = low, 1 = med, 2 = high)
c_g2_low = np.mean(c_g2_counts.iloc[:,0])
c_g2_med = np.mean(c_g2_counts.iloc[:,1])
c_g2_high = np.mean(c_g2_counts.iloc[:,2])
c_g2_low
c_g2_med
c_g2_high
# +
c_g2_low_std = np.std(c_g2_counts.iloc[:,0])
c_g2_med_std = np.std(c_g2_counts.iloc[:,1])
c_g2_high_std = np.std(c_g2_counts.iloc[:,2])
c_g2_low_std
c_g2_med_std
c_g2_high_std
# +
'''Here we convert the value counts of each hole in each game into Numpy arrays.'''
import researchpy as rp
c_90_1 = np.array(c_g1_counts.iloc[:,2])
c_90_2 = np.array(c_g2_counts.iloc[:,2])
# In[182]:
c_90_1
# In[183]:
c_90_2
# In[184]:
c_60_1 = np.array(c_g1_counts.iloc[:,1])
c_60_2 = np.array(c_g2_counts.iloc[:,1])
# In[185]:
c_60_1
# In[186]:
c_60_2
# In[187]:
c_30_1 = np.array(c_g1_counts.iloc[:,0])
c_30_2 = np.array(c_g2_counts.iloc[:,0])
# In[188]:
c_30_1
# In[189]:
c_30_2
# +
'''Here we do a t-test comparing the hole choices in games 1 and 2.'''
# paired t-tests per arm (game 1 vs game 2)
c_90_p = sp.stats.ttest_rel(c_90_1,c_90_2)
c_60_p = sp.stats.ttest_rel(c_60_1,c_60_2)
c_30_p = sp.stats.ttest_rel(c_30_1,c_30_2)
# In[191]:
c_90_p
# In[192]:
c_60_p
# In[193]:
c_30_p
# In[194]:
#maximizing and matching?
# In[195]:
#maximizing and matching?
# -
# # Single Player Analysis of SMAB 3 and SMAB 4
# ## Testing for differences between SMAB 3 and SMAB 4
'''scores, games 1 vs 2, by condition, in smab 3 vs 4. So 2 x 2 x 2,
mixed-measures anova or GLM, with game as a repeated measure.
You could also collapse game and just do a simple 2x2 anova to get a sense of the data.'''
# third-party dependency for the mixed-measures ANOVA below
import pingouin as pg
# +
'''Here we place the scores in games 1 and 2 into dataframes, combining SMAB3 and SMAB4.'''
# NEED: add scores at trial 50
#list out all the scores in game 1 by participant
control_scores_1 = pd.DataFrame({'participant':['s01','s03', 's05',
's06', 's07', 's08', 's09', 's11', 's12', 's15', 's16', 's17', 's19', 's20','c02', 'c03',
'c05', 'c06', 'c09', 'c10', 'c11', 'c12', 'c13', 'c14', 'c16', 'c17'], 'control_score_1':
[s01_p1_g1_score, s03_p1_g1_score, s05_p1_g1_score,
s06_p1_g1_score, s07_p1_g1_score, s08_p1_g1_score, s09_p1_g1_score, s11_p1_g1_score, s12_p1_g1_score, s15_p1_g1_score, s16_p1_g1_score, s17_p1_g1_score, s19_p1_g1_score, s20_p1_g1_score, c02_p1_g1_score, c03_p1_g1_score,
c05_p1_g1_score, c06_p1_g1_score, c09_p1_g1_score, c10_p1_g1_score, c11_p1_g1_score, c12_p1_g1_score, c13_p1_g1_score, c14_p1_g1_score, c16_p1_g1_score, c17_p1_g1_score]})
#to prevent an extra column that numbers each row:
# Bug fix: set_index was originally called one line BEFORE control_scores_1
# existed, which raises a NameError (and would have left a numbered index).
# Index the frame after creating it, mirroring control_scores_2 below.
control_scores_1.set_index('participant', inplace=True, drop=True)
#print
control_scores_1
#list out all the scores in game 2 by participant
control_scores_2 = pd.DataFrame({'participant':['s01', 's03', 's05',
's06', 's07', 's08', 's09', 's11', 's12', 's15', 's16', 's17', 's19', 's20','c02', 'c03',
'c05', 'c06', 'c09', 'c10', 'c11', 'c12', 'c13', 'c14', 'c16', 'c17'], 'control_score_2':
[s01_p1_g2_score, s03_p1_g2_score, s05_p1_g2_score,
s06_p1_g2_score, s07_p1_g2_score, s08_p1_g2_score, s09_p1_g2_score, s11_p1_g2_score, s12_p1_g2_score, s15_p1_g2_score, s16_p1_g2_score, s17_p1_g2_score, s19_p1_g2_score, s20_p1_g2_score,
c02_p1_g2_score, c03_p1_g2_score, c05_p1_g2_score, c06_p1_g2_score, c09_p1_g2_score, c10_p1_g2_score, c11_p1_g2_score, c12_p1_g2_score, c13_p1_g2_score, c14_p1_g2_score, c16_p1_g2_score, c17_p1_g2_score]})
#to prevent an extra column that numbers each row:
control_scores_2.set_index('participant', inplace=True, drop=True)
#s_scores_1 = pd.DataFrame(data = d1)
#s_scores_2 = pd.DataFrame(data = d2)
#print
control_scores_2
#merge both games by participant:
# 'participant' is both frames' index name, so merge joins on the indexes
control_scores = pd.merge(control_scores_1, control_scores_2, on = 'participant')
#label columns
control_scores.columns=['Game 1','Game 2']
control_scores
control_scores.sum(axis=1)
# In[203]:
# replaces the frame with each participant's combined (game 1 + game 2) total
control_scores=control_scores.sum(axis=1)
# In[204]:
control_scores.sort_values(inplace=True)
# In[205]:
control_scores
# In[206]:
control_scores_1_mean = control_scores_1.mean()
# In[207]:
control_scores_1_mean
# In[208]:
control_scores_1_std = control_scores_1.std()
# In[209]:
control_scores_1_std
# In[210]:
control_scores_2_mean = control_scores_2.mean()
# In[211]:
control_scores_2_mean
# In[212]:
control_scores_2_std = control_scores_2.std()
# In[213]:
control_scores_2_std
# In[214]:
# NOTE(review): control_scores was overwritten above by .sum(axis=1), so this
# median is of the combined two-game totals, not of the per-game scores.
control_scores_med = control_scores.median()
control_scores_med
# In[215]:
# paired t-test on the game-1 vs game-2 score frames
control_scores_p = sp.stats.ttest_rel(control_scores_1,control_scores_2)
# In[216]:
control_scores_p
# In[217]:
def degreesOfFreedom(X, Y):
    """Return the Welch-Satterthwaite effective degrees of freedom.

    Used for a Welch (unequal-variance) t-test between samples X and Y:
        df = (s1/n1 + s2/n2)^2 /
             [ (s1/n1)^2/(n1-1) + (s2/n2)^2/(n2-1) ]
    where s1, s2 are the *sample variances* and n1, n2 the sample sizes.

    Parameters: X, Y -- sequences of numbers, each with at least 2 elements.
    Returns: float degrees of freedom (equals n1+n2-2 when variances and
    sizes are equal).
    """
    s1 = stdev(X) ** 2  # sample variance of X
    s2 = stdev(Y) ** 2  # sample variance of Y
    n1, n2 = len(X), len(Y)
    # BUGFIX: the second denominator term was previously
    # "(s2/n2)**2 / n2 - 1" which, by operator precedence, subtracted 1 from
    # the whole term instead of dividing by (n2 - 1).
    df = (s1 / n1 + s2 / n2) ** 2 / (
        (s1 / n1) ** 2 / (n1 - 1) + (s2 / n2) ** 2 / (n2 - 1)
    )
    return df
#
# # Maximizing/Matching CONTROL
# Display the per-hole choice counts (and their row sums, i.e. total trials)
# for single-player ('s_') and control ('c_') sessions, games 1 and 2.
# In[218]:
s_g1_counts
# In[219]:
s_g1_counts.sum(axis=1)
# In[220]:
s_g2_counts
# In[221]:
s_g2_counts.sum(axis=1)
# In[222]:
c_g1_counts
# In[223]:
c_g1_counts.sum(axis=1)
# In[224]:
c_g2_counts
# In[225]:
c_g2_counts.sum(axis=1)
# -
# --- Reshape scores to long format and test game x group effects. ---
# NOTE: this rebinds control_scores (previously a per-participant total Series).
#combine control_score_2 and control_score_1
control_scores = control_scores_1.merge(control_scores_2,right_index=True,left_index=True)
control_scores
# +
#convert from wide-format to long-format
#pd.melt(control_scores,id_vars=0, value_vars=['control_score_1','control_score_2'],ignore_index=False)
# ignore_index=False keeps the participant index (requires pandas >= 1.1).
control_scores = pd.melt(control_scores,ignore_index=False)
# +
# Tag each row with its experiment group: 3 = single ('s..'), 4 = control ('c..').
subjects = control_scores.index.to_numpy()
experiment = np.array([])
for i in subjects:
    # [i][0][0] is just the first character of the participant id.
    if [i][0][0] == 's':
        experiment = np.append(experiment,3)
    elif [i][0][0] == 'c':
        experiment = np.append(experiment,4)
control_scores['experiment'] = experiment
# -
experiment
# NOTE(review): reset_index() is not assigned back, so control_scores keeps
# participant in the index; confirm pg.mixed_anova finds the 'participant'
# column it needs below.
control_scores.reset_index()
'''Here we conduct a 2 x 2 mixed measures ANOVA to compare the scores in games 1 and 2'''
pg.mixed_anova(data=control_scores, dv='value', between='experiment', within='variable', subject='participant')
import patsy
import statsmodels.api as sm
# +
'''Here we conduct a linear regression to compare the scores in games 1 and 2'''
outcome_1,predictors_1 = patsy.dmatrices("value ~ variable + experiment", control_scores)
mod_1 = sm.OLS(outcome_1,predictors_1)
res_1 = mod_1.fit()
print(res_1.summary())
# +
'''Here we place the hole counts from into dataframes, combining SMAB3 and SMAB4.'''
# One row per participant; columns are the per-hole choice counts for game 1.
control_counts_g1 = pd.DataFrame([s01_p1_g1_count, s03_p1_g1_count, s05_p1_g1_count, s06_p1_g1_count, s07_p1_g1_count,
s08_p1_g1_count, s09_p1_g1_count, s11_p1_g1_count, s12_p1_g1_count, s15_p1_g1_count, s16_p1_g1_count,
s17_p1_g1_count, s19_p1_g1_count, s20_p1_g1_count,c02_p1_g1_count, c03_p1_g1_count,
c05_p1_g1_count, c06_p1_g1_count, c09_p1_g1_count, c10_p1_g1_count, c11_p1_g1_count, c12_p1_g1_count, c13_p1_g1_count, c14_p1_g1_count, c16_p1_g1_count, c17_p1_g1_count],
index=['s01_p1_g1_count', 's03_p1_g1_count',
's05_p1_g1_count', 's06_p1_g1_count', 's07_p1_g1_count', 's08_p1_g1_count',
's09_p1_g1_count', 's11_p1_g1_count', 's12_p1_g1_count', 's15_p1_g1_count',
's16_p1_g1_count','s17_p1_g1_count', 's19_p1_g1_count', 's20_p1_g1_count','c02_p1_g1_count', 'c03_p1_g1_count', 'c05_p1_g1_count', 'c06_p1_g1_count',
'c09_p1_g1_count', 'c10_p1_g1_count', 'c11_p1_g1_count', 'c12_p1_g1_count', 'c13_p1_g1_count', 'c14_p1_g1_count',
'c16_p1_g1_count', 'c17_p1_g1_count'])
control_counts_g1
# In[228]:
# Same structure for game 2.
control_counts_g2 = pd.DataFrame([s01_p1_g2_count, s03_p1_g2_count,
s05_p1_g2_count, s06_p1_g2_count, s07_p1_g2_count, s08_p1_g2_count, s09_p1_g2_count,
s11_p1_g2_count, s12_p1_g2_count, s15_p1_g2_count, s16_p1_g2_count,
s17_p1_g2_count, s19_p1_g2_count, s20_p1_g2_count,c02_p1_g2_count, c03_p1_g2_count,
c05_p1_g2_count, c06_p1_g2_count, c09_p1_g2_count, c10_p1_g2_count, c11_p1_g2_count, c12_p1_g2_count, c13_p1_g2_count, c14_p1_g2_count, c16_p1_g2_count, c17_p1_g2_count],
index= ['s01_p1_g2_count', 's03_p1_g2_count', 's05_p1_g2_count', 's06_p1_g2_count',
's07_p1_g2_count', 's08_p1_g2_count', 's09_p1_g2_count', 's11_p1_g2_count',
's12_p1_g2_count', 's15_p1_g2_count', 's16_p1_g2_count','s17_p1_g2_count',
's19_p1_g2_count', 's20_p1_g2_count','c02_p1_g2_count', 'c03_p1_g2_count', 'c05_p1_g2_count', 'c06_p1_g2_count',
'c09_p1_g2_count', 'c10_p1_g2_count', 'c11_p1_g2_count', 'c12_p1_g2_count', 'c13_p1_g2_count', 'c14_p1_g2_count',
'c16_p1_g2_count', 'c17_p1_g2_count'])
# In[229]:
control_counts_g2
# -
# Shorten the row labels to bare participant ids, merge the two games
# side-by-side, then melt to long format and decode the melted column names.
subjects_1 = control_counts_g1.index.to_numpy()
# +
#combine control_counts_2 and control_counts_1
#modify the index to be only participant
# (duplicate of the line above; harmless)
subjects_1 = control_counts_g1.index.to_numpy()
subjects_2 = control_counts_g2.index.to_numpy()
subjects_new1 = np.array([])
subjects_new2 = np.array([])
ii = 0
for i in subjects_1:
    # keep only the 3-char participant id, e.g. 's01_p1_g1_count' -> 's01'
    subjects_new1 = np.append(subjects_new1,subjects_1[ii][0:3])
    ii = ii +1
iii = 0
for i in subjects_2:
    subjects_new2 = np.append(subjects_new2,subjects_2[iii][0:3])
    iii = iii +1
control_counts_g1.index = subjects_new1
control_counts_g2.index = subjects_new2
# merge suffixes the shared count columns with '_x' (game 1) / '_y' (game 2)
control_counts = control_counts_g1.merge(control_counts_g2,right_index=True,left_index=True)
# -
control_counts = pd.melt(control_counts,ignore_index=False)
control_counts
length = len(control_counts['variable'][0])
control_counts['variable'][0][length-1]
# +
#clean up control_counts
#modify 'variable' to be only '30','60', or '90' and add a column 'game' specifying the game #
variable = control_counts['variable'].to_numpy()
choice = np.array([])
game = np.array([])
ii = 0
for i in variable:
    length = len(variable[ii])
    # trailing '_x'/'_y' suffix (from the merge) encodes the game number
    if variable[ii][length - 1] == 'x':
        game = np.append(game,'1')
    elif variable[ii][len(variable[ii]) - 1] == 'y':
        game = np.append(game,'2')
    choice = np.append(choice,variable[ii][0:2])
    ii = ii +1
control_counts['variable'] = choice
control_counts['game'] = game
# -
control_counts
# +
# In[230]:
#Here we compare the choice distributions to idealized models of two decision-making strategies:
#matching and maximizing. Matching would be choosing each each hole proportionally to its reward rate,
#and maximizing would be always choosing the high reward hole.
# In[231]:
#JSD: Jensen Shannon Divergence. Measures the similarity between two probability distributions. Taken from
#https://urldefense.com/v3/__https://gist.github.com/zhiyzuo/f80e2b1cfb493a5711330d271a228a3d__;!!Mih3wA!SwZbl3yG75UWaB_c9Pq_T5wxVHgFZMbUZ5HHf5pZDf119g1JHaZr-dX94Xg7JRI$ . *Note: probability distributions need to be
#normalized.'''
#parameters: p and q are the two probability distributions to be compared.
def jsd(p, q, base=np.e):
    """Jensen-Shannon divergence between two (un-normalized) distributions.

    Both inputs are normalized to sum to 1 before comparison, so raw choice
    counts may be passed directly. Returns 0 for identical distributions and
    log(2) (in the given base) for fully disjoint ones.

    Reference: https://en.wikipedia.org/wiki/Jensen-Shannon_divergence
    """
    p_arr = np.asarray(p)
    q_arr = np.asarray(q)
    # normalize each to a probability distribution
    p_arr = p_arr / p_arr.sum()
    q_arr = q_arr / q_arr.sum()
    # mixture distribution
    mix = (p_arr + q_arr) / 2.0
    kl_p = sp.stats.entropy(p_arr, mix, base=base)
    kl_q = sp.stats.entropy(q_arr, mix, base=base)
    return 0.5 * kl_p + 0.5 * kl_q
# In[232]:
#Are ppl using a strategy similar to their partner's? This functions calls jsd for p1 and for p2. *UNFINISHED*
#DO NOT USE.
#calls jsd for p1 and p2 and for p1 and p2 with the mean
#def compare_strat(p1,p2):
#strat = jsd(p1,p2)
# comp_1 = jsd(p1,[j_g1_low,j_g1_med,j_g1_high])
#comp_2 = jsd(p2,[j_g1_low,j_g1_med,j_g1_high])
#return strat, comp_1, comp_2
# In[233]:
#Idealized probability distributions to use for comparison. You can change these or add more models.
# Columns are the [30, 60, 90]-reward holes, expressed as percentages.
maximize = [0,0,100]
match = [30/180 * 100,60/180 * 100,90/180 * 100]
random = [30/90 * 100,30/90 * 100,30/90 * 100]
# 'social' equals uniform here; kept separate so it can diverge later.
social = [1/3 * 100,1/3 * 100, 1/3 * 100]
# In[234]:
#This function calls jsd.
#Input: counts- a player's choice distributions in a particular game (ex. s01_p1_g1_counts)
#Output: maxi- JSD score for the maximizing strategy
#matchi- JSD score for the matching strategy
def call_jsd(counts):
    """Score one choice distribution against each idealized strategy.

    Input: counts -- a player's per-hole choice counts for one game.
    Returns a 4-tuple of JSD scores (maximize, match, random, social);
    lower means closer to that pure strategy.
    """
    models = (maximize, match, random, social)
    maxi, matchi, rando, soci = (jsd(counts, model) for model in models)
    return maxi, matchi, rando, soci
# In[235]:
from scipy import stats
# In[236]:
#if same, 0
#test = jsd(counts,maximize)
# In[237]:
#test
# In[238]:
def check_type(df,*social):
    # NOTE(review): unfinished stub -- builds an empty frame, never uses it,
    # and implicitly returns None. All call sites below are commented out.
    types = pd.DataFrame(index=['type'])
# +
# In[239]:
#Call 'call_jsd' for each participant for each game in the single player sessions.
#with 0, put in actual probabilities in brackets
# Each line yields (max, match, random, social) JSD scores for one
# participant/game. s05 game 2 and s17 game 2 use hand-entered
# probabilities (see the literal lists) per the note above.
s01_g1_max,s01_g1_match,s01_g1_rand,s01_g1_soc = call_jsd(s01_p1_g1_count)
s01_g2_max,s01_g2_match,s01_g2_rand,s01_g2_soc = call_jsd(s01_p1_g2_count)
s03_g1_max,s03_g1_match,s03_g1_rand,s03_g1_soc = call_jsd(s03_p1_g1_count)
s03_g2_max,s03_g2_match,s03_g2_rand,s03_g2_soc = call_jsd(s03_p1_g2_count)
s05_g1_max,s05_g1_match,s05_g1_rand,s05_g1_soc= call_jsd(s05_p1_g1_count)
s05_g2_max,s05_g2_match,s05_g2_rand,s05_g2_soc = call_jsd([11, 0, 89])
s06_g1_max,s06_g1_match,s06_g1_rand,s06_g1_soc = call_jsd(s06_p1_g1_count)
s06_g2_max,s06_g2_match,s06_g2_rand,s06_g2_soc = call_jsd(s06_p1_g2_count)
s07_g1_max,s07_g1_match,s07_g1_rand,s07_g1_soc = call_jsd(s07_p1_g1_count)
s07_g2_max,s07_g2_match,s07_g2_rand,s07_g2_soc = call_jsd(s07_p1_g2_count)
s08_g1_max,s08_g1_match,s08_g1_rand,s08_g1_soc = call_jsd(s08_p1_g1_count)
s08_g2_max,s08_g2_match,s08_g2_rand,s08_g2_soc = call_jsd(s08_p1_g2_count)
s09_g1_max,s09_g1_match,s09_g1_rand,s09_g1_soc = call_jsd(s09_p1_g1_count)
s09_g2_max,s09_g2_match,s09_g2_rand,s09_g2_soc = call_jsd(s09_p1_g2_count)
s11_g1_max,s11_g1_match,s11_g1_rand,s11_g1_soc = call_jsd(s11_p1_g1_count)
s11_g2_max,s11_g2_match,s11_g2_rand,s11_g2_soc = call_jsd(s11_p1_g2_count)
s12_g1_max,s12_g1_match,s12_g1_rand,s12_g1_soc = call_jsd(s12_p1_g1_count)
s12_g2_max,s12_g2_match,s12_g2_rand,s12_g2_soc = call_jsd(s12_p1_g2_count)
s15_g1_max,s15_g1_match,s15_g1_rand,s15_g1_soc = call_jsd(s15_p1_g1_count)
s15_g2_max,s15_g2_match,s15_g2_rand,s15_g2_soc = call_jsd(s15_p1_g2_count)
s16_g1_max,s16_g1_match,s16_g1_rand,s16_g1_soc = call_jsd(s16_p1_g1_count)
s16_g2_max,s16_g2_match,s16_g2_rand,s16_g2_soc = call_jsd(s16_p1_g2_count)
s17_g1_max,s17_g1_match,s17_g1_rand,s17_g1_soc = call_jsd(s17_p1_g1_count)
s17_g2_max,s17_g2_match,s17_g2_rand,s17_g2_soc = call_jsd([0,22,78])
s19_g1_max,s19_g1_match,s19_g1_rand,s19_g1_soc = call_jsd(s19_p1_g1_count)
s19_g2_max,s19_g2_match,s19_g2_rand,s19_g2_soc = call_jsd(s19_p1_g2_count)
s20_g1_max,s20_g1_match,s20_g1_rand,s20_g1_soc = call_jsd(s20_p1_g1_count)
s20_g2_max,s20_g2_match,s20_g2_rand,s20_g2_soc = call_jsd(s20_p1_g2_count)
c02_g1_max,c02_g1_match,c02_g1_rand,c02_g1_soc = call_jsd(c02_p1_g1_count)
c02_g2_max,c02_g2_match,c02_g2_rand,c02_g2_soc = call_jsd(c02_p1_g2_count)
c03_g1_max,c03_g1_match,c03_g1_rand,c03_g1_soc = call_jsd(c03_p1_g1_count)
c03_g2_max,c03_g2_match,c03_g2_rand,c03_g2_soc = call_jsd(c03_p1_g2_count)
c05_g1_max,c05_g1_match,c05_g1_rand,c05_g1_soc = call_jsd(c05_p1_g1_count)
c05_g2_max,c05_g2_match,c05_g2_rand,c05_g2_soc = call_jsd(c05_p1_g2_count)
c06_g1_max,c06_g1_match,c06_g1_rand,c06_g1_soc = call_jsd(c06_p1_g1_count)
# BUGFIX: c06 game-2 scores were previously computed from the *game-1*
# count (c06_p1_g1_count) -- an apparent copy-paste slip, since every other
# participant's g2 line uses its g2 count and the two deliberate
# substitutions (s05, s17 above) use literal lists instead.
c06_g2_max,c06_g2_match,c06_g2_rand,c06_g2_soc = call_jsd(c06_p1_g2_count)
c09_g1_max,c09_g1_match,c09_g1_rand,c09_g1_soc = call_jsd(c09_p1_g1_count)
c09_g2_max,c09_g2_match,c09_g2_rand,c09_g2_soc = call_jsd(c09_p1_g2_count)
c10_g1_max,c10_g1_match,c10_g1_rand,c10_g1_soc = call_jsd(c10_p1_g1_count)
c10_g2_max,c10_g2_match,c10_g2_rand,c10_g2_soc = call_jsd(c10_p1_g2_count)
c11_g1_max,c11_g1_match,c11_g1_rand,c11_g1_soc = call_jsd(c11_p1_g1_count)
c11_g2_max,c11_g2_match,c11_g2_rand,c11_g2_soc= call_jsd(c11_p1_g2_count)
c12_g1_max,c12_g1_match,c12_g1_rand,c12_g1_soc = call_jsd(c12_p1_g1_count)
c12_g2_max,c12_g2_match,c12_g2_rand,c12_g2_soc = call_jsd(c12_p1_g2_count)
c13_g1_max,c13_g1_match,c13_g1_rand,c13_g1_soc = call_jsd(c13_p1_g1_count)
c13_g2_max,c13_g2_match,c13_g2_rand,c13_g2_soc = call_jsd(c13_p1_g2_count)
c14_g1_max,c14_g1_match,c14_g1_rand,c14_g1_soc = call_jsd(c14_p1_g1_count)
c14_g2_max,c14_g2_match,c14_g2_rand,c14_g2_soc = call_jsd(c14_p1_g2_count)
c16_g1_max,c16_g1_match,c16_g1_rand,c16_g1_soc = call_jsd(c16_p1_g1_count)
c16_g2_max,c16_g2_match,c16_g2_rand,c16_g2_soc = call_jsd(c16_p1_g2_count)
c17_g1_max,c17_g1_match,c17_g1_rand,c17_g1_soc = call_jsd(c17_p1_g1_count)
c17_g2_max,c17_g2_match,c17_g2_rand,c17_g2_soc = call_jsd(c17_p1_g2_count)
# +
# In[ ]:
#Create a dataframe for single player game 1 jsd scores, with a column for each strategy.'''
# Rows: participants; columns: JSD distance to each pure strategy
# ('max', 'match', 'rand', 'soc'). Lower = closer to that strategy.
control_jsd_g1 = pd.DataFrame({'max':[s01_g1_max,s03_g1_max,s05_g1_max,s06_g1_max,s07_g1_max,s08_g1_max,
s09_g1_max, s11_g1_max, s12_g1_max, s15_g1_max, s16_g1_max, s17_g1_max,
s19_g1_max, s20_g1_max, c02_g1_max, c03_g1_max, c05_g1_max, c06_g1_max,
c09_g1_max, c10_g1_max, c11_g1_max, c12_g1_max, c13_g1_max, c14_g1_max,
c16_g1_max, c17_g1_max],
'match':[s01_g1_match,s03_g1_match,s05_g1_match
,s06_g1_match,s07_g1_match,s08_g1_match,s09_g1_match, s11_g1_match,
s12_g1_match, s15_g1_match, s16_g1_match, s17_g1_match, s19_g1_match,
s20_g1_match, c02_g1_match, c03_g1_match, c05_g1_match, c06_g1_match,
c09_g1_match, c10_g1_match, c11_g1_match, c12_g1_match, c13_g1_match, c14_g1_match,
c16_g1_match, c17_g1_match],
'rand':[s01_g1_rand,s03_g1_rand,s05_g1_rand,s06_g1_rand,s07_g1_rand,s08_g1_rand,
s09_g1_rand, s11_g1_rand, s12_g1_rand, s15_g1_rand, s16_g1_rand, s17_g1_rand,
s19_g1_rand, s20_g1_rand, c02_g1_rand, c03_g1_rand, c05_g1_rand, c06_g1_rand,
c09_g1_rand, c10_g1_rand, c11_g1_rand, c12_g1_rand, c13_g1_rand, c14_g1_rand,
c16_g1_rand, c17_g1_rand],
'soc':[s01_g1_soc,s03_g1_soc,s05_g1_soc,s06_g1_soc,s07_g1_soc,s08_g1_soc,
s09_g1_soc, s11_g1_soc, s12_g1_soc, s15_g1_soc, s16_g1_soc, s17_g1_soc,
s19_g1_soc, s20_g1_soc, c02_g1_soc, c03_g1_soc, c05_g1_soc, c06_g1_soc,
c09_g1_soc, c10_g1_soc, c11_g1_soc, c12_g1_soc, c13_g1_soc, c14_g1_soc,
c16_g1_soc, c17_g1_soc]},
index=['s01', 's03','s05','s06','s07','s08','s09', 's11', 's12', 's15', 's16', 's17', 's19', 's20', 'c02', 'c03',
'c05', 'c06', 'c09', 'c10', 'c11', 'c12', 'c13', 'c14', 'c16', 'c17'])
# +
# In[ ]:
#closer to 0 = closer to that pure strategy
control_jsd_g1
# In[ ]:
# get the column name of min values in every row
# i.e. each participant's best-fitting strategy in game 1.
strategy_control_jsd_g1 = control_jsd_g1.idxmin(axis=1)
print("min values of row are at following columns :")
print(strategy_control_jsd_g1)
# In[ ]:
# How many participants fall under each strategy label.
strategy_control_jsd_g1.value_counts()
# In[ ]:
#control_types_g1 = check_type(control_jsd_g1)
# In[ ]:
#control_types_g1.drop([0])
# In[ ]:
#(control_types_g1 == 'maxi').sum()
# In[ ]:
#Create a dataframe for single player game 1 jsd scores, with a column for each strategy.'''
# Same layout as control_jsd_g1, for game 2.
control_jsd_g2 = pd.DataFrame({'max':[s01_g2_max,s03_g2_max,s05_g2_max,s06_g2_max,s07_g2_max,s08_g2_max,
s09_g2_max, s11_g2_max, s12_g2_max, s15_g2_max, s16_g2_max, s17_g2_max,
s19_g2_max, s20_g2_max, c02_g2_max, c03_g2_max, c05_g2_max, c06_g2_max,
c09_g2_max, c10_g2_max, c11_g2_max, c12_g2_max, c13_g2_max, c14_g2_max,
c16_g2_max, c17_g2_max],
'match':[s01_g2_match,s03_g2_match,s05_g2_match
,s06_g2_match,s07_g2_match,s08_g2_match,s09_g2_match, s11_g2_match,
s12_g2_match, s15_g2_match, s16_g2_match, s17_g2_match, s19_g2_match,
s20_g2_match, c02_g2_match, c03_g2_match, c05_g2_match, c06_g2_match,
c09_g2_match, c10_g2_match, c11_g2_match, c12_g2_match, c13_g2_match, c14_g2_match,
c16_g2_match, c17_g2_match],
'rand':[s01_g2_rand,s03_g2_rand,s05_g2_rand,s06_g2_rand,s07_g2_rand,s08_g2_rand,
s09_g2_rand, s11_g2_rand, s12_g2_rand, s15_g2_rand, s16_g2_rand, s17_g2_rand,
s19_g2_rand, s20_g2_rand, c02_g2_rand, c03_g2_rand, c05_g2_rand, c06_g2_rand,
c09_g2_rand, c10_g2_rand, c11_g2_rand, c12_g2_rand, c13_g2_rand, c14_g2_rand,
c16_g2_rand, c17_g2_rand],
'soc':[s01_g2_soc,s03_g2_soc,s05_g2_soc,s06_g2_soc,s07_g2_soc,s08_g2_soc,
s09_g2_soc, s11_g2_soc, s12_g2_soc, s15_g2_soc, s16_g2_soc, s17_g2_soc,
s19_g2_soc, s20_g2_soc, c02_g2_soc, c03_g2_soc, c05_g2_soc, c06_g2_soc,
c09_g2_soc, c10_g2_soc, c11_g2_soc, c12_g2_soc, c13_g2_soc, c14_g2_soc,
c16_g2_soc, c17_g2_soc]},
index=['s01', 's03','s05','s06','s07','s08','s09', 's11', 's12', 's15', 's16', 's17', 's19', 's20', 'c02', 'c03',
'c05', 'c06', 'c09', 'c10', 'c11', 'c12', 'c13', 'c14', 'c16', 'c17'])
# +
# In[ ]:
# Game-2 best-fit strategy per participant, plus column-wise min/max ranges.
control_jsd_g2
# In[ ]:
# get the column name of min values in every row
strategy_control_jsd_g2 = control_jsd_g2.idxmin(axis=1)
print("min values of row are at following columns :")
print(strategy_control_jsd_g2)
# In[ ]:
minValues_control_jsd_g2 = control_jsd_g2.min()
print('minimum value in each column : ')
print(minValues_control_jsd_g2)
# In[ ]:
maxValues_control_jsd_g2 = control_jsd_g2.max()
print('maximum value in each column : ')
print(maxValues_control_jsd_g2)
# In[ ]:
strategy_control_jsd_g2.value_counts()
# In[ ]:
#control_types_g2 = check_type(control_jsd_g2)
# In[ ]:
#control_types_g2.drop([0])
# In[ ]:
#(control_types_g2 == 'maxi').sum()
# In[ ]:
#how different max and match dists are from eachother
#neither max or match(?)
#threshold = jsd(maximize,match)
# +
# In[ ]:
import matplotlib.lines as mlines
# In[ ]:
#colors_jsd = [['lightcoral','red','lightcoral','red','bisque','orange','bisque','orange','palegreen','green','palegreen','green',
#'lightblue','blue','lightblue','blue','mediumslateblue','indigo','mediumslateblue','indigo','lightpink','deeppink',
#'lightpink','deeppink','yellow','gold','yellow','gold','lightgray','gray','lightgray','gray']
# In[ ]:
import itertools
# In[ ]:
#Here I print the means of the JSD values in each condition, just to see what they are.'''
# In[ ]:
#run t test to see sig between max and match per game
#put in box plot
# Column-wise mean/std of the JSD scores for each game.
control_jsd_g1.mean()
# In[ ]:
control_jsd_g1.std()
# In[ ]:
control_jsd_g2.mean()
# In[ ]:
control_jsd_g2.std()
# # Control z-scores
# In[ ]:
# Standardize each strategy column (z-scores) to spot outliers.
sp.stats.zscore(control_jsd_g1['max'])
# In[ ]:
sp.stats.zscore(control_jsd_g2['max'])
# In[ ]:
sp.stats.zscore(control_jsd_g1['match'])
# In[ ]:
sp.stats.zscore(control_jsd_g2['match'])
# In[ ]:
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
# if using a Jupyter notebook, inlcude:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[ ]:
# Illustrative standard-normal figure shading the z-interval for two
# specific JSD values against the game-1 'max' distribution.
# NOTE(review): mu/sigma/x1/x2 are hard-coded from an earlier run's printed
# statistics -- they will silently go stale if the data change.
mu = 0.217806
sigma = 0.118674
x1 = 0.035604
x2 = 0.442841
# In[ ]:
# calculate the z-transform
z1 = ( x1 - mu ) / sigma
z2 = ( x2 - mu ) / sigma
# In[ ]:
x = np.arange(z1, z2, 0.001) # range of x in spec
x_all = np.arange(-10, 10, 0.001) # entire range of x, both in and out of spec
# mean = 0, stddev = 1, since Z-transform was calculated
y = norm.pdf(x,0,1)
y2 = norm.pdf(x_all,0,1)
# In[ ]:
# build the plot
fig, ax = plt.subplots(figsize=(9,6))
plt.style.use('fivethirtyeight')
ax.plot(x_all,y2)
ax.fill_between(x,y,0, alpha=0.3, color='b')
ax.fill_between(x_all,y2,0, alpha=0.1)
ax.set_xlim([-4,4])
ax.set_xlabel('# of Standard Deviations Outside the Mean')
ax.set_yticklabels([])
ax.set_title('Normal Gaussian Curve')
plt.savefig('normal_curve.png', dpi=72, bbox_inches='tight')
plt.show()
# +
# # Control strategy stats
# In[ ]:
control_jsd_g1.dropna(inplace = True)
# # Max strategy
# In[ ]:
control_max_g1 = control_jsd_g1.loc[ : , 'max' ]
# In[ ]:
control_max_g1
# In[ ]:
control_max_g2 = control_jsd_g2.loc[ : , 'max' ]
# In[ ]:
control_max_g2
# In[ ]:
control_max_g1_mean = control_max_g1.mean()
# In[ ]:
control_max_g1_mean
# In[ ]:
control_max_g1_std = control_max_g1.std()
# In[ ]:
control_max_g1_std
# In[ ]:
control_max_g2_mean = control_max_g2.mean()
# In[ ]:
control_max_g2_mean
# In[ ]:
control_max_g2_std = control_max_g2.std()
control_max_g2_std
# Max T-Test
# In[ ]:
control_max_p = sp.stats.ttest_rel(control_max_g1,control_max_g2)
# In[ ]:
control_max_p
# # Match strategy
# Descriptives + paired t-test of the 'match' JSD scores across games.
# In[ ]:
control_match_g1 = control_jsd_g1.loc[ : , 'match' ]
# In[ ]:
control_match_g1
# In[ ]:
control_match_g2 = control_jsd_g2.loc[ : , 'match' ]
# In[ ]:
control_match_g2
# In[ ]:
control_match_g1_mean = control_match_g1.mean()
# In[ ]:
control_match_g1_mean
# In[ ]:
control_match_g1_std = control_match_g1.std()
# In[ ]:
control_match_g1_std
# In[ ]:
control_match_g2_mean = control_match_g2.mean()
# In[ ]:
control_match_g2_mean
# In[ ]:
control_match_g2_std = control_match_g2.std()
# In[ ]:
control_match_g2_std
# In[ ]:
control_match_p = sp.stats.ttest_rel(control_match_g1,control_match_g2)
# In[ ]:
control_match_p
# # Random strategy
# Descriptives + paired t-test of the 'rand' JSD scores across games.
# In[ ]:
control_rand_g1 = control_jsd_g1.loc[ : , 'rand' ]
# In[ ]:
control_rand_g1
# In[ ]:
control_rand_g2 = control_jsd_g2.loc[ : , 'rand' ]
# In[ ]:
control_rand_g2
# In[ ]:
control_rand_g1_mean = control_rand_g1.mean()
# In[ ]:
control_rand_g1_mean
# In[ ]:
control_rand_g1_std = control_rand_g1.std()
# In[ ]:
control_rand_g1_std
# In[ ]:
control_rand_g2_mean = control_rand_g2.mean()
# In[ ]:
control_rand_g2_mean
# In[ ]:
control_rand_g2_std = control_rand_g2.std()
control_rand_g2_std
# In[ ]:
control_rand_p = sp.stats.ttest_rel(control_rand_g1,control_rand_g2)
# In[ ]:
control_rand_p
# # Social strategy
# Descriptives + paired t-test of the 'soc' JSD scores across games.
# In[ ]:
control_soc_g1 = control_jsd_g1.loc[ : , 'soc' ]
# In[ ]:
control_soc_g1
# In[ ]:
control_soc_g2 = control_jsd_g2.loc[ : , 'soc' ]
# In[ ]:
control_soc_g2
# In[ ]:
control_soc_g1_mean = control_soc_g1.mean()
# In[ ]:
control_soc_g1_mean
# In[ ]:
control_soc_g1_std = control_soc_g1.std()
# In[ ]:
control_soc_g1_std
# In[ ]:
control_soc_g2_mean = control_soc_g2.mean()
# In[ ]:
control_soc_g2_mean
# In[ ]:
control_soc_g2_std = control_soc_g2.std()
# In[ ]:
control_soc_g2_std
# In[ ]:
control_soc_p = sp.stats.ttest_rel(control_soc_g1,control_soc_g2)
# In[ ]:
control_soc_p
# In[ ]:
#control_scores_p = sp.stats.ttest_rel(control_scores['Game 1'],control_scores['Game 2'])
# In[ ]:
#control_scores_p
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
import matplotlib.patches as mpatches
# In[ ]:
#,'orange','green','blue','pink','purple','peru','cyan','red','lightcoral','orange','darkorange','green','palegreen','blue', 'peru','cyan', 'red','lightcoral', 'orange','darkorange','green','palegreen','blue','palegreen','blue']
# In[ ]:
#Here I think I tried to create colors for each subject in each condition for use in graphs. I DON'T think this is
#a good way to do this.'''
#colors_jsd_j = ['red','lightcoral','red','darkred','green','red',
#    'blue','darkblue','hotblue','blue','purple','plum','peru','saddlebrown','gray','lightgray']
# Per-participant marker colors, ordered to match the 26-row JSD frames.
colors_jsd_control_g1_ego = ['red','red','red','red','red','blue','red','red','red','red',
'red','blue','red','red',
'red', 'blue','red', 'red','red', 'red','red','red','red','red','red','red']
colors_jsd_control_g2_ego = ['red','red','blue','blue','red','red','red','red','blue','blue',
'red','red','red','red',
'red', 'blue','red', 'red','red', 'red','red','red','red','red','red','red']
colors_jsd_control_g1_joint = ['green','green','green','green','green','green','green','green'
,'green','green', 'green','green','green','green',
'green','green','green','green','green','green','green','green','green'
,'green','green','green']
colors_jsd_control_g2_joint = ['green','green','green','green','green','green','green','green'
,'green','green', 'green','green','green','green',
'green','green','green','green','green','green','green','green','green'
,'green','green','green']
#markers_jsd = ['o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v'
#               'o','v','o','v','o','v','o','v']
# NOTE(review): bare `colors` is not defined in this chunk -- unless it was
# assigned earlier in the notebook this cell raises NameError.
colors
# +
# # Graphs
# In[ ]:
#Here I try to plot the JSD Values. Continued below under 'JSD with gender.
# In[ ]:
#under line = matching (closer to 0)
#plots of actual jsd values
m, b = np.polyfit(control_jsd_g1['max'], control_jsd_g1['match'], 1)
control_ego_jsd_g1_plot = control_jsd_g1.plot.scatter(x='max',y='match',s=50,color=colors_jsd_control_g1_ego)
plt.title('Control Egocentric Strategies Game 1', fontsize=18, y=1.05)
control_ego_jsd_g1_plot.plot(control_jsd_g1['max'], m*control_jsd_g1['max'] + b, linewidth=2)
plt.xticks(np.arange(0, 1, step=0.1))
plt.yticks(np.arange(0, 1, step=0.1))
#line1 = mlines.Line2D([0.2148, 0.2148],[-0.03, 0.5], color='red')
#line2 = mlines.Line2D([0,0.8],[0.2148,0.2148])
#plt.xlabel('Match')
#plt.ylabel('Maximize')
plt.ylim(-.1, .5)
plt.xlim(-.1, .5)
plt.gca().set_aspect('equal', adjustable='box')
plt.draw()
#add legend (NEEDS TO BE FIXED)
from numpy.random import randn
z = randn(100)
#blue_dot, = plt.plot(z, "bo", markersize=10)
#red_dot, = plt.plot(z, "ro", markersize=10)
#plt.legend([blue_dot, red_dot], ["Maximize", "Match"])
line1 = mlines.Line2D([-1, .5],[-1, .5], color='black', linewidth=1)
control_ego_jsd_g1_plot.add_line(line1)
#s_jsd_g2_plot.add_line(line2)
plt.savefig('control_ego_jsd_1.eps',format='eps',bbox_inches='tight',dpi=1000)
plt.show()
# +
# In[ ]:
# The two triple-quoted strings below are disabled draft cells (an annotated
# scatter and a 'joint' strategies plot); they are not executed.
'''ontrol_ego_jsd_g1_plot = control_jsd_g1.plot.scatter(x='max',y='match',s=50,color=colors_jsd_control_g1_ego)
hist, xbins,ybins = control_ego_jsd_g1_plot.figure.histogram(y,x, bins=range(6))
x,y = np.meshgrid(xbins[:-1], ybins[:-1])
x = x[hist != 0]; y = y[hist != 0]
z = hist[hist != 0]
fig, ax = plt.subplots()
ax.scatter(x,y, s=49, alpha=0.4)
for i in range(len(z)):
    ax.annotate(str(int(z[i])), xy=(x[i],y[i]), xytext=(4,0),
                textcoords="offset points" )
plt.show()'''
# In[ ]:
'''#under line = matching (closer to 0)
#plots of atual jsd values
m, b = np.polyfit(control_jsd_g1['rand'], control_jsd_g1['soc'], 1)
control_joint_jsd_g1_plot = control_jsd_g1.plot.scatter(x='rand',y='soc',s=50,color=colors_jsd_control_g1_joint)
plt.title('Control Social Strategies Game 1', fontsize=18, y=1.05)
control_joint_jsd_g2_plot.plot(control_jsd_g1['rand'], m*control_jsd_g1['soc'] + b, linewidth=2)
plt.xticks(np.arange(0, 1, step=0.1))
plt.yticks(np.arange(0, 1, step=0.1))
#line1 = mlines.Line2D([0.2148, 0.2148],[-0.03, 0.5], color='red')
#line2 = mlines.Line2D([0,0.8],[0.2148,0.2148])
#plt.xlabel('Match')
#plt.ylabel('Maximize')
plt.ylim(-.1, .5)
plt.xlim(-.1, .5)
#make axes equal length
plt.gca().set_aspect('equal', adjustable='box')
plt.draw()
#add legend FIX
#from numpy.random import randn
#z = randn(100)
#blue_dot, = plt.plot(z, "bo", markersize=10)
#red_dot, = plt.plot(z, "ro", markersize=10)
#plt.legend([blue_dot, red_dot], ["Maximize", "Match"])
#add line
line1 = mlines.Line2D([-1, .5],[-1, .5], color='black', linewidth=1)
control_joint_jsd_g1_plot.add_line(line1)
F.set_figsize_inches( (DefaultSize[0]*2, DefaultSize[1]*2) )
Size = F.get_size_inches()
plt.savefig('control_joint_jsd_1.eps',format='eps',dpi=1000)
plt.show()
'''
# +
# In[ ]:
#under line = matching (closer to 0)
#plots of actual jsd values
# Game-2 counterpart of the max-vs-match scatter above.
m, b = np.polyfit(control_jsd_g2['max'], control_jsd_g2['match'], 1)
control_ego_jsd_g2_plot = control_jsd_g2.plot.scatter(x='max',y='match',s=50,color=colors_jsd_control_g2_ego)
plt.title('Control Egocentric Strategies Game 2', fontsize=18, y=1.05)
control_ego_jsd_g2_plot.plot(control_jsd_g2['max'], m*control_jsd_g2['max'] + b, linewidth=2)
plt.xticks(np.arange(0, 1, step=0.1))
plt.yticks(np.arange(0, 1, step=0.1))
#line1 = mlines.Line2D([0.2148, 0.2148],[-0.03, 0.5], color='red')
#line2 = mlines.Line2D([0,0.8],[0.2148,0.2148])
#plt.xlabel('Match')
#plt.ylabel('Maximize')
plt.ylim(-.1, .5)
plt.xlim(-.1, .5)
plt.gca().set_aspect('equal', adjustable='box')
plt.draw()
#add legend (NEEDS TO BE FIXED)
from numpy.random import randn
z = randn(100)
#blue_dot, = plt.plot(z, "bo", markersize=10)
#red_dot, = plt.plot(z, "ro", markersize=10)
#plt.legend([blue_dot, red_dot], ["Maximize", "Match"])
# identity (y = x) reference line
line1 = mlines.Line2D([-1, .5],[-1, .5], color='black', linewidth=1)
control_ego_jsd_g2_plot.add_line(line1)
#s_jsd_g2_plot.add_line(line2)
plt.savefig('control_ego_jsd_2.eps',format='eps',bbox_inches='tight',dpi=1000)
plt.show()
# -
# # SMAB4 Joint Analysis
# +
# In[ ]:
# Disabled draft cell (triple-quoted string, not executed): game-2 'joint'
# strategies scatter.
'''#under line = matching (closer to 0)
#plots of atual jsd values
control_joint_jsd_g2_plot = control_jsd_g2.plot.scatter(x='rand',y='soc',s=50,color=colors_jsd_control_g2_joint,title='Control Social Strategies Game 2')
plt.xticks(np.arange(0, 1, step=0.1))
plt.yticks(np.arange(0, 1, step=0.1))
#line1 = mlines.Line2D([0.2148, 0.2148],[-0.03, 0.5], color='red')
#line2 = mlines.Line2D([0,0.8],[0.2148,0.2148])
#plt.xlabel('Match')
#plt.ylabel('Maximize')
plt.ylim(-.1, .5)
plt.xlim(-.1, .5)
#make axes equal length
plt.gca().set_aspect('equal', adjustable='box')
plt.draw()
#add legend FIX
#from numpy.random import randn
#z = randn(100)
#blue_dot, = plt.plot(z, "bo", markersize=10)
#red_dot, = plt.plot(z, "ro", markersize=10)
#plt.legend([blue_dot, red_dot], ["Maximize", "Match"])
#add line
line1 = mlines.Line2D([-1, .5],[-1, .5], color='black')
control_joint_jsd_g2_plot.add_line(line1)
plt.savefig('control_joint_jsd_1.eps',format='eps',dpi=1000)
plt.show()
# save as jpeg
control_joint_jsd_g2_plot.figure.savefig(plot_file_name,
                     format='jpeg',
                     dpi=100)'''
# +
'''Here we load the csv files into Pandas dataframes by calling organize_trial_by_trial and get the score of each subject
in each game by calling call_get_score.'''
# Joint (two-player) sessions j01-j11. call_get_score(..., 'j') returns, per
# player: game-1 score, game-2 score, score difference, and half-way ('_50')
# scores for each game. j10/j11 use the float-click CSV variant loader.
df = organize_trial_by_trial('data/j01_gamedata.csv')
j01 = df
j01_p1_g1_score,j01_p1_g2_score,j01_p2_g1_score,j01_p2_g2_score,j01_p1_diff,j01_p2_diff,j01_p1_g1_50,j01_p1_g2_50,j01_p2_g1_50,j01_p2_g2_50= call_get_score(j01, 'j')
j01.head()
df = organize_trial_by_trial('data/j02_gamedata.csv')
j02 = df
j02_p1_g1_score,j02_p1_g2_score,j02_p2_g1_score,j02_p2_g2_score,j02_p1_diff,j02_p2_diff,j02_p1_g1_50,j02_p1_g2_50,j02_p2_g1_50,j02_p2_g2_50 = call_get_score(j02, 'j')
j02.head()
df = organize_trial_by_trial('data/j03_gamedata.csv')
j03 = df
j03_p1_g1_score,j03_p1_g2_score,j03_p2_g1_score,j03_p2_g2_score,j03_p1_diff,j03_p2_diff,j03_p1_g1_50,j03_p1_g2_50,j03_p2_g1_50,j03_p2_g2_50 = call_get_score(j03, 'j')
j03.head()
df = organize_trial_by_trial('data/j04_gamedata.csv')
j04 = df
j04_p1_g1_score,j04_p1_g2_score,j04_p2_g1_score,j04_p2_g2_score,j04_p1_diff,j04_p2_diff,j04_p1_g1_50,j04_p1_g2_50,j04_p2_g1_50,j04_p2_g2_50 = call_get_score(j04, 'j')
j04.head()
df = organize_trial_by_trial('data/j05_gamedata.csv')
j05 = df
j05_p1_g1_score,j05_p1_g2_score,j05_p2_g1_score,j05_p2_g2_score,j05_p1_diff,j05_p2_diff,j05_p1_g1_50,j05_p1_g2_50,j05_p2_g1_50,j05_p2_g2_50 = call_get_score(j05, 'j')
j05.head()
df = organize_trial_by_trial('data/j06_gamedata.csv')
j06 = df
j06_p1_g1_score,j06_p1_g2_score,j06_p2_g1_score,j06_p2_g2_score,j06_p1_diff,j06_p2_diff,j06_p1_g1_50,j06_p1_g2_50,j06_p2_g1_50,j06_p2_g2_50 = call_get_score(j06, 'j')
j06.head()
df = organize_trial_by_trial('data/j07_gamedata.csv')
j07 = df
j07_p1_g1_score,j07_p1_g2_score,j07_p2_g1_score,j07_p2_g2_score,j07_p1_diff,j07_p2_diff,j07_p1_g1_50,j07_p1_g2_50,j07_p2_g1_50,j07_p2_g2_50 = call_get_score(j07, 'j')
j07.head()
df = organize_trial_by_trial('data/j08_gamedata.csv')
j08 = df
j08_p1_g1_score,j08_p1_g2_score,j08_p2_g1_score,j08_p2_g2_score,j08_p1_diff,j08_p2_diff,j08_p1_g1_50,j08_p1_g2_50,j08_p2_g1_50,j08_p2_g2_50 = call_get_score(j08, 'j')
j08.head()
df = organize_trial_by_trial_floatclick('data/j10_gamedata.csv')
j10 = df
j10_p1_g1_score,j10_p1_g2_score,j10_p2_g1_score,j10_p2_g2_score,j10_p1_diff,j10_p2_diff,j10_p1_g1_50,j10_p1_g2_50,j10_p2_g1_50,j10_p2_g2_50 = call_get_score(j10, 'j')
j10.head()
df = organize_trial_by_trial_floatclick('data/j11_gamedata.csv')
j11 = df
j11_p1_g1_score,j11_p1_g2_score,j11_p2_g1_score,j11_p2_g2_score,j11_p1_diff,j11_p2_diff,j11_p1_g1_50,j11_p1_g2_50,j11_p2_g1_50,j11_p2_g2_50 = call_get_score(j11, 'j')
j11.head()
# +
'''Here we separate the dataframes by suject and game by calling separate_df. We also create arrays with the number of
times that each subject chose each hole by calling value_counts.'''
# separate_df splits each session into (p1 game1, p1 game2, p2 game1, p2 game2).
# value_counts(sort=False) keeps hole order stable across participants so the
# resulting count Series align column-wise.
j01_p1_g1, j01_p1_g2, j01_p2_g1, j01_p2_g2 = separate_df(j01, 'j')
j02_p1_g1, j02_p1_g2, j02_p2_g1, j02_p2_g2 = separate_df(j02, 'j')
j03_p1_g1, j03_p1_g2, j03_p2_g1, j03_p2_g2 = separate_df(j03, 'j')
j04_p1_g1, j04_p1_g2, j04_p2_g1, j04_p2_g2 = separate_df(j04, 'j')
j05_p1_g1, j05_p1_g2, j05_p2_g1, j05_p2_g2 = separate_df(j05, 'j')
j06_p1_g1, j06_p1_g2, j06_p2_g1, j06_p2_g2 = separate_df(j06, 'j')
j07_p1_g1, j07_p1_g2, j07_p2_g1, j07_p2_g2 = separate_df(j07, 'j')
j08_p1_g1, j08_p1_g2, j08_p2_g1, j08_p2_g2 = separate_df(j08, 'j')
j10_p1_g1, j10_p1_g2, j10_p2_g1, j10_p2_g2 = separate_df(j10, 'j')
j11_p1_g1, j11_p1_g2, j11_p2_g1, j11_p2_g2 = separate_df(j11, 'j')
j01_p1_g1_count = j01_p1_g1['probability'].value_counts(sort=False)
j01_p1_g2_count = j01_p1_g2['probability'].value_counts(sort=False)
j01_p2_g1_count = j01_p2_g1['probability'].value_counts(sort=False)
j01_p2_g2_count = j01_p2_g2['probability'].value_counts(sort=False)
j02_p1_g1_count = j02_p1_g1['probability'].value_counts(sort=False)
j02_p1_g2_count = j02_p1_g2['probability'].value_counts(sort=False)
j02_p2_g1_count = j02_p2_g1['probability'].value_counts(sort=False)
j02_p2_g2_count = j02_p2_g2['probability'].value_counts(sort=False)
j03_p1_g1_count = j03_p1_g1['probability'].value_counts(sort=False)
j03_p1_g2_count = j03_p1_g2['probability'].value_counts(sort=False)
j03_p2_g1_count = j03_p2_g1['probability'].value_counts(sort=False)
j03_p2_g2_count = j03_p2_g2['probability'].value_counts(sort=False)
j04_p1_g1_count = j04_p1_g1['probability'].value_counts(sort=False)
j04_p1_g2_count = j04_p1_g2['probability'].value_counts(sort=False)
j04_p2_g1_count = j04_p2_g1['probability'].value_counts(sort=False)
j04_p2_g2_count = j04_p2_g2['probability'].value_counts(sort=False)
j05_p1_g1_count = j05_p1_g1['probability'].value_counts(sort=False)
j05_p1_g2_count = j05_p1_g2['probability'].value_counts(sort=False)
j05_p2_g1_count = j05_p2_g1['probability'].value_counts(sort=False)
j05_p2_g2_count = j05_p2_g2['probability'].value_counts(sort=False)
j06_p1_g1_count = j06_p1_g1['probability'].value_counts(sort=False)
j06_p1_g2_count = j06_p1_g2['probability'].value_counts(sort=False)
j06_p2_g1_count = j06_p2_g1['probability'].value_counts(sort=False)
j06_p2_g2_count = j06_p2_g2['probability'].value_counts(sort=False)
j07_p1_g1_count = j07_p1_g1['probability'].value_counts(sort=False)
j07_p1_g2_count = j07_p1_g2['probability'].value_counts(sort=False)
j07_p2_g1_count = j07_p2_g1['probability'].value_counts(sort=False)
j07_p2_g2_count = j07_p2_g2['probability'].value_counts(sort=False)
j08_p1_g1_count = j08_p1_g1['probability'].value_counts(sort=False)
j08_p1_g2_count = j08_p1_g2['probability'].value_counts(sort=False)
j08_p2_g1_count = j08_p2_g1['probability'].value_counts(sort=False)
j08_p2_g2_count = j08_p2_g2['probability'].value_counts(sort=False)
j10_p1_g1_count = j10_p1_g1['probability'].value_counts(sort=False)
j10_p1_g2_count = j10_p1_g2['probability'].value_counts(sort=False)
j10_p2_g1_count = j10_p2_g1['probability'].value_counts(sort=False)
j10_p2_g2_count = j10_p2_g2['probability'].value_counts(sort=False)
j11_p1_g1_count = j11_p1_g1['probability'].value_counts(sort=False)
j11_p1_g2_count = j11_p1_g2['probability'].value_counts(sort=False)
j11_p2_g1_count = j11_p2_g1['probability'].value_counts(sort=False)
j11_p2_g2_count = j11_p2_g2['probability'].value_counts(sort=False)
# +
'''Here we create dataframes with all of the scores of each subject in games 1 and 2. 50 refers to the score half-way
through the games'''
# # Combined Joint scores G1
# One row per participant (pair code + player), one column per score.
j_scores_1 = pd.DataFrame({'participant':['j01_p1', 'j01_p2','j02_p1', 'j02_p2','j03_p1', 'j03_p2','j04_p1', 'j04_p2','j05_p1', 'j05_p2','j06_p1','j06_p2','j07_p1', 'j07_p2','j08_p1', 'j08_p2','j10_p1', 'j10_p2','j11_p1', 'j11_p2'], 'score_1':
[j01_p1_g1_score, j01_p2_g1_score, j02_p1_g1_score, j02_p2_g1_score, j03_p1_g1_score, j03_p2_g1_score,
j04_p1_g1_score, j04_p2_g1_score, j05_p1_g1_score, j05_p2_g1_score, j06_p1_g1_score, j06_p2_g1_score,
j07_p1_g1_score, j07_p2_g1_score, j08_p1_g1_score, j08_p2_g1_score, j10_p1_g1_score, j10_p2_g1_score, j11_p1_g1_score, j11_p2_g1_score]})
j_scores_1.set_index('participant', inplace=True, drop=True)
j_scores_1
# # Combined Joint scores G2
# In[ ]:
j_scores_2 = pd.DataFrame({'participant':['j01_p1', 'j01_p2','j02_p1', 'j02_p2','j03_p1', 'j03_p2','j04_p1', 'j04_p2','j05_p1', 'j05_p2','j06_p1','j06_p2','j07_p1', 'j07_p2','j08_p1', 'j08_p2','j10_p1', 'j10_p2','j11_p1', 'j11_p2'], 'score_2':
[j01_p1_g2_score, j01_p2_g2_score, j02_p1_g2_score, j02_p2_g2_score, j03_p1_g2_score, j03_p2_g2_score,
j04_p1_g2_score, j04_p2_g2_score, j05_p1_g2_score, j05_p2_g2_score, j06_p1_g2_score, j06_p2_g2_score,
j07_p1_g2_score, j07_p2_g2_score, j08_p1_g2_score, j08_p2_g2_score, j10_p1_g2_score, j10_p2_g2_score, j11_p1_g2_score, j11_p2_g2_score]})
j_scores_2.set_index('participant', inplace=True, drop=True)
j_scores_2
# Mid-game ("_50") scores, same row layout as the full-game frames above.
j_scores_1_50 = pd.DataFrame({'participant':['j01_p1', 'j01_p2','j02_p1', 'j02_p2','j03_p1', 'j03_p2','j04_p1', 'j04_p2','j05_p1', 'j05_p2','j06_p1','j06_p2','j07_p1', 'j07_p2','j08_p1', 'j08_p2','j10_p1', 'j10_p2','j11_p1', 'j11_p2'], 'score_1_50':
[j01_p1_g1_50, j01_p2_g1_50, j02_p1_g1_50, j02_p2_g1_50, j03_p1_g1_50, j03_p2_g1_50,
j04_p1_g1_50, j04_p2_g1_50, j05_p1_g1_50, j05_p2_g1_50, j06_p1_g1_50, j06_p2_g1_50,
j07_p1_g1_50, j07_p2_g1_50, j08_p1_g1_50, j08_p2_g1_50, j10_p1_g1_50, j10_p2_g1_50, j11_p1_g1_50, j11_p2_g1_50]})
j_scores_1_50.set_index('participant', inplace=True, drop=True)
j_scores_2_50 = pd.DataFrame({'participant':['j01_p1', 'j01_p2','j02_p1', 'j02_p2','j03_p1', 'j03_p2','j04_p1', 'j04_p2','j05_p1', 'j05_p2','j06_p1','j06_p2','j07_p1', 'j07_p2','j08_p1', 'j08_p2','j10_p1', 'j10_p2','j11_p1', 'j11_p2'], 'score_2_50':
[j01_p1_g2_50, j01_p2_g2_50, j02_p1_g2_50, j02_p2_g2_50, j03_p1_g2_50, j03_p2_g2_50,
j04_p1_g2_50, j04_p2_g2_50, j05_p1_g2_50, j05_p2_g2_50, j06_p1_g2_50, j06_p2_g2_50,
j07_p1_g2_50, j07_p2_g2_50, j08_p1_g2_50, j08_p2_g2_50, j10_p1_g2_50, j10_p2_g2_50, j11_p1_g2_50, j11_p2_g2_50]})
j_scores_2_50.set_index('participant', inplace=True, drop=True)
# +
'''Here we combine the dataframes with the scores from the 2 games.'''
# Index-aligned merges: all four frames share the 'participant' index.
j_scores_50 = j_scores_1_50.merge(j_scores_2_50,right_index=True,left_index=True)
j_scores = j_scores_1.merge(j_scores_2,right_index=True,left_index=True)
j_combo = j_scores_50.merge(j_scores,right_index=True,left_index=True)
#rename columns
#c_combo.columns=["score_1_50", "score_2_50", "score_1", "score_2"]
# -
j_combo
''''#Combined Joint scores G1 and G2
j_scores = pd.merge(j_scores_1, j_scores_2, on = 'participant')
j_scores.columns=['Game 1','Game 2']
#jp1_scores.set_index('participant', inplace=True, drop=True)
j_scores'''
# +
'''Here we get the mean, median, and standard deviation of the scores. We also run a t-test comparing the scores in games 1 and 2
and plot the data.'''
j_scores_1_mean = j_scores_1.mean()
# In[ ]:
j_scores_1_mean
# In[ ]:
j_scores_1_std = j_scores_1.std()
# In[ ]:
j_scores_1_std
# In[ ]:
j_scores_2_mean = j_scores_2.mean()
# In[ ]:
j_scores_2_mean
# In[ ]:
j_scores_2_std = j_scores_2.std()
# In[ ]:
j_scores_2_std
# In[ ]:
j_scores_med = j_scores.median()
j_scores_med
# In[ ]:
# +
#j_scores_p = sp.stats.ttest_rel(j_scores['Game 1'],j_scores['Game 2'])
# +
# Box plot of full-game joint scores; saved as a JPEG alongside the script.
j_scores_box = sns.boxplot(
data=j_scores,
width=0.5,
palette="pastel")
j_scores_box.axes.set_title("SMAB3 Social Scores",
fontsize=16)
plot_file_name="j_score_box.jpg"
plt.ylim(0, 100)
j_scores_box.axes.set(ylabel='Score')
# save as jpeg
j_scores_box.figure.savefig(plot_file_name,
format='jpeg')
# +
# Same plot including the mid-game ("_50") score columns.
j_combo_box = sns.boxplot(
data=j_combo, order = ['score_1_50','score_2_50','score_1','score_2'],
width=0.5,
palette="pastel")
j_combo_box.axes.set_title("SMAB3 Social Scores",
fontsize=16)
plot_file_name="j_score_box.jpg"
plt.ylim(0, 100)
j_combo_box.axes.set(ylabel='Score')
# save as jpeg
j_combo_box.figure.savefig(plot_file_name,
format='jpeg')
# +
'''Here we place all of the data into the number of times each subject chose each hole into dataframes for games 1
and 2. We plot this data and run statistics on it.'''
# # Joint combined choices in G1
# Rows = participants, columns = reward probabilities (holes).
j_g1_counts = pd.DataFrame([j01_p1_g1_count, j01_p2_g1_count, j02_p1_g1_count, j02_p2_g1_count, j03_p1_g1_count,
j03_p2_g1_count, j04_p1_g1_count, j04_p2_g1_count, j05_p1_g1_count, j05_p2_g1_count, j06_p1_g1_count,
j06_p2_g1_count, j07_p1_g1_count, j07_p2_g1_count, j08_p1_g1_count, j08_p2_g1_count,
j10_p1_g1_count, j10_p2_g1_count, j11_p1_g1_count, j11_p2_g1_count],
index = ['j01_p1_g1_count', 'j01_p2_g1_count', 'j02_p1_g1_count', 'j02_p2_g1_count',
'j03_p1_g1_count', 'j03_p2_g1_count', 'j04_p1_g1_count', 'j04_p2_g1_count', 'j05_p1_g1_count',
'j05_p2_g1_count', 'j06_p1_g1_count', 'j06_p2_g1_count', 'j07_p1_g1_count',
'j07_p2_g1_count', 'j08_p1_g1_count', 'j08_p2_g1_count', 'j10_p1_g1_count', 'j10_p2_g1_count',
'j11_p1_g1_count', 'j11_p2_g1_count'])
# In[ ]:
# NaN means the participant never chose that hole; treat as zero choices.
j_g1_counts = j_g1_counts.fillna(0)
# In[ ]:
j_g1_counts
# In[ ]:
j_g1_counts.sum(axis=1)
# In[ ]:
j_g1_counts_bar = j_g1_counts.plot.bar()
# +
# In[ ]:
j_g1_counts_box = sns.boxplot(
data=j_g1_counts,
width=0.5,
palette="pastel")
j_g1_counts_box.axes.set_title("SMAB3 Social Choice Distributions in Game 1",
fontsize=16)
plot_file_name="j_g1_counts_box.jpg"
plt.ylim(0, 100)
j_g1_counts_box.axes.set(xlabel='Arm',ylabel='Frequency')
# save as jpeg
j_g1_counts_box.figure.savefig(plot_file_name,
format='jpeg')
# In[ ]:
# +
# Columns 0/1/2 are assumed to be the low/medium/high reward holes in
# ascending order -- TODO confirm the column ordering from value_counts.
j_g1_low = np.mean(j_g1_counts.iloc[:,0])
j_g1_med = np.mean(j_g1_counts.iloc[:,1])
j_g1_high = np.mean(j_g1_counts.iloc[:,2])
# In[ ]:
j_g1_low
# In[ ]:
j_g1_med
# In[ ]:
j_g1_high
# In[ ]:
j_g1_low_std = np.std(j_g1_counts.iloc[:,0])
j_g1_med_std = np.std(j_g1_counts.iloc[:,1])
j_g1_high_std = np.std(j_g1_counts.iloc[:,2])
# In[ ]:
j_g1_low_std
# In[ ]:
j_g1_med_std
# In[ ]:
j_g1_high_std
# # Joint combined choices in G2
# In[ ]:
# Game-2 choice counts; rows = participants, columns = reward probabilities.
# BUGFIX: the index previously contained the copy-paste labels
# 'j07_p1_g1_count', 'j07_p1_g2_count' instead of the j07 game-2 pair.
# After the labels are truncated to participant codes further below, the
# typo produced a duplicate 'j07_p1' row (and no 'j07_p2'), corrupting the
# index-based merge of game-1 and game-2 counts.
j_g2_counts = pd.DataFrame([j01_p1_g2_count, j01_p2_g2_count, j02_p1_g2_count, j02_p2_g2_count, j03_p1_g2_count,
j03_p2_g2_count, j04_p1_g2_count, j04_p2_g2_count, j05_p1_g2_count, j05_p2_g2_count, j06_p1_g2_count,
j06_p2_g2_count, j07_p1_g2_count, j07_p2_g2_count, j08_p1_g2_count, j08_p2_g2_count,
j10_p1_g2_count, j10_p2_g2_count, j11_p1_g2_count, j11_p2_g2_count],
index = ['j01_p1_g2_count', 'j01_p2_g2_count', 'j02_p1_g2_count', 'j02_p2_g2_count',
'j03_p1_g2_count', 'j03_p2_g2_count', 'j04_p1_g2_count', 'j04_p2_g2_count', 'j05_p1_g2_count',
'j05_p2_g2_count', 'j06_p1_g2_count', 'j06_p2_g2_count', 'j07_p1_g2_count', 'j07_p2_g2_count',
'j08_p1_g2_count', 'j08_p2_g2_count', 'j10_p1_g2_count', 'j10_p2_g2_count',
'j11_p1_g2_count', 'j11_p2_g2_count'])
# In[ ]:
# NaN means the hole was never chosen; treat as zero.
j_g2_counts = j_g2_counts.fillna(0)
j_g2_counts
# In[ ]:
j_g2_counts.sum(axis=1)
# In[ ]:
j_g2_counts_bar = j_g2_counts.plot.bar()
# +
# In[ ]:
j_g2_counts_box = sns.boxplot(
data=j_g2_counts,
width=0.5,
palette="pastel")
j_g2_counts_box.axes.set_title("SMAB3 Social Choice Distributions in Game 2",
fontsize=16)
plot_file_name="j_g2_counts_box.jpg"
plt.ylim(0, 100)
j_g2_counts_box.axes.set(xlabel='Arm',ylabel='Frequency')
# save as jpeg
j_g2_counts_box.figure.savefig(plot_file_name,
format='jpeg')
# +
# In[ ]:
# Mean/std of choices per hole; columns assumed low/med/high -- TODO confirm.
j_g2_low = np.mean(j_g2_counts.iloc[:,0])
j_g2_med = np.mean(j_g2_counts.iloc[:,1])
j_g2_high = np.mean(j_g2_counts.iloc[:,2])
# In[ ]:
j_g2_low
# In[ ]:
j_g2_med
# In[ ]:
j_g2_high
# In[ ]:
j_g2_low_std = np.std(j_g2_counts.iloc[:,0])
j_g2_med_std = np.std(j_g2_counts.iloc[:,1])
j_g2_high_std = np.std(j_g2_counts.iloc[:,2])
# In[ ]:
j_g2_low_std
# In[ ]:
j_g2_med_std
# In[ ]:
j_g2_high_std
# -
j_g1_counts
# +
#combine joint hole count dataframes
#make index just the participant code
#combine control_counts_2 and control_counts_1
#modify the index to be only participant
# Truncate each row label (e.g. 'j01_p1_g1_count') to its first six
# characters ('j01_p1') so both games share a participant index.
# NOTE(review): this assumes the truncated labels are unique per frame;
# duplicate labels would silently corrupt the index merge below.
subjects_1 = j_g1_counts.index.to_numpy()
subjects_2 = j_g2_counts.index.to_numpy()
subjects_new1 = np.array([])
subjects_new2 = np.array([])
ii = 0
for i in subjects_1:
    subjects_new1 = np.append(subjects_new1,subjects_1[ii][0:6])
    ii = ii +1
iii = 0
for i in subjects_2:
    subjects_new2 = np.append(subjects_new2,subjects_2[iii][0:6])
    iii = iii +1
j_g1_counts.index = subjects_new1
j_g2_counts.index = subjects_new2
j_counts = j_g1_counts.merge(j_g2_counts,right_index=True,left_index=True)
# -
j_counts
# +
'''Here we combine the separate the counts of each hole into separate arrays. We run t-tests to compare the counts
of each hole in each game.'''
# # Joint combined arrays
# In[ ]:
#pip install researchpy
# In[ ]:
import researchpy as rp
# In[ ]:
# Column 2 is presumably the 90% hole, column 1 the 60%, column 0 the 30%
# (see the variable names) -- TODO confirm column order.
j_90_1 = np.array(j_g1_counts.iloc[:,2])
j_90_2 = np.array(j_g2_counts.iloc[:,2])
# In[ ]:
j_90_1
# In[ ]:
j_90_2
# In[ ]:
j_60_1 = np.array(j_g1_counts.iloc[:,1])
j_60_2 = np.array(j_g2_counts.iloc[:,1])
# In[ ]:
j_60_1
# In[ ]:
j_60_2
# In[ ]:
j_30_1 = np.array(j_g1_counts.iloc[:,0])
j_30_2 = np.array(j_g2_counts.iloc[:,0])
# In[ ]:
j_30_1
# In[ ]:
j_30_2
# # Joint combined choice statistics
# In[ ]:
# Paired t-tests: same participants in both games.
j_90_p = sp.stats.ttest_rel(j_90_1,j_90_2)
j_60_p = sp.stats.ttest_rel(j_60_1,j_60_2)
j_30_p = sp.stats.ttest_rel(j_30_1,j_30_2)
# In[ ]:
j_90_p
# In[ ]:
j_60_p
# In[ ]:
j_30_p
# -
# # SMAB4 Joint Data
# +
'''Here we load the csv files into pandas dataframes by calling organize_trial_by_trial and get the scores in games
1 and 2 by calling call_get_score.'''
# # Multi Analysis
# m05 uses the older CSV format (organize_trial_by_trial); the rest use
# the float-click variant parser.
df = organize_trial_by_trial('data/m05_gamedata.csv')
m05 = df
m05_p1_g1_score,m05_p1_g2_score,m05_p2_g1_score,m05_p2_g2_score,m05_p1_diff,m05_p2_diff,m05_p1_g1_50,m05_p1_g2_50,m05_p2_g1_50,m05_p2_g2_50 = call_get_score(m05, 'm')
m05.head()
# In[ ]:
df = organize_trial_by_trial_floatclick('data/m06_gamedata.csv')
m06 = df
m06_p1_g1_score,m06_p1_g2_score,m06_p2_g1_score,m06_p2_g2_score,m06_p1_diff,m06_p2_diff,m06_p1_g1_50,m06_p1_g2_50,m06_p2_g1_50,m06_p2_g2_50 = call_get_score(m06, 'm')
m06.head()
# In[ ]:
df = organize_trial_by_trial_floatclick('data/m08_gamedata.csv')
m08 = df
m08_p1_g1_score,m08_p1_g2_score,m08_p2_g1_score,m08_p2_g2_score,m08_p1_diff,m08_p2_diff,m08_p1_g1_50,m08_p1_g2_50,m08_p2_g1_50,m08_p2_g2_50 = call_get_score(m08, 'm')
m08.head()
# In[ ]:
# In[ ]:
df = organize_trial_by_trial_floatclick('data/m10_gamedata.csv')
m10 = df
m10_p1_g1_score,m10_p1_g2_score,m10_p2_g1_score,m10_p2_g2_score,m10_p1_diff,m10_p2_diff,m10_p1_g1_50,m10_p1_g2_50,m10_p2_g1_50,m10_p2_g2_50 = call_get_score(m10, 'm')
m10.head()
# In[ ]:
df = organize_trial_by_trial_floatclick('data/m11_gamedata.csv')
m11 = df
m11_p1_g1_score,m11_p1_g2_score,m11_p2_g1_score,m11_p2_g2_score,m11_p1_diff,m11_p2_diff,m11_p1_g1_50,m11_p1_g2_50,m11_p2_g1_50,m11_p2_g2_50 = call_get_score(m11, 'm')
m11.head()
# In[ ]:
# m12 excluded from analysis (left commented out by the authors).
#df = organize_trial_by_trial_floatclick('data/m12_gamedata.csv')
#m12 = df
#m12_p1_g1_score,m12_p1_g2_score,m12_p2_g1_score,m12_p2_g2_score,m12_p1_score_diff,m12_p2_score_diff = call_get_score(m12, 'm')
#m12.head()
# In[ ]:
df = organize_trial_by_trial_floatclick('data/m13_gamedata.csv')
m13 = df
m13_p1_g1_score,m13_p1_g2_score,m13_p2_g1_score,m13_p2_g2_score,m13_p1_diff,m13_p2_diff,m13_p1_g1_50,m13_p1_g2_50,m13_p2_g1_50,m13_p2_g2_50 = call_get_score(m13, 'm')
m13.head()
# In[ ]:
df = organize_trial_by_trial_floatclick('data/m14_gamedata.csv')
m14 = df
m14_p1_g1_score,m14_p1_g2_score,m14_p2_g1_score,m14_p2_g2_score,m14_p1_diff,m14_p2_diff,m14_p1_g1_50,m14_p1_g2_50,m14_p2_g1_50,m14_p2_g2_50 = call_get_score(m14, 'm')
m14.head()
# In[ ]:
# Split each multi-player session into per-(player, game) DataFrames.
m05_p1_g1, m05_p1_g2, m05_p2_g1, m05_p2_g2 = separate_df(m05, 'm')
m06_p1_g1, m06_p1_g2, m06_p2_g1, m06_p2_g2 = separate_df(m06, 'm')
m08_p1_g1, m08_p1_g2, m08_p2_g1, m08_p2_g2 = separate_df(m08, 'm')
m10_p1_g1, m10_p1_g2, m10_p2_g1, m10_p2_g2 = separate_df(m10, 'm')
m11_p1_g1, m11_p1_g2, m11_p2_g1, m11_p2_g2 = separate_df(m11, 'm')
m13_p1_g1, m13_p1_g2, m13_p2_g1, m13_p2_g2 = separate_df(m13, 'm')
m14_p1_g1, m14_p1_g2, m14_p2_g1, m14_p2_g2 = separate_df(m14, 'm')
# In[ ]:
# Tally how often each reward probability (hole) was chosen per
# (subject, player, game).
m05_p1_g1_count = m05_p1_g1['probability'].value_counts(sort=False)
m05_p1_g2_count = m05_p1_g2['probability'].value_counts(sort=False)
m05_p2_g1_count = m05_p2_g1['probability'].value_counts(sort=False)
m05_p2_g2_count = m05_p2_g2['probability'].value_counts(sort=False)
m06_p1_g1_count = m06_p1_g1['probability'].value_counts(sort=False)
m06_p1_g2_count = m06_p1_g2['probability'].value_counts(sort=False)
m06_p2_g1_count = m06_p2_g1['probability'].value_counts(sort=False)
m06_p2_g2_count = m06_p2_g2['probability'].value_counts(sort=False)
m08_p1_g1_count = m08_p1_g1['probability'].value_counts(sort=False)
m08_p1_g2_count = m08_p1_g2['probability'].value_counts(sort=False)
m08_p2_g1_count = m08_p2_g1['probability'].value_counts(sort=False)
m08_p2_g2_count = m08_p2_g2['probability'].value_counts(sort=False)
m10_p1_g1_count = m10_p1_g1['probability'].value_counts(sort=False)
m10_p1_g2_count = m10_p1_g2['probability'].value_counts(sort=False)
m10_p2_g1_count = m10_p2_g1['probability'].value_counts(sort=False)
m10_p2_g2_count = m10_p2_g2['probability'].value_counts(sort=False)
m11_p1_g1_count = m11_p1_g1['probability'].value_counts(sort=False)
m11_p1_g2_count = m11_p1_g2['probability'].value_counts(sort=False)
m11_p2_g1_count = m11_p2_g1['probability'].value_counts(sort=False)
m11_p2_g2_count = m11_p2_g2['probability'].value_counts(sort=False)
m13_p1_g1_count = m13_p1_g1['probability'].value_counts(sort=False)
m13_p1_g2_count = m13_p1_g2['probability'].value_counts(sort=False)
m13_p2_g1_count = m13_p2_g1['probability'].value_counts(sort=False)
m13_p2_g2_count = m13_p2_g2['probability'].value_counts(sort=False)
m14_p1_g1_count = m14_p1_g1['probability'].value_counts(sort=False)
m14_p1_g2_count = m14_p1_g2['probability'].value_counts(sort=False)
m14_p2_g1_count = m14_p2_g1['probability'].value_counts(sort=False)
m14_p2_g2_count = m14_p2_g2['probability'].value_counts(sort=False)
# +
#importing m09- 1st 12 trials missing- 6 for each player
# m09 is loaded by hand because its CSV is incomplete; rows come in
# transposed, hence the .T after building the frame.
reader = csv.reader( open('data/m09_gamedata.csv'))
game_data = [row for row in reader]
df = pd.DataFrame(np.array(game_data))
df = df.T
#column headings all wrong
df = df.rename(columns = {0:"game number", 1:"trial", 2:"player", 3:"arm", 4:"probability", 5:"reward", 6:"time", 7:"P1 score", 8:"P2 score"})
#drops first 20 trials of game 1
#remove if you want to analyze the whole session
#df = df[40:]
#df = df.drop(columns = 'time') #can comment this out if want to look at time
df = df.drop(['reward'], axis=1)
m09 = df.apply(pd.to_numeric)
m09.head()
''''df = organize_trial_by_trial_floatclick('data/m09_gamedata.csv')
m09 = df
m09_p1_g1_score,m09_p1_g2_score,m09_p2_g1_score,m09_p2_g2_score,m09_p1_score_diff,m09_p2_score_diff = call_get_score(m09, 'm')
m09.head()'''
# -
# s01 and s12 are loaded raw (no renaming/dropping) for inspection only.
reader = csv.reader( open('data/s01_gamedata.csv'))
game_data = [row for row in reader]
df = pd.DataFrame(np.array(game_data))
df = df.T
#column headings all wrong
#df = df.rename(columns = {0:"game number", 1:"trial", 2:"player", 3:"arm", 4:"probability", 5:"reward", 6:"time", 7:"P1 score", 8:"P2 score"})
#drops first 20 trials of game 1
#remove if you want to analyze the whole session
#df = df[40:]
#df = df.drop(columns = 'time') #can comment this out if want to look at time
#df = df.drop(['reward'], axis=1)
s01 = df.apply(pd.to_numeric)
s01
reader = csv.reader( open('data/s12_gamedata.csv'))
game_data = [row for row in reader]
df = pd.DataFrame(np.array(game_data))
df = df.T
#column headings all wrong
#df = df.rename(columns = {0:"game number", 1:"trial", 2:"player", 3:"arm", 4:"probability", 5:"reward", 6:"time", 7:"P1 score", 8:"P2 score"})
#drops first 20 trials of game 1
#remove if you want to analyze the whole session
#df = df[40:]
#df = df.drop(columns = 'time') #can comment this out if want to look at time
#df = df.drop(['reward'], axis=1)
s12 = df.apply(pd.to_numeric)
s12
m09
# +
# # Combined multi scores G1
# In[ ]:
# One row per multi-player participant, full-game score columns.
m_scores_1 = pd.DataFrame({'participant':['m05_p1', 'm05_p2','m06_p1', 'm06_p2', 'm08_p1', 'm08_p2', 'm10_p1', 'm10_p2', 'm11_p1', 'm11_p2', 'm13_p1', 'm13_p2', 'm14_p1', 'm14_p2'], 'score_1':
[m05_p1_g1_score, m05_p2_g1_score, m06_p1_g1_score, m06_p2_g1_score, m08_p1_g1_score, m08_p2_g1_score,
m10_p1_g1_score, m10_p2_g1_score, m11_p1_g1_score, m11_p2_g1_score, m13_p1_g1_score, m13_p2_g1_score,
m14_p1_g1_score, m14_p2_g1_score]})
m_scores_1.set_index('participant', inplace=True, drop=True)
m_scores_1
# # Combined multi scores G2
# In[ ]:
m_scores_2 = pd.DataFrame({'participant':['m05_p1', 'm05_p2','m06_p1', 'm06_p2', 'm08_p1', 'm08_p2', 'm10_p1', 'm10_p2', 'm11_p1', 'm11_p2', 'm13_p1', 'm13_p2', 'm14_p1', 'm14_p2'], 'score_2':
[m05_p1_g2_score, m05_p2_g2_score, m06_p1_g2_score, m06_p2_g2_score, m08_p1_g2_score, m08_p2_g2_score,
m10_p1_g2_score, m10_p2_g2_score, m11_p1_g2_score, m11_p2_g2_score, m13_p1_g2_score, m13_p2_g2_score,
m14_p1_g2_score, m14_p2_g2_score]})
m_scores_2.set_index('participant', inplace=True, drop=True)
m_scores_2
# Mid-game ("_50") score frames for the multi-player group.
m_scores_1_50 = pd.DataFrame({'participant':['m05_p1', 'm05_p2','m06_p1', 'm06_p2', 'm08_p1', 'm08_p2', 'm10_p1', 'm10_p2', 'm11_p1', 'm11_p2', 'm13_p1', 'm13_p2', 'm14_p1', 'm14_p2'], 'score_1_50':
[m05_p1_g1_50, m05_p2_g1_50, m06_p1_g1_50, m06_p2_g1_50, m08_p1_g1_50, m08_p2_g1_50, m10_p1_g1_50, m10_p2_g1_50, m11_p1_g1_50, m11_p2_g1_50, m13_p1_g1_50, m13_p2_g1_50,
m14_p1_g1_50, m14_p2_g1_50,]})
m_scores_1_50.set_index('participant', inplace=True, drop=True)
# BUGFIX: this column was mislabeled 'score_1_50' (copy-paste); it holds
# the game-2 mid-game scores, so it is named 'score_2_50' to match the
# 'j' group frames and avoid _x/_y suffix collisions in the merge below.
m_scores_2_50 = pd.DataFrame({'participant':['m05_p1', 'm05_p2','m06_p1', 'm06_p2', 'm08_p1', 'm08_p2', 'm10_p1', 'm10_p2', 'm11_p1', 'm11_p2', 'm13_p1', 'm13_p2', 'm14_p1', 'm14_p2'], 'score_2_50':
[m05_p1_g2_50, m05_p2_g2_50, m06_p1_g2_50, m06_p2_g2_50, m08_p1_g2_50, m08_p2_g2_50, m10_p1_g2_50, m10_p2_g2_50, m11_p1_g2_50, m11_p2_g2_50, m13_p1_g2_50, m13_p2_g2_50,
m14_p1_g2_50, m14_p2_g2_50,]})
m_scores_2_50.set_index('participant', inplace=True, drop=True)
# +
'''Here we combine the dataframes with the scores from the 2 games.'''
m_scores_50 = m_scores_1_50.merge(m_scores_2_50,right_index=True,left_index=True)
m_scores = m_scores_1.merge(m_scores_2,right_index=True,left_index=True)
m_combo = m_scores_50.merge(m_scores,right_index=True,left_index=True)
#rename columns
# Positional rename: assumes column order 1_50, 2_50, 1, 2 after merging.
m_combo.columns=["score_1_50", "score_2_50", "score_1", "score_2"]
# -
''''m_scores = pd.merge(m_scores_1, m_scores_2, on = 'participant')
m_scores.columns=['Game 1','Game 2']
#jp1_scores.set_index('participant', inplace=True, drop=True)
m_scores
# In[ ]:
m_scores_1_mean = m_scores_1.mean()
# In[ ]:
m_scores_1_mean
# In[ ]:
m_scores_1_std = m_scores_1.std()
# In[ ]:
m_scores_1_std
# In[ ]:
m_scores_2_mean = m_scores_2.mean()
# In[ ]:
m_scores_2_mean
# In[ ]:
m_scores_2_std = m_scores_2.std()
# In[ ]:
m_scores_2_std
# In[ ]:
m_scores_med = m_scores.median()
m_scores_med
# In[ ]:
m_scores_p = sp.stats.ttest_rel(m_scores['Game 1'],m_scores['Game 2'])
# In[ ]:
m_scores_p'''
m_combo
# +
# Box plot of full-game multi-player scores; saved as a JPEG.
m_scores_box = sns.boxplot(
data=m_scores,
width=0.5,
palette="pastel")
m_scores_box.axes.set_title("Multi Player Scores",
fontsize=16)
plot_file_name="m_score_box.jpg"
plt.ylim(0, 100)
m_scores_box.axes.set(ylabel='Score')
# save as jpeg
m_scores_box.figure.savefig(plot_file_name,
format='jpeg')
# +
# Same plot including the mid-game score columns.
m_combo_box = sns.boxplot(
data=m_combo, order = ['score_1_50','score_2_50','score_1','score_2'],
width=0.5,
palette="pastel")
m_combo_box.axes.set_title("SMAB4 Social Scores",
fontsize=16)
plot_file_name="m_score_box.jpg"
plt.ylim(0, 100)
m_combo_box.axes.set(ylabel='Score')
# save as jpeg
m_combo_box.figure.savefig(plot_file_name,
format='jpeg')
# +
'''Here we place the counts of how many times each hole was chosen into dataframes for games 1 and 2. We also
get statistics on this data and plot it.'''
# # Multi combined choices in G1
# In[ ]:
# Rows = participants, columns = reward probabilities (holes).
m_g1_counts = pd.DataFrame([m05_p1_g1_count, m05_p2_g1_count, m06_p1_g1_count, m06_p2_g1_count, m08_p1_g1_count, m08_p2_g1_count,
m10_p1_g1_count, m10_p2_g1_count, m11_p1_g1_count, m11_p2_g1_count, m13_p1_g1_count, m13_p2_g1_count,
m14_p1_g1_count, m14_p2_g1_count],
index = ['m05_p1_g1_count', 'm05_p2_g1_count', 'm06_p1_g1_count', 'm06_p2_g1_count',
'm08_p1_g1_count', 'm08_p2_g1_count', 'm10_p1_g1_count', 'm10_p2_g1_count', 'm11_p1_g1_count',
'm11_p2_g1_count', 'm13_p1_g1_count', 'm13_p2_g1_count', 'm14_p1_g1_count', 'm14_p2_g1_count'])
# In[ ]:
# NaN = hole never chosen; treat as zero.
m_g1_counts = m_g1_counts.fillna(0)
# In[ ]:
m_g1_counts
# In[ ]:
m_g1_counts.sum(axis=1)
# In[ ]:
m_g1_counts_bar = m_g1_counts.plot.bar()
# +
# In[ ]:
m_g1_counts_box = sns.boxplot(
data = m_g1_counts,
width=0.5,
palette="pastel")
m_g1_counts_box.axes.set_title("SMAB4 Social Choice Distributions in Game 1",
fontsize=16)
plot_file_name="m_g1_counts_box.jpg"
plt.ylim(0, 100)
m_g1_counts_box.axes.set(xlabel='Arm',ylabel='Frequency')
# save as jpeg
m_g1_counts_box.figure.savefig(plot_file_name,
format='jpeg')
# +
# In[ ]:
# Columns assumed to be low/med/high reward holes -- TODO confirm ordering.
m_g1_low = np.mean(m_g1_counts.iloc[:,0])
m_g1_med = np.mean(m_g1_counts.iloc[:,1])
m_g1_high = np.mean(m_g1_counts.iloc[:,2])
# In[ ]:
m_g1_low
# In[ ]:
m_g1_med
# In[ ]:
m_g1_high
# In[ ]:
m_g1_low_std = np.std(m_g1_counts.iloc[:,0])
m_g1_med_std = np.std(m_g1_counts.iloc[:,1])
m_g1_high_std = np.std(m_g1_counts.iloc[:,2])
# In[ ]:
m_g1_low_std
# In[ ]:
m_g1_med_std
# In[ ]:
m_g1_high_std
# # Multi combined choices in G2
# In[ ]:
# Game-2 counterpart of m_g1_counts above.
m_g2_counts = pd.DataFrame([m05_p1_g2_count, m05_p2_g2_count, m06_p1_g2_count, m06_p2_g2_count, m08_p1_g2_count, m08_p2_g2_count,
m10_p1_g2_count, m10_p2_g2_count, m11_p1_g2_count, m11_p2_g2_count, m13_p1_g2_count, m13_p2_g2_count,
m14_p1_g2_count, m14_p2_g2_count],
index = ['m05_p1_g2_count', 'm05_p2_g2_count', 'm06_p1_g2_count', 'm06_p2_g2_count',
'm08_p1_g2_count', 'm08_p2_g2_count', 'm10_p1_g2_count', 'm10_p2_g2_count', 'm11_p1_g2_count',
'm11_p2_g2_count', 'm13_p1_g2_count', 'm13_p2_g2_count', 'm14_p1_g2_count', 'm14_p2_g2_count'])
# In[ ]:
m_g2_counts = m_g2_counts.fillna(0)
# In[ ]:
m_g2_counts
# In[ ]:
m_g2_counts.sum(axis=1)
# In[ ]:
# -
m_g2_counts_bar = m_g2_counts.plot.bar()
# +
m_g2_counts_box = sns.boxplot(
data = m_g2_counts,
width=0.5,
palette="pastel")
m_g2_counts_box.axes.set_title("SMAB4 Social Choice Distributions in Game 2",
fontsize=16)
plot_file_name="m_g2_counts_box.jpg"
plt.ylim(0, 100)
m_g2_counts_box.axes.set(xlabel='Arm',ylabel='Frequency')
# save as jpeg
m_g2_counts_box.figure.savefig(plot_file_name,
format='jpeg')
# +
# In[ ]:
m_g2_low = np.mean(m_g2_counts.iloc[:,0])
m_g2_med = np.mean(m_g2_counts.iloc[:,1])
m_g2_high = np.mean(m_g2_counts.iloc[:,2])
# In[ ]:
m_g2_low
# In[ ]:
m_g2_med
# In[ ]:
m_g2_high
# In[ ]:
m_g2_low_std = np.std(m_g2_counts.iloc[:,0])
m_g2_med_std = np.std(m_g2_counts.iloc[:,1])
m_g2_high_std = np.std(m_g2_counts.iloc[:,2])
# In[ ]:
m_g2_low_std
# In[ ]:
m_g2_med_std
# In[ ]:
m_g2_high_std
# # Multi combined arrays
# In[ ]:
#pip install researchpy
# In[ ]:
import researchpy as rp
# In[ ]:
# Per-hole choice-count arrays; 90/60/30 names match assumed column order.
m_90_1 = np.array(m_g1_counts.iloc[:,2])
m_90_2 = np.array(m_g2_counts.iloc[:,2])
# In[ ]:
m_90_1
# In[ ]:
m_90_2
# In[ ]:
m_60_1 = np.array(m_g1_counts.iloc[:,1])
m_60_2 = np.array(m_g2_counts.iloc[:,1])
# In[ ]:
m_60_1
# In[ ]:
m_60_2
# In[ ]:
m_30_1 = np.array(m_g1_counts.iloc[:,0])
m_30_2 = np.array(m_g2_counts.iloc[:,0])
# In[ ]:
m_30_1
# In[ ]:
m_30_2
# # Multi combined choice statistics
# In[ ]:
# Paired t-tests comparing each hole's choice count across games.
m_90_p = sp.stats.ttest_rel(m_90_1,m_90_2)
m_60_p = sp.stats.ttest_rel(m_60_1,m_60_2)
m_30_p = sp.stats.ttest_rel(m_30_1,m_30_2)
# In[ ]:
m_90_p
# In[ ]:
m_60_p
# In[ ]:
m_30_p
# In[ ]:
#maximizing and matching?
# +
'''Here we combine scores from SMAB3 and SMAB4 in one dataframe'''
# # SOCIAL (total) scores G1
# 'j' rows are SMAB3 joint participants, 'm' rows are SMAB4 multi-player.
social_scores_1 = pd.DataFrame({'participant':['j01_p1', 'j01_p2','j02_p1', 'j02_p2','j03_p1', 'j03_p2','j04_p1',
'j04_p2','j05_p1', 'j05_p2','j06_p1','j06_p2','j07_p1', 'j07_p2','j08_p1',
'j08_p2','j10_p1', 'j10_p2','j11_p1', 'j11_p2','m05_p1', 'm05_p2','m06_p1',
'm06_p2', 'm08_p1', 'm08_p2', 'm10_p1', 'm10_p2', 'm11_p1', 'm11_p2', 'm13_p1', 'm13_p2',
'm14_p1', 'm14_p2'], 'social_score_1':
[j01_p1_g1_score, j01_p2_g1_score, j02_p1_g1_score, j02_p2_g1_score, j03_p1_g1_score, j03_p2_g1_score,
j04_p1_g1_score, j04_p2_g1_score, j05_p1_g1_score, j05_p2_g1_score, j06_p1_g1_score, j06_p2_g1_score,
j07_p1_g1_score, j07_p2_g1_score, j08_p1_g1_score, j08_p2_g1_score, j10_p1_g1_score, j10_p2_g1_score, j11_p1_g1_score, j11_p2_g1_score, m05_p1_g1_score, m05_p2_g1_score, m06_p1_g1_score, m06_p2_g1_score, m08_p1_g1_score, m08_p2_g1_score,
m10_p1_g1_score, m10_p2_g1_score, m11_p1_g1_score, m11_p2_g1_score, m13_p1_g1_score, m13_p2_g1_score,
m14_p1_g1_score, m14_p2_g1_score]})
social_scores_1.set_index('participant', inplace=True, drop=True)
social_scores_1
# # SOCIAL (total) scores G2
# In[ ]:
social_scores_2 = pd.DataFrame({'participant':['j01_p1', 'j01_p2','j02_p1', 'j02_p2','j03_p1', 'j03_p2','j04_p1',
'j04_p2','j05_p1', 'j05_p2','j06_p1','j06_p2','j07_p1', 'j07_p2','j08_p1', 'j08_p2','j10_p1', 'j10_p2','j11_p1', 'j11_p2',
'm05_p1', 'm05_p2','m06_p1', 'm06_p2', 'm08_p1', 'm08_p2', 'm10_p1', 'm10_p2', 'm11_p1', 'm11_p2', 'm13_p1', 'm13_p2', 'm14_p1', 'm14_p2'], 'social_score_2':
[j01_p1_g2_score, j01_p2_g2_score, j02_p1_g2_score, j02_p2_g2_score, j03_p1_g2_score, j03_p2_g2_score,
j04_p1_g2_score, j04_p2_g2_score, j05_p1_g2_score, j05_p2_g2_score, j06_p1_g2_score, j06_p2_g2_score,
j07_p1_g2_score, j07_p2_g2_score, j08_p1_g2_score, j08_p2_g2_score, j10_p1_g2_score, j10_p2_g2_score, j11_p1_g2_score, j11_p2_g2_score, m05_p1_g2_score, m05_p2_g2_score, m06_p1_g2_score, m06_p2_g2_score, m08_p1_g2_score, m08_p2_g2_score,
m10_p1_g2_score, m10_p2_g2_score, m11_p1_g2_score, m11_p2_g2_score, m13_p1_g2_score, m13_p2_g2_score, m14_p1_g2_score, m14_p2_g2_score]})
social_scores_2.set_index('participant', inplace=True, drop=True)
social_scores_2
# In[ ]:
social_scores = pd.merge(social_scores_1, social_scores_2, on = 'participant')
social_scores.columns=['Game 1','Game 2']
#jp1_scores.set_index('participant', inplace=True, drop=True)
social_scores
# In[ ]:
#export to excel
social_scores.to_excel("output14.xlsx")
# In[ ]:
# NOTE: social_scores is overwritten here with a per-participant total
# (Series); it is rebuilt as a DataFrame further below.
social_scores=social_scores.sum(axis=1)
# In[ ]:
social_scores.sort_values(inplace=True)
# In[ ]:
social_scores
# In[ ]:
social_scores_1_mean = social_scores_1.mean()
# In[ ]:
social_scores_1_mean
# In[ ]:
social_scores_1_std = social_scores_1.std()
# In[ ]:
social_scores_1_std
# In[ ]:
social_scores_2_mean = social_scores_2.mean()
# In[ ]:
social_scores_2_mean
# In[ ]:
social_scores_2_std = social_scores_2.std()
# In[ ]:
social_scores_2_std
# In[ ]:
social_scores_med = social_scores.median()
social_scores_med
# In[ ]:
social_scores_p = sp.stats.ttest_rel(social_scores_1,social_scores_2)
# In[ ]:
social_scores_p
# # Arm1
# In[ ]:
# +
# Combine the per-game hole-count tables for the multi-player (m) group.
# Row labels look like 'm05_p1_g1_count'; keeping only the first six
# characters ('m05_p1') gives both games a shared participant index so
# they can be merged side by side (game-1 columns then game-2 columns).
m_g1_counts.index = np.array([label[0:6] for label in m_g1_counts.index.to_numpy()])
m_g2_counts.index = np.array([label[0:6] for label in m_g2_counts.index.to_numpy()])
m_counts = m_g1_counts.merge(m_g2_counts, left_index=True, right_index=True)
# -
m_counts
# # Test for differences between SMAB3 and SMAB4 in multi-player data
# +
#combine control_score_2 and control_score_1
social_scores = social_scores_1.merge(social_scores_2,right_index=True,left_index=True)
#convert from wide-format to long-format
social_scores = pd.melt(social_scores,ignore_index=False)
# -
subjects = social_scores.index.to_numpy()
subjects
# +
# Label each row with its experiment: 'j' prefix -> SMAB3 (3),
# 'm' prefix -> SMAB4 (4). ([i][0][0] is just i[0].)
experiment = np.array([])
for i in subjects:
    if [i][0][0] == 'j':
        experiment = np.append(experiment,3)
    elif [i][0][0] == 'm':
        experiment = np.append(experiment,4)
social_scores['experiment'] = experiment
# -
social_scores
'''Here we conduct a 2 x 2 mixed measures ANOVA to compare the scores in games 1 and 2'''
pg.mixed_anova(data=social_scores, dv='value', between='experiment', within='variable', subject='participant')
import patsy
# +
'''Here we conduct a linear regression to compare the scores in games 1 and 2'''
# Design matrices from the long-format table: value ~ game + experiment.
outcome_1,predictors_1 = patsy.dmatrices("value ~ variable + experiment", social_scores)
mod_1 = sm.OLS(outcome_1,predictors_1)
res_1 = mod_1.fit()
print(res_1.summary())
# -
#combine m_counts and j_counts
# Row-wise concatenation of the two groups' hole-count tables.
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported equivalent and preserves the indexes.
joint_counts = pd.concat([m_counts, j_counts])
joint_counts = joint_counts.melt(ignore_index=False)
joint_counts
# +
#clean up joint_counts
#modify 'variable' to be only '30','60', or '90' and add a column 'game' specifying the game #
# After the merge, column labels carry pandas '_x'/'_y' suffixes marking
# game 1 vs game 2; strip the suffix into a 'game' column and keep the
# first two characters (the hole probability) as 'variable'.
variable = joint_counts['variable'].to_numpy()
choice = np.array([])
game = np.array([])
ii = 0
for i in variable:
    length = len(variable[ii])
    if variable[ii][length - 1] == 'x':
        game = np.append(game,'1')
    elif variable[ii][len(variable[ii]) - 1] == 'y':
        game = np.append(game,'2')
    choice = np.append(choice,variable[ii][0:2])
    ii = ii +1
joint_counts['variable'] = choice
joint_counts['game'] = game
# -
joint_counts
# # Test for differences in SMAB3 and SMAB4 including single and multiplayer
'''Here we combine all of the scores (all games, confed and multiplayer, SMAB 3 and 4)'''
# DataFrame.append was removed in pandas 2.0; pd.concat is the supported
# row-wise equivalent and produces the identical result here.
scores = pd.concat([control_scores, social_scores])
scores
#combine all SMAB3 scores
j_scores = j_scores.rename(columns={"score_1": "Game 1", "score_2": "Game 2"})
smab3_scores = pd.concat([s_scores, j_scores])
smab3_scores
smab3_scores_1_mean = smab3_scores['Game 1'].mean()
smab3_scores_1_std = smab3_scores['Game 1'].std()
smab3_scores_2_mean = smab3_scores['Game 2'].mean()
smab3_scores_2_std = smab3_scores['Game 2'].std()
smab3_scores_1_mean, smab3_scores_1_std, smab3_scores_2_mean, smab3_scores_2_std
# +
#working on now
# Box plot of all SMAB3 scores (control + social), saved as JPEG.
smab3_scores_box = sns.boxplot(
data=smab3_scores,
width=0.5,
palette="pastel")
smab3_scores_box.axes.set_title("SMAB3 Scores (Control and Social)",
fontsize=16)
plot_file_name="smab3_score_box.jpg"
plt.ylim(0, 100)
smab3_scores_box.axes.set(ylabel='Score')
# save as jpeg
smab3_scores_box.figure.savefig(plot_file_name,
format='jpeg')
# -
#combine all SMAB4 scores
c_scores = c_scores.rename(columns={"confederate_score_1": "Game 1", "confederate_score_2": "Game 2"})
m_scores = m_scores.rename(columns={"score_1": "Game 1", "score_2": "Game 2"})
# DataFrame.append was removed in pandas 2.0; pd.concat is the drop-in,
# behaviour-identical replacement.
smab4_scores = pd.concat([c_scores, m_scores])
smab4_scores
# NOTE(review): stray cell — bare `c09` just displays an object presumably
# defined earlier in the notebook; it looks unrelated to this SMAB4
# section and raises NameError if `c09` was never defined. TODO confirm
# it is intentional, otherwise remove.
c09
# +
#working on now
# Box plot of SMAB4 scores for games 1 and 2, pooled over control and
# social sessions; saved to smab4_score_box.jpg.
smab4_scores_box = sns.boxplot(
                 data=smab4_scores,
                 width=0.5,
                 palette="pastel")
smab4_scores_box.axes.set_title("SMAB4 Scores (Control and Social)",
                    fontsize=16)
plot_file_name="smab4_score_box.jpg"
plt.ylim(0, 100)
smab4_scores_box.axes.set(ylabel='Score')
# save as jpeg
smab4_scores_box.figure.savefig(plot_file_name,
                    format='jpeg')
# -
# Descriptive statistics per game for the pooled SMAB4 sample.
smab4_scores_1_mean = smab4_scores['Game 1'].mean()
smab4_scores_1_std = smab4_scores['Game 1'].std()
smab4_scores_2_mean = smab4_scores['Game 2'].mean()
smab4_scores_2_std = smab4_scores['Game 2'].std()
smab4_scores_1_mean, smab4_scores_1_std, smab4_scores_2_mean, smab4_scores_2_std
# +
#add another column to scores indicating whether it's the single or multiplayer
# ID prefix encodes the condition: 's'/'c' = single-player (control),
# 'm'/'j' = multiplayer (social).
# Fix: the original tested `[i][0][0]` (a throwaway one-element list);
# `i[0]` is the direct, equivalent spelling.
subjects = scores.index.to_numpy()
condition = np.array([])
for i in subjects:
    if i[0] == 's' or i[0] == 'c':
        condition = np.append(condition, 'control')
    elif i[0] == 'm' or i[0] == 'j':
        condition = np.append(condition, 'social')
scores['condition'] = condition
# +
#change 'variable' column to reflect game number
# The melted column names end in the game number; keep only that last
# character.
# Fix: the original kept a manual index counter (`ii`) alongside an
# unused loop variable; iterating the values directly is equivalent.
var = scores['variable'].to_numpy()
game = np.array([])
for label in var:
    game = np.append(game, label[-1])
scores['variable'] = game
# -
print(scores)
scores = scores.reset_index()
scores
scores.to_csv('scores.csv')
# +
#NEED
# NOTE(review): the mixed ANOVA below is commented out — presumably because
# pingouin's mixed_anova does not accept two between factors; verify
# before re-enabling.
#pg.mixed_anova(data=scores, dv='value', between=['experiment','condition'], within='variable', subject='participant')
# +
#using statsmodels instead of pingouin
#sm.AnovaRM(scores, 'value', 'participant', within=within, between='experiment', aggregate_func=None)
# +
'''Here we conduct a linear regression to compare the scores in games 1 and 2'''
# OLS of score on game and experiment, over the full pooled sample.
outcome_1,predictors_1 = patsy.dmatrices("value ~ variable + experiment", scores)
mod_1 = sm.OLS(outcome_1,predictors_1)
res_1 = mod_1.fit()
print(res_1.summary())
# +
'''Here we conduct a linear regression to compare the scores in games 1 and 2; condition is added!'''
# Same model with the control/social condition as an extra predictor.
outcome_1,predictors_1 = patsy.dmatrices("value ~ variable + experiment + condition", scores)
mod_1 = sm.OLS(outcome_1,predictors_1)
res_1 = mod_1.fit()
print(res_1.summary())
# -
# DataFrame.append was removed in pandas 2.0; pd.concat is the drop-in,
# behaviour-identical replacement.
scores = pd.concat([control_scores, social_scores])
# # Test for differences looking at all the value counts data with the holes denoted by probability
#combine joint_counts and control_counts
counts = pd.concat([joint_counts, control_counts])
counts
# +
#add another column to counts indicating whether it's the single or multiplayer
# ID prefix encodes the condition: 's'/'c' = single-player (control),
# 'm'/'j' = multiplayer (social).
# Fix: `[i][0][0]` wrapped `i` in a throwaway list; `i[0]` is equivalent.
subjects = counts.index.to_numpy()
condition = np.array([])
for i in subjects:
    if i[0] == 's' or i[0] == 'c':
        condition = np.append(condition, 'control')
    elif i[0] == 'm' or i[0] == 'j':
        condition = np.append(condition, 'social')
counts['condition'] = condition
# +
#add another column to counts indicating whether it's SMAB 3 or 4
# ID prefix encodes the study: 's'/'j' = SMAB3, 'c'/'m' = SMAB4.
# Fix: `[i][0][0]` wrapped `i` in a throwaway list; `i[0]` is equivalent.
subjects = counts.index.to_numpy()
experiment = np.array([])
for i in subjects:
    if i[0] == 's' or i[0] == 'j':
        experiment = np.append(experiment, 3)
    elif i[0] == 'c' or i[0] == 'm':
        experiment = np.append(experiment, 4)
counts['experiment'] = experiment
# -
#break up counts dataframe into 30,60,90
# One sub-frame per hole reward probability.
grouped = counts.groupby(counts.variable)
counts_30 = grouped.get_group("30")
counts_60 = grouped.get_group("60")
counts_90 = grouped.get_group("90")
counts_60
#run 2x2 between measures ANOVA for the 90 hole
pg.anova(data=counts_90, dv='value', between=['experiment','condition'])
#run 2x2 between measures ANOVA for the 60 hole
pg.anova(data=counts_60, dv='value', between=['experiment','condition'])
import statsmodels
#plot interaction
# Experiment x condition interaction plots for the 60% and 90% holes.
int_60 = sm.graphics.interaction_plot(counts_60['experiment'], counts_60['condition'], counts_60['value'],
             colors=['red','blue'], markers=['D','^'], ms=10)
int_90 = sm.graphics.interaction_plot(counts_90['experiment'], counts_90['condition'], counts_90['value'],
             colors=['red','blue'], markers=['D','^'], ms=10)
# Per-session counts of which arm was chosen, for every multiplayer ('m')
# and joint ('j') participant, split by player (p1/p2) and game (g1/g2).
# sort=False keeps the arms in order of first appearance rather than by
# frequency.
m05_p1_g1_arm = m05_p1_g1['arm'].value_counts(sort=False)
m05_p1_g2_arm = m05_p1_g2['arm'].value_counts(sort=False)
m05_p2_g1_arm = m05_p2_g1['arm'].value_counts(sort=False)
m05_p2_g2_arm = m05_p2_g2['arm'].value_counts(sort=False)
m06_p1_g1_arm = m06_p1_g1['arm'].value_counts(sort=False)
m06_p1_g2_arm = m06_p1_g2['arm'].value_counts(sort=False)
m06_p2_g1_arm = m06_p2_g1['arm'].value_counts(sort=False)
m06_p2_g2_arm = m06_p2_g2['arm'].value_counts(sort=False)
m08_p1_g1_arm = m08_p1_g1['arm'].value_counts(sort=False)
m08_p1_g2_arm = m08_p1_g2['arm'].value_counts(sort=False)
m08_p2_g1_arm = m08_p2_g1['arm'].value_counts(sort=False)
m08_p2_g2_arm = m08_p2_g2['arm'].value_counts(sort=False)
m10_p1_g1_arm = m10_p1_g1['arm'].value_counts(sort=False)
m10_p1_g2_arm = m10_p1_g2['arm'].value_counts(sort=False)
m10_p2_g1_arm = m10_p2_g1['arm'].value_counts(sort=False)
m10_p2_g2_arm = m10_p2_g2['arm'].value_counts(sort=False)
m11_p1_g1_arm = m11_p1_g1['arm'].value_counts(sort=False)
m11_p1_g2_arm = m11_p1_g2['arm'].value_counts(sort=False)
m11_p2_g1_arm = m11_p2_g1['arm'].value_counts(sort=False)
m11_p2_g2_arm = m11_p2_g2['arm'].value_counts(sort=False)
m13_p1_g1_arm = m13_p1_g1['arm'].value_counts(sort=False)
m13_p1_g2_arm = m13_p1_g2['arm'].value_counts(sort=False)
m13_p2_g1_arm = m13_p2_g1['arm'].value_counts(sort=False)
m13_p2_g2_arm = m13_p2_g2['arm'].value_counts(sort=False)
m14_p1_g1_arm = m14_p1_g1['arm'].value_counts(sort=False)
m14_p1_g2_arm = m14_p1_g2['arm'].value_counts(sort=False)
m14_p2_g1_arm = m14_p2_g1['arm'].value_counts(sort=False)
m14_p2_g2_arm = m14_p2_g2['arm'].value_counts(sort=False)
j01_p1_g1_arm = j01_p1_g1['arm'].value_counts(sort=False)
j01_p1_g2_arm = j01_p1_g2['arm'].value_counts(sort=False)
j01_p2_g1_arm = j01_p2_g1['arm'].value_counts(sort=False)
j01_p2_g2_arm = j01_p2_g2['arm'].value_counts(sort=False)
j02_p1_g1_arm = j02_p1_g1['arm'].value_counts(sort=False)
j02_p1_g2_arm = j02_p1_g2['arm'].value_counts(sort=False)
j02_p2_g1_arm = j02_p2_g1['arm'].value_counts(sort=False)
j02_p2_g2_arm = j02_p2_g2['arm'].value_counts(sort=False)
j03_p1_g1_arm = j03_p1_g1['arm'].value_counts(sort=False)
j03_p1_g2_arm = j03_p1_g2['arm'].value_counts(sort=False)
j03_p2_g1_arm = j03_p2_g1['arm'].value_counts(sort=False)
j03_p2_g2_arm = j03_p2_g2['arm'].value_counts(sort=False)
j04_p1_g1_arm = j04_p1_g1['arm'].value_counts(sort=False)
j04_p1_g2_arm = j04_p1_g2['arm'].value_counts(sort=False)
j04_p2_g1_arm = j04_p2_g1['arm'].value_counts(sort=False)
j04_p2_g2_arm = j04_p2_g2['arm'].value_counts(sort=False)
j05_p1_g1_arm = j05_p1_g1['arm'].value_counts(sort=False)
j05_p1_g2_arm = j05_p1_g2['arm'].value_counts(sort=False)
j05_p2_g1_arm = j05_p2_g1['arm'].value_counts(sort=False)
j05_p2_g2_arm = j05_p2_g2['arm'].value_counts(sort=False)
j06_p1_g1_arm = j06_p1_g1['arm'].value_counts(sort=False)
j06_p1_g2_arm = j06_p1_g2['arm'].value_counts(sort=False)
j06_p2_g1_arm = j06_p2_g1['arm'].value_counts(sort=False)
j06_p2_g2_arm = j06_p2_g2['arm'].value_counts(sort=False)
j07_p1_g1_arm = j07_p1_g1['arm'].value_counts(sort=False)
j07_p1_g2_arm = j07_p1_g2['arm'].value_counts(sort=False)
j07_p2_g1_arm = j07_p2_g1['arm'].value_counts(sort=False)
j07_p2_g2_arm = j07_p2_g2['arm'].value_counts(sort=False)
j08_p1_g1_arm = j08_p1_g1['arm'].value_counts(sort=False)
j08_p1_g2_arm = j08_p1_g2['arm'].value_counts(sort=False)
j08_p2_g1_arm = j08_p2_g1['arm'].value_counts(sort=False)
j08_p2_g2_arm = j08_p2_g2['arm'].value_counts(sort=False)
j10_p1_g1_arm = j10_p1_g1['arm'].value_counts(sort=False)
j10_p1_g2_arm = j10_p1_g2['arm'].value_counts(sort=False)
j10_p2_g1_arm = j10_p2_g1['arm'].value_counts(sort=False)
j10_p2_g2_arm = j10_p2_g2['arm'].value_counts(sort=False)
j11_p1_g1_arm = j11_p1_g1['arm'].value_counts(sort=False)
j11_p1_g2_arm = j11_p1_g2['arm'].value_counts(sort=False)
j11_p2_g1_arm = j11_p2_g1['arm'].value_counts(sort=False)
j11_p2_g2_arm = j11_p2_g2['arm'].value_counts(sort=False)
# +
# In[ ]:
# One row per session's arm-choice counts for game 1, across all social
# participants (joint 'j' and multiplayer 'm'); NaN appears where a
# session never chose a given arm and is zero-filled below.
social_arms_g1 = pd.DataFrame([j01_p1_g1_arm, j01_p2_g1_arm, j02_p1_g1_arm, j02_p2_g1_arm, j03_p1_g1_arm,
          j03_p2_g1_arm, j04_p1_g1_arm, j04_p2_g1_arm, j05_p1_g1_arm, j05_p2_g1_arm, j06_p1_g1_arm,
          j06_p2_g1_arm, j07_p1_g1_arm, j07_p2_g1_arm, j08_p1_g1_arm, j08_p2_g1_arm,
          j10_p1_g1_arm, j10_p2_g1_arm, j11_p1_g1_arm, j11_p2_g1_arm, m05_p1_g1_arm, m05_p2_g1_arm, m06_p1_g1_arm, m06_p2_g1_arm, m08_p1_g1_arm, m08_p2_g1_arm,
          m10_p1_g1_arm, m10_p2_g1_arm, m11_p1_g1_arm, m11_p2_g1_arm, m13_p1_g1_arm, m13_p2_g1_arm,
          m14_p1_g1_arm, m14_p2_g1_arm],
         index = ['j01_p1_g1_arm', 'j01_p2_g1_arm', 'j02_p1_g1_arm', 'j02_p2_g1_arm',
          'j03_p1_g1_arm', 'j03_p2_g1_arm', 'j04_p1_g1_arm', 'j04_p2_g1_arm', 'j05_p1_g1_arm',
          'j05_p2_g1_arm', 'j06_p1_g1_arm', 'j06_p2_g1_arm', 'j07_p1_g1_arm',
          'j07_p2_g1_arm', 'j08_p1_g1_arm', 'j08_p2_g1_arm', 'j10_p1_g1_arm', 'j10_p2_g1_arm',
          'j11_p1_g1_arm', 'j11_p2_g1_arm', 'm05_p1_g1_arm', 'm05_p2_g1_arm', 'm06_p1_g1_arm', 'm06_p2_g1_arm',
          'm08_p1_g1_arm', 'm08_p2_g1_arm', 'm10_p1_g1_arm', 'm10_p2_g1_arm', 'm11_p1_g1_arm',
          'm11_p2_g1_arm', 'm13_p1_g1_arm', 'm13_p2_g1_arm', 'm14_p1_g1_arm', 'm14_p2_g1_arm'])
# In[ ]:
#How to prevent NaN: df = df.fillna(0)
social_arms_g1 = social_arms_g1.fillna(0)
#print
social_arms_g1
# In[ ]:
#export to excel
social_arms_g1.to_excel("output10.xlsx")
# In[ ]:
# Same table for game 2.
social_arms_g2 = pd.DataFrame([j01_p1_g2_arm, j01_p2_g2_arm, j02_p1_g2_arm, j02_p2_g2_arm, j03_p1_g2_arm,
          j03_p2_g2_arm, j04_p1_g2_arm, j04_p2_g2_arm, j05_p1_g2_arm, j05_p2_g2_arm, j06_p1_g2_arm,
          j06_p2_g2_arm, j07_p1_g2_arm, j07_p2_g2_arm, j08_p1_g2_arm, j08_p2_g2_arm,
          j10_p1_g2_arm, j10_p2_g2_arm, j11_p1_g2_arm, j11_p2_g2_arm, m05_p1_g2_arm, m05_p2_g2_arm, m06_p1_g2_arm, m06_p2_g2_arm, m08_p1_g2_arm, m08_p2_g2_arm,
          m10_p1_g2_arm, m10_p2_g2_arm, m11_p1_g2_arm, m11_p2_g2_arm, m13_p1_g2_arm, m13_p2_g2_arm,
          m14_p1_g2_arm, m14_p2_g2_arm],
         index = ['j01_p1_g2_arm', 'j01_p2_g2_arm', 'j02_p1_g2_arm', 'j02_p2_g2_arm',
          'j03_p1_g2_arm', 'j03_p2_g2_arm', 'j04_p1_g2_arm', 'j04_p2_g2_arm', 'j05_p1_g2_arm',
          'j05_p2_g2_arm', 'j06_p1_g2_arm', 'j06_p2_g2_arm', 'j07_p1_g2_arm',
          'j07_p2_g2_arm', 'j08_p1_g2_arm', 'j08_p2_g2_arm', 'j10_p1_g2_arm', 'j10_p2_g2_arm',
          'j11_p1_g2_arm', 'j11_p2_g2_arm', 'm05_p1_g2_arm', 'm05_p2_g2_arm', 'm06_p1_g2_arm', 'm06_p2_g2_arm',
          'm08_p1_g2_arm', 'm08_p2_g2_arm', 'm10_p1_g2_arm', 'm10_p2_g2_arm', 'm11_p1_g2_arm',
          'm11_p2_g2_arm', 'm13_p1_g2_arm', 'm13_p2_g2_arm', 'm14_p1_g2_arm', 'm14_p2_g2_arm'])
# In[ ]:
#How to prevent NaN: df = df.fillna(0)
social_arms_g2 = social_arms_g2.fillna(0)
#print
social_arms_g2
# In[ ]:
social_arms_g2.to_excel("output11.xlsx")
# -
social_arms_g1
# +
# # Total social counts (Multi & Joint)
# In[ ]:
# One row per session's hole-choice counts for game 1, across all social
# participants; zero-filled below where a session never chose a hole.
social_counts_g1 = pd.DataFrame([j01_p1_g1_count, j01_p2_g1_count, j02_p1_g1_count, j02_p2_g1_count, j03_p1_g1_count,
          j03_p2_g1_count, j04_p1_g1_count, j04_p2_g1_count, j05_p1_g1_count, j05_p2_g1_count, j06_p1_g1_count,
          j06_p2_g1_count, j07_p1_g1_count, j07_p2_g1_count, j08_p1_g1_count, j08_p2_g1_count,
          j10_p1_g1_count, j10_p2_g1_count, j11_p1_g1_count, j11_p2_g1_count, m05_p1_g1_count, m05_p2_g1_count, m06_p1_g1_count, m06_p2_g1_count, m08_p1_g1_count, m08_p2_g1_count,
          m10_p1_g1_count, m10_p2_g1_count, m11_p1_g1_count, m11_p2_g1_count, m13_p1_g1_count, m13_p2_g1_count,
          m14_p1_g1_count, m14_p2_g1_count],
         index = ['j01_p1_g1_count', 'j01_p2_g1_count', 'j02_p1_g1_count', 'j02_p2_g1_count',
          'j03_p1_g1_count', 'j03_p2_g1_count', 'j04_p1_g1_count', 'j04_p2_g1_count', 'j05_p1_g1_count',
          'j05_p2_g1_count', 'j06_p1_g1_count', 'j06_p2_g1_count', 'j07_p1_g1_count',
          'j07_p2_g1_count', 'j08_p1_g1_count', 'j08_p2_g1_count', 'j10_p1_g1_count', 'j10_p2_g1_count',
          'j11_p1_g1_count', 'j11_p2_g1_count', 'm05_p1_g1_count', 'm05_p2_g1_count', 'm06_p1_g1_count', 'm06_p2_g1_count',
          'm08_p1_g1_count', 'm08_p2_g1_count', 'm10_p1_g1_count', 'm10_p2_g1_count', 'm11_p1_g1_count',
          'm11_p2_g1_count', 'm13_p1_g1_count', 'm13_p2_g1_count', 'm14_p1_g1_count', 'm14_p2_g1_count'])
# In[ ]:
#How to prevent NaN: df = df.fillna(0)
social_counts_g1=social_counts_g1.fillna(0)
# In[ ]:
social_counts_g1
# In[ ]:
#export to excel
social_counts_g1.to_excel("output3.xlsx")
# In[ ]:
# Column totals (choices per hole across all sessions).
social_counts_g1.sum(axis = 0, skipna = True)
# In[ ]:
social_counts_g1.mean(axis = 0)
# In[ ]:
# Row totals (total choices per session).
social_counts_g1.sum(axis=1)
# In[ ]:
# Same table for game 2.
social_counts_g2 = pd.DataFrame([j01_p1_g2_count, j01_p2_g2_count, j02_p1_g2_count, j02_p2_g2_count, j03_p1_g2_count,
          j03_p2_g2_count, j04_p1_g2_count, j04_p2_g2_count, j05_p1_g2_count, j05_p2_g2_count, j06_p1_g2_count,
          j06_p2_g2_count, j07_p1_g2_count, j07_p2_g2_count, j08_p1_g2_count, j08_p2_g2_count,
          j10_p1_g2_count, j10_p2_g2_count, j11_p1_g2_count, j11_p2_g2_count, m05_p1_g2_count, m05_p2_g2_count, m06_p1_g2_count, m06_p2_g2_count, m08_p1_g2_count, m08_p2_g2_count,
          m10_p1_g2_count, m10_p2_g2_count, m11_p1_g2_count, m11_p2_g2_count, m13_p1_g2_count, m13_p2_g2_count,
          m14_p1_g2_count, m14_p2_g2_count],
         index = ['j01_p1_g2_count', 'j01_p2_g2_count', 'j02_p1_g2_count', 'j02_p2_g2_count',
          'j03_p1_g2_count', 'j03_p2_g2_count', 'j04_p1_g2_count', 'j04_p2_g2_count', 'j05_p1_g2_count',
          'j05_p2_g2_count', 'j06_p1_g2_count', 'j06_p2_g2_count', 'j07_p1_g2_count',
          'j07_p2_g2_count', 'j08_p1_g2_count', 'j08_p2_g2_count', 'j10_p1_g2_count', 'j10_p2_g2_count',
          'j11_p1_g2_count', 'j11_p2_g2_count', 'm05_p1_g2_count', 'm05_p2_g2_count', 'm06_p1_g2_count', 'm06_p2_g2_count',
          'm08_p1_g2_count', 'm08_p2_g2_count', 'm10_p1_g2_count', 'm10_p2_g2_count', 'm11_p1_g2_count',
          'm11_p2_g2_count', 'm13_p1_g2_count', 'm13_p2_g2_count', 'm14_p1_g2_count', 'm14_p2_g2_count'])
# In[ ]:
#How to prevent NaN: df = df.fillna(0)
social_counts_g2=social_counts_g2.fillna(0)
social_counts_g2
# In[ ]:
#export to excel
social_counts_g2.to_excel("output4.xlsx")
# In[ ]:
social_counts_g2.sum(axis=1)
# -
social_counts_g1
#'''Here we combine all of the hole counts identifying them by probability (all games, confed and multiplayer, SMAB 3 and 4)'''
# DataFrame.append was removed in pandas 2.0; pd.concat is the drop-in,
# behaviour-identical replacement.
scores = pd.concat([control_scores, social_scores])
# +
# # Maximizing/Matching SOCIAL
# In[ ]:
from scipy import stats
# In[ ]:
#Optional: Idealized probability distributions to use for comparison. These assume a social strategy in which the probability
#distributions of both partners in a joint session are averaged (all holes reward at a 60% rate)
#social_max = [0, 0, 100]
#social_match = [30/180 * 100,60/180 * 100, 90/180 * 100]
# In[ ]:
#Calls jsd with the social distributions.
#def call_jsd_social(counts):
#  maxi = jsd(counts,social_max)
#  matchi = jsd(counts,social_match)
#return maxi,matchi
# In[ ]:
# Jensen-Shannon distances between each session's observed hole-choice
# distribution and the four reference strategies returned by call_jsd
# (maximize, match, random, social), per participant/player/game.
# NOTE(review): a few sessions pass hand-entered count lists (e.g.
# [0,40,60], [0,1,99]) instead of the recorded `*_count` series —
# presumably manual corrections for sessions with missing or malformed
# logs; confirm against the raw data.
j01_p1_g1_max,j01_p1_g1_match,j01_p1_g1_rand,j01_p1_g1_soc = call_jsd(j01_p1_g1_count)
j01_p1_g2_max,j01_p1_g2_match,j01_p1_g2_rand,j01_p1_g2_soc = call_jsd(j01_p1_g2_count)
j01_p2_g1_max,j01_p2_g1_match,j01_p2_g1_rand,j01_p2_g1_soc = call_jsd(j01_p2_g1_count)
j01_p2_g2_max,j01_p2_g2_match,j01_p2_g2_rand,j01_p2_g2_soc = call_jsd(j01_p2_g2_count)
j02_p1_g1_max,j02_p1_g1_match,j02_p1_g1_rand,j02_p1_g1_soc = call_jsd(j02_p1_g1_count)
j02_p1_g2_max,j02_p1_g2_match,j02_p1_g2_rand,j02_p1_g2_soc = call_jsd(j02_p1_g2_count)
j02_p2_g1_max,j02_p2_g1_match,j02_p2_g1_rand,j02_p2_g1_soc = call_jsd(j02_p2_g1_count)
j02_p2_g2_max,j02_p2_g2_match,j02_p2_g2_rand,j02_p2_g2_soc = call_jsd(j02_p2_g2_count)
j03_p1_g1_max,j03_p1_g1_match,j03_p1_g1_rand,j03_p1_g1_soc = call_jsd(j03_p1_g1_count)
j03_p1_g2_max,j03_p1_g2_match,j03_p1_g2_rand,j03_p1_g2_soc = call_jsd(j03_p1_g2_count)
j03_p2_g1_max,j03_p2_g1_match,j03_p2_g1_rand,j03_p2_g1_soc = call_jsd(j03_p2_g1_count)
j03_p2_g2_max,j03_p2_g2_match,j03_p2_g2_rand,j03_p2_g2_soc = call_jsd(j03_p2_g2_count)
j04_p1_g1_max,j04_p1_g1_match,j04_p1_g1_rand,j04_p1_g1_soc = call_jsd(j04_p1_g1_count)
j04_p1_g2_max,j04_p1_g2_match,j04_p1_g2_rand,j04_p1_g2_soc = call_jsd(j04_p1_g2_count)
j04_p2_g1_max,j04_p2_g1_match,j04_p2_g1_rand,j04_p2_g1_soc = call_jsd(j04_p2_g1_count)
j04_p2_g2_max,j04_p2_g2_match,j04_p2_g2_rand,j04_p2_g2_soc = call_jsd(j04_p2_g2_count)
j05_p1_g1_max,j05_p1_g1_match,j05_p1_g1_rand,j05_p1_g1_soc = call_jsd(j05_p1_g1_count)
j05_p1_g2_max,j05_p1_g2_match,j05_p1_g2_rand,j05_p1_g2_soc = call_jsd(j05_p1_g2_count)
j05_p2_g1_max,j05_p2_g1_match,j05_p2_g1_rand,j05_p2_g1_soc = call_jsd(j05_p2_g1_count)
j05_p2_g2_max,j05_p2_g2_match,j05_p2_g2_rand,j05_p2_g2_soc = call_jsd(j05_p2_g2_count)
j06_p1_g1_max,j06_p1_g1_match,j06_p1_g1_rand,j06_p1_g1_soc = call_jsd(j06_p1_g1_count)
j06_p1_g2_max,j06_p1_g2_match,j06_p1_g2_rand,j06_p1_g2_soc = call_jsd(j06_p1_g2_count)
j06_p2_g1_max,j06_p2_g1_match,j06_p2_g1_rand,j06_p2_g1_soc = call_jsd(j06_p2_g1_count)
j06_p2_g2_max,j06_p2_g2_match,j06_p2_g2_rand,j06_p2_g2_soc = call_jsd(j06_p2_g2_count)
j07_p1_g1_max,j07_p1_g1_match,j07_p1_g1_rand,j07_p1_g1_soc = call_jsd(j07_p1_g1_count)
j07_p1_g2_max,j07_p1_g2_match,j07_p1_g2_rand,j07_p1_g2_soc = call_jsd(j07_p1_g2_count)
j07_p2_g1_max,j07_p2_g1_match,j07_p2_g1_rand,j07_p2_g1_soc = call_jsd(j07_p2_g1_count)
j07_p2_g2_max,j07_p2_g2_match,j07_p2_g2_rand,j07_p2_g2_soc = call_jsd(j07_p2_g2_count)
j08_p1_g1_max,j08_p1_g1_match,j08_p1_g1_rand,j08_p1_g1_soc = call_jsd(j08_p1_g1_count)
j08_p1_g2_max,j08_p1_g2_match,j08_p1_g2_rand,j08_p1_g2_soc = call_jsd(j08_p1_g2_count)
j08_p2_g1_max,j08_p2_g1_match,j08_p2_g1_rand,j08_p2_g1_soc = call_jsd(j08_p2_g1_count)
j08_p2_g2_max,j08_p2_g2_match,j08_p2_g2_rand,j08_p2_g2_soc = call_jsd(j08_p2_g2_count)
j10_p1_g1_max,j10_p1_g1_match,j10_p1_g1_rand,j10_p1_g1_soc = call_jsd(j10_p1_g1_count)
j10_p1_g2_max,j10_p1_g2_match,j10_p1_g2_rand,j10_p1_g2_soc = call_jsd(j10_p1_g2_count)
j10_p2_g1_max,j10_p2_g1_match,j10_p2_g1_rand,j10_p2_g1_soc = call_jsd(j10_p2_g1_count)
j10_p2_g2_max,j10_p2_g2_match,j10_p2_g2_rand,j10_p2_g2_soc = call_jsd(j10_p2_g2_count)
j11_p1_g1_max,j11_p1_g1_match,j11_p1_g1_rand,j11_p1_g1_soc = call_jsd([0,40,60])
j11_p1_g2_max,j11_p1_g2_match,j11_p1_g2_rand,j11_p1_g2_soc = call_jsd([0,22,78])
j11_p2_g1_max,j11_p2_g1_match,j11_p2_g1_rand,j11_p2_g1_soc = call_jsd(j11_p2_g1_count)
j11_p2_g2_max,j11_p2_g2_match,j11_p2_g2_rand,j11_p2_g2_soc = call_jsd([0,1,99])
m05_p1_g1_max,m05_p1_g1_match,m05_p1_g1_rand,m05_p1_g1_soc = call_jsd(m05_p1_g1_count)
m05_p1_g2_max,m05_p1_g2_match,m05_p1_g2_rand,m05_p1_g2_soc = call_jsd(m05_p1_g2_count)
m05_p2_g1_max,m05_p2_g1_match,m05_p2_g1_rand,m05_p2_g1_soc = call_jsd(m05_p2_g1_count)
m05_p2_g2_max,m05_p2_g2_match,m05_p2_g2_rand,m05_p2_g2_soc = call_jsd(m05_p2_g2_count)
m06_p1_g1_max,m06_p1_g1_match,m06_p1_g1_rand,m06_p1_g1_soc = call_jsd(m06_p1_g1_count)
m06_p1_g2_max,m06_p1_g2_match,m06_p1_g2_rand,m06_p1_g2_soc = call_jsd(m06_p1_g2_count)
m06_p2_g1_max,m06_p2_g1_match,m06_p2_g1_rand,m06_p2_g1_soc = call_jsd(m06_p2_g1_count)
m06_p2_g2_max,m06_p2_g2_match,m06_p2_g2_rand,m06_p2_g2_soc = call_jsd(m06_p2_g2_count)
m08_p1_g1_max,m08_p1_g1_match,m08_p1_g1_rand,m08_p1_g1_soc = call_jsd(m08_p1_g1_count)
m08_p1_g2_max,m08_p1_g2_match,m08_p1_g2_rand,m08_p1_g2_soc = call_jsd(m08_p1_g2_count)
m08_p2_g1_max,m08_p2_g1_match,m08_p2_g1_rand,m08_p2_g1_soc = call_jsd(m08_p2_g1_count)
m08_p2_g2_max,m08_p2_g2_match,m08_p2_g2_rand,m08_p2_g2_soc = call_jsd(m08_p2_g2_count)
m10_p1_g1_max,m10_p1_g1_match,m10_p1_g1_rand,m10_p1_g1_soc = call_jsd(m10_p1_g1_count)
m10_p1_g2_max,m10_p1_g2_match,m10_p1_g2_rand,m10_p1_g2_soc = call_jsd(m10_p1_g2_count)
m10_p2_g1_max,m10_p2_g1_match,m10_p2_g1_rand,m10_p2_g1_soc = call_jsd(m10_p2_g1_count)
m10_p2_g2_max,m10_p2_g2_match,m10_p2_g2_rand,m10_p2_g2_soc = call_jsd(m10_p2_g2_count)
m11_p1_g1_max,m11_p1_g1_match,m11_p1_g1_rand,m11_p1_g1_soc = call_jsd(m11_p1_g1_count)
m11_p1_g2_max,m11_p1_g2_match,m11_p1_g2_rand,m11_p1_g2_soc = call_jsd([0,1,99])
m11_p2_g1_max,m11_p2_g1_match,m11_p2_g1_rand,m11_p2_g1_soc = call_jsd(m11_p2_g1_count)
m11_p2_g2_max,m11_p2_g2_match,m11_p2_g2_rand,m11_p2_g2_soc = call_jsd([0,0,100])
m13_p1_g1_max,m13_p1_g1_match,m13_p1_g1_rand,m13_p1_g1_soc = call_jsd(m13_p1_g1_count)
m13_p1_g2_max,m13_p1_g2_match,m13_p1_g2_rand,m13_p1_g2_soc = call_jsd(m13_p1_g2_count)
m13_p2_g1_max,m13_p2_g1_match,m13_p2_g1_rand,m13_p2_g1_soc = call_jsd(m13_p2_g1_count)
m13_p2_g2_max,m13_p2_g2_match,m13_p2_g2_rand,m13_p2_g2_soc = call_jsd(m13_p2_g2_count)
m14_p1_g1_max,m14_p1_g1_match,m14_p1_g1_rand,m14_p1_g1_soc = call_jsd(m14_p1_g1_count)
m14_p1_g2_max,m14_p1_g2_match,m14_p1_g2_rand,m14_p1_g2_soc = call_jsd(m14_p1_g2_count)
m14_p2_g1_max,m14_p2_g1_match,m14_p2_g1_rand,m14_p2_g1_soc = call_jsd(m14_p2_g1_count)
m14_p2_g2_max,m14_p2_g2_match,m14_p2_g2_rand,m14_p2_g2_soc = call_jsd(m14_p2_g2_count)
# +
#Create a dataframe for social (joint & multiplayer) game 1 jsd scores, with a column for each strategy.
social_jsd_g1 = pd.DataFrame({'max':[j01_p1_g1_max,j01_p2_g1_max,j02_p1_g1_max,j02_p2_g1_max,j03_p1_g1_max,j03_p2_g1_max,
             j04_p1_g1_max,j04_p2_g1_max,j05_p1_g1_max,j05_p2_g1_max,j06_p1_g1_max,j06_p2_g1_max,
             j07_p1_g1_max,j07_p2_g1_max,j08_p1_g1_max,j08_p2_g1_max, j10_p1_g1_max, j10_p2_g1_max,
             j11_p1_g1_max, j11_p2_g1_max, m05_p1_g1_max, m05_p2_g1_max, m06_p1_g1_max, m06_p2_g1_max,
             m08_p1_g1_max, m08_p2_g1_max, m10_p1_g1_max, m10_p2_g1_max, m11_p1_g1_max, m11_p2_g1_max,
             m13_p1_g1_max, m13_p2_g1_max, m14_p1_g1_max, m14_p2_g1_max],
       'match':[j01_p1_g1_match,j01_p2_g1_match,j02_p1_g1_match, j02_p2_g1_match,j03_p1_g1_match,j03_p2_g1_match,
             j04_p1_g1_match,j04_p2_g1_match,j05_p1_g1_match,
             j05_p2_g1_match,j06_p1_g1_match,j06_p2_g1_match,
             j07_p1_g1_match,j07_p2_g1_match,j08_p1_g1_match,j08_p2_g1_match, j10_p1_g1_match,
             j10_p2_g1_match, j11_p1_g1_match, j11_p2_g1_match, m05_p1_g1_match, m05_p2_g1_match,
             m06_p1_g1_match, m06_p2_g1_match, m08_p1_g1_match, m08_p2_g1_match, m10_p1_g1_match,
             m10_p2_g1_match, m11_p1_g1_match, m11_p2_g1_match, m13_p1_g1_match, m13_p2_g1_match,
             m14_p1_g1_match, m14_p2_g1_match],
       'rand':[j01_p1_g1_rand,j01_p2_g1_rand,j02_p1_g1_rand,j02_p2_g1_rand,j03_p1_g1_rand,j03_p2_g1_rand,
             j04_p1_g1_rand,j04_p2_g1_rand,j05_p1_g1_rand,j05_p2_g1_rand,j06_p1_g1_rand,j06_p2_g1_rand,
             j07_p1_g1_rand,j07_p2_g1_rand,j08_p1_g1_rand,j08_p2_g1_rand, j10_p1_g1_rand, j10_p2_g1_rand,
             j11_p1_g1_rand, j11_p2_g1_rand, m05_p1_g1_rand, m05_p2_g1_rand, m06_p1_g1_rand, m06_p2_g1_rand,
             m08_p1_g1_rand, m08_p2_g1_rand, m10_p1_g1_rand, m10_p2_g1_rand, m11_p1_g1_rand, m11_p2_g1_rand,
             m13_p1_g1_rand, m13_p2_g1_rand, m14_p1_g1_rand, m14_p2_g1_rand],
       'soc':[j01_p1_g1_soc,j01_p2_g1_soc,j02_p1_g1_soc, j02_p2_g1_soc,j03_p1_g1_soc,j03_p2_g1_soc,
             j04_p1_g1_soc,j04_p2_g1_soc,j05_p1_g1_soc,
             j05_p2_g1_soc,j06_p1_g1_soc,j06_p2_g1_soc,
             j07_p1_g1_soc,j07_p2_g1_soc,j08_p1_g1_soc,j08_p2_g1_soc, j10_p1_g1_soc,
             j10_p2_g1_soc, j11_p1_g1_soc, j11_p2_g1_soc, m05_p1_g1_soc, m05_p2_g1_soc,
             m06_p1_g1_soc, m06_p2_g1_soc, m08_p1_g1_soc, m08_p2_g1_soc, m10_p1_g1_soc,
             m10_p2_g1_soc, m11_p1_g1_soc, m11_p2_g1_soc, m13_p1_g1_soc, m13_p2_g1_soc,
             m14_p1_g1_soc, m14_p2_g1_soc]},
       index=['j01', 'j01', 'j02', 'j02', 'j03', 'j03','j04',
       'j04','j05', 'j05', 'j06', 'j06','j07','j07','j08','j08', 'j10', 'j10', 'j11', 'j11','m05','m05','m06', 'm06', 'm08', 'm08', 'm10', 'm10', 'm11', 'm11', 'm13','m13', 'm14', 'm14'])
# In[ ]:
#to print without truncation
pd.set_option('display.max_rows', None)
# In[ ]:
social_jsd_g1
# In[ ]:
#export to excel
social_jsd_g1.to_excel("output1.xlsx")
# In[ ]:
# get the column name of min values in every row
# The minimising column identifies the strategy each session's behaviour
# was closest to.
strategy_social_jsd_g1 = social_jsd_g1.idxmin(axis=1)
print("min values of row are at following columns :")
print(strategy_social_jsd_g1)
# In[ ]:
strategy_social_jsd_g1.value_counts()
# In[ ]:
#social_types_g1 = check_type(social_jsd_g1)
# In[ ]:
#social_types_g1.drop([0])
# In[ ]:
#(social_types_g1 == 'maxi').sum()
# +
#Create a dataframe for social (joint & multiplayer) game 2 jsd scores, with a column for each strategy.
social_jsd_g2 = pd.DataFrame({'max':[j01_p1_g2_max,j01_p2_g2_max,j02_p1_g2_max,j02_p2_g2_max,j03_p1_g2_max,j03_p2_g2_max,
             j04_p1_g2_max,j04_p2_g2_max,j05_p1_g2_max,j05_p2_g2_max,j06_p1_g2_max,j06_p2_g2_max,
             j07_p1_g2_max,j07_p2_g2_max,j08_p1_g2_max,j08_p2_g2_max, j10_p1_g2_max, j10_p2_g2_max,
             j11_p1_g2_max, j11_p2_g2_max, m05_p1_g2_max, m05_p2_g2_max, m06_p1_g2_max, m06_p2_g2_max,
             m08_p1_g2_max, m08_p2_g2_max, m10_p1_g2_max, m10_p2_g2_max, m11_p1_g2_max, m11_p2_g2_max,
             m13_p1_g2_max, m13_p2_g2_max, m14_p1_g2_max, m14_p2_g2_max],
       'match':[j01_p1_g2_match,j01_p2_g2_match,j02_p1_g2_match, j02_p2_g2_match,j03_p1_g2_match,j03_p2_g2_match,
             j04_p1_g2_match,j04_p2_g2_match,j05_p1_g2_match,
             j05_p2_g2_match,j06_p1_g2_match,j06_p2_g2_match,
             j07_p1_g2_match,j07_p2_g2_match,j08_p1_g2_match,j08_p2_g2_match, j10_p1_g2_match,
             j10_p2_g2_match, j11_p1_g2_match, j11_p2_g2_match, m05_p1_g2_match, m05_p2_g2_match,
             m06_p1_g2_match, m06_p2_g2_match, m08_p1_g2_match, m08_p2_g2_match, m10_p1_g2_match,
             m10_p2_g2_match, m11_p1_g2_match, m11_p2_g2_match, m13_p1_g2_match, m13_p2_g2_match,
             m14_p1_g2_match, m14_p2_g2_match],
       'rand':[j01_p1_g2_rand,j01_p2_g2_rand,j02_p1_g2_rand,j02_p2_g2_rand,j03_p1_g2_rand,j03_p2_g2_rand,
             j04_p1_g2_rand,j04_p2_g2_rand,j05_p1_g2_rand,j05_p2_g2_rand,j06_p1_g2_rand,j06_p2_g2_rand,
             j07_p1_g2_rand,j07_p2_g2_rand,j08_p1_g2_rand,j08_p2_g2_rand, j10_p1_g2_rand, j10_p2_g2_rand,
             j11_p1_g2_rand, j11_p2_g2_rand, m05_p1_g2_rand, m05_p2_g2_rand, m06_p1_g2_rand, m06_p2_g2_rand,
             m08_p1_g2_rand, m08_p2_g2_rand, m10_p1_g2_rand, m10_p2_g2_rand, m11_p1_g2_rand, m11_p2_g2_rand,
             m13_p1_g2_rand, m13_p2_g2_rand, m14_p1_g2_rand, m14_p2_g2_rand],
       'soc':[j01_p1_g2_soc,j01_p2_g2_soc,j02_p1_g2_soc, j02_p2_g2_soc,j03_p1_g2_soc,j03_p2_g2_soc,
             j04_p1_g2_soc,j04_p2_g2_soc,j05_p1_g2_soc,
             j05_p2_g2_soc,j06_p1_g2_soc,j06_p2_g2_soc,
             j07_p1_g2_soc,j07_p2_g2_soc,j08_p1_g2_soc,j08_p2_g2_soc, j10_p1_g2_soc,
             j10_p2_g2_soc, j11_p1_g2_soc, j11_p2_g2_soc, m05_p1_g2_soc, m05_p2_g2_soc,
             m06_p1_g2_soc, m06_p2_g2_soc, m08_p1_g2_soc, m08_p2_g2_soc, m10_p1_g2_soc,
             m10_p2_g2_soc, m11_p1_g2_soc, m11_p2_g2_soc, m13_p1_g2_soc, m13_p2_g2_soc,
             m14_p1_g2_soc, m14_p2_g2_soc]},
       index=['j01', 'j01', 'j02', 'j02', 'j03', 'j03','j04',
       'j04','j05', 'j05', 'j06', 'j06','j07','j07','j08','j08', 'j10', 'j10', 'j11', 'j11','m05','m05','m06', 'm06', 'm08', 'm08', 'm10', 'm10', 'm11', 'm11', 'm13','m13', 'm14', 'm14'])
# In[ ]:
social_jsd_g2
# In[ ]:
#export to excel
social_jsd_g2.to_excel("output2.xlsx")
# In[ ]:
# get the column name of min values in every row
# The minimising column identifies the strategy each session's behaviour
# was closest to.
strategy_social_jsd_g2 = social_jsd_g2.idxmin(axis=1)
print("min values of row are at following columns :")
print(strategy_social_jsd_g2)
# In[ ]:
strategy_social_jsd_g2.value_counts()
# In[ ]:
social_jsd_g1.mean()
# In[ ]:
social_jsd_g2.mean()
# # Social strategy stats
# For each reference strategy (max, match, rand, soc): extract the JSD
# column for games 1 and 2, report mean/std, and run a paired t-test
# between games (sessions are matched across games by row order).
# # Max strategy
# In[ ]:
social_max_g1 = social_jsd_g1.loc[ : , 'max' ]
# In[ ]:
social_max_g1
# In[ ]:
social_max_g2 = social_jsd_g2.loc[ : , 'max' ]
# In[ ]:
social_max_g2
# In[ ]:
social_max_g1_mean = social_max_g1.mean()
# In[ ]:
social_max_g1_mean
# In[ ]:
social_max_g1_std = social_max_g1.std()
# In[ ]:
social_max_g1_std
# In[ ]:
social_max_g2_mean = social_max_g2.mean()
# In[ ]:
social_max_g2_mean
# In[ ]:
social_max_g2_std = social_max_g2.std()
social_max_g2_std
# In[ ]:
social_max_p = sp.stats.ttest_rel(social_max_g1,social_max_g2)
# In[ ]:
social_max_p
# # Match strategy
# In[ ]:
social_match_g1 = social_jsd_g1.loc[ : , 'match' ]
# In[ ]:
social_match_g1
# In[ ]:
social_match_g2 = social_jsd_g2.loc[ : , 'match' ]
# In[ ]:
social_match_g2
# In[ ]:
social_match_g1_mean = social_match_g1.mean()
# In[ ]:
social_match_g1_mean
# In[ ]:
social_match_g1_std = social_match_g1.std()
# In[ ]:
social_match_g1_std
# In[ ]:
social_match_g2_mean = social_match_g2.mean()
# In[ ]:
social_match_g2_mean
# In[ ]:
social_match_g2_std = social_match_g2.std()
# In[ ]:
social_match_g2_std
# In[ ]:
social_match_p = sp.stats.ttest_rel(social_match_g1,social_match_g2)
# In[ ]:
social_match_p
# # Random strategy
# In[ ]:
social_rand_g1 = social_jsd_g1.loc[ : , 'rand' ]
# In[ ]:
social_rand_g1
# In[ ]:
social_rand_g2 = social_jsd_g2.loc[ : , 'rand' ]
# In[ ]:
social_rand_g2
# In[ ]:
social_rand_g1_mean = social_rand_g1.mean()
# In[ ]:
social_rand_g1_mean
# In[ ]:
social_rand_g1_std = social_rand_g1.std()
# In[ ]:
social_rand_g1_std
# In[ ]:
social_rand_g2_mean = social_rand_g2.mean()
# In[ ]:
social_rand_g2_mean
# In[ ]:
social_rand_g2_std = social_rand_g2.std()
# In[ ]:
social_rand_g2_std
# In[ ]:
social_rand_p = sp.stats.ttest_rel(social_rand_g1,social_rand_g2)
# In[ ]:
social_rand_p
# # Social strategy
# In[ ]:
social_soc_g1 = social_jsd_g1.loc[ : , 'soc' ]
# In[ ]:
social_soc_g1
# In[ ]:
social_soc_g2 = social_jsd_g2.loc[ : , 'soc' ]
# In[ ]:
social_soc_g2
# In[ ]:
social_soc_g1_mean = social_soc_g1.mean()
# In[ ]:
social_soc_g1_mean
# In[ ]:
social_soc_g1_std = social_soc_g1.std()
# In[ ]:
social_soc_g1_std
# In[ ]:
social_soc_g2_mean = social_soc_g2.mean()
# In[ ]:
social_soc_g2_mean
# In[ ]:
social_soc_g2_std = social_soc_g2.std()
# In[ ]:
social_soc_g2_std
# In[ ]:
social_soc_p = sp.stats.ttest_rel(social_soc_g1,social_soc_g2)
# In[ ]:
social_soc_p
# In[ ]:
#social_types_g2 = check_type(social_jsd_g2)
# In[ ]:
#social_types_g2.drop([0])
# In[ ]:
#(social_types_g2 == 'maxi').sum()
# In[ ]:
import matplotlib.lines as mlines
# In[ ]:
#colors_jsd = [['lightcoral','red','lightcoral','red','bisque','orange','bisque','orange','palegreen','green','palegreen','green',
#'lightblue','blue','lightblue','blue','mediumslateblue','indigo','mediumslateblue','indigo','lightpink','deeppink',
#'lightpink','deeppink','yellow','gold','yellow','gold','lightgray','gray','lightgray','gray']
# +
# In[ ]:
import itertools
# In[ ]:
# Per-session scatter colors for the JSD plots, one entry per row of the
# social_jsd_g1/g2 frames.
# NOTE(review): presumably red = classified egocentric, blue = not (the
# assignment looks hand-coded per session) — confirm against the
# strategy classifications above.
#colors_jsd_j = ['red','lightcoral','red','darkred','green','red',
#                'blue','darkblue','hotblue','blue','purple','plum','peru','saddlebrown','gray','lightgray']
colors_jsd_social_g1_egocentric = ['red','red','red','red','red','red','red',
                   'red','red','red', 'red','red','red','red',
                   'red', 'red','red', 'red','red', 'blue','red','red',
                   'red','red','red','red', 'red', 'red','blue',
                   'blue','red','red','red','red']
colors_jsd_social_g2_egocentric = ['red','red','red','red','red','blue','red',
                   'red','red','red', 'red','red','blue','red',
                   'blue', 'red','red', 'red','red', 'blue','red','blue',
                   'red','red','red','blue', 'red', 'red','blue',
                   'blue','red','red','red','red']
colors_jsd_social_g1_joint = ['green','green','green','green','green','green','green','green'
                   ,'green','green', 'green','green','green','green',
                   'green','green','green','green','green','green','green','green','green'
                   ,'green','green','green','green', 'green','green',
                   'green','green','green','green','green']
colors_jsd_social_g2_joint = ['green','green','green','green','green','green','green','green'
                   ,'green','green', 'green','green','green','green',
                   'green','green','green','green','green','green','green','green','green'
                   ,'green','green','green','green', 'green','green',
                   'green','green','green','green','green']
#markers_jsd = ['o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v'
#               'o','v','o','v','o','v','o','v']
# NOTE(review): stray cell — no variable named `colors` is defined in this
# chunk (only the colors_jsd_social_* lists above); this likely raises
# NameError. TODO confirm or remove.
colors
# In[ ]:
#Here I print the means of the JSD values in each condition, just to see what they are.'''
# In[ ]:
#run t test to see sig between max and match per game
#put in box plot
social_jsd_g1.mean()
# In[ ]:
social_jsd_g2.mean()
# In[ ]:
import matplotlib.patches as mpatches
# # Graphs
# In[ ]:
#Here I try to plot the JSD Values. Continued below under 'JSD with gender.
# In[ ]:
#under line = matching (closer to 0)
#plots of actual jsd values
# Scatter of each session's JSD to the maximise strategy (x) vs the match
# strategy (y) for game 1, with a fitted regression line and the identity
# line for reference; saved as EPS.
m, b = np.polyfit(social_jsd_g1['max'], social_jsd_g1['match'], 1)
social_ego_jsd_g1_plot = social_jsd_g1.plot.scatter(x='max',y='match',s=50,color=colors_jsd_social_g1_egocentric)
plt.title('Social Egocentric Strategies Game 1', fontsize=18, y=1.05)
social_ego_jsd_g1_plot.plot(social_jsd_g1['max'], m*social_jsd_g1['max'] + b, linewidth=2)
plt.xticks(np.arange(0, 1, step=0.1))
plt.yticks(np.arange(0, 1, step=0.1))
#line1 = mlines.Line2D([0.2148, 0.2148],[-0.03, 0.5], color='red')
#line2 = mlines.Line2D([0,0.8],[0.2148,0.2148])
#plt.xlabel('Match')
#plt.ylabel('Maximize')
plt.ylim(-.1, .5)
plt.xlim(-.1, .5)
plt.gca().set_aspect('equal', adjustable='box')
plt.draw()
#add legend (NEEDS TO BE FIXED)
from numpy.random import randn
z = randn(100)
#blue_dot, = plt.plot(z, "bo", markersize=10)
#red_dot, = plt.plot(z, "ro", markersize=10)
#plt.legend([blue_dot, red_dot], ["Maximize", "Match"])
line1 = mlines.Line2D([-1, .5],[-1, .5], color='black', linewidth=1)
social_ego_jsd_g1_plot.add_line(line1)
#s_jsd_g2_plot.add_line(line2)
plt.savefig('social_ego_jsd_1.eps',format='eps',bbox_inches='tight',dpi=1000)
plt.show()
# In[ ]:
#under line = matching (closer to 0)
#plots of atual jsd values
social_joint_jsd_g1_plot = social_jsd_g1.plot.scatter(x='rand',y='soc',s=50,color=colors_jsd_social_g1_joint,title='Multiplayer Social Strategies Game 1')
plt.xticks(np.arange(0, 0.5, step=0.1))
plt.yticks(np.arange(0, 0.5, step=0.1))
#line1 = mlines.Line2D([0.2148, 0.2148],[-0.03, 0.5], color='red')
#line2 = mlines.Line2D([0,0.8],[0.2148,0.2148])
plt.ylim(-.1, .5)
plt.xlim(-.1, .5)
#make axes equal length
plt.gca().set_aspect('equal', adjustable='box')
plt.draw()
#add legend FIX
#from numpy.random import randn
#z = randn(100)
#blue_dot, = plt.plot(z, "bo", markersize=10)
#red_dot, = plt.plot(z, "ro", markersize=10)
#plt.legend([blue_dot, red_dot], ["Maximize", "Match"])
#add line
line1 = mlines.Line2D([-1, .5],[-1, .5], color='black')
social_joint_jsd_g1_plot.add_line(line1)
plt.savefig('social_joint_jsd_1.eps',format='eps',dpi=1000)
plt.show()
# save as jpeg
social_joint_jsd_g1_plot.figure.savefig(plot_file_name,
format='jpeg',
dpi=100)
# In[ ]:
#under line = matching (closer to 0)
#plots of actual jsd values
m, b = np.polyfit(social_jsd_g2['max'], social_jsd_g2['match'], 1)
social_ego_jsd_g2_plot = social_jsd_g2.plot.scatter(x='max',y='match',s=50,color=colors_jsd_social_g2_egocentric)
plt.title('Social Egocentric Strategies Game 2', fontsize=18, y=1.05)
social_ego_jsd_g2_plot.plot(social_jsd_g2['max'], m*social_jsd_g2['max'] + b, linewidth=2)
plt.xticks(np.arange(0, 1, step=0.1))
plt.yticks(np.arange(0, 1, step=0.1))
#line1 = mlines.Line2D([0.2148, 0.2148],[-0.03, 0.5], color='red')
#line2 = mlines.Line2D([0,0.8],[0.2148,0.2148])
#plt.xlabel('Match')
#plt.ylabel('Maximize')
plt.ylim(-.1, .5)
plt.xlim(-.1, .5)
plt.gca().set_aspect('equal', adjustable='box')
plt.draw()
#add legend (NEEDS TO BE FIXED)
from numpy.random import randn
z = randn(100)
#blue_dot, = plt.plot(z, "bo", markersize=10)
#red_dot, = plt.plot(z, "ro", markersize=10)
#plt.legend([blue_dot, red_dot], ["Maximize", "Match"])
line1 = mlines.Line2D([-1, .5],[-1, .5], color='black', linewidth=1)
social_ego_jsd_g2_plot.add_line(line1)
#s_jsd_g2_plot.add_line(line2)
plt.savefig('social_ego_jsd_2.eps',format='eps',bbox_inches='tight',dpi=1000)
plt.show()
# In[ ]:
#under line = matching (closer to 0)
#plots of atual jsd values
social_joint_jsd_g2_plot = social_jsd_g2.plot.scatter(x='rand',y='soc',s=50,color=colors_jsd_social_g2_joint,title='Multiplayer Social Strategies Game 2')
plt.xticks(np.arange(0, 0.5, step=0.1))
plt.yticks(np.arange(0, 0.5, step=0.1))
#line1 = mlines.Line2D([0.2148, 0.2148],[-0.03, 0.5], color='red')
#line2 = mlines.Line2D([0,0.8],[0.2148,0.2148])
plt.ylim(-.1, .5)
plt.xlim(-.1, .5)
#make axes equal length
plt.gca().set_aspect('equal', adjustable='box')
plt.draw()
#add legend FIX
#from numpy.random import randn
#z = randn(100)
#blue_dot, = plt.plot(z, "bo", markersize=10)
#red_dot, = plt.plot(z, "ro", markersize=10)
#plt.legend([blue_dot, red_dot], ["Maximize", "Match"])
#add line
line1 = mlines.Line2D([-1, .5],[-1, .5], color='black')
social_joint_jsd_g2_plot.add_line(line1)
plt.savefig('social_joint_jsd_2.eps',format='eps',dpi=1000)
plt.show()
# save as jpeg
social_joint_jsd_g2_plot.figure.savefig(plot_file_name,
format='jpeg',
dpi=100)
# # CONTROL (total) scores in G1 and G2
# In[ ]:
# Build one-column DataFrames of CONTROL-condition total scores, one row per
# participant, for game 1 and game 2 (s* = single-player, c* = control IDs).
#list out all the scores in game 1 by participant
control_scores_1 = pd.DataFrame({'participant':['s01','s03', 's05',
's06', 's07', 's08', 's09', 's11', 's12', 's15', 's16', 's17', 's19', 's20','c02', 'c03',
'c05', 'c06', 'c09', 'c10', 'c11', 'c12', 'c13', 'c14', 'c16', 'c17'], 'control_score_1':
[s01_p1_g1_score, s03_p1_g1_score, s05_p1_g1_score,
s06_p1_g1_score, s07_p1_g1_score, s08_p1_g1_score, s09_p1_g1_score, s11_p1_g1_score, s12_p1_g1_score, s15_p1_g1_score, s16_p1_g1_score, s17_p1_g1_score, s19_p1_g1_score, s20_p1_g1_score, c02_p1_g1_score, c03_p1_g1_score,
c05_p1_g1_score, c06_p1_g1_score, c09_p1_g1_score, c10_p1_g1_score, c11_p1_g1_score, c12_p1_g1_score, c13_p1_g1_score, c14_p1_g1_score, c16_p1_g1_score, c17_p1_g1_score]})
#to prevent an extra column that numbers each row:
control_scores_1.set_index('participant', inplace=True, drop=True)
#print
control_scores_1
# In[ ]:
#list out all the scores in game 2 by participant
control_scores_2 = pd.DataFrame({'participant':['s01', 's03', 's05',
's06', 's07', 's08', 's09', 's11', 's12', 's15', 's16', 's17', 's19', 's20','c02', 'c03',
'c05', 'c06', 'c09', 'c10', 'c11', 'c12', 'c13', 'c14', 'c16', 'c17'], 'control_score_2':
[s01_p1_g2_score, s03_p1_g2_score, s05_p1_g2_score,
s06_p1_g2_score, s07_p1_g2_score, s08_p1_g2_score, s09_p1_g2_score, s11_p1_g2_score, s12_p1_g2_score, s15_p1_g2_score, s16_p1_g2_score, s17_p1_g2_score, s19_p1_g2_score, s20_p1_g2_score,
c02_p1_g2_score, c03_p1_g2_score, c05_p1_g2_score, c06_p1_g2_score, c09_p1_g2_score, c10_p1_g2_score, c11_p1_g2_score, c12_p1_g2_score, c13_p1_g2_score, c14_p1_g2_score, c16_p1_g2_score, c17_p1_g2_score]})
#to prevent an extra column that numbers each row:
control_scores_2.set_index('participant', inplace=True, drop=True)
#s_scores_1 = pd.DataFrame(data = d1)
#s_scores_2 = pd.DataFrame(data = d2)
#print
control_scores_2
# In[ ]:
#merge both games by participant:
# Bug fixed: after set_index('participant', drop=True) there is no
# 'participant' COLUMN, so merge(on='participant') raises KeyError.
# Join on the (participant) index instead — same result, one row each.
control_scores = pd.merge(control_scores_1, control_scores_2, left_index=True, right_index=True)
# In[ ]:
#label columns
control_scores.columns=['Game 1','Game 2']
# In[ ]:
control_scores
# In[ ]:
# Descriptive stats per game, then a paired t-test across games.
control_scores_1_mean = control_scores_1.mean()
# In[ ]:
control_scores_1_mean
# In[ ]:
control_scores_1_std = control_scores_1.std()
# In[ ]:
control_scores_1_std
# In[ ]:
control_scores_2_mean = control_scores_2.mean()
# In[ ]:
control_scores_2_mean
# In[ ]:
control_scores_2_std = control_scores_2.std()
# In[ ]:
control_scores_2_std
# In[ ]:
control_scores_med = control_scores.median()
control_scores_med
# In[ ]:
# Paired (related-samples) t-test: game 1 vs game 2 scores.
control_scores_p = sp.stats.ttest_rel(control_scores['Game 1'],control_scores['Game 2'])
# In[ ]:
control_scores_p
# In[ ]:
# # End of SMAB3 analysis
# +
# In[ ]:
# Compare joint-session scores for same-gender vs mixed-gender pairs.
#hypothesis: scores lower for same gender pairs: male-male
'''here i compared scores of pairs of mixed gender and same gender pairs. there are a lot of different types of analyses you can
do on the choice data, and you can group the data by demographics, questionnaire responses, etc.'''
# In[ ]:
# In[ ]:
# Female-female pairs: j03, j04, j07, j08 (both players, both games).
scores_f = pd.DataFrame([j03_p1_g1_score,j03_p1_g2_score,j03_p2_g1_score,j03_p2_g2_score,j04_p1_g1_score,
j04_p1_g2_score,j04_p2_g1_score,j04_p2_g2_score,j07_p1_g1_score,j07_p1_g2_score,j07_p2_g1_score,
j07_p2_g2_score,j08_p1_g1_score,j08_p1_g2_score,j08_p2_g1_score,j08_p2_g2_score])
# In[ ]:
# NOTE(review): the j05 scores appear TWICE in this list (positions 9-12 and
# 13-16) — probably a copy-paste slip where one block should be another
# pair. Confirm against the session roster before trusting mix_mean.
scores_mix = pd.DataFrame([j01_p1_g1_score,j01_p1_g2_score,j01_p2_g1_score,j01_p2_g2_score,j02_p1_g1_score,
j02_p1_g2_score,j02_p2_g1_score,j02_p2_g2_score,j05_p1_g1_score,j05_p1_g2_score,j05_p2_g1_score,
j05_p2_g2_score,j05_p1_g1_score,j05_p1_g2_score,j05_p2_g1_score,j05_p2_g2_score,j06_p1_g1_score,
j06_p1_g2_score,j06_p2_g1_score,j06_p2_g2_score])
# In[ ]:
#m_mean=scores_m.mean()
f_mean=scores_f.mean()
mix_mean=scores_mix.mean()
# In[ ]:
#m_mean
# In[ ]:
f_mean
# In[ ]:
mix_mean
# In[ ]:
j_combo = j_combo.sort_values(by=['participant'])
# In[ ]:
#j_combo_bar = j_combo.plot(kind='bar',color=colors)
# In[ ]:
# Per-player score differences across games for the joint sessions
# (presumably game 2 minus game 1 — confirm where *_diff is computed).
# Bug fixed: the last two participant labels read 'j06_p1'/'j06_p2' but the
# paired values are j08_p1_diff/j08_p2_diff — relabelled to 'j08_*'.
j_score_diff = pd.DataFrame({'participant':['j01_p1','j01_p2','j02_p1','j02_p2','j03_p1','j03_p2','j04_p1','j04_p2',
'j05_p1','j05_p2','j06_p1','j06_p2','j07_p1','j07_p2','j08_p1','j08_p2'], 'score_diff':
[j01_p1_diff,j01_p2_diff,j02_p1_diff,j02_p2_diff,j03_p1_diff,j03_p2_diff,j04_p1_diff,j04_p2_diff,
j05_p1_diff,j05_p2_diff,j06_p1_diff,j06_p2_diff,j07_p1_diff,j07_p2_diff,j08_p1_diff,j08_p2_diff]})
# In[ ]:
j_score_diff
# In[ ]:
#impairs learning?
# In[ ]:
#pattern within sessions for score_diff?
#5 ppl did worse second time; joint interfered with learning
# In[ ]:
# Mean cross-game score change over all joint-session players.
j_score_diff_mean = j_score_diff['score_diff'].mean()
# In[ ]:
j_score_diff_mean
# +
# In[ ]:
# Split each joint session into per-player, per-game frames, then build the
# game-2 choice-count table and compare arms across games.
j01_p1_g1, j01_p1_g2, j01_p2_g1, j01_p2_g2 = separate_df(j01, 'j')
j02_p1_g1, j02_p1_g2, j02_p2_g1, j02_p2_g2 = separate_df(j02, 'j')
j03_p1_g1, j03_p1_g2, j03_p2_g1, j03_p2_g2 = separate_df(j03, 'j')
j04_p1_g1, j04_p1_g2, j04_p2_g1, j04_p2_g2 = separate_df(j04, 'j')
j05_p1_g1, j05_p1_g2, j05_p2_g1, j05_p2_g2 = separate_df(j05, 'j')
j06_p1_g1, j06_p1_g2, j06_p2_g1, j06_p2_g2 = separate_df(j06, 'j')
j07_p1_g1, j07_p1_g2, j07_p2_g1, j07_p2_g2 = separate_df(j07, 'j')
j08_p1_g1, j08_p1_g2, j08_p2_g1, j08_p2_g2 = separate_df(j08, 'j')
# In[ ]:
#social matching
# In[ ]:
j_g2_counts = pd.DataFrame([j01_p1_g2_count, j01_p2_g2_count, j02_p1_g2_count, j02_p2_g2_count,
j03_p1_g2_count, j03_p2_g2_count, j04_p1_g2_count, j04_p2_g2_count,
j05_p1_g2_count,j05_p2_g2_count, j06_p1_g2_count, j06_p2_g2_count, j07_p1_g2_count,
j07_p2_g2_count, j08_p1_g2_count, j08_p2_g2_count],
index=['j01_p1_g2_count', 'j01_p2_g2_count', 'j02_p1_g2_count', 'j02_p2_g2_count',
'j03_p1_g2_count', 'j03_p2_g2_count', 'j04_p1_g2_count', 'j04_p2_g2_count',
'j05_p1_g2_count','j05_p2_g2_count', 'j06_p1_g2_count', 'j06_p2_g2_count', 'j07_p1_g2_count',
'j07_p2_g2_count', 'j08_p1_g2_count', 'j08_p2_g2_count'])
# In[ ]:
j_g2_counts
# In[ ]:
# Paired t-tests per arm between game 1 and game 2 counts.
# Column 1 = middle arm ("60"), column 0 = low arm ("30") — assumes arms
# are ordered low/med/high; TODO confirm against separate_df.
j_60_p = sp.stats.ttest_rel(j_g1_counts.iloc[:,1],j_g2_counts.iloc[:,1])
# In[ ]:
j_60_p
# In[ ]:
j_30_p = sp.stats.ttest_rel(j_g1_counts.iloc[:,0],j_g2_counts.iloc[:,0])
# In[ ]:
j_30_p
# In[ ]:
# Mean and SD of choices per arm in game 2.
j_g2_low = np.mean(j_g2_counts.iloc[:,0])
j_g2_med = np.mean(j_g2_counts.iloc[:,1])
j_g2_high = np.mean(j_g2_counts.iloc[:,2])
j_g2_low_std = np.std(j_g2_counts.iloc[:,0])
j_g2_med_std = np.std(j_g2_counts.iloc[:,1])
j_g2_high_std = np.std(j_g2_counts.iloc[:,2])
# In[ ]:
j_g2_low
# In[ ]:
j_g2_med
# In[ ]:
j_g2_high
# In[ ]:
j_g1_counts_bar = j_g1_counts.plot.bar()
# In[ ]:
j_g2_counts_bar = j_g2_counts.plot.bar()
# In[ ]:
# Boxplots of the joint-session choice distributions, saved as jpegs.
j_g1_counts_box = sns.boxplot(
data=j_g1_counts,
width=0.5,
palette="pastel")
j_g1_counts_box.axes.set_title("Multiplayer Choice Distributions in Game 1",
fontsize=16)
plot_file_name="j_g1_counts_box.jpg"
plt.ylim(0, 100)
j_g1_counts_box.axes.set(xlabel='Arm',ylabel='Frequency')
# save as jpeg
j_g1_counts_box.figure.savefig(plot_file_name,
format='jpeg',
dpi=100)
# In[ ]:
j_g2_counts_box = sns.boxplot(
data=j_g2_counts,
width=0.5,
palette="pastel")
j_g2_counts_box.axes.set_title("Multiplayer Choice Distributions in Game 2",
fontsize=16)
plot_file_name="j_g2_counts_box.jpg"
plt.ylim(0, 100)
j_g2_counts_box.axes.set(xlabel='Arm',ylabel='Frequency')
# save as jpeg
j_g2_counts_box.figure.savefig(plot_file_name,
format='jpeg',
dpi=100)
# In[ ]:
# -
# # Maximizing and Matching Strategies
# +
#Here we compare the choice distributions to idealized models of two decision-making strategies:
#matching and maximizing. Matching would be choosing each each hole proportionally to its reward rate,
#and maximizing would be always choosing the high reward hole.
# In[ ]:
# NOTE(review): s_g1_counts is displayed twice here — the second display was
# possibly meant to be s_g2_counts.
s_g1_counts
# In[ ]:
s_g1_counts
# In[ ]:
j_g1_counts
# In[ ]:
j_g2_counts
# In[ ]:
#JSD: Jensen Shannon Divergence. Measures the similarity between two probability distributions. Taken from
#https://urldefense.com/v3/__https://gist.github.com/zhiyzuo/f80e2b1cfb493a5711330d271a228a3d__;!!Mih3wA!SwZbl3yG75UWaB_c9Pq_T5wxVHgFZMbUZ5HHf5pZDf119g1JHaZr-dX94Xg7JRI$ . *Note: probability distributions need to be
#normalized.'''
#parameters: p and q are the two probability distributions to be compared.
#parameters: p and q are the two probability distributions to be compared.
def jsd(p, q, base=np.e):
    """Jensen-Shannon divergence between two discrete distributions.

    Inputs need not be normalized; each is rescaled to sum to 1 first.
    Returns 0 for identical distributions and log(2) (in ``base``) for
    fully disjoint ones.  Based on the pairwise JSD definition
    (Wikipedia: Jensen-Shannon divergence).
    """
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    # normalize to probability vectors
    p = p / p.sum()
    q = q / q.sum()
    mixture = (p + q) / 2.0
    left = sp.stats.entropy(p, mixture, base=base)
    right = sp.stats.entropy(q, mixture, base=base)
    return (left + right) / 2.0
# In[ ]:
#Are ppl using a strategy similar to their partner's? This functions calls jsd for p1 and for p2. *UNFINISHED*
#DO NOT USE.
#calls jsd for p1 and p2 and for p1 and p2 with the mean
#def compare_strat(p1,p2):
#strat = jsd(p1,p2)
# comp_1 = jsd(p1,[j_g1_low,j_g1_med,j_g1_high])
#comp_2 = jsd(p2,[j_g1_low,j_g1_med,j_g1_high])
#return strat, comp_1, comp_2
# In[ ]:
#Idealized probability distributions to use for comparison. You can change these or add more models.
# maximize: all choices on the high-reward arm.
# match: choices proportional to reward rates (30/60/90 out of 180).
maximize = [0,0,100]
match = [30/180 * 100,60/180 * 100,50]
# In[ ]:
#This function calls jsd.
#Input: counts- a player's choice distributions in a particular game (ex. s01_p1_g1_counts)
#Output: maxi- JSD score for the maximizing strategy
#matchi- JSD score for the matching strategy
def call_jsd(counts):
    """Score one choice distribution against the two idealized strategies.

    Returns a ``(max_jsd, match_jsd)`` pair: the JSD of ``counts`` versus
    the module-level ``maximize`` and ``match`` reference distributions
    (lower = closer to that pure strategy).
    """
    return jsd(counts, maximize), jsd(counts, match)
# In[ ]:
j01_p1_g1_count
# In[ ]:
#Optional: Idealized probability distributions to use for comparison. These assume a social strategy in which the probability
#distributions of both partners in a joint session are averaged (all holes reward at a 60% rate.
# NOTE(review): social_max puts all mass on the MIDDLE arm, and social_match
# is uniform across arms after jsd's normalization (each entry 30/180*100;
# the raw values sum to 50, but jsd renormalizes). Confirm these encode the
# intended averaged-reward model.
social_max = [0, 100, 0]
social_match = [30/180 * 100,30/180 * 100, 30/180 * 100]
# In[ ]:
#Calls jsd with the social distributions.
def call_jsd_social(counts):
    """Score one choice distribution against the SOCIAL strategy models.

    Same shape as ``call_jsd`` but compares against the module-level
    ``social_max`` and ``social_match`` distributions; returns the
    ``(max_jsd, match_jsd)`` pair (lower = closer to that strategy).
    """
    return jsd(counts, social_max), jsd(counts, social_match)
# In[ ]:
s01_p1_g1_count
# In[ ]:
from scipy import stats
# In[ ]:
#if same, 0
# Sanity check: JSD of the two reference models against each other.
test = jsd(maximize,match)
# In[ ]:
test
# In[ ]:
# NOTE(review): this OVERWRITES the s01 session variable with the string
# 'match' — if s01 previously held a session DataFrame, later cells that
# use it will break. Consider a separate name (e.g. s01_type).
s01 = 'match'
# In[ ]:
s05_p1_g2_count
# In[ ]:
# Manual override of s05's game-2 counts — presumably a data correction;
# TODO confirm against the raw session log.
s05_p1_g2_count = [11,0,89]
# In[ ]:
# some add to 80, some add to 81??
s_g1_counts
# In[ ]:
s_g2_counts
# +
# In[ ]:
#Call 'call_jsd' for each participant for each game in the single player sessions.
#with 0, put in actual probabilities in brackets
# Bug fixed below: s06's game-2 JSD was computed from the GAME-1 counts
# (s06_p1_g1_count) — a copy-paste slip; every other participant uses the
# matching game's counts, so it now reads s06_p1_g2_count.
s01_g1_max,s01_g1_match = call_jsd(s01_p1_g1_count)
s01_g2_max,s01_g2_match = call_jsd(s01_p1_g2_count)
s03_g1_max,s03_g1_match = call_jsd(s03_p1_g1_count)
s03_g2_max,s03_g2_match = call_jsd(s03_p1_g2_count)
s05_g1_max,s05_g1_match = call_jsd(s05_p1_g1_count)
s05_g2_max,s05_g2_match = call_jsd(s05_p1_g2_count)
s06_g1_max,s06_g1_match = call_jsd(s06_p1_g1_count)
s06_g2_max,s06_g2_match = call_jsd(s06_p1_g2_count)
s07_g1_max,s07_g1_match = call_jsd(s07_p1_g1_count)
s07_g2_max,s07_g2_match = call_jsd(s07_p1_g2_count)
s08_g1_max,s08_g1_match = call_jsd(s08_p1_g1_count)
s08_g2_max,s08_g2_match = call_jsd(s08_p1_g2_count)
s09_g1_max,s09_g1_match = call_jsd(s09_p1_g1_count)
s09_g2_max,s09_g2_match = call_jsd(s09_p1_g2_count)
s11_g1_max,s11_g1_match = call_jsd(s11_p1_g1_count)
s11_g2_max,s11_g2_match = call_jsd(s11_p1_g2_count)
s12_g1_max,s12_g1_match = call_jsd(s12_p1_g1_count)
s12_g2_max,s12_g2_match = call_jsd(s12_p1_g2_count)
s15_g1_max,s15_g1_match = call_jsd(s15_p1_g1_count)
s15_g2_max,s15_g2_match = call_jsd(s15_p1_g2_count)
s16_g1_max,s16_g1_match = call_jsd(s16_p1_g1_count)
s16_g2_max,s16_g2_match = call_jsd(s16_p1_g2_count)
# NOTE(review): s17 uses hand-entered count vectors instead of
# s17_p1_g*_count — presumably a manual correction; confirm against the
# raw counts displayed in the next cells.
s17_g1_max,s17_g1_match = call_jsd([0.0, 3.0, 78.0])
s17_g2_max,s17_g2_match = call_jsd([0, 19, 62])
s19_g1_max,s19_g1_match = call_jsd(s19_p1_g1_count)
s19_g2_max,s19_g2_match = call_jsd(s19_p1_g2_count)
s20_g1_max,s20_g1_match = call_jsd(s20_p1_g1_count)
s20_g2_max,s20_g2_match = call_jsd(s20_p1_g2_count)
# In[ ]:
s17_p1_g1_count
# In[ ]:
s17_p1_g2_count
# In[ ]:
#Create a dataframe for single player game 1 jsd scores, with a column for each strategy.'''
s_jsd_g1 = pd.DataFrame({'max':[s01_g1_max,s03_g1_max,s05_g1_max,s06_g1_max,s07_g1_max,s08_g1_max,
s09_g1_max, s11_g1_max, s12_g1_max, s15_g1_max, s16_g1_max, s17_g1_max, s19_g1_max, s20_g1_max],
'match':[s01_g1_match,s03_g1_match,s05_g1_match
,s06_g1_match,s07_g1_match,s08_g1_match,s09_g1_match, s11_g1_match, s12_g1_match, s15_g1_match, s16_g1_match, s17_g1_match, s19_g1_match, s20_g1_match]},
index=['s01', 's03','s05','s06','s07','s08','s09', 's11', 's12', 's15', 's16', 's17', 's19', 's20'])
# In[ ]:
#closer to 0 = closer to that pure strategy
s_jsd_g1
# In[ ]:
#Create a dataframe for single player game 2 jsd scores, with a column for each strategy.'''
s_jsd_g2 = pd.DataFrame({'max':[s01_g2_max,s03_g2_max,s05_g2_max,s06_g2_max,s07_g2_max,s08_g2_max,
s09_g2_max, s11_g2_max, s12_g2_max, s15_g2_max, s16_g2_max, s17_g2_max, s19_g2_max, s20_g2_max],
'match':[s01_g2_match,s03_g2_match,s05_g2_match
,s06_g2_match,s07_g2_match,s08_g2_match,s09_g2_match, s11_g2_match, s12_g2_match, s15_g2_match, s16_g2_match, s17_g2_match, s19_g2_match, s20_g2_match]},
index=['s01', 's03','s05','s06','s07','s08','s09', 's11', 's12', 's15', 's16', 's17', 's19', 's20'])
# +
# In[ ]:
def check_type(df, social=False):
    """Classify each row of a JSD score frame as 'maxi' or 'matchi'.

    Parameters
    ----------
    df : DataFrame with 'max' and 'match' columns of JSD values
        (lower value = closer to that pure strategy).
    social : bool, default False
        When True, threshold against the social reference distributions
        (``social_max``/``social_match``); otherwise ``maximize``/``match``.

    Returns a one-column ('type') DataFrame of labels.  NOTE: the first
    row is a NaN placeholder (callers strip it with ``.drop([0])``), and
    when the winning JSD reaches the threshold an EXTRA 'neither' row is
    appended rather than replacing the label — both quirks are preserved
    because downstream cells depend on the resulting shape.

    Bug fixed: the old signature was ``def check_type(df, *social)``, so
    ``social`` was always a tuple and ``social == True`` was always
    False — the social threshold branch could never run even when callers
    passed ``True``.
    """
    types = pd.DataFrame(index=['type'])

    def _add(label):
        # pd.concat replaces DataFrame.append (removed in pandas 2.0)
        # and yields the same ignore_index result.
        return pd.concat([types, pd.DataFrame([{'type': label}])],
                         ignore_index=True)

    for index, row in df.iterrows():
        maxi = row["max"]
        matchi = row["match"]
        print(index, maxi, matchi)
        if maxi < matchi:
            print('maxi')
            types = _add('maxi')
            num = maxi
        elif matchi < maxi:
            print('matchi')
            types = _add('matchi')
            num = matchi
        else:
            types = _add('neither')
            num = 1  # tie: guarantees the threshold check below fires
        # Too far from BOTH pure strategies -> extra 'neither' marker row.
        if social:
            if num >= jsd(social_max, social_match):
                types = _add('neither')
        else:
            if num >= jsd(maximize, match):
                types = _add('neither')
    return types
# In[ ]:
# Classify single-player strategies per game.
s_types_g1 = check_type(s_jsd_g1)
# In[ ]:
s_types_g2 = check_type(s_jsd_g2)
# In[ ]:
#add label, make sure in correct order
# drop([0]) removes the NaN placeholder row from check_type for display
# only — the result is not assigned back.
s_types_g1.drop([0])
# In[ ]:
s_types_g2.drop([0])
# In[ ]:
#how different max and match dists are from eachother
#neither max or match(?)
threshold = jsd(maximize,match)
# In[ ]:
threshold
# In[ ]:
s_jsd_g1
# In[ ]:
s_jsd_g2
# +
# In[ ]:
#Call 'call_jsd' for each participant for each game in the joint sessions.'''
j01_p1_g1_max,j01_p1_g1_match = call_jsd(j01_p1_g1_count)
j01_p1_g2_max,j01_p1_g2_match = call_jsd(j01_p1_g2_count)
j01_p2_g1_max,j01_p2_g1_match = call_jsd(j01_p2_g1_count)
j01_p2_g2_max,j01_p2_g2_match = call_jsd(j01_p2_g2_count)
j02_p1_g1_max,j02_p1_g1_match = call_jsd(j02_p1_g1_count)
j02_p1_g2_max,j02_p1_g2_match = call_jsd(j02_p1_g2_count)
j02_p2_g1_max,j02_p2_g1_match = call_jsd(j02_p2_g1_count)
j02_p2_g2_max,j02_p2_g2_match = call_jsd(j02_p2_g2_count)
j03_p1_g1_max,j03_p1_g1_match = call_jsd(j03_p1_g1_count)
j03_p1_g2_max,j03_p1_g2_match = call_jsd(j03_p1_g2_count)
j03_p2_g1_max,j03_p2_g1_match = call_jsd(j03_p2_g1_count)
j03_p2_g2_max,j03_p2_g2_match = call_jsd(j03_p2_g2_count)
j04_p1_g1_max,j04_p1_g1_match = call_jsd(j04_p1_g1_count)
j04_p1_g2_max,j04_p1_g2_match = call_jsd(j04_p1_g2_count)
j04_p2_g1_max,j04_p2_g1_match = call_jsd(j04_p2_g1_count)
j04_p2_g2_max,j04_p2_g2_match = call_jsd(j04_p2_g2_count)
j05_p1_g1_max,j05_p1_g1_match = call_jsd(j05_p1_g1_count)
j05_p1_g2_max,j05_p1_g2_match = call_jsd(j05_p1_g2_count)
j05_p2_g1_max,j05_p2_g1_match = call_jsd(j05_p2_g1_count)
j05_p2_g2_max,j05_p2_g2_match = call_jsd(j05_p2_g2_count)
j06_p1_g1_max,j06_p1_g1_match = call_jsd(j06_p1_g1_count)
j06_p1_g2_max,j06_p1_g2_match = call_jsd(j06_p1_g2_count)
j06_p2_g1_max,j06_p2_g1_match = call_jsd(j06_p2_g1_count)
j06_p2_g2_max,j06_p2_g2_match = call_jsd(j06_p2_g2_count)
j07_p1_g1_max,j07_p1_g1_match = call_jsd(j07_p1_g1_count)
j07_p1_g2_max,j07_p1_g2_match = call_jsd(j07_p1_g2_count)
j07_p2_g1_max,j07_p2_g1_match = call_jsd(j07_p2_g1_count)
j07_p2_g2_max,j07_p2_g2_match = call_jsd(j07_p2_g2_count)
j08_p1_g1_max,j08_p1_g1_match = call_jsd(j08_p1_g1_count)
j08_p1_g2_max,j08_p1_g2_match = call_jsd(j08_p1_g2_count)
j08_p2_g1_max,j08_p2_g1_match = call_jsd(j08_p2_g1_count)
j08_p2_g2_max,j08_p2_g2_match = call_jsd(j08_p2_g2_count)
j10_p1_g1_max,j10_p1_g1_match = call_jsd(j10_p1_g1_count)
j10_p1_g2_max,j10_p1_g2_match = call_jsd(j10_p1_g2_count)
j10_p2_g1_max,j10_p2_g1_match = call_jsd(j10_p2_g1_count)
j10_p2_g2_max,j10_p2_g2_match = call_jsd(j10_p2_g2_count)
# NOTE(review): j11 uses the SAME hand-entered vector [0,6,74] for both
# players and both games — looks like a placeholder; confirm the real
# j11_p*_g*_count data before trusting these rows.
j11_p1_g1_max,j11_p1_g1_match = call_jsd([0,6,74])
j11_p1_g2_max,j11_p1_g2_match = call_jsd([0,6,74])
j11_p2_g1_max,j11_p2_g1_match = call_jsd([0,6,74])
j11_p2_g2_max,j11_p2_g2_match = call_jsd([0,6,74])
# In[ ]:
#Create a dataframe for joint game 1 jsd scores, with a column for each strategy.'''
j_jsd_g1 = pd.DataFrame({'max':[j01_p1_g1_max,j01_p2_g1_max,j02_p1_g1_max,j02_p2_g1_max,j03_p1_g1_max,j03_p2_g1_max,
j04_p1_g1_max,j04_p2_g1_max,j05_p1_g1_max,j05_p2_g1_max,j06_p1_g1_max,j06_p2_g1_max,
j07_p1_g1_max,j07_p2_g1_max,j08_p1_g1_max,j08_p2_g1_max,j10_p1_g1_max,j10_p2_g1_max,
j11_p1_g1_max, j11_p2_g1_max],
'match':[j01_p1_g1_match,j01_p2_g1_match,j02_p1_g1_match,
j02_p2_g1_match,j03_p1_g1_match,j03_p2_g1_match,
j04_p1_g1_match,j04_p2_g1_match,j05_p1_g1_match,
j05_p2_g1_match,j06_p1_g1_match,j06_p2_g1_match,
j07_p1_g1_match,j07_p2_g1_match,j08_p1_g1_match,j08_p2_g1_match, j10_p1_g1_match,
j10_p2_g1_match, j11_p1_g1_match, j11_p2_g1_match]},
index=['j01', 'j01', 'j02', 'j02', 'j03', 'j03','j04', 'j04','j05', 'j05',
'j06', 'j06','j07','j07','j08','j08', 'j10', 'j10', 'j11', 'j11'])
# In[ ]:
#Create a dataframe for joint game 2 jsd scores, with a column for each strategy.'''
j_jsd_g2 = pd.DataFrame({'max':[j01_p1_g2_max,j01_p2_g2_max,j02_p1_g2_max,j02_p2_g2_max,j03_p1_g2_max,j03_p2_g2_max,
j04_p1_g2_max,j04_p2_g2_max,j05_p1_g2_max,j05_p2_g2_max,j06_p1_g2_max,j06_p2_g2_max,
j07_p1_g2_max,j07_p2_g2_max,j08_p1_g2_max,j08_p2_g2_max,j10_p1_g2_max,j10_p2_g2_max,
j11_p1_g2_max, j11_p2_g2_max],
'match':[j01_p1_g2_match,j01_p2_g2_match,j02_p1_g2_match,
j02_p2_g2_match,j03_p1_g2_match,j03_p2_g2_match,
j04_p1_g2_match,j04_p2_g2_match,j05_p1_g2_match,
j05_p2_g2_match,j06_p1_g2_match,j06_p2_g2_match,
j07_p1_g2_match,j07_p2_g2_match,j08_p1_g2_match,j08_p2_g2_match,j10_p1_g2_match,
j10_p2_g2_match, j11_p1_g2_match, j11_p2_g2_match]},
index=['j01', 'j01', 'j02', 'j02', 'j03', 'j03','j04', 'j04','j05', 'j05',
'j06', 'j06','j07','j07','j08','j08', 'j10', 'j10', 'j11', 'j11'])
# In[ ]:
# Classify joint-session strategies per game (non-social thresholds).
j_types_g1 = check_type(j_jsd_g1)
# In[ ]:
j_types_g2 = check_type(j_jsd_g2)
# In[ ]:
# Display without the NaN placeholder row (not assigned back).
j_types_g1.drop([0])
# In[ ]:
j_types_g2.drop([0])
# In[ ]:
j_jsd_g1
# In[ ]:
j_jsd_g2
# In[ ]:
#same for joint using "social"
# +
# In[ ]:
#Call 'call_jsd_social' for each participant for each game in the joint sessions.'''
# NOTE(review): these REBIND the same j*_max / j*_match names used by the
# non-social j_jsd_g1/j_jsd_g2 frames above — cell execution order matters;
# re-running the earlier dataframe cells after this one would mix the two
# metrics. Consider distinct variable names (e.g. *_smax/_smatch).
j01_p1_g1_max,j01_p1_g1_match = call_jsd_social(j01_p1_g1_count)
j01_p1_g2_max,j01_p1_g2_match = call_jsd_social(j01_p1_g2_count)
j01_p2_g1_max,j01_p2_g1_match = call_jsd_social(j01_p2_g1_count)
j01_p2_g2_max,j01_p2_g2_match = call_jsd_social(j01_p2_g2_count)
j02_p1_g1_max,j02_p1_g1_match = call_jsd_social(j02_p1_g1_count)
j02_p1_g2_max,j02_p1_g2_match = call_jsd_social(j02_p1_g2_count)
j02_p2_g1_max,j02_p2_g1_match = call_jsd_social(j02_p2_g1_count)
j02_p2_g2_max,j02_p2_g2_match = call_jsd_social(j02_p2_g2_count)
j03_p1_g1_max,j03_p1_g1_match = call_jsd_social(j03_p1_g1_count)
j03_p1_g2_max,j03_p1_g2_match = call_jsd_social(j03_p1_g2_count)
j03_p2_g1_max,j03_p2_g1_match = call_jsd_social(j03_p2_g1_count)
j03_p2_g2_max,j03_p2_g2_match = call_jsd_social(j03_p2_g2_count)
j04_p1_g1_max,j04_p1_g1_match = call_jsd_social(j04_p1_g1_count)
j04_p1_g2_max,j04_p1_g2_match = call_jsd_social(j04_p1_g2_count)
j04_p2_g1_max,j04_p2_g1_match = call_jsd_social(j04_p2_g1_count)
j04_p2_g2_max,j04_p2_g2_match = call_jsd_social(j04_p2_g2_count)
j05_p1_g1_max,j05_p1_g1_match = call_jsd_social(j05_p1_g1_count)
j05_p1_g2_max,j05_p1_g2_match = call_jsd_social(j05_p1_g2_count)
j05_p2_g1_max,j05_p2_g1_match = call_jsd_social(j05_p2_g1_count)
j05_p2_g2_max,j05_p2_g2_match = call_jsd_social(j05_p2_g2_count)
j06_p1_g1_max,j06_p1_g1_match = call_jsd_social(j06_p1_g1_count)
j06_p1_g2_max,j06_p1_g2_match = call_jsd_social(j06_p1_g2_count)
j06_p2_g1_max,j06_p2_g1_match = call_jsd_social(j06_p2_g1_count)
j06_p2_g2_max,j06_p2_g2_match = call_jsd_social(j06_p2_g2_count)
j07_p1_g1_max,j07_p1_g1_match = call_jsd_social(j07_p1_g1_count)
j07_p1_g2_max,j07_p1_g2_match = call_jsd_social(j07_p1_g2_count)
j07_p2_g1_max,j07_p2_g1_match = call_jsd_social(j07_p2_g1_count)
j07_p2_g2_max,j07_p2_g2_match = call_jsd_social(j07_p2_g2_count)
j08_p1_g1_max,j08_p1_g1_match = call_jsd_social(j08_p1_g1_count)
j08_p1_g2_max,j08_p1_g2_match = call_jsd_social(j08_p1_g2_count)
j08_p2_g1_max,j08_p2_g1_match = call_jsd_social(j08_p2_g1_count)
j08_p2_g2_max,j08_p2_g2_match = call_jsd_social(j08_p2_g2_count)
j10_p1_g1_max,j10_p1_g1_match = call_jsd_social(j10_p1_g1_count)
j10_p1_g2_max,j10_p1_g2_match = call_jsd_social(j10_p1_g2_count)
j10_p2_g1_max,j10_p2_g1_match = call_jsd_social(j10_p2_g1_count)
j10_p2_g2_max,j10_p2_g2_match = call_jsd_social(j10_p2_g2_count)
j11_p1_g1_max,j11_p1_g1_match = call_jsd_social(j11_p1_g1_count)
j11_p1_g2_max,j11_p1_g2_match = call_jsd_social(j11_p1_g2_count)
j11_p2_g1_max,j11_p2_g1_match = call_jsd_social(j11_p2_g1_count)
j11_p2_g2_max,j11_p2_g2_match = call_jsd_social(j11_p2_g2_count)
# In[ ]:
#Create a dataframe for joint game 1 social jsd scores, with a column for each strategy.'''
# Bug fixed: the 'max' list was missing j10_p1_g1_max (19 entries, while
# 'match' and the index both have 20), so the DataFrame constructor raised
# "arrays must all be same length".
j_jsd_g1_social = pd.DataFrame({'max':[j01_p1_g1_max,j01_p2_g1_max,j02_p1_g1_max,j02_p2_g1_max,j03_p1_g1_max,j03_p2_g1_max,
j04_p1_g1_max,j04_p2_g1_max,j05_p1_g1_max,j05_p2_g1_max,j06_p1_g1_max,j06_p2_g1_max,
j07_p1_g1_max,j07_p2_g1_max,j08_p1_g1_max,j08_p2_g1_max, j10_p1_g1_max, j10_p2_g1_max,
j11_p1_g1_max, j11_p2_g1_max],
'match':[j01_p1_g1_match,j01_p2_g1_match,j02_p1_g1_match,
j02_p2_g1_match,j03_p1_g1_match,j03_p2_g1_match,
j04_p1_g1_match,j04_p2_g1_match,j05_p1_g1_match,
j05_p2_g1_match,j06_p1_g1_match,j06_p2_g1_match,
j07_p1_g1_match,j07_p2_g1_match,j08_p1_g1_match,j08_p2_g1_match, j10_p1_g1_match,
j10_p2_g1_match, j11_p1_g1_match, j11_p2_g1_match]},
index=['j01', 'j01', 'j02', 'j02', 'j03', 'j03','j04', 'j04','j05', 'j05',
'j06', 'j06','j07','j07','j08','j08', 'j10', 'j10', 'j11', 'j11'])
# In[ ]:
#Create a dataframe for joint game 2 social jsd scores, with a column for each strategy.'''
# NOTE(review): unlike the game-1 social frame, this one stops at j08
# (16 rows; j10 and j11 are omitted) — confirm whether that is intentional.
j_jsd_g2_social = pd.DataFrame({'max':[j01_p1_g2_max,j01_p2_g2_max,j02_p1_g2_max,j02_p2_g2_max,j03_p1_g2_max,j03_p2_g2_max,
j04_p1_g2_max,j04_p2_g2_max,j05_p1_g2_max,j05_p2_g2_max,j06_p1_g2_max,j06_p2_g2_max,
j07_p1_g2_max,j07_p2_g2_max,j08_p1_g2_max,j08_p2_g2_max],
'match':[j01_p1_g2_match,j01_p2_g2_match,j02_p1_g2_match,
j02_p2_g2_match,j03_p1_g2_match,j03_p2_g2_match,
j04_p1_g2_match,j04_p2_g2_match,j05_p1_g2_match,
j05_p2_g2_match,j06_p1_g2_match,j06_p2_g2_match,
j07_p1_g2_match,j07_p2_g2_match,j08_p1_g2_match,j08_p2_g2_match]},
index=['j01', 'j01', 'j02', 'j02', 'j03', 'j03','j04', 'j04','j05', 'j05',
'j06', 'j06','j07','j07','j08','j08'])
# In[ ]:
# NOTE(review): these OVERWRITE the social JSD score frames with the
# check_type label frames — the later cells that scatter-plot
# j_jsd_g*_social by 'max'/'match' will then fail with a KeyError unless
# run before this cell. Store the labels under new names instead
# (e.g. j_types_g1_social).
j_jsd_g1_social = check_type(j_jsd_g1_social,True)
# In[ ]:
j_jsd_g2_social = check_type(j_jsd_g2_social,True)
# In[ ]:
#j_jsd_g1_social.drop([0])
j_jsd_g1_social
# In[ ]:
j_jsd_g2_social
# In[ ]:
threshold
# In[ ]:
import matplotlib.lines as mlines
# In[ ]:
#colors_jsd = [['lightcoral','red','lightcoral','red','bisque','orange','bisque','orange','palegreen','green','palegreen','green',
#'lightblue','blue','lightblue','blue','mediumslateblue','indigo','mediumslateblue','indigo','lightpink','deeppink',
#'lightpink','deeppink','yellow','gold','yellow','gold','lightgray','gray','lightgray','gray']
# In[ ]:
import itertools
# In[ ]:
#Here I think I tried to create colors for each subject in each condition for use in graphs. I DON'T think this is
#a good way to do this.'''
# Per-subject colour palettes for the scatter plots below.
colors_jsd_j = ['red','lightcoral','orange','darkorange','green','palegreen',
'blue','darkblue','hotpink','pink','purple','plum','peru','saddlebrown','gray','lightgray']
# NOTE(review): colors_jsd_s_g1 has 15 entries and colors_jsd_s_g2 has 14
# for 14 single-player rows — confirm which list matches the data.
colors_jsd_s_g1 = ['red','orange','green','blue','pink','purple','peru','cyan', 'red','lightcoral','orange','darkorange','green','palegreen',
'blue']
colors_jsd_s_g2 = ['red','green','blue','pink','purple','peru','cyan', 'red','lightcoral','orange','darkorange','green','palegreen',
'blue']
# Bug fixed: a missing comma at the end of the first line fused 'v' and 'o'
# into a single 'vo' element via implicit string concatenation, leaving 31
# markers instead of the intended 32 alternating 'o'/'v'.
markers_jsd = ['o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v','o','v',
'o','v','o','v','o','v','o','v']
# Displays the `colors` palette defined in an earlier notebook cell.
colors
# In[ ]:
#Here I print the means of the JSD values in each condition, just to see what they are.'''
# In[ ]:
#run t test to see sig between max and match per game
#put in box plot
s_jsd_g1.mean()
# In[ ]:
s_jsd_g2.mean()
# In[ ]:
j_jsd_g1.mean()
# In[ ]:
j_jsd_g2.mean()
# In[ ]:
import matplotlib.patches as mpatches
# In[ ]:
#create proxy artist for legend
# NOTE(review): this appends one patch per (row, colour) pair —
# len(j_jsd_g1) * 16 patches, each labelled with the row index — which is
# why the legend below doesn't work. Zip rows with colours instead.
patches = []
for index,row in j_jsd_g1.iterrows():
    for i in colors_jsd_j:
        patches.append(mpatches.Patch(color=i, label=index))
# # legend not working
# In[ ]:
# Scatter plots of the joint-session JSD values (max vs match) with a y = x
# reference line; points below the line are closer to the matching model.
#Here I try to plot the JSD Values. Continued below under 'JSD with gender.
# In[ ]:
#j_jsd_plot = j_jsd.plot.scatter(x='max',y='match',title='Multi Player JSD Values')
#line = mlines.Line2D([0.2148, 0], [0.2148, 1], color='red')
#j_jsd_plot.add_line(line)
#plt.show()
#fig, j_jsd_g1_plot = plt.subplots()
j_jsd_g1_plot = j_jsd_g1.plot.scatter(x='max',y='match',s=100,c=colors_jsd_j,title='Multiplayer JSD Values Game 1')
plt.xticks(np.arange(0, 0.5, step=0.1))
plt.yticks(np.arange(0, 0.5, step=0.1))
line1 = mlines.Line2D([0, 0.4],[0, 0.4], color='red')
#line2 = mlines.Line2D([0,0.8],[0.2148,0.2148])
#line2 = mlines.Line2D([0, 0.4],[0.2148, 0.2148], color='red')
# 0.2148 is the non-social 'neither' threshold jsd(maximize, match).
line2 = mlines.Line2D([0.2148, 0.2148],[-0.03, 0.5], color='red')
line3 = mlines.Line2D([-0.03,0.8],[0.2148,0.2148])
#transform = ax.transAxes
#line2 doesn't show up; all below it?
#line.set_transform(transform)
j_jsd_g1_plot.add_line(line1)
#j_jsd_g1_plot.add_line(line2)
#j_jsd_g1_plot.add_line(line3)
#plt.legend(handles=patches)
plt.savefig('j_jsd_1.eps',format='eps',dpi=1000)
plt.show()
#color code pairs together
# In[ ]:
#j_jsd_plot = j_jsd.plot.scatter(x='max',y='match',title='Multi Player JSD Values')
#line = mlines.Line2D([0.2148, 0], [0.2148, 1], color='red')
#j_jsd_plot.add_line(line)
#plt.show()
#fig, j_jsd_g2_plot = plt.subplots()
j_jsd_g2_plot = j_jsd_g2.plot.scatter(x='max',y='match',s=100,c=colors_jsd_j,title='Multiplayer JSD Values Game 2')
plt.xticks(np.arange(0, 0.5, step=0.1))
plt.yticks(np.arange(0, 0.5, step=0.1))
#line1 = mlines.Line2D([0.2148, 0.2148],[-0.03, 0.5], color='red')
#line2 = mlines.Line2D([-0.03,0.8],[0.2148,0.2148])
#line2 = mlines.Line2D([0, 0.4],[0.2148, 0.2148], color='red')
line1 = mlines.Line2D([-1, 0.4],[-1, 0.4], color='red')
#transform = ax.transAxes
#line2 doesn't show up; all below it?
#line.set_transform(transform)
j_jsd_g2_plot.add_line(line1)
#j_jsd_g2_plot.add_line(line2)
plt.savefig('j_jsd_2.eps',format='eps',dpi=1000)
plt.show()
#color code pairs together
# In[ ]:
#under line = matching (closer to 0)
#plots of actual jsd values
# Bugs fixed: (1) `colors=` is not a valid scatter keyword — matplotlib
# expects `c=`, so the original raised an "Unknown property" error;
# (2) the figure was saved as 's_jsd_2.eps', clobbering the game-2 plot —
# renamed to 's_jsd_1.eps'.
# NOTE(review): this game-1 plot uses colors_jsd_s_g2 — confirm whether
# colors_jsd_s_g1 was intended.
s_jsd_g1_plot = s_jsd_g1.plot.scatter(x='max',y='match',s=100,c=colors_jsd_s_g2,title='Single Player JSD Values Game 1')
plt.xticks(np.arange(0, 0.5, step=0.1))
plt.yticks(np.arange(0, 0.5, step=0.1))
# y = x reference line: below it means closer to matching.
line1 = mlines.Line2D([0, 0.4],[0, 0.4], color='red')
s_jsd_g1_plot.add_line(line1)
plt.savefig('s_jsd_1.eps',format='eps',dpi=1000)
plt.show()
# In[ ]:
# Bug fixed: `colors=` is not a valid scatter keyword — matplotlib expects
# `c=`, so the original raised an "Unknown property" error.
s_jsd_g2_plot = s_jsd_g2.plot.scatter(x='max',y='match',s=100,c=colors_jsd_s_g2,title='Single Player JSD Values Game 2')
plt.xticks(np.arange(0, 0.5, step=0.1))
plt.yticks(np.arange(0, 0.5, step=0.1))
# y = x reference line: below it means closer to matching.
line1 = mlines.Line2D([0, 0.4],[0, 0.4], color='red')
s_jsd_g2_plot.add_line(line1)
plt.savefig('s_jsd_2.eps',format='eps',dpi=1000)
# In[ ]:
# 'neither' threshold for the social models.
threshold_social = jsd(social_max,social_match)
# In[ ]:
threshold_social
# In[ ]:
# NOTE(review): j_jsd_g1_social was overwritten with check_type's label
# frame in an earlier cell, so this scatter by 'max'/'match' raises
# KeyError unless the cells are run out of order — see the note there.
j_jsd_g1_social_plot = j_jsd_g1_social.plot.scatter(x='max',y='match',s=100,c=colors_jsd_j,title='Multi Player JSD Social Values Game 1')
plt.xticks(np.arange(0, 0.6, step=0.1))
plt.yticks(np.arange(0, 0.6, step=0.1))
#line1 = mlines.Line2D([0.3183, 0.3183],[-0.03, 0.55], color='red')
#line2 = mlines.Line2D([0, 0.8],[0.3183, 0.3183])
#j_jsd_g1_social_plot.add_line(line1)
#j_jsd_g1_social_plot.add_line(line2)
line1 = mlines.Line2D([0, 0.55],[0, 0.55], color='red')
j_jsd_g1_social_plot.add_line(line1)
plt.savefig('j_jsd_social_1.eps',format='eps',dpi=1000)
#everyone could be classified as a matcher. everyone to the left of the red line could be using both
#NEED TO: color differently those who were more of maximizers than matchers!!!
# In[ ]:
# Multiplayer 'social' JSD values, game 2, with the y = x reference diagonal.
# BUG FIX: pass the per-point color list as `c=` (was the invalid `colors=`).
j_jsd_g2_social_plot = j_jsd_g2_social.plot.scatter(x='max',y='match',s=100,c=colors_jsd_j,title='Multi Player JSD Social Values Game 2')
plt.xticks(np.arange(0, 0.6, step=0.1))
plt.yticks(np.arange(0, 0.6, step=0.1))
line1 = mlines.Line2D([0, 0.55],[0, 0.55], color='red')
j_jsd_g2_social_plot.add_line(line1)
plt.savefig('j_jsd_social_2.eps',format='eps',dpi=1000)
# ### NOT WORKING
# def check_type_3(row):
# maxi = row["max"]
# matchi = row["match"]
# if maxi < matchi:
# row['type'] = 'max'
# num = maxi
# elif matchi < maxi:
# row['type'] = 'match'
# num = matchi
# else:
# row['type'] = 'neither'
# if num >= jsd(maximize,match):
# row['type'] = 'neither'
#
# def check_type_2(df):
# for row in df.itertuples(index=True, name='Pandas'):
# print(index,row["max"],row["match"])
# if row["match"] < row["max"] and row["match"] < jsd(maximize,match):
# df.at[index,'type'] = 'match'
# elif row["max"] < row["match"] and row["max"] < jsd(maximize,match):
# df.at[index,'type'] = 'max'
# else:
# df.at[index,'type'] = 'neither'
# # Gender Diff
# In[ ]:
#load from CSV file
# Load participant gender labels and split into single-player (s) and
# multiplayer (j) cohorts.
with open('gender.csv') as fh:  # close the handle deterministically
    data = [row for row in csv.reader(fh)]
gender = pd.DataFrame(np.array(data))
# NOTE: the column label "gender " (trailing space) is kept verbatim because
# later cells may reference it exactly as written.
gender = gender.rename(columns = {0:"participant", 1:"gender "})
gender_s = gender.iloc[16:,:]   # single-player participants
gender_j = gender.iloc[:16,:]   # multiplayer participants
# BUG FIX: DataFrame.drop is not in-place -- the original call discarded its
# result, so row 19 was never actually removed.
gender_s = gender_s.drop([19])
# In[ ]:
gender_s
# In[ ]:
gender_j
# In[ ]:
# Join single-player scores (games 1 and 2) and the score difference onto
# the gender table, matching on the participant ID.
gender_s = gender_s.merge(s_scores_1,on='participant')
# In[ ]:
gender_s = gender_s.merge(s_scores_2,on='participant')
# In[ ]:
gender_s
#gender_s.drop(['score'],axis=1)
# In[ ]:
gender_s = gender_s.merge(score_diff,on='participant')
# In[ ]:
gender_s
# In[ ]:
#gender_s_m = gender_s.gender == "male"
#gender_s['score_x'].mean()
# In[ ]:
# Attach participant IDs (pairs j01..j08, seats a/b) to the multiplayer
# score tables so they can later be merged with the gender table.
pair_ids = ['j01a','j01b','j02a','j02b','j03a','j03b','j04a','j04b',
            'j05a','j05b','j06a','j06b','j07a','j07b','j08a','j08b']
j_scores_1['participant'] = pair_ids
j_scores_2['participant'] = pair_ids
# In[ ]:
j_score_diff['participant'] = pair_ids
# In[ ]:
j_score_diff
# In[ ]:
# Join multiplayer scores onto the gender table, one merge per cell so the
# intermediate results can be inspected.
gender_j = gender_j.merge(j_scores_1,on='participant')
#gender_j = gender_j.merge(j_scores_2,on='participant')
#gender_j = gender_j.merge(j_score_diff,on='participant')
# In[ ]:
gender_j
# In[ ]:
gender_j = gender_j.merge(j_scores_2,on='participant')
# In[ ]:
gender_j
# In[ ]:
gender_j = gender_j.merge(j_score_diff,on='participant')
# In[ ]:
gender_j
# +
# In[ ]:
#don't remove first 20 trials
def organize_trial_by_trial_all(txt_filename):
    """Load one raw game log (CSV, one variable per ROW) into a tidy frame.

    Unlike the earlier pre-processing variant, the first 20 trials are NOT
    removed here.

    Parameters
    ----------
    txt_filename : str
        Path to a CSV file with 9 rows: game number, trial, player, arm,
        probability, reward, time, P1 score, P2 score.

    Returns
    -------
    pandas.DataFrame
        One row per trial, all columns converted to numeric.
    """
    # Close the file handle deterministically (the original left it open).
    with open(txt_filename) as fh:
        game_data = [row for row in csv.reader(fh)]
    # The raw file stores variables as rows, so transpose to one trial per row.
    df = pd.DataFrame(np.array(game_data)).T
    df = df.rename(columns = {0:"game number", 1:"trial", 2:"player", 3:"arm", 4:"probability", 5:"reward", 6:"time", 7:"P1 score", 8:"P2 score"})
    # Everything in the log is numeric; convert from the raw strings.
    df = df.apply(pd.to_numeric)
    return df
# In[ ]:
#don't remove first 20 trials
#separate data frames for p1 and p2
def separate_df(df, version):
    """Split a combined trial frame by player and by game number.

    The first 20 trials are intentionally NOT removed (see disabled line).

    Parameters
    ----------
    df : pandas.DataFrame
        Output of organize_trial_by_trial_all; must contain the
        'player' and 'game number' columns.
    version : str
        's' for single-player data (player 1 frames only); anything else
        returns frames for both players.

    Returns
    -------
    tuple of pandas.DataFrame
        (p1_g1, p1_g2) when version == 's', otherwise
        (p1_g1, p1_g2, p2_g1, p2_g2).
    """
    df_p1 = df[df['player'] == 1]
    # BUG FIX: the game-number masks were built from `df` instead of the
    # already-filtered frames; pandas silently realigned them by index, but
    # masking each subframe with its own mask is correct and avoids the
    # alignment warning.
    df_p1_g1 = df_p1[df_p1['game number'] == 1]
    df_p1_g2 = df_p1[df_p1['game number'] == 2]
    #df_p1_g2 = df_p1_g2[20:]   # (disabled) drop first 20 trials of game 2
    if version == 's':
        return df_p1_g1, df_p1_g2
    df_p2 = df[df['player'] == 2]
    df_p2_g1 = df_p2[df_p2['game number'] == 1]
    df_p2_g2 = df_p2[df_p2['game number'] == 2]
    return df_p1_g1, df_p1_g2, df_p2_g1, df_p2_g2
# In[ ]:
import glob,os
# In[ ]:
# Parse every raw log under /data into a DataFrame.
# BUG FIX: the original loop rebound `filename` itself to each parsed frame,
# discarding all but the last result; collect them keyed by path instead.
trial_frames = {}
for filename in glob.iglob('/data/**', recursive=True):
    if os.path.isfile(filename):  # filter dirs
        trial_frames[filename] = organize_trial_by_trial_all(filename)
# # jsd with gender
# In[ ]:
# Per-participant marker colors for the single-player JSD scatters
# (presumably gender-coded 'b' vs 'r' -- TODO confirm against the gender table).
colors_jsd_s_g1=['b','b','r','b','b','r','r','r']
colors_jsd_s_g2=['b','r','b','b','r','r','r']
# In[ ]:
# Single-player JSD values, game 1 (color-coded per participant).
# BUG FIX: pass the per-point color list as `c=`; `colors=` is not a valid
# scatter argument.
s_jsd_g1_plot = s_jsd_g1.plot.scatter(x='max',y='match',s=100,c=colors_jsd_s_g1,title='Single Player JSD Values Game 1')
plt.xticks(np.arange(0, 0.5, step=0.1))
plt.yticks(np.arange(0, 0.5, step=0.1))
#line1 = mlines.Line2D([0.2148, 0.2148],[-0.03, 0.5], color='red')
#line2 = mlines.Line2D([0,0.8],[0.2148,0.2148])
line1 = mlines.Line2D([0, 0.4],[0, 0.4], color='red')  # y = x reference diagonal
s_jsd_g1_plot.add_line(line1)
#s_jsd_g1_plot.add_line(line2)
plt.savefig('s_jsd_1.eps',format='eps',dpi=1000)
plt.show()
# In[ ]:
# Single-player JSD values, game 2.
# BUG FIXES: use the `c` keyword for the color list, add the diagonal to
# THIS figure's axes (was s_jsd_g1_plot), and save to the game-2 filename
# (was overwriting s_jsd_1.eps).
s_jsd_g2_plot = s_jsd_g2.plot.scatter(x='max',y='match',s=100,c=colors_jsd_s_g2,title='Single Player JSD Values Game 2')
plt.xticks(np.arange(0, 0.5, step=0.1))
plt.yticks(np.arange(0, 0.5, step=0.1))
#line1 = mlines.Line2D([0.2148, 0.2148],[-0.03, 0.5], color='red')
#line2 = mlines.Line2D([0,0.8],[0.2148,0.2148])
line1 = mlines.Line2D([0, 0.4],[0, 0.4], color='red')  # y = x reference diagonal
s_jsd_g2_plot.add_line(line1)
#s_jsd_g2_plot.add_line(line2)
plt.savefig('s_jsd_2.eps',format='eps',dpi=1000)
plt.show()
# In[ ]:
# Marker colors for the 16 multiplayer participants (pairs share a palette).
colors_jsd_j = ['purple','green','green','purple','r','r','r','r','purple','green','green','purple','r','r','r','r']
# In[ ]:
#fig, j_jsd_g1_plot = plt.subplots()
# Multiplayer JSD values, game 1.  line2/line3 (threshold guides at
# jsd = 0.2148) are built but deliberately left un-added below.
j_jsd_g1_plot = j_jsd_g1.plot.scatter(x='max',y='match',s=100,c=colors_jsd_j,title='Multiplayer JSD Values Game 1')
plt.xticks(np.arange(0, 0.5, step=0.1))
plt.yticks(np.arange(0, 0.5, step=0.1))
line1 = mlines.Line2D([0, 0.4],[0, 0.4], color='red')
#line2 = mlines.Line2D([0,0.8],[0.2148,0.2148])
#line2 = mlines.Line2D([0, 0.4],[0.2148, 0.2148], color='red')
line2 = mlines.Line2D([0.2148, 0.2148],[-0.03, 0.5], color='red')
line3 = mlines.Line2D([-0.03,0.8],[0.2148,0.2148])
#transform = ax.transAxes
#line2 doesn't show up; all below it?
#line.set_transform(transform)
j_jsd_g1_plot.add_line(line1)
#j_jsd_g1_plot.add_line(line2)
#j_jsd_g1_plot.add_line(line3)
#plt.legend(handles=patches)
plt.savefig('j_jsd_1.eps',format='eps',dpi=1000)
plt.show()
# In[ ]:
#fig, j_jsd_g1_plot = plt.subplots()
# Multiplayer JSD values, game 2.
# NOTE(review): line2/line3 are constructed but never added to the axes --
# presumably leftover threshold guides; confirm before removing.
j_jsd_g2_plot = j_jsd_g2.plot.scatter(x='max',y='match',s=100,c=colors_jsd_j,title='Multiplayer JSD Values Game 2')
plt.xticks(np.arange(0, 0.5, step=0.1))
plt.yticks(np.arange(0, 0.5, step=0.1))
line1 = mlines.Line2D([0, 0.4],[0, 0.4], color='red')
line2 = mlines.Line2D([0.2148, 0.2148],[-0.03, 0.5], color='red')
line3 = mlines.Line2D([-0.03,0.8],[0.2148,0.2148])
j_jsd_g2_plot.add_line(line1)
#plt.legend(handles=patches)
plt.savefig('j_jsd_2.eps',format='eps',dpi=1000)
plt.show()
# In[ ]:
# -
| 194,500 |
/Pendulo.ipynb | 74109b3b5d8840de2d8fad4433f2dd8171a452eb | [
"MIT"
] | permissive | dslcosta1/MAC0209-Modelagem-e-Simulacao | https://github.com/dslcosta1/MAC0209-Modelagem-e-Simulacao | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 913,969 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import rebound
import numpy as np
import random
import time
from subprocess import call
#The outputs for the integrator
def write_output(sim,E0,filename):
    """Append one CSV row of orbital elements for every real particle.

    The row is: sim.t followed by (m, a, P, e, pomega, inc, Omega, f,
    x, y, z, vx, vy, vz) for each planet; the central body ps[0] is skipped.

    Parameters
    ----------
    sim : rebound.Simulation
        Simulation to snapshot.
    E0 : float
        Initial total energy.  Kept for interface compatibility; the
        original computed the relative energy error here but never used it
        (the caller writes it to the summary file instead), so that dead
        computation was removed.
    filename : str
        CSV file appended to (one line per call).
    """
    with open(filename, "a") as writefile:
        ps = sim.particles
        features = [sim.t]
        # ps[0] is the star; ps[1:N_real] are the planets.
        for p in ps[1:sim.N_real]:
            features = features + [p.m, p.a, p.P, p.e, p.pomega, p.inc, p.Omega, p.f, p.x, p.y, p.z, p.vx, p.vy, p.vz]
        writefile.write(','.join(map(str,(features))) +"\n")
# +
#draw semi-major axis from normal dist from stellar mass error
#def get_a(P):
# P /= 365
# ms, dms_u, dms_l = 1.071, 0.059, 0.037 #radius of sun, upper/lower error bars (solar radii)
# a = (P**2 * ms)**(1./3.)
# da_u, da_l = dms_u/ms/3*a, dms_l/ms/3*a #err. prop. for semi-major axis (assume solar error dominates)
# return np.random.normal(a, np.mean([da_u,da_l]),10000)
#draw mass from normal dist assuming Earth/Venus/Mercury density
def get_mass(rp):
    """Sample a planet mass (M_sun) for a planet of radius *rp* (R_Earth).

    The planet radius is drawn from a normal distribution whose width is the
    stellar-radius uncertainty propagated to the planet, and the mass assumes
    a rocky bulk density (average of Earth, Venus and Mercury).
    """
    rs, drs_u, drs_l = 1.092, 0.191, 0.109  # stellar radius and +/- errors (solar radii)
    # Propagate the (dominant) stellar-radius error to the planet radius.
    drp_u = rp * drs_u / rs
    drp_l = rp * drs_l / rs
    gcm2msrp = 1.3e-7  # g/cm^3 -> M_sun / R_Earth^3
    rho = 5.4          # avg density of Earth, Venus and Mercury (g/cm^3)
    sampled_radius = np.random.normal(rp, np.mean([drp_u, drp_l]))
    return 4./3.*np.pi*sampled_radius**3 *rho*gcm2msrp
#run sim
def sim(sim_id):
    """Run one randomized stability integration of the Kepler-431 system.

    Planet masses are drawn from the stellar-radius uncertainty (get_mass)
    and the angles/eccentricities/inclinations are drawn uniformly.  The
    system is integrated with WHFast until tmax, or until two planets come
    within the smaller mutual Hill radius (which raises and aborts the run).

    Outputs (any previous files with the same id are removed first):
      Kepler-431_id<sim_id>_long.csv    -- orbital elements over time
      Kepler-431_id<sim_id>_summary.csv -- one line: stability flag, elapsed
          orbits, max orbits, initial elements, relative energy error,
          wall-clock seconds.

    Parameters
    ----------
    sim_id : int
        Identifier embedded in the output file names.
    """
    #setup simulation, add particles, etc.
    sim = rebound.Simulation()  # NOTE: local name intentionally shadows the function
    sim.integrator="whfast"
    sim.ri_whfast.safe_mode = 0
    sim.G = 4*np.pi**2  # units of AU, yr, M_sun
    rp1,rp2,rp3 = 0.764, 0.668, 1.11 #radius of planets (earth radii)
    a1, a2, a3 = 0.0719, 0.0847, 0.1045 #semi-major axis (AU)
    #P1,P2,P3 = 6.803, 8.703, 11.922 #period of planets (days)
    emax = 0.2  # maximum initial eccentricity
    imax = 0.1  # maximum initial inclination (rad)
    sim.add(m=1.071)  # central star (M_sun)
    sim.add(m=get_mass(rp1), a=a1, e=random.random()*emax, pomega=random.random()*2.*np.pi, inc=random.random()*imax, Omega=random.random()*2.*np.pi, f=random.random()*2.*np.pi)
    sim.add(m=get_mass(rp2), a=a2, e=random.random()*emax, pomega=random.random()*2.*np.pi, inc=random.random()*imax, Omega=random.random()*2.*np.pi, f=random.random()*2.*np.pi)
    sim.add(m=get_mass(rp3), a=a3, e=random.random()*emax, pomega=random.random()*2.*np.pi, inc=random.random()*imax, Omega=random.random()*2.*np.pi, f=random.random()*2.*np.pi)
    sim.move_to_com()
    #simulation parameters
    ps = sim.particles
    sim.exit_min_distance = 0.5*(ps[1].a+ps[2].a)*((ps[1].m+ps[2].m)/3)**(1./3.) #use smaller hill radius as exit condition
    P1 = ps[1].P
    sim.dt = P1*0.04 #25 timesteps per orbital period
    #tmax = P1*1e9
    tmax = P1*100
    E0 = sim.calculate_energy()
    t0 = time.time()
    n_outputs = 10
    #writing
    filename = "Kepler-431_id%d"%sim_id
    call("rm %s*"%filename,shell=True) #overwrite any existing files of the same name
    write_output(sim,E0,filename+"_long.csv") #main .csv file with outputted orbital parameters over time
    #save initial parameters
    ini = []
    for p in ps[1:sim.N_real]:
        ini = ini + [p.m, p.a, p.P, p.e, p.pomega, p.inc, p.Omega, p.f, p.x, p.y, p.z, p.vx, p.vy, p.vz]
    #simulate; output times are log-spaced
    stable = [True]
    try:
        for t in np.logspace(0,np.log10(tmax),n_outputs):
            sim.integrate(t)
            write_output(sim,E0,filename+"_long.csv")
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; a close encounter raises an Exception subclass, so
    # `except Exception` is sufficient and keeps the process interruptible.
    except Exception:
        stable = [False]
    #output summary of longterm stability + initial orbital parameters
    elapsed_time = time.time() - t0
    fini = [stable[0], sim.t/P1, tmax/P1] + ini + [abs((sim.calculate_energy()-E0)/E0), elapsed_time]
    with open(filename+"_summary.csv", "a") as writefile:
        writefile.write(','.join(map(str,(fini))) +"\n")
# -
sim(10)  # run a single realization, writing Kepler-431_id10_* output files
| 4,308 |
/GP4Optim/DimReduction/from3D.ipynb | a5ff86c6fa22abc68af1b001ea89dd31bb23d763 | [] | no_license | kilean20/MLproj | https://github.com/kilean20/MLproj | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 351,549 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: pytorch-v1.4.0
# language: python
# name: pytorch-v1.4.0
# ---
# + id="Rae_ovNg-UjL"
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
import matplotlib.pyplot as plt
from copy import deepcopy as copy
# np.random.seed(1)
# + colab={"base_uri": "https://localhost:8080/"} id="18MOHxpPZfxp" outputId="dc324015-cd02-4421-fca6-c1b2d82b2c47"
# !pip install git+https://github.com/kilean20/pyTorchTemplate.git
# -
# !pip install git+https://github.com/kilean20/pyTorchTemplate.git --upgrade
# + id="RSYlem9s37YC"
import torch
from torch.nn import functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
import pyTorchTemplate as ptt
# + [markdown] id="l8CjpFfR-aMC"
# $$
# \begin{eqnarray}
# \boldsymbol{R} &=& |\boldsymbol{x}| \\
# {f} &=& \frac{\sin(4\pi \boldsymbol{R})}{4\pi\boldsymbol{R}}
# \end{eqnarray}
# $$
# + id="4K4iQu8o-no7"
ndim = 3        # input dimensionality of the toy problem
nSample = 1024  # number of training samples
p = 6           # exponent used for the ptt.MPELoss training criterion
# + id="WvoM8m_E_pkb"
def f(x):
    """The function to predict: a radial sinc-like wave sin(4*pi*R)/(4*pi*R).

    A small offset (1e-4) keeps the radius away from zero so the ratio is
    always well defined at the origin.
    """
    radius = np.sqrt((x**2).sum(axis=1)) + 0.0001
    phase = 4 * np.pi * radius
    return np.sin(phase) / phase
# + [markdown] id="tRliSdTZwjoG"
# # Prepare data
# + id="1gshQgdxqIw3"
# Uniform random training inputs in [-1, 1]^ndim with noiseless targets.
x_train = ((np.random.rand(nSample,ndim)-0.5)*2).astype(np.float32)
y_train = f(x_train).reshape(-1,1).astype(np.float32)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="lcYoygfy-rLR" outputId="4b229589-399a-4ef2-a07e-172425765f2e"
plt.hist(y_train,bins=50);
plt.xlabel('f')
plt.ylabel('count')
plt.title('train data histogram of f(x)')
plt.tight_layout()
plt.savefig('train_data_histo_'+str(ndim)+'D_'+str(nSample)+'sample.png',dpi=180)
# + id="BjNaW9IMqIxB"
# Large i.i.d. test set drawn from the same cube.
x_test = ((np.random.rand(65536,ndim)-0.5)*2).astype(np.float32)
y_test = f(x_test).reshape(-1,1).astype(np.float32)
# + id="DFKV2QBHuZ-x"
# 1-D slice along the first coordinate (all others zero), used for the
# slice-view plots below.
x_onAxis = np.zeros([256,ndim]).astype(np.float32)
x_onAxis[:,0] = np.linspace(-1, 1, 256)
y_onAxis = f(x_onAxis).reshape(-1,1).astype(np.float32)
# + id="w2llUa3dvCuF"
train_data_loader = torch.utils.data.DataLoader(list(zip(x_train,y_train)),batch_size=128)
test_data_loader = torch.utils.data.DataLoader(list(zip(x_test,y_test)),batch_size=128)
# + [markdown] id="Bh3TgV3dvypN"
# ### GP w/o dim-reduction
# + id="RsjC_jsGvuuS"
# Constant * RBF kernel with broad bounds; this GP is fitted directly on the
# raw ndim-dimensional inputs (no dimensionality reduction) as a baseline.
kernel = C(0.2, (1e-2, 1e2)) * RBF(1, (2e-2, 1e2))
gpNaive = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
# + colab={"base_uri": "https://localhost:8080/"} id="ZBaeI7daqIxL" outputId="4c4619da-0bbc-4eef-c9c6-a64059939fa7"
gpNaive.score(x_train, y_train), gpNaive.score(x_test, y_test)
# + colab={"base_uri": "https://localhost:8080/"} id="OqVS67mxwAAP" outputId="1201da25-9649-447d-a90d-9e7170e7266f"
gpNaive.fit(x_train, y_train)
gpNaive.kernel_
# + colab={"base_uri": "https://localhost:8080/"} id="0wXu-plhqIxN" outputId="46991bbd-e1dd-47fc-99b5-ad87bb3ee0f3"
gpNaive.score(x_train, y_train), gpNaive.score(x_test, y_test) #(0.9999999999999998, 0.2938582961152787)
# + id="AfUo1Eh_qIxT"
# Posterior mean and standard deviation of the baseline GP along the 1-D slice.
y_onAxis_GPnaive, sigma_GPnaive = gpNaive.predict(x_onAxis, return_std=True)
y_onAxis_GPnaive = y_onAxis_GPnaive.reshape(-1)
# +
fig = plt.figure(figsize=(5,4))
plt.plot(x_onAxis[:,0],y_onAxis ,'k' , label='Ground Truth')
plt.plot(x_onAxis[:,0],y_onAxis_GPnaive, 'b-', label='GP prediction')
plt.fill_between(x_onAxis[:,0],
y_onAxis_GPnaive- 1.96 * sigma_GPnaive,
y_onAxis_GPnaive+ 1.96 * sigma_GPnaive,
label='95% confidence interval', color='C0', alpha=.5, )
# plt.plot(x_onAxis[:,0],y_onAxis_NN, 'r-', label='NN prediction',lw=2,alpha=0.5)
plt.title('1D slice view of '+str(ndim)+'D problem w/o dim-reduction')
plt.xlabel(r'$x_0$')
plt.ylabel(r'$f(x)$')
plt.legend(loc='upper right')
plt.xlim(-1,1)
plt.ylim(-0.4,1.2)
plt.xticks([-1,-0.5,0,0.5,1])
plt.tight_layout()
plt.savefig('GP_woDimReduct_'+str(ndim)+'D.png',dpi=180)
# + [markdown] id="fuY5xHu3FiPa"
# # with DimReduction
# + [markdown] id="QIXyvxw7TFrm"
# ### AutoEncoderLike NN
# + id="7Y5AMhJDHwUq"
def test(model,criterion,test_data_loader):
    """Return the mean per-batch loss of *model* over *test_data_loader*.

    The model is put in eval mode and inputs/targets are moved to the
    module-level `device` before evaluation.

    NOTE: despite the name, this is an evaluation helper, not a unit test.
    """
    model.eval()
    loss = 0
    # IMPROVEMENT: evaluation only -- disable autograd bookkeeping so no
    # computation graph is built (same numbers, less memory/time).
    with torch.no_grad():
        for x, y in test_data_loader:
            x = x.to(device)
            y_pred = model(x)
            loss += criterion(y_pred, y.to(device)).item()
    return loss/len(test_data_loader)
# -
# ### 2D source
nsource = 2   # latent (bottleneck) dimensionality of the autoencoder
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="ZaABf7BvIHuP" outputId="985911f7-86b4-44a4-a3bf-f6a2b7b4cb14"
# Random-restart training of the encoder/decoder network: up to 10
# re-initializations, keeping the best model seen; a restart is abandoned
# early if its warm-up loss is still above 1, and the search stops once the
# loss drops below 1e-3.  A final low-learning-rate pass refines the winner.
# NOTE(review): ptt.train_supervised appears to train `model` in place --
# its first return value is bound to `mode`, which is never used; confirm
# this is intended and not a typo for `model`.
old_best_loss = 1
for i in range(10):
    model = ptt.resFCNN_autoEncoder([ndim,32,32,nsource],[nsource,16,16,1], torch.nn.CELU(inplace=True), residual=True, identity_block_every_layer=True)
    mode,hist = ptt.train_supervised(model,1.0e-2,200,
                                     train_data_loader,
                                     optimizer=torch.optim.Adam,
                                     # optim_args = {'weight_decay':0.2},
                                     criterion=ptt.MPELoss(p=p),
                                     old_best_loss = old_best_loss,
                                     dispHead = 0, dispTail = 0)
    tmp = test(model,ptt.MPELoss(p=p),train_data_loader)
    if tmp > 1:
        continue
    mode,hist = ptt.train_supervised(model,2.0e-3,1300,
                                     train_data_loader,
                                     optimizer=torch.optim.Adam,
                                     # optim_args = {'weight_decay':0.2},
                                     criterion=ptt.MPELoss(p=p),
                                     old_hist = hist,
                                     old_best_loss = old_best_loss,
                                     dispHead = 0, dispTail = 0)
    newloss = test(model,ptt.MPELoss(p=p),train_data_loader)
    if newloss < old_best_loss:
        old_best_loss = newloss
        final_model = copy(model)
        final_hist = copy(hist)
    if newloss < 1e-3:
        break
    plt.figure(figsize=(4,2))
    plt.semilogy(hist['train_loss'])
    plt.semilogy(hist['test_loss'])
model = final_model
hist = final_hist
mode,hist = ptt.train_supervised(model,5.0e-4,1500,
                                 train_data_loader,
                                 optimizer=torch.optim.Adam,
                                 # optim_args = {'weight_decay':0.2},
                                 # data_loader,
                                 old_hist = hist,
                                 criterion=ptt.MPELoss(p=p),
                                 old_best_loss = newloss,
                                 dispHead = 0, dispTail = 0)
newloss = test(model,ptt.MPELoss(p=p),train_data_loader)
model = model.cpu()
model.eval();
# + colab={"base_uri": "https://localhost:8080/"} id="i5T-w9zGtblJ" outputId="527094df-abf5-4d1e-f010-d66359000562"
model(torch.Tensor(x_train[:10,:])).detach().numpy()
# + colab={"base_uri": "https://localhost:8080/"} id="19lN2T2EIF7j" outputId="cb52f25c-943e-44bb-dae9-0a4fe89ee025"
f(x_train[:10,:])
# + [markdown] id="fjRHslRYSrPW"
# ##### source from AutoEncoderLike BottleNeck
# + id="jMqyrdwGIOrI"
# Extract the learned latent "source" coordinates from the autoencoder
# bottleneck and standardize each latent dimension to zero mean / unit std
# (statistics computed on the training set and reused for test/onAxis).
s_train = model.get_latent_variable(torch.Tensor(x_train)).detach().numpy()
# + colab={"base_uri": "https://localhost:8080/", "height": 223} id="N9Qjvo5gxUve" outputId="773a7691-5f3d-4590-d2fc-52438a4cdafd"
s_mean = []
s_std = []
for i in range(nsource):
    s_mean.append(s_train[:,i].mean())
    s_std.append(s_train[:,i].std())
    print(s_mean,s_std)
    s_train[:,i] = (s_train[:,i]-s_mean[i])/s_std[i]
    plt.figure(figsize=(2,1))
    plt.hist(s_train[:,i]);
# + id="qpy98Vt3yitn"
s_test = model.get_latent_variable(torch.Tensor(x_test)).detach().numpy()
for i in range(nsource):
    s_test[:,i] = (s_test[:,i]-s_mean[i])/s_std[i]
# + colab={"base_uri": "https://localhost:8080/", "height": 228} id="jGVPa8qd3kft" outputId="7cc6d8c9-4eb2-4b6a-d965-fe9f0da76deb"
# Contour of the target over the 2-D latent space.
plt.figure(figsize=(5,4))
plt.tricontourf(s_test[:,0],s_test[:,1],y_test.reshape(-1))
plt.title(str(nsource)+'D source contour f(s)')
plt.xlabel(r'$s_0$')
plt.ylabel(r'$s_1$')
plt.tight_layout()
plt.savefig('4Dto2D_source_contour.png',dpi=180)
# + id="-bdwFsTmDXIZ"
s_onAxis = model.get_latent_variable(torch.Tensor(x_onAxis)).detach().numpy()
for i in range(nsource):
    s_onAxis[:,i] = (s_onAxis[:,i]-s_mean[i])/s_std[i]
# -
np.sqrt(newloss)
# + [markdown] id="JP_gXNqQ4jUP"
# ##### GP on source
# + id="1gI0Up4SVITk"
# GP fitted on the standardized latent sources; alpha (observation noise) is
# set from the network's residual training error so the GP does not chase
# autoencoder approximation noise.
kernel = C(0.2, (5e-3, 5e2)) * RBF(1, (5e-3, 5e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9, alpha=2*np.sqrt(newloss))
# + colab={"base_uri": "https://localhost:8080/"} id="O_lSBMngTVsW" outputId="4cb44c02-e847-4182-89e9-8627bf31c5af"
gp.score(s_train, y_train), gp.score(s_test, y_test)
# + colab={"base_uri": "https://localhost:8080/"} id="C7s787EMTY9c" outputId="aacd51a9-9759-4892-e440-957238559900"
gp.fit(s_train, y_train)
gp.kernel_
# + colab={"base_uri": "https://localhost:8080/"} id="o_hfrTN_zp69" outputId="ed7ee78a-b25b-4f9e-eb89-53682eda48b8"
gp.score(s_train, y_train), gp.score(s_test, y_test)
# + [markdown] id="GTulbGz1zcdd"
# ##### on axis
# + id="1v7rbu9WGAnb"
y_onAxis_GP, sigma_GP = gp.predict(s_onAxis, return_std=True)
y_onAxis_GP = y_onAxis_GP.reshape(-1)
y_onAxis_NN = model(torch.Tensor(x_onAxis)).detach().numpy()
# -
y_onAxis_NN.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 230} id="yc4aDoDYtmA0" outputId="58d74d7e-6929-4c47-ded3-a10cd9e66a4b"
plt.figure(figsize=(8,3))
plt.subplot(1,2,1)
plt.scatter(s_onAxis[:,0],y_onAxis)
plt.subplot(1,2,2)
plt.scatter(s_onAxis[:,0],y_onAxis_NN)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="H-IX3QTvzlGq" outputId="9727261b-7204-4a34-8f7b-37a433295284"
fig = plt.figure(figsize=(5,4))
plt.plot(x_onAxis[:,0],y_onAxis ,'k' , label='Ground Truth')
plt.plot(x_onAxis[:,0],y_onAxis_GP, 'b-', label='GP prediction')
plt.fill_between(x_onAxis[:,0],
y_onAxis_GP- 1.96 * sigma_GP,
y_onAxis_GP+ 1.96 * sigma_GP,
label='95% confidence interval', color='C0', alpha=.5, )
plt.plot(x_onAxis[:,0],y_onAxis_NN, 'r-', label='NN prediction',lw=2,alpha=0.5)
plt.title('1D slice view of '+str(ndim)+'D problem w. dim-reduction')
plt.xlabel(r'$x_0$')
plt.ylabel(r'$f(x)$')
plt.legend(loc='upper right')
plt.xlim(-1,1)
plt.ylim(-0.4,1.2)
plt.xticks([-1,-0.5,0,0.5,1])
plt.tight_layout()
plt.savefig('GP_wDimReduct_'+str(ndim)+'D_'+str(nsource)+'source.png',dpi=180)
# + colab={"base_uri": "https://localhost:8080/"} id="nFAbeznUF196" outputId="7feabb58-5a55-4184-dd96-a2ef98135196"
ptt.count_parameters(model)
# -
# ### 1D source
nsource = 1   # repeat the pipeline with a 1-D bottleneck
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="ZaABf7BvIHuP" outputId="985911f7-86b4-44a4-a3bf-f6a2b7b4cb14"
# Same random-restart training procedure as the 2-D source case above, now
# compressing the input to a single latent coordinate.
old_best_loss = 1
for i in range(10):
    model = ptt.resFCNN_autoEncoder([ndim,32,32,nsource],[nsource,16,16,1], torch.nn.CELU(inplace=True), residual=True, identity_block_every_layer=True)
    mode,hist = ptt.train_supervised(model,1.0e-2,200,
                                     train_data_loader,
                                     optimizer=torch.optim.Adam,
                                     # optim_args = {'weight_decay':0.2},
                                     criterion=ptt.MPELoss(p=p),
                                     old_best_loss = old_best_loss,
                                     dispHead = 0, dispTail = 0)
    tmp = test(model,ptt.MPELoss(p=p),train_data_loader)
    if tmp > 1:
        continue
    mode,hist = ptt.train_supervised(model,2.0e-3,1300,
                                     train_data_loader,
                                     optimizer=torch.optim.Adam,
                                     # optim_args = {'weight_decay':0.2},
                                     criterion=ptt.MPELoss(p=p),
                                     old_hist = hist,
                                     old_best_loss = old_best_loss,
                                     dispHead = 0, dispTail = 0)
    newloss = test(model,ptt.MPELoss(p=p),train_data_loader)
    if newloss < old_best_loss:
        old_best_loss = newloss
        final_model = copy(model)
        final_hist = copy(hist)
    if newloss < 1e-3:
        break
    plt.figure(figsize=(4,2))
    plt.semilogy(hist['train_loss'])
    plt.semilogy(hist['test_loss'])
model = final_model
hist = final_hist
mode,hist = ptt.train_supervised(model,5.0e-4,1500,
                                 train_data_loader,
                                 optimizer=torch.optim.Adam,
                                 # optim_args = {'weight_decay':0.2},
                                 # data_loader,
                                 old_hist = hist,
                                 criterion=ptt.MPELoss(p=p),
                                 old_best_loss = newloss,
                                 dispHead = 0, dispTail = 0)
newloss = test(model,ptt.MPELoss(p=p),train_data_loader)
model = model.cpu()
model.eval();
# -
plt.figure(figsize=(4,2))
plt.semilogy(hist['train_loss'])
plt.semilogy(hist['test_loss'])
newloss
# + colab={"base_uri": "https://localhost:8080/"} id="i5T-w9zGtblJ" outputId="527094df-abf5-4d1e-f010-d66359000562"
model(torch.Tensor(x_train[:10,:])).detach().numpy()
# + colab={"base_uri": "https://localhost:8080/"} id="19lN2T2EIF7j" outputId="cb52f25c-943e-44bb-dae9-0a4fe89ee025"
f(x_train[:10,:])
# + [markdown] id="fjRHslRYSrPW"
# ##### source from AutoEncoderLike BottleNeck
# + id="jMqyrdwGIOrI"
s_train = model.get_latent_variable(torch.Tensor(x_train)).detach().numpy()
# + colab={"base_uri": "https://localhost:8080/", "height": 223} id="N9Qjvo5gxUve" outputId="773a7691-5f3d-4590-d2fc-52438a4cdafd"
s_mean = []
s_std = []
for i in range(nsource):
s_mean.append(s_train[:,i].mean())
s_std.append(s_train[:,i].std())
print(s_mean,s_std)
s_train[:,i] = (s_train[:,i]-s_mean[i])/s_std[i]
plt.figure(figsize=(2,1))
plt.hist(s_train[:,i]);
# + id="qpy98Vt3yitn"
s_test = model.get_latent_variable(torch.Tensor(x_test)).detach().numpy()
for i in range(nsource):
s_test[:,i] = (s_test[:,i]-s_mean[i])/s_std[i]
# + colab={"base_uri": "https://localhost:8080/", "height": 228} id="jGVPa8qd3kft" outputId="7cc6d8c9-4eb2-4b6a-d965-fe9f0da76deb"
plt.figure(figsize=(5,4))
plt.scatter(s_test[:,0],y_test.reshape(-1))
plt.title(str(nsource)+'D source vs f(s)')
plt.xlabel(r'$s$')
plt.ylabel(r'$y$')
plt.tight_layout()
plt.savefig('4Dto1D_source_contour.png',dpi=180)
# + id="-bdwFsTmDXIZ"
s_onAxis = model.get_latent_variable(torch.Tensor(x_onAxis)).detach().numpy()
for i in range(nsource):
s_onAxis[:,i] = (s_onAxis[:,i]-s_mean[i])/s_std[i]
# + [markdown] id="JP_gXNqQ4jUP"
# ##### GP on source
# + id="1gI0Up4SVITk"
kernel = C(0.2, (5e-3, 5e2)) * RBF(1, (5e-3, 5e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9, alpha=2*np.sqrt(newloss))
# + colab={"base_uri": "https://localhost:8080/"} id="O_lSBMngTVsW" outputId="4cb44c02-e847-4182-89e9-8627bf31c5af"
gp.score(s_train, y_train), gp.score(s_test, y_test)
# + colab={"base_uri": "https://localhost:8080/"} id="C7s787EMTY9c" outputId="aacd51a9-9759-4892-e440-957238559900"
gp.fit(s_train, y_train)
gp.kernel_
# + colab={"base_uri": "https://localhost:8080/"} id="o_hfrTN_zp69" outputId="ed7ee78a-b25b-4f9e-eb89-53682eda48b8"
gp.score(s_train, y_train), gp.score(s_test, y_test)
# + [markdown] id="GTulbGz1zcdd"
# ##### on axis
# + id="1v7rbu9WGAnb"
y_onAxis_GP, sigma_GP = gp.predict(s_onAxis, return_std=True)
y_onAxis_GP = y_onAxis_GP.reshape(-1)
y_onAxis_NN = model(torch.Tensor(x_onAxis)).detach().numpy()
# -
y_onAxis_NN.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 230} id="yc4aDoDYtmA0" outputId="58d74d7e-6929-4c47-ded3-a10cd9e66a4b"
plt.figure(figsize=(8,3))
plt.subplot(1,2,1)
plt.scatter(s_onAxis[:,0],y_onAxis)
plt.subplot(1,2,2)
plt.scatter(s_onAxis[:,0],y_onAxis_NN)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="H-IX3QTvzlGq" outputId="9727261b-7204-4a34-8f7b-37a433295284"
fig = plt.figure(figsize=(5,4))
plt.plot(x_onAxis[:,0],y_onAxis ,'k' , label='Ground Truth')
plt.plot(x_onAxis[:,0],y_onAxis_GP, 'b-', label='GP prediction')
plt.fill_between(x_onAxis[:,0],
y_onAxis_GP- 1.96 * sigma_GP,
y_onAxis_GP+ 1.96 * sigma_GP,
label='95% confidence interval', color='C0', alpha=.5, )
plt.plot(x_onAxis[:,0],y_onAxis_NN, 'r-', label='NN prediction',lw=2,alpha=0.5)
plt.title('1D slice view of '+str(ndim)+'D problem w. dim-reduction')
plt.xlabel(r'$x_0$')
plt.ylabel(r'$f(x)$')
plt.legend(loc='upper right')
plt.xlim(-1,1)
plt.ylim(-0.4,1.2)
plt.xticks([-1,-0.5,0,0.5,1])
plt.tight_layout()
plt.savefig('GP_wDimReduct_'+str(ndim)+'D_'+str(nsource)+'source.png',dpi=180)
# + colab={"base_uri": "https://localhost:8080/"} id="nFAbeznUF196" outputId="7feabb58-5a55-4184-dd96-a2ef98135196"
ptt.count_parameters(model)
evelopment pilot " sally: what timing, ask and you shall receive. as per our discussion, listed below is an update on the leadership pilot. your vendor selection team will receive an update and even more information later in the week. on the lunch & learn for energy operations, the audience and focus will be your group. we are ready to start up when appropriate. thank you for your time today. please call me if you have any questions at x 33597. ----------------------forwarded by julie armstrong/corp/enron on 01/17/2000 06:44 pm--------------------------- from: susan runkel @ ect 01/17/2000 03:22 pm to: cindy skinner/hou/ect @ ect, brad mcsherry/hou/ect @ ect, norma villarreal/hou/ect @ ect, kimberly rizzi/hou/ect @ ect, fran l mayes/hou/ect @ ect, gary buck/hou/ect @ ect, robert jones/corp/enron @ enron, sheila walton/hou/ect @ ect, philip conn/corp/enron @ enron, mary overgaard/pdx/ect @ ect, kim melodick/hou/ect @ ect, valeria a hope/hou/ect @ ect cc: david oxley/hou/ect @ ect, susan carrera/hou/ect @ ect, jane allen/hou/ect @ ect, christine shenkman/enron_development @ enron_development, kathryn mclean/hou/ect @ ect, gracie s presas/hou/ect @ ect, janice riedel/hou/ect @ ect, julie armstrong/corp/enron @ enron subject: leadership development pilot good news regarding the ena leadership curriculum! through the help of a vendor selection team from eops, we've chosen southwest performance group and wilson learning products as one of our primary vendors for the leadership curriculum and programs. we are ready to conduct a pilot on february 8-10 of six modules. the purpose of the pilot is to evaluate for fine-tuning the wilson learning materials and facilitators and to present just a portion of the leadership curriculum. in order to evaluate the materials thoroughly, it would be great to get a cross-section of ena to attend. we are asking that you invite several supervisors from your client groups to participate in any of the courses listed below. 
the sessions will be held in room 560 and times are listed below. also attached is a description of the modules. all are designed for supervisors only, with the exception being "" communicating effectively "". this is open to any employee. as a benefit in attending the pilot, i will pick up the cost., so there will be no charge back for their attendance. we are currently completing the curriculum design and will have information on the full curriculum available in february. this will include options other than "" classrom setting "" for development. please respond back to gracie presas by february 1 with your names. if you have further questions, please contact me at 3-7394. we are really excited that we have this available and hope that your clients will find it to be valuable. the following are half-day sessions. supervisors may sign up for any or all depending on their need. it would be helpful if supervisors attend a minimum of two modules. date module time target audience feb. 8 meeting leadership challenges 8-12 am supervisors with less than 6 months experience working styles 1-5 pm any supervisor feb. 9 coaching to performance 8-12 am any supervisor motivating for results 1-5 pm any supervisor feb. 10 communicating effectively 8-12 am any employee delegating and directing 1-5 pm any supervisor"
0001.2000-06-06.lokay 0 " key dates and impact of upcoming sap implementation over the next few weeks, project apollo and beyond will conduct its final sap implementation ) this implementation will impact approximately 12,000 new users plus all existing system users. sap brings a new dynamic to enron, enhancing the timely flow and sharing of specific project, human resources, procurement, and financial information across business units and across continents. this final implementation will retire multiple, disparate systems and replace them with a common, integrated system encompassing many processes including payroll, timekeeping, benefits, project management, and numerous financial processes. employees will be empowered to update and/or view their personal information via the intranet-based ehronline--a single front-end to sap's self service functionality and enron's global information system (gis). among other things, individuals will be able to update personal information (including w-4, addresses and personal banking information), manage their individual time using a new time entry tool, view their benefit elections, and view their personal payroll information on-line. all enron employees paid out of corporate payroll in houston, excluding azurix employees the financial communities of enron energy services, enron investment partners, enron north america, enron renewable energy corporation, gas pipeline group, global finance, global it, enron networks, and global products. the project management communities of enron north america, gas pipeline group, global finance, global it, enron networks, and global products. the human resources communities of corporate, global e & p, enron energy services, enron engineering and construction company, enron investment partners, enron north america, enron renewable energy corporation (houston only), the international regions, gas pipeline group, global finance, global it, enron networks, and global products. 
existing sap users currently supported by the center of expertise (coe) ) including the london coe. people will be impacted gradually over the next few weeks: june 12-current sap users may notice (and may use) new features in some of the sap modules--this new functionality was developed to meet requirements of business units implementing sap as part of this final implementation. june 22-timekeeping functionality will be available for all employees paid out of corporate payroll in houston (excluding azurix employees). -new sap coding must be used on timesheets. -system ids will be available for all new users. june 30-deadline! all time for the period beginning june 16 th and ending june 30 th must be entered into sap by 3:00 cst. -new sap coding must be used for all expenses and invoices. july 5-all remaining functionality (project management, financials, and human resources) are available to new end-users. for more information... visit us at an information booth in the enron building lobby on wednesday, june 7 th and thursday, june 8 th (10 a.m. till 2 p.m. each day.) visit our intranet site at http:\\sap. enron. com for job aids and other useful information. contact the site manager coordinating the implementation within your business unit or global function--specific site manager contact information can be found on the intranet at http:\\sap. enron. com. contact the center of expertise (coe) for sap implementation and production support questions via telephone at (713) 345-4 sap or via e-mail at sap. coe @ enron. com."
0001.2001-02-07.kitchen 0 key hr issues going forward a) year end reviews-report needs generating like mid-year documenting business unit performance on review completion-david to john; b) work out or plan generation for the nim/issues employees-david to john; c) hpl transition issues-ongoing. officially transferred. regards delainey
0001.2001-04-02.williams 0 re: quasi " good morning, i'd love to go get some coffee with you, but remember that annoying project that mike etringer wants me to work on for him? this morning i am kinda under some pressure to hurry up and try to get some stuff figured out so i really don't have much spare time right now. ja would flip out if i left for coffee now. maybe later this afternoon? or tomorrow morning? anyhow, another ride sounds really cool. i had lots of fun. and yes, it would be cooler if i didn't have to worry about work. let me know when you have extra time to go for a ride. my weekend was pretty fun. i weed-wacked (is that a word?) my yard for the first time. it looks so bad. i so don't know anything about lawn care. also i planted some herbs and stuff in my yard which i am sure my dog will destroy, but it s worth a try. oh yeah, i also bought a snowboard. it's pretty cool. i bought some step-in switch boots, too. cool, eh? so i'll talk to you later. have a great day."
0002.1999-12-13.farmer 0 " vastar resources, inc." " gary, production from the high island larger block a-1 # 2 commenced on saturday at 2:00 p.m. at about 6,500 gross. carlos expects between 9,500 and 10,000 gross for tomorrow. vastar owns 68% of the gross production. george x 3-6992 ----------------------forwarded by george weissman/hou/ect on 12/13/99 10:16 am--------------------------- daren j farmer 12/10/99 10:38 am to: carlos j rodriguez/hou/ect @ ect cc: george weissman/hou/ect @ ect, melissa graves/hou/ect @ ect subject: vastar resources, inc. carlos, please call linda and get everything set up. i'm going to estimate 4,500 coming up tomorrow, with a 2,000 increase each following day based on my conversations with bill fischer at bmar. d. ----------------------forwarded by daren j farmer/hou/ect on 12/10/99 10:34 am--------------------------- enron north america corp. from: george weissman 12/10/99 10:00 am to: daren j farmer/hou/ect @ ect cc: gary bryan/hou/ect @ ect, melissa graves/hou/ect @ ect subject: vastar resources, inc. darren, the attached appears to be a nomination from vastar resources, inc. for the high island larger block a-1 # 2 (previously, erroneously referred to as the # 1 well). vastar now expects the well to commence production sometime tomorrow. i told linda harris that we'd get her a telephone number in gas control so she can provide notification of the turn-on tomorrow. linda's numbers, for the record, are 281. 584. 3359 voice and 713. 312. 1689 fax. would you please see that someone contacts linda and advises her how to submit future nominations via e-mail, fax or voice? thanks. 
george x 3-6992 ----------------------forwarded by george weissman/hou/ect on 12/10/99 09:44 am--------------------------- "" linda harris "" on 12/10/99 09:38:43 am to: george weissman/hou/ect @ ect cc: subject: hi a-1 # 2 effective 12-11-99 |--------+----------+-----------| | | | | | mscf/d | min ftp | time | | | | | |--------+----------+-----------| | | | | | 4,500 | 9,925 | 24 hours | | | | | |--------+----------+-----------| | | | | | 6,000 | 9,908 | 24 hours | | | | | |--------+----------+-----------| | | | | | 8,000 | 9,878 | 24 hours | | | | | |--------+----------+-----------| | | | | | 10,000 | 9,840 | 24 hours | | | | | |--------+----------+-----------| | | | | | 12,000 | 9,793 | 24 hours | | | | | |--------+----------+-----------| | | | | | 14,000 | 9,738 | 24 hours | | | | | |--------+----------+-----------| | | | | | 16,000 | 9,674 | 24 hours | | | | | |--------+----------+-----------| | | | | | 18,000 | 9,602 | 24 hours | | | | | |--------+----------+-----------| | | | | | 20,000 | 9,521 | 24 hours | | | | | |--------+----------+-----------| | | | | | 22,000 | 9,431 | 24 hours | | | | | |--------+----------+-----------| | | | | | 24,000 | 9,332 | 24 hours | | | | | |--------+----------+-----------| | | | | | 26,000 | 9,224 | 24 hours | | | | | |--------+----------+-----------| | | | | | 28,000 | 9,108 | 24 hours | | | | | |--------+----------+-----------| | | | | | 30,000 | 8,982 | 24 hours | | | | | |--------+----------+-----------| | | | | | 32,000 | 8,847 | 24 hours | | | | | |--------+----------+-----------| | | | | | 34,000 | 8,703 | 24 hours | | | | | |--------+----------+-----------| | | | | | 36,000 | 8,549 | 24 hours | | | | | |--------+----------+-----------|"
0002.2001-02-07.kitchen 0 congrats! " contratulations on the execution of the central maine sos deal! this is another great example of what we can do when everyone comes together to get something done. this transaction brings both strategic value to the business, nice positions for the book and quite a nice chunk of change as well! great job guys! (hey dana, are you paying for the celebration dinner?!)"
0002.2001-05-25.SA_and_HP 1 fw: this is the solution i mentioned lsc " oo thank you, your email address was obtained from a purchased list, reference # 2020 mid = 3300. if you wish to unsubscribe from this list, please click here and enter your name into the remove box. if you have previously unsubscribed and are still receiving this message, you may email our abuse control center, or call 1-888-763-2497, or write us at: nospam, 6484 coral way, miami, fl, 33155 "". (c) 2002 web credit inc. all rights reserved."
0002.2003-12-18.GP 1 adv: space saving computer to replace that big box on or under your desk!! " revolutionary!!! full featured!!! space saving computer in a keyboard eliminate that big box computer forever! great forhome.... office... or students... any place where desk space is at a premium! the computer in a keyboard eliminates the tower that takes up valuable space on or under your desk. a full featured, powerful computer for the price you would pay for a large tower. comes standard with: 1. 8 ghz intelt pentium 4 processor (upgradeable) 40 gigabyte hard drive (upgradeable) 256 mb ramupgradeable to 2 gb cd-rw dvd combo drive 64 bit hardware accelerated 3 d graphics soundmax integrated digital audio internal 56 k fax-modem serial, parallel, audio, 4 usb ports (2 side, and 2 back) 2 button ps/2 scroll mouse microsoft xp home edition and a 15 "" lcd flat screen monitor (upgradeable) isalso included in the base configuration! click below for more information: http:// www.. com / if you wish to stop receiving this email, click on the link below. "
0002.2004-08-01.BG 1 advs " greetings, i am benedicta lindiwe hendricks (mrs) of rsa. i am writing this letter to you with the hope that you will be kind enough to assist my family. if this means of communication is not acceptable to you please accept my apologies as it is the only available and resourceful means for me right now. my children and i are in need of your assistance and we sincerely pray and hope that you will be able to attend to our request. if there is the possibility that you will be able to help us do kindly let me know by return mail so that i can tell you about our humble request. thank for your understanding. benedicta lindiwe hendricks (mrs). please reply to this email address; heno 0 @ katamail. com"
0003.1999-12-10.kaminski 0 re: visit to enron " vince, dec. 29 at 9:00 will be fine. i have talked to shirley and have directions. thanks, bob vince j kaminski wrote: > bob, > > can you come to our office on dec 29 at 9:00 a.m.? > > please, call shirley crenshaw (3-5290) or stinson gibner (3-4748) > from the reception to be admitted to the building. > > vince kaminski"
0003.1999-12-14.farmer 0 calpine daily gas nomination -calpine daily gas nomination 1. doc
0003.2000-01-17.beck 0 re: additional responsibility " congratulations on this additional responsibility! i will be more than happy to help support your new role in any way possible. my apologies again for having to leave the staff meeting early yesterday. susan enron north america corp. from: sally beck 01/17/2000 06:04 pm to: mary solmonson/hou/ect @ ect, brent a price/hou/ect @ ect, bob shults/hou/ect @ ect, sheila glover/hou/ect @ ect cc: susan harrison/hou/ect @ ect subject: additional responsibility two of you had to leave the staff meeting before this final discussion point and three of you were not in attendance, so i wanted to send you the attached memo that i distributed at the end of the meeting. this memo will be sent by rick causey via notes mail regarding an additional role that i will assume with regard to global operations. i shared this in the staff meeting so that you would be the first to know. i will still fulfill my role within ena as vp of energy operations. i will not be going away! this expanded responsibility should create addtional opportunities for operations personnel and will validate some of the global functions that we already provide to the organization."
0003.2001-02-08.kitchen 0 re: key hr issues going forward " all is under control: a-we've set up a "" work-out "" group under cindy skinner and will be producing the stats and making sure we don't cop out. b-as above. several have gone across wholesale already. stats will show this and progress on others. c-fair to say we have total clarity of direction here now! all memo's will be out by monday, cindy olson has sent an email to hr community (re embargo on hpl staff) and i believe mark h is drafting something for other otc's. fran and michele cash (i also put another guy on this yesterday) have all in hand. david david w delainey 02/07/2001 04:39 pm to: john j lavorato/corp/enron, david oxley/hou/ect @ ect cc: mark frevert/na/enron @ enron, greg whalley/hou/ect @ ect, louise kitchen/hou/ect @ ect subject: key hr issues going forward a) year end reviews-report needs generating like mid-year documenting business unit performance on review completion-david to john; b) work out or plan generation for the nim/issues employees-david to john; c) hpl transition issues-ongoing. officially transferred. regards delainey"
0003.2003-12-18.GP 1 fw: account over due wfxu ppmfztdtet " eliminate your credit card debt without bankruptcy! tired of making minimum payments and barely getting by? this is not consolidation or negotiation... this is complete debt eliminationstop making payments immediately! are you drowning in debt? here's what we can do for you... terminate your credit card debt! allow you to stop making payments immediately! obtain a zero balance statement from your creditors! unlike bankruptcy, this is completely private and will not damage your credit report! you will not lose your home or any other assets! request your free consultation now! please stop future announcements j uz hzriubp wr wugn h bmf sr h pbem uvd hm q uafn czkkrxht mpkemyrxlpq"
0003.2004-08-01.BG 1 whats new in summer? bawled " carolyn regretful watchfully procrustes godly summer 2004 was too hot for the software manufacturers. no wonder! as the prices were reduced in 3-4 times. this was caused by the software glut on the world market. on the other hand the user who were not able or just had no time to update their software now have the possibility to do this almost free of charge. read the whole article: year 2004. sotware prices fall down. , (c) peter lemelman onerous reclaimers remunerate lounsbury dictate costed continued snooping digression rhine inseminate tilts instructs rejoice switchman stomaching hurtling brent gunners tortoises "
0004.1999-12-10.kaminski 0 research group move to the 19 th floor " hello all: in case any of you feel energetic, "" the boxes are here "". they are located at 2963 b (michael sergeev's old desk). feel free to take as many as you will need. be sure to label everything with your new office location. if your file cabinets lock, you can just label them and lock them. again, listed below is your new office location: stinson gibner eb 1936 joseph hrgovcic eb 1947 paulo issler eb 1935 vince kaminski eb 1933 krishna krishnarao eb 1938 martin lin eb 1930 e grant masson eb 1941 kevin moore eb 1944 maureen raymond eb 1928 mike roberts eb 1945 vasant shanbhogue eb 1949 vincent tang eb 1934 ravi thuraisingham eb 1932 zimin lu eb 1942 if you have any questions, or need any assistance, please contact me, kevin, or sam. thanks and have a great day! shirley 3-5290"
0004.1999-12-14.farmer 0 re: issue " fyi-see note below-already done. stella ----------------------forwarded by stella l morris/hou/ect on 12/14/99 10:18 am--------------------------- from: sherlyn schumack on 12/14/99 10:06 am to: stella l morris/hou/ect @ ect cc: howard b camp/hou/ect @ ect subject: re: issue stella, this has already been taken care of. you did this for me yesterday. thanks. howard b camp 12/14/99 09:10 am to: stella l morris/hou/ect @ ect cc: sherlyn schumack/hou/ect @ ect, howard b camp/hou/ect @ ect, stacey neuweiler/hou/ect @ ect, daren j farmer/hou/ect @ ect subject: issue stella, can you work with stacey or daren to resolve hc ----------------------forwarded by howard b camp/hou/ect on 12/14/99 09:08 am--------------------------- from: sherlyn schumack 12/13/99 01:14 pm to: howard b camp/hou/ect @ ect cc: subject: issue i have to create accounting arrangement for purchase from unocal energy at meter 986782. deal not tracked for 5/99. volume on deal 114427 expired 4/99."
0004.2001-04-02.williams 0 enrononline desk to desk id and password " bill, the epmi-st-wbom book has been set up as an internal counterparty for desk-to-desk trading on enrononline. the following user id and password will give you access to live prices on the web-site http:// www. enrononline. com. user id: adm 74949 password: welcome! (note these are case sensitive) please keep your user id and password secure as this allows you to transact on enrononline. contact the helpdesk at x 34357 if you have any questions or problems gaining access with this id. thanks, stephanie x 33465"
0004.2001-06-12.SA_and_HP 1 spend too much on your phone bill? 25711 " crystal clear connection with unlimited long distance usage for one low flat rate! now try it for free!! * see for yourself. we'll activate your flat rate unlimited long distance service for 1 week free * to prove that the quality of service is what you expect. call now! operators standing by to activate your service. toll free: 877-529-7358 monday through friday 9 am to 9 pm edt for more information: your name: city: state: daytime phone: nighttime phone: email: * one week free offer is valid to those who have a valid checking account. service is never billed until after the 1 week free trial period. if you have received this by error or wish to be removed from our mailing list, please click here"
0004.2004-08-01.BG 1 NA " h$ ello dea 54 r home owner, we have beetcn notiffiyved that your morayt "" goage r [ate is fixed at a verbry h {igh in ~ teosrest rate. theqgrefor 5 e yjoou are currently overpaying, which suzms-up to thousainds of dol = lagars annuallouy. luo 5 ckily fe_s in tx 3 hje u. s 3 r. (3. 39%). so hurry beca ` use the rat-e forp 8 ecarmst is no 9 t looking good! there is no oblibgat/ion, and it's frqee loczk on the 3. 39%, evelon with bazzd cre>dcoit! click h? ere now fooxr dextails r$ emove he * r-e "
0005.1999-12-12.kaminski 0 christmas baskets the christmas baskets have been ordered. we have ordered several baskets. individual earth-sat freeze-notis smith barney group baskets rodney keys matt rodgers charlie notis jon davis move team phillip randle chris hyde harvey freese faclities iain russell darren prager telephone services mary martinez (robert knights dept.) trina williams daniel hornbuckle todd butler pamela ford ozarka- maryam golnaraghi special baskets greg whalley richard weeks any questions please contact kevin moore other request contact kevin moore price information contact kevin moore please also if you need any assistance with your christmas cards let me know. thanks kevin moore
0005.1999-12-14.farmer 0 meter 7268 nov allocation " fyi. ----------------------forwarded by lauri a allen/hou/ect on 12/14/99 12:17 pm--------------------------- kimberly vaughn 12/10/99 02:54 pm to: lauri a allen/hou/ect @ ect cc: mary m smith/hou/ect @ ect subject: meter 7268 nov allocation lauri.. i have put this on strangas gas until i can get a contract from daren. ----------------------forwarded by kimberly vaughn/hou/ect on 12/10/99 01:52 pm--------------------------- lauri a allen 12/09/99 01:20 pm to: kimberly vaughn/hou/ect @ ect, anita luong/hou/ect @ ect cc: howard b camp/hou/ect @ ect, mary m smith/hou/ect @ ect subject: meter 7268 nov allocation kim/anita- a volume of 7247 mm shows to have been allocated to the reliant 201 contract for november. there was no nomination for reliant at this point in november and, therefore, there should be no volume allocated to their contract. please make sure these volumes are moved off the reliant contract prior to november close. thanks."
0005.2000-06-06.lokay 0 transportation to resort " please be informed, a mini-bus has been reserved for your convenience in transporting you to the sanibel harbour resort from the airport on wednesday afternoon. upon arrival at the fort myers airport, you will be greeted by pts transportation services. i have submitted steve's name as a point of contact. have a safe and pleasant flight. adr"
0005.2001-02-08.kitchen 0 epmi files protest of entergy transco " attached is our filing made yesterday protesting entergy's proposed transco: rate issues are premature until entergy has filed to join spp no support for its proposed innovative rates (for example, (i) entergy would get 5% of a customer-funded transmission project as a development fee and (ii) extra 300 basis points for certain projects) and exact recovery mechanism request that commission require grandfathered contracts to be addressed express concern of entergy's request to continue the problematic source and sink limitations ----------------------forwarded by christi l nicolay/hou/ect on 02/08/2001 03:44 pm--------------------------- "" andrea settanni "" on 02/08/2001 03:38:26 pm to: cc: subject: entergy rto protest rtol-75-01. wpd -entergyr. wpd"
0005.2001-06-23.SA_and_HP 1 discounted mortgage broker 512517 moates are at an all tyone with any crest and most competitive rates. simple takes under 1 minute. try now 512517
0005.2003-12-18.GP 1 " miningnews. net newsletter-thursday, december 18,2003" " thursday, december 18,2003 miningnews. net to allow you to read the stories below, we have arranged a complimentary one month subscription for you. to accept, click here to visit our extended service at www. miningnews. net. alternatively, just click any of the stories below. should you wish to discontinue this service, you may click here to cancel your subscription, or email subscriptions @ miningnews. net. have some news of your own? send your press releases, product news or conference details to submissions @ miningnews. net. de crespigny back in the action at buka robert champion de crespigny has re-emerged in the resources sector atop queensland explorer and potential copper producer buka minerals, sending shares in the company sharply higher... (18 december 2003) full story danielle looks good for strategic minerals strategic mineral has returned multiple high-grade assays from rock chip sampling of the danielle vein at its woolgar gold project in north queensland, including one assay grading 1953. 2 gpt... (18 december 2003) full story ivernia west upgrades wa lead resource toronto listed ivernia west has upgraded its resource and reserve estimate for the cano deposit at the company's 60%-owned magellan lead project in western australia... (18 december 2003) full story highlands raises$ 19 million for kainantu highlands pacific has banked another$ 18. 7 million for the development of its high-grade kainantu gold project in papua new guinea following a placement to domestic and international institutions... (18 december 2003) full story anz boosts gold outlook continued weakness in the us dollar has seen anz bank's natural resources group increase its forecasts for the gold price, with industry analyst peter windred saying the us$ 420 barrier could come under serious pressure... 
(18 december 2003) full story chile lifts 2004 copper forecasts with copper pushing through the us$ 1 per pound barrier for the first time in six and a half years earlier this week, chile's government copper commission, cochilco, has lifted its average 2004 copper forecast to a range of us 92-96 c a pound... (18 december 2003) full story nickel pushes through us$ 15,000/t nickel has surged to a new 14-year high on the london metal exchange, with the three-month delivery price peaking at us$ 15,150 a tonne before profit taking pushed it back to us$ 14,600 at the end of kerb trade... (18 december 2003) full story michelago clinches china deal michelago has further cemented its position in china's expanding gold industry after upping the stake it will hold in a processing plant in shandong province to 82% and signing an agreement with the owners of a bacterial oxidation technology that will see it holding the exclusive licence for the process in china, siberia, mongolia and korea... (17 december 2003) full story northern star gets off to strong start perth-based northern star resources listed on the australian stock exchange today at 23 cents, a 15% premium to its issue price of 20 cents... (17 december 2003) full story macquarie to arrange chirano financing red back mining has mandated macquarie bank to arrange and underwrite debt financing for its chirano gold project in ghana... (17 december 2003) full story ashburton raises$ 1. 28 million ashburton minerals has successfully completed a placement to professional investors, raising$ 1. 28 million to accelerate nickel exploration over the company's east kimberley project in western australia and its ashburton gold project... (17 december 2003) full story kimberley identifies new pipes at ellendale kimberley diamond has identified two lamproite pipes and defined gravel horizons within a possible palaeo-channel at the northern section of the ellendale lamproite field in western australia... 
(17 december 2003) full story georgia on my mind with a market capitalisation of just$ 47 million, over$ 10 million in the bank by end of year, 45,000 ounces of attributable gold production a year, 70% of the biggest undeveloped copper project in australia and some very promising ground in mexico, it's not hard to make an investment case for bolnisi gold... (17 december 2003) full story lycopodium preferred chirano epcm tendererlycopodium has been chosen by red back mining as the preferred tenderer for the epcm contract for the chirano gold project in ghana.... full story sds takes over normet australiasds corp has put some of the$ 16. 5 million it raised earlier this week to immediate use with its$ 1. 05 million acquisition of normet australia.... full story facelift for hunter valley rail network following a deal to lease the hunter valley rail networks for the next 60 years, australian government-owned australian rail track corp has promised big things. infrastructure upgrades, a 20% reduction in track access charges for coal transport, and higher train speeds are some of them.... miningnews. net's e-newsletter uses an html-rich media format to provide a visually attractive layout. if, for any reason, your computer does not support html format e-mail, please let us know by emailing contact @ miningnews. net with your full name and e-mail address, and we will ensure you receive our e-newsletter in a plain-text format. if you have forgotten your password, please contact helpdesk @ miningnews. net. have some news of your own? send your press releases, product news or conference details to submissions @ miningnews. net. aspermont limited (abn 66 000 375 048) postal address po box 78, leederville, wa australia 6902 head office tel + 61 8 9489 9100 head office fax + 61 8 9381 1848 e-mail contact @ aspermont. com website www. aspermont. 
com section dryblower investment news mine safety and health & environment mine supply today commodities due diligence exploration general ipos mining events moves mst features resourcestocks commodity coal copper diamonds gold nickel silver zinc bauxite-alum chromium cobalt gemstone iron ore kaolin magnesium manganese mineral sand oilshale pgm rare earths salt tantalum tin tungsten uranium vanadium region africa all regions asia australia europe north americ oceania south americ mines and money the 71 st sydney mining club: professor geoffrey blainey speaking on mining and the outback reflections & the future enterprise sustainability: managing triple-bottom line performance third international conference on computational fluid dynamics in the minerals & process industries show all events "
0006.1999-12-13.kaminski 0 japan candidate " vince, i spoke with whalley at the sa offsite and he mentioned that had (or knew of) a person that could bring some talent to the evaluation of an enron merchant business in japan. i am in sydney today, but will be in tokyo next week. i would like to speak more about this. what time might you be available? my japan mobile number is 81 90 4073 6761. regards, joe"
0006.2001-02-08.kitchen 0 california power 2/8 " please contact kristin walsh (x 39510) or robert johnston (x 39934) for further clarification. executive summary: utility bankruptcy appears increasingly likely next week unless the state can clear three hurdles-agreement on payback for the bailout, rate increases, and further short-term funding for dwr purchases of power. disagreement persists between gov. davis and democrats in the legislature on how the state should be paid back for its bailout of the utilities. the split is over a stock warrant plan versus state ownership of utility transmission assets. the economics of the long-term contracts appear to show that rate hikes are unavoidable because of the need to amortize the undercollected rates of the utilities during the recent rate freeze period. air quality management district regulations are under review, but offer limited scope for providing additional generation capacity. legislature democrats are feeling intense pressure from the left-wing consumer groups and are being forced to at least slow, if not stop, davis's bailout and rate hike plans. senator burton's eminent domain threats against generators, which reflect this pressure, are of little significance. 1. bankruptcy outlook rising once again a deal to finalize a debt workout continues to be just beyond the reach of the state, the utilities, and their creditors, with time running out on the debt forbearance arrangement set to expire on tuesday. socal edison and pg & e are not paying any of their bills except for payroll. they are working very hard to keep cash on-hand, and have indicated that they feel that they are very close to an involuntary bankruptcy filing. once this filing occurs, they will have 50 days until either the bankruptcy court accepts the filing or the utilities file a voluntary bankruptcy. opinion within the assembly is divided with respect to the outlook for bankruptcy. 
assemblyman keeley told our source that a filing is likely, but that everything will be resolved during that 50-day period. senator john burton "" is in no hurry "" to reach a deal with the utilities, as he believes that the state of california is in a good position to "" strong-arm "" the utilities. burton currently does not intend to cede to the utilities so that they can avoid bankruptcy. the senator stated, "" bankruptcy would be bad, but not the worst thing possible. "" he intends to stick to his position. senator burton also dismissed governor davis'end-of-week deadline for striking a deal with the utilities. still, bankruptcy can be avoided if a last-minute deal can be struck monday on: what the state receives in return for the bailout the scope of rate hikes (a federal court is expected to rule on the pg & e/socal v. cpuc rate undercollection case monday) additional financing is made available to the dwr to buy more power until the revenue bonds can be issued in may. there is a possibility that significant progress on these issues could lead to a further extension of creditor forbearance. however, the negative tone taken by standard & poors and others concerning delays in the legislature suggest that further forbearance will difficult to achieve. the previous forbearance period was only achieved via a high-level washington summit which does not appear likely to happen this weekend. additional financing for dwr will not be automatically approved by the legislature. the non-energy expenditures of the california government are now at risk, as there is not yet a rate structure in place to recover the costs being expended on power from the general fund. 2. state to take 2/3 of utility debt while the state seems to have succeeded in forcing the utility parents to eat close to one third of the$ 12 billion debt, a final deal has been held up on two fronts. first, it is still unclear what the state will get in return for the utility debt. 
it is possible that there will be a mix of stock warrants and/or transmission assets. a takeover of the transmission assets seems more likely than a takeover of the hydro assets. the value of these assets still has not been settled. second, while the state will be on the hook for$ 9 billion, it is not clear what mixture of rate hikes and revenue bonds will be used to recover the cost of the bailout. finally, expect davis and other california politicians to work to minimize rate hikes (although the edison/pg & e v. cpuc case on monday is likely to force their hand here) and to do everything possible to avoid the appearance of a bailout. the tangible transmission assets are more politically attractive than the nebulous stock warrants. no price has been set at which the state would purchase the utilities'transmission assets, which are currently valued at approximately$ 7-$ 8 billion. all of the proceeds though cannot be used to pay off the utilities'debts, as some of the money would go to existing bondholders. however, ipp sources advise that there is already a bid on the table for these transmission assets that is higher than what the state would offer. 3. long-term contracts as noted by the governor in his announcement tuesday, only 500 mw of the 5,000 mw of power contracted for can come on-line immediately. much of the remainder reportedly was contracted in long-term purchases from suppliers who are building power plants. some of this will come on-line in approximately two years. assemblyman keeley expressed frustration that he has received a "" tablet from on high "" from governor davis that there must not be a rate increase. this means that the state must acquire power, not from internal sources or from the market, but through long-term contracts at 7. 39 cents/kwh. this allows 1. 213 cents to amortize socal edison's undercollection from the recent rate freeze period. (the number is slightly different for pg & e.) 
this assumption is based on a natural gas price of$ 7. 90 in 2001 and$ 5. 15 in 2005, and an efficiency heating rate of 10,000-12,000 in 2001 and 7,200 in 2005. these numbers were quoted to industry sources, who felt they were unrealistic. these sources quoted the 2001 price of natural gas as$ 9. 00-$ 9. 50. the sources agreed with keeley's number for the 2001 efficiency heating rate, but they felt that 7,200 in 2005 was very optimistic unless an enormous amount of new generation capacity comes on line. according to keeley's numbers and assuming the filed rate case is settled at$ 7 billion rather than$ 12 billion, it would take 5 to 6 years to amortize all of the utility undercollection. a settlement to this case will need to be reached so that the state can figure out how much to charge for power in order to amortize the undercollection. however, since assemblyman keeley's numbers are unrealistic, a rate increase will be necessary. 4. air quality district exemptions there have been a few bills introduced to provide exemptions from aqmd (air quality management district) regulations--ab 20 x, ab 28 x, ab 31 x. also, republicans have been asking the governor to lift the environmental regulations and immediately site the facility in san jose that was denied by the local government. currently there is no contemplation of loosening the aqmd compliance restrictions. the legislature will not allow "" dirtier "" plants to come on-line. however, there might be a change in the means of implementation in southern california by moving away from the use of credits (this apparently drives up the cost of gas-fired power). 5. democratic moderates pressured by consumer advocates the moderate left (sen. burton, the puc, consumer activists) is afraid of harvey rosenfield and his consumers movement. this is not just because of his initiative. 
more important from their perspective, his initiative puts him and the far left in a position to challenge and defeat the moderates in the next election. thus, democrats in the legislature will feel pressured to distance themselves from davis and slow down any further rate increases or bailout. 6. eminent domain would have a limited effect the threats by burton to seize generation assets to insure continued power supply are limited. they only apply to california suppliers. a federal order would be needed to seize assets from out-of-state suppliers. there are also canadian suppliers (such as bc hydro) who are essentially untouchable. 7. smaller ipps feeling the squeeze many of the smaller ipps, which account for approximately 2500 mw of production, appear to be within a few days of running out of cash. ab lx may be amended, possibly sometime this week, to give the smaller producers credit support."
0006.2001-04-03.williams 0 david gray " bill, is this the david gray you are going to see?? i listened to these clips. the music is kind of slow and romantic or something. is this how most of his sound is or did i just pick slower songs to listen to?"
0006.2001-06-25.SA_and_HP 1 looking 4 real fun 211075433222 talk on tele with locals in your area who want to meet for real encounters. no pre recorded bull this is the real deal. us residents: the 965 or 8919.-999 + be careful when making sexual dates and meetings. cali 900 # is$ 1. 99 per min 211075433222
0006.2003-12-18.GP 1 dobmeos with hgh my energy level has gone up! stukm " introducing doctor-formulated hgh human growth hormone-also called hgh is referred to in medical science as the master hormone. it is very plentiful when we are young, but near the age of twenty-one our bodies begin to produce less of it. by the time we are forty nearly everyone is deficient in hgh, and at eighty our production has normally diminished at least 90-95%. advantages of hgh: -increased muscle strength -loss in body fat -increased bone density -lower blood pressure -quickens wound healing -reduces cellulite -improved vision -wrinkle disappearance -increased skin thickness texture -increased energy levels -improved sleep and emotional stability -improved memory and mental alertness -increased sexual potency -resistance to common illness -strengthened heart muscle -controlled cholesterol -controlled mood swings -new hair growth and color restore read more at this website unsubscribe "
0006.2004-08-01.BG 1 :)) you can not save the world by quitting smoking but to save your self " tarrin ^, hulmeville. am ^. er. ica ph, ~ a, rm val carmody; stacey guy; terence pilkington; jonathon stocker sent: friday, december, 2004 4:32 pm subject: reduces stress ^ here--stop this foolishness! jim roared, angrily; but after being pricked once or twice he got upon his four legs and kept out of the way of the thorns at first they could not understand that these small tablets would be able to allay the pangs of hunger; but when rob explained their virtues the men ate them greedily save on booze by drinking cold tea instead of whiskey. the following morning you can create the effects of hangover by drinking a thimble full of dish washing liquid and banging your head repeatedly on the wall. mitral 1 jirapliegao 6 carlo ` n, motero jubilar."
0007.1999-12-13.kaminski 0 christmas break " fyi ----------------------forwarded by shirley crenshaw/hou/ect on 12/14/99 07:51 am--------------------------- "" van t. ngo "" on 12/04/99 11:17:01 am to: vince j kaminski/hou/ect @ ect cc: shirley crenshaw/hou/ect @ ect subject: christmas break dear vince, as the holidays approach, i am excited by my coming break from classes but also about the opportunity to see everyone at enron again and to work with you and them soon. i am writing to let you know that i would be very happy to work at enron over my break and i would like to plan out a schedule. my semester officially ends dec. 20 th but i may be out of town the week before christmas. i will be available the following three weeks, from monday, dec. 27 to friday, jan. 14. please let me know if during those three weeks, you would like me to work and for what dates you would need the most help so that we can arrange a schedule that would be most helpful to you and so that i can contact andrea at prostaff soon. please let me know if you have any concerns or questions about a possible work schedule for me. give my regards to everyone at the office and wishes for a very happy holiday season! i look forward to seeing you soon. sincerely, van ngo ph: 713-630-8038 -attl. htm"
0007.1999-12-14.farmer 0 mcmullen gas for 11/99 " jackie, since the inlet to 3 river plant is shut in on 10/19/99 (the last day of flow): at what meter is the mcmullen gas being diverted to? at what meter is hpl buying the residue gas? (this is the gas from teco, vastar, vintage, tejones, and swift) i still see active deals at meter 3405 in path manager for teco, vastar, vintage, tejones, and swift i also see gas scheduled in pops at meter 3404 and 3405. please advice. we need to resolve this as soon as possible so settlement can send out payments. thanks"
0007.2000-01-17.beck 0 global risk management operations " congratulations! dc ----------------------forwarded by danny clark/hou/ees on 01/18/2000 04:59 am--------------------------- rick causey @ enron 01/17/2000 06:04 pm sent by: enron announcements @ enron to: all enron worldwide cc: subject: global risk management operations recognizing enron , s increasing worldwide presence in the wholesale energy business and the need to insure outstanding internal controls for all of our risk management activities, regardless of location, a global risk management operations function has been created under the direction of sally w. beck, vice president. in this role, sally will report to rick causey, executive vice president and chief accounting officer. sally , s responsibilities with regard to global risk management operations will mirror those of other recently created enron global functions. in this role, sally will work closely with all enron geographic regions and wholesale companies to insure that each entity receives individualized regional support while also focusing on the following global responsibilities: 1. enhance communication among risk management operations professionals. 2. assure the proliferation of best operational practices around the globe. 3. facilitate the allocation of human resources. 4. provide training for risk management operations personnel. 5. coordinate user requirements for shared operational systems. 6. oversee the creation of a global internal control audit plan for risk management activities. 7. establish procedures for opening new risk management operations offices and create key benchmarks for measuring on-going risk controls. each regional operations team will continue its direct reporting relationship within its business unit, and will collaborate with sally in the delivery of these critical items. 
the houston-based risk management operations team under sue frusco , s leadership, which currently supports risk management activities for south america and australia, will also report directly to sally. sally retains her role as vice president of energy operations for enron north america, reporting to the ena office of the chairman. she has been in her current role over energy operations since 1997, where she manages risk consolidation and reporting, risk management administration, physical product delivery, confirmations and cash management for ena , s physical commodity trading, energy derivatives trading and financial products trading. sally has been with enron since 1992, when she joined the company as a manager in global credit. prior to joining enron, sally had four years experience as a commercial banker and spent seven years as a registered securities principal with a regional investment banking firm. she also owned and managed a retail business for several years. please join me in supporting sally in this additional coordination role for global risk management operations."
0007.2001-02-09.kitchen 0 california power 2/9 " the following information is from sensitive sources. please treat with discretion. contact robert johnston (x 39934) or kristin walsh (x 39510) for questions or additional info. bankruptcy early this week, there was a closed door meeting held by the western power trading forum in arizona. the meeting took place outside california to avoid press coverage and allow ipps to maintain a low profile. association representatives believe that regardless of what happens with the puc vs. utilities ruling expected on monday, some ipps will take the utilities into involuntary bankruptcy. our source expects that absent a significant last minute breakthrough, the filing will happen within the next two weeks, and "" probably "" next week. as stated in yesterdays report, the ipps are very low on cash and are not able to cover the debts of the pg & e and socal. only three creditors are needed with uncollected debts of more than$ 10,000 to file involuntary bankruptcy. bail out davis has been meeting with the chief executives of both utilities in a last ditch effort to make a deal prior to monday's court ruling. the most likely scenario is for state ownership of the utilities transmissions assets. however, coming to an agreed upon price will be very challenging. in addition, pg & e and edison appear to have competing agendas in pursuing a bail out plan. davis is expect to continue meetings through out the weekend in hopes of reaching an agreement before monday."
0007.2003-12-18.GP 1 say goodbye to long doctor visits! d " dont waste your time at the doctors office! rx medications delivered right to your door in 24 hours! pay less for your drugs get more for your$$$! join the millions of people who are tired of the hassle with the insurance companies and doctors! we carry all of the well-known drugs available and most of the unknown as well. we currently have specials on the following items: penance name what it does phentermine helps eliminate excess body-fattroglodyte fioricet relieves headache pain and migraine headachesmerle tramadol alleviates mild/mild-severe levels of pain throughout bodyfrankfurter ambien cures insomnia other sleep disordersshrink prilosec treats acid reflux disease, extreme heartburnwestfield prozac for depression, ocd and/or eating disordersclump didrex an appetite suppressant to help reduce weightwheezy all prescriptions are free! annals our qualified physicians are standing by to serve you. chisholm visit our site today and let us help you help yourself! agouti ypfpb rvsq ihf jpxdltmuk xqirzd ckgby zk slgavjmoqq zxs aqoj g "
0007.2004-08-01.BG 1 need software? click here. " top quality software: special offer # 1: windows xp professional + microsoft office xp professional = only$ 80 special offer # 2: adobe-photoshop 7, premiere 7, illustrator 10 = only$ 120 special offer # 3: macromedia dreamwaver mx 2004 + flash mx 2004 = only$ 100 also: windows 2003 server windows 2000 workstation windows 2000 server windows 2000 advanced server windows 2000 datacenter windows nt 4. 0 windows millenium windows 98 second edition windows 95 office xp professional office 2000 office 97 ms plus ms sql server 2000 enterprise edition ms visual studio. net architect edition ms encarta encyclopedia delux 2004 ms project 2003 professional ms money 2004 ms streets and trips 2004 ms works 7 ms picture it premium 9 ms exchange 2003 enterprise server adobe photoshop adobe pagemaker adobe illustrator adobe acrobat 6 professional adobe premiere macromedia dreamwaver mx 2004 macromedia flash mx 2004 macromedia fireworks mx 2004 macromedia freehand mx 11 corel draw graphics suite 12 corel draw graphics suite 11 corel photo painter 8 corel word perfect office 2002 norton system works 2003 borland delphi 7 enterprise edition quark xpress 6 passport multilanguage enter here"
0008.2001-02-09.kitchen 0 urg: gas securitization agreements " fyi-srs ----------------------forwarded by sherri sera/corp/enron on 02/09/2001 08:39 am--------------------------- "" jauregui, robert m "" on 02/08/2001 07:55:44 pm to: ""'taylorja 2 @ bp. com'"", ""'mcclankg @ bp. com'"", ""'sdba @ dynegy. com'"", ""'njwa @ dynegy. com'"", ""'pete. j. pavluk @ dynegy. com'"", ""'chuck. watson @ dynegy. com'"", ""'pwarden @ pillsburywinthrop. com'"", ""'repling @ pillsburywinthrop. com'"", ""'mmce @ dynegy. com'"", ""'swbe @ dynegy. com'"", ""'bcli @ dynegy. com'"", ""'singleton. greg @ epenergy. com'"", ""'hoferc @ epenergy. com'"", ""'smithc @ epenergy. com'"", ""'wisew @ epenergy. com'"", ""'jonesg @ epenergy. com'"", ""'colliw @ texaco. com'"", ""'benewm @ texaco. com'"", ""'underga @ texaco. com'"", ""'hans @ cook-inlet. com'"", ""'prez @ cook-inlet. com'"", ""'davidyi @ cook-inlet. com'"", ""'dronn @ mayerbrown. com'"", ""'swidner @ coral-energy. com'"", ""'blong @ coral-energy. com'"", ""'ctise @ coral-energy. com'"", ""'jeff. skilling @ enron. com'"", ""'william. s. bradford @ enron. com'"", ""'travis. mccullough @ enron. com'"", ""'iccenergy @ aol. com'"", ""'stefkatz @ cs. com'"", ""'msessa @ sempratrading. com'"", ""'dfelsinger @ sempra. com'"", ""'mcosta @ stroock. com'"", ""'jshorter @ txuenergy. com'"", ""'mperkins 2 @ txuelectric. com'"", ""'cenochs @ txuenergy. com'"", ""'bjeffrie @ westerngas. com'"", ""'ryanmcgeachie @ aec. ca'"", ""'richarddaniel @ aec. ca'"", ""'jones. murphy @ williams. com'"", ""'randall. o'neal @ williams. com'"", ""'kelly. knowlton @ williams. com'"", ""'connie. turner @ williams. com'"", ""'scampbell @ txuenergy. com'"", ""'ilydiatt @ altra. com'"", ""'dkohler @ br-inc. com'"", ""'reason @ br-inc. com'"", ""'sallen @ duke-energy. com'"", ""'rsbaker @ duke-energy. com'"", ""'richard. ruzika @ gs. com'"", ""'steve. brown @ southernenergy. com'"", ""'kenny. foo @ ngx. com'"", ""'tgary @ pcenergy. com'"", ""'bredd @ pcenergy. 
com'"", ""'harry_wijsman @ pcp. ca'"", ""'celias @ pcenergy. com'"", ""'hal-borlan @ reliantenergy. com'"", ""'priscilla-massey @ reliantenergy. com'"", ""'llittle @ reliantenergy. com'"", ""'gary-lamb @ transcanada. com'"", ""'larry-desmeules @ coastenergy. com'"", ""'five 5 wood @ aol. com'"", ""'don. fishbeck @ cmenergy. com'"", ""'randy. harrison @ southernenergy. com'"", ""'john. krill @ engageenergy. com'"", ""'glen. mackey @ energy. com'"", ""'doug. rabey @ energy. com'"", ""'michael_huse @ transcanada. com'"" cc: ""'jlopes @ hrice. com'"", ""'jnexon @ hrice. com'"", "" buchsbaum, craig m (corp) "", "" whelan, steve (corp) "", "" lee, fanny "", "" berkovitz, trista "", "" clare, david "", "" woo, shirley a (law) "", "" mclafferty, daniel "", "" cotroneo, eileen "", ""'dmao @ orrick. com'"", "" litteneker, randall (law) "", "" gee, dennis "", "" welch, ray "" subject: urg: gas securitization agreements i am pleased to report that, as 2/8/01,4:00 pm pst, pg & e has executed the gas supplier security agreement and the intercreditor agreement with the following suppliers: * bp energy company * dynegy canada marketing & trade, a division of dci * dynegy marketing & trade * el paso merchant energy, l. p. * texaco canada * texaco natural gas * txu energy trading canada limited * txu energy trading company * williams energy marketing & trading co. (us & canada) as you know, other suppliers are not precluded from future participation (we highly encourage it) however, we will now require completion of exhibit b (supplier joinder agreement) as outlined in 8. (j). i would greatly appreciate your replying to this email to let us know whether you intend to be a party to these agreements at this time. please call if you have any questions. please forward all correspondence to: trista berkovitz director, gas procurement pacific gas and electric company 77 beale street, room 553 san francisco, ca 94105-1814 415. 973. 2152 (bus) 415. 973. 
9213 (fax) notice to recipient: this e-mail is meant for only the intended recipient of the transmission, and may be a communication privileged by law. if you received this e-mail in error, any review, use, dissemination, distribution, or copying of this e-mail is strictly prohibited. please notify us immediately of the error by return e-mail and please delete this message from your system. thank you in advance for your cooperation."
0008.2001-06-12.SA_and_HP 1 spend too much on your phone bill? 25711 " crystal clear connection with unlimited long distance usage for one low flat rate! now try it for free!! * see for yourself. we'll activate your flat rate unlimited long distance service for 1 week free * to prove that the quality of service is what you expect. call now! operators standing by to activate your service. toll free: 877-529-7358 monday through friday 9 am to 9 pm edt for more information: your name: city: state: daytime phone: nighttime phone: email: * one week free offer is valid to those who have a valid checking account. service is never billed until after the 1 week free trial period. if you have received this by error or wish to be removed from our mailing list, please click here"
0008.2001-06-25.SA_and_HP 1 " your membership exchange, issue # 422" " content-type: text/plain; charset = iso-8859-1 ______________________________________________________your membership exchange issue # 422 06-25-01 your place to exchange ideas, ask questions, swap links, and share your skills!____________________________________________________________________________________________________________you are a member in at least one of these programs-you should be in them all! www. bannersgomlm. comwww. profitbanners. comwww. cashpromotions. comwww. mysiteinc. comwww.. comwww. freelinksnetwork. comwww. myshoppingplace. comwww. bannerco-op. comwww. putpeel. comwww. putpeel. netwww. sellinternetaccess. comwww. be-your-own-isp. comwww. seventhpower. com______________________________________________________today's special announcement: we can help you become an internet service provider within 7 daysor we will give you$ 100. 00!! http:// www. sellinternetaccess. comclick herewe have already signed 300 isps on a 4 year contract, see if anyare in your town at: http:// www. find-local-isp. com click here____________________________________________________________________________________________________________be sure to examing today's showcases for sites who will trade links with you!>> resource board p. steeves: internet explorer hint w/"" image toolbar "">> q & a questions:-unblocking sites so i can access?>> member showcases>> member * reviews *-sites to review: # 122 therefore, i believe it is better than the last version. there is, though, one little agonizing message "" image toolbar "" that pops up every time you pass the mouse over an image. it asks whether you want to save or print the image. ugh, what a pest. hurrah, you can get rid of the image toolbar. justperform a right mouse click over the image toolbar. it will allow you to disable the image toolbar forthis session or forever. 
if you want to re-establishthe image toolbar just go to the internet optionscontrol panel and you can turn the image toolbar back on. remember, a right mouse click over any windows icon, window, tool bar, the desktop, and most other windowsentities will list loads of information. use it, getthe hang of it, you'll like it. peter a. steeves, b. sc., m. sc., ph. d., p. eng. geomatics engineergeodetic software systemslogical @ idirect. comhttp:// www. gssgeomatics. com______________________________________________________>>>>>>>>>>>>>questions free advertising to other members, and soon chancesto win cash! http:// www. cashpo. net/cashpo/openpage. php 4? c = 2------------------------------------------------------visit ward's gift shop! here you can find all your shopping needs on line, and good qualityproducts; everyday low prices! we have dolls, angels, novelties, and so much much more to choose from. go to our site, and getyour free catalog today; over 3,000 products to choose from. http:// www. wardsgiftshop. com trade links-bjwl 23 @ freeonline. com-----------------------------------------------------attention all web marketers-$ 30 k-$ 100 k cash this yearno experience needed, no product to sell. the real go getterscan make$ 100,000. 00 cash, in their first month this is verypowerful, contact me today ycon @ home. com orgoto: http:// www. makecashonline. com get excited:) trade links-ycon @ home. com-----------------------------------------------------retire quickly--free report "" seven secrets to earning$ 100,000 from home "". fully automated home business. 81% commissions-incomeunlimited. automated sales, recruiting and training machine. join now! http:// orleantraders. 4 yoursuccess. orgtrade links-bgmlm @ 4 yoursuccess. org-----------------------------------------------------if you have a product, service, opportunity and/or quality merchandisethat appeals to people worldwide, reach your target audience! 
for a fraction of what other large newsletters charge youcan exhibit your website here for only$ 8 cpm. why?... because as a valuable member we want you to be successful! order today-exhibits are limited and published on afirst come, first serve basis. http:// bannersgomlm. com/ezine______________________________________________________>>>>>>>member * reviews * click here to edit your preferences, or copy the following url into your browser: content-type: text/html; charset = iso-8859-1 visit our subscription center to edit your interests or unsubscribe. view our privacy policy. this email was sent to those who signed up for it. if you believe it has reached you in error, or you are no longer interested in receiving it, then please click here to edit your preferences, or copy the following url into your browser: "
0008.2003-12-18.GP 1 when sp @ m doesn't annoy you eternal " benson sibilant chartroom hello, do you hate spam? this program worked for me. if you hate spam like i do, you owe it to your self to try this program, and forward this email to all of your friends which also hate spam or as many people possible. together lets help clear the internet of spam! stop spam in its tracks! do you get junk, scams and worse in your inbox every day? are you sick of spending valuable time removing the trash? is your child receiving inappropriate adult material? if so you should know that no other solution works better then our software to return control of your email back where it belongs! imagine being able to read your important email without looking through all that spam... stop spam in its tracks starting today. opt-out here. expanse glenda litterbug "
0008.2004-08-01.BG 1 " slotting order confirmation may 18,2004 etacitne" " {% begin_split 76%} the rest of the afternoon was spent in making up the train. i am afraid to say how many baggage-waggons followed the engine, certainly a score; then came the chinese, then we, then the families, and the rear was brought up by the conductor in what, if i have it rightly, is called his caboose. the class to which i belonged was of course far the largest, and we ran over, so to speak, to both sides; so that there were some caucasians among the chinamen, and some bachelors among the families. but our own car was pure from admixture, save for one little boy of eight or nine who had the whooping-cough. at last, about six, the long train crawled out of the transfer station and across the wide missouri river to omaha, westward bound. it was a troubled uncomfortable evening in the cars. there was thunder in the air, which helped to keep us restless. a man played many airs upon the cornet, and none of them were much attended to, until he came to "" home, sweet home. "" it was truly strange to note how the talk ceased at that, and the faces began to lengthen. i have no idea whether musically this air is to be considered good or bad; but it belongs to that class of art which may be best described as a brutal assault upon the feelings. pathos must be relieved by dignity of treatment. if you wallow naked in the pathetic, like the author of "" home, sweet home, "" you make your hearers weep in an unmanly fashion; and even while yet they are moved, they despise themselves and hate the occasion of their weakness. it did not come to tears that night, for the experiment was interrupted. an elderly, hard-looking man, with a goatee beard and about as much appearance of sentiment an you would expect from a retired slaver, turned with a start and bade the performer stop that "" damned thing. 
"" "" i've heard about enough of that, "" he added; "" give us something about the good country we're going to. "" a murmur of adhesion ran round the car; the performer took the instrument from his lips, laughed and nodded, and then struck into a dancing measure; and, like a new timotheus, stilled immediately the emotion he had raised. aka: vicqodin, xacnax, suprervisagra and much morne- no presccription neyeded! civilizirano gullweig gxol sylg the day faded; the lamps were lit; a party of ht ht men, who got off next evening at north platte, stood together on the stern platform, singing "" the sweet by-and-bye "" with very tuneful voices; the chums began to put up their beds; and it seemed as if the business of the day were at an end. but it was not so; for, the train stopping at some station, the cars were instantly thronged with the natives, wives and fathers, ht men and maidens, some of them in little more than nightgear, some with stable lanterns, and all offering beds for sale. their charge began with twenty-five cents a cushion, but fell, before the train went on again, to fifteen, with the bed-board gratis, or less than one-fifth of what i had paid for mine at the transfer. this is my contribution to the economy of future emigrants. a great personage on an american train is the newsboy. he sells books (such books!), papers, fruit, lollipops, and cigars; and on emigrant journeys, soap, towels, tin washing dishes, tin coffee pitchers, coffee, tea, sugar, and tinned eatables, mostly hash or beans and bacon. early next morning the newsboy went around the cars, and chumming on a more extended principle became the order of the hour. it requires but a copartnery of two to manage beds; but washing and eating can be carried on most economically by a syndicate of three. i myself entered a little after sunrise into articles of agreement, and became one of the firm of pennsylvania, shakespeare, and dubuque. 
shakespeare was my own nickname on the cars; pennsylvania that of my bedfellow; and dubuque, the name of a place in the state of iowa, that of an amiable ht fellow going west to cure an asthma, and retarding his recovery by incessantly chewing or smoking, and sometimes chewing and smoking together. i have never seen tobacco so sillily abused. shakespeare bought a tin washing-dish, dubuque a towel, and pennsylvania a brick of soap. the partners used these instruments, one after another, according to the order of their first awaking; and when the firm had finished there was no want of borrowers. each filled the tin dish at the water filter opposite the stove, and retired with the whole stock in trade to the platform of the car. there he knelt down, supporting himself by a shoulder against the woodwork or one elbow crooked about the railing, and made a shift to wash his face and neck and hands; a cold, an insufficient, and, if the train is moving rapidly, a somewhat dangerous toilet."
0009.1999-12-13.kaminski 0 christmas-near " good morning all. we apologize that we are not going to be able to have our holiday party before the first of the year. we wanted to use the scout house in west university like we did last year and it was not available. vince suggested that with the move and a lot of people taking vacation that we wait until after the first of the year. this way you can take advantage of "" after christmas sales "" for your gift! just remember whose name you have and we will schedule an "" offsite "" after the first of the year. thanks! shirley ----------------------forwarded by shirley crenshaw/hou/ect on 12/13/99 09:23 am--------------------------- kevin g moore 12/13/99 08:58 am to: vince j kaminski/hou/ect @ ect, stinson gibner/hou/ect @ ect, grant masson/hou/ect @ ect, vasant shanbhogue/hou/ect @ ect, maureen raymond/hou/ect @ ect, pinnamaneni krishnarao/hou/ect @ ect, zimin lu/hou/ect @ ect, mike a roberts/hou/ect @ ect, samer takriti/hou/azurix @ azurix, amitava dhar/corp/enron @ enron, joseph hrgovcic/hou/ect @ ect, alex huang/corp/enron @ enron, kevin kindall/corp/enron @ enron, osman sezgen/hou/ees @ ees, tanya tamarchenko/hou/ect @ ect, vincent tang/hou/ect @ ect, ravi thuraisingham/hou/ect @ ect, paulo issler/hou/ect @ ect, martin lin/hou/ect @ ect, ross prevatt/hou/ect @ ect, michael sergeev/hou/ect @ ect, patricia tlapek/hou/ect @ ect, roman zadorozhny/hou/ect @ ect, martina angelova/hou/ect @ ect, jason sokolov/hou/ect @ ect, shirley crenshaw/hou/ect @ ect cc: subject: christmas-near hello everyone, the pulling of names are completed. shirley will inform you as to when we will make exchanges. 
thanks kevin moore ----------------------forwarded by kevin g moore/hou/ect on 12/13/99 08:50 am--------------------------- kevin g moore 12/10/99 08:28 am to: vince j kaminski/hou/ect @ ect, stinson gibner/hou/ect @ ect, grant masson/hou/ect @ ect, vasant shanbhogue/hou/ect @ ect, maureen raymond/hou/ect @ ect, pinnamaneni krishnarao/hou/ect @ ect, zimin lu/hou/ect @ ect, mike a roberts/hou/ect @ ect, samer takriti/hou/azurix @ azurix, amitava dhar/corp/enron @ enron, joseph hrgovcic/hou/ect @ ect, alex huang/corp/enron @ enron, kevin kindall/corp/enron @ enron, osman sezgen/hou/ees @ ees, tanya tamarchenko/hou/ect @ ect, vincent tang/hou/ect @ ect, ravi thuraisingham/hou/ect @ ect, paulo issler/hou/ect @ ect, martin lin/hou/ect @ ect, ross prevatt/hou/ect @ ect, michael sergeev/hou/ect @ ect, patricia tlapek/hou/ect @ ect, roman zadorozhny/hou/ect @ ect, martina angelova/hou/ect @ ect, jason sokolov/hou/ect @ ect, shirley crenshaw/hou/ect @ ect cc: subject: christmas-near goodmorning, things went well on yesterday with names being pulled. here is a list of people who have to pull a name. stinson gibner samer takriti ravi thuraisingham martin lin alexios kollaros shirley crenshaw let's celebrate at work with each other making the last christmas in 1999- great! reminder: if you feel you will be unable to attend the exchanging of the gifts, please do not let that stop you from participating. each persons name has been entered; can you guess who has your name? we have a gift for you. so if you can not attend for any reason please know that you are included and your gift will be here when you return. wishing all a merry christmas, and a good kick-off to happy holidays. 
thanks kevin moore ----------------------forwarded by kevin g moore/hou/ect on 12/10/99 06:40 am--------------------------- kevin g moore 12/08/99 07:47 am to: vince j kaminski/hou/ect @ ect, stinson gibner/hou/ect @ ect, grant masson/hou/ect @ ect, vasant shanbhogue/hou/ect @ ect, maureen raymond/hou/ect @ ect, pinnamaneni krishnarao/hou/ect @ ect, zimin lu/hou/ect @ ect, mike a roberts/hou/ect @ ect, samer takriti/hou/azurix @ azurix, amitava dhar/corp/enron @ enron, joseph hrgovcic/hou/ect @ ect, alex huang/corp/enron @ enron, kevin kindall/corp/enron @ enron, osman sezgen/hou/ees @ ees, tanya tamarchenko/hou/ect @ ect, vincent tang/hou/ect @ ect, ravi thuraisingham/hou/ect @ ect, paulo issler/hou/ect @ ect, martin lin/hou/ect @ ect, ross prevatt/hou/ect @ ect, michael sergeev/hou/ect @ ect, patricia tlapek/hou/ect @ ect, roman zadorozhny/hou/ect @ ect, martina angelova/hou/ect @ ect, jason sokolov/hou/ect @ ect, shirley crenshaw/hou/ect @ ect cc: subject: christmas drawing-near ho! ho! ho! merry christmas, on thursday we will pull names. once again, this is so we may share in the christmas spirit and show our appreciation for one another. we will then join and exchange gifts on a later date..... stay tuned.................. if for some chance you will not be present on thursday, feel free to stop by my desk and pull your name today. 
eb 3130 a x 34710 join in the fun and remember, keep it simple thanks kevin moore ----------------------forwarded by kevin g moore/hou/ect on 12/08/99 06:55 am--------------------------- kevin g moore 12/07/99 09:40 am to: vince j kaminski/hou/ect @ ect, stinson gibner/hou/ect @ ect, grant masson/hou/ect @ ect, vasant shanbhogue/hou/ect @ ect, maureen raymond/hou/ect @ ect, pinnamaneni krishnarao/hou/ect @ ect, zimin lu/hou/ect @ ect, mike a roberts/hou/ect @ ect, samer takriti/hou/azurix @ azurix, amitava dhar/corp/enron @ enron, joseph hrgovcic/hou/ect @ ect, alex huang/corp/enron @ enron, kevin kindall/corp/enron @ enron, osman sezgen/hou/ees @ ees, tanya tamarchenko/hou/ect @ ect, vincent tang/hou/ect @ ect, ravi thuraisingham/hou/ect @ ect, paulo issler/hou/ect @ ect, martin lin/hou/ect @ ect, ross prevatt/hou/ect @ ect, michael sergeev/hou/ect @ ect, patricia tlapek/hou/ect @ ect, roman zadorozhny/hou/ect @ ect, martina angelova/hou/ect @ ect, jason sokolov/hou/ect @ ect, shirley crenshaw/hou/ect @ ect cc: subject: christmas drawing-near hello everyone, we would like for christmas this year that the research group pull names, as a way of sharing in the spirit of christmas, and as appreciation for one another. we want to keep it simple so the gift should be less than twenty-dollars. please everyone participate, your name is already entered. i will return with more info. later........... thanks kevin moore let's have a wonderful christmas at work."
0009.1999-12-14.farmer 0 meter 1517-jan 1999 " george, i need the following done: jan 13 zero out 012-27049-02-001 receipt package id 2666 allocate flow of 149 to 012-64610-02-055 deliv package id 392 jan 26 zero out 012-27049-02-001 receipt package id 3011 zero out 012-64610-02-055 deliv package id 392 these were buybacks that were incorrectly nominated to transport contracts (ect 201 receipt) let me know when this is done hc"
0009.2000-06-07.lokay 0 human resources organization " as enron continues to address the human capital needs of the organization, there are several changes in enron , s human resources (hr) organization i , d like to share with you: in corporate human resources: brian schaffer will lead the office of labor and employment relations function, including resource management, corporate training activities, and workforce development. mary joyce will continue to have responsibility for executive compensation and our global equity plans. cynthia barrow, in addition to benefits, will be responsible for the development of work life programs across enron. brad coleman will be responsible for analysis and reporting, in addition to the re-engineering of the hr service center. gerry gibson will work closely with me to provide organizational development & training expertise for hr , s continuing evolution. andrea yowman will be responsible for several projects which are critical to hr , s on-going success including the sap implementation, global information system (gis) database, and total compensation system development. in addition, she will have responsibility for the human resource information system (hris). the hr generalist functions for corporate will be handled by the following: gwen petteway, public relations, government affairs, legal, investor relations, corporate development, epsc, aviation, enron federal credit union and the analyst and associate program kim rizzi, accounting and human resources sheila walton, rac, finance and enron development corp, in addition to her responsibilities in ena at the business unit level, we , ve established two geographic hubs for our wholesale business units: drew lynch will be in london with the hr responsibility for the eastern hemisphere including london, apachi and india. drew , s senior leadership team will include: nigel sellens, ranen sengupta and scott gilchrist. 
david oxley will be located in houston with hr responsibility for the western hemisphere including north america, calme and south america. david , s senior leadership team will include: miguel padron, janie bonnard, sheila knudsen and cindy skinner. we believe these hubs can result in a more effective hr organization and also facilitate the movement of talent where needed in those regions. the following are the hr leaders responsible for the remaining business units: dave schafer gpg gary smith wind robert jones net works marla barnard ebs ray bennett ees/efs willie williams ee & cc/nepco gerry chatham egep please click on the following link to view the hr organization chart. "
0009.2001-02-09.kitchen 0 " re: brazil commercial-* * update version, delete previous * *" " louise, sorry, i just received your note. the extent of any discussion was dave thanking me over the phone for the inputs and recognizing that he and john incorporated some of the content. we've never had what i had been expecting, or at least presumed appropriate i. e., prior to any formal decision for the region, entertaining an in-person detailed discussion of the examples and recommendations-including a welcome cross examination of my observations that might conflict with official report or view. john and dave asked me to talk with brett, kish and gonzalez, but if you look at my recommendations and also consider the overall performance in the region does this make the best sense? i've done this again and, in fact, had already introduced my ideas to each of them and the previous system prior to forwarding them to houston. prior to having to leave enron, i wanted to make a best effort to get the authority to execute the recommendations i've pretty consistently introduced since joining in late 1998 and tried to get enron to implement under the previous management. i have not had the opportunity to manage or influence the company's operations in south america-this is my fault for not negotiating a more senior position, but i made the attempt in the region and more recently with the new managers to demonstrate a need to open constructive, critical discussion. anyway, i've tried to open the door to john, dave and you and others who are interested to brainstorm inlcuding the leadership in place in esa to understand and perhaps leverage my applied experience-both trading, origination and, importantly, cultural. my resume below should attest to my capabilities-at least in terms of the potential value of my observations. thank you for your efforts extended in my behalf and good luck in your new position and working within enron's unique, dynamic ethic. 
d'arcy louise kitchen @ ect 02/09/2001 01:46 pm to: d'arcy carroll/sa/enron @ enron cc: subject: re: brazil commercial-* * update version, delete previous * * i have spoken to john lavorato on this and he says that dave and john have already spoken to you on this. do we still need to meet as i have no different opinion to them at this time. louise d'arcy carroll @ enron 02/09/2001 11:13 am to: louise kitchen @ ect cc: subject: brazil commercial-* * update version, delete previous * * louise, this is a lot of text including the attatched files, but is the summary gist of what i have tried to communicate internally and am asking to discuss with you this morning. ----------------------forwarded by d'arcy carroll/sa/enron on 02/09/2001 02:14 pm--------------------------- d'arcy carroll 11/09/2000 06:20 pm to: david w delainey @ ect, john j lavorato/corp/enron @ enron cc: kay chapman @ ect subject: brazil commercial-* * update version, delete previous * * david/john-understand the trip will be delayed. proposal outline has two texts-i. commercial strategy and ii. historical perspective. the org charts will need some discussion-particularly in regard to the strengths and weakness of employed personnel and urgent need for an improved structure. over the fh 2000 and within the new structure with brett and joao carlos albuquerque in place, the wholesale group and trading desk seems to have made some important strides forward in terms of recruiting some good individuals and, in trading terms, finally executing some fundamental market supply, demand and transmission analysis. to get into the game quickly and aggressively, though, i think the commercial group needs to hire some senior, local trading expertise. i apoligize, but was unable to get in contact with either of these two guys to set up a possible meeting this week. 
however, they have the local knowledge, trading competencies and management experience which i consider needed to catalyze the regional effort: axel hinsch-argentine and cargill employee with several years and broad commodity and financial trading, business development and management experience, including senior trader for the bear stearns emerging markets equity desks in the late 1980 s/early 1990 s. straight up, no ego argentine country manager. mark hoffman-swiss/brazilian and glencore employee with several years energy, energy distribution and sugar sector experience; applied commodity and financial arbitrage experience in the brazilian market. lot less straight forward, but applied knowledge and expertise. senior originator/trader. please forward any input about your interest in scheduling a meeting either open here or in houston during the week of nov 20. for some perspective on my experience at enron, let me explaing that i have been working in enron networks in the region from q 2'00 and therefore much less formally invovled with the trading (brazil spot market) and wholesale pricing, tarrif issues etc.,.. than at the end of fyl 999 when i was directly involved in developing our effort to get in the game in understanding the spot price formula calculations and exploring arbitrage opportunities in the wholesale market. i've attached my resume for some perspective on my background and capability to critically review the commercial (trading and marketing) and managerial issues involving the past and future opportunities. "
0009.2001-06-26.SA_and_HP 1 "double your life insurance at no extra cost! 29155 the lowest life insurance quotes without the hassle! compare rates from the nation's top insurance companies shop, compare and save fill out the simple form, and you'll have the 15 best custom quotes in 1 minute. compare your current coverage to these sample 10-year level term monthly premiums (20 year, 30 year and smoker rates also available) $ 250,000 $ 500,000 $ 1,000, 000 age male female male female male female 30 $ 12 $ 11 $ 19 $ 15 $ 31 $ 27 40 $ 15 $ 13 $ 26 $ 21 $ 38 $ 37 50 $ 32 $ 24 $ 59 $ 43 $ 107 $ 78 60 $ 75 $ 46 $ 134 $ 87 $ 259 $ 161 click here to compare! it's fast, easy and free! * all quotes shown are from insurance companies rated a-, a, a + or a + + by a.m. best company (a registered rating service) and include all fees and commissions. actual premiums and coverage availability will vary depending upon age, sex, state availability, health history and recent tobacco usage. to unsubscribe, reply with unsubscribe in subject! "
0009.2003-12-18.GP 1 new clonazepam. m xanax. x valium. m vicodin. n dhyngem many specials running this week the re. al thing not like the other sites that imitate these products. no hidd. en char. ges-fast delivery vic. odin val. ium xan. ax via. gra diaz. epam alpra. zolam so. ma fior. icet amb. ien stil. nox ult. ram zo. loft clon. azepam at. ivan tr. amadol xeni. cal cele. brex vi. oxx pro. zac bus. par much m. ore.... if you have recieved this in error please use http:// www. nowbetterthis. biz/byee. html fuohqjlsjcqp x odlx gxxu
0010.1999-12-14.farmer 0 duns number changes " fyi ----------------------forwarded by gary l payne/hou/ect on 12/14/99 02:35 pm --------------------------- from: antoine v pierre 12/14/99 02:34 pm to: tommy j yanowski/hou/ect @ ect, kathryn bussell/hou/ect @ ect, gary l payne/hou/ect @ ect, diane e niestrath/hou/ect @ ect, romeo d'souza/hou/ect @ ect, michael eiben/hou/ect @ ect, clem cernosek/hou/ect @ ect, scotty gilbert/hou/ect @ ect, dave nommensen/hou/ect @ ect, david rohan/hou/ect @ ect, kevin heal/cal/ect @ ect, richard pinion/hou/ect @ ect cc: mary g gosnell/hou/ect @ ect, jason moore/hou/ect @ ect, samuel schott/hou/ect @ ect, bernice rodriguez/hou/ect @ ect subject: duns number changes i will be making these changes at 11:00 am on wednesday december 15. if you do not agree or have a problem with the dnb number change please notify me, otherwise i will make the change as scheduled. dunns number change: counterparty cp id number from to cinergy resources inc. 62163 869279893 928976257 energy dynamics management, inc. 69545 825854664 088889774 south jersey resources group llc 52109 789118270 036474336 transalta energy marketing (us) inc. 62413 252050406 255326837 philadelphia gas works 33282 148415904 146907159 thanks, rennie 3-7578"
0010.1999-12-14.kaminski 0 stentofon " goodmorning liz, we are in need of another stentofon for trisha tlapek. she works very closely with the traders and it is important for quick communication. thanks kevin moore"
0010.2001-02-09.kitchen 0 " brazil commercial-* * update version, delete previous * *" " louise, this is a lot of text including the attatched files, but is the summary gist of what i have tried to communicate internally and am asking to discuss with you this morning. ----------------------forwarded by d'arcy carroll/sa/enron on 02/09/2001 02:14 pm--------------------------- d'arcy carroll 11/09/2000 06:20 pm to: david w delainey @ ect, john j lavorato/corp/enron @ enron cc: kay chapman @ ect subject: brazil commercial-* * update version, delete previous * * david/john-understand the trip will be delayed. proposal outline has two texts-i. commercial strategy and ii. historical perspective. the org charts will need some discussion-particularly in regard to the strengths and weakness of employed personnel and urgent need for an improved structure. over the fh 2000 and within the new structure with brett and joao carlos albuquerque in place, the wholesale group and trading desk seems to have made some important strides forward in terms of recruiting some good individuals and, in trading terms, finally executing some fundamental market supply, demand and transmission analysis. to get into the game quickly and aggressively, though, i think the commercial group needs to hire some senior, local trading expertise. i apoligize, but was unable to get in contact with either of these two guys to set up a possible meeting this week. however, they have the local knowledge, trading competencies and management experience which i consider needed to catalyze the regional effort: axel hinsch-argentine and cargill employee with several years and broad commodity and financial trading, business development and management experience, including senior trader for the bear stearns emerging markets equity desks in the late 1980 s/early 1990 s. straight up, no ego argentine country manager. 
mark hoffman-swiss/brazilian and glencore employee with several years energy, energy distribution and sugar sector experience; applied commodity and financial arbitrage experience in the brazilian market. lot less straight forward, but applied knowledge and expertise. senior originator/trader. please forward any input about your interest in scheduling a meeting either open here or in houston during the week of nov 20. for some perspective on my experience at enron, let me explaing that i have been working in enron networks in the region from q 2'00 and therefore much less formally invovled with the trading (brazil spot market) and wholesale pricing, tarrif issues etc.,.. than at the end of fyl 999 when i was directly involved in developing our effort to get in the game in understanding the spot price formula calculations and exploring arbitrage opportunities in the wholesale market. i've attached my resume for some perspective on my background and capability to critically review the commercial (trading and marketing) and managerial issues involving the past and future opportunities."
0010.2001-06-28.SA_and_HP 1 urgent business proposal "mrs. regina rossman. # 263 sandton city johannesburg, south africa. e-mail: joel_rosel @ mail. com attn: alhaji with due respect, trust and humility, i write you this proposal, which i believe, would be of great interest to you. i am mrs. regina rossman, the wife of late mr. joseph rossman of blessed memory, before forces loyal to major johnny paul koromah killed my husband; he was the director general of gold and mining corporation (g. d. m. c.) of sierra leone. my husband was one of the people targeted by the rebel forces. on the course of the revolution in the country, prominent people were hijacked from their homes to an unknown destination. two days before his death, he managed to sneak a written message to us, explaining his condition and concerning one trunk box of valuables containing money, which he concealed under the roof. he instructed me to take our son and move out of sierra leone, immediately to any neighboring country. the powerful peace keeping force of the (ecomog) intervened to arrest the situation of mass killings by the rebels, which was the order of the day. eventually, it resulted into full war, i became a widow overnight, helpless situation, without a partner at the moment of calamity, and every person was running for his life. my son and i managed to escape to south africa safely with the box and some documents of property title. the cash involved inside the box was us$ 30 million (thirty million united states dollars). due to fear and limited rights as a refugee, i deposited the items with a private security company in order not to raise an eyebrow over the box here in south africa in my son's name joel r. rossman. be informed that the real content of the box was not disclosed. meanwhile, i want to travel out of south africa entirely with this money for investment in your country because of political and economic stability and for future benefit of my child. 
i want you to assist us claim this box from the security company and get the money into your private account in your country so that we can invest the money wisely. we have in mind to establish a rewarding investment and good relationship with you. concerning the money, we are prepared to give you reasonable percentage of 30% for your kind assistance. also, we have decided to set aside 5% of the total sum for expenses that might be incurred by the parties in the course of the transfer both locally and externally. for the interest of this business, do not hesitate to contact my son mr. joel r. rossman on the above e-mail address immediately you receive this message for more information and to enable us proceed towards concluding all our arrangements. no other person knows about this money apart from my son and i. we await your most urgent response. please we need your fax/phone numbers for esiear communication. thanking you for your co-operation and god bless you. best regard, mrs. regina rossman. http:// xent. com/mailman/listinfo/fork"
0010.2003-12-18.GP 1 re: hot topics: growing young NA
0010.2004-08-01.BG 1 " we shiip to ur country for mircosoft, adobe, norton charset = us-ascii "" >" " cheap softtwares for you, all are original genuine! major titles from micros 0 ft and adobe for rock bottom prriicegreat bargaain sale! variety discoount softtwares at wholesale chaeap pricing! microsoft windows xp professional-my price:$ 50; normal:$ 299. 00; you saave$ 249. 00 adobe photoshop cs v 8. o pc-my price:$ 80; normal:$ 609. 99; you save$ 529. 99 microsoft office xp professional-my price:$ 100; normal:$ 499. 95; you saave$ 399. 95 adobe acrobaat v 6. o professional pc-my price:$ 100; normal:$ 449. 95; you saave$ 349. 95 microsoft office 2 oo 3 professional-my price:$ 80; normal:$ 499. 95; you saave$ 419. 95 norton antivirus 2 oo 4 professional-my price:$ 15; normal:$ 69. 95; you saave$ 54. 95 coreldraw graphics suite v 12 pc-my price:$ 100; normal:$ 349. 95; you saave$ 249. 95 adobe pagemaker v 7. o pc-my price:$ 80; normal:$ 599. 95; you saave$ 519. 95 we do have full range softwares--macromedia, mc-afeee, adobee, coreldraw, microsoft, nero, pinnacle systems, powerquest, redhat, riverdeep, roxio, symaantec, 321 studio 52 more popular titles for you>> cliickk here for 52 more titles we shiip to all countries including africa, finland & etc.. as where u located wonder why our priices are unbelievably low? we are currently clearing our goods at incredibily cheeap sale-priice in connection with the shutdown of our shop and the closure of the stockhouse. don't missss your lucky chance to get the best priicce on discoouunt software! we are the authorized agent and an established reseller offering oem licensing software. we possesses all the necessary certificates issued to verify the authenticity of genuine oem products and granting the right for us to resell oem software products. super cheaep micros 0 ft, adobe & all kinds.. cliickk here to enjoy our superb discounnt! take me down "
0011.1999-12-14.farmer 0 king ranch " there are two fields of gas that i am having difficulty with in the unify system. 1. cage ranch-since there is no processing agreement that accomodates this gas on king ranch, it is my understanding hpl is selling the liquids and king ranch is re-delivering to stratton. it is also my understanding that there is a. 05 cent fee to deliver this gas. we need a method to accomodate the volume flow on hpl at meter 415 and 9643. this gas will not be reflected on trans. usage ticket # 123395 and # 95394 since it is not being nominated from a processing agreement. we either, need to input a point nom (on hpl or krgp) at these meters to match the nom at meter 9610, or a deal for purchase and sale (if king ranch is taking title to the gas) needs to be input into sitara at these meters with the appropriate rate. i have currently input a point nom on krgp to accomodate this flow, so we can divert some of this gas to the current interstate sales that are being made. 2. forest oil-there is a processing agreement that will accomodate flow from the meter (6396) into king ranch. it is my understanding that this agreement was originally setup until texaco had their own processing agreement. i need confirmation that the gas from this meter should be nominated on contract # (96006681) and that this agreement should have been reassigned to hplc. (it is currently still under hplr). if this gas is not nominated on the above transport agreement, then once again we need to accomodate the flow volume on the hpl pipe with either a point nom or a sitara deal at meters 415 and 9643."
0011.2001-06-28.SA_and_HP 1 " urgent business proposal," " mrs. regina rossman. # 263 sandton city johannesburg, south africa. e-mail: joel_rosel @ mail. com attn: alhaji with due respect, trust and humility, i write you this proposal, which i believe, would be of great interest to you. i am mrs. regina rossman, the wife of late mr. joseph rossman of blessed memory, before forces loyal to major johnny paul koromah killed my husband; he was the director general of gold and mining corporation (g. d. m. c.) of sierra leone. my husband was one of the people targeted by the rebel forces. on the course of the revolution in the country, prominent people were hijacked from their homes to an unknown destination. two days before his death, he managed to sneak a written message to us, explaining his condition and concerning one trunk box of valuables containing money, which he concealed under the roof. he instructed me to take our son and move out of sierra leone, immediately to any neighboring country. the powerful peace keeping force of the (ecomog intervened to arrest the situation of mass killings by the rebels, which was the order of the day. eventually, it resulted into full war, i became a widow overnight, helpless situation, without a partner at the moment of calamity, and every person was running for his life. my and i managed to escape to south africa safely with the box and some documents of property title. the cash involved inside the box was us$ 30 million (thirty million united states dollars). due to fear and limited rights as a refugee, i deposited the items with a private security company in order not to raise an eyebrow over the box here in south africa in my son's name joel r. rossman. be informed that the real content of the box was not disclosed. meanwhile, i want to travel out of south africa entirely with this money for investment in your country because of political and economic stability and for future benefit of my child. 
i want you to assist us claim this box from the security company and get the money into your private account in your country so that we can invest the money wisely. we have in mind to establish a rewarding investment and good relationship with you. concerning the money, we are prepared to give you reasonable percentage of 30% for your kind assistance. also, we have decided to set aside 5% of the total sum for expenses that might be incurred by the parties in the course of the transfer both locally and externally. for the interest of this business, do not hesitate to contact my son mr. joel r. rossman on the above e-mail address immediately you receive this message for more information and to enable us proceed towards concluding all our arrangements. no other person knows about this money apart from my son and i. we await your most urgent response. please we need your fax/phone numbers for esiear communication. thanking you for your co-operation and god bless you. best regard, mrs. regina rossman. http:// xent. com/mailman/listinfo/fork"
0011.2001-06-29.SA_and_HP 1 your membership exchange " content-type: text/plain; charset = iso-8859-1 your membership exchange, issue # 423 (june 28,2001) your place to exchange ideas, ask questions, swap links, and share your skills! you are a member in at least one of these programs -you should be in them all! bannersgomlm. com profitbanners. com cashpromotions. com mysiteinc. com timshometownstories. com freelinksnetwork. com myshoppingplace. com bannerco-op. com putpeel. com putpeel. net sellinternetaccess. com be-your-own-isp. com seventhpower. com today's special announcement: i'll put your ad on 2,000 sites free! free this week only, just for our subscribers! learn the secrets of marketing online on this global free teleseminar. limited lines available, only three time slots available... reserve today. you will not be disappointed! i'll be your personal host. we operate several sites, all successful. i'll teach you what to do and how to do it! click here: free teleseminar michael t. glaspie-founder we apologize for any technical problems you may have had with our last mailing, we are working hard to ensure that such problems will not occur again. in this issue: >>q & a questions: -using pictures as links? answers: -unblocking sites so i can access? z. oconan: access using a proxy g. bendickson: using a proxy to visit blocked sites >>member showcases >>member * reviews * -sites to review: # 124, # 125 & # 126! -site # 123 reviewed! -vote on your favorite website design! >>>>>>> questions & answers do you a burning question about promoting your website, html design, or anything that is hindering your online success? submit your questions to myinputare you net savvy? have you learned from your own trials and errors and are willing to share your experience? look over the questions each day, and if you have an answer or can provide help, post your answer to myinput @ aeopublishing. 
com be sure to include your signature file so you get credit (and exposure to your site). questions: from: moviebuff @ cliffhanger. com subject: using pictures as links i'm changing my website and want to use pictures for the links to other pages. but, someone told me i should still put a'click here'underneath all the pictures. to me, this removes all purpose of using the pictures. how can i get across that you click on the pictures to get to other pages without coming right out and saying so? for example, i have a page with actor and actress information and just want to have a picture of my favorite stars to click on and change the picture every couple of days. mark moviebuff @ cliffhanger. com answers: from: zaak-zaako @ linkpaks. com subject: access using a proxy > from: cj (cj 5000 @ post. com) > subject: unblocking sites so i can access? (issue # 422) --> i am currently living in a place where the isp is blocking 50% of the web. i was told by someone that you can unblock these web sites by using a proxy, but i don't know what that means. i am wondering is there a way to get access to these sites?-- a proxy is easy to use if you use someone elses, they can be tricky to setup yourself. i have had very good results with surfola. basically you surf to their servers and then from there you surf through/from their servers. i have several places i surf from that block content. surfola easily bypasses them! its also free! you can also make money with them but i just use them to bypass anal retentive isp/corporate providers and because they allow me to surf anonymously! i have a detailed right-up on them at http:// linkpaks. com/paidtosurf/surfola. php see there for more info. if anything is not clear feel free to ask. (email & sign-up links on http:// linkpaks. com/paidtosurf/surfola. php page) zaak oconan netrepreneur http:// linkpaks. com-surf & earn guides http:// linktocash. com-internet businesses for under$ 100 http:// iteam. 
ws-the hottest product on the net today + + + + next answer-same question + + + + from: wyn publishing-wynpublishing @ iname. com subject: using a proxy to visit blocked sites > from: cj (cj 5000 @ post. com) > subject: unblocking sites so i can access? (issue # 422) cj, two such sites that allows proxy surfing are: http:// www. anonymise. com and http:// www. anonymizer. com. however, if you cannot get to that site then obviously it will not work. also note, that if your isp is dictating to you which sites you may or may not visit, then it is time to change providers! gregory bendickson, wyn publishing over 28 free traffic exchange services reviewed in a fully customizable e-book. download yours free and get multiple signups while learning the art of free web traffic! http:// www. trafficmultipliers. com >>>>>>> website showcases examine carefully-those with email addresses included will trade links with you, you are encouraged to contact them. and, there are many ways to build a successful business. just look at these successful sites/programs other members are involved in... "" it's the most d-a-n-g-e-r-o-u-s book on the net "" email 20,000 targeted leads every single day! slash your time online to just 1-2 hours daily! build 11 monthly income streams promoting one url! start building your business- not everyone elses! http:// www. roibot. com/w. cgi? r 8901_bd_shwc is your website getting traffic but not orders? profile, analyze, promote, and track your site to get the results you want. fully guaranteed! free trial available! http:// www. roibot. com/w. cgi? r 4887_saa over 7168 sites to place your free ad! get immediate free exposure on thousands of sites. plus two free programs that will automatically type your ad for you! pay one time, promote all the time. if you have a product, service, opportunity and/or quality merchandise that appeals to people worldwide, reach your target audience! 
for a fraction of what other large newsletters charge you can exhibit your website here for only$ 8 cpm. why?... because as a valuable member we want you to be successful! order today-exhibits are limited and published on a first come, first serve basis. http:// bannersgomlm. com/ezine >>>>>>> member * reviews * visit these sites, look for what you like and any suggestions you can offer, and send your critique to myinput @ aeopublishing. com and, after reviewing three sites, your web site will be added to the list! it's fun, easy, and it's a great opportunity to give some help and receive an informative review of your own site. plus, you can also win a chance to have your site chosen for a free website redesign. one randomly drawn winner each month! sites to review: site # 124: http:// www. bestwaytoshop. com dale pike rhinopez @ aol. com site # 125: http:// www. wedeliverparties. com dawn clemons dclemons 7 @ home. com site # 126: http:// www. eclassifiedshq. com carol cohen opportunity @ aol. com site reviewed: comments on site # 123: http:// netsbestinfo. homestead. com/nbi. html dennis damorganjr @ yahoo. com ~ ~ ~ ~ i reviewed site 123 and found the size of the font to be too aggressive and i don't like mustard yellow for a background. also in the second or third paragraph is a misspelled word which should be "" first-come "" not as shown on the page. i feel a sample of the type of information offered in the newsletter should be displayed on the page as well as a sample of the free ads offered on the site. i will probably submit a free ad just to see the content of the newsletter. as has been mentioned many times, some information about the person doing the page is always good. we need some information about why this newsletter will be worthwhile to subscribe to. ~ ~ ~ ~ dennis-i took a look at your site, and have recommendations for improving your page. 1-i use internet explorer and view web pages with my text size set to ' smaller'. 
the text you used was quite large, like a font used for a heading for all the text. by making the text size smaller it wouldn't feel like you were screaming at me. also, the background was just too much. 2-there were spelling errors in the text. often it might be difficult for you to spot these yourself if you see the page all the time, but have a friend look it over. spelling errors make the page look unprofessional. 3-offer a sample of your newsletter so people can see what it looks like before they subscribe. also, if you are asking for a person to give you their email address, you must have a privacy policy and let them know they can unsubscribe. 4-think about adding a form for people to subscribe to the newsletter. it looks more professional than just offering an email address to send to. 5-offer information about yourself, and the kinds of information your newsletter contains. maybe extend your site to include back issues or an archive to see what information you have offered in the past. 6-build another page for'sponsoring info'and put prices on that page. remove all pricing information from the home page. ~ ~ ~ ~ i feel that the background is a little too bold and busy for the text. i also believe that the text is too large which makes it difficult to read quickly, and forces the reader to scroll down unnecessarily. i noticed some spelling errors, and i think that a link to the classifieds site should be provided, and online payments should be accepted. a site that sells advertising should have advertisments on it! ~ ~ ~ ~ this is a very clear site with nothing interfering with the message. i did not like the background colour, however that is personal, it did not detract from the information. i was tempted to sign up for the newsletter but would have liked a link to see a current issue. there was an error in the wording (a word missed) which needs correction and i think the fonts could be smaller. overall a non-confusing site which makes a nice change. 
* cheers * ~ ~ ~ ~ could use a better background and the fonts are very large, there also are errors in the following paragraphs: "" first com-first serve "" and "" to place a sponsor advertisement, send your to my email "" ~ ~ ~ ~ a single page site. it is necessary to subscribe to the webmaster's newsletter to see what he's doing, and it doesn't seem to me to be a way to get people to visit. i wouldn't, for example. he claims to have lots of tidbits of information that, he says, we probably didn't know, and this is possible, but in my opinion, he would be better served if he at least put some of the things out there for all to see-when the appetite, so to speak, if he want people to subscribe. as it is, i would not bother. ~ ~ ~ ~ what does one expect from a site like netsbestinfo? some useful resources and some useful tips and also some forms of easy advertisement on the net. but what we get here is a newsletter with the owner (whose email reads damorgarjr @ yahoo. com) asking us to subscribe us to his newsletter for a free 4-line ad. he also tells of paid category of advertisements. this is all we get from a site which has a grand title. even the information about the newsletter is hardly impressive and is presented in about 35-to-40 points size which gets difficult to read. ~ ~ ~ ~ a neat enough site but the background could be a little hard on the eyes. there is only really one problem with this page-its just an advertisement for a newsletter. no, scratch that, its an advertisement to place free ads in a newsletter. a bold enough move perhaps but i learned hardly anything about the newsletter itself and immediately started worrying about getting a flood of ads to my email account so i didn't even subscribe. presumably you'd want to get people to sign up so might i suggest splitting the page into the newsletter itself, perhaps a sample issue, a privacy policy and a promise not to drown in ads and then click for more info on your free ads. 
________________________________________ vote on your favorite website design! help out the winner of the free website redesign by voting for your favorite! you can help out teddy at links 4 profit. com by taking a look at his site, then checking out the three new layouts jana of akkabay designs akkabay. com has designed specifically for him. after you've visited all three, vote for your favorite. to make this as easy as possible for you, just click on the e-mail address that matches your choice-you do not need to enter any information in the subject or body of the message. i have included a note from jana, and the links to teddy's current site along with the three new designs: > from jana: the pages have been created as non-frame pages although with minor modification, the pages could be adapted for use in a frames environment please take a look at the existing site: http:// www. links 4 profit. com here are the 3 redesigns: vote for this design: designl @ aeopublishing. com vote for this design: design 2 @ aeopublishing. com vote for this design: design 3 @ aeopublishing. com you will have all of this week to vote (through june 29), and we'll list the favorite and most voted for layout next week. teddy of course will be able to choose his favorite, and colors, font style/size, backgrounds, textures, etc, can all easily be changed on the "" layout "" that he likes. free website re-designs and original graphics are provided to fln showcase winners courtesy of akkabay designs. http:// akkabay. com if you have any questions about how this works or how you can participate, please email amy at moderator moderator: amy mossel posting: myinput @ aeopublishing. com send posts and questions (or your answers) to: myinput @ aeopublishing. com please send suggestions and comments to: moderator @ aeopublishing. com to change your subscribed address, send both new and old address to moderator @ aeopublishing. com see below for unsubscribe instructions. 
copyright 2001 aeopublishing -----end of your membership exchange this email has been sent to jm @ netnoteinc. com at your request, by your membership newsletter services. visit our subscription center to edit your interests or unsubscribe. http:// ccprod. roving. com/roving/d. jsp? p = oo & id = bd 7 n 7877. a 4 dfur 67 & m = bd 7 n 7877 charset = iso-8859-1 your membership exchange, issue # 423 june 28,2001 this email was sent to jm @ netnoteinc. com, at your request, by your membership newsletter services. visit our subscription center to edit your interests or unsubscribe. view our privacy policy. powered by "
0011.2003-12-18.GP 1 sup. er cha. rge your m. an hood today jvbe kfbtyra xes " hello, generic and super viagra (cialis) available online! most trusted online source! cialis or (super viag) takes affect right away & lasts 24-36 hours! for super viagra click here generic viagra costs 60% less! save a lot of money. for viagra click here both products shipped discretely to your door not interested? dycmpf s uuz biwven"
0011.2004-08-01.BG 1 dicine site on the net. " hello! nothing sharpens sight like envy. nature should have been pleased to have made this age miserable, without making it also ridiculous. searching for medication on the net? milestone anheuser we ` ve got anything you will ever want. pibrochs treasonous free claiis sample with any order! arthur convincible tithable pilocystic initializes there are only two ways of getting on in the world: by one's own industry, or by the stupidity of others. my conscience aches but it's going to lose the fight. peace is the first thing the angels sang."
0012.1999-12-14.farmer 0 re: entex transistion " thanks so much for the memo. i would like to reiterate my support on two key issues: 1). thu-best of luck on this new assignment. howard has worked hard and done a great job! please don't be shy on asking questions. entex is critical to the texas business, and it is critical to our team that we are timely and accurate. 2). rita: thanks for setting up the account team. communication is critical to our success, and i encourage you all to keep each other informed at all times. the p & l impact to our business can be significant. additionally, this is high profile, so we want to assure top quality. thanks to all of you for all of your efforts. let me know if there is anything i can do to help provide any additional support. rita wynne 12/14/99 02:38:45 pm to: janet h wallis/hou/ect @ ect, ami chokshi/corp/enron @ enron, howard b camp/hou/ect @ ect, thu nguyen/hou/ect @ ect, kyle r lilly/hou/ect @ ect, stacey neuweiler/hou/ect @ ect, george grant/hou/ect @ ect, julie meyers/hou/ect @ ect cc: daren j farmer/hou/ect @ ect, kathryn cordes/hou/ect @ ect, rita wynne/hou/ect, lisa csikos/hou/ect @ ect, brenda f herod/hou/ect @ ect, pamela chambers/corp/enron @ enron subject: entex transistion the purpose of the email is to recap the kickoff meeting held on yesterday with members from commercial and volume managment concernig the entex account: effective january 2000, thu nguyen (x 37159) in the volume managment group, will take over the responsibility of allocating the entex contracts. howard and thu began some training this month and will continue to transition the account over the next few months. entex will be thu's primary account especially during these first few months as she learns the allocations process and the contracts. howard will continue with his lead responsibilites within the group and be available for questions or as a backup, if necessary (thanks howard for all your hard work on the account this year!). 
in the initial phases of this transistion, i would like to organize an entex "" account "" team. the team (members from front office to back office) would meet at some point in the month to discuss any issues relating to the scheduling, allocations, settlements, contracts, deals, etc. this hopefully will give each of you a chance to not only identify and resolve issues before the finalization process, but to learn from each other relative to your respective areas and allow the newcomers to get up to speed on the account as well. i would encourage everyone to attend these meetings initially as i believe this is a critical part to the success of the entex account. i will have my assistant to coordinate the initial meeting for early 1/2000. if anyone has any questions or concerns, please feel free to call me or stop by. thanks in advance for everyone's cooperation........... julie-please add thu to the confirmations distributions list"
0012.1999-12-14.kaminski 0 re: new color printer " monday will be perfect! location-ebl 944 b r. c. 0011 co. # 100038 thanks kevin moore ----------------------forwarded by kevin g moore/hou/ect on 12/14/99 10:44 am--------------------------- enron technology from: lyn malina 12/14/99 09:22 am to: kevin g moore/hou/ect @ ect cc: subject: re: new color printer i will order today for delivery on monday, unless you need faster delivery. please advise co/rd to charge against. thanks lyn kevin g moore 12/14/99 09:21 am to: lyn malina/hou/ect @ ect cc: subject: re: new color printer ----------------------forwarded by kevin g moore/hou/ect on 12/14/99 09:17 am--------------------------- kevin g moore 12/14/99 08:13 am to: vince j kaminski/hou/ect @ ect, mike a roberts/hou/ect @ ect cc: subject: re: new color printer yes! right away, please also let me know the e. t. a. thanks, lyn kevin moore"
0012.2000-01-17.beck 0 global risk management operations " sally, congratulations. ----------------------forwarded by cindy olson/corp/enron on 01/17/2000 09:07 pm--------------------------- from: rick causey 01/17/2000 06:04 pm sent by: enron announcements to: all enron worldwide cc: subject: global risk management operations recognizing enron , s increasing worldwide presence in the wholesale energy business and the need to insure outstanding internal controls for all of our risk management activities, regardless of location, a global risk management operations function has been created under the direction of sally w. beck, vice president. in this role, sally will report to rick causey, executive vice president and chief accounting officer. sally , s responsibilities with regard to global risk management operations will mirror those of other recently created enron global functions. in this role, sally will work closely with all enron geographic regions and wholesale companies to insure that each entity receives individualized regional support while also focusing on the following global responsibilities: 1. enhance communication among risk management operations professionals. 2. assure the proliferation of best operational practices around the globe. 3. facilitate the allocation of human resources. 4. provide training for risk management operations personnel. 5. coordinate user requirements for shared operational systems. 6. oversee the creation of a global internal control audit plan for risk management activities. 7. establish procedures for opening new risk management operations offices and create key benchmarks for measuring on-going risk controls. each regional operations team will continue its direct reporting relationship within its business unit, and will collaborate with sally in the delivery of these critical items. 
the houston-based risk management operations team under sue frusco , s leadership, which currently supports risk management activities for south america and australia, will also report directly to sally. sally retains her role as vice president of energy operations for enron north america, reporting to the ena office of the chairman. she has been in her current role over energy operations since 1997, where she manages risk consolidation and reporting, risk management administration, physical product delivery, confirmations and cash management for ena , s physical commodity trading, energy derivatives trading and financial products trading. sally has been with enron since 1992, when she joined the company as a manager in global credit. prior to joining enron, sally had four years experience as a commercial banker and spent seven years as a registered securities principal with a regional investment banking firm. she also owned and managed a retail business for several years. please join me in supporting sally in this additional coordination role for global risk management operations."
0012.2000-06-08.lokay 0 what do you want to know today? " "" a man with a new idea is a crank until he succeeds. "" -mark twain innovation, itself, is nothing new. the word comes to us from the latin innovatus, which is a good indication of how long the concept has been around. people have been searching for the "" next big thing "" for thousands of years. we have quite a tradition of innovation here at enron. although consistent innovation may sound like an oxymoron, we pride ourselves on consistently outpacing our peers with innovative ideas. how do you think enron can maintain its edge into the new century? what will be our "" next big thing? "" put your best foot forward, visit emeet and share your ideas in "" creativity and innovation "" that will keep enron at the top."
0012.2001-02-09.kitchen 0 travel " i will be out of the office this afternoon (friday) until wednesday night. (i will be available on my cell phone 713 306-6207) if you have any questions please feel free to contract my team for questions. i have been "" cross training "" both individuals so that we can be more effective in addressing var and trade related questions. thanks, frank bharat khanna (gas) ext. 54804 lacrecia davenport ext. 35782"
0012.2003-12-19.GP 1 NA great specials today on: tramadol phentermine 30 mg 60 more products to choose from. http:// wsc. settingt 5. com/fp
0013.1999-12-14.farmer 0 entex transistion " the purpose of the email is to recap the kickoff meeting held on yesterday with members from commercial and volume managment concernig the entex account: effective january 2000, thu nguyen (x 37159) in the volume managment group, will take over the responsibility of allocating the entex contracts. howard and thu began some training this month and will continue to transition the account over the next few months. entex will be thu's primary account especially during these first few months as she learns the allocations process and the contracts. howard will continue with his lead responsibilites within the group and be available for questions or as a backup, if necessary (thanks howard for all your hard work on the account this year!). in the initial phases of this transistion, i would like to organize an entex "" account "" team. the team (members from front office to back office) would meet at some point in the month to discuss any issues relating to the scheduling, allocations, settlements, contracts, deals, etc. this hopefully will give each of you a chance to not only identify and resolve issues before the finalization process, but to learn from each other relative to your respective areas and allow the newcomers to get up to speed on the account as well. i would encourage everyone to attend these meetings initially as i believe this is a critical part to the success of the entex account. i will have my assistant to coordinate the initial meeting for early 1/2000. if anyone has any questions or concerns, please feel free to call me or stop by. thanks in advance for everyone's cooperation........... julie-please add thu to the confirmations distributions list"
0013.1999-12-14.kaminski 0 re: new color printer " this is the color printer that is being ordered. here is the info. that i needed. thanks kevin moore ----------------------forwarded by kevin g moore/hou/ect on 12/14/99 08:19 am--------------------------- enron technology from: lyn malina 12/14/99 08:09 am to: kevin g moore/hou/ect @ ect cc: subject: re: new color printer kevin: the color printer we currently order is the 4500 n for$ 2753. 00. please let me know if this is the one you would like to order. thanks lyn kevin g moore 12/14/99 06:29 am to: lyn malina/hou/ect @ ect cc: subject: new color printer ----------------------forwarded by kevin g moore/hou/ect on 12/14/99 06:29 am--------------------------- kevin g moore 12/14/99 06:27 am to: shirley crenshaw/hou/ect @ ect, vince j kaminski/hou/ect @ ect, mike a roberts/hou/ect @ ect cc: subject: new color printer we are in need of a new color printer. we are also in the process of moving to the 19 th floor. we need the color printer a. s. a. p. if you would please, i need information concerning this matter whereby, we can get the printer ordered and delivered to our new location. thanks kevin moore"
0013.2001-04-03.williams 0 re: monday blues " good morning. i'm glad to hear that you are having a better day today. me, too so far. yeah, i stayed last night until like 7:45 and finished up that stuff with mike etringer. so today should be a bit more chill for me which is awesome. anyhow, i hope nothing blows up over there for you. maybe we can go to get some coffee later or something. did you end up going out last night for some beers? i watched alli mcbeal and ate dinner. it was pretty exciting."
0013.2001-06-30.SA_and_HP 1 your membership community charset = iso-8859-1 " your membership community & commentary (june 29,2001) it's all about making money information to provide you with the absolute best low and no cost ways of providing traffic to your site, helping you to capitalize on the power and potential the web brings to every net-preneur. ---this issue contains sites who will trade links with you!--- ------------- in this issue ------------- 32 easy ways to breath new life into any webpage member showcase are you ready for your 15 minutes of fame? win a free ad in community & commentary | | | =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=>> today's special announcement: | | | =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=>> we can help you become an internet service provider within 7 days or we will give you$ 100. 00!! click here we have already signed 300 isps on a 4 year contract, see if any are in your town at: click here you are a member in at least one of these programs -you should be in them all! bannersgomlm. com profitbanners. com cashpromotions. com mysiteinc. com timshometownstories. com freelinksnetwork. com myshoppingplace. com bannerco-op. com putpeel. com putpeel. net sellinternetaccess. com be-your-own-isp. com seventhpower. com =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 32 easy ways to breath new life into any webpage =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= it's true. ask the ceos of yahoo. com and america online. they'll tell you it's true. send an email to terry dean or allen says or jim daniels and ask them about it. they'll agree 100% that it's true. don't just take my word for it. in fact, you can contact any of the 10,000 folks online selling web marketing resources, and they will all tell you emphatically, without question, no doubts whatsoever, that it is absolutely true. it's true. anyone can earn a living online. really, they can. but, it takes several very important components to join the 5% who are successful on the web. 
one of those necessities is a website. now, your website does one of two things... ... it either makes the sale, or it doesn't. for 95% of online businesses, their websites simply do not produce results. and there is a very simple reason for poor performance. poor sales letters. does your website convince people to make a purchase? if not, here are 32 easy ways to breathe new life into your sales letter... 1) write your sales letter with an individual in mind. go ahead and pick out someone, a real person to write your sales letter to. doesn't matter if it is grandma or your next door neighbor or your cat. write your sales letter just like you are writing it to them personally. why? because when your potential customer reads, it then it will seem personal, almost like you wrote it with them in mind. too often, sales letters are written as if they were going to be read to an audience rather than one person. keep your sales letters personal, because one person at a time is going to read them. 2) use an illustration to get your point across. in my sales letters i have told stories about my car stalling on the side of the road to illustrate the idea that we must constantly add the fuel of advertising to keep our businesses running. i have compared the hype of easily making millions online to the chances of me riding bareback across montana on a grizzly bear. leads have read of how getting to the top of an oak tree relates to aggressively marketing online. people love a good story that pounds home a solid message. tell stories that illustrate a point you are trying to make. emphasize a benefit by sharing an account from the "" real world. "" it effectively creates interest and further establishes the point. 3) create an interest in the reader from the very first line. your first line of the sales letter should immediately create a desire in the reader to want to know more. go back to the beginning of this article. the first words were, "" it's true. 
"" i can guarantee you that either consciously or subconsciously you thought "" what's true? "" immediately, your mind wanted to know what i was talking about. before you even knew it you were right here, 8 paragraphs into this article. carefully craft your first line. if you can immediately get them wanting to know more, you've got a winner. 4) use bullets. people spend a lot of time reading bulleted lists. in fact, they often reread them over and over. use bulleted lists to stress the benefits of your product or service, to spell out exactly what is included in your offer. use an extra space in between each bullet to really highlight each line and create a sense of more length to the list. 5) launch into a bullet list immediately. shortly after your opening line, immediately give the reader a bullet list of benefits to absorb. hit them with your best shot. pull out the big guns and stress "" just a few of "" the most important things the reader will discover. by offering a killer list early in your sales letter, you will automatically create a desire in the reader to continue through your ad copy. after all, if they are already interested after the first list of benefits, they will certainly be open to finding out even more reasons why your product or service will aid them. 6) just let it all flow out. write down everything that enters your mind as you are writing your sales letter. you can edit it later. if you just sit and start writing everything you know about your product or service and how it will benefit your customer, you will be amazed at how much information floods your mind. write it all down. then read through it-you'll be able to add a lot more detail to many of the points. edit it after you have exhausted all of your ideas. 7) make your sales letter personal. make sure that the words "" you "" and "" your "" are at least 4: 1 over "" i "" and "" my. "" your ad copy must be written about your customer not yourself. 
i'm not sure how the old advertising adage goes, but it's something like this, "" i don't care a thing about your lawn mower, i just care about my lawn. "" leads aren't interested in you or your products, they are interested in themselves and their wants and needs. when you are finished with your sales letter and have uploaded it to a test webpage, run a check at http:// www. keywordcount. com and see what the ratio between "" you "" and "" your "" versus references to "" i, "" "" me, "" "" my, "" etc. it's a free service. make sure it's at least 4: 1 in favor of the customer. 8) write like you speak. forget all of those rules that your grammar teacher taught you. write your sales letters in everyday language, just like you would talk in person. don't be afraid to begin sentences with "" and "" or "" because. "" don't worry about ending a sentence with a preposition. write like you speak. your sales letter isn't the great american novel, so don't write it like you are ernest hemingway. 9) use short paragraphs consisting of 2-4 sentences each. long copy works... but long paragraphs do not. use short paragraphs that lead into the next paragraph. don't be afraid to use short sentences. like this one. or this. see what i mean? shorter paragraphs keep the interest of the reader. longer paragraphs cause eye strain and often force the reader to get distracted. 10) stress the benefits, not the features. again, readers want the burning question answered, "" what's in it for me? "" what need is it going to meet? what want is it going to fill? how is your product or service going to be of value or benefit to the reader? spell it out. don't focus on the features of your product or service, but rather how those features will add value to the life of your reader. for example: if you are selling automobile tires, you may very well have the largest assortment of tires in the world, but who cares? i don't care about your selection. 
but, i do care about keeping my 3-month-old baby girl safe while we are traveling. so, instead of focusing on your selection, you focus on the fact that my baby girl can be kept safe because you have a tire that will fit my car. you're not selling tires, you're selling safety for my family. stress the benefits, not the features. 11) keep the reader interested. some sales letters read like they are a manual trying to explain to me how i can perform some complicated surgery on my wife. they are filled with words and phrases that i need a dictionary to understand. unless you are writing to a very targeted audience, avoid using technical language that many readers might not understand. keep it simple, using words, language and information that are easy to understand and follow. 12) target your sales letter. when you are finished with your final draft of the sales letter, target it to a specific audience. for example: if you are selling a "" work at home "" product, then rewrite the sales letter by adding words in the headlines and ad copy that are targeted towards women who are homemakers. then, rewrite the same sales letter and target it to college students. write another letter targeting senior citizens. still another could be written to high school teachers wanting to earn extra income during summer vacation. the possibilities are endless. all you need to do is add a few words here and there in your ad copy to make it appear that your product or service is specifically designed for a target audience. "" work only 5 hours a week, "" would become "" college students, work only 5 hours a week. "" your sales letter is now targeted. upload all of the sales letters to separate pages on your website (you could easily target 100's of groups). then, simply advertise the targeted pages in targeted mediums. you could advertise the "" college students "" page in a campus ezine. the "" senior citizens "" page could be advertised at a retirement community message board. 
by creating these targeted sales letters, you can literally open up dozens of new groups to sell your existing product to. and, in their eyes, it looks like the product was a match made for them. 13) make your ad copy easy to follow. use short sentences and paragraphs. break up the sales letter with attention grabbing headlines that lead into the next paragraph. one thing that i have always found to work very well in sales letters... ... is to use a pause like this. start the sentence on one line, leaving the reader wanting to know more, and then finishing up on the next line. also, if you are going to use a sales letter that continues on several different pages of your website, use a catchy hook line at the end of each page to keep them clicking. "" let's get you started down the road to success, shall we? click here to continue. "" 14) use similes and metaphors for effect. when the customer purchases your product, they will generate "" a flood of traffic that would make noah start building another ark. "" if they do not order today, then they will "" feel like a cat that let the mouse get away. "" use words to create a picture in the readers'mind. when you think of superman, what comes to mind? immediately, we remember that he is "" faster than a speeding bullet. "" "" more powerful than a locomotive. "" "" able to leap tall buildings in a single bound. "" see how word pictures stick in our minds? 15) focus on one product or service. don't try to sell your customer multiple products at the same time. it only confuses the reader. keep your ad copy directed at one specific product or service. then, use other products and services as back-end products. 16) make it stand out. don't kid yourself. there are hundreds, maybe thousands out there on the web doing the same thing you are doing. how will you stand out among the crowd? your sales letter must inject personality. it must breathe of originality. your product or service is different. it's not like all of the rest. 
it is unique. right? your sales letter must separate you from the competition. it must create a feeling of "" you won't find this anywhere else. "" 17) be believable. "" earn$ 54,000 in the next 24 hours!!! "" delete. good grief, do they think i am an idiot or something? get real. don't make outrageous claims that are obviously not the truth. you'll ruin your reputation. let me tell you a simple universal fact that cannot be reversed. once you have been branded a liar, you will never be anything but a liar. it doesn't matter if you launch the most respectable, honest business available anywhere, people will always have doubt because they remember the crazy stuff you've said before. be believable. don't exaggerate, mislead, stretch or distort the truth. 18) be specific. don't generalize your information, but rather be exact. instead of "" over 100 tips for losing weight "" use "" 124 tips for losing weight. "" by generalizing information, it creates doubt and questions in the reader's mind. "" what am i really getting here? does he even know? "" when you use specific information, the reader begins to think, "" this person must have counted. i know exactly what i can expect. "" "" platitudes and generalities roll off the human understanding like water from a duck, "" wrote claude hopkins in his classic book "" scientific advertising. "" "" they leave no impression whatsoever. "" 19) be complete. tell the reader everything they would want to know about your product or service. answer all of their questions, anything they would want to consider before making a purchase. think about it from their point of view. ask yourself, "" why wouldn't i buy this? "" then, address that in your sales letter. remove anything that would keep the reader from making the purchase. 20) use testimonials to boost your sales. share actual excerpts from what your current customers are saying about your product or service. 
many websites have an entire section or even a separate page that has endorsements and compliments listed. satisfied customers remove some of the doubt in the mind of the reader. "" if these people have found a lot of value and benefit in the product, then i probably will too. "" especially effective are testimonials from respected, well known "" authorities "" within your target field. 21) use headlines over and over throughout the sales letter. a headline isn't just relegated to the beginning of your ad copy. use them frequently-but don't overuse. a well- placed headline re-grabs the reader's attention, brings them deeper into the letter, and readies them for the next paragraph. you will want to spend as much time working on your headlines as you do the entire sales letter. they are that important. 22) avoid asking stupid questions. "" wouldn't you like to make$ 1,000, 000 a year? "" "" doesn't that sound great? "" "" would you like to be as successful as i am? "" avoid any question that insults the intelligence of your reader or makes them feel like they are inferior. 23) offer a freebie even if the customer doesn't buy. if the customer decides he or she isn't going to make a purchase, then you want to follow-up with them later to try to influence them to buy in the future. by offering a free item, you can request their email address in order to obtain the freebie. by doing this, you can now follow-up with the customer for a potential future sale. additionally, you can continue the sales process by having your ad copy, banners, flyers, etc. within the free item. and, of course, if your free item is a high quality, useful product or service which impresses the customer, they probably will be back as a customer soon. 24) use bonuses to overwhelm the reader. one of the things that i have found very effective in writing sales letters is to include bonus items that out-value the actual product i am offering. ginsu made this one famous. 
they were selling a set of steak knives, but before the commercial was finished, you had so many bonus items on the table it was hard to refuse. make sure you provide quality bonuses and not some worthless, outdated junk that damages the credibility of your main offer. 25) use connective phrases like "" but wait, there's more "" and "" but that's not all. "" these phrases effectively lead the reader from one paragraph to the next, particularly when the next paragraph is a bullet list of benefits, or leads into bonus items. again, the idea is more and more value and benefits to the reader. 26) always include a deadline. by including a deadline, you create a sense of urgency in the mind of the customer. "" if i don't order within 24 hours, then i won't get the bonuses. "" "" oh no, there are only 10 items remaining, i've got to hurry. "" let the customer know what they will be missing out on if they don't make the deadline. remember, they won't miss out on your products or bonuses, they will miss out on all of the benefits of your products. deadlines are very effective. every sales letter should have one. 27) tell them exactly how to order. be clear as to the order process. point them towards the order link. tell them what methods you offer. (i. e. credit cards, checks, etc.) make this process as simple and clear as can be. if it takes more than 2 steps, most people won't continue. 28) explain when the product will be delivered. how quickly will the order be processed? when will the order be available? let the customer know exactly what they can expect when they place their order. the more specific you can be here, the better. let them know that you have a system in place. "" operators are standing by. "" their order will be handled properly. tell them. 29) offer a money back guarantee. take away their last reason to hold back. offer a "" no questions asked "" 30 day guarantee. most people may not realize this, but in most cases, it's the law of the land. 
you are required to give them their money back if they are not satisfied with the product or service. since it's the law anyway, why not make it a benefit. let them know that they are purchasing your product or service risk-free. 30) instruct them to respond immediately. many people just need to read those words, "" act now! "" "" order today! "" "" click here to instantly place your order. "" you've got them this far, now tell them what you want them to do. get them to "" act fast! "" have you ever heard a mail order commercial on television that didn't prompt the viewer to order right way? 31) include a post script. people will always read the p. s. always. in fact, the p. s. is one of the most important parts of your sales letter. why? because in many cases the visitor at your website will scroll immediately down to the end of your page to see how much it is going to cost. a p. s. is a perfect place to recap your offer, so when they see your price tag, they will also see a very detailed description of what they will receive for their money. use your p. s. to restate your offer in detail. 32) include a second post script. you better believe if they read the first p. s., they will read a p. p. s. use this post script to remind them of the deadline or offer another bonus or point out some compelling factor that would make them want to order. i guarantee you they will read it. use these 32 tips and i guarantee you that you will see a significant increase in the amount of responses you receive from your sales letters. in fact, it would be impossible for your responses to not improve. copyright 2000 jimmy d. brown. all rights reserved worldwide. ------------------------------------- about the author... jimmy d. brown is helping average people get out of the rat-race and earn a full-time living online. 
for more details on firing your boss and creating your own internet wealth, visit us right now at: * special offer: join the profits vault through the above link and email me your receipt and you can have a free bonus copy of the terrific manual-how to profit from free ebooks guaranteed which i sell at: =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= member showcase =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= examine carefully-those with email addresses included will trade links with you... you are encouraged to contact them. there are many ways to build a successful business-just look at these successful sites programs other members are involved in... affiliates of the world! top rated affiliate programs, excellent business opportunities, great marketing resources and free advertising for you! visit the site to trade links. http:// www. affiliates. uk. com trade links-adrianbold @ affiliates. uk. com get insane amounts of traffic to your website. purchase 10,000 guaranteed visitors to your site and receive 5,000 free. more traffic = more money! less than 2 cents a visitor. space is limited. order now! http:// www. freepicklotto. com trade links-businessopps @ aol. com celebration sale! $ 99. 00 on casinos/sportsbetting sites, lingerie stores, gift stores, adult sites toy stores. mention ad # bmlm 99 to receive this special sale price. order now! http:// www. cyberopps. com/? = bmlm 99 just been released!! internet marketing guru corey rudl has just released a brand new version of his # 1 best-selling internet marketing course, "" the insider secret's to marketing your business on the internet "". a must have! so don't hesitate, visit.. http:// www. adminder. com/c. cgi? startbgmlmezine we have a 260 page catalog with over 3000 gift items for men, women, children-a gift for everyone. we show 100 gift items on our web site alone, with the catalog you have access to the rest. we also feel we have the best prices on the web. visit at http:// www.. net trade links-georgel 932 me @ yahoo. 
com stop smoking-free lesson!! discover the secret to stopping smoking. to master these powerful techniques, come to http:// www. breath-of-life. net for your free lesson. act now! p. s. tell someone you care about. trade links-jturco 3 @ hotmail. com if you have a product, service, opportunity or quality merchandise that appeals to people worldwide, reach your targeted audience! for a fraction of what other large newsletters charge you can exhibit your website here, and trade links for only$ 8 cpm. compare that to the industry average of$ 10-$ 15 cpm. why?... because as a valuable member we want you to be successful! order today- showcases are limited and published on a first come, first serve basis. for our secure order form, click here: http:// bannersgomlm. com/ezine =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= are you ready for your 15 minutes of fame? =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= one of the items we would like to include in community commentary we'll need from you! here is your chance to showcase your marketing strategies, and i need to hear from everyone who would like to'blow your own horn'and be in the spotlight on center stage. it's a great way to enjoy recognition and publicity for yourself and your business, and will allow all members to duplicate your success and avoid the same'setbacks'. please include... a little background history, how you got your start, a problem you have had and how you solved it, your greatest success, and any advice you have for someone beginning to market online. send your information to submit @ aeopublishing. com > with'center stage'in the subject block. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= win a free ad in community & commentary =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= to keep this interesting, how about this, every month we'll draw a name from the replies and that person will win one sponsorship showcase ad in the community commentary, for free. that's a value of over$ 700. 00! 
respond to each weekly survey, and increase your chances to win with four separate entries. question of the week (06/29/01)... no right or wrong answers, and just by answering you are entered to win a showcase ad-free! ~ ~ ~ what is the goal of your website? ~ ~ ~ sell mailto: one @ aeopublishing. com get leads mailto: two @ aeopublishing. com build branding mailto: three @ aeopublishing. com provide information mailto: four @ aeopublishing. com other mailto: five @ aeopublishing. com to make this as easy as possible for you, just click on the e-mail address that matches your answer-you do not need to enter any information in the subject or body of the message. * * add your comments! follow directions above and add your comments in the body of the message, and we'll post the best commentaries along with the responses. you will automatically be entered in our drawing for a free sponsorship ad in the community commentary. please respond only one time per question. multiple responses from the same individual will be discarded. =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= to change your subscribed address, send both new and old address to submit see below for unsubscribe instructions. please send suggestions and comments to: editor i invite you to send your real successes and showcase your strategies and techniques, or yes, even your total bombs, "" working together we can all prosper. "" submit for information on how to sponsor your membership community commentary visit: sponsorship showcase copyright 2001 aeopublishing. com email: yourmembership 2 @ aeopublishing. com voice: web: http:// www. aeopublishing. com this email has been sent to jm @ netnoteinc. com at your request, by your membership newsletter services. visit our subscription center to edit your interests or unsubscribe. http:// ccprod. roving. com/roving/d. jsp? p = oo & id = bd 7 n 7877. 
6 w 8 clu 67 & m = bd 7 n 7877 charset = iso-8859-1 in this issue 32 easy ways to breath new life into any webpage member showcase are you ready for your 15 minutes of fame? win a free ad in community & commentary today's special announcement: this email was sent to jm @ netnoteinc. com, at your request, by your membership newsletter services. visit our subscription center to edit your interests or unsubscribe. view our privacy policy. powered by "
0013.2004-08-01.BG 1 take the reins " become your employer. substantial profit processing money judgments. from anywhere. control when you want to work. a substantial number of our members earn 5,000 us to 12,000 us per mo. outstanding customer support and assistance. here for more info while the couple were apparently examining the strange device, rob started to his feet and walked toward them the crowd fell back at his approach, but the man and the girl were so interested that they did not notice himhe was still several paces away when the girl put out her finger and touched the indicator on the dial discontinue orange stad, and then mail stop 1. 200 b, followed by a rub a to rob's horror and consternation the big turk began to rise slowly into the air, while a howl of fear burst from the crowdbut the boy made a mighty spring and caught the turk by his foot, clinging to it with desperate tenacity, while they both mounted steadily upward until they were far above the city of the desert the big turk screamed pitifully at first, and then actually fainted away from frightrob was much frightened, on his part, for he knew if his hands slipped from their hold he would fall to his death "
0014.1999-12-14.kaminski 0 re: new color printer " sorry, don't we need to know the cost, as well. ----------------------forwarded by kevin g moore/hou/ect on 12/14/99 08:15 am--------------------------- kevin g moore 12/14/99 08:09 am to: shirley crenshaw/hou/ect @ ect, mike a roberts/hou/ect @ ect cc: subject: re: new color printer this information was also sent to it purchasing. i need to know what options we have and how soon it can be delivered. don't we need to know as well? before purchase. i also need a central location for this printer. thanks kevin moore sam mentioned hp 4500, i will check into it. ----------------------forwarded by kevin g moore/hou/ect on 12/14/99 08:05 am--------------------------- shirley crenshaw 12/14/99 07:55 am to: kevin g moore/hou/ect @ ect cc: subject: re: new color printer kevin: what kind of information do you need? i thought you were going to look at some colored printer literature. sam seemed to be aware of a colored printer that might work for us. ask him. i don't think we need anything as big as "" sapphire "". it will be located in your area on the 19 th floor. thanks! kevin g moore 12/14/99 06:27 am to: shirley crenshaw/hou/ect @ ect, vince j kaminski/hou/ect @ ect, mike a roberts/hou/ect @ ect cc: subject: new color printer we are in need of a new color printer. we are also in the process of moving to the 19 th floor. we need the color printer a. s. a. p. if you would please, i need information concerning this matter whereby, we can get the printer ordered and delivered to our new location. thanks kevin moore"
0014.1999-12-15.farmer 0 lst rev dec. 1999 josey ranch nom " fyi ----------------------forwarded by susan d trevino/hou/ect on 12/15/99 08:40 am--------------------------- bob withers on 12/14/99 05:11:06 pm to: susan d trevino/hou/ect @ ect cc: stretch brennan, kevin mclarney, "" taylor vance (e-mail) "" subject: lst rev dec. 1999 josey ranch nom susan: as we discussed, this change was missed but was discussed with you last week. i apologize for the omission. i am checking with my field personnel and anticipate a further reduction (due to well production decreases) effective 12/15 which i will send under a separate email for an "" intra-day "" change wednesday. here's revised december 1999 (effective 12/9/99) setup for josey: (using 1. 081 btu/mcf) * gas deliveries into hpl 12,300 mmbtu/d for kri (net reduction of 1,000 mmbtu/d) 12,300 mmbtu/d into hpl bob withers>< kcs energy, 5555 san felipe, suite 1200 houston, tx 77056 voice mail/page 713-964-9434"
0014.2001-02-12.kitchen 0 " correction--conference call on tuesday, february 13 (800-229-028" " 1) ssb conference call tuesday, february 13,2001 2:00 pm est beyond california: the power however, the markets foretell stronger prices across the country in 2001. we continue to recommend the power producers, based on both our power price and spark spread analyses, which show strengthening spark spreads in the united states, despite the record highs recently seen in the natural gas markets. we highlight calpine, nrg energy, and mirant. our volatility indices demonstrate a dramatic shift in historical volatility to the western hubs and away from the midwestern markets of 1998-99. we expect volatility to persist in the west, likely accompanied by higher volatility in the remainder of the country in 2001. this benefits energy merchants, and we highlight enron, dynegy, duke, and el paso. dial in 800-229-0281 us 706-645-9237 intl replay 800-642-1687 us 706-645-9291 intl reservation 735670 (replay until 2/15) hosted by: raymond niles director power & natural gas research salomon smith barney 212-816-2086 raymond c. niles power/natural gas research salomon smith barney (212) 816-2807 ray. niles @ ssmb. com s"
0014.2001-07-04.SA_and_HP 1 new accounts # 2 c 6 e " this is a mime message content-type: multipart/alternative; boundary = ""----=_nextpart_001_0080_01 bdf 6 c 7. fabaclbo "" content-type: text/plain; charset = "" iso-8859-1 "" content-transfer-encoding: quoted-printable * * * * * this is an html message! * * * * * content-type: text/html; charset = "" iso-8859-1 "" content-transfer-encoding: quoted-printable complete credit card processing systems for your business = 2 e interne = t-home based-mail order-phone order do you accept credit cards? your competition does! everyone approved-credit problems ok! approval in less than 24 hours! increase your sales by 300% start accepting credit cards on your website! free information, no risk, 100% confidential = 2 e your name and information will not be sold to thrid parties! home businesses ok! phone/mail order ok! no application fee, no setup fee! close more impulse sales! everyone approved! good credit or bad! to = apply today, please fill out the express form below = 2 e it contains all the information we need to get your account approved = 2 e for a = rea's that do not apply to you please put n/a in the box = 2 e upon receipt, we'll fax you with all of the all bank card application documents necessary to establish your merchant account = 2 e once returned we = can have your account approved within 24 hours = 2 e service industry standard us site inspection $ 50-$ 75 free shipping $ 50-$ 75 free warranty $ 10 per month = free sales receipts $ 10-$ 50 free fraud screening $ = 2 e 50-$ 1 = 2 eo 0 per transaction free amex set up $ 50-$ 75 free 24 hourhelp line $ 10 month free security bond $ 5000-$ 10,00 = 0 or more none this is a no obligation qualification form and is your first step to accepting credit cards = 2 e by filling out this form you will = not enter in to any obligations o = r contracts with us = 2 e we will use it to determine the best p = rogram to offer you based on the information you provide = 2 e you 
will be c = ontacted by one of our representatives within 1-2 business days to go over = the rest of your account set up = 2 e note: all information provided to us will remain = 100% confidential !! apply free with no risk! pleas = e fill out the express application form completely = 2 eincomplete information m = ay prevent us from properly processing your application = 2 e your full emai = l address: be sure to use your full address (i = = 2 ee = 2 e user @ domain = 2 ecom) your name: business name: = business phone = number: home phone num = ber: type of busine = ss: retail business mail order business internet based busines = s personal credi = t rating: excellent good fair poor how soon would = you like a merchant account? your info = rmation is confidential, it will not be sold or used for any other purpose, = and you are under no obligation = 2 e your information will be used solely for the purpose of evaluating = your business or website for a merchant account so that you may begin acce = pting credit card payments = 2 e list removal/opt-out option click herem "
0014.2003-12-19.GP 1 get g: eneric via-gra for a s low as$ 2. 50 per 50 mg her type http:// dutchess. reado 893. com/xm /
0014.2004-08-01.BG 1 today " hey, last week, jo and me were talking about my prbolem and he said that he tried this new thing from this site and it worked great for him. cowry avoid http:// ns 2. herbalonline. biz/en/10/? aff_id = 00450 footmen give me your thoughts on it and shoot me back an email and tell me what you found out if any. any input would be appreciated it. later, small flutter atreus absenteeism oriental stratagem hunt amigo attitudinal twice curio rental billion inclusive ruanda screwball birch walpole canary seward floe lisa monadic harriman capitol colloquy laborious expressway b's salaried beware delusive congratulatory ante."
0015.1999-12-14.kaminski 0 imperial capital-thursday schedule the following is the schedule for thursday's meeting with imperial capital. currently all meetings are scheduled in eb 2868. we are trying to arrange a different conference room and will let you know if we obtain one. 9:00 am-jim fallon-electricity 9:30 am-fred lagrasta-gas 10:00 am-lynda clemmons and david kistler-weather 10:30 am-ed ondarza-pulp and paper 11:00 am-stinson gibner-research 12 noon-lunch 1:00 pm-5:00 pm-discussion thanks in advance to all who will come to speak in the morning.
0015.1999-12-15.farmer 0 2 nd rev dec. 1999 josey ranch nom " ----------------------forwarded by susan d trevino/hou/ect on 12/15/99 08:41 am--------------------------- bob withers on 12/15/99 08:28:08 am to: susan d trevino/hou/ect @ ect cc: stretch brennan, kevin mclarney, ""'taylor vance (e-mail)'"" subject: 2 nd rev dec. 1999 josey ranch nom here's revised december 1999 (effective 12/15/99) setup for josey: (using 1. 081 btu/mcf) * gas deliveries into hpl 9,300 mmbtu/d for kri (net reduction of 3,000 mmbtu/d) 9,300 mmbtu/d into hpl bob withers>< kcs energy, 5555 san felipe, suite 1200 houston, tx 77056 voice mail/page 713-964-9434"
0015.2000-06-09.lokay 0 " tw weekly, 6-9-00" please see the attached file and let me know if you have any questions. ray stelly
0015.2001-02-12.kitchen 0 california update 2/12 " executive summary: the likelihood of there being an involuntary bankruptcy filing against the utilities appears to be greater than 60%. this is not only due to the circumstances surrounding the filed rate doctrine case mentioned below, but also because the 30-day cure period during which the utilities have not been paying their bills will end this week, increasing the likelihood that their ipp creditors will act against them. if the state loses the filed rate doctrine case today (which it is believed will happen) and there is an involuntary bankruptcy filing (or even the threat of one), this bail-out plan will be enacted quickly. 1. utilities vs. cpuc governor davis'attempt to delay the filed rate doctrine case will not succeed. the case will come before the judge on monday, february 12 th. the federal judge is expected to rule a summary judgment in favor of the utilities. however, the judge will not allow the utilities to collect the injunction release they are requesting ($. 01/kwh). this will be left to an appellate court. the decision not to allow the utilities to collect this cash could trigger an involuntary bankruptcy filing, by the smaller ipps (as noted in our reports last week) or by larger out-of-state generators such as duke, reliant, and dynegy (as noted in the press this morning). this is expected next week or the week after. 2. prospects for a bailout bill ab 18 x is effectively dead from lack of support. senator burton, despite his public refusals, is moving closer to agreeing to a utility bail-out. the statements by burton and the ca state treasurer are merely a negotiating position. they are more concerned about the possibility of a bankruptcy than they appear. for burton, this is because of his long association with labor unions; the unions oppose the utility bankruptcy. burton has been negotiating with consumer advocate harvey rosenfield so as not to get attacked by him. 
the deal burton is expected to arrange would be for: bonds to be issued by the utilities rather than the state, but with some kind of state support (but less than "" full faith and credit of the state of ca, "" which would not pass). this would amount to the securitization of an extra charge on power bills (e. g.$. 01, though the actual amount is not known). these bonds would be asset-backed securities, with payment receivable from rate payers. the term of these bonds is unknown; if the term is made quite long (e. g. 20 years), the associated rate increase could be very small. the state would purchase the utilities'transmission assets for a very high price. the amount of the extra charge on power bills will not be known until the price of the transmission assets is settled. if the state loses the filed rate doctrine case today (which it is believed will happen) and there is an involuntary bankruptcy filing (or even the threat of one), sources believe that this bail-out plan will be enacted quickly. as noted in an earlier report, the california legislature habitually does not act until things "" hit the wall. "" it is expected that the republicans in the legislature will follow burton's lead and support the bail-out plan. the assembly members in particular are not yet supportive of a plan of this nature. one moderate democratic legislator with whom our source spoke said that the opposition to a bail-out in her central valley district is "" 50 to 1. "" however, an involuntary filing (or the threat thereof) may be enough to trigger legislative support. it would allow the argument of an "" imminent threat "" to the people of the state of california. 3. consumer opposition harvey rosenfield is too short on cash to fight this plan and the associated rate increase with anything but a referendum. if the referendum fails, he intends to attack individual legislators (though not john burton, who reportedly has "" immunity "" from rosenfield). 
some legislators are thinking of voting for the bail-out plan, then supporting a referendum from rosenfield later. however, if the bail-out plan and rate increase described above is passed through the legislature as a bill (rather than put in place by the puc, for example), it cannot be reversed by a referendum. as additional insurance against rosenfield, by supporting the bonds issued under the plan, the state can argue that its credit would be impaired in the case of a referendum to repeal the plan. while it is not clear that this is a factual argument, it still might impede any referendum."
0015.2001-07-05.SA_and_HP 1 get the best rate on a home loan! " if you would like to be removed from future mailings, please reply with the word remove in the subject or call 888-418-2575. let lenders compete for your business! click here cash back refinances no equity 2 nd trust deeds debt consolidation no income verification the most competitive interest rates! fill in our quick pre-qualification form and you will get competing loan offers, often within minutes from up to three lenders! click here there is never any fee to consumers for using this service. copyright?ffffa 9 1999,2000 eworld marketing, inc. 888-418-2575 this is not a solicitation or offer to lend money. eworld marketing is not a lender, broker or other financial intermediary. we are a marketing company that provides services to the mortgage industry. "
0015.2003-12-19.GP 1 mr. uwe schmidt is a knave! don't buy any product from microsale! " dear friends, microsale sc kg, ltd, germany is a knave company and uwe schmidt is a big knave! we are cheated by microsale sc kg, ltd. remember, don't do any business with this company. don't buy any product from microsale or you will be cheated. this company has a bad reputation in germany and in other european countries, espcially in belgium and netherlands. here's the story: mr. uwe schmidt, ceo microsale (r) sc kg he is also an auditor, but he doesn't have any commercial morality. he always made cheated l/c to other companies. many companies have been cheated by him. please take care!!! his products have many problems, such as cd player and mp 3 player! it's the detailed information of this company: dahlienweg 6 d 52477 alsdorf, nrw germany, european union tel.-/ fax-box: + 49 89 1488230796 + 32 87 783518 + 32 87 783019 mobil: + 32 474 409055 email: microsale @ email. de microsale @ gmx. net uwe-schmidt-@ gmx. net web: http:// www. microsale. biz "
0016.1999-12-15.farmer 0 unify close schedule " the following is the close schedule for this coming month (year-end.) please keep in the mind the following key times.... unify to sitara bridge back 1:45 p.m. thursday, dec 30 th (all errors must be clear by this time) mass draft at 6 p.m. thursday evening, dec 30 th. accrual process begins friday morning, dec 31 st at 6:30 a.m. (if your group impacts the accrual, please ensure that the necessary people are available for support if needed, as this is an enron holiday.) please feel free to contact me should you have any questions. thank you, melissa x 35615"
0016.2001-02-12.kitchen 0 fw: meeting with jeff skilling " louise, per our conversation of last week, you might be interested in the following meetings. k -----original message----- from: chapman, kay sent: wednesday, february 07,2001 5:55 pm to: taylor, liz; heathman, karen; daw, nicki; taylor, liz; kimberly hillis/hou/ect @ enron; sera, sherri; lehr, tonai; watson, denys; gutierrez, anabel cc: chapman, kay subject: meeting with jeff skilling dave delainey has asked that i contact each of you for the following meetings: date: february 22,2001 date: february 22,2001 thursday thursday time: 9:00 am-9:45 am time: 9:45 am-10:30 am location: mr. skilling's office location: mr. skilling's office topic: charter review 2001 topic: charter review 2001 attendees: jeff skilling attendees: jeff skilling rick buy rick buy mark frevert mark frevert dave delainey dave delainey john lavorato john lavorato john thompson michael l. miller scott josey if you have any questions, please feel free to call me. thanks, kay 3-0643"
0016.2001-07-05.SA_and_HP 1 get the best rate on a home loan! " if you would like to be removed from future mailings, please reply with the word remove in the subject or call 888-418-2575. let lenders compete for your business! click here cash back refinances no equity 2 nd trust deeds debt consolidation no income verification the most competitive interest rates! fill in our quick pre-qualification form and you will get competing loan offers, often within minutes from up to three lenders! click here there is never any fee to consumers for using this service. copyright?ffffa 9 1999,2000 eworld marketing, inc. 888-418-2575 this is not a solicitation or offer to lend money. eworld marketing is not a lender, broker or other financial intermediary. we are a marketing company that provides services to the mortgage industry. "
0016.2001-07-06.SA_and_HP 1 your membership community charset = iso-8859-1 " your membership community & commentary (july 6,2001) it's all about making money information to provide you with the absolute best low and no cost ways of providing traffic to your site, helping you to capitalize on the power and potential the web brings to every net-preneur. ---this issue contains sites who will trade links with you!--- ------------- in this issue ------------- internet success through simplicity member showcase win a free ad in community & commentary | | | =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=>> today's special announcement: | | | =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=>> we can help you become an internet service provider within 7 days or we will give you$ 100. 00!! click here we have already signed 300 isps on a 4 year contract, see if any are in your town at: click here you are a member in at least one of these programs -you should be in them all! bannersgomlm. com profitbanners. com cashpromotions. com mysiteinc. com timshometownstories. com freelinksnetwork. com myshoppingplace. com bannerco-op. com putpeel. com putpeel. net sellinternetaccess. com be-your-own-isp. com seventhpower. com =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= internet success through simplicity =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= every day of the week, i get questions from people all over the world, including my no bs gimg members, wanting to know some of the most valuable "" secrets "" to my on-going internet success. let me say, above all else, i don't believe there are any * true * "" secrets "" to success on the net. what you do to become successful in the online world is not a "" secret "", in my opinion. most successful people follow simple, clear, repeatedly-proven strategies to succeed, whether on the net or off. but, when it comes to someone asking for advice, consultation, or simply asking, "" what's your secret? "", i have to blush & say... persistence and personality. 
of course, i always follow the advice with my own little disclaimer: what makes me successful may not work the same for you... & your first lesson is to get over the deep-seeded idea that success-of any kind, in my opinion-is somehow an unknown, unattainable secret. clearly, it is not. it's not unknown. it's not unattainable. it's not years of digging to find the "" secrets "" to internet riches. one thing that "" gets to me "" so often in my work as an internet consultant, author and internet success strategist is that so many people on the net seem to have this incredibly huge mental block that stands between themselves and success on the net. it's almost as if they've been barraged by so many claims of what works and what doesn't work, and so many long, complicated routes to actually succeeding in their online venture, that "" success "" is the equivelant of a 100-foot high brick wall. it's not that difficult, my friends! it is not that complicated!! long-time friend and business associate rick beneteau has a new ebook out called branding you & breaking the bank. get it!! http:// www. roibot. com/bybb. cgi? im 7517_bybtb. but, the reason i mention this is the fact that he talks so dynamically about the true simplicity of making your online venture a success. and, yes, rick & i come from the same school of "" self marketing ""-marketing you! obviously, that's the core of his excellent new ebook, and i couldn't agree with him more. point being, * you * are everything you do online to succeed. you are your web site, your business, your marketing piece, your customer service, your customers ' experiences with your business--all of it, is you! read his ebook & you'll see more of what i'm saying. the matter at hand is that brick wall you might have standing high as you can see, blocking the path between you & internet success. listen to me-it is not real ok? it doesn't exist. there's nothing there to fear to begin with... get over it!! 
what i'm telling you is, the only thing standing between you and the success you most desire... is yourself. when you realize this, you will tear down that brick wall by means of complete and instantaneous disintegration. it will no longer exist * in your mind *, which is the only "" real "" place it ever was anyhow! yes, "" persistence and personality "" inherently includes honesty, integrity, accountability, and many other qualities but you also have to hone in on your ultimate goals and realize that probably the most valuable, powerful key to your success... is you! that may be the most incredible "" secret "" we ever uncover in our lifetime! and, trust me, that brick wall won't ever get in your way again... unless you let it. talk about simple!! bryan is a "" veteran "" internet consultant, author, internet success strategist & marketer. he publishes mega-success. com chronicles to over 11,500 subscribing members, authors articles which appear all over the net, and helps hundreds of wealth-hungry people in their journey to internet success. bryan is also director of his no bs guerrilla internet marketing group at http://. com & a fantastic new joint venture partners program for that site. bryan hall is a founding member and the development consultant for the prestigious icop (tm) at http:// www. i-cop. org/1016. htm you can reach bryan at 877. 230. 3267 or by emailing him directly at bryan. hall @ mega-success. com =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= member showcase =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= examine carefully-those with email addresses included will trade links with you... you are encouraged to contact them. there are many ways to build a successful business-just look at these successful sites programs other members are involved in... get insane amounts of traffic to your website. purchase 10,000 guaranteed visitors to your site and receive 5,000 free. more traffic = more money! less than 2 cents a visitor. space is limited. order now! http:// www. 
freepicklotto. com trade links-businessopps @ aol. com stop smoking-free lesson!! discover the secret to stopping smoking. to master these powerful techniques, come to http:// www. breath-of-life. net for your free lesson. act now! p. s. tell someone you care about. trade links-jturco 3 @ hotmail. com celebration sale! $ 99. 00 on casinos/sportsbetting sites, lingerie stores, gift stores, adult sites toy stores. mention ad # bmlm 99 to receive this special sale price. order now! http:// www. cyberopps. com/? = bmlm 99 affiliates of the world! top rated affiliate programs, excellent business opportunities, great marketing resources and free advertising for you! visit the site to trade links. http:// www. affiliates. uk. com trade links-adrianbold @ affiliates. uk. com just been released!! internet marketing guru corey rudl has just released a brand new version of his # 1 best-selling internet marketing course, "" the insider secret's to marketing your business on the internet "". a must have! so don't hesitate, visit.. http:// www. adminder. com/c. cgi? startbgmlmezine we have a 260 page catalog with over 3000 gift items for men, women, children-a gift for everyone. we show 100 gift items on our web site alone, with the catalog you have access to the rest. we also feel we have the best prices on the web. visit at http:// www.. net trade links-georgel 932 me @ yahoo. com if you have a product, service, opportunity or quality merchandise that appeals to people worldwide, reach your targeted audience! for a fraction of what other large newsletters charge you can exhibit your website here, and trade links for only$ 8 cpm. compare that to the industry average of$ 10-$ 15 cpm. why?... because as a valuable member we want you to be successful! order today- showcases are limited and published on a first come, first serve basis. for our secure order form, click here: http:// bannersgomlm. 
com/ezine =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= win a free ad in community & commentary =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= to keep this interesting, how about this, every month we'll draw a name from the replies and that person will win one sponsorship showcase ad in the community commentary, for free. that's a value of over$ 700. 00! respond to each weekly survey, and increase your chances to win with four separate entries. question of the week (07/06/01)... no right or wrong answers, and just by answering you are entered to win a showcase ad-free! ~ ~ ~ do you spend more or less time ~ ~ ~ ~ ~ ~ online in the summer months? ~ ~ ~ more mailto: one @ aeopublishing. com less mailto: two @ aeopublishing. com same mailto: three @ aeopublishing. com to make this as easy as possible for you, just click on the e-mail address that matches your answer-you do not need to enter any information in the subject or body of the message. * * add your comments! follow directions above and add your comments in the body of the message, and we'll post the best commentaries along with the responses. you will automatically be entered in our drawing for a free sponsorship ad in the community commentary. please respond only one time per question. multiple responses from the same individual will be discarded. last weeks's results (06/29/01) ~ ~ ~ what is the goal of your website? ~ ~ ~ sell 40% get leads 20% build branding 5% provide information 20% other 15% comments: ---------------------------- our web site is initially designed to get leads, build branding, and provide information....... with a 12 month goal of selling our service more specifically via a shopping cart. we offer a service and at this time take deposits and payments via our site. 
our site has been up less than 2 months and our expectation was that we would refer to our site for leads developed in traditional media and by referral for more information, and to make a professional impression on someone you may not meet before providing service. the growth of our customer base shopping on line has grown outside of anyone's expectations....... certainly mine and i've been in this business for 25 years. the internet is not dead in the horse business, it is just getting it's legs, and the folks using it want to get all the ancillary services on-line as well. our site (the first we've developed) has exceeded our expectations, and we aren't satisfied with it yet....... we just wanted to get it there for information! jeff and rebecca marks http:// www. grand-champion. com branding. while quality customer service and product have been and will always be our top priority brand building zesto is our most challenging task. zesto. com ranks very high and most often # 1 or 2 on all major search engines and directories even yahoo entering the keyword zesto. the problem is simply that, who if anyone would type the keyword zesto, therefore we must try to build our brand by ensuring that generic keywords associated with our products (citrus peel) are used throughout our site as well as search engine submissions. fortunately owning a non generic domain short, easy to remember and trademarked works in our favor because the marketability potential is limitless. arlene turner http:// www. zesto. com =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= to change your subscribed address, send both new and old address to submit see below for unsubscribe instructions. please send suggestions and comments to: editor i invite you to send your real successes and showcase your strategies and techniques, or yes, even your total bombs, "" working together we can all prosper. 
"" submit for information on how to sponsor your membership community commentary visit: sponsorship showcase copyright 2001 aeopublishing. com email: yourmembership 2 @ aeopublishing. com voice: web: http:// www. aeopublishing. com this email has been sent to jm @ netnoteinc. com at your request, by your membership newsletter services. visit our subscription center to edit your interests or unsubscribe. http:// ccprod. roving. com/roving/d. jsp? p = oo & id = bd 7 n 7877. 7 giv 5 d 57 & m = bd 7 n 7877 charset = iso-8859-1 in this issue internet success through simplicity member showcase win a free ad in community & commentary today's special announcement: win a free ad in community & commentaryto keep this interesting, how about this, every month we'll draw a name from the replies and that person will win one sponsorship showcase ad in the community commentary, for free. that's a value of over$ 700. 00! respond to each weekly survey, and increase your chances to win with four separate entries. question of the week (07/06/01)... no right or wrong answers, and just by answering you are entered to win a showcase ad-free! ~ ~ ~ do you spend more or less time ~ ~ ~ ~ ~ ~ online in the summer months? ~ ~ ~ more mailto: one @ aeopublishing. com less mailto: two @ aeopublishing. com same mailto: three @ aeopublishing. com to make this as easy as possible for you, just click on the e-mail address that matches your answer-you do not need to enter any information in the subject or body of the message. * * add your comments! follow directions above and add your comments in the body of the message, and we'll post the best commentaries along with the responses. you will automatically be entered in our drawing for a free sponsorship ad in the community commentary. please respond only one time per question. multiple responses from the same individual will be discarded. last weeks's results (06/29/01) ~ ~ ~ what is the goal of your website? 
~ ~ ~ sell 40% get leads 20% build branding 5% provide information 20% other 15% comments: ---------------------------- our web site is initially designed to get leads, build branding, and provide information....... with a 12 month goal of selling our service more specifically via a shopping cart. we offer a service and at this time take deposits and payments via our site. our site has been up less than 2 months and our expectation was that we would refer to our site for leads developed in traditional media and by referral for more information, and to make a professional impression on someone you may not meet before providing service. the growth of our customer base shopping on line has grown outside of anyone's expectations....... certainly mine and i've been in this business for 25 years. the internet is not dead in the horse business, it is just getting it's legs, and the folks using it want to get all the ancillary services on-line as well. our site (the first we've developed) has exceeded our expectations, and we aren't satisfied with it yet....... we just wanted to get it there for information! jeff and rebecca marks http:// www. grand-champion. com branding. while quality customer service and product have been and will always be our top priority brand building zesto is our most challenging task. zesto. com ranks very high and most often # 1 or 2 on all major search engines and directories even yahoo entering the keyword zesto. the problem is simply that, who if anyone would type the keyword zesto, therefore we must try to build our brand by ensuring that generic keywords associated with our products (citrus peel) are used throughout our site as well as search engine submissions. fortunately owning a non generic domain short, easy to remember and trademarked works in our favor because the marketability potential is limitless. arlene turner http:// www. zesto. 
com to change your subscribed address, send both new and old address to submit see below for unsubscribe instructions. please send suggestions and comments to: editor i invite you to send your real successes and showcase your strategies and techniques, or yes, even your total bombs, "" working together we can all prosper. "" submit for information on how to sponsor your membership community commentary visit: sponsorship showcase copyright 2001 aeopublishing. com email us:: visit our site phone: this email was sent to jm @ netnoteinc. com, at your request, by your membership newsletter services. visit our subscription center to edit your interests or unsubscribe. view our privacy policy. powered by "
0016.2003-12-19.GP 1 ativan. n vicodin. n xanax. x valium. m dxqrgu many specials running this week the re. al thing not like the other sites that imitate these products. no hidd. en char. ges-fast delivery vic. odin val. ium xan. ax via. gra diaz. epam alpra. zolam so. ma fior. icet amb. ien stil. nox ult. ram zo. loft clon. azepam at. ivan tr. amadol xeni. cal cele. brex vi. oxx pro. zac bus. par much m. ore.... if you have recieved this in error please use http:// www. nowbetterthis. biz/byee. html w g snhezkjzhisbpjhgx hcokyovrdsprayz klei vzxoaxqhg kvie
0016.2004-08-01.BG 1 important news for usavity customers. " dear cheapsoft customer, my name is annie kincaid, and i work at cheapsoft llc. you are important to me! you spend your money and time on cheapsoft, and i want to let you know that we have finished update our programs store. i want to remind you that we are offering now more than 1500 popular software for low price with your personal customer's discount. please spend few moments of yours precious time to check our updated software store: http:// www. dutyfreesoft 4 all. info with regards, customer service department, annie kincaid"
0017.1999-12-14.kaminski 0 a paper of mine " vince, i have written a paper, which supposedly is going to be published in the february 2000 issue of eprm, probably after some editorial cuts (at least this is what i am being told by them). i would appreciate your thoughts if you would have time to read it. regards, martin -userconf. doc"
0017.2000-01-17.beck 0 global risk management operations " congratulations, sally!!! kk ----------------------forwarded by kathy kokas/corp/enron on 01/17/2000 08:08 pm--------------------------- from: rick causey 01/17/2000 06:04 pm sent by: enron announcements to: all enron worldwide cc: subject: global risk management operations recognizing enron , s increasing worldwide presence in the wholesale energy business and the need to insure outstanding internal controls for all of our risk management activities, regardless of location, a global risk management operations function has been created under the direction of sally w. beck, vice president. in this role, sally will report to rick causey, executive vice president and chief accounting officer. sally , s responsibilities with regard to global risk management operations will mirror those of other recently created enron global functions. in this role, sally will work closely with all enron geographic regions and wholesale companies to insure that each entity receives individualized regional support while also focusing on the following global responsibilities: 1. enhance communication among risk management operations professionals. 2. assure the proliferation of best operational practices around the globe. 3. facilitate the allocation of human resources. 4. provide training for risk management operations personnel. 5. coordinate user requirements for shared operational systems. 6. oversee the creation of a global internal control audit plan for risk management activities. 7. establish procedures for opening new risk management operations offices and create key benchmarks for measuring on-going risk controls. each regional operations team will continue its direct reporting relationship within its business unit, and will collaborate with sally in the delivery of these critical items. 
the houston-based risk management operations team under sue frusco , s leadership, which currently supports risk management activities for south america and australia, will also report directly to sally. sally retains her role as vice president of energy operations for enron north america, reporting to the ena office of the chairman. she has been in her current role over energy operations since 1997, where she manages risk consolidation and reporting, risk management administration, physical product delivery, confirmations and cash management for ena , s physical commodity trading, energy derivatives trading and financial products trading. sally has been with enron since 1992, when she joined the company as a manager in global credit. prior to joining enron, sally had four years experience as a commercial banker and spent seven years as a registered securities principal with a regional investment banking firm. she also owned and managed a retail business for several years. please join me in supporting sally in this additional coordination role for global risk management operations."
0017.2001-04-03.williams 0 monday blues " bill, i am having such a terrible day. i am so flustrated. can you believe it is only monday? ah! anyhow, i hope your day is going much better than mine. when i saw you earlier, you looked kinda angry or something. i hope i was just imaging that. anyhow, i'd better get back to work. i'll talk to you later."
0017.2003-12-18.GP 1 get that new car 8434 people nowthe weather or climate in any particular environment can change and affect what people eat and how much of it they are able to eat.
0017.2004-08-01.BG 1 super-discounts on ambien and soma " up to 80% savings on xanax, valium, phentermine, viagra here andorra elmira pompey cankerworm rush curricula kaskaskia whitehorse devoid stacy haunch curtain quadrangular prefix axe beck dubhe canyonu's copenhagen adolescent martensite bucolic triassic baccarat spigot macarthur ague fraternal textual militarism flynn lobster plushy aphrodite hillmancarthage cagey nostalgia lineal mauricio glandular columnar doff strangulate cryogenic phrasemake carrie clamp pet express indubitable extremal crapbedspring squill hydroxylate cannot keyes rosalie bestowal ncaa brighton carriage mesopotamia doctoral phonetic samarium nearby psychotic manitoba cornet delinquent novak brim pun ecole exultant cheeky griddle ambrose descendent forever affectate scuttle"
0017.2004-08-02.BG 1 your winning notice. " pacific international lottery organisation. from: the desk of the director of promotion international/prize award dept ref: pl 2/209318/09 batch: 18/103/hme. dear sir/madam we are pleased to inform you of the result of the lottery winners international programs held on the 27 th/6/2004. your e-mail address attached to ticket number 436425795822-5022 with serial number 6614102, batch number 8561513507, lottery ref number 7675213911 and drew lucky numbers 7-9-4-17-34-44 which consequently won in category c, you have therefore been approved for a lump sum pay out of us$ 1. 500,000. 00 (one million five hundred thousand united states dollars) congratulations!!! due to mix up of some numbers and names, we ask that you keep your winning information confidential until your claims has been processed and your money remitted to you. this is part of our security protocol to avoid double claiming and unwarranted abuse of this program by some participants. all participants were selected through a computer ballot system drawn from over 40,000 company and 20,000, 000 individual email addresses and names from all over the world. this promotional program takes place every year. this lottery was promoted and sponsored by association of software producers. we hope with part of your winning, you will take part in our next year us$ 20 million international lottery. to file for your claim, please contact our fiducial agent: mr. rook van nas magnum securities company. amsterdam netherland. email: admin_magnumo @ mail 2 netherlands. com tel:: + 31615304791 remember, all winning must be claimed not later than 6 th of august, 2004. after this date all unclaimed funds will be included in the next stake. please note in order to avoid unnecessary delays and complications please remember to quote your reference number and batch numbers in all correspondence. furthermore, should there be any change of address do inform our agent as soon as possible. 
congratulations once more from our members of staff and thank you for being part of our promotional program. note: anybody under the age of 18 is automatically disqualified. yours sincerely, john smith lottery coordinator this email was sent using the webmail feature @ pc bytesize"
0018.1999-12-14.kaminski 0 invitation to speak at power 2000 " hi vince it is my great pleasure to invite you to speak at power 2000 which will be in houston on 9 & 10 may 2000. would you be interested in chairing one of the streams on day 2 of the conference? or making a full presentation on one of the days? please let me know which talks interest you. obviously, some of the talks are no longer available but i would like to give you a choice as much as possible. please could you get back to me asap on 212 925 1864 ext 151 or by return email. i very much hope you can make the dates as i'm very keen to have you participate at power. not to flatter you unnecessarily, but i know that a lot of people come to our conferences to hear what you have to say. best regards emma -invite. doc"
0018.2001-07-13.SA_and_HP 1 [ilug] we need your assistance to invest in your country " dear sir/madam, i am well confident of your capability to assist me in a transaction for mutual benefit of both parties, ie (me and you) i am also believing that you will not expose or betray the trust and confidence i am about to establish with you. i have decided to contact you with greatest delight and personal respect. well, i am victor sankoh, son to mr. foday sankoh who was arrested by the ecomog peace keeping force months ago in my country sierra leone. few days before the arrest of my father, he confided in me and ordered me to go to his underground safe and move out immediately, with a deposit agreement and cash receipt he made with a security company in abidjan cote d'ivoire where he deposited one iron box containing usd$ 22 million dollars cash (twenty two million dollars). this money was made from the sell of gold and diamond by my father and he have already decided to use this money for future investment of the family before his arrest. thereafter, i rushed down to abidjan with these documents and confirmed the deposit of the box by my father. also, i have been granted political stay as a refugee by the government of cote d'ivoire. meanwhile, my father have instructed me to look for a trusted foreigner who can assist me to move out this money from cote d'ivoire immediately for investment. based on this, i solicit for your assistance to transfer this fund into your account, but i will demand for the following requirement: (1) could you provide for me a safe bank account where this fund will be transferred to in your country or another neaarby country where taxation will not takegreat toll on the money? (2) could you be able to assist me to obtain my travelling papers after this transfer to enable me come over to meet you in your country for theinvestment of this money? 
(3) could you be able to introduce me to a profitable business venture that would not require much technical expertise in your country where part of this fund willbe invested? please, all these requirements are urgently needed as it will enable me to establish a stronger business relationship with you hence i will like you to be the general overseer of the investment thereafter. i am a christian and i will please, want you to handle this transaction based on the trust i have established on you. for your assistance in this transaction, i have decided to offer you 12% percent commission of the total amount at the end of this business. the security of this business is very important to me and as such, i would like you to keep this business very confidential. i shall be expecting your urgent reply. thank you and god bless you. victor sankoh -- irish linux users'group: ilug @ linux. ie http:// www. linux. ie/mailman/listinfo/ilug for (un) subscription information. list maintainer: listmaster @ linux. ie"
0018.2003-12-18.GP 1 await your response " dear partner, we are a team of government officials that belong to an eight-man committee in the presidential cabinet as well as the senate. at the moment, we will be requiring your assistance in a matter that involves investment of monies, which we intend to transfer to your account, upon clarification and a workable agreement reached in consummating the project with you. based on a recommendation from an associate concerning your integrity, loyalty and understanding, we deemed it necessary to contact you accordingly. all arrangements in relation to this investment initiative, as well as the initial capital for its take off has been tactically set aside to commence whatever business you deemed fit, that will turn around profit favourably. we request you immediately contact us if you will be favorably disposed to act as a partner in this venture, and possibly will afford us the opportunity to discuss whatever proposal you may come up with. also bear in mind that the initial capital that we shall send across will not exceed$ 13,731, 000,00 usd (thirteen million seven hundred and thirty one thousand united states dollars) so whatever areas of investment your proposal shall cover, please it should be within the set aside capital. in this regard, the proposal you may wish to discuss with us should be comprehensive enough for our better understanding; with special emphasis on the following: 1. the tax obligationin your country 2. the initial capital base required in your proposed investment area, as well as; 3. the legal technicalities in setting up a business in your country with foreigners as share-holders 4. the most convenient and secured mode of receiving the funds without our direct involvement. 5. your ability to provide a beneficiary/partnership account with a minimal deposit, where we shall transfer the funds into subsequently. 
another area that we wish to explicitly throw more light on, is the process we have conceived in transferring the funds into the account you shall be providing. since we are the owners of the funds, and the money will be leaving the apex bank of my country, we shall purposefully fulfill the legal obligations precedent to transferring such huge amount of funds, without arousing suspicion from any quarter as a drug or terrorist related funds; and this will assist us in the long run to forestall any form of investigations. remember that, on no account must we be seen or perceived to be directly connected with the transfer of funds. you will be the one to be doing all these, and in the course of transfer, if for any reason whatsoever, you incurred some bills, we shall adequately retire same, upon the successful confirmation of the funds in your account. the commencement of this project is based on your ability to convince us of the need to invest in whatever business you have chosen, and to trust your personality and status, especially as it concerns the security of the funds in your custody. i await your response, sincerely, john adams (chairman senate committee on banks and currency) call number: 234-802-306-8507 "
# __[OPTIONAL]__ Run the following command line EDA to confirm that you have succesfully created the Enron data.
# count records
# !wc -l enronemail_1h.txt
# EXPECTED OUTPUT: 100 enronemail_1h.txt
# extract second field which is SPAM flag
# !cut -f2 -d$'\t' enronemail_1h.txt|wc
# EXPECTED OUTPUT: 101 394 3999
# view first ten classifications
# !cut -f2 -d$'\t' enronemail_1h.txt|head
# Display an example SPAM email record
# !head -n 100 enronemail_1h.txt|tail -1|less
# <pre>
# 018.2001-07-13.SA_and_HP 1 [ilug] we need your assistance to invest in your country dear sir/madam, i am well confident of your capability to assist me in a transaction for mutual benefit of both parties, ie (me and you) i am also believing that you will not expose or betray the trust and confidence i am about to establish with you. i have decided to contact you with greatest delight and personal respect. well, i am victor sankoh, son to mr. foday sankoh who was arrested by the ecomog peace keeping force months ago in my country sierra leone.
# </pre>
# ---
# # HW Problems
# ## HW2.0 Functional Programming
#
# ### HW2.0.0 - Short Response
# - What is a race condition in the context of parallel computation? Give an example.
# - What is MapReduce?
# - How does it differ from Hadoop?
# +
# START STUDENT ANSWER HW2.0.0 - INSERT CELLS AS NEEDED
# -
# +
# END STUDENT ANSWER HW2.0.0
# -
# ### HW2.0.1 - Programming Paradigm Illustration
#
# Which programming paradigm is Hadoop based on? Explain the main ideas of this programming paradigm and provide a simple example of this programming paradigm in raw python code. Show the code running and explain how it fits this paradigm.
#
# For example, maybe you could find the average length of a string in this collection:
# `strings = ["str1", "string2", "w261", "MAchine learning at SCALE"]`
# using a python "map-reduce" (functional programming) job similar in style to the example below.
# +
#EXAMPLE Mapper functions in Python
def fahrenheit(T):
    """Convert a Celsius temperature *T* to Fahrenheit."""
    return T * (float(9) / 5) + 32
def celsius(T):
    """Convert a Fahrenheit temperature *T* back to Celsius."""
    return (T - 32) * (float(5) / 9)
# Demo of the functional "map" and "reduce" primitives that MapReduce is named after.
temperatures = (36.5, 37, 37.5, 38, 39)
# "Map" step: apply the conversion element-wise over the whole collection.
# NOTE: under Python 2 (this notebook's kernel) map() returns a list, which is
# why F can be printed here and then iterated again by reduce() and len() below.
F = map(fahrenheit, temperatures)
print "Fahrenheit temperatures are:", F
#returns 97.7 98.6 99.5 100.4 102.2
C = map(celsius, F)
#EXAMPLE Reducer function in Python - built in functools.reduce
import functools
# "Reduce" step: fold the mapped values into a single sum, then average.
print "Average temp is: %.2fF" % ( functools.reduce(lambda x,y: x+y, F)/len(F) )
#returns Average temp is 99.68F
# +
# START STUDENT RESPONSE HW2.0.1 - INSERT CELLS AS NEEDED
# -
# +
# END STUDENT RESPONSE HW2.0.1
# -
# ### HW2.0.2 - WordCount Example of Hadoop Streaming MR
# The cells below provide a full example of performing a MapReduce job in Hadoop Streaming. For this homework item you do not need to do any coding. Simply read through and execute the following cells. Consider this an opportunity to make sure you understand the syntax of each component before moving on to question 2.1 where you will implement your own MapReduce jobs.
# +
# %%writefile WordCount/mapper.py
# #!/usr/bin/env python
# Streaming wordcount mapper: reads raw text lines from stdin and emits one
# tab-separated "word<TAB>1" record per whitespace-delimited token.
import sys
#sys.stderr.write("reporter:counter:Tokens,Total,1") # NOTE missing the carriage return so wont work
# Set up counters to monitor/understand the number of times a mapper task is run
# (lines written to stderr in "reporter:..." form are interpreted by Hadoop Streaming)
sys.stderr.write("reporter:counter:HW2.0.1 Mapper Counters,Calls,1\n")
sys.stderr.write("reporter:status:processing my message...how are you\n")
for line in sys.stdin:
    for word in line.split():
        # emit the key-value pair; Hadoop shuffles/sorts on the word key
        print '%s\t%s' % (word, 1)
# +
# %%writefile WordCount/reducer.py
# #!/usr/bin/env python
import sys
cur_key = None
cur_count = 0
# Set up counters to monitor/understand the number of times a reducer task is run
sys.stderr.write("reporter:counter:HW2.0.1 Reducer Counters,Calls,1\n")
for line in sys.stdin:
key, value = line.split()
if key == cur_key:
cur_count += int(value)
else:
if cur_key:
print '%s\t%s' % (cur_key, cur_count)
cur_key = key
cur_count = int(value)
print '%s\t%s' % (cur_key, cur_count)
# -
# !chmod a+x WordCount/mapper.py
# !chmod a+x WordCount/reducer.py
#Unit test the mapper
# !echo "foo foo quux labs foo bar quux" | WordCount/mapper.py
#Unit test the mapper
# !echo "foo foo quux labs foo bar quux" | WordCount/mapper.py |sort -k1,1
#Systems test the mapper and reducer
# !echo "foo foo quux labs foo bar quux" | WordCount/mapper.py | sort -k1,1 | WordCount/reducer.py| sort -k2,2nr
# %%writefile testWordCountInput.txt
hello this is Jimi
jimi who Jimi three Jimi
Hello
hello
# +
# !hdfs dfs -rm testWordCountInput.txt
# !hdfs dfs -copyFromLocal testWordCountInput.txt
# !hdfs dfs -rm -r wordcount-output
##################### IMPORTANT ########################################################################
# Make sure you have the correct paths to the jar file as well as the input and output files!!
# make sure to include the -files option. Do ***** NOT ****** put spaces between the file paths!
########################################################################################################
# !hadoop jar /usr/local/Cellar/hadoop/2.7.2/libexec/share/hadoop/tools/lib/hadoop-streaming-2.7.2.jar \
# -files WordCount/reducer.py,WordCount/mapper.py \
# -mapper mapper.py \
# -reducer reducer.py \
# -input testWordCountInput.txt \
# -output wordcount-output \
# -numReduceTasks 3
# -
#have a look at the input
# !echo "\n---------------------------\n"
# !hdfs dfs -cat testWordCountInput.txt
# !echo "\n---------------------------\n"
# Wordcount output
# !hdfs dfs -cat wordcount-output/part-0000*
# ## HW2.1. Sort in Hadoop MapReduce (Partial sort, total sort) - List in alphabetical order
# In this problem use the text of _Alice’s Adventures in Wonderland_ which we downloaded at the top of this notebook. Recall that the data are available locally in a file called `alicesTExtFilename.txt`.
#
# Using Hadoop, please change the mapper.py/reducer.py combination so that you get only the number of words starting with an uppercase letter, and the number of words starting with a lowercase letter. In other words, you need an output file with only 2 lines, one giving you the number of words staring with a lowercase ('a' to 'z'), and the other line indicating the number of words starting with an uppercase letter ('A' to 'Z').
# __Some background on Sorting in Hadoop__
# Hadoop will always give a total sort on the key (i.e., key part of the key-value pairs produced by the mappers) when using just one reducer. When using multiple reducers Hadoop will by default give you a partial sort (i.e., all records within a partition will be sorted by the key (i.e., key part of the key-value pairs produced by the mappers) .
# To achieve a total sort one needs to write a custom mapper to to prepend a partition key to each record, partition on that prepended key, and then do a secondary sort on a composite key that is made up of the prepended key and the original key. This can be done with one map-reduce job. This will be covered during Live Session of Week 3.
# +
# %%writefile UpperLower/mapper.py
# #!/usr/bin/env python
import sys
#sys.stderr.write("reporter:counter:Tokens,Total,1") # NOTE missing the carriage return so wont work
# Set up counters to monitor/understand the number of times a mapper task is run
sys.stderr.write("reporter:counter:HW2.1 Mapper Counters,Calls,1\n")
sys.stderr.write("reporter:status:processing my message...how are you\n")
# START STUDENT CODE HW21MAPPER
# END STUDENT CODE HW21MAPPER
# +
# %%writefile UpperLower/reducer.py
# #!/usr/bin/env python
import sys
# Set up counters to monitor/understand the number of times a reducer task is run
sys.stderr.write("reporter:counter:HW2.1 Reducer Counters,Calls,1\n")
# START STUDENT CODE HW21REDUCER
# END STUDENT CODE HW21REDUCER
# +
# INSTRUCTIONS: make mapper and reducer py files executable
# START STUDENT CODE HW21EXECUTABLE
# !chmod a+x UpperLower/mapper.py
# !chmod a+x UpperLower/reducer.py
# END STUDENT CODE HW21EXECUTABLE
# +
# INSTRUCTIONS: call hadoop with one reducer. see example above.
##################### IMPORTANT ########################
# Make sure you have the correct paths to the jar file
# as well as the input and output files!!
# make sure to include the -files option. Do NOT put
# spaces between the file paths!
########################################################
# START STUDENT CODE HW21HADOOP
# END STUDENT CODE HW21HADOOP
# -
# Wordcount output
# !hdfs dfs -cat upperlower-output/part-0000* > upperlower_counts.txt
# !cat upperlower_counts.txt
# ### HW2.1.1 Calculate the vocabulary size (number of unique words in the Alice book)
# To solve this problem, a single reducer will suffice. One could use multiple reducers but then you would need a post processing step to aggregate the counts in the PART-000XX files.
#
# Write a map/reduce job to count the number of unique words in the Alice book.
# Please verify your code with straight python code.
# Do you get the same answer?
# +
# %%writefile Vocab/mapper.py
# #!/usr/bin/env python
# START STUDENT CODE HW211MAPPER
# END STUDENT CODE HW211MAPPER
# +
# %%writefile Vocab/reducer.py
# #!/usr/bin/env python
# START STUDENT CODE HW211REDUCER
# END STUDENT CODE HW211REDUCER
# +
# START STUDENT CODE HW211HADOOP
# END STUDENT CODE HW211HADOOP
# -
# !hdfs dfs -cat vocab-output/part-0000* > vocab_output.txt
# !cat vocab_output.txt
# ### HW2.1.2 TOTAL SORT using a single reducer
# Write a MapReduce job that creates a text file named __alice_words.txt__ containing an alphabetical listing of all the words, and the number of times each occurs, in the text version of Alice’s Adventures in Wonderland. (You can obtain a free plain text version of the book, along with many others, from [here](http://www.gutenberg.org/cache/epub/11/pg11.txt)
#
# Solve this TOTAL SORT problem in mapReduce using a single reducer.
#
# The first 10 lines of your output file should look something like this (the counts are not totally precise):
# <pre>
# Word Count
# =======================
# a 631
# a-piece 1
# abide 1
# able 1
# about 94
# above 3
# absence 1
# absurd 2
# </pre>
# +
# %%writefile Total_sort/mapper.py
# #!/usr/bin/env python
# START STUDENT CODE HW212MAPPER
# END STUDENT CODE HW212MAPPER
# +
# %%writefile Total_sort/reducer.py
# #!/usr/bin/env python
# START STUDENT CODE HW212REDUCER
# END STUDENT CODE HW212REDUCER
# +
# START STUDENT CODE HW212HADOOP
# END STUDENT CODE HW212HADOOP
# -
# !hdfs dfs -cat sorted-output/part-0000* > sorted_output.txt
# !head sorted_output.txt
# ### HW2.1.2.b TOTAL SORT using multiple reducers [OPTIONAL for this week; will be covered in next live session]
#
# Change the mapper.py/reducer.py combination from the the above WordCount example so that you get the longest word present in the text version of Alice’s Adventures in Wonderland. (You can obtain a free plain text version of the book, along with many others, from [here](http://www.gutenberg.org/cache/epub/11/pg11.txt).
#
# * First use one reducer and report your result. HINT: from emit records of the form: "longestWord\theLongWordEver\t15".
# * Run you Hadoop streaming job with 3 reducers? Anything change with respect to your solution.
# !mkdir Total_sort_multi
# +
# %%writefile Total_sort_multi/mapper.py
# START STUDENT CODE HW212MAPPER_MULTI
# END STUDENT CODE HW212MAPPER_MULTI
# +
# %%writefile Total_sort_multi/reducer.py
# START STUDENT CODE HW212REDUCER_MULTI
# END STUDENT CODE HW212REDUCER_MULTI
# +
# START STUDENT CODE HW212HADOOP_MULTI
# END STUDENT CODE HW212HADOOP_MULTI
# -
# ### HW2.1.3 How many times does the word alice occur in the book?
# Write a MapReduce job to determine this. Please pay attention to what you use for a key and value as output from your mapper.
# +
# %%writefile Alice/mapper.py
# #!/usr/bin/env python
# START STUDENT CODE HW213MAPPER
# END STUDENT CODE HW213MAPPER
# +
# %%writefile Alice/reducer.py
# #!/usr/bin/env python
# START STUDENT CODE HW213REDUCER
# END STUDENT CODE HW213REDUCER
# +
# START STUDENT CODE HW213HADOOP
# END STUDENT CODE HW213HADOOP
# -
# !hdfs dfs -cat alice-output/part-0000* > alice_count.txt
# !cat alice_count.txt
# ## HW2.2 EDA in Hadoop - WordCount & Top 10 Words
# __CODE TIP:__ Tokenizing
# >A tokenizer divides text into a sequence of tokens, which roughly correspond to "words". The provided code below shows one way to perform tokenization of English language strings.
#
# __IMPORTANT!__ _For detecting words in HW2.2 and later problems please use the following code to generate word tokens (otherwise, you will probably get a different answer to expected)._
# +
import sys, re, string
# define regex for punctuation removal
line = """ 0017.2000-01-17.beck 0 global risk management operations " congratulations, sally!!! kk ----------------------forwarded by kathy kokas/corp/enron on 01/17/2000 08:08 pm--------------------------- from: rick causey 01/17/2000 06:04 pm sent by: enron announcements to: all enron worldwide cc: subject: global risk management operations recognizing enron , s increasing worldwide presence in the wholesale energy business and the need to insure outstanding internal controls for all of our risk management activities, regardless of location, a global risk management operations function has been created under the direction of sally w. beck, vice president. in this role, sally will report to rick causey, executive vice president and chief accounting officer. sally , s responsibilities with regard to global risk management operations will mirror those of other recently created enron global functions. in this role, sally will work closely with all enron geographic regions and wholesale companies to insure that each entity receives individualized regional support while also focusing on the following global responsibilities: 1. enhance communication among risk management operations professionals. 2. assure the proliferation of best operational practices around the globe. 3. facilitate the allocation of human resources. 4. provide training for risk management operations personnel. 5. coordinate user requirements for shared operational systems. 6. oversee the creation of a global internal control audit plan for risk management activities. 7. establish procedures for opening new risk management operations offices and create key benchmarks for measuring on-going risk controls. each regional operations team will continue its direct reporting relationship within its business unit, and will collaborate with sally in the delivery of these critical items. 
the houston-based risk management operations team under sue frusco , s leadership, which currently supports risk management activities for south america and australia, will also report directly to sally. sally retains her role as vice president of energy operations for enron north america, reporting to the ena office of the chairman. she has been in her current role over energy operations since 1997, where she manages risk consolidation and reporting, risk management administration, physical product delivery, confirmations and cash management for ena , s physical commodity trading, energy derivatives trading and financial products trading. sally has been with enron since 1992, when she joined the company as a manager in global credit. prior to joining enron, sally had four years experience as a commercial banker and spent seven years as a registered securities principal with a regional investment banking firm. she also owned and managed a retail business for several years. please join me in supporting sally in this additional coordination role for global risk management operations."
"""
# split the record into its 4 tab-separated fields: id, class label, subject, body
docID, docClass,title,body = line.split("\t",3)
# regex matching any single punctuation character
regex = re.compile('[%s]' % re.escape(string.punctuation))
# replace punctuation with spaces; NOTE(review): only the body is lowercased
# here -- the title keeps its case; confirm this is intended before reusing
emailStr = regex.sub(' ', title + " " +body.lower())
# collapse runs of whitespace into a single space
emailStr = re.sub( '\s+', ' ', emailStr )
# split the line into words
words = emailStr.split()
for w in words:
    print w, "\t", 1 #or yield(w, 1)
# -
# ### HW2.2.1 Enron Word Count
#
# In this question you will use Hadoop MapReduce streaming to perform word count on the Enron data. In completing this homework you will:
# * write the mapper/reducer job that will determine the word count (number of occurrences) of each white-space delimitted token (assume spaces, fullstops, comma as delimiters).
# * write and run a Hadoop Streaming MapReduce job to generate your word counts.
# * examine the word “assistance” and report its word count in both SPAM and HAM classes.
# * crosscheck the frequency using Unix commands (e.g., use multiple grep to get the frequency in each class):
#
# ```
# grep assistance enronemail_1h.txt|cut -d$'\t' -f4| grep assistance|wc -l
# 8
# ```
# __NOTE: __ "assistance" occurs on 8 lines but how many times does the token occur? 10 times! This is the number we are looking for!
#
# __Expected Output:__ (_format may vary depending on your implementation_)
# `class 0 assistance 8`
# `class 1 assistance 2`
# +
# %%writefile Enron/mapper2.2.1.py
# #!/usr/bin/env python
import sys, re, string
# START STUDENT CODE HW221MAPPER
# define regex for punctuation removal
regex = re.compile('[%s]' % re.escape(string.punctuation))
# input comes from STDIN (standard input)
# use subject and body
# remove punctuations, only have white-space as delimiter
# write the results to STDOUT (standard output);
# what we output here will be the input for the
# Reduce step, i.e. the input for reducer.py
#
# tab-delimited; the trivial word count is 1
# END STUDENT CODE HW221MAPPER
# +
# %%writefile Enron/reducer2.2.1.py
# #!/usr/bin/env python
from operator import itemgetter
import sys
# START STUDENT CODE HW221REDUCER
# END STUDENT CODE HW221REDUCER
# +
# START STUDENT CODE HW221HADOOP
# END STUDENT CODE HW221HADOOP
# -
# !hdfs dfs -cat HW2.2.1/results/part-0000* > enron_counts.txt
# !head enron_counts.txt
# !grep -i assistance enron_counts.txt
# ### HW2.2.2 - Enron top 10, single reducer
# Using Hadoop MapReduce and your wordcount job (from HW2.2.1) determine the top-10 occurring tokens (most frequent tokens) using a single reducer for the SPAM class and for the HAM class.
# The expected output for 2.2.2 is in terms of three tab-separated columns:
# `CLASS\tWORD\tCOUNT`
# with the HAM top 10 coming first followed by the SPAM top 10.
# +
# START STUDENT CODE HW222HADOOP
# END STUDENT CODE HW222HADOOP
# -
# !hdfs dfs -cat HW2.2.2/results/part-0000* > sorted_enron_counts.txt
# display top from each class in the sorted_enron_counts.txt file
# ### HW2.2.3 (Optional) - Top 10 with multiple reducers
# Using Hadoop MapReduce and your wordcount job (from HW2.2.1) determine the top-10 occurring tokens (most frequent tokens) using multiple reducers.
#
# To achieve a total sort one needs to write a custom mapper to to prepend a partition key to each record. The shuffle phase will need a custom partitioner based upon the prepended key, while the sort is based upon a composite key which is made up of the partition key and the word count (i.e., we will do a secondary sort on a composite key that is made up of the prepended key and the word count. This all can be done with one map-reduce job.
# ---
# ## HW2.3 Multinomial Naive Bayes with NO Smoothing (using a single reducer)
#
# In this question you will produce a spam filter based upon a Multinomial Naive Bayes classifier. Your development process will follow a few steps. __In part 2.3.0__ we will provide a quick review of the math behind Naive Bayes along with some tips about how to represent a Naive Bayes model in code and how to use this representation of a NB model to perform document classfication. __In part 2.3.1__ you will use the provided Naive Bayes class to develop a two-phase MapReduce job implementing Naive Bayes and test your implementation on a toy dataset '`chineseExample.txt`'. __In part 2.3.2__ you will perform the Naive Bayes calculations for this toy data set by hand (i.e. in markdown) as a final check that your Naive Bayes implementation works as expected. Finally, __in parts 2.3.3 and 2.3.4__ you will use the implementation that you developed to perform Naive Bayes on the Enron data and report your results.
#
# __A few important notes before we start:__
# * <span style="color:red;font-weight:bold">For all tasks in this HW problem, please use one (1) reducer.</span>
# * No need to add a smoothing term for this implementation (we'll explore smoothing in 2.4).
# * For the sake of this assignment we will focus on the basic construction of the parallelized classifier, and not consider its validation or calibration, and so you will have the classifier operate on its own training data (unlike a field application where one would use non-overlapping subsets for training, validation and testing).
# * When you get to the Enron portion of this question, please use the contents of BOTH subject field and the body field for all your Naive Bayes modeling. Use all white-space delimitted tokens as independent input variables (assume spaces, fullstops, commas as delimiters).
#
# __Additional Resources:__ _For a quick reference on the construction of the Multinomial Naive Bayes classifier that you will code, consult_ [chapter 13](http://nlp.stanford.edu/IR-book/pdf/13bayes.pdf) _of Information Retrieval by Manning, Raghavan & Schutze , the_ ["Document Classification"](https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Document_classification) _section of the wikipedia page on Naive Bayes OR the_ [original paper](http://www.aueb.gr/users/ion/docs/ceas2006_paper.pdf) _by the curators of the Enron email data._
# ### HW2.3.0 Multinomial NB Math Review & Implementation Tips
# You do not need to do any coding in this section. Just read/run the material provided and make sure you understand it before proceeding.
# __Review: Multinomial Naive Bayes Classification__
# Recall that in Naive Bayes, the probability of a document $Doc$ has classification $C$ is calculated as follows (where the $\approx$ is due to the naive approximation of joint probability in the LHS numerator):
#
# $$ P\big(C \:\:\lvert \bigwedge_{w_i \in Doc} w_i\big) \approx \frac{ P(C) \prod_{w_i \in Doc}P(w_i\lvert C)}{P(\wedge w_i)} $$
#
# To avoid floating point errors in the prompt we can hence use the equivalent statement that:
#
# $$ log\Big(P\big(C \:\:\lvert \bigwedge_{w_i \in Doc} w_i\big)\Big) \approx log(P(C)) + \sum_{w_i \in Doc}log\big(P(w_i\lvert C)\big) - log(P(\wedge w_i)) $$
#
# For fixed document $log(P(\wedge w_i))$ we can optimize the RHS, by taking the following:
#
# $$ \hat{y} \approx argmax_{C\in\{\text{spam},\text{ham}\}}\Big(log(P(C)) + \sum_{w_i \in Doc}log\big(P(w_i\lvert C)\big)\Big) $$
#
# So, in order to predict the class of a document we need to _learn_ two kinds of information from our training data:
# 1. The class priors for $C \in \{\text{spam},\text{ham}\}$:
# $$P(C) = Count(C)\: / \:|Emails|$$
# 2. The class conditional probabilities for each word $P(w_i\lvert C)$
# * _given the bag of words assumption, and without smoothing we calculate these conditionals using_:
# $$P(w_i\lvert C) \approx \frac{Count(w_i,C)}{\sum_jCount(w_j,C)}$$
# * _in other words, for multinomial Naive Bayes,the class conditional probability for a word such as "assistance" given the class is SPAM, Pr(X=“assistance”|Y=SPAM), is calculated as follows_:
#
# $$ \frac{the \ number \ of \ times \ “assistance” \ occurs \ in \ SPAM \ labeled \ documents}{the \ number \ of \ words \ in \ documents \ labeled \ SPAM }$$
#
# 'Learning' a Multinomial Naive Bayes Model (without smoothing) is simply the process of making a smart choice for how to extract and store these two kinds of information.
# __Representing Your Model:__
# For the purposes of this homework we recommend that you represent your model using the following tab-separated (key-value) format:
#
# `word\tFreq(Word in HAM),Freq(Word in SPAM),Pr(Word|HAM),Pr(Word|SPAM)`
#
# Important notes about this representation:
# * The frequency information is added for ease of understanding and debugging.
# * In memory, this TSV-type data can be stored as a dictionary or defaultDict to record the learnt model or intermediate versions of the model.
# * We can also insert a special record for the class priors. _For example, we can use the token ClassPrior as the key to the class priors in this dictionary representation of the learnt model. These priors only need to be calculated once for the training set, you will need to make a choice of when/how to do so._
# * When working with the data keep in mind that HAM is the 0 class and SPAM is the 1 class.
# __Using this representation:__
# Recall that at the top of this notebook we created a toy dataset called `chineseExample.txt`:
# !cat NaiveBayes/chineseExample.txt
# In the next section (2.3.1) you will use this toy data to help you develop a Hadoop Streaming MapReduce implementation of Naive Bayes. Your implementation will first learn a model and then use that model to perform document classsification. However, before jumping in to the learning process it may be usefull to become a bit more familiar with our choice of how to represent the model.
#
# The cell below creates a text file with an example of a multinomial naive bayes model for the toy dataset. Each record in this model follows the format we've recommended above (although it may be helpful to think about the classes as 'about China' and 'not about China' instead of Spam/Ham).
# %%writefile NaiveBayes/model1.txt
Beijing 0.0,1.0,0.111111111111,0.142857142857
Chinese 1.0,5.0,0.222222222222,0.428571428571
Tokyo 1.0,0.0,0.222222222222,0.0714285714286
Shanghai 0.0,1.0,0.111111111111,0.142857142857
ClassPriors 1.0,3.0,0.25,0.75
Japan 1.0,0.0,0.222222222222,0.0714285714286
Macao 0.0,1.0,0.111111111111,0.142857142857
# Run the following provided code to learn about loading and using this model to peform document classification.
# load model file as strings (it is not tokenized; that will come next)
# each model line has the form "word<TAB><comma-separated stats string>"
modelStats = {}
recordStrs = [s.split('\n')[0].split('\t') for s in open("NaiveBayes/model1.txt").readlines()]
for word, statsStr in recordStrs:
    print word, "-->", statsStr
# load model file
# notice the string quotes around the frequncy and probabilits
# we need to fix that next
# Same load, but now split each stats string on commas.
# NOTE: the stats values are still *strings* here ("0.0", "1.0", ...);
# the float conversion happens in the next cell.
modelStats = {}
recordStrs = [s.split('\n')[0].split('\t') for s in open("NaiveBayes/model1.txt").readlines()]
for word, statsStr in recordStrs:
    modelStats[word] = statsStr.split(",")
modelStats
# load model file
# convert strings to floats using Pythons map function
# the map iterates over each element in the list apply the float() function
# which converts a string to float
# RESULT: modelStats now contains our multinomial Naive Bayes model
# Load the model file with float conversion.
# FIX: the original left the file object from open() dangling; `with`
# guarantees the handle is closed when the cell finishes.
modelStats = {}
with open("NaiveBayes/model1.txt") as modelFile:
    recordStrs = [s.split('\n')[0].split('\t') for s in modelFile.readlines()]
for word, statsStr in recordStrs:
    # convert the comma-separated stats string to a list of floats
    # RESULT: modelStats now contains our multinomial Naive Bayes model
    modelStats[word] = map(float, statsStr.split(","))
modelStats
#classify a sample document using the model
# Use the following record to test
# D5 0 Chinese Chinese Chinese Tokyo Japan
# FIRST : print Pr(Word|Class) in an unextracted form
# sample record: tab-separated docID, class label, document text
line = "D5	0	Chinese Chinese Chinese Tokyo Japan"
docID, docClass,text = line.split("\t",2)
words = text.split()
#print docID,docClass, words
# print the full stats record for each token (freqs and conditional probs)
for word in words:
    print "Pr(",word, "| Class)", modelStats[word] #Pr(Class=0| Doc) all stats
# +
# classify a sample document using the model
# Use the following record to test
# D5 0 Chinese Chinese Chinese Tokyo Japan
# --------------------------------------------------------------------
# Posterior Probabilities Pr(Class=0| Doc) and Pr(Class=1| Doc)
# Naive Bayes inference Pr(Class=0| Doc) ~ Pr(Class=0) * Pr(Class=0| word1) * Pr(Class=0| word2)......
# sample record: tab-separated docID, class label, document text
line = "D5	0	Chinese Chinese Chinese Tokyo Japan"
docID, docClass,text = line.split("\t",2)
words = text.split()
#Class priors (Pr(Class =0) and Pr(Class =1))
c0, c1, prClass0, prClass1 = map(float, modelStats["ClassPriors"])
print "prClass0=%04.3f, prClass1=%04.3f" % (prClass0, prClass1)
# Posterior Probabilities Pr(Class=0| Doc) and Pr(Class=1| Doc)
# Naive Bayes inference Pr(Class=0| Doc) ~ Pr(Class=0) * Pr(Class=0| word1) * Pr(Class=0| word2)......
PrClass0GivenDoc = prClass0
PrClass1GivenDoc = prClass1
for word in words:
    #print word, modelStats[word] #Pr(word|class = 0) all stats
    # stats indices: [2] = Pr(word|class 0), [3] = Pr(word|class 1)
    print 'Pr(%s|class = 0) = %04.3f' %(word, modelStats[word][2]) #Pr(word|class = 0)
    print 'Pr(%s|class = 1) = %04.3f' %(word, modelStats[word][3]) #Pr(word|class = 1)
    # multiply the conditionals in (may underflow for long docs -- see log version below)
    PrClass0GivenDoc *= modelStats[word][2]
    PrClass1GivenDoc *= modelStats[word][3]
print "Pr(Class=0| Doc=D5) is %6.5f" % (PrClass0GivenDoc)
print "Pr(Class=1| Doc=D5) is %6.5f" % (PrClass1GivenDoc)
# -
# __CODE TIP:__ Use logs to avoid precision problems!
# +
# classify a sample document using the model
# Use the following record to test
# D5 0 Chinese Chinese Chinese Tokyo Japan
# --------------------------------------------------------------------
# Posterior Probabilities Pr(Class=0| Doc) and Pr(Class=1| Doc)
# Naive Bayes inference Pr(Class=0| Doc) ~ Pr(Class=0) * Pr(Class=0| word1) * Pr(Class=0| word2)......
from math import log
from math import exp
line = "D5 0 Chinese Chinese Chinese Tokyo Japan"
docID, docClass,text = line.split("\t",2)
words = text.split()
#Class priors (Pr(Class =0) and Pr(Class =1))
c0, c1, prClass0, prClass1 = map(float, modelStats["ClassPriors"])
print "prClass0=%04.3f, prClass1=%04.3f" % (prClass0, prClass1)
# Posterior Probabilities Pr(Class=0| Doc) and Pr(Class=1| Doc)
# Naive Bayes inference Pr(Class=0| Doc) ~ log(Pr(Class=0)) + log(Pr(Class=0| word1)) + log(Pr(Class=0| word2))......
PrClass0GivenDoc = log(prClass0)
PrClass1GivenDoc = log(prClass1)
for word in words:
#print word, modelStats[word] #Pr(word|class = 0) all stats
print 'Pr(%s|class = 0) = %04.3f' %(word, modelStats[word][2]) #Pr(word|class = 0)
print 'Pr(%s|class = 1) = %04.3f' %(word, modelStats[word][3]) #Pr(word|class = 1)
PrClass0GivenDoc += log(modelStats[word][2])
PrClass1GivenDoc += log(modelStats[word][3])
print "Pr(Class=0| Doc=D5) = %6.5f, log(Pr(Class=0| Doc=D5)) = %f" % (exp(PrClass0GivenDoc), PrClass0GivenDoc)
print "Pr(Class=1| Doc=D5) = %6.5f, log(Pr(Class=1| Doc=D5)) = %f" % (exp(PrClass1GivenDoc), PrClass0GivenDoc)
# -
# ### HW2.3.1 Implement Multinomial Naive Bayes and test your implementation on a small dataset (Chinese dataset: 5 documents)
#
# To implement Multinomial Naive Bayes in Hadoop streaming MapReduce, we will divide our work into 2 map-reduce tasks: 1) a modeling phase where the output is a model file; and 2) a classification phase where we use the model.
#
# *For phase 1: Build a multinomial Naive Bayes Model*
# * MAPPER_3a:
# * As with the mapper in hw2.2, generates raw word tokens with an added field for the spam value associated with that instance.
# * REDUCER_3a: For each word $w_i$
# * Aggregates from mapper: $\forall w_j\in Vocab, C \in \{\text{spam},\text{ham}\}: Count(w_j,C)$
# * And calculates $P(w_i\lvert C) \approx \frac{Count(w_i,C)}{\sum_jCount(w_j,C)}$
#
#
# *For phase 2: Classify each example using the learnt multinomial Naive Bayes Model*
# * MAPPER_3b: for each $Doc$, calculates
# * $\forall w_j\in Vocab: argmax_{C∈\{spam,ham\}}\big( log(P(C)) + \sum_{w_i \in Doc}log\big(P(w_i\lvert C)\big)$
# * REDUCER_3b: calculates
# * $Err(Model, DF) = \frac{count_{DF}(\:\hat{y} \:\neq \:label\:)}{|DF|}$
# __Naive Bayes Model Class:__
# +
# %%writefile NaiveBayes/NaiveBayesModel.py
# #!/usr/bin/env python
from math import log
from math import exp
class NaiveBayesModel(object):
    """Multinomial Naive Bayes model loaded from a TSV model file.

    Each model record has the form
    "word<TAB>Freq(word in HAM),Freq(word in SPAM),Pr(word|HAM),Pr(word|SPAM)"
    plus a special "ClassPriors" record holding the class counts and priors.
    NOTE(review): there is no smoothing, so classify()/classifyInLogs() raise
    KeyError for any word absent from the model file.
    """
    def __init__(self, modelFile):
        """Read modelFile into self.model (word -> list of 4 floats)."""
        self.model = {}
        recordStrs = [s.split('\n')[0].split('\t') for s in open(modelFile).readlines()]
        for word, statsStr in recordStrs:
            self.model[word] = map(float, statsStr.split(","))
        #Class priors: counts and probs (Pr(Class =0) and Pr(Class =1))
        self.c0, self.c1, self.prClass0, self.prClass1 = map(float, self.model["ClassPriors"])
    def classify(self, doc):
        """Return [Pr(Class=0|doc), Pr(Class=1|doc)] as raw (unnormalized) products.

        doc is an iterable of tokens. May underflow for long documents;
        prefer classifyInLogs().
        """
        # Posterior Probabilities Pr(Class=0| Doc) and Pr(Class=1| Doc)
        # Naive Bayes inference Pr(Class=0| Doc) ~ Pr(Class=0) * Pr(Class=0| word1) * Pr(Class=0| word2)......
        PrClass0GivenDoc = self.prClass0
        PrClass1GivenDoc = self.prClass1
        for word in doc:
            # stats indices: [2] = Pr(word|class 0), [3] = Pr(word|class 1)
            PrClass0GivenDoc *= self.model[word][2]
            PrClass1GivenDoc *= self.model[word][3]
        return([PrClass0GivenDoc, PrClass1GivenDoc])
    # the natural log based version of this
    # helps avoid underflow issues
    def classifyInLogs(self, doc):
        """Return [log Pr(Class=0|doc), log Pr(Class=1|doc)]; -inf for a zero prob."""
        # Posterior Probabilities Pr(Class=0| Doc) and Pr(Class=1| Doc)
        # Naive Bayes inference Pr(Class=0| Doc) ~ Pr(Class=0) * Pr(Class=0| word1) * Pr(Class=0| word2)......
        PrClass0GivenDoc = log(self.prClass0)
        PrClass1GivenDoc = log(self.prClass1)
        for word in doc: #NOTE: Improvement: on loading one should convert probs to log probs!
            c0 = self.model[word][2]
            c1 = self.model[word][3]
            if c0 != 0:
                PrClass0GivenDoc += log(c0)
            else:
                # one zero-probability word drives the whole class posterior to zero
                PrClass0GivenDoc = float("-inf")
            if c1 != 0:
                PrClass1GivenDoc += log(c1)
            else:
                PrClass1GivenDoc = float("-inf")
        return([PrClass0GivenDoc, PrClass1GivenDoc])
    def printModel(self):
        """Pretty-print the priors and per-word stats to stdout."""
        print "NaiveBayes Model starts here\n----------------"
        print "PRIORS: prClass0=%04.3f, prClass1=%04.3f" % (self.prClass0, self.prClass1)
        for word, stats in self.model.items():
            print "Pr(",word, "| Class)", stats #Pr(Class=0| Doc) all stats
        print "NaiveBayes Model ENDS here\n----------------"
# -
# %run NaiveBayes/NaiveBayesModel.py
# __Test Driver for Multinomial Naive Bayes Classifier:__
# +
# Classify a sample document using the model
# Use the following record to test
# D5 0 Chinese Chinese Chinese Tokyo Japan
# --------------------------------------------------------------------
# Posterior Probabilities Pr(Class=0| Doc) and Pr(Class=1| Doc)
# Naive Bayes inference Pr(Class=0| Doc) ~ Pr(Class=0) * Pr(Class=0| word1) * Pr(Class=0| word2)......
# Load the toy model and classify the sample record with both the raw
# probability version and the underflow-safe log version.
NBModel = NaiveBayesModel("NaiveBayes/model1.txt")
NBModel.printModel()
line = "D5	0	Chinese Chinese Chinese Tokyo Japan"
docID, docClass,text = line.split("\t",2)
words = text.split()
PrClass0GivenDoc, PrClass1GivenDoc = NBModel.classify(words)
print "Pr(Class=0| Doc=%s) is %6.5f" % (docID, PrClass0GivenDoc)
print "Pr(Class=1| Doc=%s) is %6.5f" % (docID, PrClass1GivenDoc)
# log version: exp() recovers the raw probability for display
PrClass0GivenDoc, PrClass1GivenDoc = NBModel.classifyInLogs(words)
print "Pr(Class=0| Doc=D5) = %6.5f, log(Pr(Class=0| Doc=D5)) = %f" % (exp(PrClass0GivenDoc), PrClass0GivenDoc)
print "Pr(Class=1| Doc=D5) = %6.5f, log(Pr(Class=1| Doc=D5)) = %f" % (exp(PrClass1GivenDoc), PrClass1GivenDoc)
# -
# __Modelling Phase__
# +
# %%writefile NaiveBayes/mapper_model.py
# #!/usr/bin/env python
# Modelling-phase mapper: tokenizes each email record (subject + body) from
# stdin; the student section emits tokens with their class label for counting.
import sys, re, string
# Init mapper phase
# define regex for punctuation removal
regex = re.compile('[%s]' % re.escape(string.punctuation))
# inner loop mapper phase: process each record
# input comes from STDIN (standard input)
for line in sys.stdin:
    # remove leading and trailing whitespace
    line = line.strip()
    # split the line into words
    # use subject and body
    parts = line.split("\t")
    docID, docClass, title = parts[0:3]
    if len(parts) == 4:
        body = parts[3]
    else:
        body = ""    # some records have an empty body field
    # remove punctuations, only have white-space as delimiter
    emailStr = regex.sub(' ', title.lower() + " " +body.lower()) #replace each punctuation with a space
    emailStr = re.sub( '\s+', ' ', emailStr ) # replace multiple spaces with a space
    # split the line into words
    words = emailStr.split()
    # START STUDENT CODE HW231MAPPER_MODEL
    # define regex for punctuation removal
    # increase counters
    # write the results to STDOUT (standard output);
    # what we output here will be the input for the
    # Reduce step, i.e. the input for reducer.py
    #
    # tab-delimited; the trivial word count is 1
    # END STUDENT CODE HW231MAPPER_MODEL
# +
# %%writefile NaiveBayes/reducer_model.py
# #!/usr/bin/env python
# Modelling-phase reducer: aggregates per-class token counts from the mapper
# and (in the student section) writes out the Naive Bayes model file.
from operator import itemgetter
import sys, operator
import numpy as np
# START STUDENT CODE HW231REDUCER_MODEL
current_word = None                               # token currently being aggregated
smooth_factor = 0 # no smoothing
current_count = [smooth_factor, smooth_factor]    # running [class-0 count, class-1 count]
msgIDs = {}                                       # document IDs seen (used for the priors)
word = None
wordcount = {}                                    # word -> per-class counts
# input comes from STDIN
# remove leading and trailing whitespace
# parse the input we got from mapper.py
# convert count and spam flag (currently a string) to int
# handle msgID - store all IDs as we don't have too much
# not the best way to get prior, a two-level MapReduce jobs (ID - word) would be optimal
# calculate NB parameters, and write the dictionary to a file for the classification job
# prior probabilities
# conditional probability
# END STUDENT CODE HW231REDUCER_MODEL
# -
# __Classification Phase:__
# +
# %%writefile NaiveBayes/mapper_classify.py
# #!/usr/bin/env python
# Classification mapper: tokenize each email record exactly like the
# training mapper, then (in the student section) score it with the model.
# Bug fix: `import NaiveBayesModel` binds the *module*, so the call
# NaiveBayesModel("NaiveBayes.txt") below raised TypeError ("'module'
# object is not callable"); import the class from the module instead.
from NaiveBayesModel import NaiveBayesModel
import sys, re, string, subprocess
import sys, operator, math
import numpy as np
# Init mapper phase
# read the MODEL into memory
# The model file resides on the local disk (make sure to ship it home from HDFS).
# In the Hadoop command line be sure to add the -files command line option.
NBModel = NaiveBayesModel("NaiveBayes.txt")
# define regex for punctuation removal
regex = re.compile('[%s]' % re.escape(string.punctuation))
# inner loop mapper phase: process each record
# input comes from STDIN (standard input)
for line in sys.stdin:
    # remove leading and trailing whitespace
    line = line.strip()
    # Record layout: docID <TAB> class <TAB> subject [<TAB> body]
    parts = line.split("\t")
    docID, docClass, title = parts[0:3]
    if len(parts) == 4:
        body = parts[3]
    else:
        body = ""
    # use subject and body
    # remove punctuations, only have white-space as delimiter
    # Consistency fix: lowercase the title as the training mapper does;
    # otherwise capitalized subject tokens can never match the learnt model.
    emailStr = regex.sub(' ', title.lower() + " " +body.lower()) #replace each punctuation with a space
    emailStr = re.sub( '\s+', ' ', emailStr ) # replace multiple spaces with a space
    # split the line into words
    words = emailStr.split()
    # START STUDENT CODE HW231MAPPER_CLASSIFY
    # END STUDENT CODE HW231MAPPER_CLASSIFY
# +
# %%writefile NaiveBayes/reducer_classify.py
# #!/usr/bin/env python
from operator import itemgetter
import sys, operator, math
import numpy as np
numberOfRecords = 0
NumberOfMisclassifications=0
classificationAccurary = 0
# START STUDENT CODE HW231REDUCER_CLASSIFY
# input comes from STDIN
# END STUDENT CODE HW231REDUCER_CLASSIFY
print 'Multinomial Naive Bayes Classifier Results are%\d,%d,%f' % (numberOfRecords, NumberOfMisclassifications, classificationAccurary)
# -
# __Run Map Reduce Job to learn a multinomial Naive Model from data:__
# +
# START STUDENT CODE HW231HADOOP_MODEL
# STEP 1: make input directory on HDFS
# # !hdfs dfs -mkdir -p /user/Xxxxxxx
# STEP2: upload data to HDFS
# STEP3: Make sure to remove the results directory
# STEP4: Run job
# END STUDENT CODE HW231HADOOP_MODEL
# -
# __Run Map Reduce Job to classify data:__
# +
# START STUDENT CODE HW231HADOOP_CLASSIFY
# END STUDENT CODE HW231HADOOP_CLASSIFY
# -
# __Display the accuracy measure:__
# +
# START STUDENT CODE HW231HADOOP_CLASSIFY_RESULTS
# (you may or may not need to add anything else in this block depending on your implementation)
# !hdfs dfs -cat HW2.3.1/classifications/part-00000
# END STUDENT CODE HW231HADOOP_CLASSIFY_RESULTS
# -
# __Write a systems test to regression test your map reduce job on the "Chinese" dataset:__
# Please reserve document D5 as an independent test document (i.e., don't use it for training. Just use it for testing) Use the Chinese dataset to unit test your Mapper, reducer and final output. Refer to HW 2.0.2 for an example of unit testing & systems testing.
# for convenience, here are the data again:
# !cat NaiveBayes/chineseExample.txt
# +
# START STUDENT CODE HW231HADOOP_CHINESE_UNIT_TEST
# END STUDENT CODE HW231HADOOP_CHINESE_UNIT_TEST
# -
# ### HW2.3.2 Learn a Multinomial Naive Bayes model (with no smoothing) by hand
#
# As a final confirmation that your implementation above works correctly, use the space below to learn the Multinomial Naive Bayes model by hand. Please include calculations for both:
# * the learnt multinomial naive Bayes with NO smoothing
# * the classification of the D5 test document
#
# Show the formulas and your calculations in a nice tabular form. (TIP: since Markdown is a superset of HTML you can even add things like HTML tables as follows:__
#
# | This | is |
# |------|------|
# | a | table|
#
# For more background on notebook formatting see: [here](https://athena.brynmawr.edu/jupyter/hub/dblank/public/Jupyter%20Notebook%20Users%20Manual.ipynb, notebook formating)
# +
# ADD YOUR "HAND CALCULATIONS" BELOW
# -
# Finally, compare your hand calculations for the following with textbook calculation from [chapter 13](http://nlp.stanford.edu/IR-book/pdf/13bayes.pdf) which includes smoothing (Image [linked here](https://www.dropbox.com/s/f17c4mvmm5fuwav/chineseTestCaseFullyWorkedOut.png) and shown below). What do you observe?
# download the image with worked solution and render it in the next cell below.
# !curl "https://www.dropbox.com/s/f17c4mvmm5fuwav/chineseTestCaseFullyWorkedOut.png"
# %%HTML
<img src="chineseTestCaseFullyWorkedOut.png">
# +
# ADD YOUR COMMENTARY BELOW
# -
# ### HW2.3.3 Learn a Multinomial Naive Bayes model (with no smoothing) for SPAM filtering on the ENRON dataset
#
# 1. Using the MNB code you wrote in 2.3.1, learn a SPAM filtering model from the ENRON dataset provided above. Save the model to file SPAM_Model_MNB.tsv.
#
# 2. Write a mapreduce job to sort the results alphabetically by term in increasing order, and show the top 10 and bottom 10 terms and their corresponding model entries.
# #### Run Map Reduce Job to learn a multinomial Naive Model from data
# +
# START STUDENT CODE HW231HADOOP_MODEL_SPAM
# STEP 1: make input directory on HDFS
# # !hdfs dfs -mkdir -p /user/Xxxxxxx
# STEP2: upload data to HDFS
# STEP3: Make sure to remove the results directory
# STEP4: Run job
# STEP5: display model (first 10 lines only)
# END STUDENT CODE HW231HADOOP_MODEL_SPAM
# -
# ### HW 2.3.4 Classify Documents using the learnt Multinomial Naive Bayes model using Hadoop Streaming
#
# Classify each Enron email messages using the learnt Naive Bayes classifier (Testing on the training set is bad practice but we will allow that here to simplify the work here).
#
# Write a separate map-reduce job to classify a corpus of documents using a provided/learnt Multinomial Naive Bayes model. A model file consisting of the triples _word\tPr(Word|HAM)\tPr(Word|SPAM)_ should be broadcast to the worker nodes using the _-file_ command line option when running a Hadoop streaming job. Please write the corresponding mapper and reducer portions of this classifier job.
#
# #### Note: Map Tasks and map lifecycles
# Note that for each chunk in the input data a mapper task is executed. Each mapper task has three phases: a init phase (to initialize variables used down stream in the mapper task or read in data from disk that might be used downstream in the map task); a loop to process each record in the input stream; and a final phase that is executed prior to the map task finishing. A Reduce task goes through a similar lifecycle.
#
# #### NOTE: on small multiplying small numbers
# Multiplying lots of probabilities, which are between 0 and 1, can result in floating-point underflow. Since log(xy) = log(x) + log(y), it is better to perform all computations by summing logs of probabilities rather than multiplying probabilities. Please pay attention to probabilities that are zero! They will need special attention. Count up how many times classification of a document results in a zero class posterior probability for each class and report when using the Enron training set for evaluation.
#
# * Report the performance of your learnt classifier in terms of misclassification error rate of your multinomial Naive Bayes Classifier.
# * Error Rate = misclassification rate with respect to a provided set (say training set in this case). It is more formally defined here:
#
# * Let DF represent the evaluation set in the following:
#
# * Err(Model, DF) = |{(X, c(X)) ∈ DF : c(X) != Model(x)}| / |DF|
#
# Where || denotes set cardinality; c(X) denotes the class of the tuple X in DF; and Model(X) denotes the class inferred by the Model “Model”
#
#
# __In this exercise, please complete the following tasks:__
# * Once again unit test your classifier map reduce job using the Chinese example. Please show a trace of your prediction and classification steps.
# * Once you are happy with the results from the Chinese dataset, repeat the process for the Enron dataset.
# #### Run Map Reduce Job to classify data
# +
# START STUDENT CODE HW234HADOOP_CLASSIFY_SPAM
#run classifier job
#Print accuracy
# END STUDENT CODE HW234HADOOP_CLASSIFY_SPAM
# -
# #### Display the accuracy measure
# +
# START STUDENT CODE HW234HADOOP_CLASSIFY_RESULTS
# END STUDENT CODE HW234HADOOP_CLASSIFY_RESULTS
# -
# #### Plot a histogram of the posterior probabilities
# Plot a histogram of the posterior probabilities (i.e., Pr(Class|Doc)) for each class over the ENRON training set. Summarize what you see.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
# START STUDENT CODE HW234PLOT
# END STUDENT CODE HW234PLOT
# -
# ## HW2.4 Use Laplace plus-one smoothing
#
# Repeat HW2.3 with the following modification: use Laplace plus-one smoothing. **Please replace code tags like HW231HADOOP_CLASSIFY with HW241HADOOP_CLASSIFY through out for your submission.**
# In addition, compare the misclassifcation error rates for 2.3 versus 2.4 and explain the differences.
#
# **NOTE** for **Jupyter lovers**, you can now cut and paste MULTIPLE CELLS: press the ESC key and the press the SHIFT key in conjunction with UP or DOWN arrow key to select the cells you wish to copy (or delete). Then press ESC-C (to copy). Then select the cell where you wish to copy the cells (in the buffer) and press ESC-V to insert the cells in the buffer below the selected cell.
# +
### cut and paste MULTIPLE CELLS BELOW
# -
#
# ## HW2.5 Ignore rare words (Optional)
#
# Repeat HW2.4. **Please replace code tags like HW231HADOOP_CLASSIFY with HW251HADOOP_CLASSIFY through out for your submission.** This time when modeling and classification ignore tokens with a frequency of less than three (3) in the training set. How does it affect the misclassifcation error of learnt naive multinomial Bayesian Classifier on the training dataset. Report the error and the change in error.
#
# __HINT:__ ignore tokens with a frequency of less than three (3). Think of this as a preprocessing step. How many mapreduce jobs do you need to solve thus homework?
# ## HW2.6 Benchmark your code with the Python SciKit-Learn (OPTIONAL)
#
# HW2.6 Benchmark your code with the Python SciKit-Learn implementation of the multinomial Naive Bayes algorithm
#
# It always a good idea to benchmark your solutions against publicly available libraries/frameworks such as SciKit-Learn, the Machine Learning toolkit available in Python. In this exercise, we benchmark ourselves against the SciKit-Learn implementation of multinomial Naive Bayes. For more information on this implementation see: http://scikit-learn.org/stable/modules/naive_bayes.html more
#
# In this exercise, please complete the following tasks:
#
# * Run the Multinomial Naive Bayes algorithm (using default settings) from SciKit-Learn over the same training data used in HW2.5 and report the misclassification error (please note some data preparation might be needed to get the Multinomial Naive Bayes algorithm from SkiKit-Learn to run over this dataset)
# * Prepare a table to present your results, where rows correspond to approach used (SkiKit-Learn versus your Hadoop implementation) and the column presents the training misclassification error
# * Explain/justify any differences in terms of training error rates over the dataset in HW2.5 between your Multinomial Naive Bayes implementation (in Map Reduce) versus the Multinomial Naive Bayes implementation in SciKit-Learn
# ### HW 2.6.1 Bernoulli Naive Bayes (OPTIONAL: note this exercise is a stretch HW and optional)
# - Run the Bernoulli Naive Bayes algorithm from SciKit-Learn (using default settings) over the same training data used in HW2.6 and report the misclassification error
# - Discuss the performance differences in terms of misclassification error rates over the dataset in HW2.5 between the Multinomial Naive Bayes implementation in SciKit-Learn with the Bernoulli Naive Bayes implementation in SciKit-Learn. Why such big differences. Explain.
#
# Which approach to Naive Bayes would you recommend for SPAM detection? Justify your selection.
# ## HW2.7 Preprocess the Entire Spam Dataset (OPTIONAL)
#
# The Enron SPAM data in the following folder [enron1-Training-Data-RAW](https://www.dropbox.com/sh/hemnvr0422nr36g/AAAPoK-aYxkFGxGjzaeRNEwSa?dl=0) is in raw text form (with subfolders for SPAM and HAM that contain raw email messages in the following form:
#
# * Line 1 contains the subject
# * The remaining lines contain the body of the email message.
#
# In Python write a script to produce a TSV file called train-Enron-1.txt that has a similar format as the enronemail_1h.txt that you have been using so far. Please pay attention to funky characters and tabs. Check your resulting formatted email data in Excel and in Python (e.g., count up the number of fields in each row; the number of SPAM mails and the number of HAM emails). Does each row correspond to an email record with four values? Note: use "NA" to denote empty field values.
# ## HW2.8 Build and evaluate a NB classifier on the Entire Spam Dataset (OPTIONAL) <a name="2.8"></a>
# [Back to Table of Contents](#TOC)
# Using Hadoop Map-Reduce write job(s) to perform the following:
# -- Train a multinomial Naive Bayes Classifier with Laplace plus one smoothing using the data extracted in HW2.7 (i.e., train-Enron-1.txt). Use all white-space delimitted tokens as independent input variables (assume spaces, fullstops, commas as delimiters). Drop tokens with a frequency of less than three (3).
# -- Test the learnt classifier using enronemail_1h.txt and report the misclassification error rate. Remember to use all white-space delimitted tokens as independent input variables (assume spaces, fullstops, commas as delimiters). How do we treat tokens in the test set that do not appear in the training set?
# ### HW2.8.1 (OPTIONAL)
# - Run both the Multinomial Naive Bayes and the Bernoulli Naive Bayes algorithms from SciKit-Learn (using default settings) over the same training data used in HW2.8 and report the misclassification error on both the training set and the testing set
# - Prepare a table to present your results, where rows correspond to approach used (SciKit-Learn Multinomial NB; SciKit-Learn Bernoulli NB; Your Hadoop implementation) and the columns present the training misclassification error, and the misclassification error on the test data set
# - Discuss the performance differences in terms of misclassification error rates over the test and training datasets by the different implementations. Which approach (Bernoulli versus Multinomial) would you recommend for SPAM detection? Justify your selection.
| 273,645 |
/11_Chaines/td11.ipynb | 42f17df7370d931f00340d7e95d0a474b0d0750d | [] | no_license | FelixMiroudot/M1102 | https://github.com/FelixMiroudot/M1102 | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 9,337 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pymc3 as pm
# Funda (real-estate) listings scraped on 2018-02-10; the CSV's first
# column is the row index.
df = pd.read_csv("../find-a-house/clean-funda-2018-02-10.csv", index_col=0)
df.head()
# Average asking price.
# NOTE(review): the price units are not evident from this file -- confirm.
np.mean(df.price)
# Let's try to guess a distribution that may match how prices were generated
# Overlay a scaled chi-square(5) sample on the observed price histogram as
# a first visual guess at the generating distribution.
plt.figure(figsize=(20,10))
df.price.hist(bins=range(100, 4000, 100));
plt.hist(np.random.chisquare(5, size=len(df)) * 100, bins=range(100, 4000, 100), alpha=0.8)
plt.title("Price");
# +
def chance_data_is_in_distribution(data, distribution, epsilon=10, n_samples=100):
    """Rough likelihood that `data` could have been drawn from `distribution`.

    Draws `n_samples` points from `data` (with replacement, via NumPy's
    global RNG -- nondeterministic unless np.random.seed is set) and
    multiplies together, for each sampled point, the fraction of
    `distribution` values within `epsilon` of it.  Each per-point fraction
    is floored at 1e-7 so a single miss does not zero out the product.

    data -- 1-d array-like of observed values
    distribution -- 1-d array-like of candidate-distribution samples
    epsilon -- closeness tolerance (generalized from the hard-coded 10)
    n_samples -- number of points sampled from `data` (previously fixed 100)
    Returns a float in [0, 1] (can underflow to 0.0 for very poor fits).
    """
    distribution = np.asarray(distribution, dtype=float)
    total_chance = 1.0
    for y in np.random.choice(data, n_samples):
        # Vectorized count of |y_ - y| <= epsilon over the whole
        # distribution (the original looped in Python, O(n*m)).
        chance = np.mean(np.abs(distribution - y) <= epsilon)
        total_chance *= max(chance, 0.0000001)
    return total_chance
# Score one hand-picked candidate: chi-square(2) scaled by 340, matched
# against the observed prices.
distribution = np.random.chisquare(2, size=len(df)) * 340
chance_data_is_in_distribution(df.price, distribution)
# +
def test_distribution(alpha, beta):
    # Likelihood score of a chi-square(alpha) * beta candidate against the
    # observed prices (reads the notebook-global `df`).
    distribution = np.random.chisquare(alpha, size=len(df)) * beta
    return chance_data_is_in_distribution(df.price, distribution)

# Grid-search the two chi-square parameters for the highest likelihood.
best_result = None
best_combination = ()
for alpha in range(1, 10):
    for beta in range(100, 500, 25):
        result = test_distribution(alpha, beta)
        # NOTE(review): a score of exactly 0 can only win on the very first
        # iteration; afterwards the `result != 0` guard skips it.
        if best_result is None or (result != 0 and result > best_result):
            best_result = result
            best_combination = (alpha, beta)
# Prints the winning (alpha, beta) pair, not the score itself.
print("Best result:", best_combination)
# -
# Overlay the grid-search winner (chi-square(2) * 275) on the observed
# price histogram.
plt.figure(figsize=(20,10))
df.price.hist(bins=range(100, 4000, 100))
plt.hist(np.random.chisquare(2, size=len(df)) * 275, bins=range(100, 4000, 100), alpha=0.8)
plt.title("Price");
# Living area looks roughly exponential; overlay an exponential sample
# with the same mean for comparison.
plt.figure(figsize=(20,10))
df.living_area.hist(bins=range(100, 500, 20));
plt.hist(np.random.exponential(df.living_area.mean(), size=len(df)), bins=range(100, 500, 20), alpha=0.8);
plt.title("Living Area");
# +
with pm.Model() as model:
    # Wald (inverse-Gaussian) likelihood for price, with flat priors on
    # the mean (mu) and shape (lambda) parameters.
    mu = pm.Uniform('mu', lower=0, upper=df.price.mean() * 2)
    lamb = pm.Uniform('lamb', lower=500, upper=5000)
    pior_price = pm.Wald('price', mu, lamb, observed=df.price)
    # Same model family for living area, fitted in the same model but
    # with independent parameters.  ('pior_price' is a typo for 'prior'.)
    mu_area = pm.Uniform('mu_area', lower=0, upper=df.living_area.mean() * 2)
    lamb_area = pm.Uniform('lamb_area', lower=1, upper=1000)
    living_area = pm.Wald('living_area', mu_area, lamb_area, observed=df.living_area)
    trace = pm.sample(1000)
# Posterior means of the four Wald parameters.
print(trace['mu'].mean())
print(trace['lamb'].mean())
print(trace['mu_area'].mean())
print(trace['lamb_area'].mean())
# +
# Overlay Wald samples on each histogram.  The (680, 1253) and (110, 341)
# parameters are the posterior means printed by the previous cell,
# hard-coded here after rounding.
plt.figure(figsize=(20,10))
df.price.hist(bins=range(100, 4000, 50))
plt.hist(np.random.wald(680, 1253, size=len(df)), bins=range(100, 4000, 50), alpha=0.8)
plt.title("Price")
plt.show()
plt.figure(figsize=(20,10))
df.living_area.hist(bins=range(100, 500, 20))
plt.hist(np.random.wald(110, 341, size=len(df)), bins=range(100, 500, 20), alpha=0.8)
plt.title("Living Area")
plt.show()
# +
from sklearn.model_selection import train_test_split
X = df.living_area
y = df.price
# Hold out a third of the listings for evaluation.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# Bayesian simple linear regression: price ~ alpha + beta * living_area,
# homoskedastic Gaussian noise whose std has a uniform prior.
with pm.Model() as model:
    std = pm.Uniform("std", 0, 5000)
    beta = pm.Normal("beta", mu=0, sd=5000)
    alpha = pm.Normal("alpha", mu=0, sd=5000)
    mean = pm.Deterministic("mean", alpha + beta*X_train)
    obs = pm.Normal("obs", mu=mean, sd=std, observed=y_train)
    trace = pm.sample(10000, step=pm.Metropolis())
# +
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score

# Posterior samples from the Bayesian linear regression fitted above.
std_samples = trace["std"]
alpha_samples = trace["alpha"]
beta_samples = trace["beta"]
N = std_samples.shape[0]
# One Gaussian noise draw per posterior sample, for the predictive band.
noise = std_samples*np.random.randn(N)
# Posterior-mean prediction line over the held-out points.
pred = [ alpha_samples.mean() + beta_samples.mean() * x for x in X_test ]
plt.scatter(X_test, y_test)
plt.plot(X_test, pred)
# 5%..95% posterior-predictive band.
pred_min = [ np.quantile(alpha_samples + beta_samples * x + noise, 0.05) for x in X_test ]
plt.plot(X_test, pred_min)
pred_max = [ np.quantile(alpha_samples + beta_samples * x + noise, 0.95) for x in X_test ]
plt.plot(X_test, pred_max)
np.quantile(alpha_samples, 0.05)
plt.show()
print(r2_score(y_test, pred))

# Frequentist least-squares baseline for comparison.
model = LinearRegression()
# Bug fix: the baseline was fitted on the *test* split, leaking the
# evaluation data into training and inflating its R^2; fit on the
# training split and evaluate on the held-out test split instead.
model.fit(np.array([X_train]).T, y_train)
pred = model.predict(np.array([X_test]).T)
plt.scatter(X_test, y_test)
plt.plot(X_test, pred)
plt.show()
print(r2_score(y_test, pred))
# +
import theano.tensor as tt

# Heteroskedastic variant: the noise std scales linearly with living area
# (sd = std_base * x), so larger houses are allowed a wider price spread.
# NOTE(review): `tt` is imported here but never used in this cell.
with pm.Model() as model:
    beta = pm.Normal("beta", mu=0, sd=5000)
    alpha = pm.Normal("alpha", mu=0, sd=5000)
    std_base = pm.Uniform("std_base", 0, 5000)
    std = pm.Deterministic("std", std_base * X_train)
    mean = pm.Deterministic("mean", alpha + beta * X_train)
    obs = pm.Normal("obs", mu=mean, sd=std, observed=y_train)
    trace = pm.sample(10000, step=pm.Metropolis())

# Discard the first 10000 draws as burn-in.
# NOTE(review): this assumes pm.sample ran multiple chains so draws remain
# after the slice; with a single chain these arrays would be empty -- confirm.
std_samples = trace["std_base"][10000:]
alpha_samples = trace["alpha"][10000:]
beta_samples = trace["beta"][10000:]
N = std_samples.shape[0]
# Posterior-predictive draws per test point; mean line plus 5%-95% band.
preds = [ np.random.normal(np.mean(alpha_samples + beta_samples * x), np.mean(std_samples * x), N) for x in X_test ]
pred = [ y.mean() for y in preds ]
plt.scatter(X_test, y_test)
plt.plot(X_test, pred)
pred_min = [ np.quantile(y, 0.05) for y in preds ]
plt.plot(X_test, pred_min)
pred_max = [ np.quantile(y, 0.95) for y in preds ]
plt.plot(X_test, pred_max)
plt.show()
print(r2_score(y_test, pred))
# +
import theano.tensor as tt

# Power-law mean variant: price ~ alpha + living_area ** beta, keeping the
# same area-proportional noise model as the previous cell.
# NOTE(review): `tt` is imported here but never used in this cell.
with pm.Model() as model:
    beta = pm.Normal("beta", mu=0, sd=5000)
    alpha = pm.Normal("alpha", mu=0, sd=5000)
    std_base = pm.Uniform("std_base", 0, 5000)
    std = pm.Deterministic("std", std_base * X_train)
    mean = pm.Deterministic("mean", alpha + X_train.values ** beta)
    obs = pm.Normal("obs", mu=mean, sd=std, observed=y_train)
    trace = pm.sample(10000, step=pm.Metropolis())

# Burn-in discard; same multiple-chain assumption as the previous cell.
std_samples = trace["std_base"][10000:]
alpha_samples = trace["alpha"][10000:]
beta_samples = trace["beta"][10000:]
N = std_samples.shape[0]
preds = [ np.random.normal(np.mean(alpha_samples + x ** beta_samples), np.mean(std_samples * x), N) for x in X_test ]
pred = [ y.mean() for y in preds ]
# Scatter (not line) plots because X_test is unsorted.
plt.scatter(X_test, y_test)
plt.scatter(X_test, pred)
pred_min = [ np.quantile(y, 0.05) for y in preds ]
plt.scatter(X_test, pred_min)
pred_max = [ np.quantile(y, 0.95) for y in preds ]
plt.scatter(X_test, pred_max)
plt.show()
print(r2_score(y_test, pred))
# -
| 6,507 |
/Hotwater_Eagle_102.ipynb | c4afcb0fa97974853d3d1bb03c83a10d6c381ecf | [
"MIT"
] | permissive | ShepherdCode/BuildingEnergy | https://github.com/ShepherdCode/BuildingEnergy | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 6,853 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Hotwater in Eagle 102
# +
# Resolve the data directory: Google Drive when running on Colab,
# a local directory otherwise.
DATAPATH=''
try:
    # On Google Drive, set path to my drive / data directory.
    from google.colab import drive
    IN_COLAB = True
    PATH='/content/drive/'
    drive.mount(PATH)
    DATAPATH=PATH+'My Drive/data/' # must end in "/"
except:
    # On home computer, set path to local data directory.
    # NOTE(review): a bare except also hides unrelated failures (e.g. a
    # failed drive.mount), and 'data:/' looks like a typo for 'data/' --
    # confirm the local data directory really carries a colon in its name.
    IN_COLAB = False
    DATAPATH='data:/' # must end in "/"
ZIP_FILE='BuildingData.zip'
ZIP_PATH = DATAPATH+ZIP_FILE
HOTWATER_FILE='hotwater.csv'
WEATHER_FILE='weather.csv'
MODEL_FILE='Model' # will be used later to save models
# +
from os import listdir
import csv
from zipfile import ZipFile
import numpy as np
import pandas as pd
from pandas.plotting import autocorrelation_plot
from scipy import stats # mode
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
from matplotlib import colors
# Two-color map for binary labels: label 0 -> red, label 1 -> blue.
mycmap = colors.ListedColormap(['red','blue']) # list color for label 0 then 1
np.set_printoptions(precision=2)
def read_zip_to_panda(zip_filename, csv_filename):
    """Load one CSV member of a zip archive into a DataFrame.

    zip_filename -- path (or file-like object) of the .zip archive
    csv_filename -- name of the CSV file inside the archive
    """
    archive = ZipFile(zip_filename)
    member = archive.open(csv_filename)
    return pd.read_csv(member)
def fix_date_type(panda):
    """Parse the 'timestamp' column as datetimes and make it the index.

    The parsed datetimes are written back into the passed frame (in-place
    mutation of the caller's column); the returned frame is a new object
    indexed by 'timestamp'.
    """
    parsed = pd.to_datetime(panda['timestamp'], infer_datetime_format=True)
    panda['timestamp'] = parsed
    return panda.set_index(['timestamp'])
def get_site_timeseries(panda, site):
    """Extract the rows for one site as a time-indexed DataFrame.

    Assume the panda dataframe has a 'site_id' column and a 'date' column
    whose values pd.DatetimeIndex can parse (call fix_date_type() first
    if needed).  Returns the rows for `site` with the 'date' column
    promoted to a DatetimeIndex and dropped from the columns.
    """
    site_df = panda[panda['site_id'] == site]
    date_index = pd.DatetimeIndex(site_df['date'].values)
    timeseries = site_df.drop('date', axis=1)
    # Bug fix: the original assigned an undefined name ('panda = drop',
    # a NameError) and never returned; attach the datetime index built
    # above and return the resulting frame.
    return timeseries.set_index(date_index)
SITE = 'Eagle'
METER = 'hotwater'
BLDG = 'Eagle_education_Wesley'
# Load weather and hot-water meter readings and index both by timestamp.
wet_df = read_zip_to_panda(ZIP_PATH,WEATHER_FILE)
wet_df = fix_date_type(wet_df)
htwter_df = read_zip_to_panda(ZIP_PATH,HOTWATER_FILE)
htwter_df = fix_date_type(htwter_df)
# Restrict weather to the chosen site; meter columns are named per
# building, prefixed with the site name.
site_specific_weather = wet_df.loc[wet_df['site_id'] == SITE]
bldg_specific_hotwater = htwter_df[[BLDG]]
all_buildings = [x for x in htwter_df.columns if x.startswith(SITE)]
# +
# For every building on the site with sufficiently complete meter data,
# record its weather correlation and a linear-regression RMSE baseline.
cors = []
MAX_BAD = 500 # correlation is higher in buildings without so many NaN and 0
for BLDG in all_buildings:
    # Join this building's hot-water series with the site weather.
    bldg_specific_hotwater = htwter_df[[BLDG]]
    one_bldg_df = pd.concat([bldg_specific_hotwater,site_specific_weather],axis=1)
    one_bldg_df = one_bldg_df.drop(['site_id'],axis=1)
    one_bldg_df = one_bldg_df.rename(columns={BLDG : METER})
    one_bldg_df = one_bldg_df.fillna(0)
    # Count missing/zero meter readings (NaNs were just filled with 0).
    bad = one_bldg_df[METER].isin([0]).sum()
    # Bug fix: this threshold was hard-coded as 500 instead of using the
    # MAX_BAD constant declared above.
    if bad <= MAX_BAD:
        mean = one_bldg_df[METER].mean()
        # Correlation of the meter (row 0) with the 4th column of the
        # correlation matrix.  `.iloc[0, 3]` replaces the deprecated
        # chained `.iloc[0][3]` lookup.
        # NOTE(review): the header below calls this "dew temp corr" --
        # confirm column 3 really is dew temperature.
        cor = one_bldg_df.corr().iloc[0, 3]
        # Linear Regression: weather features -> hot-water usage,
        # first 900 rows as train, the remainder as test.
        X = one_bldg_df.drop(METER,axis=1)
        y = one_bldg_df[METER].fillna(0)
        split = 900
        X_train = X.iloc[0:split]
        y_train = y.iloc[0:split]
        linreg = LinearRegression()
        linreg.fit(X_train,y_train)
        X_test = X.iloc[split:]
        y_test = y.iloc[split:]
        y_pred = linreg.predict(X_test)
        rmse = mean_squared_error(y_test,y_pred,squared=False)
        cors.append([cor,mean,rmse,rmse/mean,BLDG])
print("dew temp corr, dew temp mean, lin reg RMSE, RMSE/mean, BLDG")
for cor in sorted(cors):
    print("%7.4f %10.2f %10.2f %5.2f %s"%(cor[0],cor[1],cor[2],cor[3],cor[4]))
# -
# PyTorch tensors behave much like Numpy arrays, with some nice benefits though such as GPU acceleration which we'll get to later. For now, use the generated data to calculate the output of this simple single layer network.
# > **Exercise**: Calculate the output of the network with input features `features`, weights `weights`, and bias `bias`. Similar to Numpy, PyTorch has a [`torch.sum()`](https://pytorch.org/docs/stable/torch.html#torch.sum) function, as well as a `.sum()` method on tensors, for taking sums. Use the function `activation` defined above as the activation function.
## Calculate the output of this network using the weights and bias tensors
# `activation`, `features`, `weights`, and `bias` are defined in earlier
# notebook cells (not shown in this excerpt).
y = activation((features * weights).sum() + bias)
y
# You can do the multiplication and sum in the same operation using a matrix multiplication. In general, you'll want to use matrix multiplications since they are more efficient and accelerated using modern libraries and high-performance computing on GPUs.
#
# Here, we want to do a matrix multiplication of the features and the weights. For this we can use [`torch.mm()`](https://pytorch.org/docs/stable/torch.html#torch.mm) or [`torch.matmul()`](https://pytorch.org/docs/stable/torch.html#torch.matmul) which is somewhat more complicated and supports broadcasting. If we try to do it with `features` and `weights` as they are, we'll get an error
#
# ```python
# >> torch.mm(features, weights)
#
# ---------------------------------------------------------------------------
# RuntimeError Traceback (most recent call last)
# <ipython-input-13-15d592eb5279> in <module>()
# ----> 1 torch.mm(features, weights)
#
# RuntimeError: size mismatch, m1: [1 x 5], m2: [1 x 5] at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/TH/generic/THTensorMath.c:2033
# ```
#
# As you're building neural networks in any framework, you'll see this often. Really often. What's happening here is our tensors aren't the correct shapes to perform a matrix multiplication. Remember that for matrix multiplications, the number of columns in the first tensor must equal to the number of rows in the second column. Both `features` and `weights` have the same shape, `(1, 5)`. This means we need to change the shape of `weights` to get the matrix multiplication to work.
#
# **Note:** To see the shape of a tensor called `tensor`, use `tensor.shape`. If you're building neural networks, you'll be using this method often.
#
# There are a few options here: [`weights.reshape()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.reshape), [`weights.resize_()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.resize_), and [`weights.view()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view).
#
# * `weights.reshape(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)` sometimes, and sometimes a clone, as in it copies the data to another part of memory.
# * `weights.resize_(a, b)` returns the same tensor with a different shape. However, if the new shape results in fewer elements than the original tensor, some elements will be removed from the tensor (but not from memory). If the new shape results in more elements than the original tensor, new elements will be uninitialized in memory. Here I should note that the underscore at the end of the method denotes that this method is performed **in-place**. Here is a great forum thread to [read more about in-place operations](https://discuss.pytorch.org/t/what-is-in-place-operation/16244) in PyTorch.
# * `weights.view(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)`.
#
# I usually use `.view()`, but any of the three methods will work for this. So, now we can reshape `weights` to have five rows and one column with something like `weights.view(5, 1)`.
#
# > **Exercise**: Calculate the output of our little network using matrix multiplication.
## Calculate the output of this network using matrix multiplication
# torch.mm needs (1, 5) @ (5, 1); view(5, 1) reshapes `weights` so the
# inner dimensions line up.
y = activation(torch.mm(features, weights.view(5,1)) + bias)
y
# ### Stack them up!
#
# That's how you can calculate the output for a single neuron. The real power of this algorithm happens when you start stacking these individual units into layers and stacks of layers, into a network of neurons. The output of one layer of neurons becomes the input for the next layer. With multiple input units and output units, we now need to express the weights as a matrix.
#
# <img src='assets/multilayer_diagram_weights.png' width=450px>
#
# The first layer shown on the bottom here are the inputs, understandably called the **input layer**. The middle layer is called the **hidden layer**, and the final layer (on the right) is the **output layer**. We can express this network mathematically with matrices again and use matrix multiplication to get linear combinations for each unit in one operation. For example, the hidden layer ($h_1$ and $h_2$ here) can be calculated
#
# $$
# \vec{h} = [h_1 \, h_2] =
# \begin{bmatrix}
# x_1 \, x_2 \cdots \, x_n
# \end{bmatrix}
# \cdot
# \begin{bmatrix}
# w_{11} & w_{12} \\
# w_{21} &w_{22} \\
# \vdots &\vdots \\
# w_{n1} &w_{n2}
# \end{bmatrix}
# $$
#
# The output for this small network is found by treating the hidden layer as inputs for the output unit. The network output is expressed simply
#
# $$
# y = f_2 \! \left(\, f_1 \! \left(\vec{x} \, \mathbf{W_1}\right) \mathbf{W_2} \right)
# $$
# +
### Generate some data
# (torch is imported in an earlier notebook cell.)
torch.manual_seed(7) # Set the random seed so things are predictable
# Features are 3 random normal variables
features = torch.randn((1, 3))
# Define the size of each layer in our network
n_input = features.shape[1] # Number of input units, must match number of input features
n_hidden = 2 # Number of hidden units
n_output = 1 # Number of output units
# Weights for inputs to hidden layer
W1 = torch.randn(n_input, n_hidden)
# Weights for hidden layer to output layer
W2 = torch.randn(n_hidden, n_output)
# and bias terms for hidden and output layers
B1 = torch.randn((1, n_hidden))
B2 = torch.randn((1, n_output))
# -
# > **Exercise:** Calculate the output for this multi-layer network using the weights `W1` & `W2`, and the biases, `B1` & `B2`.
## Your solution here
# Forward pass: hidden-layer activations first, then the single output
# unit (`activation` is defined in an earlier notebook cell).
h = activation(torch.mm(features, W1) + B1)
output = activation(torch.mm(h, W2) + B2)
print(output)
# If you did this correctly, you should see the output `tensor([[ 0.3171]])`.
#
# The number of hidden units a parameter of the network, often called a **hyperparameter** to differentiate it from the weights and biases parameters. As you'll see later when we discuss training a neural network, the more hidden units a network has, and the more layers, the better able it is to learn from data and make accurate predictions.
# ## Numpy to Torch and back
#
# Special bonus section! PyTorch has a great feature for converting between Numpy arrays and Torch tensors. To create a tensor from a Numpy array, use `torch.from_numpy()`. To convert a tensor to a Numpy array, use the `.numpy()` method.
import numpy as np
a = np.random.rand(4,3)
a
b = torch.from_numpy(a)
b
b.numpy()
# The memory is shared between the Numpy array and Torch tensor, so if you change the values in-place of one object, the other will change as well.
# Multiply PyTorch Tensor by 2, in place
b.mul_(2)
# Numpy array matches new values from Tensor
a
| 11,305 |
/decision tree.ipynb | 34338f2f78a5874f8a24567d2f54fa81f38073b7 | [] | no_license | imtinan39/Diabetes_Prediction_with_different_algorithms | https://github.com/imtinan39/Diabetes_Prediction_with_different_algorithms | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 29,269 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Load the diabetes dataset from a local path (update as needed).
df=pd.read_csv(r"C:\Users\USER\diabetes\diabetes_data_upload.csv")
df
# Check for missing values per column.
df.isnull().sum(axis=0)
columns=df.columns
columns
from sklearn.preprocessing import LabelEncoder
encoder=LabelEncoder()
# Label-encode every column in place. The single encoder is refit per column,
# which is fine here because its fitted state is never reused afterwards.
for i in columns:
    df[i]=encoder.fit_transform(df[i])
# 'class' is the prediction target; all remaining columns are features.
y=df['class']
x=df.drop('class',axis=1)
x
from sklearn.model_selection import train_test_split
# Default split: 75% train / 25% test, unstratified.
x_train,x_test,y_train,y_test=train_test_split(x,y)
# +
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
# AdaBoost (SAMME.R) over entropy-criterion decision trees, 200 boosting rounds.
classifier=AdaBoostClassifier(DecisionTreeClassifier(criterion="entropy"),algorithm="SAMME.R",
                             n_estimators=200)
classifier.fit(x_train,y_train)
# -
y_pred=classifier.predict(x_test)
from sklearn.metrics import classification_report,confusion_matrix
from sklearn import metrics
# Report confusion matrix, per-class precision/recall/F1, and overall accuracy.
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
| 1,302 |
/ceteiep_maths101_20161005.ipynb | 8e9d5a1536e7ea1408106bcb5376a63ed966246a | [] | no_license | chgogos/ceteiep_maths101 | https://github.com/chgogos/ceteiep_maths101 | 1 | 2 | null | null | null | null | Jupyter Notebook | false | false | .py | 11,911 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import dask.dataframe as dd
from numpy import array
from numpy import argmax
from functools import reduce
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from matplotlib.pyplot import figure
# +
# pip install tensorflow
# -
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import SGDClassifier
from sklearn.feature_extraction import FeatureHasher
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
import datetime
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from sklearn.linear_model import Perceptron
from sklearn.inspection import permutation_importance
# # Predicting a user's persona based on their device's specs, CPU utilization, and average CPU heat
pd.set_option('display.max_columns', 500)
# +
# ok = pd.read_csv('../data/raw/hw_metric_histo.csv000',nrows=100, sep='\t')
# ok['load_ts'].max()
# +
# print(ok['load_ts'].min())
# print(ok['load_ts'].max())
# -
df = pd.read_csv('../data/raw/hw_metric_histo.csv000', usecols=['guid','load_ts', 'batch_id','name','instance','nrs',
'mean', 'histogram_min', 'histogram_max',
'metric_max_val'], sep='\t')
# +
# df.shape
# +
# df['name'].value_counts()
# +
# df['histogram_min'].value_counts()
# +
# df['histogram_max'].value_counts()
# -
# ## Memory Reduction
def mem_usage(pandas_obj):
    """Return the deep memory footprint of a pandas object as an 'X.XX MB' string."""
    usage = pandas_obj.memory_usage(deep=True)
    # DataFrames report a per-column Series, so collapse it to one total;
    # a Series already reports a scalar byte count.
    total_bytes = usage.sum() if isinstance(pandas_obj, pd.DataFrame) else usage
    megabytes = total_bytes / 1024 ** 2  # bytes -> MB
    return "{:03.2f} MB".format(megabytes)
df_int = df.select_dtypes(include=['int'])
converted_int = df_int.apply(pd.to_numeric,downcast='unsigned')
print(mem_usage(df_int))
print(mem_usage(converted_int))
compare_ints = pd.concat([df_int.dtypes,converted_int.dtypes],axis=1)
compare_ints.columns = ['before','after']
compare_ints.apply(pd.Series.value_counts)
df_float = df.select_dtypes(include=['float'])
converted_float = df_float.apply(pd.to_numeric,downcast='float')
print(mem_usage(df_float))
print(mem_usage(converted_float))
compare_floats = pd.concat([df_float.dtypes,converted_float.dtypes],axis=1)
compare_floats.columns = ['before','after']
compare_floats.apply(pd.Series.value_counts)
optimized_df = df.copy()
optimized_df[converted_int.columns] = converted_int
optimized_df[converted_float.columns] = converted_float
print(mem_usage(df))
print(mem_usage(optimized_df))
df_obj = df.select_dtypes(include=['object']).copy()
df_obj.describe()
# Convert low-cardinality object columns to the pandas 'category' dtype to
# shrink memory; high-cardinality columns stay as plain objects, since
# categoricals only pay off when values repeat a lot.
converted_obj = pd.DataFrame()
for col in df_obj.columns:
    num_unique_values = len(df_obj[col].unique())
    num_total_values = len(df_obj[col])
    # Heuristic: categorise only if fewer than half of the values are unique.
    if num_unique_values / num_total_values < 0.5:
        converted_obj.loc[:,col] = df_obj[col].astype('category')
    else:
        converted_obj.loc[:,col] = df_obj[col]
# Splice the converted columns back into the optimised frame and report its size.
optimized_df[converted_obj.columns] = converted_obj
mem_usage(optimized_df)
# +
# optimized_df.head()
# -
# ## Cleaning
# +
# chips = optimized_df.loc[optimized_df['name'] == 'HW:::CHIPSET_TEMPERATURE:CENTIGRADE:']
# +
# chips.head()
# -
cpu = optimized_df.loc[optimized_df['name'] == 'HW::CORE:C0:PERCENT:']
cpu.head()
temp = optimized_df.loc[optimized_df['name'] == 'HW::CORE:TEMPERATURE:CENTIGRADE:']
temp.head()
# +
# chips_guid = list(chips['guid'].value_counts().index)
# cpu_guid = list(cpu['guid'].value_counts().index)
# temp_guid = list(temp['guid'].value_counts().index)
# +
# overlap = [x for x in temp_guid if x in cpu_guid]
# len(overlap)
# +
# total_overlap = [x for x in overlap if x in temp_guid]
# +
# len(total_overlap)
# -
# ## Joining Device Usage
# +
# dev = pd.read_csv('../data/raw/devuse_4known_device.csv001',usecols=['guid','load_ts','batch_id','device',
# 'hw_name', 'name',
# 'duration',
# 'status'], sep='\t')
# +
# dev.shape
# +
# print(df['guid'].nunique())
# print(dev['guid'].nunique())
# +
# print(dev['load_ts'].min())
# print(dev['load_ts'].max())
# +
# dev_guid = list(dev['guid'].value_counts().index)
# +
# df_guid = list(df['guid'].value_counts().index)
# +
# print(dev_guid[:5])
# +
# print(df_guid[:5])
# +
# df_guid = all guids from hw_metric
# dev_guid = all guids from devuse
# +
# combined_overlap = [x for x in dev_guid if x in df_guid]
# +
# combined_overlap
# -
# *Seems like there is no overlap between the two DFs*
# +
# cpu_mean = cpu[['guid','batch_id','mean']]
# temp_mean = temp[['guid','batch_id','mean']]
# +
# top10guid = pd.DataFrame(cpu_mean['guid'].value_counts()).reset_index()[:10]
# top10 = list(top10guid['index'])
# +
# top10cpu = cpu_mean.loc[cpu_mean['guid'].isin(top10)]
# top10cpu.head()
# +
# top10temp = temp_mean.loc[temp_mean['guid'].isin(top10)]
# top10temp.head()
# +
# print(top10cpu.shape)
# print(top10temp.shape)
# -
# ## Data Visualizations
# +
# cpu_mean
# +
# temp_mean
# +
# gb = pd.DataFrame(cpu_mean.groupby('guid')['mean'].mean())
# top5avgcpu = gb.sort_values(by='mean', ascending=False)[:5].reset_index()
# top5avgcpu
# +
# def cleanguid(x):
# return x[:5]
# +
# top5avgcpu['guid'] = top5avgcpu['guid'].apply(cleanguid)
# +
# gb_temp = pd.DataFrame(temp_mean.groupby('guid')['mean'].mean())
# top5avgtemp = gb_temp.sort_values(by='mean', ascending=False)[:5].reset_index()
# +
# top5avgtemp['guid'] = top5avgtemp['guid'].apply(cleanguid)
# +
# sns.set(rc={'figure.figsize':(11.7,8.27)})
# sns.barplot(x="guid", y="mean", data=top5avgcpu)
# plt.xlabel('GUID')
# plt.ylabel('Mean CPU Usage')
# plt.title('Top Five CPU Usage by GUIDs')
# plt.show()
# +
# sns.set(rc={'figure.figsize':(11.7,8.27)})
# sns.barplot(x="guid", y="mean", data=top5avgtemp)
# plt.xlabel('GUID')
# plt.ylabel('Mean Temperature in Celsium')
# plt.title('Top Five CPU Core Temperatures by GUIDs')
# plt.show()
# +
# top5avgcpu
# +
# top5avgtemp
# +
# pd.DataFrame(df['guid'].value_counts())
# -
# ## EDA For system_sysinfo_unique_normalized
sys = pd.read_csv('../data/raw/system_sysinfo_unique_normalized.csv000', sep=chr(1))
sys_guid = list(sys['guid'].value_counts().index)
hw_guid = list(df['guid'].value_counts().index)
#dev_guid = list(dev['guid'].value_counts().index)
syshw_overlap = [x for x in sys_guid if x in hw_guid]
len(syshw_overlap)
sys.head()
# +
# sys['persona'].value_counts()
# -
df.head()
hwcpu_match = cpu.loc[cpu['guid'].isin(syshw_overlap)]
hwcpu_match = hwcpu_match[['guid', 'load_ts', 'mean']]
hwcpu_match['utilization_mean'] = hwcpu_match['mean']
hwcpu_match = hwcpu_match.drop(columns='mean')
hwcpu_match
hwtemp_match = temp.loc[temp['guid'].isin(syshw_overlap)]
hwtemp_match = hwtemp_match[['guid', 'load_ts', 'mean']]
hwtemp_match['temp_mean'] = hwtemp_match['mean']
hwtemp_match = hwtemp_match.drop(columns='mean')
hwtemp_match
hwtemp = pd.DataFrame(hwtemp_match.groupby('guid')['temp_mean'].mean())
hwcpu = pd.DataFrame(hwcpu_match.groupby('guid')['utilization_mean'].mean())
hwtemp
hwcpu
# +
#combined_sys['temp_mean']=combined_sys['temp_mean'].fillna(hwtemp.index.to_series())
# -
combined = sys.join(hwcpu, on=['guid'], how='left')
combined
combined = combined.join(hwtemp, on=['guid'], how='left')
combined
combined['utilization_mean'].value_counts()
# ## ucsd_apps_execlass.csv000 & frgnd_backgrnd_apps.csv000
apps = pd.read_csv('../data/raw/frgnd_backgrnd_apps.csv000', error_bad_lines=False, sep=chr(1))
app_class = pd.read_csv('../data/raw/ucsd_apps_execlass.csv000', error_bad_lines=False, sep=chr(35))
app_class
appscombined = apps.join(app_class, lsuffix='frgnd_proc_name', rsuffix='exe_name', how='left')
appscombined
mean_dur = appscombined.pivot_table('event_duration_ms', ['guid', 'app_type'], aggfunc=np.mean).reset_index()
combined_guid = list(combined['guid'].value_counts().index)
dur_guid = list(mean_dur['guid'].value_counts().index)
app_overlap = [x for x in combined_guid if x in dur_guid]
mean_dur=mean_dur.loc[mean_dur['guid'].isin(app_overlap)]
mean_dur['app_type'].unique()
itdur = mean_dur.loc[mean_dur['app_type']=='IT']
avdur = mean_dur.loc[mean_dur['app_type']=='Anti-Virus']
commdur = mean_dur.loc[mean_dur['app_type']=='Communication']
gamedur = mean_dur.loc[mean_dur['app_type']=='Game']
iudur = mean_dur.loc[mean_dur['app_type']=='Installer/Updater']
intdur = mean_dur.loc[mean_dur['app_type']=='Internet']
meddur = mean_dur.loc[mean_dur['app_type']=='Media/Consumption']
netdur = mean_dur.loc[mean_dur['app_type']=='Network Apps']
offdur = mean_dur.loc[mean_dur['app_type']=='Office']
sysdur = mean_dur.loc[mean_dur['app_type']=='System/Other']
utdur = mean_dur.loc[mean_dur['app_type']=='Utility']
meditdur = mean_dur.loc[mean_dur['app_type']=='Media/Edit']
udur = mean_dur.loc[mean_dur['app_type']=='*']
edudur = mean_dur.loc[mean_dur['app_type']=='Education']
appdur = mean_dur.loc[mean_dur['app_type']=='Metro/Universal Apps']
oobedur = mean_dur.loc[mean_dur['app_type']=='OOBE']
gldur = mean_dur.loc[mean_dur['app_type']=='Game Launcher']
# One sliced frame per app_type; each holds the mean event duration per guid.
types = [itdur, avdur, commdur, gamedur, iudur, intdur, meddur, netdur,
        offdur, sysdur, utdur, meditdur, udur, edudur, appdur, oobedur, gldur]
# Rename each slice's duration column to '<app_type>_dur_ms' and left-join it
# onto `combined` by guid, yielding one duration column per app type.
for x in types:
    # NOTE(review): .iloc[0] raises IndexError if a slice is empty — this
    # assumes every app_type listed above has at least one row; confirm.
    string = x['app_type'].iloc[0]
    x = x.drop(columns = ['app_type'])
    new_col = string + '_dur_ms'
    x[new_col] = x['event_duration_ms']
    x = x.drop(columns = ['event_duration_ms'])
    combined = combined.merge(x, on=['guid'], how='left')
combined
# Zero-fill the per-app-type duration columns created by the merge loop above:
# a guid with no recorded activity of a given app type gets NaN from the left
# join, which really means "0 ms spent".
lst = ['IT_dur_ms', 'Anti-Virus_dur_ms',
      'Communication_dur_ms', 'Game_dur_ms', 'Installer/Updater_dur_ms', 'Internet_dur_ms',
      'Media/Consumption_dur_ms', 'Network Apps_dur_ms', 'Office_dur_ms', 'System/Other_dur_ms',
      'Utility_dur_ms', 'Media/Edit_dur_ms', '*_dur_ms', 'Education_dur_ms','Metro/Universal Apps_dur_ms',
      'OOBE_dur_ms', 'Game Launcher_dur_ms']
# BUG FIX: 'Game Launcher_dur_ms' (created via the `gldur` slice in the loop
# above) was missing from this list, so its NaNs survived and the later
# dropna() silently discarded those rows. A single fillna with a dict also
# replaces the previous one-column-at-a-time loop.
combined = combined.fillna({col: 0 for col in lst})
check = combined.copy()
checking = check[['guid', 'load_ts','chassistype', 'modelvendor_normalized',
'ram',
'os','#ofcores', 'age_category',
'graphicsmanuf', 'gfxcard', 'graphicscardclass',
'cpuvendor', 'cpu_family',
'discretegraphics', 'vpro_enabled', 'utilization_mean',
'temp_mean','IT_dur_ms', 'Anti-Virus_dur_ms',
'Communication_dur_ms', 'Game_dur_ms', 'Installer/Updater_dur_ms', 'Internet_dur_ms',
'Media/Consumption_dur_ms', 'Network Apps_dur_ms', 'Office_dur_ms', 'System/Other_dur_ms',
'Utility_dur_ms', 'Media/Edit_dur_ms', '*_dur_ms', 'Education_dur_ms','Metro/Universal Apps_dur_ms',
'OOBE_dur_ms', 'persona']]
nonacheck = checking.dropna()
nonacheck
nonacheck['guid'].nunique()
print(nonacheck['load_ts'].apply(pd.to_datetime).max())
print(nonacheck['load_ts'].apply(pd.to_datetime).min())
# +
# check = check[['chassistype', 'modelvendor_normalized',
# 'ram',
# 'os','#ofcores', 'age_category',
# 'graphicsmanuf', 'gfxcard', 'graphicscardclass',
# 'cpuvendor', 'cpu_family',
# 'discretegraphics', 'vpro_enabled', 'utilization_mean',
# 'temp_mean','persona', 'IT_dur_ms', 'Anti-Virus_dur_ms',
# 'Communication_dur_ms', 'Game_dur_ms', 'Installer/Updater_dur_ms', 'Internet_dur_ms',
# 'Media/Consumption_dur_ms', 'Network Apps_dur_ms', 'Office_dur_ms', 'System/Other_dur_ms',
# 'Utility_dur_ms', 'Media/Edit_dur_ms', '*_dur_ms', 'Education_dur_ms','Metro/Universal Apps_dur_ms',
# 'OOBE_dur_ms']]
# +
# check.isna().sum()
# +
# check
# +
# sys['guid'].nunique()
# +
# cpu['guid'].nunique()
# +
# print(combined['utilization_mean'].isna().sum())
# print(combined['temp_mean'].isna().sum())
# -
combined = combined[['chassistype', 'modelvendor_normalized',
'ram',
'os','#ofcores', 'age_category',
'graphicsmanuf', 'gfxcard', 'graphicscardclass',
'cpuvendor', 'cpu_family',
'discretegraphics', 'vpro_enabled', 'utilization_mean',
'temp_mean','IT_dur_ms', 'Anti-Virus_dur_ms',
'Communication_dur_ms', 'Game_dur_ms', 'Installer/Updater_dur_ms', 'Internet_dur_ms',
'Media/Consumption_dur_ms', 'Network Apps_dur_ms', 'Office_dur_ms', 'System/Other_dur_ms',
'Utility_dur_ms', 'Media/Edit_dur_ms', '*_dur_ms', 'Education_dur_ms','Metro/Universal Apps_dur_ms',
'OOBE_dur_ms', 'persona']]
combined.head()
# +
# combined.dtypes
# -
copy = combined.copy()
copy.head()
print(copy['os'].unique())
print(copy['cpu_family'].unique())
print(copy['cpuvendor'].unique())
print(copy['graphicscardclass'].unique())
# +
# copy_cat = copy[['os','cpu_family', 'cpuvendor', 'graphicscardclass']]
# +
# dummy = pd.get_dummies(copy_cat)
# dummy['persona'] = copy['persona']
# +
# dummy
# -
cleanup_nums = {"persona": {"Web User": 0, "Casual User": 1, 'Gamer':2, 'Casual Gamer': 3, 'Office/Productivity':4,
'Content Creator/IT': 5, 'Communication': 6,
'Win Store App User': 7, 'Entertainment': 8,
'File & Network Sharer':9, 'Unknown': 10}}
cleanup_nums
def macro_cats(x):
    """Collapse a fine-grained persona label into one of 5 macro categories.

    Returns:
        0 - everyday consumer use (web, casual, communication, store apps,
            entertainment, file/network sharing)
        1 - gaming personas
        2 - productivity / content-creation personas
        3 - the explicit 'Unknown' label
        4 - catch-all for any other label
    """
    everyday = {'Web User', 'Casual User', 'Communication',
                'Win Store App User', 'Entertainment', 'File & Network Sharer'}
    gaming = {'Gamer', 'Casual Gamer'}
    productivity = {'Office/Productivity', 'Content Creator/IT'}
    # Set membership replaces the original chain of repeated `==` / `or`
    # comparisons — same mapping, easier to audit and extend.
    if x in everyday:
        return 0
    if x in gaming:
        return 1
    if x in productivity:
        return 2
    if x == 'Unknown':
        return 3
    return 4
nonacopy = copy.dropna()
nonacopy['persona'].unique()[9]
encode_persona = combined['persona'].to_frame().replace(cleanup_nums)
encode_persona['persona'].value_counts()
# +
# dummy['util_mean'] = combined['utilization_mean']
# dummy['temp_mean'] = combined['temp_mean']
# dummy = dummy.drop(columns=['persona'])
# dummy['persona'] = encode_persona['persona']
# +
# dummy
# +
# dummy_nona = dummy.dropna()
# dummy_nona
# +
# nona_test = dummy_nona.copy()
# +
# Y = nona_test['persona']
# X = nona_test.drop(columns=['persona'])
# +
# X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
# +
names = ["Decision_Tree", "Extra_Trees", "Random_Forest", "AdaBoost"]
# "Nearest_Neighbors", "Linear_SVM",
# "Polynomial_SVM", "RBF_SVM", "Gradient_Boosting"]
# "Gaussician_Process"]
# "Neural_Net", "Naive_Bayes", "QDA", "SGD"]
classifiers = [
DecisionTreeClassifier(max_depth=5),
ExtraTreesClassifier(n_estimators=10, min_samples_split=2),
RandomForestClassifier(max_depth=5, n_estimators=100),
AdaBoostClassifier(n_estimators=100)]
# KNeighborsClassifier(3),
# SVC(kernel="linear", C=0.025),
# SVC(kernel="poly", degree=3, C=0.025),
# SVC(kernel="rbf", C=1, gamma=2),
# GradientBoostingClassifier(n_estimators=100, learning_rate=1.0)]
#GaussianProcessClassifier(1.0 * RBF(1.0))]
#
# MLPClassifier(alpha=1, max_iter=1000),
# GaussianNB(),
# QuadraticDiscriminantAnalysis(),
# SGDClassifier(loss="hinge", penalty="l2")]
# +
# scores = []
# for name, clf in zip(names, classifiers):
# clf.fit(X_train, Y_train)
# score = clf.score(X_test, Y_test)
# scores.append(score)
# print(1)
# +
# scores
# +
# show = pd.DataFrame()
# show['name'] = names
# show['score'] = scores
# show
# +
# cm = sns.light_palette("green", as_cmap=True)
# s = show.style.background_gradient(cmap=cm)
# s
# +
# sns.set(style="whitegrid")
# ax = sns.barplot(y="name", x="score", data=show)
# +
# combined['persona'].value_counts()
# +
# combined['model_normalized']
# +
# print(len(combined['modelvendor_normalized'].value_counts()))
# print(len(combined['ram'].value_counts()))
# +
# combined['ram'].value_counts()
# +
# combined['gfxcard'].value_counts()[:20]
# +
# combined['graphicscardclass'].value_counts()
# +
# sys['cpu_family']
# +
# feats = combined.select_dtypes(include=['object'])
# +
# list(feats.columns)
# +
# hasher = FeatureHasher(n_features=12, input_type='string')
# f = hasher.transform(combined)
# f = f.toarray()
# hashed = pd.DataFrame(f, columns = list(feats.columns))
# hashed
# -
morecats = combined.dropna()
morecats['persona'] = morecats['persona'].apply(macro_cats)
morecats
# +
# dumcats['persona'] = morecats.drop(columns=['persona'])
# +
# encode_persona = combined['persona'].to_frame().replace(cleanup_nums)
# +
# dumcats['persona'] = encode_persona['persona']
# -
morecats['persona'].value_counts()
# +
# dumcats
# -
Y = morecats['persona']
X = morecats.drop(columns=['persona'])
X = pd.get_dummies(X)
X
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
# +
names = ["Decision_Tree", "Extra_Trees", "Random_Forest", "AdaBoost","Nearest_Neighbors", "Gradient_Boosting"]
#"Gaussician_Process"]
# "Linear_SVM",
# "Polynomial_SVM", "RBF_SVM",
# "Neural_Net", "Naive_Bayes", "QDA", "SGD"]
classifiers = [
DecisionTreeClassifier(max_depth=5, class_weight='balanced'),
ExtraTreesClassifier(n_estimators=10, min_samples_split=2, class_weight='balanced'),
RandomForestClassifier(max_depth=5, n_estimators=100, class_weight='balanced'),
AdaBoostClassifier(n_estimators=100),
KNeighborsClassifier(3),
# SVC(kernel="linear", C=0.025, class_weight='balanced'),
# SVC(kernel="poly", degree=3, C=0.025, class_weight='balanced'),
# SVC(kernel="rbf", C=1, gamma=2, class_weight='balanced'),
GradientBoostingClassifier(n_estimators=100, learning_rate=1.0)]
#GaussianProcessClassifier(1.0 * RBF(1.0))]
#
# MLPClassifier(alpha=1, max_iter=1000),
# GaussianNB(),
# QuadraticDiscriminantAnalysis(),
# SGDClassifier(loss="hinge", penalty="l2")]
# -
# Fit each candidate classifier and record its mean accuracy on the test split.
scores = []
for name, clf in zip(names, classifiers):
    clf.fit(X_train, Y_train)
    score = clf.score(X_test, Y_test)
    scores.append(score)
    print(score)
show = pd.DataFrame()
show['name'] = names
show['score'] = scores
show
cm = sns.light_palette("green", as_cmap=True)
s = show.style.background_gradient(cmap=cm)
s
# +
# sns.set(style="whitegrid")
# ax = sns.barplot(y="name", x="score", data=show)
# -
feature_names = X_train.columns
# +
#fig.set_size_inches(18.5, 10.5)
# plt.rcParams["figure.figsize"] = (100,300)
# -
importances = clf.feature_importances_
# Indices of features sorted by decreasing importance.
sorted_idx = np.argsort(importances)[::-1]
labels = np.array(feature_names)[sorted_idx]
# BUG FIX: `x = range(len(top5))` previously executed before `top5` was
# assigned, raising NameError. Define top5/data5 first, then the x positions
# (used by the commented-out bar plot below).
top5 = labels[:5]
data5 = importances[sorted_idx][:5]
x = range(len(top5))
# plt.bar(5, data5, tick_label=top5)
# plt.xticks(rotation=90)
# plt.show();
bestfeats = pd.DataFrame()
bestfeats['feature'] = top5
bestfeats['importance'] = data5
bestfeats
# +
# feature_importance = clf.feature_importances_
# sorted_idx = np.argsort(feature_importance)
# pos = np.arange(sorted_idx.shape[0]) + .5
# fig = plt.figure(figsize=(12, 6))
# plt.subplot(1, 2, 1)
# plt.barh(pos, feature_importance[sorted_idx], align='center')
# sorted_idx
# # plt.yticks(pos, np.array(diabetes.feature_names)[sorted_idx])
# # plt.title('Feature Importance (MDI)')
# result = permutation_importance(clf, X_test, Y_test, n_repeats=10,
# random_state=42, n_jobs=2)
# sorted_idx = result.importances_mean.argsort()
# # plt.subplot(1, 2, 2)
# # plt.boxplot(result.importances[sorted_idx].T,
# # vert=False, labels=np.array(diabetes.feature_names)[sorted_idx])
# # plt.title("Permutation Importance (test set)")
# # fig.tight_layout()
# # plt.show()
# +
# from sklearn.metrics import roc_auc_score
# from sklearn.model_selection import GridSearchCV
# param_dict = {
# 'criterion':['gini','entropy'],
# 'max_depth':range(1,10),
# 'min_samples_split':range(1,10),
# 'min_samples_leaf':range(1,5)
# }
# -
ada = AdaBoostClassifier(n_estimators=100)
ada.fit(X_train, Y_train)
mat = confusion_matrix(Y_test,ada.predict(X_test))
plt.figure(figsize=(10 , 10))
sns.set()
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
xticklabels=np.unique(Y_test),
yticklabels=np.unique(Y_test))
plt.xlabel('True Label')
plt.ylabel('Predicted Label')
# Save confusion matrix to outputs in Workbench
plt.show()
# +
# rbfsvc = SVC(kernel="rbf", C=1, gamma=2, class_weight='balanced')
# rbfsvc.fit(X_train, Y_train)
# mat = confusion_matrix(Y_test,rbfsvc.predict(X_test))
# plt.figure(figsize=(10 , 10))
# sns.set()
# sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
# xticklabels=np.unique(Y_test),
# yticklabels=np.unique(Y_test))
# plt.xlabel('True Label')
# plt.ylabel('Predicted Label')
# # Save confusion matrix to outputs in Workbench
# plt.show()
# -
et = ExtraTreesClassifier(n_estimators=10, min_samples_split=2, class_weight='balanced')
et.fit(X_train, Y_train)
mat = confusion_matrix(Y_test,et.predict(X_test))
plt.figure(figsize=(10 , 10))
sns.set()
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
xticklabels=np.unique(Y_test),
yticklabels=np.unique(Y_test))
plt.xlabel('True Label')
plt.ylabel('Predicted Label')
# Save confusion matrix to outputs in Workbench
plt.show()
gbc=GradientBoostingClassifier(n_estimators=100, learning_rate=1.0)
gbc.fit(X_train, Y_train)
mat = confusion_matrix(Y_test,gbc.predict(X_test))
plt.figure(figsize=(10 , 10))
sns.set()
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
xticklabels=np.unique(Y_test),
yticklabels=np.unique(Y_test))
plt.xlabel('True Label')
plt.ylabel('Predicted Label')
# Save confusion matrix to outputs in Workbench
plt.show()
nn= KNeighborsClassifier(35)
nn.fit(X_train, Y_train)
mat = confusion_matrix(Y_test,nn.predict(X_test))
plt.figure(figsize=(10 , 10))
sns.set()
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
xticklabels=np.unique(Y_test),
yticklabels=np.unique(Y_test))
plt.xlabel('True Label')
plt.ylabel('Predicted Label')
# Save confusion matrix to outputs in Workbench
plt.show()
per = Perceptron(random_state=1, max_iter=30, tol=0.001, class_weight='balanced', warm_start=True)
per.fit(X_train, Y_train)
mat = confusion_matrix(Y_test,per.predict(X_test))
plt.figure(figsize=(10 , 10))
sns.set()
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
xticklabels=np.unique(Y_test),
yticklabels=np.unique(Y_test))
plt.xlabel('True Label')
plt.ylabel('Predicted Label')
# Save confusion matrix to outputs in Workbench
plt.show()
score = per.score(X_test, Y_test)
score
# +
# mlp = MLPClassifier(alpha=1e-4, max_iter=20, solver='adam', verbose=10, random_state=1, learning_rate_init=.1,
# hidden_layer_sizes=(800,100,2))
# mlp.fit(X_train, Y_train)
# mat = confusion_matrix(Y_test,mlp.predict(X_test))
# plt.figure(figsize=(10 , 10))
# sns.set()
# sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
# xticklabels=np.unique(Y_test),
# yticklabels=np.unique(Y_test))
# plt.xlabel('True Label')
# plt.ylabel('Predicted Label')
# # Save confusion matrix to outputs in Workbench
# plt.show()
# -
# ## Neural Net
print("Num of GPUs:", torch.cuda.device_count())
print("GPU tagger is:", torch.cuda.current_device())
print("GPU model:", torch.cuda.get_device_name(0))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
tens_Xtrain = torch.tensor(X_train.values).to(device)
tens_Xtest = torch.tensor(X_test.values).to(device)
num_features = X_train.shape[1]
print("Number of trainable features is:", num_features)
# +
import torchvision
from torch.utils.data import Dataset, DataLoader
class Dataset(Dataset):
    """Thin torch Dataset over the module-level training split (X_train/Y_train).

    NOTE(review): this class name shadows torch.utils.data.Dataset, which it
    subclasses — consider renaming (e.g. TrainDataset) if reused elsewhere.
    NOTE(review): `self.x[index]` selects by label/column on pandas objects,
    not by position — DataLoader integer indices may need .iloc; confirm.
    """

    def __init__(self):
        # Capture the globals once at construction time.
        self.x, self.y = X_train, Y_train
        self.n_samples = self.x.shape[0]

    def __len__(self):
        return self.n_samples

    def __getitem__(self, index):
        # Yield one (features, label) pair.
        return (self.x[index], self.y[index])
dataset = Dataset()
# -
train_loader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True)
train_loader
class Classifier(nn.Module):
    """Fully connected net: n_inputs -> width -> width -> width -> 1.

    Three ReLU-activated linear layers followed by a single sigmoid output
    in (0, 1), i.e. a binary classifier head.
    NOTE(review): a single sigmoid unit models a binary target, while the
    persona labels elsewhere in this notebook span 5 classes — confirm the
    intended target before training.
    """

    def __init__(self, width, n_inputs):
        super().__init__()
        self.width = width
        # Layers are created in the same order as before so that seeded
        # weight initialisation stays reproducible.
        self.input = nn.Linear(n_inputs, width)
        self.h1 = nn.Linear(in_features=width, out_features=width)
        self.h2 = nn.Linear(in_features=width, out_features=width)
        self.output = nn.Linear(width, 1)

    def forward(self, x):
        hidden = F.relu(self.input(x))
        hidden = F.relu(self.h1(hidden))
        hidden = F.relu(self.h2(hidden))
        return torch.sigmoid(self.output(hidden))
# +
# function to help evaluate the training/test accuracies during training. it compares predictions to labels
# we define a threshold at 0.5, with predictions below classified as bkg & predictions above classified as sig
def get_num_correct(x, y):
    """Count predictions in `x` matching labels `y` after thresholding at 0.5.

    Rounding maps sub-0.5 scores to class 0 and above-0.5 scores to class 1;
    the count of agreements with `y` is returned as a plain Python int.
    """
    thresholded = torch.round(x.float())
    return int(thresholded.eq(y.float()).sum())
# +
from tqdm.notebook import tqdm
import time
def normal_training(epochs=20, batch_size = 291, learning_rate=0.001):
    """Train a fresh `Classifier` on the module-level `dataset` and return it.

    Uses Adam + binary cross-entropy, prints per-epoch loss/accuracy, and
    plots running train vs. test loss curves at the end.

    Relies on module globals: Classifier, num_features, device, dataset,
    X_train, X_test, Y_test, get_num_correct, tqdm, time, np, plt, torch, nn.

    NOTE(review): `X_test[indx]` / `Y_test[indx]` index with a tensor of
    integer positions — this assumes tensor/array test splits; pandas frames
    would need .iloc. Confirm the types at call time.
    """
    # create the network
    network = Classifier(128, n_inputs=num_features).to(device)
    # choose the criterion (binary cross-entropy, matching the sigmoid output)
    criterion = nn.BCELoss()
    # create the optimiser
    optimizer = torch.optim.Adam(network.parameters(), lr=learning_rate)
    # keep track of the losses
    losses = []
    losses_test = []
    avg_losses = [] ######## for plotting we need losses per epoch
    avg_losses_test = []
    # create a data loader to begin the training
    train_loader = torch.utils.data.DataLoader(dataset, batch_size)
    # start a timer
    begt = time.time()
    for epoch in tqdm(range(epochs)):
        # to evaluate accuracies while training
        correct_preds = 0
        correct_preds_test = 0
        for batch in tqdm(train_loader):
            X,Y = batch
            # forwardprop
            preds = network(X.float())
            loss = criterion(preds,Y.float())
            # backprop
            optimizer.zero_grad() ######## To avoid accumulating the gradients
            loss.backward()
            optimizer.step()
            losses.append(loss.to('cpu').detach().numpy())
            # feedback to assess training accuracy
            correct_preds += get_num_correct(preds, Y)
            # sample some test data for evaluation (5000 draws, with replacement)
            with torch.no_grad():
                indx = torch.randint(0, X_test.shape[0], size=(5000, ))
                X_t, Y_t = X_test[indx], Y_test[indx]
                output_test = network(X_t.float())
                loss_test = criterion(output_test, Y_t.float())
                losses_test.append(loss_test.to('cpu').detach().numpy())
                # feedback to assess test accuracy
                correct_preds_test += get_num_correct(output_test, Y_t)
        # calculate loss averages to use them in plots
        # NOTE(review): `losses`/`losses_test` are never reset, so these are
        # running means over every batch seen so far, not per-epoch means.
        avg_losses.append(np.mean(losses))
        avg_losses_test.append(np.mean(losses_test))
        # evaluate the train/test accuracies
        train_accuracy = correct_preds * 100 / X_train.shape[0]
        test_accuracy = correct_preds_test * 100 / (X_t.shape[0] * X_train.shape[0] / batch_size)
        print("Epoch:", epoch+1, " - Loss:", round(loss.item(),5), " - Train accuracy: ", round(train_accuracy,2), " - Test accuracy: ", round(test_accuracy,2))
    # plot the running train/test loss curves
    fig, ax = plt.subplots()
    ax.plot(range(len(avg_losses)), avg_losses, label='train loss')
    ax.plot(range(len(avg_losses_test)), avg_losses_test, label='test loss')
    ax.set_xlabel('Epochs')
    ax.set_ylabel('Classifier loss')
    ax.legend(loc='best')
    plt.show()
    print("Training time:", round(((time.time()-begt)/60),2),"min" )
    return network
# -
clf = normal_training(epochs=5, batch_size = 291, learning_rate=0.001)
# +
# nn3= KNeighborsClassifier(3)
# nn3.fit(X_train, Y_train)
# print(nn3.score(X_test, Y_test))
# nn5= KNeighborsClassifier(5)
# nn5.fit(X_train, Y_train)
# print(nn5.score(X_test, Y_test))
# nn7= KNeighborsClassifier(7)
# nn7.fit(X_train, Y_train)
# print(nn7.score(X_test, Y_test))
# nn9= KNeighborsClassifier(9)
# nn9.fit(X_train, Y_train)
# print(nn9.score(X_test, Y_test))
# nn11= KNeighborsClassifier(11)
# nn11.fit(X_train, Y_train)
# print(nn11.score(X_test, Y_test))
# nn13= KNeighborsClassifier(13)
# nn13.fit(X_train, Y_train)
# print(nn13.score(X_test, Y_test))
# nn15= KNeighborsClassifier(15)
# nn15.fit(X_train, Y_train)
# print(nn15.score(X_test, Y_test))
# nn31= KNeighborsClassifier(31)
# nn31.fit(X_train, Y_train)
# print(nn31.score(X_test, Y_test))
# nn35= KNeighborsClassifier(35, n_jobs=1)
# nn35.fit(X_train, Y_train)
# print(nn35.score(X_test, Y_test))
# nn37= KNeighborsClassifier(37, n_jobs=1)
# nn37.fit(X_train, Y_train)
# print(nn37.score(X_test, Y_test))
# nn41= KNeighborsClassifier(41, n_jobs=-1)
# nn41.fit(X_train, Y_train)
# print(nn41.score(X_test, Y_test))
# nn37= KNeighborsClassifier(37)
# nn37.fit(X_train, Y_train)
# print(nn37.score(X_test, Y_test))
# nn41= KNeighborsClassifier(41, n_jobs=-1)
# nn41.fit(X_train, Y_train)
# print(nn41.score(X_test, Y_test))
# +
# cleanup_nums
# +
# apps = pd.read_csv('../data/raw/frgnd_backgrnd_apps.csv000', error_bad_lines=False, sep=chr(1))
# +
# apps.head()
# +
# s = pd.Series(apps['frgnd_proc_name'])
# 'nitroexplorer2b.exe' in s
# +
# app_class = pd.read_csv('../data/raw/ucsd_apps_execlass.csv000', error_bad_lines=False, sep=chr(35))
# app_class
# +
# app_exe = list(apps['frgnd_proc_name'].value_counts().index)
# class_exe = list(app_class['exe_name'].value_counts().index)
# overlap = [x for x in app_exe if x in class_exe]
# len(overlap)
# +
# appscombined = apps.join(app_class, lsuffix='frgnd_proc_name', rsuffix='exe_name', how='left')
# appscombined
# +
# appscombined.groupby(['guid', 'app_type'])['event_duration_ms'].mean()
# +
# mean_dur = appscombined.pivot_table('event_duration_ms', ['guid', 'app_type'], aggfunc=np.mean).reset_index()
# +
# mean_dur['app_type'].unique()
# mean_dur
# +
# combined_guid = list(combined['guid'].value_counts().index)
# dur_guid = list(mean_dur['guid'].value_counts().index)
# overlap = [x for x in combined_guid if x in dur_guid]
# len(overlap)
# +
# appproc_exe = list(apps['proc_name'].value_counts().index)
# overlap = [x for x in appproc_exe if x in class_exe]
# len(overlap)
# +
# combined
# -
| 32,165 |
/classes/w3 - Sentiment Analysis/miniflow/.ipynb_checkpoints/Miniflow Add Node-checkpoint.ipynb | dfd0e43c37c1effba503d328d4b2d23a570cc3f8 | [] | no_license | fesaab/deep-learning | https://github.com/fesaab/deep-learning | 0 | 0 | null | 2017-04-03T01:08:04 | 2017-04-03T00:32:44 | null | Jupyter Notebook | false | false | .py | 6,629 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Influenza model preprocessing
import pandas as pd
import numpy as np
import networkx as nx
import nafot
import model
from itertools import product
import pickle
from matplotlib import pyplot as plt
# Load the full vaccination dataset (path configured in the model module).
vaccination_data = pd.read_csv(model.vaccination_data_path)
# vaccination_data
# Exploratory peek at the sample vaccination / demographic extracts.
vacc = pd.read_csv('../../Data/vaccination_data/sample_vac_data.txt')
dem = pd.read_csv('../../Data/vaccination_data/sample_dem_data.txt').iloc[:,1:].copy()  # drop first (unnamed index) column
dem.head(1)
dem[dem.stat_code==4407]
# ## Vaccination data
# +
# Load vaccination data
vaccination_data = pd.read_csv(model.vaccination_data_path)
vaccination_data['vac_date'] = pd.to_datetime(vaccination_data['vac_date'])
# Remove incomplete seasons (2007 and 2018)
vaccination_data = vaccination_data[~vaccination_data.vac_season.isin([2007, 2018])].copy()
# Keep only the last 5 seasons (2013-2017)
# vaccination_data = vaccination_data[vaccination_data.vac_season.isin(np.arange(2008, 2017+1)[-7:])]
vaccination_data = vaccination_data[vaccination_data.vac_season.isin(np.arange(2008, 2017+1)[-5:])] ### 5 SEASONS!!!
# +
# Load clinic age population
population_by_clinic_age = pd.read_pickle('L:/Dor/Data/vaccination_data/population_by_clinic_age.pickle')
# Get Clinic-stat area data
clinics_stat_areas = pd.read_csv('../../data/vaccination_data/clinics_with_stat_area.csv')
clinics_stat_areas.set_index('clinic_code', inplace=True)
clinics_stat_areas['subdist'] = clinics_stat_areas.stat_area_id.apply(lambda area: nafot.gdf.loc[area].SubDistrictCode)
# +
# Age group indicator: 1 = adult (over 18 at season start), 0 = child.
vaccination_data['age'] = ((vaccination_data.vac_season - vaccination_data.birth_year) > 18).astype(int)
vaccination_data_by_age_clinic_season = vaccination_data.groupby([ 'clinic_code', 'age', 'vac_season']).count()[['random_ID']]
# vaccination_data_by_age_clinic = vaccination_data_by_age_clinic_season.groupby(['clinic_code', 'age']).mean()
# Divide by the population of each clinic and age to turn counts into coverage fractions
vaccination_data_by_age_clinic_season['random_ID'] = vaccination_data_by_age_clinic_season.apply(lambda row:
                    row.random_ID / population_by_clinic_age.loc[(row.name[:2])].values[0], axis=1)
vaccination_data_by_age_clinic_season.columns = ['data_coverage']
# Add stat area and subdist
vaccination_data_by_age_clinic_season['stat_area'] =\
    vaccination_data_by_age_clinic_season.apply(lambda row:
                                                clinics_stat_areas.loc[row.name[0]].stat_area_id, axis=1)
vaccination_data_by_age_clinic_season['subdist'] =\
    vaccination_data_by_age_clinic_season.apply(lambda row:
                                                clinics_stat_areas.loc[row.name[0]].subdist, axis=1)
# Save
vaccination_data_by_age_clinic_season.to_pickle('../../Data/influenza_model/data/vaccination_coverage_influenza.pickle')
# -
# ## Influenza cases
# +
# Load prep data (precomputed population tables from the vaccination model).
with open('../../Data/vaccination_model/prep_data.pickle', 'rb') as pickle_in:
    prep_data = pickle.load(pickle_in)
# Get population by clinic and age in Macabi data and in the network
population_by_clinic_age = prep_data['population_by_clinic_age'].copy()
# +
# NOTE: the Windows paths below use raw strings so backslashes are taken
# literally and are never interpreted as escape sequences.
diagnoses = pd.read_csv(r'L:\Dor\Data\influenza_model\data\sample_diagnoses.txt')
diagnoses.diagnosis_code = diagnoses.diagnosis_code.map(lambda x: x.strip())
# Get only influenza cases
relevant_diagnosis_codes = set(['478.9', '482.2', '482.2', '487.0', '487  ', '487   ', '487.1', 'Y22844', 'Y14678', 'Y14678',
                                'Y14696', 'Y23242', 'Y23637', 'Y14697'])
influenza_diagnoses = diagnoses[diagnoses.diagnosis_code.isin(relevant_diagnosis_codes)].copy()
# Convert the date to TimeStamp object (dates are stored as YYYYMMDD integers)
influenza_diagnoses.date_diagnosis = influenza_diagnoses.date_diagnosis.map(lambda x: pd.Timestamp(year=int(str(x)[:4]), month=int(str(x)[4:6]), day=int(str(x)[6:])))
# Sort by date
influenza_diagnoses.sort_values('date_diagnosis', inplace=True)
# Add season column (a season runs June..May and is labeled by its ending year)
influenza_diagnoses['season'] = influenza_diagnoses.date_diagnosis.map(lambda date: date.year+1 if date.month >=6 else date.year)
# Get only cases in the last seasons (7 seasons: 2011-2017)
influenza_diagnoses = influenza_diagnoses[influenza_diagnoses.season.isin([2011,2012,2013,2014,2015,2016,2017])].copy()
# influenza_diagnoses = influenza_diagnoses[influenza_diagnoses.season.isin([2013,2014,2015,2016,2017])].copy() ### 5 SEASONS!!!
# Add age
# Load demographic data
demographic_data = pd.read_csv(r'L:\Dor\Data\influenza_model\data\sample_dem_data.txt')
# Add birth year to the diagnoses data
influenza_diagnoses = influenza_diagnoses.merge(demographic_data[['random_ID', 'birth_year', 'stat_code']], left_on='random_ID',
                                                right_on='random_ID')
# Add age at diagnosis date column (1 = adult, diagnosed at age > 18; 0 = child)
influenza_diagnoses['age'] = influenza_diagnoses.apply(lambda row: ((row.date_diagnosis.year - row.birth_year) > 18), axis=1).astype(int)
# Get only relevant columns (stat_code is renamed clinic_code downstream)
influenza_diagnoses = influenza_diagnoses[['random_ID', 'season', 'age', 'stat_code', 'date_diagnosis']].copy()
influenza_diagnoses.columns = ['random_ID', 'season', 'age', 'clinic_code', 'date_diagnosis']
# Sort by date
influenza_diagnoses.sort_values('date_diagnosis', inplace=True)
# ### Create daily influenza data
# +
# Group by date
influenza_cases = influenza_diagnoses.copy()
influenza_cases = influenza_cases.groupby(['clinic_code', 'age', 'date_diagnosis']).count()[['random_ID']]
# Initialize a Data frame with all relevant dates (7 years of daily dates from 1-Jun-2010)
dates = [pd.Timestamp(2010,6,1) + pd.Timedelta(i, unit='d') for i in range(365*7+2)]
# dates = [pd.Timestamp(2012,6,1) + pd.Timedelta(i, unit='d') for i in range(365*5)] #### 5 SEASONS!!!
influenza_cases_daily = pd.DataFrame(list(product(dates, influenza_diagnoses.clinic_code.unique(), [0,1])),
                                     columns=['date', 'clinic_code', 'age'])
influenza_cases_daily.set_index(['clinic_code', 'age', 'date'], inplace=True)
# Add the number of case for each date (0 if there are none)
influenza_cases_daily['cases'] = influenza_cases_daily.index.map(lambda x:
                                        influenza_cases.loc[x] if x in influenza_cases.index else 0)
influenza_cases_daily.cases = influenza_cases_daily.cases.astype(int)
influenza_cases_daily.reset_index(inplace=True)
# Aggregate weekly cases by clinic and age
# Initialize a dictionary
influenza_weekly_cases_by_clinic_age = {}
# Go over all combinations of clinic_code and age and aggregate weekly
# for clinic, age in product(influenza_diagnoses.clinic_code.unique(), [0,1]):
for clinic, age in population_by_clinic_age.index:
    # Get only relevant clinc and age
    cur_influenza_cases = influenza_cases_daily[(influenza_cases_daily.clinic_code == clinic)&(influenza_cases_daily.age == age)].copy()
    # Aggregate weekly
    cur_influenza_weekly_cases = cur_influenza_cases.set_index('date').resample('W').sum()[['cases']].copy()
    # Add season column
    cur_influenza_weekly_cases['season'] = cur_influenza_weekly_cases.index.map(lambda date: date.year+1 if date.month >=6
                                                                                else date.year)
    cur_influenza_weekly_cases.columns = ['cases', 'season']
    cur_influenza_weekly_cases = cur_influenza_weekly_cases[['season', 'cases']]
    # Add to the dictionary (dropping the trailing, partial week)
    influenza_weekly_cases_by_clinic_age[(clinic, age)] = cur_influenza_weekly_cases[:-1].copy()
# Update the total by age group as a sum of all the relevant clinic, age groups
# Initialize a dictionary (values start as a copy of an arbitrary per-clinic frame; cases are overwritten below)
influenza_weekly_cases_by_age = {age: list(influenza_weekly_cases_by_clinic_age.values())[0].copy()
                                 for age in [0, 1]}
# Children
influenza_weekly_cases_by_age[0].cases = sum([influenza_weekly_cases_by_clinic_age[(clinic, age)].cases.values.copy()
                                              for clinic, age in influenza_weekly_cases_by_clinic_age if age == 0])
# Adults
influenza_weekly_cases_by_age[1].cases = sum([influenza_weekly_cases_by_clinic_age[(clinic, age)].cases.values.copy()
                                              for clinic, age in influenza_weekly_cases_by_clinic_age if age == 1])
# Update the total as a sum of all the clinics and age groups
total_influenza_weekly_cases = influenza_weekly_cases_by_age[0].copy()
total_influenza_weekly_cases.cases = sum([influenza_weekly_cases_by_clinic_age[(clinic, age)].cases.values.copy()
                                          for clinic, age in influenza_weekly_cases_by_clinic_age])
# -
plt.figure(figsize=(15,7))
influenza_weekly_cases_by_age[0].cases.plot()
influenza_weekly_cases_by_age[1].cases.plot()
total_influenza_weekly_cases.cases.plot()
plt.show()
total_influenza_weekly_cases.shape
total_influenza_weekly_cases[total_influenza_weekly_cases.season==2013].iloc[26:36].sum()
plt.figure(figsize=(15,7))
total_influenza_weekly_cases[total_influenza_weekly_cases.season==2013].cases.plot()
total_influenza_weekly_cases[total_influenza_weekly_cases.season==2013].iloc[26:36].cases.plot()
relevant_clinics = [c for c, a in prep_data['relevant_clinics_age']]
relevant_clinics = np.unique(relevant_clinics)
relevant_clinics
# +
# Per-clinic normalized weekly profile of the 2013 season (weeks 26..35).
cases_2013 = {}
for clinic in relevant_clinics:
    # Copy the stored frame: the += below would otherwise mutate the shared
    # DataFrame held in influenza_weekly_cases_by_clinic_age[(clinic, 1)],
    # corrupting the data used by later cells.
    cur_clinic_data = influenza_weekly_cases_by_clinic_age[(clinic, 1)].copy()
    if (clinic, 0) in prep_data['relevant_clinics_age']:
        cur_clinic_data.cases += influenza_weekly_cases_by_clinic_age[(clinic, 0)].cases
    # Get only 2013 season
    cur_clinic_data = cur_clinic_data[cur_clinic_data.season==2013].iloc[26:36].copy()
    cur_clinic_data = cur_clinic_data/cur_clinic_data.sum()
    cases_2013[clinic] = cur_clinic_data
# -
cases_2013[clinic].iloc[:]
plt.figure(figsize=(15,7))
for clinic in cases_2013:
    cases_2013[clinic].cases.plot()
plt.show()
# ### Adjust influenza cases
# #### Adjust to attack rate
# +
# Network population by age
net_pop_by_age = np.array([population_by_clinic_age.loc[pd.IndexSlice[:,age], 'network_population'].sum() for age in [0, 1]])
data_pop_by_age = np.array([population_by_clinic_age.loc[pd.IndexSlice[:,age], 'data_population'].sum() for age in [0, 1]])
# Calculate the multi-year attack rate for each age group as observed in the data
# data_attack_rates = np.array([influenza_weekly_case_by_age[age].cases.sum()/net_pop_by_age[age] for age in [0, 1]])
data_attack_rates = np.array([influenza_weekly_cases_by_age[age].cases.sum()/data_pop_by_age[age] for age in [0, 1]])
# Attack rate - Vaccine paper (literature values per age band)
paper_attack_rates = {'0-3': 0.203, '4-24': 0.102, '25-49': 0.066, '50-64': 0.066, '65+': 0.09}
# Load israeli population by age
israeli_pop_all_ages = pd.read_excel('../data/population_all_age_groups.xlsx', sheet_name='israeli_pop')
israeli_pop_all_ages.set_index('age', inplace=True)
# Proportion calculation for age group adjustments
# 0-3 out of 0-17
prop_03_017 = israeli_pop_all_ages.iloc[0:3+1,:].israeli_pop.sum() / israeli_pop_all_ages.iloc[0:17+1,:].israeli_pop.sum()
# 4-17 out of 0-17
prop_417_017 = israeli_pop_all_ages.iloc[4:17+1,:].israeli_pop.sum() / israeli_pop_all_ages.iloc[0:17+1,:].israeli_pop.sum()
# 18-24 out of 18+
prop_1824_18 = israeli_pop_all_ages.iloc[18:24+1,:].israeli_pop.sum() / israeli_pop_all_ages.iloc[18:,:].israeli_pop.sum()
# 25-49 out of 18+
prop_2549_18 = israeli_pop_all_ages.iloc[25:49+1,:].israeli_pop.sum() / israeli_pop_all_ages.iloc[18:,:].israeli_pop.sum()
# 50-64 out of 18+
prop_5064_18 = israeli_pop_all_ages.iloc[50:64+1,:].israeli_pop.sum() / israeli_pop_all_ages.iloc[18:,:].israeli_pop.sum()
# 65+ out of 18+
prop_65_18 = israeli_pop_all_ages.iloc[65:,:].israeli_pop.sum() / israeli_pop_all_ages.iloc[18:,:].israeli_pop.sum()
# Attack rate in model age groups (weighted by the population proportions above)
attack_rates = np.array([prop_03_017*paper_attack_rates['0-3']+prop_417_017*paper_attack_rates['4-24'],
                         prop_1824_18*paper_attack_rates['4-24']+prop_2549_18*paper_attack_rates['25-49']+
                         prop_5064_18*paper_attack_rates['50-64']+prop_65_18*paper_attack_rates['65+']])
# Multi-year attack rate (x7 because 7 seasons of data are used)
attack_rates_5year = attack_rates*7
# attack_rates_5year = attack_rates*5 ### 5 SEASONS
# Calculate the factor between the data and the real attack rates
adj_factor = attack_rates_5year / data_attack_rates
# Adjust the infection data according to adjustment factor (in-place scaling)
for clinic, age in influenza_weekly_cases_by_clinic_age:
    influenza_weekly_cases_by_clinic_age[(clinic, age)].cases *= adj_factor[age]
# Update the total by age group as a sum of all the relevant clinic, age groups
# Children
influenza_weekly_cases_by_age[0].cases = sum([influenza_weekly_cases_by_clinic_age[(clinic, age)].cases.values.copy()
                                              for clinic, age in influenza_weekly_cases_by_clinic_age if age == 0])
# Adults
influenza_weekly_cases_by_age[1].cases = sum([influenza_weekly_cases_by_clinic_age[(clinic, age)].cases.values.copy()
                                              for clinic, age in influenza_weekly_cases_by_clinic_age if age == 1])
# Update the total as a sum of all the clinics and age groups
total_influenza_weekly_cases.cases = sum([influenza_weekly_cases_by_clinic_age[(clinic, age)].cases.values.copy()
                                          for clinic, age in influenza_weekly_cases_by_clinic_age])
# Recalculate data attack rates (sanity check: should now match attack_rates_5year)
data_attack_rates_updated = np.array([influenza_weekly_cases_by_age[age].cases.sum()/data_pop_by_age[age] for age in [0, 1]])
# -
plt.figure(figsize=(15,7))
influenza_weekly_cases_by_age[0].cases.plot()
influenza_weekly_cases_by_age[1].cases.plot()
total_influenza_weekly_cases.cases.plot()
plt.show()
# #### Adjust to population size
# +
# Adjust the infection data according to adjustment factor (scale to network population)
for clinic, age in influenza_weekly_cases_by_clinic_age:
    influenza_weekly_cases_by_clinic_age[(clinic, age)].cases *= population_by_clinic_age.loc[(clinic, age), 'factor']
#     influenza_weekly_cases_by_clinic_age[(clinic, age)].cases = influenza_weekly_cases_by_clinic_age[(clinic, age)].cases.copy()*population_by_clinic_age.loc[(clinic, age), 'factor']
# # Remove irrelevant (clinic, age) combinations (doesn't exist in the network)
# influenza_weekly_cases_by_clinic_age = {k: v for k,v in influenza_weekly_cases_by_clinic_age.items()
#                                         if k in population_by_clinic_age.index}
# Update the total by age group as a sum of all the relevant clinic, age groups
# Children
influenza_weekly_cases_by_age[0].cases = sum([influenza_weekly_cases_by_clinic_age[(clinic, age)].cases.values.copy()
                                              for clinic, age in influenza_weekly_cases_by_clinic_age if age == 0])
# Adults
influenza_weekly_cases_by_age[1].cases = sum([influenza_weekly_cases_by_clinic_age[(clinic, age)].cases.values.copy()
                                              for clinic, age in influenza_weekly_cases_by_clinic_age if age == 1])
# Update the total by subdist as a sum of all the relevant clinic, age groups
influenza_weekly_cases_by_subdist = {key: influenza_weekly_cases_by_age[0].copy() for key in prep_data['relevant_subdists_age']}
# load subdist by clinic
clinics_subdists = pd.read_pickle('../../data/vaccination_data/clinics_with_stat_area.pickle').subdist
# Go over all subdists and sum
for subdist, age in prep_data['relevant_subdists_age']:
    influenza_weekly_cases_by_subdist[(subdist, age)].cases = \
        sum([influenza_weekly_cases_by_clinic_age[(clinic, cur_age)].cases.values.copy()
             for clinic, cur_age in influenza_weekly_cases_by_clinic_age
             if clinics_subdists.loc[clinic] == subdist and cur_age == age])
# Update the total as a sum of all the clinics and age groups
total_influenza_weekly_cases.cases = sum([influenza_weekly_cases_by_clinic_age[(clinic, age)].cases.values.copy()
                                          for clinic, age in influenza_weekly_cases_by_clinic_age])
# -
plt.figure(figsize=(15,7))
influenza_weekly_cases_by_age[0].cases.plot()
influenza_weekly_cases_by_age[1].cases.plot()
total_influenza_weekly_cases.cases.plot()
plt.show()
# Normalized 2013-season weekly profile per subdistrict (weeks 26..35).
relevant_subdists = np.unique([s for s,a in prep_data['relevant_subdists_age']])
plt.figure(figsize=(15,7))
for subdist in relevant_subdists:
    # Copy the stored frame: the += below would otherwise mutate the shared
    # DataFrame held in influenza_weekly_cases_by_subdist[(subdist, 0)] in place.
    cur_subdist_data = influenza_weekly_cases_by_subdist[(subdist,0)].copy()
    cur_subdist_data.cases += influenza_weekly_cases_by_subdist[(subdist,1)].cases
    cur_subdist_data = cur_subdist_data[cur_subdist_data.season==2013].iloc[26:36].copy()
    cur_subdist_data = cur_subdist_data/cur_subdist_data.sum()
    cur_subdist_data.cases.plot()
# Plot all (subdist, age) weekly series against the overall total.
plt.figure(figsize=(15,7))
for key in influenza_weekly_cases_by_subdist:
#     if key[1] == 0:
    influenza_weekly_cases_by_subdist[key].cases.plot()
total_influenza_weekly_cases.cases.plot()
plt.show()
# Save
# +
# Save: each aggregation level is pickled to its own file for the model to consume.
with open('../../Data/influenza_model/data/influenza_weekly_cases_by_clinic_age.pickle', 'wb') as pickle_out:
    pickle.dump(influenza_weekly_cases_by_clinic_age, pickle_out)
with open('../../Data/influenza_model/data/influenza_weekly_cases_by_age.pickle', 'wb') as pickle_out:
    pickle.dump(influenza_weekly_cases_by_age, pickle_out)
with open('../../Data/influenza_model/data/influenza_weekly_cases_by_subdist.pickle', 'wb') as pickle_out:
    pickle.dump(influenza_weekly_cases_by_subdist, pickle_out)
total_influenza_weekly_cases.to_pickle('../../Data/influenza_model/data/total_influenza_weekly_cases.pickle')
# ### Infection rate by season, clinic and age
# #### Population proportion by subdist and age
# +
population = prep_data['population_by_clinic_age'].copy()
# Add subdist
population['subdist'] = population.apply(lambda row: clinics_subdists.loc[row.name[0]], axis=1)
# Per-(clinic, age) population share within its (subdist, age) group.
prop_data_pop = {}
prop_net_pop = {}
for subdist, age in prep_data['relevant_subdists_age']:
    cur_df = population[population.subdist==subdist].loc[pd.IndexSlice[:,age], :]
    prop_data_pop.update(dict(cur_df.data_population / cur_df.data_population.sum()))
    prop_net_pop.update(dict(cur_df.network_population / cur_df.network_population.sum()))
# -
# #### By clinic
# +
# Go over the seasons
infection_rates = {}
# for season in model.seasons:
for season in [2011, 2012] + model.seasons:
    infection_rate_by_clinic_age = []
    # For each season go over each clinic and age
    for clinic, age in prep_data['relevant_clinics_age']:
        # Get number of cases in current season, clinic and age
        cur_clinic_cases = influenza_weekly_cases_by_clinic_age[(clinic, age)]
        cases = cur_clinic_cases[cur_clinic_cases.season==season].cases.sum()
        # Get population of total clinic (network population because cases are normalized to the size of the network)
        population = population_by_clinic_age.loc[(clinic, age)].network_population
        # Calculate the infection rate and add it to the list
        infection_rate_by_clinic_age.append([clinic, age, cases/population])
    # Create a DF of the infection rates in current season and save it to the dict
    infection_rate_by_clinic_age_df = pd.DataFrame(infection_rate_by_clinic_age, columns=['clinic_code', 'age', 'infection_rate'])
    infection_rate_by_clinic_age_df.set_index(['clinic_code', 'age'], inplace=True)
    # Add population proportion out of subdist
    # Add stat area and subdist
    infection_rate_by_clinic_age_df['subdist'] = infection_rate_by_clinic_age_df.apply(lambda row: clinics_subdists.loc[row.name[0]], axis=1)
    # Add prop out of subdist (if the clinic is not relevant - 0)
    infection_rate_by_clinic_age_df['pop_prop'] = infection_rate_by_clinic_age_df.index.map(lambda x: prop_net_pop.get(x,0))
    infection_rates[season] = infection_rate_by_clinic_age_df
with open('../../Data/influenza_model/data/infection_rates_clinics.pickle', 'wb') as pickle_out:
    pickle.dump(infection_rates, pickle_out)
# -
# ### Short season
# +
# Short season: restrict each season to the dates from September onward.
dates_2017_short = [pd.Timestamp(2016, 9, 1) + pd.Timedelta(days=1) * i for i in range(273)]
# dates_2017_short = [pd.Timestamp(2016, 10, 1) + pd.Timedelta(days=1) * i for i in range(243)]
# NOTE(review): pd.datetime is deprecated in newer pandas; datetime.datetime
# would be the drop-in replacement here — confirm before upgrading pandas.
day_in_season_short = [(date - pd.datetime(date.year if date.month > 5 else date.year - 1, 6, 1)).days
                       for date in dates_2017_short]
# +
# Get relevant locs for each season: index of the first September week per season.
locs = []
sizes = []
for season in model.seasons:
    example_season_df = total_influenza_weekly_cases[total_influenza_weekly_cases.season == season].copy()
    # Go over the date and get the relevant start and stop locations
    start, stop = 0,0
    for i, date in enumerate(example_season_df.index):
        if date.month == 9:
#         if date.month == 10:
            start = i
            break
    # Save the start and stop location for current season
    locs.append(start)
    sizes.append(example_season_df.index.size)
# Get the relevant indices for all seasons (positions into the concatenated weekly series)
short_season_idx = np.concatenate([sum(sizes[:i]) + np.arange(locs[i],sizes[i]) for i in range(len(model.seasons))])
# -
# Update data for fit
# +
# Get only relevant dates - by clinic and age (slice every series to the short-season weeks)
for clinic, age in influenza_weekly_cases_by_clinic_age:
    influenza_weekly_cases_by_clinic_age[(clinic, age)] = influenza_weekly_cases_by_clinic_age[(clinic, age)].iloc[short_season_idx]
# Update the total by age group as a sum of all the relevant clinic, age groups
# Children
# Initialize to one of the dfs (will be updated)
influenza_weekly_cases_by_age[0] = list(influenza_weekly_cases_by_clinic_age.values())[0].copy()
influenza_weekly_cases_by_age[0].cases = sum([influenza_weekly_cases_by_clinic_age[(clinic, age)].cases.values.copy()
                                              for clinic, age in influenza_weekly_cases_by_clinic_age if age == 0])
# Adults
# Initialize to one of the dfs (will be updated)
influenza_weekly_cases_by_age[1] = list(influenza_weekly_cases_by_clinic_age.values())[0].copy()
influenza_weekly_cases_by_age[1].cases = sum([influenza_weekly_cases_by_clinic_age[(clinic, age)].cases.values.copy()
                                              for clinic, age in influenza_weekly_cases_by_clinic_age if age == 1])
# Update the total by subdist as a sum of all the relevant clinic, age groups
influenza_weekly_cases_by_subdist = {key: influenza_weekly_cases_by_age[0].copy() for key in prep_data['relevant_subdists_age']}
# load subdist by clinic
clinics_subdists = pd.read_pickle('../../data/vaccination_data/clinics_with_stat_area.pickle').subdist
# Go over all subdists and sum
for subdist, age in prep_data['relevant_subdists_age']:
    influenza_weekly_cases_by_subdist[(subdist, age)].cases = \
        sum([influenza_weekly_cases_by_clinic_age[(clinic, cur_age)].cases.values.copy()
             for clinic, cur_age in influenza_weekly_cases_by_clinic_age
             if clinics_subdists.loc[clinic] == subdist and cur_age == age])
# Update the total as a sum of all the clinics and age groups
# Initialize to one of the dfs (will be updated)
total_influenza_weekly_cases = list(influenza_weekly_cases_by_clinic_age.values())[0].copy()
total_influenza_weekly_cases.cases = sum([influenza_weekly_cases_by_clinic_age[(clinic, age)].cases.values.copy()
                                          for clinic, age in influenza_weekly_cases_by_clinic_age])
# +
# Save - short. Fixed: each dictionary is written to its matching file name;
# previously the by_age and by_subdist objects were swapped (compare with the
# full-season save block, which pairs names and files correctly).
with open('../../Data/influenza_model/data/influenza_weekly_cases_by_clinic_age_short.pickle', 'wb') as pickle_out:
    pickle.dump(influenza_weekly_cases_by_clinic_age, pickle_out)
with open('../../Data/influenza_model/data/influenza_weekly_cases_by_age_short.pickle', 'wb') as pickle_out:
    pickle.dump(influenza_weekly_cases_by_age, pickle_out)
with open('../../Data/influenza_model/data/influenza_weekly_cases_by_subdist_short.pickle', 'wb') as pickle_out:
    pickle.dump(influenza_weekly_cases_by_subdist, pickle_out)
total_influenza_weekly_cases.to_pickle('../../Data/influenza_model/data/total_influenza_weekly_cases_short.pickle')
# +
# Save - short, October-start variant ("_10" files). Fixed: each dictionary is
# written to its matching file name; previously the by_age and by_subdist
# objects were swapped.
with open('../../Data/influenza_model/data/influenza_weekly_cases_by_clinic_age_short_10.pickle', 'wb') as pickle_out:
    pickle.dump(influenza_weekly_cases_by_clinic_age, pickle_out)
with open('../../Data/influenza_model/data/influenza_weekly_cases_by_age_short_10.pickle', 'wb') as pickle_out:
    pickle.dump(influenza_weekly_cases_by_age, pickle_out)
with open('../../Data/influenza_model/data/influenza_weekly_cases_by_subdist_short_10.pickle', 'wb') as pickle_out:
    pickle.dump(influenza_weekly_cases_by_subdist, pickle_out)
total_influenza_weekly_cases.to_pickle('../../Data/influenza_model/data/total_influenza_weekly_cases_short_10.pickle')
# -
# Plot the short-season series for a visual sanity check.
plt.figure(figsize=(15,7))
influenza_weekly_cases_by_age[0].cases.plot()
influenza_weekly_cases_by_age[1].cases.plot()
total_influenza_weekly_cases.cases.plot()
plt.show()
# ## Recovery rates
# +
# Recovery rate in days from literature
recovery_0_5 = 8
recovery_6_14 = 6.5
recovery_15 = 4.5
# Load israeli population by age
israeli_pop_all_ages = pd.read_excel('../data/population_all_age_groups.xlsx', sheet_name='israeli_pop')
israeli_pop_all_ages.set_index('age', inplace=True)
# Population proportions used to average the literature recovery durations
# into the model's two age groups (children 0-17, adults 18+).
# 0-5 out of 0-17
prop05_017 = israeli_pop_all_ages.iloc[0:5+1,:].israeli_pop.sum() / israeli_pop_all_ages.iloc[0:17+1,:].israeli_pop.sum()
# 6-14 out of 0-17
prop614_017 = israeli_pop_all_ages.iloc[6:14+1,:].israeli_pop.sum() / israeli_pop_all_ages.iloc[0:17+1,:].israeli_pop.sum()
# 15-17 out of 0-17
prop1517_017 = israeli_pop_all_ages.iloc[15:17+1,:].israeli_pop.sum() / israeli_pop_all_ages.iloc[0:17+1,:].israeli_pop.sum()
# Recovery rate = 1 / (population-weighted mean recovery duration in days)
children_recovery_rate = 1 / (prop05_017*recovery_0_5 + prop614_017*recovery_6_14 + prop1517_017*recovery_15)
adults_recovery_rate = 1 / recovery_15
print(children_recovery_rate)
print(adults_recovery_rate)
| 26,287 |
/K-Means Clustering/Digits Classification/.ipynb_checkpoints/KMeans Clustering for Imagery Analysis (Jupyter Notebook)-checkpoint.ipynb | e30e422cab873de947eb1b229e22e2adda93f920 | [] | no_license | SuryaSsrivastava/machine_learning_theories | https://github.com/SuryaSsrivastava/machine_learning_theories | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 126,498 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import datetime
import os
import socket
import pickle
import json
from time import time
from types import SimpleNamespace
import matplotlib.pyplot as plt
from matplotlib import collections as mc, patches
import numpy as np
import numpy.random as rd
from lsnn.guillaume_toolbox.matplotlib_extension import strip_right_top_axis, raster_plot
# from bin.tutorial_storerecall_utils import update_plot, update_stp_plot
# +
# Select a trained store-recall run and load its saved trajectory + flags.
results_path = 'results/tutorial_storerecall_2neuron_solution/'
# target_path = 'results/tutorial_storerecall_2neuron_solution/2021_03_31_09_25_53_ALIFv_seqlen8_seqdelay3_in100_R0_A2_V01_tauchar200_commenteLIFE_4_excitatory'
# CURRENT batch 6: target_path = 'results/tutorial_storerecall_2neuron_solution/2021_03_31_08_35_06_ALIFv_seqlen8_seqdelay3_in100_R0_A2_V01_tauchar200_commenteLIFE_3_customPlot'
# Statistics on the test set average error 0.0 +- 0.0 (averaged over 16 test batches of size 128)
# target_path= 'results/tutorial_storerecall_2neuron_solution/2021_04_02_10_57_43_ALIFv_seqlen8_seqdelay3_in100_R0_A2_V01_tauchar200_commenteLIFE_3_customPlot2Recall'
# Statistics on the test set average error 0.00092 +- 0.0024 (averaged over 16 test batches of size 128)
# target_path= 'results/tutorial_storerecall_2neuron_solution/2021_05_15_10_59_06_ALIFv_seqlen16_seqdelay3_in100_R0_A2_V01_tauchar200_commenteLIFE_5_withInhibition'
# target_path = results_path + '2021_06_03_10_09_44_ALIFv_seqlen32_seqdelay3_in100_R0_A2_V01_tauchar200_commenteLIFE_6_longDelayReset'
# target_path = results_path + '2021_06_03_10_38_47_ALIFv_seqlen64_seqdelay3_in100_R0_A2_V01_tauchar200_commenteLIFE_6_longDelayReset'
# target_path = results_path + '2021_06_03_10_51_16_ALIFv_seqlen64_seqdelay3_in100_R0_A2_V01_tauchar400_commenteLIFE_6_longDelayReset_tc400'
target_path = results_path + '2021_06_03_16_20_56_ALIFv_seqlen64_seqdelay3_in100_R0_A2_V01_tauchar200_commenteLIFE_6_longDelayReset_thr0.03'
# Statistics on the test set average error 0.034 +- 0.0053 (averaged over 16 test batches of size 128)
data = None
with open(os.path.join(target_path, 'plot_custom_trajectory_data.pickle'), 'rb') as file:
    data = pickle.load(file)
assert data is not None
FLAGS = None
with open(os.path.join(target_path, 'flag.json'), 'rb') as file:
    FLAGS = json.load(file)
FLAGS = SimpleNamespace(**FLAGS)  # attribute-style access to the saved flags
assert FLAGS is not None
# Experiment parameters
dt = 1.
repeat_batch_test = 10
print_every = FLAGS.print_every
# Frequencies
input_f0 = FLAGS.f0 / 1000 # in kHz in coherence with the usage of ms for time
regularization_f0 = FLAGS.reg_rate / 1000
regularization_f0_max = FLAGS.reg_max_rate / 1000
# Network parameters
tau_v = FLAGS.tau
thr = FLAGS.thr
# Symbol number
n_charac = FLAGS.n_charac # Number of digit symbols
n_input_symbols = n_charac + 2 # Total number of symbols including recall and store
n_output_symbols = n_charac # Number of output symbols
recall_symbol = n_input_symbols - 1 # ID of the recall symbol
store_symbol = n_input_symbols - 2 # ID of the store symbol
# Neuron population sizes
input_neuron_split = np.array_split(np.arange(FLAGS.n_in), n_input_symbols)
def update_plot(plot_result_values, batch=0, n_max_neuron_per_raster=20, n_max_synapses=FLAGS.n_con, t_from=0, t_to=None):
    """
    Redraw the (module-level) matplotlib figure with one batch of results.

    From top to bottom the axes show: input spike raster, hidden-layer raster,
    controller/memory raster (skipped for LIF/ALIF models), target vs network
    output, adaptive thresholds (or PSPs), and membrane potentials.

    :param plot_result_values: dict of arrays keyed by 'input_spikes', 'z',
        'z_con', 'target_nums', 'recall_charac_mask', 'out_plot',
        'out_plot_char_step', 'b_con', 'v' — each indexed [batch, time, ...].
    :param batch: which batch element to plot.
    :param n_max_neuron_per_raster: cap on neurons shown per raster panel.
    :param n_max_synapses: cap on threshold/PSP traces shown.
    :param t_from: first time step (ms) to plot.
    :param t_to: one-past-last time step; None means the whole sequence.
    :return: None; draws onto the global `ax_list` axes in place.

    NOTE(review): relies on module-level globals `ax_list`, `FLAGS`, `thr`
    and `n_input_symbols`; `fig`/`ax_list` must be created before each call.
    """
    t_to = -1 if t_to is None else t_to
    # Clear the axis to print new plots
    for k in range(ax_list.shape[0]):
        ax = ax_list[k]
        ax.clear()
        strip_right_top_axis(ax)
    # if len(plot_result_values['false_sentence_id_list']) > 0:
    #     print(plot_result_values['false_sentence_id_list'])
    #     batch = plot_result_values['false_sentence_id_list'][0]
    #     ax_list[0].set_title("Failed batch " + str(batch))
    # Plot the data, from top to bottom each axe represents: inputs, recurrent and controller
    for k_data, data, d_name in zip(range(3),
                                    [plot_result_values['input_spikes'], plot_result_values['z'],
                                     plot_result_values['z_con']],
                                    ['Input', 'Hidden', 'Memory' if FLAGS.model == 'Mikolov' else 'Controller']):
        if FLAGS.model in ['LIF', 'ALIF'] and k_data == 2:
            continue
        ax = ax_list[k_data]
        ax.spines['bottom'].set_visible(False)
        ax.set_xticks([])
        # ax.grid(color='black', alpha=0.15, linewidth=0.4)
        if np.size(data) > 0:
            data = data[batch, t_from:t_to]
            n_max = min(data.shape[1], n_max_neuron_per_raster)
            cell_select = np.linspace(start=0, stop=data.shape[1] - 1, num=n_max, dtype=int)
            data = data[:, cell_select]  # select a maximum of n_max_neuron_per_raster neurons to plot
            raster_plot(ax, data, linewidth=0.5)
        ax.set_ylabel(d_name)
        ax.set_xticklabels([])
        if d_name == 'Input':
            # Label the four input channel groups (value 0/1, store, recall).
            ax.set_yticklabels([])
            n_channel = data.shape[1] // n_input_symbols
            top_margin = 0.08
            left_margin = -0.1
            ax.text(left_margin, 1. - top_margin, 'Recall', transform=ax.transAxes, fontsize=7, verticalalignment='top')
            ax.text(left_margin, 0.75 - top_margin, 'Store', transform=ax.transAxes, fontsize=7, verticalalignment='top')
            ax.text(left_margin, 0.5 - top_margin, 'Value 1', transform=ax.transAxes, fontsize=7, verticalalignment='top')
            ax.text(left_margin, 0.25 - top_margin, 'Value 0', transform=ax.transAxes, fontsize=7, verticalalignment='top')
    # plot targets (only during recall periods; -1 marks non-recall steps)
    ax = ax_list[3 if FLAGS.model not in ['LIF', 'ALIF'] else 2]
    ax.spines['bottom'].set_visible(False)
    ax.set_xticks([])
    mask = plot_result_values['recall_charac_mask'][batch, t_from:t_to]
    data = plot_result_values['target_nums'][batch, t_from:t_to]
    data[np.invert(mask)] = -1
    lines = []
    ind_nt = np.argwhere(data != -1)
    for idx in ind_nt.tolist():
        i = idx[0]
        lines.append([(i * FLAGS.tau_char, data[i]), ((i + 1) * FLAGS.tau_char, data[i])])
    lc_t = mc.LineCollection(lines, colors='green', linewidths=2, label='Target')
    ax.add_collection(lc_t)  # plot target segments
    # plot output per tau_char
    data = plot_result_values['out_plot_char_step'][batch, t_from:t_to]
    data = np.array([(d[1] - d[0] + 1) / 2 for d in data])  # map 2-class scores to [0, 1]
    data[np.invert(mask)] = -1
    lines = []
    ind_nt = np.argwhere(data != -1)
    for idx in ind_nt.tolist():
        i = idx[0]
        lines.append([(i * FLAGS.tau_char, data[i]), ((i + 1) * FLAGS.tau_char, data[i])])
    lc_o = mc.LineCollection(lines, colors='blue', linewidths=2, label='Output')
    ax.add_collection(lc_o)  # plot output segments
    # plot softmax of psp-s per dt for more intuitive monitoring
    # ploting only for second class since this is more intuitive to follow (first class is just a mirror)
    output2 = plot_result_values['out_plot'][batch, t_from:t_to, 1]
    presentation_steps = np.arange(output2.shape[0])
    ax.set_yticks([0, 0.5, 1])
    # ax.grid(color='black', alpha=0.15, linewidth=0.4)
    ax.set_ylabel('Output')
    line_output2, = ax.plot(presentation_steps, output2, color='purple', label='softmax', alpha=0.7)
    ax.axis([0, presentation_steps[-1] + 1, -0.3, 1.1])
    ax.legend(handles=[lc_t, lc_o, line_output2], loc='lower center', fontsize=7,
              bbox_to_anchor=(0.5, -0.05), ncol=3)
    if FLAGS.model != 'LIF':
        ax.set_xticklabels([])
    # debug plot for psp-s or biases
    plot_param = 'b_con'  # or 'psp'
    ax = ax_list[-2]
    ax.set_xticks([])
    ax.spines['bottom'].set_visible(False)
    # ax.grid(color='black', alpha=0.15, linewidth=0.4)
    ax.set_ylabel('PSPs' if plot_param == 'psp' else 'Threshold')
    sub_data = plot_result_values[plot_param][batch, t_from:t_to]
    if plot_param == 'b_con':
        # b_con stores the adaptive component; add the baseline for the effective threshold
        sub_data = sub_data + thr
    vars = np.var(sub_data, axis=0)
    # cell_with_max_var = np.argsort(vars)[::-1][:n_max_synapses * 3:3]
    # show the traces with the largest variance (the most informative cells)
    cell_with_max_var = np.argsort(vars)[::-1][:n_max_synapses]
    presentation_steps = np.arange(sub_data.shape[0])
    ax.plot(sub_data[:, cell_with_max_var], color='r', label='Output', alpha=0.4, linewidth=1)
    ax.axis([0, presentation_steps[-1], np.min(sub_data[:, cell_with_max_var]),
             np.max(sub_data[:, cell_with_max_var])])  # [xmin, xmax, ymin, ymax]
    plot_param = 'v'
    ax.set_xticklabels([])
    ax = ax_list[-1]
    # ax.grid(color='black', alpha=0.15, linewidth=0.4)
    ax.set_ylabel('Membrane potential')
    sub_data = plot_result_values[plot_param][batch, t_from:t_to]
    presentation_steps = np.arange(sub_data.shape[0])
    ax.plot(sub_data, label='Voltage', alpha=0.4, linewidth=1)
    # NOTE(review): y-limits reuse cell_with_max_var computed from the threshold
    # data above, not from the voltages — possibly intentional, verify.
    ax.axis([0, presentation_steps[-1], np.min(sub_data[:, cell_with_max_var]),
             np.max(sub_data[:, cell_with_max_var])])  # [xmin, xmax, ymin, ymax]
    ax.set_xlabel('Time in ms')
# +
#fig, ax_list = plt.subplots(nrows=5, figsize=(6, 7.5), gridspec_kw={'wspace': 0, 'hspace': 0.2})
#update_plot(plt, ax_list, FLAGS, data, batch=0)
# -
# Full-run overview figure for the first batch element; update_plot
# presumably draws into the freshly created ax_list via globals — confirm.
fig, ax_list = plt.subplots(nrows=5, figsize=(14, 10), gridspec_kw={'wspace': 0, 'hspace': 0.2})
update_plot(data, batch=0)
fig.savefig('fig1E_2.svg', format='svg')
# Zoomed view: first 1500 time steps.
fig, ax_list = plt.subplots(nrows=5, figsize=(10, 10), gridspec_kw={'wspace': 0, 'hspace': 0.2})
# update_plot(data, batch=0, t_from=0, t_to=3000)
update_plot(data, batch=0, t_from=0, t_to=1500)
fig.savefig('fig1E_12_1.svg', format='svg')
# Zoomed view: last 1500 time steps.
fig, ax_list = plt.subplots(nrows=5, figsize=(10, 10), gridspec_kw={'wspace': 0, 'hspace': 0.2})
# update_plot(data, batch=0, t_from=data['z'].shape[1] - 3000)
update_plot(data, batch=0, t_from=data['z'].shape[1] - 1500)
fig.savefig('fig1E_12_2.svg', format='svg')
# +
# debug plot for psp-s or biases
fig, ax = plt.subplots(nrows=1, figsize=(14, 1), gridspec_kw={'wspace': 0, 'hspace': 0.2})
ax.clear()
strip_right_top_axis(ax)
ax.set_xticks([])
ax.spines['bottom'].set_visible(False)
# Effective firing threshold over time for batch element 0: the adaptive
# component b_con plus the baseline threshold thr.
sub_data = data['b_con'][0]
sub_data = sub_data + thr
presentation_steps = np.arange(sub_data.shape[0])
ax.plot(sub_data[:, :], color='r', label='Output', alpha=0.4, linewidth=1)
# +/-0.001 margin keeps near-flat traces visible.  [xmin, xmax, ymin, ymax]
ax.axis([0, presentation_steps[-1], np.min(sub_data[:, :])-0.001, np.max(sub_data[:, :])+0.001])
fig.savefig('fig1E_thr.svg', format='svg')
# +
# debug plot for psp-s or biases
# Same effective-threshold trace as above, split into the first and the
# last 3000 time steps.
fig, ax = plt.subplots(nrows=1, figsize=(14, 1), gridspec_kw={'wspace': 0, 'hspace': 0.2})
ax.clear()
strip_right_top_axis(ax)
ax.set_xticks([])
ax.spines['bottom'].set_visible(False)
sub_data = data['b_con'][0]
sub_data = sub_data + thr  # effective threshold = adaptive part + baseline
t_start = 0
t_end = 3000
presentation_steps = np.arange(sub_data[t_start:t_end, :].shape[0])
ax.plot(sub_data[t_start:t_end, :], color='r', label='Output', alpha=0.4, linewidth=1)
# NOTE(review): ymin is window-local but ymax uses the full-range max —
# possibly intentional so both windows share a top scale; confirm.
ax.axis([0, presentation_steps[-1], np.min(sub_data[t_start:t_end, :])-0.001, np.max(sub_data[:, :])+0.001])  # [xmin, xmax, ymin, ymax]
fig.savefig('fig1E_thr_1.svg', format='svg')
fig, ax = plt.subplots(nrows=1, figsize=(14, 1), gridspec_kw={'wspace': 0, 'hspace': 0.2})
ax.clear()
strip_right_top_axis(ax)
ax.set_xticks([])
ax.spines['bottom'].set_visible(False)
sub_data = data['b_con'][0]
sub_data = sub_data + thr
t_start = sub_data.shape[0] - 3000
t_end = sub_data.shape[0]
presentation_steps = np.arange(sub_data[t_start:t_end, :].shape[0])
ax.plot(sub_data[t_start:t_end, :], color='r', label='Output', alpha=0.4, linewidth=1)
ax.axis([0, presentation_steps[-1], np.min(sub_data[t_start:t_end, :])-0.001, np.max(sub_data[:, :])+0.001])  # [xmin, xmax, ymin, ymax]
fig.savefig('fig1E_thr_2.svg', format='svg')
# -
sub_data.shape[0]
# +
# Analysis windows (early vs late) for spike differences; alternative
# window settings for other tau_char values are kept commented for
# reference.  `b` here presumably comes from an earlier cell — confirm.
diffs = []
max_t = data['z'][b].shape[0]
# ranges = [(1400, max_t)]
# ranges = [(500, 1500), (6000, max_t)]
ranges = [(500, 1500), (12000, max_t)]
# ranges = [(1000, 3000), (24000, max_t)] # tau_char=400
for b in range(FLAGS.batch_test):
le as pl
def get_od_map():
    """Build the station origin-destination (OD) lookup table.

    Loads the station id map from ``station_id_map.pkl`` and cross-joins it
    with itself via a constant helper column, so every ordered pair of
    stations becomes one row; columns are then renamed to O-/D- prefixed
    labels and the helper column is dropped.
    """
    id_map = pl.load(open("station_id_map.pkl", 'rb'))
    id_map['divideLine'] = 1  # constant merge key -> cartesian product
    od_map = pd.merge(id_map, id_map, on='divideLine')
    rename_cols = {
        'station_id_x': 'O站ID', 'station_name_x': 'O站名称',
        'line_id_x': 'O站线路ID', 'line_name_x': 'O站线路名称',
        'station_id_y': 'D站ID', 'station_name_y': 'D站名称',
        'line_id_y': 'D站线路ID', 'line_name_y': 'D站线路名称',
    }
    od_map = od_map.rename(columns=rename_cols)
    return od_map.drop(columns=['divideLine'])
station_od_map=get_od_map()
# Run the trained model on the test set and dump per-OD-pair predictions,
# one Excel file per hourly slot, joined with the station OD lookup table.
test_out=sess.run(out,feed_dict={x_batch:test_x,y_batch:test_y,\
x_cha_batch:test_x_cha,y_cha_batch:test_y_cha, \
A_:A,is_training:False})
# Hourly labels for 2018-09-17..19, 05:00-24:00; only the last 46 slots are
# kept — presumably to align with the test-set length, confirm.
time_mark=[ '%s_%02d:00-%02d:00'%(d,h,h+1) for d in ['20180917','20180918','20180919'] for h in list(range(5,24))]
time_mark=time_mark[-46:]
for y_,out_,time_mark_ in zip(test_y,test_out,time_mark):
    y_=y_.reshape(-1,1)
    out_=out_.reshape(-1,1)
    ori_pre=np.concatenate((out_,y_),1)  # column 0: prediction, column 1: truth
    ori_pre=pd.DataFrame(ori_pre,columns=['预测值','实际值'])
    inf_detail=pd.concat([station_od_map,ori_pre],axis=1)
    inf_detail['预测值'] = inf_detail['预测值'].astype(int)
    inf_detail['实际值'] = inf_detail['实际值'].astype(int)
    inf_detail['预测差值']=inf_detail['预测值']-inf_detail['实际值']
    # Relative error, guarded against division by zero when the truth is 0.
    inf_detail['预测差异率'] = inf_detail[['预测差值','实际值']].apply(lambda x: x['预测差值']/x['实际值'] if x['实际值']!=0 else 0,axis=1)
    inf_detail['预测精度'] = inf_detail['预测差异率'].apply(lambda x: (1-abs(x)))
    inf_detail.to_excel('output/STAGCN模型预测(%s).xlsx'%time_mark_,index=None)
# -
# List every tensor (output 0) of every node in the default TF graph;
# nodes with no outputs are reported instead of raising.
for n in tf.get_default_graph().as_graph_def().node:
    try:
        print(tf.get_default_graph().get_tensor_by_name(n.name+':0'))
    except:  # NOTE(review): bare except — deliberately best-effort here
        print(n.name, "has 0 outputs")
        continue
# !nvidia-smi
import matplotlib.pyplot as plt
def plot(array, labels=('train', 'test')):
    """Plot learning curves against their epoch/step index.

    Parameters
    ----------
    array : sequence of sequences
        ``array[0]`` is the training curve and ``array[1]`` the test curve
        (extra series are ignored unless extra ``labels`` are supplied).
    labels : tuple of str, optional
        Legend label for each curve, in order.  Defaults reproduce the
        original hard-coded 'train'/'test' labels.

    Fix over the original: each series is plotted against its own length,
    so train/test curves of different lengths no longer raise a shape
    mismatch inside ``plt.plot``.
    """
    for series, label in zip(array, labels):
        plt.plot(np.arange(len(series)), series, label=label)
    plt.legend()
# Report the best (minimum) train/test losses and show the loss curves.
print("train min",min(loss_list[0]))
print("test min",min(loss_list[1]))
plot(loss_list)
# Epoch with the best training accuracy, and the test accuracy at that epoch.
accmax_index=np.argmax(acc_list[0])
print(accmax_index)
print("train max acc",acc_list[0][accmax_index])
print("test acc",acc_list[1][accmax_index])
print("train max",max(acc_list[0]))
print("test max",max(acc_list[1]))
plot(acc_list)
import pandas as pd
# Top-10 epochs ranked by training accuracy, with corresponding losses
# and test metrics.
accmax_index=np.argsort(acc_list[0])[-10:]
METRIC_topk_acc = pd.DataFrame([np.round(np.array(acc_list[0])[accmax_index],2),np.array(loss_list[0])[accmax_index].astype(int),
                                np.round(np.array(acc_list[1])[accmax_index],2),np.array(loss_list[1])[accmax_index].astype(int)]).T
METRIC_topk_acc.columns=["train max acc","train loss","test acc","test loss"]
METRIC_topk_acc
# Top-10 epochs ranked by (lowest) training loss.
lossmin_index=np.argsort(loss_list[0])[:10]
METRIC_topk_loss = pd.DataFrame([np.round(np.array(acc_list[0])[lossmin_index],2),np.array(loss_list[0])[lossmin_index].astype(int),
                                 np.round(np.array(acc_list[1])[lossmin_index],2),np.array(loss_list[1])[lossmin_index].astype(int)]).T
METRIC_topk_loss.columns=["train acc","train min loss","test acc","test loss"]
METRIC_topk_loss
| 15,495 |
/B2Kemu/Notebooks/GBReweighting.ipynb | db2a5538df42c08ef51a95140cacd86d5281bac2 | [] | no_license | Burney222/Master-Make-Based | https://github.com/Burney222/Master-Make-Based | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 625,549 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = [15., 12.]
from hep_ml.reweight import GBReweighter
from root_pandas import read_root
from root_numpy import list_branches
import numpy as np
from itertools import izip
import time
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import train_test_split
# -
# # Function Definitions
#Calculate Chi2 of re-weighting variables
def Chi2_reweighting_branches(data, mc, reweighting_branches, binning, data_weights=None, mc_weights=None):
chi2 = 0
#Normalise entries
if data_weights is None:
data_weights = np.ones(len(data))
if mc_weights is None:
mc_weights = np.ones(len(mc))
data_weights_norm = data_weights/np.sum(data_weights)
mc_weights_norm = mc_weights/np.sum(mc_weights)
print "Sum data weights:", np.sum(data_weights_norm)
print "Sum mc weights:", np.sum(mc_weights_norm)
#Iterate through branches
for branch, branch_binning in izip(reweighting_branches,binning):
data_hist, _ = np.histogram(data[branch], branch_binning, weights=data_weights_norm)
mc_hist, _ = np.histogram(mc[branch], branch_binning, weights =mc_weights_norm)
differences = (data_hist-mc_hist)**2 / (data_hist+mc_hist)
differences[np.isnan(differences)] = 0
chi2 += np.sum(differences)
return chi2
# # Inputs
# +
# Branches used to train the reweighter, and extra branches held out purely
# to check how well the learned weights generalise.
reweighting_branches = ["B_PT", "nTracks", "nSPDHits"]
comparison_branches = ["B_ENDVERTEX_CHI2", "B_FDCHI2_OWNPV", "B_P", "B_DIRA_OWNPV",
                       "B_IPCHI2_OWNPV"]
sWeight_branch = "sig_total_sw"
#B2Kmumu has cut on Jpsi mass
# sWeighted data sample: the reweighting target.
B2Kmumu_DATA = read_root("/net/storage03/data/users/dberninghoff/B2Kll_ownjobs/sWeighted/DATA_Bplus_Kplusmumu.root",
                         columns=reweighting_branches+comparison_branches+[sWeight_branch])
sWeights = B2Kmumu_DATA[sWeight_branch]
# MC samples; only B2KJpsimumu_MC is used for training further below.
B2KJpsimumu_MC = read_root("/net/storage03/data/users/dberninghoff/B2Kll_ownjobs/Jpsi/MC_Bplus_KplusJpsimumu.root",
                           columns=reweighting_branches+comparison_branches)
B2Kmumu_MC = read_root("/net/storage03/data/users/dberninghoff/B2Kll_ownjobs/PrePreselected/MC_Bplus_Kplusmumu.root",
                       columns=reweighting_branches+comparison_branches)
B2Kmue_MC = read_root("/net/storage03/data/users/dberninghoff/B2Kll_ownjobs/PrePreselected/MC_Bplus_Kplusmue.root",
                      columns=reweighting_branches+comparison_branches)
B2Kee_MC = read_root("/net/storage03/data/users/dberninghoff/B2Kll_ownjobs/PrePreselected/MC_Bplus_Kplusee.root",
                     columns=reweighting_branches+comparison_branches)
#Best chi2
best_chi2 = 999999999999  # running best chi2 across manual setting scans
# -
# Quick-look nTracks distributions, MC vs sWeighted data.
test = plt.hist(B2KJpsimumu_MC["nTracks"], bins=np.linspace(0, 500, 40))
test2 = plt.hist(B2Kmumu_DATA["nTracks"], bins=np.linspace(0, 500, 40))
# # Grid Search to find good settings
# +
param_grid = {'max_depth' : [3,6,8],
              'learning_rate' : [0.2, 0.1, 0.02],
              'n_estimators' : [40, 100, 200] }
gb_for_cv = GBReweighter()
gs_cv = GridSearchCV(gb_for_cv, param_grid, n_jobs=-1).fit(B2KJpsimumu_MC[reweighting_branches], B2Kmumu_DATA[reweighting_branches], target_weight=sWeights)
# -
# # Training
#Train test-split
B2KJpsimumu_MC_train, B2KJpsimumu_MC_test = train_test_split(B2KJpsimumu_MC)
# +
# Candidate GBReweighter settings kept from earlier scans; only
# `test_settings` is actually used below.
good_settings = {'max_depth' : 8, 'learning_rate' : 0.1, 'n_estimators' : 100}
good_settings2 = {'max_depth' : 7, 'learning_rate' : 0.05, 'n_estimators' : 200, 'min_samples_leaf' : 1000,
                  'gb_args' : {'subsample': 0.6}}
test_settings = {'max_depth' : 3, 'learning_rate' : 0.1, 'n_estimators' : 90, 'min_samples_leaf' : 1000,
                 'gb_args' : {'subsample': 0.6}}
gb = GBReweighter(**test_settings)
begin = time.time()
# Fit: MC (train split) is reweighted towards the sWeighted data.
print gb.fit(B2KJpsimumu_MC_train[reweighting_branches], B2Kmumu_DATA[reweighting_branches], target_weight=sWeights)
print "Training took", time.time()-begin, "seconds"
print "Re-weighting variables:", reweighting_branches
# -
# # Prediction
# +
B2KJpsimumu_weights_test = gb.predict_weights(B2KJpsimumu_MC_test[reweighting_branches])
B2KJpsimumu_weights_train = gb.predict_weights(B2KJpsimumu_MC_train[reweighting_branches])
#Chi2 of re-weighting branches
binning = [np.linspace(0,30000,50), np.linspace(0,1000,50), np.linspace(0,1000,50)]
# chi2: data vs reweighted test MC (quality of the weights);
# chi2_traintest: reweighted train vs test MC (overfitting check).
chi2 = Chi2_reweighting_branches(B2Kmumu_DATA, B2KJpsimumu_MC_test, reweighting_branches, binning, data_weights=sWeights, mc_weights=B2KJpsimumu_weights_test)
chi2_traintest = Chi2_reweighting_branches(B2KJpsimumu_MC_train, B2KJpsimumu_MC_test, reweighting_branches, binning, data_weights=B2KJpsimumu_weights_train, mc_weights=B2KJpsimumu_weights_test)
print "This Chi2:", chi2
if chi2 < best_chi2:
    best_chi2 = chi2
    best_settings = test_settings
    print "NEW BEST CHI2!"
print "Best Chi2:", best_chi2
print "Best Settings:", best_settings
print "Training vs. testing CHI2:", chi2_traintest
# -
#Some information
# NOTE(review): B2KJpsimumu_weights is not defined in the cells shown here
# (only the _test/_train variants are) — presumably left over from an
# earlier session; confirm before rerunning top-to-bottom.
print "Sum of the weights", np.sum(B2KJpsimumu_weights)
print "nEntries MC", len(B2KJpsimumu_MC)
print "Sum of s-weights", np.sum(sWeights)
print "nEntries data", len(B2Kmumu_DATA)
# +
#CAREFUL:Edit weights to match the number of entries in the MC and set maximum weight
# Iteratively cap individual weights at 30 and renormalise so the weight
# sum equals the MC entry count.
factor = float(len(B2KJpsimumu_MC))/np.sum(B2KJpsimumu_weights)
B2KJpsimumu_weights *= factor
iterations = 5
for iteration in range(iterations):
    for i in range(len(B2KJpsimumu_weights)):
        if B2KJpsimumu_weights[i] > 30:
            B2KJpsimumu_weights[i] = 30
    factor = float(len(B2KJpsimumu_MC))/np.sum(B2KJpsimumu_weights)
    B2KJpsimumu_weights *= factor
print "nEntries MC", len(B2KJpsimumu_MC)
print "Sum of the weights", np.sum(B2KJpsimumu_weights)
# -
# # Plotting
# +
#Plot weights
# Histogram up to the 95th-percentile weight to keep the bulk visible.
_ = plt.hist(B2KJpsimumu_weights_test, bins = np.linspace(0, np.sort(B2KJpsimumu_weights_test)[int(len(B2KJpsimumu_weights_test)*0.95)], 50))
print "Min weight", min(B2KJpsimumu_weights_test)
print "Max weight", max(B2KJpsimumu_weights_test)
print "MC entries", len(B2KJpsimumu_MC_test)
print "Re-weighted", np.sum(B2KJpsimumu_weights_test)
# -
#Comparison variables
comparison_vars = reweighting_branches+comparison_branches
#["B_PT", "nTracks"]["nSPDHits", "B_ENDVERTEX_CHI2", "B_FDCHI2_OWNPV", "B_P", "B_DIRA_OWNPV", "B_IPCHI2_OWNPV"]
# Units and binning, one entry per comparison variable (same order).
comparison_units = ["MeV", "", "", "", "", "MeV", "", ""]
comparison_binning = [np.linspace(0,30000,50), np.linspace(0,1000,50), np.linspace(0,1000,50),
                      np.linspace(0,20, 50), np.linspace(0,80000, 50), np.linspace(0, 400000,50),
                      np.linspace(0.9999,1,50), np.linspace(0,25,50)]
# +
#Compare distributions:
#Edit the weights to match the number of entries in the sWeighted data (for comparing the distributions properly)
factor_weighted_test = np.sum(sWeights)/np.sum(B2KJpsimumu_weights_test)
factor_weighted_train = np.sum(sWeights)/np.sum(B2KJpsimumu_weights_train)
B2KJpsimumu_weights_test *= factor_weighted_test
B2KJpsimumu_weights_train *= factor_weighted_train
# Uniform weights give the unweighted MC the same total as the sWeighted data.
factor_unweighted = np.sum(sWeights)/len(B2KJpsimumu_MC)
uniform_weights = np.ones(len(B2KJpsimumu_MC))
uniform_weights.fill(factor_unweighted)
print "Sum of sWeights:", np.sum(sWeights)
# NOTE(review): B2KJpsimumu_weights (no suffix) is not defined in the cells
# shown here; presumably stale state — confirm.
print "Sum of re-weights:", np.sum(B2KJpsimumu_weights)
print "Sum of uniform weights:", np.sum(uniform_weights)
# Overlay data, raw MC, and reweighted MC (train/test) for every variable.
for var,binning,unit in izip(comparison_vars, comparison_binning, comparison_units):
    plt.hist(np.asarray(B2Kmumu_DATA[var]), bins=binning, weights=np.asarray(sWeights), label="DATA (sWeighted)", normed=False, histtype="step")
    plt.hist(np.asarray(B2KJpsimumu_MC[var]), bins=binning, weights=uniform_weights, label="MC (unweighted)", normed=False, histtype="step")
    plt.hist(np.asarray(B2KJpsimumu_MC_test[var]), bins=binning, weights=np.asarray(B2KJpsimumu_weights_test), label="MC (TEST, reweighted)", normed=False, histtype="step")
    plt.hist(np.asarray(B2KJpsimumu_MC_train[var]), bins=binning, weights=np.asarray(B2KJpsimumu_weights_train), label="MC (TRAIN, reweighted)", normed=False, histtype="step", color='y')
    plt.title("B2Kmumu-Reweighting", fontsize=23)
    plt.xlabel(var+" ["+unit+"]", fontsize=23)
    plt.ylabel("Events (normalised)", fontsize=23)
    plt.xticks(fontsize=23)
    plt.yticks(fontsize=23)
    plt.legend(fontsize=19)
    plt.show()
    plt.clf()
# -
# Scratch cells: parse a comma-separated binning spec and try a format string.
binning = map(float, map(str.strip, "1,2,3".split(',')))
asdf = np.linspace(*binning)  # works in Python 2, where map() returns a list
print( "asdf {:%}".format(1./3))
plt.hist([1,2])
plt.xlabel("$m(Ke\mu)$")
| 8,795 |
/Transfer Learning/l06c02_exercise_flowers_with_transfer_learning.ipynb | c0fde1ad4d9dccb28054de6108fc65319135a4bf | [] | no_license | kalthommusa/Udacity-Intro-to-TensorFlow-for-Deep-Learning | https://github.com/kalthommusa/Udacity-Intro-to-TensorFlow-for-Deep-Learning | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 17,168 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Agglomerative Hierarchical Clustering
#
# In this technique, each data point initially forms its own cluster. At each iteration, the most similar clusters are merged, until a single cluster (or K clusters) remains. There are multiple ways to calculate the similarity between two clusters; in this project we use two:
# #### 1) Single Linkage (MIN)
# #### 2) Complete Linkage (MAX)
import numpy as np
import pandas as pd
from math import sqrt
# Column 0 holds the state/UT names; column 1 presumably the year, and the
# remaining columns the per-crime-type counts — confirm against the CSV.
dataset = pd.read_csv('india-basemap/data_clubbed.csv', header=None)
dataset.head()
states = dataset[0]
dataset.drop(0, inplace=True, axis=1)
dataset.head()
# ### Year-wise Clustering Analysis
#
# We perform year-wise clustering analysis for all states. To find the clusters for a particular based on their crime records, specify the year in the 'year' variable
# +
year = 2014
year_list = []
# First row index whose year (column 1) matches `year`.
start_range = 0
for i in range(len(dataset)):
    if dataset[1].loc[i] == year:
        start_range = i
        break
# Count the matching rows, then turn the count into an (exclusive) end index.
end_range = 0
for i in range(len(dataset)):
    if dataset[1].loc[i] == year:
        end_range += 1
end_range = end_range + start_range
dataset.drop(1, inplace=True, axis=1)
# NOTE(review): end_range+1 appends one row past the selected year's block,
# while the distance-matrix code below uses n = end_range - start_range —
# looks like an off-by-one; confirm.
for i in range(start_range, end_range+1):
    year_list.append(dataset.loc[i])
# -
# ### Step 1 - Distance Matrix
# Euclidean Distance is used to calculate the similarity between 2 rows of the dataset. This is used to create the distance matrix.
def euclidean_distance(row1, row2, n_features=11):
    """Euclidean distance between the first ``n_features`` entries of two rows.

    Parameters:
        row1, row2: indexable numeric sequences (dataset rows).
        n_features: number of leading features to compare.  Defaults to 11,
            the hard-coded feature count the original used, so existing
            two-argument calls behave exactly as before.
    Returns: the distance as a float.
    """
    return sqrt(sum((row1[i] - row2[i]) ** 2 for i in range(n_features)))
# ### Step 2 - Smallest Value
#
# The smallest value in the upper triangle of the distance matrix is used to find the smallest distance between 2 cluster.
def smallest_val(arr, clusters, n, linkage):
    """Find the closest pair of clusters, merge them, and return their distance.

    Scans the upper triangle of the n x n distance matrix ``arr`` for the
    minimum entry, then delegates to ``update_matrix`` to record the merge
    in ``clusters`` and rewrite the affected rows/columns according to
    ``linkage`` ('single' or 'complete').
    """
    best = 10**9  # sentinel larger than any real distance in the matrix
    best_i = 0
    best_j = 0
    for row in range(0, n):
        for col in range(row + 1, n):
            if arr[row][col] < best:
                best = arr[row][col]
                best_i = row
                best_j = col
    update_matrix(arr, clusters, best_i, best_j, n, linkage)
    #print(arr)
    return best
# ### Step 3 - Update the distance matrix
#
# The distance matrix is updated according to the linkage specified in the argument. The 2 different linkages are -
# - Single Linkage - Sim(C1,C2) = Min Sim(Pi,Pj) such that Pi ∈ C1 & Pj ∈ C2
# - Complete Linkage - im(C1,C2) = Max Sim(Pi,Pj) such that Pi ∈ C1 & Pj ∈ C2
def update_matrix(arr, clusters, i, j, n, linkage):
    """Merge clusters i and j in place and update the distance matrix.

    Records the merge (i, j, distance) in ``clusters``, rewrites the
    distances from every other cluster to the merged pair using the chosen
    ``linkage`` ('single' = MIN, 'complete' = MAX), and finally retires
    row/column i with the 10**9 sentinel — the merged cluster lives on at
    index j.
    """
    temp_list = []
    temp_list.append(i)
    temp_list.append(j)
    temp_list.append(arr[i][j])
    clusters.append(temp_list)
    print('Merged' , i, j, 'with distance', arr[i][j])
    # Walk the upper triangle; both (a, b) and (b, a) are written to keep
    # the matrix symmetric.
    for a in range(0, n):
        for b in range(a+1, n):
            # Row side of the pair touches i or j (but is not (i, j) itself).
            if ((a == i) and (b != j)) or ((a == j) and (b != i)):
                if linkage == 'single':
                    arr[a][b] = min(arr[i][b], arr[j][b])
                    arr[b][a] = min(arr[i][b], arr[j][b])
                elif linkage == 'complete':
                    arr[a][b] = max(arr[i][b], arr[j][b])
                    arr[b][a] = max(arr[i][b], arr[j][b])
            # Column side of the pair touches i or j.
            elif ((a != i) and (b == j)) or ((a != j) and (b == i)):
                if linkage == 'single':
                    arr[a][b] = min(arr[a][i], arr[a][j])
                    arr[b][a] = min(arr[a][i], arr[a][j])
                elif linkage == 'complete':
                    arr[a][b] = max(arr[a][i], arr[a][j])
                    arr[b][a] = max(arr[a][i], arr[a][j])
    # Retire index i so it can never be selected as a minimum again.
    for a in range(0, n):
        arr[i][a] = 10**9
        arr[a][i] = 10**9
# Build the symmetric pairwise-distance matrix for the selected year's rows.
data = dataset.values.tolist()
n = end_range - start_range
arr = [[0 for i in range(n)] for j in range(n)]
for i in range(start_range, end_range):
    for j in range(i+1, end_range):
        # Offset both indices by start_range so row/column 0 corresponds to
        # the first record of the selected year.  (The original subtracted
        # end_range for the second index, which only produced the right cell
        # by accident through Python's negative-index wrap-around; this also
        # computes each distance once instead of twice.)
        d = euclidean_distance(data[i], data[j])
        arr[i-start_range][j-start_range] = d
        arr[j-start_range][i-start_range] = d
# ### Step 4 - Find the clusters using DFS
#
# The connected components i.e the clusters are found by creating a graph and finding the connect components using DFS.
class Graph:
    """Undirected graph on vertices 0..V-1.

    Used to read clusters off as connected components: every recorded
    cluster merge becomes an edge, and each component is one cluster.
    """

    def __init__(self, V):
        self.V = V                          # number of vertices
        self.adj = [[] for _ in range(V)]   # adjacency lists

    def DFSUtil(self, temp, v, visited):
        """Depth-first search from v, appending reached vertices to temp."""
        visited[v] = True
        temp.append(v)
        for w in self.adj[v]:
            if visited[w] == False:
                temp = self.DFSUtil(temp, w, visited)
        return temp

    def addEdge(self, v, w):
        """Add the undirected edge (v, w)."""
        self.adj[v].append(w)
        self.adj[w].append(v)

    def connectedComponents(self):
        """Return the connected components as a list of vertex lists."""
        # Idiom fix: build the visited flags in one step instead of an
        # append loop; behavior is unchanged.
        visited = [False] * self.V
        cc = []
        for v in range(self.V):
            if visited[v] == False:
                cc.append(self.DFSUtil([], v, visited))
        return cc
# ### Results -
#
# - Using elbow method we found that 10 clusters yield the best results for this dataset
# - After finding the first 10 clusters, we perform a demographic survey of the data
# - The states that fall into the same cluster have similar crime patterns i.e the number and type of crimes
# - It is noticed that all the coastal states like Lakshadweep, Andaman and Nicobar Islands, Daman & Diu and D&N haveli fall into the same cluster very often
# - Another observation is that most of the Northeastern states fall into the same cluster
g = Graph(n);
linkage = 'complete'
clusters = []
labelList = []
# Merge the two closest clusters one step at a time; after each merge,
# mirror the recorded merges as graph edges and read the clusters off as
# connected components.  Stop once exactly 10 clusters remain.
for cnt in range(0,n-1):
    val = smallest_val(arr, clusters, n, linkage)
    # NOTE(review): this re-adds every recorded merge on each iteration,
    # piling up duplicate edges; harmless for connected components but
    # wasteful — confirm intent.
    for i in range(0,len(clusters)):
        g.addEdge(clusters[i][0], clusters[i][1])
    cc = g.connectedComponents()
    if len(cc) == 10:
        print('\n')
        # Map component vertex ids back to state names and report clusters.
        for i in range(0, len(cc)):
            temp_states = []
            for j in range(0, len(cc[i])):
                temp_states.append(states[cc[i][j]])
            print("Cluster "+ str(i)+" :"+ str(temp_states))
            labelList.append(" ".join(temp_states))
        break
labelList
# ### Cross-checking the results
#
# The scipy library is used to cross-check the results and to get a dendogram for the data
from scipy.cluster.hierarchy import dendrogram, linkage
from matplotlib import pyplot as plt
# NOTE(review): `from scipy.cluster.hierarchy import linkage` above shadows
# the `linkage = 'complete'` string used by the custom implementation.
X = np.array(year_list)
Z = linkage(X, 'complete')  # scipy linkage, complete (MAX) criterion
fig = plt.figure(figsize=(25, 15))
plt.xlabel('States')
plt.ylabel('Cluster Distance')
dn = dendrogram(Z, orientation='right')
# ### TODO: Create a `tf.keras.Sequential` model, and add the pre-trained model and the new classification layer. Remember that the classification layer must have the same number of classes as our Flowers dataset. Finally print a summary of the Sequential model.
# + id="mGcY27fY1q3Q"
model =
# + [markdown] id="OHbXQqIquFxQ"
# ### TODO: Train the model
#
# In the cell bellow train this model like any other, by first calling `compile` and then followed by `fit`. Make sure you use the proper parameters when applying both methods. Train the model for only 6 epochs.
# + id="3n0Wb9ylKd8R"
EPOCHS =
history =
# + [markdown] id="76as-K8-vFQJ"
# You can see we get ~88% validation accuracy with only 6 epochs of training, which is absolutely awesome. This is a huge improvement over the model we created in the previous lesson, where we were able to get ~76% accuracy with 80 epochs of training. The reason for this difference is that MobileNet v2 was carefully designed over a long time by experts, then trained on a massive dataset (ImageNet).
# + [markdown] id="SLxTcprUqJaq"
# # TODO: Plot Training and Validation Graphs
#
# In the cell below, plot the training and validation accuracy/loss graphs.
# + id="d28dhbFpr98b"
acc =
val_acc =
loss =
val_loss =
epochs_range =
# + [markdown] id="5zmoDisGvNye"
# What is a bit curious here is that validation performance is better than training performance, right from the start to the end of execution.
#
# One reason for this is that validation performance is measured at the end of the epoch, but training performance is the average values across the epoch.
#
# The bigger reason though is that we're reusing a large part of MobileNet which is already trained on Flower images.
# + [markdown] id="kb__ZN8uFn-D"
# # TODO: Check Predictions
#
# In the cell below get the label names from the dataset info and convert them into a NumPy array. Print the array to make sure you have the correct label names.
# + id="W_Zvg2i0fzJu"
class_names =
# + [markdown] id="4Olg6MsNGJTL"
# ### TODO: Create an Image Batch and Make Predictions
#
# In the cell below, use the `next()` function to create an `image_batch` and its corresponding `label_batch`. Convert both the `image_batch` and `label_batch` to numpy arrays using the `.numpy()` method. Then use the `.predict()` method to run the image batch through your model and make predictions. Then use the `np.argmax()` function to get the indices of the best prediction for each image. Finally convert the indices of the best predictions to class names.
# + id="fCLVCpEjJ_VP"
image_batch, label_batch =
predicted_batch =
predicted_batch = tf.squeeze(predicted_batch).numpy()
predicted_ids =
predicted_class_names =
# + [markdown] id="CkGbZxl9GZs-"
# ### TODO: Print True Labels and Predicted Indices
#
# In the cell below, print the true labels and the indices of predicted labels.
# + id="nL9IhOmGI5dJ"
print()
# + [markdown] id="gJDyzEfYuFcW"
# # Plot Model Predictions
# + id="wC_AYRJU9NQe"
plt.figure(figsize=(10,9))
# 6x5 grid of the first 30 images; each title is the predicted class name,
# colored blue when it matches the true label and red otherwise.
for n in range(30):
    plt.subplot(6,5,n+1)
    plt.subplots_adjust(hspace = 0.3)
    plt.imshow(image_batch[n])
    color = "blue" if predicted_ids[n] == label_batch[n] else "red"
    plt.title(predicted_class_names[n].title(), color=color)
    plt.axis('off')
_ = plt.suptitle("Model predictions (blue: correct, red: incorrect)")
# + [markdown] id="7QBKxS5CuKhc"
# # TODO: Perform Transfer Learning with the Inception Model
#
# Go to the [TensorFlow Hub documentation](https://tfhub.dev/s?module-type=image-feature-vector&q=tf2) and click on `tf2-preview/inception_v3/feature_vector`. This feature vector corresponds to the Inception v3 model. In the cells below, use transfer learning to create a CNN that uses Inception v3 as the pretrained model to classify the images from the Flowers dataset. Note that Inception, takes as input, images that are 299 x 299 pixels. Compare the accuracy you get with Inception v3 to the accuracy you got with MobileNet v2.
| 10,634 |
/HW11/nyc_taxi.ipynb | 0b0924b80d96beb649341f7679b71ca9694df5df | [] | no_license | wonkim0512/BDP | https://github.com/wonkim0512/BDP | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 78,557 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# -
data = pd.read_csv("nyc_data.csv")
data.tail()
data.columns
# Pickup/dropoff coordinate columns pulled out as separate series.
p_lat = data.pickup_latitude
p_long = data.pickup_longitude
d_lat = data.dropoff_latitude
d_long = data.dropoff_longitude
p_lat.tail()
def lat_long_to_pixels(lat, long):
    """Mercator-project latitude/longitude (degrees) onto a 100x100 canvas.

    Longitude maps linearly to x; latitude goes through the Mercator
    stretch (log of tan) before being scaled to y.  Works elementwise on
    scalars or array-likes.  Returns an (x, y) tuple.
    """
    mercator_lat = np.log(np.tan((lat * np.pi / 180.0 + np.pi/2.0)/2.0))
    pixel_x = 100*(long + 180.0) / 360.0
    pixel_y = 100*(mercator_lat - np.pi) / (2.0 * np.pi)
    return (pixel_x, pixel_y)
px, py = lat_long_to_pixels(p_lat, p_long)
px.tail()
px.count(), px.min(), px.max()
px.mean(), px.median(), px.std()
plt.scatter(px, py)
# Replot with tiny, nearly-transparent markers so point density shows up,
# and crop the axes to the dense region of pickups.
plt.figure(figsize = (8,6))
plt.scatter(px, py, s = 0.1, alpha = 0.03)
plt.axis("equal")
plt.xlim(29.40, 29.53)
plt.ylim(-37.65, -37.525)
plt.show()
| 1,085 |
/cars.ipynb | 0c970d1d483abfcbc571ead6dd350b310da7c41f | [] | no_license | sundusseif93/INTRO-TO-DATA-ANALYSIS | https://github.com/sundusseif93/INTRO-TO-DATA-ANALYSIS | 1 | 0 | null | 2019-07-23T23:25:44 | 2019-07-23T22:18:19 | null | Jupyter Notebook | false | false | .py | 521,322 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="iQjHqsmTAVLU"
# ## Exercise 3
# In the videos you looked at how you would improve Fashion MNIST using Convolutions. For your exercise see if you can improve MNIST to 99.8% accuracy or more using only a single convolutional layer and a single MaxPooling 2D. You should stop training once the accuracy goes above this amount. It should happen in less than 20 epochs, so it's ok to hard code the number of epochs for training, but your training must end once it hits the above metric. If it doesn't, then you'll need to redesign your layers.
#
# I've started the code for you -- you need to finish it!
#
# When 99.8% accuracy has been hit, you should print out the string "Reached 99.8% accuracy so cancelling training!"
#
# +
import tensorflow as tf
from os import path, getcwd, chdir
# DO NOT CHANGE THE LINE BELOW. If you are developing in a local
# environment, then grab mnist.npz from the Coursera Jupyter Notebook
# and place it inside a local folder and edit the path to that location
path = f"{getcwd()}/../tmp2/mnist.npz"
# -
# Let TensorFlow grab GPU memory on demand instead of all at once.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# +
# GRADED FUNCTION: train_mnist_conv
def train_mnist_conv():
    """Train a single-Conv2D / single-MaxPooling CNN on MNIST, stopping
    early via a callback once training accuracy reaches 99.8%.

    Uses the module-level ``path`` to locate the local mnist.npz file.
    Returns (epochs_run, final_training_accuracy).
    """
    # Please write your code only where you are indicated.
    # please do not remove model fitting inline comments.
    # YOUR CODE STARTS HERE
    class myCallback(tf.keras.callbacks.Callback):
        # NOTE(review): mutable default `logs={}` is a known smell but is
        # never mutated here; uses the 'acc' history key (TF 1.x naming),
        # matching history.history['acc'] below.
        def on_epoch_end(self, epoch, logs={}):
            if(logs.get('acc')>=0.998):
                print("Reached 99.8% accuracy so cancelling training!")
                self.model.stop_training = True
    # YOUR CODE ENDS HERE
    mnist = tf.keras.datasets.mnist
    (training_images, training_labels), (test_images, test_labels) = mnist.load_data(path=path)
    # YOUR CODE STARTS HERE
    # Add the channel dimension and scale pixels to [0, 1].
    training_images = training_images.reshape(60000, 28, 28, 1)
    training_images = training_images/255.0
    test_images = test_images.reshape(10000, 28, 28, 1)
    test_images = test_images/255.0
    cb = myCallback()
    # YOUR CODE ENDS HERE
    model = tf.keras.models.Sequential([
        # YOUR CODE STARTS HERE
        tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape = (28,28,1)),
        tf.keras.layers.MaxPooling2D(2,2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation = 'relu'),
        tf.keras.layers.Dense(10, activation = 'softmax')
        # YOUR CODE ENDS HERE
    ])
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    # model fitting
    history = model.fit(
        # YOUR CODE STARTS HERE
        training_images, training_labels, epochs=20, callbacks = [cb]
        # YOUR CODE ENDS HERE
    )
    # model fitting
    return history.epoch, history.history['acc'][-1]
# -
_, _ = train_mnist_conv()
# +
# Now click the 'Submit Assignment' button above.
# Once that is complete, please run the following two cells to save your work and close the notebook
# + language="javascript"
# <!-- Save the notebook -->
# IPython.notebook.save_checkpoint();
# + language="javascript"
# <!-- Shutdown and close the notebook -->
# window.onbeforeunload = null
# window.close();
# IPython.notebook.session.delete();
ter_matrix(cars[num_vars], ax=ax, diagonal='kde')
# Use binning to see relationships more clearly
# Bucket acceleration into 7 equal-width bins and bar-plot mean mpg per bin.
cars['binned_acceleration'] = pd.cut(cars.acceleration, bins=7)
agg = cars.groupby('binned_acceleration')['mpg'].mean()
agg.plot(kind='bar').set_ylabel('average mpg')
# Scatter plot by category
# One marker-only series per origin so the categories are distinguishable.
origins = cars['origin'].unique()
for origin in origins:
    plt.plot(cars.loc[cars.origin==origin,'acceleration'],
             cars.loc[cars.origin==origin,'mpg'],
             linestyle='',
             marker='o',
             alpha=.7,
             label="origin="+origin)
plt.xlabel('acceleration')
plt.ylabel('mpg')
plt.legend(numpoints=1)
# Categorical vs Categorical
pd.crosstab(cars['origin'], cars['model'])
# Aha! Model means model year --- we probably should have left this as numeric
# +
# Heat-map
# Mean mpg for every (origin, model-year) cell, annotated with the values.
import seaborn as sns
agg = cars.groupby(['origin','model'])['mpg'].mean()
ax = sns.heatmap(agg.unstack(level='model'), annot=True)
ax.set_title('MPG by origin and model year')
# -
# ## Linear Regression
# Single variable regression
cars.plot('weight','mpg',kind='scatter')
# Linear regression is just the fancy term for finding the line of best fit. If I was going to eyeball it from this data, I would draw the line from (1000,40) through (5500,5).
#
# In other words, we are looking for the slope and intercept that defines a line that fits the data as well as possible.
#
# 'As well as possible' means that we are trying to minimize the mean-squared-error
# +
# Make a guess at the line of best fit
first_point = [1000,45]
second_point = [5500, 0]
# Solve
def get_line_equation(p1, p2):
    """
    Find the slope and intercept of the line through two points by solving
    the linear system
        y1 = m*x1 + b
        y2 = m*x2 + b
    Input:
        p1: first point [x1, y1]
        p2: second point [x2, y2]
    returns: slope, intercept
    """
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    coefficients = [[x1, 1], [x2, 1]]
    targets = [[y1], [y2]]
    solution = np.linalg.solve(coefficients, targets)
    return solution[0][0], solution[1][0]
slope, intercept = get_line_equation(first_point, second_point)
print(slope, intercept)
# Plot the line along with the data
# Overlay the guessed line (red) on the weight-vs-mpg scatter.
ax = cars.plot('weight','mpg',kind='scatter')
xx = np.linspace(1000, 5500, 100)
ax.plot(xx, xx*slope + intercept, color='red', lw=3)
ax.set_xlim([1000,5500])
# -
# How can we measure the error? The typical choice is to use mean squared error. The error for a given data point is the difference between the observed value and the predicted value
# $$
# MSE := \frac{1}{n} \sum_{i=1}^n (y_i - (mx_i + b))^2
# $$
#
# +
# Mean Squared Error
def mean_squared_error(X, y, m, b):
    """Average squared residual of the line y_hat = m*X + b on the data (X, y)."""
    residuals = y - (X * m + b)
    return np.mean(residuals ** 2)
mean_squared_error(cars['weight'], cars['mpg'], slope, intercept)
# -
# ## R-Squared
#
# Mean squared error is a good error metric, but it is not comparable across different data sets. For this we use a scaled version called $R^2$.
# \begin{align}
# R^2 &:= 1 - \frac{SS_{res}}{SS_{tot}} \\
# &= 1 - \frac{\sum_{i=1}^n (y_i - (mx_i + b))^2}{\sum_{i=1}^n (y_i - \bar{y})^2}
# \end{align}
#
# Where $SS_{res}$ is the sum of the squared residuals and $SS_{tot}$ is the total sum of squares. $R^2$ can be interpreted as the fraction of the variance in the data that is explained by the model.
#
# $R^2$ will be between 0 and 1. 0 means that your model explains none of the variance in the data, while 1 means your model explains all of the variance in the data. The higher $R^2$, the better!
# +
# Calculate r-squared
def r_squared(X, y, m, b):
    """
    Compute the r-squared, on the data (X,y),
    of the model defined by slope m, and intercept b.
    """
    residuals = y - (m*X + b)
    ss_res = np.sum(residuals**2)
    ss_tot = np.sum((y - y.mean())**2)
    # fraction of variance explained: 1 - SS_res / SS_tot
    return 1 - ss_res/ss_tot
# R^2 of the hand-guessed line (baseline to compare against the OLS fit below)
r_squared(cars['weight'], cars['mpg'], slope, intercept)
# -
# ## Ordinary least squares
# It turns out that we can find the slope and intercept which *minimize* the mean squared error, using a procedure called ordinary least squares
#
# Ordinary least squares is implemented in the statsmodels package. The advantage of this package is that we also have access to a number of *regression diagnostics.*
# +
import statsmodels.api as sm
# Choose the predictor and add a constant term
# (allow for an intercept)
X = pd.DataFrame({'weight' : cars['weight']})
X = sm.add_constant(X)
y = cars['mpg']
# Create a linear regression object
regressor = sm.OLS(y,X)
# NOTE: rebinds `regressor` from the model object to its fitted results object
regressor = regressor.fit()
regressor.summary()
# -
# list the attributes/methods available on the fitted results object
dir(regressor)
# ## Model Diagnostics
#
# **coef** - The values of the coefficients in the model
#
# **$P>|t|$** - The p-value of the null hypothesis that a specific parameter is zero.
#
# **R-Squared** - Proportion of variance explained by the model. Measured on a scale from 0 (bad) to 1 (good)
#
# **Prob (F-statistic)** - p-value of the F-statistic. This is the probability of the null hypothesis that *all parameters in the model are zero*
# Plot the line along with the data
# NOTE(review): these values look transcribed by hand from the summary()
# output above — confirm they match; regressor.params would avoid the copy.
slope = -.0076
intercept = 46.2165
ax = cars.plot('weight','mpg',kind='scatter')
xx = np.linspace(1000, 5500, 100)
ax.plot(xx, xx*slope + intercept, color='red', lw=3)
ax.set_xlim([1000,5500])
# This line is a better fit than our original guess. We can tell because its $R^2$ is higher, meaning it explains the mpg's variance better. The MSE of this fit will be lower (better) than our original guess as well.
/Day_058_hierarchical_clustering_HW.ipynb | bb7b2404e846d2e47f21a109e56f2edb06abec68 | [] | no_license | angelsaying8642/1st-DL-CVMarathon | https://github.com/angelsaying8642/1st-DL-CVMarathon | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 44,044 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn import naive_bayes
from sklearn.metrics import roc_auc_score,accuracy_score
import pickle
# downloads the stopword corpus on first run (no-op if already present)
nltk.download("stopwords")
# tab-separated file: label column 'Reviews', text column 'Comments'
dataset = pd.read_csv('reviews.txt',sep = '\t', names =['Reviews','Comments'])
dataset
stopset = set(stopwords.words('english'))
vectorizer = TfidfVectorizer(use_idf = True,lowercase = True, strip_accents='ascii',stop_words=stopset)
# fit TF-IDF on the FULL corpus before the train/test split
X = vectorizer.fit_transform(dataset.Comments)
y = dataset.Reviews
# NOTE(review): file handle is never closed — prefer `with open(...)`.
# Filename 'tranform.pkl' is typo'd but kept, since downstream loaders may expect it.
pickle.dump(vectorizer, open('tranform.pkl', 'wb'))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
clf = naive_bayes.MultinomialNB()
clf.fit(X_train,y_train)
# honest held-out accuracy (model trained on X_train only)
accuracy_score(y_test,clf.predict(X_test))*100
clf = naive_bayes.MultinomialNB()
# retrains on ALL data (including the test rows) for the deployed pickle
clf.fit(X,y)
# NOTE(review): data leakage — X_test rows were part of the training set above,
# so this accuracy is optimistically inflated and should not be reported.
accuracy_score(y_test,clf.predict(X_test))*100
filename = 'nlp_model.pkl'
# NOTE(review): unclosed file handle here too — prefer `with open(...)`.
pickle.dump(clf, open(filename, 'wb'))
| 1,315 |
/notebooks/ITSP Descriptive analysis.ipynb | f614ab1d814a9f57e5009b05c1df4975e990cb95 | [] | no_license | sebwdz/JobAnalysis | https://github.com/sebwdz/JobAnalysis | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 207,238 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
import pandas as pd
import sklearn.cluster
import sklearn.decomposition
import matplotlib.pyplot as plt
import seaborn as sns
import os.path
import sys
# %load_ext autoreload
# %autoreload 1
# + deletable=true editable=true
# NOTE: os.path.dirname("src") is "" — this resolves to abspath("../src")
# relative to the notebook's working directory.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname("src"), "../src")))
# %aimport features.normalizer
# %aimport features.tools
# %aimport data.tools
# %aimport data.itsp
# %aimport models
# %aimport visualization.simple
# + deletable=true editable=true
import data.itsp
import features.normalizer
import features.tools
import visualization.simple
# + deletable=true editable=true
# project-local loader; presumably returns (DataFrame, column list) — the
# DataFrame has a "Date" column per its use below
data_frame, columns1 = data.itsp.data_set()
data_frame.describe()
# + deletable=true editable=true
# bar-plot each series shifted so its minimum is zero (columns become rows)
t_data = data_frame.copy().drop("Date", axis=1).transpose()
t_data = t_data - t_data.min()
ax = t_data.plot.bar()
ax.legend().set_visible(False)
plt.show()
# + deletable=true editable=true
# pairwise correlation heat map across the non-date columns
wd_data = data_frame.copy().drop("Date", axis=1)
corr = wd_data.corr()
visualization.simple.heat_map(corr, corr.columns.values, corr.columns.values, (10, 8))
# + deletable=true editable=true
data_all = data_frame
ax = data_frame.plot(x="Date")
ax.legend().set_visible(False)
plt.show()
# + deletable=true editable=true
# transpose first, so PCA + k-means cluster the COLUMNS (series), not the rows
s_data = data_all.copy().drop("Date", axis=1).transpose()
reduced_data = sklearn.decomposition.PCA(n_components=2).fit_transform(s_data)
kmeans = sklearn.cluster.KMeans(n_clusters=3, random_state=0).fit(reduced_data)
visualization.simple.k_means(reduced_data, kmeans.labels_)
# + deletable=true editable=true
# min-max normalize, then reattach the Date column
norm_data = data_frame.copy().drop("Date", axis=1)
norm_data = features.normalizer.simple_min_max(norm_data)
norm_data = pd.concat([norm_data, data_frame[["Date"]]], axis=1)
# + deletable=true editable=true
# same clustering as above, but on the normalized series
s_data = norm_data.copy().drop("Date", axis=1).transpose()
reduced_data = sklearn.decomposition.PCA(n_components=2).fit_transform(s_data)
kmeans = sklearn.cluster.KMeans(n_clusters=3, random_state=0).fit(reduced_data)
visualization.simple.k_means(reduced_data, kmeans.labels_)
# + deletable=true editable=true
# NOTE(review): wd_data is computed but unused — despine is given data_frame
# (which still contains "Date"); confirm which was intended.
wd_data = data_frame.copy().drop("Date", axis=1)
visualization.simple.despine(data_frame.tail(8), (10, 6))
# + deletable=true editable=true
| 2,510 |
/6. Path Planning and Navigation/4. Sample-Based and Probabilistic Path Planning/13. State Utility/State Utility.ipynb | d0df41aa813d1746a43cc474702258330b473693 | [] | no_license | sunsided/robond-notebooks | https://github.com/sunsided/robond-notebooks | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 7,529 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # State Utility
#
# ## Definition
#
# The **utility of a state** (otherwise known as the **state-value**) represents how attractive the state is with respect to the goal. Recall that for each state, the state-value function yields the expected return, if the agent (robot) starts in that state and then follows the policy for all time steps. In mathematical notation, this can be represented as so:
#
# $$
# U^{\pi}(s) = E[\sum_{t=0}^{\infty}R(s_t)|\pi , s_0 = s]
# $$
#
# The notation used in path planning differs slightly from what you saw in Reinforcement Learning. But the result is identical.
#
# Here,
#
# - $U^{\pi}(s)$ represents the utility of a state $s$,
# - $E$ represents the expected value, and
# - $R(s)$ represents the reward for state $s$.
#
# The utility of a state is the sum of the rewards that an agent would encounter if it started at that state and followed the policy to the goal.
# ## Calculation
# We can break the equation down, to further understand it.
#
# $$
# U^{\pi}(s) = E[\sum_{t=0}^{\infty}R(s_t)|\pi , s_0 = s]
# $$
#
# Let’s start by breaking up the summation and explicitly adding all states.
#
# $$
# U^{\pi}(s) = E[R(s_0) + R(s_1) + R(s_2) + ... \ |\pi , s_0 = s]
# $$
#
# Then, we can pull out the first term. The expected reward for the first state is independent of the policy. While the expected reward of all future states (those between the state and the goal) depend on the policy.
#
# $$
# U^{\pi}(s) = E[R(s_0)|s_0 = s] + E[R(s_1) + R(s_2) + ... \ |\pi]
# $$
#
# Re-arranging the equation results in the following. (Recall that the prime symbol, as on $s'$, represents the next state - like $s_2$ would be to $s_1$).
#
# $$
# U^{\pi}(s) = R(s) + E[\sum_{t=0}^{\infty}R(s_t)|\pi , s_0 = s']
# $$
#
# Ultimately, the result is the following.
#
# $$
# U^{\pi}(s) = R(s) + U^{\pi}(s')
# $$
#
# As you see here, calculating the utility of a state is an iterative process. It involves all of the states that the agent would visit between the present state and the goal, as dictated by the policy.
#
# As well, it should be clear that the utility of a state depends on the policy. If you change the policy, the utility of each state will change, since the sequence of states that would be visited prior to the goal may change.
# ## Determining the Optimal Policy
#
# Recall that the optimal policy, denoted $\pi^*$, informs the robot of the best action to take from any state,
# to maximize the overall reward. That is,
#
# $$
# \pi^*(s) = \underset{a}{argmax} E [U^{\pi}(s)]
# $$
#
# In a state $s$, the optimal policy $\pi^*$ will choose the action $a$ that maximizes the utility of $s$ (which, due to its iterative nature, maximizes the utilities of all future states too).
#
# While the math may make it seem intimidating, it’s as easy as looking at the set of actions and choosing the best action for every state. The image below displays the set of all actions once more.
#
# 
#
# It may not be clear from the get-go which action is optimal for every state, especially for states far away from the goal which have many paths available to them. It’s often helpful to start at the goal and work your way backwards.
#
# If you look at the two cells adjacent to the goal, their best action is trivial - go to the goal! Recall from your learning in RL that the goal state’s utility is 0. This is because if the agent starts at the goal, the task is complete and no reward is received. Thus, the expected reward from either of the goal’s adjacent cells is 79.8. Therefore, the state’s utility is, 79.8 + 0 = 79.8 (based on $U^{\pi}(s) = R(s) + U^{\pi}(s')$).
#
# If we look at the lower mountain cell, it is also easy to guess which action should be performed in this state. With an expected reward of -1.2, moving right is going to be much more rewarding than taking any indirect route (up or left). This state will have a utility of -1.2 + 79.8 = 78.6.
#
# ## Quiz
#
# Can you calculate what would the utility of the state to the right of the center mountain be, if the most rewarding action is chosen?
#
# 
#
# The process of selecting each state’s most rewarding action continues, until every state is mapped to an action. These mappings are precisely what make up the policy.
#
# It is highly suggested that you pause this lesson here, and work out the optimal policy on your own using the action set seen above. Working through the example yourself will give you a better understanding of the challenges that are faced in the process, and will help you remember this content more effectively. When you are done, you can compare your results with the images below.
# ## Applying the Policy
# Once this process is complete, the agent (our robot) will be able to make the best path planning decision from every state, and successfully navigate the environment from any start position to the goal. The optimal policy for this environment and this robot is provided below.
#
# The image below that shows the set of actions with just the optimal actions remaining. Note that from the top left cell, the agent could either go down or right, as both options have equal rewards.
#
# 
# 
# ## Discounting
#
# One simplification that you may have noticed us make, is omit the discounting rate $\gamma$. In the above example, $\gamma = 1$ and all future actions were considered to be just as significant as the present action. This was done solely to simplify the example. After all, you have already been introduced to $\gamma$ through the lessons on Reinforcement Learning.
#
# In reality, discounting is often applied in robotic path planning, since the future can be quite uncertain. The complete equation for the utility of a state is provided below:
#
# $$
# U^{\pi}(s) = E[\sum_{t=0}^{\infty}\gamma^tR(s_t)|\pi , s_0 = s]
# $$
| 6,164 |
/notebooks/prediction.ipynb | ee4f7059b4f18c43024fdde486a6e3489e90f085 | [
"BSD-3-Clause"
] | permissive | datavistics/reddit_prediction | https://github.com/datavistics/reddit_prediction | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 77,932 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reddit /r/relationships Title Prediction
# ## Setup
# I start by using a preprocessed [pickle file](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_pickle.html). I would happily use feather, but they [dont have datetimes figured out yet](https://github.com/wesm/feather/issues/121). I added the time features I wanted, so this notebook could focus on prediction.
# +
# %matplotlib inline
import pandas as pd
from pathlib import Path
import sqlite3
from matplotlib import pyplot as plt
import re
import numpy as np
import math
from random import random, sample, seed
import sys
# paths are resolved relative to the notebook's parent (the project root)
project_dir = Path.cwd().parent
dataframe_path = project_dir/'data'/'processed'/'added_time_features.pickle'
embeddings_path = project_dir/'data'/'external'/'glove.twitter.27B.200d.txt'
# make the project's src/ package importable (provides `features.utilities`)
sys.path.append(str(project_dir/'src'))
from features import utilities
plt.style.use('ggplot')
df = pd.read_pickle(dataframe_path)
# -
df.head()
# +
# pull each column out as its own Series for use as model inputs below
title = df.title
score = df.score
num_comments = df.num_comments
year = df.year
month = df.month
dayofyear = df.dayofyear
dayofweek = df.dayofweek
hour = df.hour
minute = df.minute
second = df.second
edited_delta_min = df.edited_delta_min
edited_delta_hour = df.edited_delta_hour
edited_delta_day = df.edited_delta_day
# +
from keras.preprocessing import sequence
from keras.preprocessing.text import text_to_word_sequence, Tokenizer
# vocabulary cap: only the top max_features words are kept in sequences
max_features = 40000
word_tokenizer = Tokenizer(max_features)
word_tokenizer.fit_on_texts(title)
print(str(word_tokenizer.word_counts)[0:100])
print(str(word_tokenizer.word_index)[0:100])
print(len(word_tokenizer.word_counts))
# +
title_tf = word_tokenizer.texts_to_sequences(title)
print(len(title_tf))
# -
# histogram of title lengths (in words) to justify the maxlen choice below
plt.hist((df.title.str.split().apply(len)), bins=67)
# +
# pad/truncate every title sequence to exactly 40 tokens
maxlen = 40
title_tf = sequence.pad_sequences(title_tf, maxlen=maxlen)
print(title_tf[0])
# +
# GloVe Twitter 200-d vectors, loaded by the project helper as {word: vector}
embedding_vectors = utilities.get_embeddings(embeddings_path)
print(embedding_vectors['you'])
# +
# build the embedding weight matrix; rows for out-of-vocabulary words stay zero
weights_matrix = np.zeros((max_features + 1, 200))
for word, i in word_tokenizer.word_index.items():
    embedding_vector = embedding_vectors.get(word)
    if embedding_vector is not None and i <= max_features:
        weights_matrix[i] = embedding_vector
# index 0 vector should be all zeroes, index 1 vector should be the same one as above
print(weights_matrix[0:2,:])
# -
# shift dayofyear so the smallest observed value maps to embedding index 0
print(f'Day of year minimum = {min(dayofyear)}')
dayofyear_tf = dayofyear - min(dayofyear)
# +
from keras.models import Input, Model
from keras.layers import Dense, Embedding, GlobalAveragePooling1D, concatenate, Activation
from keras.layers.core import Masking, Dropout, Reshape
from keras.layers.normalization import BatchNormalization
# NOTE(review): batch_size is defined here but fit() below passes batch_size=None
batch_size = 32
embedding_dims = 200
epochs = 20
# -
# title branch: embedding (GloVe-initialized) -> average pooling -> aux sigmoid head
title_input = Input(shape=(maxlen,), name='title_input')
title_embedding = Embedding(max_features + 1, embedding_dims, weights=[weights_matrix])(title_input)
title_pooling = GlobalAveragePooling1D()(title_embedding)
aux_output = Dense(1, activation='sigmoid', name='aux_out')(title_pooling)
# +
# one small embedding per categorical time feature; Reshape drops the length-1 axis
meta_embedding_dims = 64
minute_input = Input(shape=(1,), name='minute_input')
minute_embedding = Embedding(60, meta_embedding_dims)(minute_input)
minute_reshape = Reshape((meta_embedding_dims,))(minute_embedding)
hour_input = Input(shape=(1,), name='hour_input')
hour_embedding = Embedding(24, meta_embedding_dims)(hour_input)
hour_reshape = Reshape((meta_embedding_dims,))(hour_embedding)
dayofweek_input = Input(shape=(1,), name='dayofweek_input')
dayofweek_embedding = Embedding(7, meta_embedding_dims)(dayofweek_input)
dayofweek_reshape = Reshape((meta_embedding_dims,))(dayofweek_embedding)
dayofyear_input = Input(shape=(1,), name='dayofyear_input')
dayofyear_embedding = Embedding(366, meta_embedding_dims)(dayofyear_input)
dayofyear_reshape = Reshape((meta_embedding_dims,))(dayofyear_embedding)
edit_delta_minute_input = Input(shape=(1,), name='edit_delta_minute_input')
edit_delta_minute_embedding = Embedding(60, meta_embedding_dims)(edit_delta_minute_input)
edit_delta_minute_reshape = Reshape((meta_embedding_dims,))(edit_delta_minute_embedding)
edit_delta_hour_input = Input(shape=(1,), name='edit_delta_hour_input')
edit_delta_hour_embedding = Embedding(24, meta_embedding_dims)(edit_delta_hour_input)
edit_delta_hour_reshape = Reshape((meta_embedding_dims,))(edit_delta_hour_embedding)
edit_delta_day_input = Input(shape=(1,), name='edit_delta_day_input')
edit_delta_day_embedding = Embedding(7, meta_embedding_dims)(edit_delta_day_input)
edit_delta_day_reshape = Reshape((meta_embedding_dims,))(edit_delta_day_embedding)
# +
# NOTE(review): the first `merged` assignment is dead code — it is immediately
# overwritten by the second, which drops the edit_delta_minute/hour branches.
# The commented-out Model(...) below matches the dead variant.
merged = concatenate([title_pooling, hour_reshape, dayofweek_reshape, minute_reshape, dayofyear_reshape, edit_delta_minute_reshape, edit_delta_hour_reshape, edit_delta_day_reshape])
merged = concatenate([title_pooling, hour_reshape, dayofweek_reshape, minute_reshape, dayofyear_reshape, edit_delta_day_reshape])
hidden_1 = Dense(256, activation='relu')(merged)
hidden_1 = BatchNormalization()(hidden_1)
main_output = Dense(1, activation='sigmoid', name='main_out')(hidden_1)
# +
model = Model(inputs=[title_input,
              hour_input,
              dayofweek_input,
              minute_input,
              dayofyear_input,
              edit_delta_day_input],
              outputs=[main_output, aux_output])
# model = Model(inputs=[title_input,
#                       hour_input,
#                       dayofweek_input,
#                       minute_input,
#                       dayofyear_input,
#                       edit_delta_minute_input,
#                       edit_delta_hour_input,
#                       edit_delta_day_input],
#               outputs=[main_output, aux_output])
# two-headed loss: main head weighted 1.0, auxiliary title-only head 0.2
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'],
              loss_weights=[1, 0.2])
model.summary()
# +
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
plot_model(model, to_file='model.png')
plot_model(model, to_file='model_shapes.png', show_shapes=True)
# -
# 
# Make sure to add graphviz to your path so it works with python. [Source](https://stackoverflow.com/questions/36886711/keras-runtimeerror-failed-to-import-pydot-after-installing-graphviz-and-pyd)
# +
# shuffle every column with the same permutation (seeded for reproducibility)
seed(42)
split = 0.2
# returns randomized indices with no repeats
idx = sample(range(title_tf.shape[0]), title_tf.shape[0])
score = score[idx]
num_comments = num_comments[idx]
title_tf = title_tf[idx, :]
hour = hour[idx]
dayofweek = dayofweek[idx]
minute = minute[idx]
dayofyear_tf = dayofyear_tf[idx]
edited_delta_min = edited_delta_min[idx]
edited_delta_hour = edited_delta_hour[idx]
edited_delta_day = edited_delta_day[idx]
# -
# binarize the target: True when a post scores above the mean score
score = score > score.mean()
# +
from keras.callbacks import CSVLogger, TensorBoard
import tensorflow as tf
csv_logger = CSVLogger('training.csv')
tbCallBack = TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
# -
# NOTE(review): batch_size=None falls back to the Keras default; the
# batch_size = 32 defined earlier is never used. csv_logger is also unused here.
# model.fit([title_tf, hour, dayofweek, minute, dayofyear_tf, edited_delta_min, edited_delta_hour, edited_delta_day], [score, score],
model.fit([title_tf, hour, dayofweek, minute, dayofyear_tf, edited_delta_day], [score, score],
          batch_size=None,
          epochs=epochs,
          validation_split=split, callbacks=[tbCallBack])
| 7,614 |
/convolutional-neural-networks/conv-visualization/maxpooling_visualization.ipynb | 22d3752eab49a42b9924d910780e60e013b715b9 | [
"MIT"
] | permissive | fabsta/deep-learning-v2-pytorch | https://github.com/fabsta/deep-learning-v2-pytorch | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 509,345 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: fastai_dl_course
# language: python
# name: fastai_dl_course
# ---
# # Maxpooling Layer
#
# In this notebook, we add and visualize the output of a maxpooling layer in a CNN.
#
# A convolutional layer + activation function, followed by a pooling layer, and a linear layer (to create a desired output size) make up the basic layers of a CNN.
#
# <img src='notebook_ims/CNN_all_layers.png' height=50% width=50% />
# ### Import the image
# +
import cv2
import matplotlib.pyplot as plt
# %matplotlib inline
# TODO: Feel free to try out your own images here by changing img_path
# to a file path to another image on your computer!
img_path = 'data/udacity_sdc.png'
# load color image
# NOTE(review): cv2.imread returns None (no exception) if the path is wrong;
# cvtColor below would then fail — check the path if that happens.
bgr_img = cv2.imread(img_path)
# convert to grayscale
gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
# normalize, rescale entries to lie in [0,1]
gray_img = gray_img.astype("float32")/255
# plot image
plt.imshow(gray_img, cmap='gray')
plt.show()
# -
# ### Define and visualize the filters
# +
import numpy as np
## TODO: Feel free to modify the numbers here, to try out another filter!
# 4x4 vertical-edge pattern: negative on the left half, positive on the right
filter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]])
print('Filter shape: ', filter_vals.shape)
# +
# Defining four different filters,
# all of which are linear combinations of the `filter_vals` defined above
# define four filters
filter_1 = filter_vals
filter_2 = -filter_1
filter_3 = filter_1.T
filter_4 = -filter_3
filters = np.array([filter_1, filter_2, filter_3, filter_4])
# For an example, print out the values of filter 1
print('Filter 1: \n', filter_1)
# -
# ### Define convolutional and pooling layers
#
# You've seen how to define a convolutional layer, next is a:
# * Pooling layer
#
# In the next cell, we initialize a convolutional layer so that it contains all the created filters. Then add a maxpooling layer, [documented here](http://pytorch.org/docs/stable/_modules/torch/nn/modules/pooling.html), with a kernel size of (2x2) so you can see that the image resolution has been reduced after this step!
#
# A maxpooling layer reduces the x-y size of an input and only keeps the most *active* pixel values. Below is an example of a 2x2 pooling kernel, with a stride of 2, applied to a small patch of grayscale pixel values; reducing the x-y size of the patch by a factor of 2. Only the maximum pixel values in 2x2 remain in the new, pooled output.
#
# <img src='notebook_ims/maxpooling_ex.png' height=50% width=50% />
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
# define a neural network with a convolutional layer with four filters
# AND a pooling layer of size (2, 2)
class Net(nn.Module):
    """Tiny CNN: one fixed-weight conv layer, ReLU, then (2, 2) max pooling."""

    def __init__(self, weight):
        """weight: tensor of shape (4, 1, k_height, k_width) holding the
        pre-defined grayscale filters used to initialize the conv layer."""
        super(Net, self).__init__()
        # derive the kernel size from the supplied filter tensor
        k_height, k_width = weight.shape[2:]
        # one input channel -> four filters; bias disabled so outputs are
        # pure filter responses
        self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False)
        self.conv.weight = torch.nn.Parameter(weight)
        # non-overlapping 2x2 max pooling halves each spatial dimension
        self.pool = nn.MaxPool2d(2, 2)

    def forward(self, x):
        """Return (conv output, ReLU-activated output, pooled output)."""
        pre_activation = self.conv(x)
        post_activation = F.relu(pre_activation)
        downsampled = self.pool(post_activation)
        return pre_activation, post_activation, downsampled
# instantiate the model and set the weights
# filters is (4, 4, 4); unsqueeze(1) adds the channel axis -> (4, 1, 4, 4)
weight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor)
model = Net(weight)
# print out the layer in the network
print(model)
# -
# ### Visualize the output of each filter
#
# First, we'll define a helper function, `viz_layer` that takes in a specific layer and number of filters (optional argument), and displays the output of that layer once an image has been passed through.
# helper function for visualizing the output of a given layer
# default number of filters is 4
def viz_layer(layer, n_filters=4):
    """Plot the first n_filters channels of a layer's output side by side.

    layer: tensor of shape (1, channels, H, W), e.g. a conv/pool output.
    """
    fig = plt.figure(figsize=(20, 20))
    for idx in range(n_filters):
        axis = fig.add_subplot(1, n_filters, idx + 1)
        # pull the channel out of the batch and render it in grayscale
        axis.imshow(np.squeeze(layer[0, idx].data.numpy()), cmap='gray')
        axis.set_title('Output %s' % str(idx + 1))
# Let's look at the output of a convolutional layer after a ReLu activation function is applied.
#
# #### ReLu activation
#
# A ReLu function turns all negative pixel values in 0's (black). See the equation pictured below for input pixel values, `x`.
#
# <img src='notebook_ims/relu_ex.png' height=50% width=50% />
# +
# plot original image
plt.imshow(gray_img, cmap='gray')
# visualize all filters
fig = plt.figure(figsize=(12, 6))
fig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)
for i in range(4):
    ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
    ax.imshow(filters[i], cmap='gray')
    ax.set_title('Filter %s' % str(i+1))
# convert the image into an input Tensor
# (H, W) -> (1, 1, H, W): batch and channel axes added for Conv2d
gray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1)
# get all the layers
conv_layer, activated_layer, pooled_layer = model(gray_img_tensor)
# visualize the output of the activated conv layer
viz_layer(activated_layer)
# -
# ### Visualize the output of the pooling layer
#
# Then, take a look at the output of a pooling layer. The pooling layer takes as input the feature maps pictured above and reduces the dimensionality of those maps, by some pooling factor, by constructing a new, smaller image of only the maximum (brightest) values in a given kernel area.
#
# Take a look at the values on the x, y axes to see how the image has changed size.
#
#
# visualize the output of the pooling layer
viz_layer(pooled_layer)
first occurance using remove()
# NOTE(review): `lst` is created and partially mutated before this chunk;
# the index-based deletes below (del lst[6], pop(7)) depend on that state.
print("first occurance removed ",lst)
lst1=[4,3,2,1]
lst.append(lst1) #6. appending list within list
print("list within list ",lst)
lst2=[11,12]
lst.extend(lst2) #7. extend list using extend()
print("extended list ",lst)
del lst[6] #8. delete list element using del
print("deleted element ",lst)
p=lst.pop(7) #9. deleting list element using pop()
print(p)
print("using pop ",lst)
lst3=[1,'ok',2.5]
if 'ok' in lst3: #10. list related keyword "in"
    print('yes')
if 2 not in lst3: #11. list related keyword "not in"
    print('no')
lst4=[9,8,7,6,5,4,3,2,1,0]
lst4.reverse() #12. list in reverse order using reverse()
print("list in reverse order ",lst4)
lst5=[2,8,4,44,56]
slst=sorted(lst5) #13. sort list using sorted()
print("sorted list ",slst)
rslst=sorted(lst5,reverse=True) #14. reverse sort list using "reverse=True" parameter
print("reverse sorted list ",rslst)
z="This is mac book air"
split=z.split() #15. string split to create list
print("list split ",split)
print("list slicing ",lst5[1:5:2]) #16. list slicing
#17. list comprehension
square=[i**2 for i in range(11)] # square of number
print("squares of number ",square)
ns=[(i,i*2) for i in range(11)] # number with square
print("number with square ",ns)
matrix=[[1,2,3,4],[5,6,7,8],[9,10,11,12]] # matrix create
print("matrix is ",matrix)
transpose=[[row[i] for row in matrix] for i in range(4)] # transpose of matrix
print("transpose of matrix ",transpose)
# +
a=13.5 # table of 13.5
# NOTE: reassigning the loop variable `i` works here because range() yields a
# fresh value on every iteration, but a separate name would be clearer.
for i in range(1,11):
    i*=a
    print(i)
# +
# Operation on tuple
tpl=(1,2,('abc','xyz'),(7.8,83.5),[3,'pqr',4.5]) #1. nested tuple with list
print("nested tuple is ",tpl)
print("type of tuple is ",type(tpl)) #2. type of tuple
print("tuple element at positive index 2 is ",tpl[2]) #3. accessing element using 2
print("tuple element at negative index -1 is ",tpl[-1]) #4. accessing element using -1
print("nested tuple element at index 3 having position 1 is ",tpl[3][1]) #5. accessing element from nested tuple
print("tuple slicing from 3 is ",tpl[:3]) #6. tuple slicing 3
print("tuple slicing from -2 is",tpl[-2:]) #7. tuple slicing -2
tpl1=(10,9,8,7,6)+(5,4,3,2,1) #8. concatination of tuple using "+" operator
print("concatination is ",tpl1)
tpl2=((123,'go',)*4) #9. repeat tuple element for given number of time using "*" operator
print("tuple element with given number of time is ",tpl2)
t=(1,2.5,'ok') #10. delete entire tuple using "del" keyword
del t
t=(1,3,5,7,3,2,8,7,5) #11. obtain number frequency using count()
print("count of 3 is ",t.count(3))
print("tuple element index is ",t.index(7)) #12. index of element using index()
print(5 in t) #13. check number exist or not using "in" keyword
print("length of tuple t is ",len(t)) #14. length of tuple using len()
newt=sorted(t) #15. sorting tuple using sorted() — note: returns a LIST, not a tuple
print("sorted tuple is ",newt)
print("largest element is ",max(t)) #16. find largest number using max()
print("smallest element is ",min(t)) #17. find smallest number using min()
print("sum of element is ",sum(t)) #18. addition of elements using sum()
# +
# Set operations
st={1,2} #1. set create
print("set values are ",st)
s=set([3,4]) #2. set creadted from list using "set()" constructor
print("set creation from list is ",s)
st.add(3) #3. element add using add()"but it adds only one element at a time"
print("newly added elements are ",st)
st.update([4,5,6]) #4. multiple elements are added using update() by defining element through list
print("multiple elements added are ",st)
st.update([7,8],{9,10}) #5. elements added through list and set using update()
print("elements added through list and set are ",st)
#6. set element delete
s.discard(3) # using discard()
print("set after discard is ",s)
s.remove(4) #using remove()
print("set after removed is ",s)
st.pop() # using pop() — removes and returns an ARBITRARY element; sets are unordered
print("set element deleted using pop is ",st)
st.clear() # clear all set element using clear()
print(st)
#7. union,intersection,set difference,symmetric difference
set1={1,2,3,4,5}
set2={3,4,5,6,7}
print("union of set is ",set1|set2) # union using "|" or operator
print("union is ",set1.union(set2)) # union using union() method
print("intersection of set is ",set1&set2) # intersection using "&" and operator
print("intersectioin is ",set1.intersection(set2)) # intersection using intersection() method
print("set difference of set is",set1-set2) # set difference using "-" minus operator
print("set difference is ",set1.difference(set2)) # set difference using difference() method
print("symmetric difference of set is ",set1^set2) # symmetric difference using "^" exponent operator
print("symmetric difference is ",set1.symmetric_difference(set2)) # using symmetric_difference() method
#8. subset using issubset() method
a={'aa','bb','cc','dd'}
b={'bb','dd'}
print("a is subset of b ?",a.issubset(b))
print("b is subset if a ?",b.issubset(a))
#9. Frozenset using frozenset() method — immutable, hashable set variant
frset1=frozenset({1,2,3})
frset2=frozenset({3,4,5})
print("union of frozenset is ",frset1|frset2) # union
print("intersection of frozenset is ",frset1&frset2) # intersection
print("set difference of frozenset is",frset1-frset2) # set difference
print("symmetric difference of frozenset is",frset1^frset2) # symmetric difference
print("frset2 is subset of freset1 ?",frset2.issubset(frset1)) # subset
# +
# Dictionary operation
# NOTE(review): naming this variable `dict` shadows the builtin `dict` type,
# which is why the dict([...]) constructor demo below is commented out —
# after this line, `dict` is an instance, not the type. Prefer another name.
dict={1:'one',2:'two',3:'three'} # dictionary created
print(dict)
#newdict = dict([(1,'aaa'),(2,'bbb')]) # creating dictionary with list of tuples using dict() method
#print("Dictionary with list of tuples is ",newdict)
dict1={'name':'abc','age':28,'address':'xyz'} # accessing dictionary values
print("accessing dictionary value ",dict1['name'])
print("accessing dictionary values using get method ",dict1.get('age')) # value access using get() method
dict1['name']="Ana" # updating/modifying dictionary element
print("modified element is ",dict1)
dict1['degree']="MCS" # added new key to dictionary
print("new key added is ",dict1)
print("remove key but value remains is ",dict1.pop('address')) # removing element using "key" through "pop()" method
print("dictionary after pop method is ",dict1)
print("popitem which remove any element from dictionary is ",dict1.popitem()) # popitem() method
print("dictionary after popitem method is ",dict1)
dict2={'a':'ai','m':'ml','d':'ds'}
del dict2['d'] # using "del" keyword complete key-value pair delete
print("dictionary is ",dict2)
dict2.clear() # remove all elements using clear() method
print("after clear method dictionary is ",dict2)
# Dictionary methods
dict3=dict.copy() # copy() method — shallow copy of the instance named `dict`
print("copy of dictionary is ",dict3)
marks={}.fromkeys(['eng','math','sl'],0) # fromkeys() method "assign same value to all keys" using list of tuples
print("same value for all key is ",marks)
print("keys of dictionary is ",dict.keys()) # returns dictionary keys
print("value of dictionary is ",dict.values()) # returns dictionary values
d={}
print("predefined methods and attributes of dictionary are ",dir(d)) # returns methods and attributes of dictionary
# +
# String operation
# FIX: the original named this variable ``str``, shadowing the built-in
# ``str`` type; it is renamed here so the built-in stays usable.
greeting = "Hi Macbook.."  # string create
print("string is ", greeting)
print("string related methods and attribute are ", dir(greeting))  # methods and attributes using dir()
print("string repeation is ", greeting * 4)  # repeat string 4 times
str1 = "This is jupyter"
print("string concatination is ", greeting + str1)  # string concatenation using "+" operator
print("membership test of string is ", 'i' in str1)  # membership test using "in" operator
# string methods
print("string in upper case format is ", str1.upper())  # upper()
print("string in lower case format is ", str1.lower())  # lower()
print("finding letter is ", str1.find('is'))  # find() returns the index of the first match (-1 if absent)
print("spliting the string is ", str1.split())  # split() returns a list of words
print("output of join is ", ['_'.join(str1)])  # join() inserts "_" between every character of str1
print("replacing words is ", 'Hi macbook'.replace("Hi", "Hello"))  # replace()
# program to check whether a given string is a palindrome
mystr = "mam"
revstr = reversed(mystr)  # reversed() yields the characters in reverse order
if list(mystr) == list(revstr):
    print("given string is palindrom string")
else:
    print("given string is simple string")
# program to sort words in alphabetic order
astr = "This is macbook air and currently working with jupyter notebook"  # string create
words = astr.split()  # separate the words with split()
words.sort()  # sort in place (uppercase letters sort before lowercase)
for word in words:
    print(word)  # print the words alphabetically
# -
| 15,237 |
/.ipynb_checkpoints/AbstractSummarizer-checkpoint.ipynb | 94fd8be52d9ea9b59cbf43bbf47c54b1ca0f3431 | [] | no_license | keithRebello/abstractSummarizer | https://github.com/keithRebello/abstractSummarizer | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 3,754 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <span class='h2'>D - 乱数生成</span>
# <hr>
# <p>Time Limit: 2 sec / Memory Limit: 256 MB</p>
#
# <div id='task-statement'>
# <div id='task-statement'>
# <div class='part'>
#
# <section>
# <h3>問題文</h3>
#
# <p><var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-1'><span class='MJXp-mn' id='MJXp-Span-2'>2</span></span></span><script type='math/tex' id='MathJax-Element-1'>2</script></var> つの整数 <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-3'><span class='MJXp-mi MJXp-italic' id='MJXp-Span-4'>N</span></span></span><script type='math/tex' id='MathJax-Element-2'>N</script></var>、<var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-5'><span class='MJXp-mi MJXp-italic' id='MJXp-Span-6'>K</span></span></span><script type='math/tex' id='MathJax-Element-3'>K</script></var> が与えられます。</p>
#
# <p><var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-7'><span class='MJXp-mn' id='MJXp-Span-8'>1</span></span></span><script type='math/tex' id='MathJax-Element-4'>1</script></var> 以上 <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-9'><span class='MJXp-mi MJXp-italic' id='MJXp-Span-10'>N</span></span></span><script type='math/tex' id='MathJax-Element-5'>N</script></var> 以下の整数のなかからどれか <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-11'><span class='MJXp-mn' id='MJXp-Span-12'>1</span></span></span><script type='math/tex' id='MathJax-Element-6'>1</script></var> つ、等確率に選ぶ機械を考えます。</p>
#
# <p>その機械を <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-13'><span class='MJXp-mn' id='MJXp-Span-14'>3</span></span></span><script type='math/tex' id='MathJax-Element-7'>3</script></var> 回動かした時、選ばれた <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-15'><span class='MJXp-mn' id='MJXp-Span-16'>3</span></span></span><script type='math/tex' id='MathJax-Element-8'>3</script></var> つの数の中央値が <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-17'><span class='MJXp-mi MJXp-italic' id='MJXp-Span-18'>K</span></span></span><script type='math/tex' id='MathJax-Element-9'>K</script></var> となる確率を求めてください。</p>
#
# </section>
# </div>
# <hr>
# <div class='io-style'>
# <div class='part'>
#
# <section>
# <h3>入力</h3>
#
# <p>入力は以下の形式で標準入力から与えられる。</p>
#
# <pre><var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-19'><span class='MJXp-mi MJXp-italic' id='MJXp-Span-20'>N</span></span></span><script type='math/tex' id='MathJax-Element-10'>N</script></var> <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-21'><span class='MJXp-mi MJXp-italic' id='MJXp-Span-22'>K</span></span></span><script type='math/tex' id='MathJax-Element-11'>K</script></var>
# </pre>
#
# <p><var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-23'><span class='MJXp-mn' id='MJXp-Span-24'>1</span></span></span><script type='math/tex' id='MathJax-Element-12'>1</script></var> 行目には整数 <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-25'><span class='MJXp-mi MJXp-italic' id='MJXp-Span-26'>N</span><span class='MJXp-mo' id='MJXp-Span-27' style='margin-left: 0em; margin-right: 0em;'>(</span><span class='MJXp-mn' id='MJXp-Span-28'>1</span><span class='MJXp-mo' id='MJXp-Span-29' style='margin-left: 0.333em; margin-right: 0.333em;'>≦</span><span class='MJXp-mi MJXp-italic' id='MJXp-Span-30'>N</span><span class='MJXp-mo' id='MJXp-Span-31' style='margin-left: 0.333em; margin-right: 0.333em;'>≦</span><span class='MJXp-msubsup' id='MJXp-Span-32'><span class='MJXp-mn' id='MJXp-Span-33' style='margin-right: 0.05em;'>10</span><span class='MJXp-mn MJXp-script' id='MJXp-Span-34' style='vertical-align: 0.5em;'>6</span></span><span class='MJXp-mo' id='MJXp-Span-35' style='margin-left: 0em; margin-right: 0em;'>)</span></span></span><script type='math/tex' id='MathJax-Element-13'>N(1 ≦ N ≦ 10^6)</script></var>、<var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-36'><span class='MJXp-mi MJXp-italic' id='MJXp-Span-37'>K</span><span class='MJXp-mo' id='MJXp-Span-38' style='margin-left: 0em; margin-right: 0em;'>(</span><span class='MJXp-mn' id='MJXp-Span-39'>1</span><span class='MJXp-mo' id='MJXp-Span-40' style='margin-left: 0.333em; margin-right: 0.333em;'>≦</span><span class='MJXp-mi MJXp-italic' id='MJXp-Span-41'>K</span><span class='MJXp-mo' id='MJXp-Span-42' style='margin-left: 0.333em; margin-right: 0.333em;'>≦</span><span class='MJXp-mi MJXp-italic' id='MJXp-Span-43'>N</span><span class='MJXp-mo' id='MJXp-Span-44' style='margin-left: 0em; margin-right: 0em;'>)</span></span></span><script type='math/tex' id='MathJax-Element-14'>K(1 ≦ K 
≦ N)</script></var> が空白区切りで与えられる。</p>
#
# </section>
# </div>
# <div class='part'>
#
# <section>
# <h3>出力</h3>
#
# <p>出力の中央値が <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-45'><span class='MJXp-mi MJXp-italic' id='MJXp-Span-46'>K</span></span></span><script type='math/tex' id='MathJax-Element-15'>K</script></var> となる確率を出力せよ。出力は標準出力に行い、末尾に改行を入れること。</p>
#
# <p>なお、想定解との絶対誤差または相対誤差が <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-47'><span class='MJXp-msubsup' id='MJXp-Span-48'><span class='MJXp-mn' id='MJXp-Span-49' style='margin-right: 0.05em;'>10</span><span class='MJXp-mrow MJXp-script' id='MJXp-Span-50' style='vertical-align: 0.5em;'><span class='MJXp-mo' id='MJXp-Span-51'>−</span><span class='MJXp-mn' id='MJXp-Span-52'>9</span></span></span></span></span><script type='math/tex' id='MathJax-Element-16'>10^{-9}</script></var> 以下であれば、正解として扱われる。</p>
#
# </section>
# </div>
# </div>
# <hr>
# <div class='part'>
#
# <section>
# <h3>入力例1 <span class='btn btn-default btn-sm btn-copy' tabindex='0' data-toggle='tooltip' data-trigger='manual' title='' data-target='pre-sample0' data-original-title='Copied!'>Copy</span></h3>
#
# <div class='div-btn-copy'><span class='btn-copy btn-pre' tabindex='0' data-toggle='tooltip' data-trigger='manual' title='' style='display: none;' data-target='pre-sample0' data-original-title='Copied!'>Copy</span></div><pre id='pre-sample0'>3 2
# </pre>
#
# </section>
# </div>
# <div class='part'>
#
# <section>
# <h3>出力例1 <span class='btn btn-default btn-sm btn-copy' tabindex='0' data-toggle='tooltip' data-trigger='manual' title='' data-target='pre-sample1' data-original-title='Copied!'>Copy</span></h3>
#
# <div class='div-btn-copy'><span class='btn-copy btn-pre' tabindex='0' data-toggle='tooltip' data-trigger='manual' title='' style='display: none;' data-target='pre-sample1' data-original-title='Copied!'>Copy</span></div><pre id='pre-sample1'>0.48148148148148148148
# </pre>
#
# <p>中央値が <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-53'><span class='MJXp-mn' id='MJXp-Span-54'>2</span></span></span><script type='math/tex' id='MathJax-Element-17'>2</script></var> となるのは、( <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-55'><span class='MJXp-mn' id='MJXp-Span-56'>1</span></span></span><script type='math/tex' id='MathJax-Element-18'>1</script></var> 回目の出力, <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-57'><span class='MJXp-mn' id='MJXp-Span-58'>2</span></span></span><script type='math/tex' id='MathJax-Element-19'>2</script></var> 回目の出力, <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-59'><span class='MJXp-mn' id='MJXp-Span-60'>3</span></span></span><script type='math/tex' id='MathJax-Element-20'>3</script></var> 回目の出力) が</p>
#
# <ul class='simple'>
#
# <li>(1, 2, 3)</li>
#
# <li>(1, 3, 2)</li>
#
# <li>(2, 1, 3)</li>
#
# <li>(2, 3, 1)</li>
#
# <li>(3, 1, 2)</li>
#
# <li>(3, 2, 1)</li>
#
# <li>(1, 2, 2)</li>
#
# <li>(2, 1, 2)</li>
#
# <li>(2, 2, 1)</li>
#
# <li>(3, 2, 2)</li>
#
# <li>(2, 3, 2)</li>
#
# <li>(2, 2, 3)</li>
#
# <li>(2, 2, 2)</li>
#
# </ul>
#
# <p>となる場合で、このいずれかが出る確率は <var><span class='MathJax_Preview' style='color: inherit;'><span class='MJXp-math' id='MJXp-Span-61'><span class='MJXp-mn' id='MJXp-Span-62'>13</span><span class='MJXp-mrow' id='MJXp-Span-63'><span class='MJXp-mo' id='MJXp-Span-64' style='margin-left: 0.111em; margin-right: 0.111em;'>/</span></span><span class='MJXp-mn' id='MJXp-Span-65'>27</span><span class='MJXp-mo' id='MJXp-Span-66' style='margin-left: 0.333em; margin-right: 0.333em;'>=</span><span class='MJXp-mn' id='MJXp-Span-67'>0.481481481...</span></span></span><script type='math/tex' id='MathJax-Element-21'>13/27 = 0.481481481...</script></var> です</p>
#
# </section>
# </div>
# <hr>
# <div class='part'>
#
# <section>
# <h3>入力例2 <span class='btn btn-default btn-sm btn-copy' tabindex='0' data-toggle='tooltip' data-trigger='manual' title='' data-target='pre-sample2' data-original-title='Copied!'>Copy</span></h3>
#
# <div class='div-btn-copy'><span class='btn-copy btn-pre' tabindex='0' data-toggle='tooltip' data-trigger='manual' title='' style='display: none;' data-target='pre-sample2' data-original-title='Copied!'>Copy</span></div><pre id='pre-sample2'>3 1
# </pre>
#
# </section>
# </div>
# <div class='part'>
#
# <section>
# <h3>出力例2 <span class='btn btn-default btn-sm btn-copy' tabindex='0' data-toggle='tooltip' data-trigger='manual' title='' data-target='pre-sample3' data-original-title='Copied!'>Copy</span></h3>
#
# <div class='div-btn-copy'><span class='btn-copy btn-pre' tabindex='0' data-toggle='tooltip' data-trigger='manual' title='' style='display: none;' data-target='pre-sample3' data-original-title='Copied!'>Copy</span></div><pre id='pre-sample3'>0.25925925925925925926
# </pre>
#
# </section>
# </div>
# <hr>
# <div class='part'>
#
# <section>
# <h3>入力例3 <span class='btn btn-default btn-sm btn-copy' tabindex='0' data-toggle='tooltip' data-trigger='manual' title='' data-target='pre-sample4' data-original-title='Copied!'>Copy</span></h3>
#
# <div class='div-btn-copy'><span class='btn-copy btn-pre' tabindex='0' data-toggle='tooltip' data-trigger='manual' title='' style='display: none;' data-target='pre-sample4' data-original-title='Copied!'>Copy</span></div><pre id='pre-sample4'>765 573
# </pre>
#
# </section>
# </div>
# <div class='part'>
#
# <section>
# <h3>出力例3 <span class='btn btn-default btn-sm btn-copy' tabindex='0' data-toggle='tooltip' data-trigger='manual' title='' data-target='pre-sample5' data-original-title='Copied!'>Copy</span></h3>
#
# <div class='div-btn-copy'><span class='btn-copy btn-pre' tabindex='0' data-toggle='tooltip' data-trigger='manual' title='' style='display: none;' data-target='pre-sample5' data-original-title='Copied!'>Copy</span></div><pre id='pre-sample5'>0.00147697396984624371
# </pre>
#
# </section>
# </div>
# </div>
#
# </div>
#
#
#
#
# </div>
# </div>
#
#
#
# <hr>
#
#
#
#
#
# +
# Notebook input-helper cell: wires a Textarea widget up as the "stdin"
# source for competitive-programming solutions in this notebook.
from ipywidgets import Textarea
import sys
sys.path.append('../../..')  # make the repository root importable
from utils.multi_line_input import multi_line_input  # project-local helper
text_area = Textarea()
# NOTE(review): deliberately shadows the built-in ``input`` — presumably
# multi_line_input() returns a callable that reads lines from the widget;
# confirm against utils/multi_line_input.
input = multi_line_input()
display(text_area)  # display() is injected by the IPython notebook runtime
| 11,887 |
/course5-RNN/week3/Neural_machine_translation_with_attention_v4a.ipynb | cec748cc8f0bddf652f737b5183b84f9d1fd850e | [] | no_license | giladElichai/coursera_DeepLearningSpecialization | https://github.com/giladElichai/coursera_DeepLearningSpecialization | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 61,683 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Neural Machine Translation
#
# Welcome to your first programming assignment for this week!
#
# * You will build a Neural Machine Translation (NMT) model to translate human-readable dates ("25th of June, 2009") into machine-readable dates ("2009-06-25").
# * You will do this using an attention model, one of the most sophisticated sequence-to-sequence models.
#
# This notebook was produced together with NVIDIA's Deep Learning Institute.
# ## <font color='darkblue'>Updates</font>
#
# #### If you were working on the notebook before this update...
# * The current notebook is version "4a".
# * You can find your original work saved in the notebook with the previous version name ("v4")
# * To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory.
#
# #### List of updates
# * Clarified names of variables to be consistent with the lectures and consistent within the assignment
# - pre-attention bi-directional LSTM: the first LSTM that processes the input data.
# - 'a': the hidden state of the pre-attention LSTM.
# - post-attention LSTM: the LSTM that outputs the translation.
# - 's': the hidden state of the post-attention LSTM.
# - energies "e". The output of the dense function that takes "a" and "s" as inputs.
# - All references to "output activation" are updated to "hidden state".
# - "post-activation" sequence model is updated to "post-attention sequence model".
# - 3.1: "Getting the activations from the Network" renamed to "Getting the attention weights from the network."
# - Appropriate mentions of "activation" replaced "attention weights."
# - Sequence of alphas corrected to be a sequence of "a" hidden states.
# * one_step_attention:
# - Provides sample code for each Keras layer, to show how to call the functions.
# - Reminds students to provide the list of hidden states in a specific order, in order to pause the autograder.
# * model
# - Provides sample code for each Keras layer, to show how to call the functions.
# - Added a troubleshooting note about handling errors.
# - Fixed typo: outputs should be of length 10 and not 11.
# * define optimizer and compile model
# - Provides sample code for each Keras layer, to show how to call the functions.
#
# * Spelling, grammar and wording corrections.
# Let's load all the packages you will need for this assignment.
# +
from keras.layers import Bidirectional, Concatenate, Permute, Dot, Input, LSTM, Multiply
from keras.layers import RepeatVector, Dense, Activation, Lambda
from keras.optimizers import Adam
from keras.utils import to_categorical
from keras.models import load_model, Model
import keras.backend as K
import numpy as np
from faker import Faker
import random
from tqdm import tqdm
from babel.dates import format_date
from nmt_utils import *
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ## 1 - Translating human readable dates into machine readable dates
#
# * The model you will build here could be used to translate from one language to another, such as translating from English to Hindi.
# * However, language translation requires massive datasets and usually takes days of training on GPUs.
# * To give you a place to experiment with these models without using massive datasets, we will perform a simpler "date translation" task.
# * The network will input a date written in a variety of possible formats (*e.g. "the 29th of August 1958", "03/30/1968", "24 JUNE 1987"*)
# * The network will translate them into standardized, machine readable dates (*e.g. "1958-08-29", "1968-03-30", "1987-06-24"*).
# * We will have the network learn to output dates in the common machine-readable format YYYY-MM-DD.
#
# <!--
# Take a look at [nmt_utils.py](./nmt_utils.py) to see all the formatting. Count and figure out how the formats work, you will need this knowledge later. !-->
# ### 1.1 - Dataset
#
# We will train the model on a dataset of 10,000 human readable dates and their equivalent, standardized, machine readable dates. Let's run the following cells to load the dataset and print some examples.
# Generate m synthetic (human-readable, machine-readable) date pairs plus the
# character-to-index vocabularies used to encode them (from nmt_utils).
m = 10000
dataset, human_vocab, machine_vocab, inv_machine_vocab = load_dataset(m)
dataset[:10]  # peek at the first few (human, machine) date pairs
# You've loaded:
# - `dataset`: a list of tuples of (human readable date, machine readable date).
# - `human_vocab`: a python dictionary mapping all characters used in the human readable dates to an integer-valued index.
# - `machine_vocab`: a python dictionary mapping all characters used in machine readable dates to an integer-valued index.
# - **Note**: These indices are not necessarily consistent with `human_vocab`.
# - `inv_machine_vocab`: the inverse dictionary of `machine_vocab`, mapping from indices back to characters.
#
# Let's preprocess the data and map the raw text data into the index values.
# - We will set Tx=30
# - We assume Tx is the maximum length of the human readable date.
# - If we get a longer input, we would have to truncate it.
# - We will set Ty=10
# - "YYYY-MM-DD" is 10 characters long.
# +
# Tx: maximum length of a human-readable date (inputs are padded to this).
Tx = 30
# Ty: length of the machine-readable output "YYYY-MM-DD" (10 characters).
Ty = 10
# X, Y are integer-index encodings; Xoh, Yoh are their one-hot versions.
X, Y, Xoh, Yoh = preprocess_data(dataset, human_vocab, machine_vocab, Tx, Ty)
print("X.shape:", X.shape)
print("Y.shape:", Y.shape)
print("Xoh.shape:", Xoh.shape)
print("Yoh.shape:", Yoh.shape)
# -
# You now have:
# - `X`: a processed version of the human readable dates in the training set.
# - Each character in X is replaced by an index (integer) mapped to the character using `human_vocab`.
# - Each date is padded to ensure a length of $T_x$ using a special character (< pad >).
# - `X.shape = (m, Tx)` where m is the number of training examples in a batch.
# - `Y`: a processed version of the machine readable dates in the training set.
# - Each character is replaced by the index (integer) it is mapped to in `machine_vocab`.
# - `Y.shape = (m, Ty)`.
# - `Xoh`: one-hot version of `X`
# - Each index in `X` is converted to the one-hot representation (if the index is 2, the one-hot version has the index position 2 set to 1, and the remaining positions are 0.
# - `Xoh.shape = (m, Tx, len(human_vocab))`
# - `Yoh`: one-hot version of `Y`
# - Each index in `Y` is converted to the one-hot representation.
# - `Yoh.shape = (m, Tx, len(machine_vocab))`.
# - `len(machine_vocab) = 11` since there are 10 numeric digits (0 to 9) and the `-` symbol.
# * Let's also look at some examples of preprocessed training examples.
# * Feel free to play with `index` in the cell below to navigate the dataset and see how source/target dates are preprocessed.
# Inspect one training example before and after preprocessing; change `index`
# to browse other examples in the dataset.
index = 0
print("Source date:", dataset[index][0])
print("Target date:", dataset[index][1])
print()
print("Source after preprocessing (indices):", X[index])
print("Target after preprocessing (indices):", Y[index])
print()
print("Source after preprocessing (one-hot):", Xoh[index])
print("Target after preprocessing (one-hot):", Yoh[index])
# ## 2 - Neural machine translation with attention
#
# * If you had to translate a book's paragraph from French to English, you would not read the whole paragraph, then close the book and translate.
# * Even during the translation process, you would read/re-read and focus on the parts of the French paragraph corresponding to the parts of the English you are writing down.
# * The attention mechanism tells a Neural Machine Translation model where it should pay attention to at any step.
#
#
# ### 2.1 - Attention mechanism
#
# In this part, you will implement the attention mechanism presented in the lecture videos.
# * Here is a figure to remind you how the model works.
# * The diagram on the left shows the attention model.
# * The diagram on the right shows what one "attention" step does to calculate the attention variables $\alpha^{\langle t, t' \rangle}$.
# * The attention variables $\alpha^{\langle t, t' \rangle}$ are used to compute the context variable $context^{\langle t \rangle}$ for each timestep in the output ($t=1, \ldots, T_y$).
#
# <table>
# <td>
# <img src="images/attn_model.png" style="width:500;height:500px;"> <br>
# </td>
# <td>
# <img src="images/attn_mechanism.png" style="width:500;height:500px;"> <br>
# </td>
# </table>
# <caption><center> **Figure 1**: Neural machine translation with attention</center></caption>
#
# Here are some properties of the model that you may notice:
#
# #### Pre-attention and Post-attention LSTMs on both sides of the attention mechanism
# - There are two separate LSTMs in this model (see diagram on the left): pre-attention and post-attention LSTMs.
# - *Pre-attention* Bi-LSTM is the one at the bottom of the picture is a Bi-directional LSTM and comes *before* the attention mechanism.
# - The attention mechanism is shown in the middle of the left-hand diagram.
# - The pre-attention Bi-LSTM goes through $T_x$ time steps
# - *Post-attention* LSTM: at the top of the diagram comes *after* the attention mechanism.
# - The post-attention LSTM goes through $T_y$ time steps.
#
# - The post-attention LSTM passes the hidden state $s^{\langle t \rangle}$ and cell state $c^{\langle t \rangle}$ from one time step to the next.
# #### An LSTM has both a hidden state and cell state
# * In the lecture videos, we were using only a basic RNN for the post-attention sequence model
# * This means that the state captured by the RNN was outputting only the hidden state $s^{\langle t\rangle}$.
# * In this assignment, we are using an LSTM instead of a basic RNN.
# * So the LSTM has both the hidden state $s^{\langle t\rangle}$ and the cell state $c^{\langle t\rangle}$.
# #### Each time step does not use predictions from the previous time step
# * Unlike previous text generation examples earlier in the course, in this model, the post-attention LSTM at time $t$ does not take the previous time step's prediction $y^{\langle t-1 \rangle}$ as input.
# * The post-attention LSTM at time 't' only takes the hidden state $s^{\langle t\rangle}$ and cell state $c^{\langle t\rangle}$ as input.
# * We have designed the model this way because unlike language generation (where adjacent characters are highly correlated) there isn't as strong a dependency between the previous character and the next character in a YYYY-MM-DD date.
# #### Concatenation of hidden states from the forward and backward pre-attention LSTMs
# - $\overrightarrow{a}^{\langle t \rangle}$: hidden state of the forward-direction, pre-attention LSTM.
# - $\overleftarrow{a}^{\langle t \rangle}$: hidden state of the backward-direction, pre-attention LSTM.
# - $a^{\langle t \rangle} = [\overrightarrow{a}^{\langle t \rangle}, \overleftarrow{a}^{\langle t \rangle}]$: the concatenation of the activations of both the forward-direction $\overrightarrow{a}^{\langle t \rangle}$ and backward-directions $\overleftarrow{a}^{\langle t \rangle}$ of the pre-attention Bi-LSTM.
# #### Computing "energies" $e^{\langle t, t' \rangle}$ as a function of $s^{\langle t-1 \rangle}$ and $a^{\langle t' \rangle}$
# - Recall in the lesson videos "Attention Model", at time 6:45 to 8:16, the definition of "e" as a function of $s^{\langle t-1 \rangle}$ and $a^{\langle t \rangle}$.
# - "e" is called the "energies" variable.
# - $s^{\langle t-1 \rangle}$ is the hidden state of the post-attention LSTM
# - $a^{\langle t' \rangle}$ is the hidden state of the pre-attention LSTM.
# - $s^{\langle t-1 \rangle}$ and $a^{\langle t \rangle}$ are fed into a simple neural network, which learns the function to output $e^{\langle t, t' \rangle}$.
# - $e^{\langle t, t' \rangle}$ is then used when computing the attention $a^{\langle t, t' \rangle}$ that $y^{\langle t \rangle}$ should pay to $a^{\langle t' \rangle}$.
# - The diagram on the right of figure 1 uses a `RepeatVector` node to copy $s^{\langle t-1 \rangle}$'s value $T_x$ times.
# - Then it uses `Concatenation` to concatenate $s^{\langle t-1 \rangle}$ and $a^{\langle t \rangle}$.
# - The concatenation of $s^{\langle t-1 \rangle}$ and $a^{\langle t \rangle}$ is fed into a "Dense" layer, which computes $e^{\langle t, t' \rangle}$.
# - $e^{\langle t, t' \rangle}$ is then passed through a softmax to compute $\alpha^{\langle t, t' \rangle}$.
# - Note that the diagram doesn't explicitly show variable $e^{\langle t, t' \rangle}$, but $e^{\langle t, t' \rangle}$ is above the Dense layer and below the Softmax layer in the diagram in the right half of figure 1.
# - We'll explain how to use `RepeatVector` and `Concatenation` in Keras below.
# ### Implementation Details
#
# Let's implement this neural translator. You will start by implementing two functions: `one_step_attention()` and `model()`.
#
# #### one_step_attention
# * The inputs to the one_step_attention at time step $t$ are:
# - $[a^{<1>},a^{<2>}, ..., a^{<T_x>}]$: all hidden states of the pre-attention Bi-LSTM.
# - $s^{<t-1>}$: the previous hidden state of the post-attention LSTM
# * one_step_attention computes:
# - $[\alpha^{<t,1>},\alpha^{<t,2>}, ..., \alpha^{<t,T_x>}]$: the attention weights
# - $context^{ \langle t \rangle }$: the context vector:
#
# $$context^{<t>} = \sum_{t' = 1}^{T_x} \alpha^{<t,t'>}a^{<t'>}\tag{1}$$
#
# ##### Clarifying 'context' and 'c'
# - In the lecture videos, the context was denoted $c^{\langle t \rangle}$
# - In the assignment, we are calling the context $context^{\langle t \rangle}$.
# - This is to avoid confusion with the post-attention LSTM's internal memory cell variable, which is also denoted $c^{\langle t \rangle}$.
# #### Implement `one_step_attention`
#
# **Exercise**: Implement `one_step_attention()`.
#
# * The function `model()` will call the layers in `one_step_attention()` $T_y$ using a for-loop.
# * It is important that all $T_y$ copies have the same weights.
# * It should not reinitialize the weights every time.
# * In other words, all $T_y$ steps should have shared weights.
# * Here's how you can implement layers with shareable weights in Keras:
# 1. Define the layer objects in a variable scope that is outside of the `one_step_attention` function. For example, defining the objects as global variables would work.
# - Note that defining these variables inside the scope of the function `model` would technically work, since `model` will then call the `one_step_attention` function. For the purposes of making grading and troubleshooting easier, we are defining these as global variables. Note that the automatic grader will expect these to be global variables as well.
# 2. Call these objects when propagating the input.
# * We have defined the layers you need as global variables.
# * Please run the following cells to create them.
# * Please note that the automatic grader expects these global variables with the given variable names. For grading purposes, please do not rename the global variables.
# * Please check the Keras documentation to learn more about these layers. The layers are functions. Below are examples of how to call these functions.
# * [RepeatVector()](https://keras.io/layers/core/#repeatvector)
# ```Python
# var_repeated = repeat_layer(var1)
# ```
# * [Concatenate()](https://keras.io/layers/merge/#concatenate)
# ```Python
# concatenated_vars = concatenate_layer([var1,var2,var3])
# ```
# * [Dense()](https://keras.io/layers/core/#dense)
# ```Python
# var_out = dense_layer(var_in)
# ```
# * [Activation()](https://keras.io/layers/core/#activation)
# ```Python
# activation = activation_layer(var_in)
# ```
# * [Dot()](https://keras.io/layers/merge/#dot)
# ```Python
# dot_product = dot_layer([var1,var2])
# ```
# Defined shared layers as global variables
# These layer objects are created once and reused by every call of
# one_step_attention(), so the attention weights are shared across all
# Ty output time steps.
repeator = RepeatVector(Tx)  # copies s_prev Tx times along a new time axis
concatenator = Concatenate(axis=-1)  # joins a and the repeated s_prev on the feature axis
densor1 = Dense(10, activation = "tanh")  # first layer of the small "energy" network
densor2 = Dense(1, activation = "relu")  # outputs one energy e<t,t'> per input time step
activator = Activation(softmax, name='attention_weights') # We are using a custom softmax(axis = 1) loaded in this notebook
dotor = Dot(axes = 1)  # weighted sum over the Tx axis -> context vector
# +
# GRADED FUNCTION: one_step_attention
def one_step_attention(a, s_prev):
    """
    Performs one step of attention: Outputs a context vector computed as a dot product of the attention weights
    "alphas" and the hidden states "a" of the Bi-LSTM.
    Arguments:
    a -- hidden state output of the Bi-LSTM, numpy-array of shape (m, Tx, 2*n_a)
    s_prev -- previous hidden state of the (post-attention) LSTM, numpy-array of shape (m, n_s)
    Returns:
    context -- context vector, input of the next (post-attention) LSTM cell
    """
    ### START CODE HERE ###
    # Use repeator to repeat s_prev to be of shape (m, Tx, n_s) so that you can concatenate it with all hidden states "a" (≈ 1 line)
    s_prev = repeator(s_prev)
    # Use concatenator to concatenate a and s_prev on the last axis (≈ 1 line)
    # For grading purposes, please list 'a' first and 's_prev' second, in this order.
    concat = concatenator([a, s_prev])
    # Use densor1 to propagate concat through a small fully-connected neural network to compute the "intermediate energies" variable e. (≈1 lines)
    e = densor1(concat)
    # Use densor2 to propagate e through a small fully-connected neural network to compute the "energies" variable energies. (≈1 lines)
    energies = densor2(e)
    # Use "activator" on "energies" to compute the attention weights "alphas" (≈ 1 line)
    # activator is the custom softmax over the Tx axis, so each example's weights sum to 1.
    alphas = activator(energies)
    # Use dotor together with "alphas" and "a" to compute the context vector to be given to the next (post-attention) LSTM-cell (≈ 1 line)
    # context is the attention-weighted sum of the Bi-LSTM states (one vector per example).
    context = dotor([alphas, a])
    ### END CODE HERE ###
    return context
# -
# You will be able to check the expected output of `one_step_attention()` after you've coded the `model()` function.
# #### model
# * `model` first runs the input through a Bi-LSTM to get $[a^{<1>},a^{<2>}, ..., a^{<T_x>}]$.
# * Then, `model` calls `one_step_attention()` $T_y$ times using a `for` loop. At each iteration of this loop:
# - It gives the computed context vector $context^{<t>}$ to the post-attention LSTM.
# - It runs the output of the post-attention LSTM through a dense layer with softmax activation.
# - The softmax generates a prediction $\hat{y}^{<t>}$.
# **Exercise**: Implement `model()` as explained in figure 1 and the text above. Again, we have defined global layers that will share weights to be used in `model()`.
# +
n_a = 32 # number of units for the pre-attention, bi-directional LSTM's hidden state 'a'
n_s = 64 # number of units for the post-attention LSTM's hidden state "s"
# Please note, this is the post attention LSTM cell.
# For the purposes of passing the automatic grader
# please do not modify this global variable. This will be corrected once the automatic grader is also updated.
# Defined globally so the same weights are reused at every one of the Ty decoding steps.
post_activation_LSTM_cell = LSTM(n_s, return_state = True) # post-attention LSTM
# One softmax unit per character of the machine-readable output vocabulary.
output_layer = Dense(len(machine_vocab), activation=softmax)
# -
# Now you can use these layers $T_y$ times in a `for` loop to generate the outputs, and their parameters will not be reinitialized. You will have to carry out the following steps:
#
# 1. Propagate the input `X` into a bi-directional LSTM.
# * [Bidirectional](https://keras.io/layers/wrappers/#bidirectional)
# * [LSTM](https://keras.io/layers/recurrent/#lstm)
# * Remember that we want the LSTM to return a full sequence instead of just the last hidden state.
#
# Sample code:
#
# ```Python
# sequence_of_hidden_states = Bidirectional(LSTM(units=..., return_sequences=...))(the_input_X)
# ```
#
# 2. Iterate for $t = 0, \cdots, T_y-1$:
# 1. Call `one_step_attention()`, passing in the sequence of hidden states $[a^{\langle 1 \rangle},a^{\langle 2 \rangle}, ..., a^{ \langle T_x \rangle}]$ from the pre-attention bi-directional LSTM, and the previous hidden state $s^{<t-1>}$ from the post-attention LSTM to calculate the context vector $context^{<t>}$.
# 2. Give $context^{<t>}$ to the post-attention LSTM cell.
# - Remember to pass in the previous hidden-state $s^{\langle t-1\rangle}$ and cell-states $c^{\langle t-1\rangle}$ of this LSTM
# * This outputs the new hidden state $s^{<t>}$ and the new cell state $c^{<t>}$.
#
# Sample code:
# ```Python
# next_hidden_state, _ , next_cell_state =
# post_activation_LSTM_cell(inputs=..., initial_state=[prev_hidden_state, prev_cell_state])
# ```
# Please note that the layer is actually the "post attention LSTM cell". For the purposes of passing the automatic grader, please do not modify the naming of this global variable. This will be fixed when we deploy updates to the automatic grader.
# 3. Apply a dense, softmax layer to $s^{<t>}$, get the output.
# Sample code:
# ```Python
# output = output_layer(inputs=...)
# ```
# 4. Save the output by adding it to the list of outputs.
#
# 3. Create your Keras model instance.
# * It should have three inputs:
# * `X`, the one-hot encoded inputs to the model, of shape ($T_{x}, humanVocabSize)$
# * $s^{\langle 0 \rangle}$, the initial hidden state of the post-attention LSTM
# * $c^{\langle 0 \rangle}$), the initial cell state of the post-attention LSTM
# * The output is the list of outputs.
# Sample code
# ```Python
# model = Model(inputs=[...,...,...], outputs=...)
# ```
# +
# GRADED FUNCTION: model
def model(Tx, Ty, n_a, n_s, human_vocab_size, machine_vocab_size):
    """
    Arguments:
    Tx -- length of the input sequence
    Ty -- length of the output sequence
    n_a -- hidden state size of the Bi-LSTM
    n_s -- hidden state size of the post-attention LSTM
    human_vocab_size -- size of the python dictionary "human_vocab"
    machine_vocab_size -- size of the python dictionary "machine_vocab"
    Returns:
    model -- Keras model instance
    """
    # Define the inputs of your model with a shape (Tx,)
    # Define s0 (initial hidden state) and c0 (initial cell state)
    # for the decoder LSTM with shape (n_s,)
    X = Input(shape=(Tx, human_vocab_size))
    s0 = Input(shape=(n_s,), name='s0')
    c0 = Input(shape=(n_s,), name='c0')
    # s and c are updated in the loop below; they start from the provided initial states.
    s = s0
    c = c0
    # Initialize empty list of outputs
    outputs = []
    ### START CODE HERE ###
    # Step 1: Define your pre-attention Bi-LSTM. (≈ 1 line)
    # return_sequences=True keeps the hidden state of every input timestep (needed by attention).
    a = Bidirectional(LSTM(n_a, return_sequences=True))(X)
    # Step 2: Iterate for Ty steps
    for t in range(Ty):
        # Step 2.A: Perform one step of the attention mechanism to get back the context vector at step t (≈ 1 line)
        context = one_step_attention(a, s)
        # Step 2.B: Apply the post-attention LSTM cell to the "context" vector.
        # Don't forget to pass: initial_state = [hidden state, cell state] (≈ 1 line)
        # The cell returns (output, hidden_state, cell_state); the output equals the hidden
        # state here since context covers a single timestep, hence the discarded middle value.
        s, _, c = post_activation_LSTM_cell(context, initial_state=[s,c])
        # Step 2.C: Apply Dense layer to the hidden state output of the post-attention LSTM (≈ 1 line)
        out = output_layer(s)
        # Step 2.D: Append "out" to the "outputs" list (≈ 1 line)
        outputs.append(out)
    # Step 3: Create model instance taking three inputs and returning the list of outputs. (≈ 1 line)
    model = Model( inputs=[X,s0,c0], outputs=outputs )
    ### END CODE HERE ###
    return model
# -
# Run the following cell to create your model.
# NOTE(review): this rebinds the name `model` from the function defined above to the
# returned Model instance (the function is shadowed, so this cell cannot be run twice
# in a row). Later cells rely on `model` being the instance, so the name is kept.
model = model(Tx, Ty, n_a, n_s, len(human_vocab), len(machine_vocab))
# #### Troubleshooting Note
# * If you are getting repeated errors after an initially incorrect implementation of "model", but believe that you have corrected the error, you may still see error messages when building your model.
# * A solution is to save and restart your kernel (or shutdown then restart your notebook), and re-run the cells.
# Let's get a summary of the model to check if it matches the expected output.
model.summary()
# **Expected Output**:
#
# Here is the summary you should see
# <table>
# <tr>
# <td>
# **Total params:**
# </td>
# <td>
# 52,960
# </td>
# </tr>
# <tr>
# <td>
# **Trainable params:**
# </td>
# <td>
# 52,960
# </td>
# </tr>
# <tr>
# <td>
# **Non-trainable params:**
# </td>
# <td>
# 0
# </td>
# </tr>
# <tr>
# <td>
# **bidirectional_1's output shape **
# </td>
# <td>
# (None, 30, 64)
# </td>
# </tr>
# <tr>
# <td>
# **repeat_vector_1's output shape **
# </td>
# <td>
# (None, 30, 64)
# </td>
# </tr>
# <tr>
# <td>
# **concatenate_1's output shape **
# </td>
# <td>
# (None, 30, 128)
# </td>
# </tr>
# <tr>
# <td>
# **attention_weights's output shape **
# </td>
# <td>
# (None, 30, 1)
# </td>
# </tr>
# <tr>
# <td>
# **dot_1's output shape **
# </td>
# <td>
# (None, 1, 64)
# </td>
# </tr>
# <tr>
# <td>
# **dense_3's output shape **
# </td>
# <td>
# (None, 11)
# </td>
# </tr>
# </table>
#
# #### Compile the model
# * After creating your model in Keras, you need to compile it and define the loss function, optimizer and metrics you want to use.
# * Loss function: 'categorical_crossentropy'.
# * Optimizer: [Adam](https://keras.io/optimizers/#adam) [optimizer](https://keras.io/optimizers/#usage-of-optimizers)
# - learning rate = 0.005
# - $\beta_1 = 0.9$
# - $\beta_2 = 0.999$
# - decay = 0.01
# * metric: 'accuracy'
#
# Sample code
# ```Python
# optimizer = Adam(lr=..., beta_1=..., beta_2=..., decay=...)
# model.compile(optimizer=..., loss=..., metrics=[...])
# ```
### START CODE HERE ### (≈2 lines)
# Adam optimizer with the hyperparameters specified above:
# learning rate 0.005, beta_1 0.9, beta_2 0.999, decay 0.01.
opt = Adam(lr=0.005, beta_1=0.9, beta_2=0.999, decay=0.01)
# Categorical cross-entropy matches the one-hot targets; track accuracy per output position.
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
### END CODE HERE ###
# #### Define inputs and outputs, and fit the model
# The last step is to define all your inputs and outputs to fit the model:
# - You have input X of shape $(m = 10000, T_x = 30)$ containing the training examples.
# - You need to create `s0` and `c0` to initialize your `post_attention_LSTM_cell` with zeros.
# - Given the `model()` you coded, you need the "outputs" to be a list of 10 elements of shape (m, T_y).
# - The list `outputs[i][0], ..., outputs[i][Ty]` represents the true labels (characters) corresponding to the $i^{th}$ training example (`X[i]`).
# - `outputs[i][j]` is the true label of the $j^{th}$ character in the $i^{th}$ training example.
# Zero-initialize the post-attention LSTM's hidden and cell states, one row per example.
s0 = np.zeros((m, n_s))
c0 = np.zeros((m, n_s))
# Keras expects one target array per output timestep, so move the Ty axis to the front.
outputs = list(Yoh.swapaxes(0,1))
# Let's now fit the model and run it for one epoch.
model.fit([Xoh, s0, c0], outputs, epochs=1, batch_size=100)
# While training you can see the loss as well as the accuracy on each of the 10 positions of the output. The table below gives you an example of what the accuracies could be if the batch had 2 examples:
#
# <img src="images/table.png" style="width:700;height:200px;"> <br>
# <caption><center>Thus, `dense_2_acc_8: 0.89` means that you are predicting the 7th character of the output correctly 89% of the time in the current batch of data. </center></caption>
#
#
# We have run this model for longer, and saved the weights. Run the next cell to load our weights. (By training a model for several minutes, you should be able to obtain a model of similar accuracy, but loading our model will save you time.)
model.load_weights('models/model.h5')
# You can now see the results on new examples.
EXAMPLES = ['3 May 1979', '5 April 09', '21th of August 2016', 'Tue 10 Jul 2007', 'Saturday May 9 2018', 'March 3 2001', 'March 3rd 2001', '1 March 2001']
for example in EXAMPLES:
    # Encode the human-readable date as integer indices, one-hot each index,
    # then swap the first two axes as the model's input layout requires.
    source = np.array([to_categorical(idx, num_classes=len(human_vocab))
                       for idx in string_to_int(example, Tx, human_vocab)]).swapaxes(0, 1)
    # Most likely machine-vocabulary character at every output timestep.
    prediction = np.argmax(model.predict([source, s0, c0]), axis=-1)
    # Decode the indices back into characters of the YYYY-MM-DD output.
    output = [inv_machine_vocab[int(ch)] for ch in prediction]
    print("source:", example)
    print("output:", ''.join(output), "\n")
# You can also change these examples to test with your own examples. The next part will give you a better sense of what the attention mechanism is doing--i.e., what part of the input the network is paying attention to when generating a particular output character.
# ## 3 - Visualizing Attention (Optional / Ungraded)
#
# Since the problem has a fixed output length of 10, it is also possible to carry out this task using 10 different softmax units to generate the 10 characters of the output. But one advantage of the attention model is that each part of the output (such as the month) knows it needs to depend only on a small part of the input (the characters in the input giving the month). We can visualize what each part of the output is looking at which part of the input.
#
# Consider the task of translating "Saturday 9 May 2018" to "2018-05-09". If we visualize the computed $\alpha^{\langle t, t' \rangle}$ we get this:
#
# <img src="images/date_attention.png" style="width:600;height:300px;"> <br>
# <caption><center> **Figure 8**: Full Attention Map</center></caption>
#
# Notice how the output ignores the "Saturday" portion of the input. None of the output timesteps are paying much attention to that portion of the input. We also see that 9 has been translated as 09 and May has been correctly translated into 05, with the output paying attention to the parts of the input it needs to to make the translation. The year mostly requires it to pay attention to the input's "18" in order to generate "2018."
# ### 3.1 - Getting the attention weights from the network
#
# Lets now visualize the attention values in your network. We'll propagate an example through the network, then visualize the values of $\alpha^{\langle t, t' \rangle}$.
#
# To figure out where the attention values are located, let's start by printing a summary of the model .
# Print the layer list again to locate the `attention_weights` layer by name.
model.summary()
# Navigate through the output of `model.summary()` above. You can see that the layer named `attention_weights` outputs the `alphas` of shape (m, 30, 1) before `dot_2` computes the context vector for every time step $t = 0, \ldots, T_y-1$. Let's get the attention weights from this layer.
#
# The function `attention_map()` pulls out the attention values from your model and plots them.
# Plot the alphas for one example; num/n_s must match the trained model's configuration.
attention_map = plot_attention_map(model, human_vocab, inv_machine_vocab, "Tuesday 09 Oct 1993", num = 7, n_s = 64);
# On the generated plot you can observe the values of the attention weights for each character of the predicted output. Examine this plot and check that the places where the network is paying attention makes sense to you.
#
# In the date translation application, you will observe that most of the time attention helps predict the year, and doesn't have much impact on predicting the day or month.
# ### Congratulations!
#
#
# You have come to the end of this assignment
#
# ## Here's what you should remember
#
# - Machine translation models can be used to map from one sequence to another. They are useful not just for translating human languages (like French->English) but also for tasks like date format translation.
# - An attention mechanism allows a network to focus on the most relevant parts of the input when producing a specific part of the output.
# - A network using an attention mechanism can translate from inputs of length $T_x$ to outputs of length $T_y$, where $T_x$ and $T_y$ can be different.
# - You can visualize attention weights $\alpha^{\langle t,t' \rangle}$ to see what the network is paying attention to while generating each output.
# Congratulations on finishing this assignment! You are now able to implement an attention model and use it to learn complex mappings from one sequence to another.
| 32,437 |
/basics_python/classes_objects_methods.ipynb | 486807ffe5996d7e2240acc6bd4bb807c1281c48 | [] | no_license | ritumehra/trainings | https://github.com/ritumehra/trainings | 0 | 1 | null | null | null | null | Jupyter Notebook | false | false | .py | 6,690 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# ******************* CLASSES ********************
# +
# Class Definition
class MyTestClass:
    '''This is a docstring. I have created a new class'''
    # `pass` gives the class an empty body; it defines no attributes or methods.
    pass
# +
#Accessing Class Attributes using class name
class MyTestClass:
    "This is my second class"
    # class attribute, shared by the class and all of its instances
    a = 10
    def func(self):
        # simple instance method; just prints a greeting
        print('Hello')
# Output: 10
print(MyTestClass.a)
# Output: <function MyTestClass.func at 0x0000000003079BF8>
# (accessed via the class, func is a plain function object; the address varies per run)
print(MyTestClass.func)
# Output: 'This is my second class'
# (the class docstring is stored in the __doc__ attribute)
print(MyTestClass.__doc__)
# +
# ************* OBJECTS *************
# -
# Class Objects
ob = MyTestClass()
# +
class MyClass:
    "This is my second class"
    # class attribute shared by all instances
    a = 10
    # Self represents the Class Object itself
    def func(self):
        print('Hello')
# create a new MyClass
ob = MyClass()
# CONCEPT
# Any function object that is a class attribute defines a method for objects of that class.
# Output: <function MyClass.func at 0x000000000335B0D0>
# (via the class: a plain function; the hex address varies per run)
print(MyClass.func)
# Output: <bound method MyClass.func of <__main__.MyClass object at 0x000000000332DEF0>>
# (via an instance: a bound method with `self` already filled in)
print(ob.func)
# Calling function func()
# Output: Hello
ob.func()
# +
# ************* Constructors and Destructors *************
# +
class ComplexNumber:
    """Small demo class showing constructor and destructor hooks.

    Stores a complex number as separate `real` and `imag` attributes.
    """

    def __init__(self, r = 0, i = 0):
        # Announce construction, then store both parts on the instance.
        print("__init__ Called")
        self.real, self.imag = r, i

    def getData(self):
        # Announce the call, then print the number in "a+bj" form.
        print("getData Called")
        rendered = "{0}+{1}j".format(self.real, self.imag)
        print(rendered)

    def __del__(self):
        # Runs when the instance is garbage-collected.
        print("Destructor Called")
# Create a new ComplexNumber object
c1 = ComplexNumber(2,3)
# Call getData() function
# Output: 2+3j
c1.getData()
# Create another ComplexNumber object and create a new attribute 'attr'
# attributes of an object can be created on the fly.
# (per-instance attributes live in that instance's __dict__ only)
c2 = ComplexNumber(5)
c2.attr = 10
# Output: (5, 0, 10)
print((c2.real, c2.imag, c2.attr))
# but c1 object doesn't have attribute 'attr'
# AttributeError: 'ComplexNumber' object has no attribute 'attr'
# c1.attr
# +
class TestClass:
    # Runs automatically when an instance is created.
    def __init__(self):
        print ("constructor")
    # Runs automatically when the instance is destroyed (e.g. by `del`).
    def __del__(self):
        print ("destructor")
# raise Exception("raise")
if __name__ == "__main__":
    obj = TestClass()
    # Dropping the only reference triggers __del__ immediately in CPython.
    del obj
# +
class TestClass:
    # A static method takes no self/cls and can be called on the class or an instance.
    @staticmethod
    def StaticMethod():
        print ("static")
    def __init__(self):
        print ("constructor")
    def __del__(self):
        print ("destructor")
if __name__ == "__main__":
    obj = TestClass()
    # the following both are ok.
    TestClass.StaticMethod()
    obj.StaticMethod()
    del obj
# -
| 2,815 |
/Web_Scraping.ipynb | 1315c7c742ab83ede2b15062ad5f491913a780eb | [] | no_license | ArijitChakrabarti/web-scraping-Arijit | https://github.com/ArijitChakrabarti/web-scraping-Arijit | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 12,019 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Teste 2
# ##### Agora é fazer testes e melhores validações dos 3 melhores modelos
# +
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.metrics import confusion_matrix, accuracy_score
from warnings import simplefilter
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
simplefilter(action='ignore', category=FutureWarning)
# -
# Load the trades dataset (semicolon-separated, UTF-16 encoded).
df = pd.read_csv('datasets/dataset_cler.csv', sep=";", encoding='utf-16')
df.shape
# Keep only finished trades: rows whose status is either 'Loss' or 'Gain'.
df.drop(df.loc[(df['status']!='Loss') & (df['status']!='Gain')].index.values, inplace=True)
df['status'].value_counts()
# +
# Features are everything except the target ('status') and the dropped columns.
X = df.drop(['take','status','oper'], axis=1) #df[train_features]
y = df['status']
x_columns = X.columns
# -
encoder = LabelEncoder()
encoder.fit(['inside', 'up', 'down'])
columns_categorical = X.select_dtypes(include=['object']).columns
# NOTE(review): the single encoder fitted on ['inside','up','down'] is reused for every
# object-typed column -- this assumes all of them contain only those three values; verify.
for col_cat in columns_categorical:
    X[col_cat] = encoder.transform(X[col_cat])
scaler = MinMaxScaler()
# NOTE(review): the scaler is fitted on the FULL dataset before the train/test split,
# which leaks test-set statistics into training; fitting on X_train only (or using a
# Pipeline inside cross-validation) would avoid the leakage.
scaler.fit(X)
X = scaler.transform(X)
# random_state=True is equivalent to random_state=1 (True == 1), so the split is reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=True)
# ## Trainig
# #### SVC
svc = SVC(gamma='auto')
# Mean 10-fold cross-validated accuracy of the SVM classifier on the scaled features.
scores = cross_val_score(svc, X, y, cv=10)
scores.mean()
# #### LogisticRegression
lgf = LogisticRegression()
# Same 10-fold evaluation for logistic regression, for comparison.
scores = cross_val_score(lgf, X, y, cv=10)
scores.mean()
"zLOC98aymwms" outputId="3a1dc05d-2da3-4d56-e341-5d19570e635d"
# Collect every table row of the parsed page; row 0 is assumed to be the header row.
tr_elements = doc.xpath('///tr')
#Create empty list
col=[]
i=0
#For each row, store each first element (header) and an empty list
# `col` becomes a list of (header_name, values_list) pairs, one per table column.
for t in tr_elements[0]:
    i+=1
    name=t.text_content()
    print ('%d:"%s"' %(i,name))
    col.append((name,[]))
# + colab={} colab_type="code" id="mmt9oMQ2nBOh"
# Populate each column's value list from table rows 1..N (row 0 was the header).
for j in range(1, len(tr_elements)):
    # T is our j'th row
    T = tr_elements[j]
    # If the row does not have exactly 6 cells, the //tr data is not from our table
    # (the original comment said "size 10", but the table scraped here has 6 columns).
    if len(T) != 6:
        break
    # Walk the row's cells in order; i is the index of the destination column.
    for i, t in enumerate(T.iterchildren()):
        data = t.text_content()
        # The first column holds row labels; for the remaining columns, convert
        # numeric text to int where possible, otherwise keep the raw string.
        if i > 0:
            try:
                data = int(data)
            except (ValueError, TypeError):
                pass
        # Append the data to the i'th column's value list
        col[i][1].append(data)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="MQjvcKBPoWRZ" outputId="65b27f53-2866-433d-a136-78cd0ac3cf0e"
# Sanity check: every column should have collected the same number of values.
[len(C) for (title,C) in col]
# +
# Build a DataFrame with one column per scraped (header, values) pair.
Dict={title:column for (title,column) in col}
df=pd.DataFrame(Dict)
# +
df.head()
| 3,183 |
/notebooks/default.ipynb | 5620f318554fd80397a2dfee37913a7af0fc5bf0 | [] | no_license | danilodorotheu/exemplo_modelo | https://github.com/danilodorotheu/exemplo_modelo | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 1,722 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Notebook de exemplo
#
# <br>description: desc
# <br>author: name
# <br>email: mail
# <br>date: date
# ## Seção Cabeçalho
# +
import sys
sys.path.insert(0, '../src')
from utils import util
params = util.getParams()
# -
# ### Instructions
#
# - Evitar a utilização de hardcode. Todos os parametros devem ser inseridos em /config/[env]-config.yaml
# - Todos os parametros são carregados automaticamente dentro da variável "params"
# ## Seção Desenvolvimento
def bark():
print(params)
| 783 |
/Regression Year vs Score.ipynb | 3835e7298d6f8dae117272c21b7fc00a3e9a16f4 | [] | no_license | mcordray1988/Freedom-Index | https://github.com/mcordray1988/Freedom-Index | 2 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 50,030 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Exercise 1
import numpy as np
import torch
from torch import nn, optim
from PIL import Image
import matplotlib.pyplot as plt
from torchvision import transforms, models
# +
imsize = 224
# Preprocessing pipeline: resize to 224, convert to a CxHxW tensor in [0,1],
# then normalize with the ImageNet channel means/stds expected by VGG19.
loader = transforms.Compose([
    transforms.Resize(imsize),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
# -
def image_loader(image_name):
    """Open an image file, preprocess it with `loader`, and add a batch dimension."""
    return loader(Image.open(image_name)).unsqueeze(0)
# Load the content and style source images as preprocessed 1x3xHxW tensors.
content_img = image_loader("images/dog.jpg")
style_img = image_loader("images/matisse.jpg")
# Inverse of `loader`: undo the ImageNet normalization (shift by -mean/std, scale by
# 1/std), then convert the tensor back into a PIL image for display.
unloader = transforms.Compose([
    transforms.Normalize((-0.485/0.229, -0.456/0.224, -0.406/0.225), (1/0.229, 1/0.224, 1/0.225)),
    transforms.ToPILImage()])
def tensor2image(tensor):
    """Convert a normalized 1x3xHxW tensor to a PIL image (the input is left unmodified)."""
    # Clone first so squeezing/denormalizing never touches the caller's tensor.
    return unloader(tensor.clone().squeeze(0))
# +
# Display the two source images in separate figures.
plt.figure()
plt.imshow(tensor2image(content_img))
plt.title("Content Image")
plt.show()
plt.figure()
plt.imshow(tensor2image(style_img))
plt.title("Style Image")
plt.show()
# -
# ## Exercise 2
# +
# Use only the convolutional part of a pretrained VGG19 as the feature extractor.
model = models.vgg19(pretrained=True).features
# Freeze the network: only the target image's pixels will be optimized, not VGG's weights.
for param in model.parameters():
    param.requires_grad_(False)
# -
# ## Exercise 3
print(model)
# Map VGG19 `features` child indices (as printed above) to friendly layer names:
# the conv*_1 layers carry the style representation and conv4_2 carries the content.
relevant_layers = {'0': 'conv1_1', '5': 'conv2_1', '10': 'conv3_1', '19': 'conv4_1', '21': 'conv4_2', '28': 'conv5_1'}
def features_extractor(x, model, layers):
    """Run `x` through every child layer of `model`, capturing selected activations.

    Arguments:
    x -- input tensor (e.g. a 1x3xHxW image batch)
    model -- module whose children are applied sequentially (e.g. vgg19.features)
    layers -- dict mapping a child index (str) to a friendly layer name

    Returns:
    features -- dict {friendly_name: activation tensor} for each requested layer
    """
    features = {}
    for index, layer in model._modules.items():
        # BUG FIX: the input must flow through EVERY layer (convs, ReLUs, pooling).
        # Previously `x = layer(x)` sat inside the `if`, so all non-requested layers
        # were silently skipped and the captured "features" were computed without
        # the intervening ReLU/pooling stages.
        x = layer(x)
        if index in layers:
            features[layers[index]] = x
    return features
# Fixed optimization targets: features of the content image and of the style image.
content_features = features_extractor(content_img, model, relevant_layers)
style_features = features_extractor(style_img, model, relevant_layers)
# Precompute the Gram matrix of every captured style-image layer; these are the
# fixed targets the style loss compares against during optimization.
style_grams = {}
for name, feature_map in style_features.items():
    _, channels, height, width = feature_map.shape
    flattened = feature_map.view(channels, height * width)
    style_grams[name] = torch.mm(flattened, flattened.t())
# Start the optimization from a copy of the content image; its pixels are the
# trainable parameters of the whole procedure.
target_img = content_img.clone().requires_grad_(True)
plt.figure()
plt.imshow(tensor2image(target_img))
plt.title("Target Image")
plt.show()
# ## Exercise 4
# Per-layer weights for the style loss (earlier layers contribute more).
style_weights = {'conv1_1': 1., 'conv2_1': 0.8, 'conv3_1': 0.6, 'conv4_1': 0.4, 'conv5_1': 0.2}
# Trade-off between content fidelity (alpha) and style match (beta).
alpha = 1
beta = 1e6
# Log/show progress every `print_statement` iterations.
print_statement = 500
# Only the target image's pixels are handed to the optimizer.
optimizer = torch.optim.Adam([target_img], lr=0.001)
iterations = 2000
# Optimization loop: repeatedly nudge the target image's pixels to minimize the
# combined content + style loss.
for i in range(1, iterations+1):
    # Features of the current target image at the relevant VGG layers.
    target_features = features_extractor(target_img, model, relevant_layers)
    # Content loss: distance between the target's and the content image's conv4_2 features.
    content_loss = torch.mean((target_features['conv4_2'] - content_features['conv4_2'])**2)
    style_losses = 0
    for layer in style_weights:
        target_feature = target_features[layer]
        _, d1, d2, d3 = target_feature.shape
        target_reshaped = target_feature.view(d1, d2 * d3)
        target_gram = torch.mm(target_reshaped, target_reshaped.t())
        style_gram = style_grams[layer]
        # Weighted squared distance between Gram matrices, normalized by feature size.
        style_loss = style_weights[layer] * torch.mean((target_gram - style_gram)**2)
        style_losses += style_loss / (d1 * d2 * d3)
    # BUG FIX: combine with the ACCUMULATED style_losses (all layers, normalized),
    # not style_loss, which held only the last layer's un-normalized term.
    total_loss = alpha * content_loss + beta * style_losses
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()
    if i % print_statement == 0 or i == 1:
        print('Total loss: ', total_loss.item())
        plt.imshow(tensor2image(target_img))
        plt.show()
# Final side-by-side comparison: original content image vs. the stylized result.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
ax1.imshow(tensor2image(content_img))
ax2.imshow(tensor2image(target_img))
plt.show()
State'].isin(states)].reset_index(drop=True)
print(states_df.count())
states_df
# +
# OUTPUT CLEAN DATASETS TO .CSV
# Persist the three cleaned views (US totals, regions, states) for the other notebooks.
# US TOTALS 1977-2018
totals_output_path = "../data_clean/per_capita_consumption_us_totals_1977_2018_clean.csv"
us_totals_df.to_csv(totals_output_path)
# REGIONS 1977-2018
regions_output_path = "../data_clean/per_capita_consumption_regions_1977_2018_clean.csv"
regions_df.to_csv(regions_output_path)
# STATES 1977-2018
states_output_path = "../data_clean/per_capita_consumption_states_1977_2018_clean.csv"
states_df.to_csv(states_output_path)
# +
# Bar chart 1: annual ethanol gallons consumed per capita in the US.
title = 'Annual US Ethanol Consumption Per Capita (1977-2018)'
y_gallons = totals_1977_2018_df['All Drinks (Ethanol Gallons Per Capita)']
# Size plot first
plt.rcParams["figure.figsize"]=(9,6)
# Plot
ax0 = plt.bar(years, y_gallons, zorder=3)
# Label
plt.title(title, fontsize=20)
plt.xlabel('Year', fontsize=15)
plt.ylabel('Ethanol Gallons Consumed Per Capita', fontsize=15)
# Format
plt.tight_layout()
plt.grid(zorder=0, alpha=0.5)
plt.rcParams['axes.facecolor'] = 'w'
plt.rcParams['savefig.facecolor'] = 'w'
# Save above plotted figure as .png
# plt.savefig('Images/JL - ' + title + '.png', bbox_inches='tight')
# fig0 = ax0.get_figure()
# fig0.savefig('Images/JL - ' + title + '.png', bbox_inches='tight')
plt.show()
# +
# Bar chart 2: annual number of standard drinks per capita (cross-check against chart 1).
title = 'Annual US Number of Drinks Per Capita (1977-2018)'
y_drinks = totals_1977_2018_df['Total Number of Drinks']
# Size plot first
plt.rcParams["figure.figsize"]=(9,6)
# Plot
plt.bar(years, y_drinks, zorder=3, color='green')
# Label
plt.title(title, fontsize=20)
plt.xlabel('Year', fontsize=15)
plt.ylabel('Number of Drinks (Beer/Wine Glass/Spirit Shot)', fontsize=15)
# Format
plt.tight_layout()
plt.grid(zorder=0, alpha=0.5)
plt.rcParams['axes.facecolor'] = 'w'
plt.rcParams['savefig.facecolor'] = 'w'
# Save above plotted figure as .png
# plt.savefig('Images/JL - ' + title + '.png', bbox_inches='tight')
plt.show()
# -
# * Creating US number of drinks per capita bar chart was mainly to check for any visible differences to US ethanol consumption per capita bar graph.
# * Total volume numbers were
# +
title = 'US Total Ethanol Consumption Per Capita, Bar (1977-2018)'
# Size plot first
plt.rcParams["figure.figsize"]=(9,6)
# Plot
plt.bar(years, y_gallons, zorder=3)
# Calculate linear regression
(slope, intercept, rvalue, pvalue, stderr) = linregress(years, y_gallons)
regress_values = years * slope + intercept
# line_eq is built for display but is not annotated onto the chart below.
line_eq = 'y = ' + str(round(slope, 2)) + 'x + ' + str(round(intercept, 2))
# Calculate R-squared
# NOTE(review): pearsonr returns the correlation coefficient r, so despite its name
# `r_squared` holds r (hence the negative value discussed below), not r**2.
correlation = st.pearsonr(years, y_gallons)
r_squared = round(correlation[0], 2)
# Plot linear regression
plt.plot(years, regress_values, 'red', lw=3, ls='--', zorder=3)
# Label
plt.title(title, fontsize=20)
plt.xlabel('Year', fontsize=15)
plt.ylabel('Ethanol Gallons Consumed Per Capita', fontsize=15)
# Format
plt.tight_layout()
plt.grid(zorder=0, alpha=0.5)
plt.rcParams['axes.facecolor'] = 'w'
plt.rcParams['savefig.facecolor'] = 'w'
# Annotate
plt.annotate(f'R-Squared = {r_squared}', (2010,2.75), color="red", fontsize=13)
# Save above plotted figure as .png
plt.savefig('Images/JL - ' + title + '.png', bbox_inches='tight')
plt.show()
# -
# * The R-squared value is -0.66 which tells us there is moderate correlation between time and alcohol consumption. The negative value means consumption per capita has decreased over between 1977 to 2018.
# * The correlation is not strong enough to indicate causation, but this does verify a decrease in consumption per capita in recent decades.
# * A variety of factors such as health studies/education, economic factors, or laws related to alcohol or other recreational drugs may all play roles in the decreased alcohol consumption per capita. However, we cannot determine causation with any certainty based on the consumption data alone.
# +
print(regions)
# Split the regional dataframe into one frame per census region for later plotting.
ne_reg = regions_df.loc[regions_df['State']=='northeast region']
mw_reg = regions_df.loc[regions_df['State']=='midwest region']
s_reg = regions_df.loc[regions_df['State']=='south region']
w_reg = regions_df.loc[regions_df['State']=='west region']
# -
# # Alcohol Type Consumption Comparison
# +
title_ax1 = 'US Total Ethanol Consumption Per Capita, Stacked (1977-2018)'
# Per-beverage colors for the stacked bars.
color = {'Beer (Ethanol Gallons Per Capita)': 'tab:blue',
         'Wine (Ethanol Gallons Per Capita)': 'maroon',
         'Spirit (Ethanol Gallons Per Capita)': 'orange'}
type_gallons = ['Beer (Ethanol Gallons Per Capita)',
alizing it with bar graph -
# +
# %matplotlib inline
# NOTE(review): escaped the IPython magic with `# %` (jupytext convention); the raw
# `% matplotlib inline` line is a SyntaxError when this script runs as plain Python.
mhonehot = mh_venue.set_index('Neighborhood')
#converting dataframe with categorical data of column Category Name into numerical data with get_dummies
mhonehot = pd.get_dummies(mhonehot['Category Name'])
# Per-neighborhood counts of each category, then keep only the three gym-related ones.
mhonehot = mhonehot.groupby('Neighborhood').sum()
mhonehot = mhonehot.loc[:,['Gym / Fitness Center', 'Gym', 'Yoga Studio']]
mhonehot.plot(kind='bar', y=['Gym / Fitness Center', 'Gym', 'Yoga Studio'], figsize=(20,10))
# -
# As seen Flatiron and Midtown South have maximum number of gym/fitness centers.
#
# Unique category types of venues returned by Foursquare -
# List every distinct Foursquare category name returned for the venues.
mh_venue['Category Name'].unique()
# Venues in the gym/fitness category are needed, but only those that are proper fitness centers — places such as Weight Loss Centers, Playgrounds, Basketball Courts, etc. are not direct competitors and need to be ignored. Therefore only venues whose category name corresponds to a proper fitness center are included, and among them the venues specifically categorized as Gym or Yoga Studio are identified, since information on Gyms and Yoga Studios in each neighborhood is required.
def is_fitness_place(category_id, category_name, specific_filter=None):
    """Classify a venue category as a proper fitness center and/or a gym/yoga place.

    Arguments:
    category_id -- Foursquare category id of the venue
    category_name -- Foursquare category name of the venue
    specific_filter -- optional collection of category ids that mark gym/yoga places

    Returns:
    (is_fitnessplace, is_gymyoga) -- booleans; a venue whose id is in
    specific_filter counts as both a fitness place and a gym/yoga place.
    """
    # Category names (lower-case) that count as proper fitness centers.
    # A set membership test replaces the original manual scan-and-break loop.
    words = {'gym / fitness center', 'yoga studio', 'gym', 'boxing gym', 'pilates studio', 'climbing gym', 'cycle studio',
             'gymnastics gym', 'athletics & sports', 'club house', 'outdoor gym', 'community center', 'college gym', 'recreation center'}
    is_fitnessplace = category_name.lower() in words
    # Checking for the specific filter, i.e. whether it is a gym/yoga place or not.
    is_gymyoga = specific_filter is not None and category_id in specific_filter
    if is_gymyoga:
        is_fitnessplace = True
    return is_fitnessplace, is_gymyoga
# Finding proper fitness centers and gym/yoga places from venues returned by Foursquare -
# +
#Category Ids for Gym and Yoga Studio based on Foursquare
gym_yoga_categories =['4bf58dd8d48988d176941735','4bf58dd8d48988d102941735']
# For neighborhood locations - getting nearby fitness centers; Maintaining dictionaries of all found fitness centers and all found Gyms/Yoga Studios
# NOTE(review): the "previous neighborhood" flush logic below assumes mh_venue's rows
# are grouped contiguously by Neighborhood -- if a neighborhood appears in two separate
# runs of rows, the later run overwrites the earlier entry; verify ordering upstream.
neighborhood_gymyoga_places = []
all_gymyoga_places = {}
neighborhood_fitness_places = []
all_fitness_places = {}
previous = mh_venue.loc[0, 'Neighborhood']
for index,row in mh_venue.iterrows():
    #when iteration for different neighborhood than previous
    # flush the accumulated lists into the dictionaries and start fresh lists
    if(previous != row['Neighborhood']):
        all_fitness_places[previous] = neighborhood_fitness_places
        all_gymyoga_places[previous] = neighborhood_gymyoga_places
        neighborhood_fitness_places = []
        neighborhood_gymyoga_places = []
        previous = row['Neighborhood']
    is_fitnessplace, is_gymyoga = is_fitness_place(row['Category Id'],row['Category Name'],gym_yoga_categories)
    #adding in dictionaries only when venue is gym or fitness center
    if is_fitnessplace:
        fitness_place = (row['Neighborhood'],row['Venue Name'],row['Venue_Lat'],row['Venue_Lng'],row['Address'],row['Distance'],is_gymyoga)
        neighborhood_fitness_places.append(fitness_place)
        if is_gymyoga:
            neighborhood_gymyoga_places.append(fitness_place)
# final flush: store the last neighborhood's accumulated lists
all_fitness_places[previous] = neighborhood_fitness_places
all_gymyoga_places[previous] = neighborhood_gymyoga_places
# -
#Exploring dictionary all_fitness_places
# index 12 picks an arbitrary neighborhood for a spot check
print('List of fitness centers in neighborhood : ')
print('Total: ',len(list(all_fitness_places.items())[12][1]))
list(all_fitness_places.items())[12]
#Exploring dictionary all_gymyoga_places
print('List of Gym/Yoga Studios in neighborhood : ')
print('Total: ',len(list(all_gymyoga_places.items())[12][1]))
list(all_gymyoga_places.items())[12]
# +
import numpy as np
# Aggregate counts across all neighborhoods.
total_fitness = np.array([len(all_fitness_places[key]) for key in all_fitness_places]).sum()
total_gym = np.array([len(all_gymyoga_places[key]) for key in all_gymyoga_places]).sum()
print('Total Fitness Places around Manhattan Neighborhoods : ', total_fitness)
print('Total Gyms & Yoga Studios around Manhattan Neighborhoods : ',total_gym)
print('Percentage of Gyms and Yoga Studios : ', total_gym/total_fitness * 100)
print('Average Number of Fitness Places around each Neighborhood : ', np.array([len(all_fitness_places[key]) for key in all_fitness_places]).mean())
# -
# Showing all the fitness centers(blue) present in Manhattan and Gym/Yoga studios(red) in different colors -
# Render all fitness places on a folium map centred on Times Square:
# Gym/Yoga Studios in red, all other fitness centers in blue.
map_mh = folium.Map(location=mh_center, zoom_start=13)
folium.Marker(mh_center,popup='Times Square').add_to(map_mh)
for key,values in all_fitness_places.items():
    for fit in values:
        # tuple index 6 holds the is_gymyoga flag set when the dicts were built
        is_gymyoga = fit[6]
        color = 'red' if is_gymyoga else 'blue'
        # indices 2 and 3 are the venue latitude/longitude
        folium.CircleMarker([fit[2],fit[3]],radius=1,color=color,fill=True,fill_color=color,fill_opacity=1).add_to(map_mh)
map_mh
# All the fitness centers in neighborhoods of Manhattan are located, and among them which ones are gym/yoga places are determined. Also, the details about fitness centers present in the vicinity of every neighborhood center are detected.
#
# This concludes the data gathering phase. Now, this data is ready to use for analysis to produce the report on optimal locations for opening a new Gym/Yoga Studio.
# ## Methodology
# The focus is to detect areas of Manhattan that have low fitness center density, particularly those with low number of Gym/Yoga Studios. Here, the analysis is limited to area ~9km around city center.
#
# In first step, required data is collected:
# - Location and category of every fitness center
# - Determining Gym/Yoga Studio with the help of Foursquare categorization
#
# Second step of analysis includes:
# - Calculation and exploration of 'fitness centers density' across different areas of Manhattan using heatmaps to identify a few promising areas close to center with low number of fitness centers in general (and no Gym/Yoga Studio in vicinity)
#
# Third step includes:
# - Finding most promising areas and within those creating clusters of locations that meet some basic requirements :
# - No more than 95 fitness centers within radius of 1 km, and
# - Locations without Gym/Yoga Studios in radius of 150 meters.
# - Plotting those locations on map and creating clusters using k-means of those locations to identify neighborhoods/addresses for optimal venue location.
# ## Analysis
# Performing some basic explanatory data analysis and deriving some additional information from raw data.
#
# Counting the number of fitness centers in every neighborhood -
#neighborhood with their count of fitness centers
neighbor_count = { key:len(all_fitness_places[key]) for key in all_fitness_places }
# two-column frame: (neighborhood, count)
neighbor_count_df = pd.DataFrame.from_dict(list(neighbor_count.items()))
neighbor_count_df.columns = ['Neighborhood','Number of Fitness Centers']
neighbor_count_df.head()
manhattan_data_df = manhattan_data.copy()
#merging dataframes on Neighborhood column to include count of fitness centers
# inner join drops neighborhoods without any fitness-center count entry
manhattan_data_df = pd.merge(manhattan_data_df,neighbor_count_df,left_on='Neighborhood',right_on='Neighborhood',how='inner')
print('Average number of Fitness Centers in every neighborhood within 1 km radius:', manhattan_data_df['Number of Fitness Centers'].mean())
manhattan_data_df.head()
# Calculating the distance(in meters) to nearest Gym/Yoga Studios from every neighborhood (not only those within 1 km - finding distance to closest one, regardless of how distant it is) -
# +
# For each neighborhood center, brute-force search all gym/yoga places for
# the minimum great-circle distance in meters.
# NOTE(review): min_distance starts at 10000, which silently caps the result
# at 10 km even though the markdown says "regardless of how distant"; fine
# within Manhattan, but confirm if reused elsewhere.
distances_to_gymyoga = []
for lat, lng in zip(manhattan_data_df['Latitude'], manhattan_data_df['Longitude']):
    min_distance = 10000
    location = (lat,lng)
    for key,values in all_gymyoga_places.items():
        for gym in values:
            # tuple indices 2/3 are the venue latitude/longitude
            latlng = (gym[2],gym[3])
            #distance between neighborhood center and gym/yoga place, in meters
            d = distance.distance(location, latlng).km * 1000
            if d<min_distance:
                min_distance = d
    distances_to_gymyoga.append(min_distance)
#adding new column with minimum distance
manhattan_data_df['Distance to G | 16,384 |
/ML lab10 MNIST NN ReLU Xavier Dropout and Adam.ipynb | 40f709b0012e891222581bc5f8446ef089cbbac6 | [] | no_license | sangjoo3627/DeepLearning | https://github.com/sangjoo3627/DeepLearning | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 8,777 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] deletable=true editable=true
# The %matplotlib magic command is used here to enable plotting in the current notebook. The inline backend will embed plots inside the notebook.
# + deletable=true editable=true
# %matplotlib inline
# + [markdown] deletable=true editable=true
# #### Import necessary modules¶
# + deletable=true editable=true
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import matplotlib.cm as cm # colormaps for plottting
# + [markdown] deletable=true editable=true
# #### Read the Broadmead data. Select variables of interest and create dataframes.
# + deletable=true editable=true
# open netcdf file
# xarray opens the remote Broadmead station dataset over OPeNDAP (lazy reads
# from the THREDDS server; nothing is downloaded until values are accessed)
data_url = 'http://hydromet-thredds.princeton.edu:9000/thredds/dodsC/MonitoringStations/broadmead.nc'
ds = xr.open_dataset(data_url)
# select flux data
flux_ds = ds[['Hs', 'LE_wpl', 'agc_Avg', 'Fc_wpl', 'tau', 'u_star']]
# select 4 components of radiation
radiation_ds = ds[['Rl_downwell_Avg', 'Rl_upwell_Avg', 'Rs_downwell_Avg', 'Rs_upwell_Avg', 'albedo_Avg']]
# Select standard meteorological variables - temperature, pressure, specific humidity, wind speed and direction
met_ds = ds[['h2o_hmp_mean', 'press_mean', 'rho_a_mean', 't_hmp_mean', 'wnd_spd', 'wnd_dir_compass']]
# select rain gages
rain_ds = ds[['Rain_1_mm_Tot', 'Rain_2_mm_Tot']]
# convert to a pandas.dataframe object (indexed by time)
radiation_df = radiation_ds.to_dataframe()
flux_df = flux_ds.to_dataframe()
met_df = met_ds.to_dataframe()
rain_df = rain_ds.to_dataframe()
# + deletable=true editable=true
#print ds
# + deletable=true editable=true
# Human-readable descriptions (with units) for each selected variable.
# BUG FIX: the original dict repeated the key 'Rain_1_mm_Tot' for the gage-2
# description, so gage 1's entry was clobbered and 'Rain_2_mm_Tot' (selected
# into rain_ds above) had no entry at all.
names = { 'Hs': 'Sensible Heat Flux using Sonic Temperature (w/m**2)',
 'LE_wpl': 'Latent Heat Flux - with Webb correction (w/m**2)',
 'agc_Avg': 'Automatic Gain Control (dimensionless)',
 'Fc_wpl': 'CO2 flux (mg/(m^2 s))',
 'tau': 'Shear Stress, or momentum flux (kg/(m s^2))',
 'u_star': 'Shear velocity (aka friction velocity) (m/s)',
 'Rl_downwell_Avg': 'Downwelling longwave (w/m**2)',
 'Rl_upwell_Avg': 'Upwelling longwave (w/m**2)',
 'Rs_downwell_Avg': 'Downwelling solar (w/m**2)',
 'Rs_upwell_Avg': 'Upwelling solar (w/m**2)',
 'albedo_Avg': 'Albedo',
 'h2o_hmp_mean': 'water vapor density (g/m**3)',
 'press_mean': 'air pressure (kPa)',
 'rho_a_mean': 'air density (kg/m**3)',
 't_hmp_mean': 'air temperature (degrees C)',
 'wnd_spd': 'wind speed (m/s)',
 'wnd_dir_compass': 'wind direction (degrees from north)',
 'Rain_1_mm_Tot': 'rainfall - gage 1 (mm)',
 'Rain_2_mm_Tot': 'rainfall - gage 2 (mm)',
 }
names
# + [markdown] deletable=true editable=true
# #### Summary statistics for flux variables
# + deletable=true editable=true
# Summary statistics (count/mean/std/quantiles) for the raw flux variables.
flux_df[['LE_wpl', 'Hs', 'agc_Avg', 'Fc_wpl', 'tau', 'u_star']].describe()
# + [markdown] deletable=true editable=true
# #### Create flux data set that uses Automatic Gain Control to remove "bad measurements"
# + deletable=true editable=true
# Keep only timestamps with AGC below 65 (higher gain indicates a degraded
# sonic-anemometer signal), then drop the removed times entirely.
flux_ds_agc = flux_ds.where((flux_ds['agc_Avg']<65)).dropna('time')
flux_df_agc = flux_ds_agc.to_dataframe()
# check out what these times look like
flux_df_agc[['LE_wpl', 'Hs', 'Fc_wpl']].describe()
# + [markdown] deletable=true editable=true
# #### Summary statistics for radiation variables
# + deletable=true editable=true
radiation_df[['Rl_downwell_Avg', 'Rl_upwell_Avg', 'Rs_downwell_Avg', 'Rs_upwell_Avg', 'albedo_Avg']].describe()
# + [markdown] deletable=true editable=true
# #### Summary statistics for surface meteorological variables
# + deletable=true editable=true
met_df[['h2o_hmp_mean', 'press_mean', 'rho_a_mean', 't_hmp_mean', 'wnd_spd', 'wnd_dir_compass']].describe()
# + [markdown] deletable=true editable=true
# #### Plot time series of sensible heat flux and latext heat flux
# + deletable=true editable=true
# One week (late Aug 2016) of sensible and latent heat flux on shared axes.
flux_df_agc['Hs'].loc[slice('2016-08-24', '2016-08-31')].plot()
flux_df_agc['LE_wpl'].loc[slice('2016-08-24', '2016-08-31')].plot()
plt.title("")
plt.ylim((-50, 500))
plt.ylabel('Flux [W/m^2]')
plt.legend(loc='upper left');
# + [markdown] deletable=true editable=true
# #### Plot time series of temperature and water vapor density
# + deletable=true editable=true
# Same week: water vapor density on the left axis, temperature on a
# secondary right axis.
met_df['h2o_hmp_mean'].loc[slice('2016-08-24', '2016-08-31')].plot()
plt.title("")
plt.ylim((10, 25))
plt.ylabel('Water vapor density [g/m^3] blue')
met_df['t_hmp_mean'].loc[slice('2016-08-24', '2016-08-31')].plot(secondary_y='t_hmp_mean')
plt.title("")
plt.ylim((15, 40))
plt.ylabel('Temperature [degrees C] green')
#plt.legend(loc='upper left');
# + [markdown] deletable=true editable=true
# #### Plot time series of downwelling radiation and latent heat flux
# + deletable=true editable=true
# Two days (Feb 2017): downwelling solar on the left axis, latent heat flux
# on a secondary right axis, to show how the two co-vary diurnally.
radiation_df['Rs_downwell_Avg'].loc[slice('2017-02-24', '2017-02-25')].plot()
plt.title("")
plt.ylim((0, 1000))
plt.ylabel('Downwelling Solar (W/m^2) [blue]')
#plt.legend(loc='upper left');
flux_df_agc['LE_wpl'].loc[slice('2017-02-24', '2017-02-25')].plot(secondary_y='LE_wpl')
plt.title("")
plt.ylim((0, 500))
plt.ylabel('Latent Heat Flux [W/m^2]')
#plt.legend(loc='upper right');
# + [markdown] deletable=true editable=true
# #### Plot time series of albedo
# + deletable=true editable=true
# One week of surface albedo (upwelling / downwelling shortwave ratio).
radiation_df['albedo_Avg'].loc[slice('2016-07-17', '2016-07-24')].plot()
plt.title("")
plt.ylim((0.10, 0.25))
plt.ylabel('Albedo')
# + [markdown] deletable=true editable=true
# #### Time series of shear velocity and sensible heat flux
# + deletable=true editable=true
# Two days: shear (friction) velocity on the left axis, sensible heat flux
# on a secondary right axis.
flux_df['u_star'].loc[slice('2016-08-28', '2016-08-30')].plot()
plt.title("")
plt.ylim((0, 0.5))
plt.ylabel('Shear Velocity (m/s) [blue]')
#plt.legend(loc='upper left');
flux_df_agc['Hs'].loc[slice('2016-08-28', '2016-08-30')].plot(secondary_y='Hs')
plt.title("")
plt.ylim((-100, 300))
plt.ylabel('Sensible Heat Flux [W/m^2]')
#plt.legend(loc='upper right');
# + [markdown] deletable=true editable=true
# #### Time series of CO2 flux and latent heat flux
# + deletable=true editable=true
# One week: CO2 flux on the left axis, latent heat flux on a secondary axis.
flux_df_agc['Fc_wpl'].loc[slice('2016-08-24', '2016-08-31')].plot()
plt.title("")
plt.ylim((-1, 5))
plt.ylabel('CO2 Flux')
#plt.legend(loc='upper right');
flux_df_agc['LE_wpl'].loc[slice('2016-08-24', '2016-08-31')].plot(secondary_y='LE_wpl')
plt.title("")
plt.ylim((-50, 500))
plt.ylabel('Latent Heat Flux [W/m^2]')
#plt.legend(loc='upper right');
# + [markdown] deletable=true editable=true
# #### Scatterplot of sensible heat flux and latent heat flux
# + deletable=true editable=true
# Scatter of sensible vs latent heat flux over the full AGC-filtered record.
flux_df_agc.plot.scatter('Hs', 'LE_wpl')
plt.ylim((0, 800))
plt.xlim((-200, 800))
plt.show()
# + [markdown] deletable=true editable=true
# #### Seasonal cycle of sensible heat flux and latent heat flux
# + deletable=true editable=true
# Side-by-side monthly boxplots; whiskers at the 1st/99th percentiles,
# outlier markers suppressed (sym='').
fig, axes = plt.subplots(ncols=2, nrows=1, figsize=(14,4))
flux_df_agc.boxplot(column='Hs', by=flux_df_agc.index.month,
                    whis=[1, 99], sym='', ax=axes[0])
axes[0].set_xlabel('months')  # set the xlabel on the first subplot
axes[0].set_title('Hs (with agc)')
axes[0].set_ylim(-100, 500)  # set the y limits on the first subplot
flux_df_agc.boxplot(column='LE_wpl', by=flux_df_agc.index.month,
                    whis=[1, 99], sym='', ax=axes[1])
axes[1].set_xlabel('months')
axes[1].set_title('LE_wpl (with agc)')
axes[1].set_ylim(-100, 500)
plt.suptitle('')  # this makes the super title for the whole figure blank
plt.show()
# + [markdown] deletable=true editable=true
# #### Seasonal cycle of upwelling longwave radiation
# + deletable=true editable=true
# pivot the data to make the index the day of year and the columns the years
pv = pd.pivot_table(radiation_df,
                    index=radiation_df.index.dayofyear,
                    columns=radiation_df.index.year,
                    values='Rl_upwell_Avg', aggfunc='mean')
# plot the aggregated data, since we don't really care which year is which
# we can turn the legend off and set all the colors to gray
pv.plot(figsize=(12, 5), legend=False, color='gray')
# plot the data that has been averaged across years
pv.mean(axis=1).plot(linewidth=2, color='red')
plt.ylabel('Upwelling longwave radiation [W/m^2]')
plt.xlabel('Days of year')
plt.title('Annual cycle of upwelling longwave radiation over period of record')
plt.show()
# + [markdown] deletable=true editable=true
# #### Diurnal cycle of downwelling shortwave radiation by month
# + deletable=true editable=true
var = 'Rs_downwell_Avg'
title = 'Monthly diurnal cycle for downwelling shortwave radiation'
# pivot the data to make the index time of day and the columns the months
pv = pd.pivot_table(radiation_df,
                    index=radiation_df.index.time,
                    columns=radiation_df.index.month,
                    values=var)
# set the colors to a discretized circular colormap to fit cyclical data
# (12 evenly spaced hues from the hsv map, one per month)
pv.plot(figsize=(12, 5), color=cm.hsv(np.linspace(0, 1, 12)), linewidth=2, title=title)
plt.legend(title='Months', loc='upper left')
# using this method we can create labels that depend only on var
plt.ylabel('{var} [{units}]'.format(var=var, units=ds[var].units))
plt.savefig('../Figures/diurnalCycle_RsDownwell.png', dpi=300)
plt.show()
# + [markdown] deletable=true editable=true
# Diurnal cycle of 4 components of the radiation balance. To do this we create a loop that goes through each component, creates a pivot table with index=time and columns=month, then take the mean across months and concatenates the dataframe with those already computed. This concatenated dataframe is what we ultimately plot.
# + deletable=true editable=true
# Mean diurnal cycle (averaged across months) for each of the four radiation
# components, collected into one dataframe and plotted together.
component_frames = []
for v in ['Rl_downwell_Avg', 'Rl_upwell_Avg', 'Rs_downwell_Avg', 'Rs_upwell_Avg']:
    # pivot: rows = time of day, columns = month, values = component v
    pv = pd.pivot_table(radiation_df,
                        index=radiation_df.index.time,
                        columns=radiation_df.index.month,
                        values=v)
    # average across months to get one diurnal profile per component
    component_frames.append(pd.DataFrame(pv.mean(axis=1), columns=[v]))
# Concatenate once after the loop: the original repeatedly concatenated onto
# the accumulated frame (starting from a None seed), which copies everything
# already collected on every iteration.
df = pd.concat(component_frames, axis=1)
df.plot(figsize=(12, 5), linewidth=2)
plt.show()
# + [markdown] deletable=true editable=true
# #### Create a larger dataframe for comparing different variables
# + deletable=true editable=true
# Combine flux and radiation variables into one dataframe so they can be
# scattered against each other.
flux_rad_ds = ds[['Hs', 'LE_wpl', 'agc_Avg','Fc_wpl', 'tau', 'u_star', 'Rl_downwell_Avg', 'Rl_upwell_Avg', 'Rs_downwell_Avg', 'Rs_upwell_Avg', 'albedo_Avg']]
flux_rad_df = flux_rad_ds.to_dataframe()
# Downwelling solar vs sensible heat flux.
flux_rad_df.plot.scatter('Rs_downwell_Avg', 'Hs')
plt.xlim((0, 1200))
plt.ylim((-200, 600))
plt.show()
# + deletable=true editable=true
# Downwelling solar vs latent heat flux.
flux_rad_df.plot.scatter('Rs_downwell_Avg', 'LE_wpl')
plt.xlim((0, 1200))
plt.ylim((0, 800))
plt.show()
| 10,884 |
/goal_detection/.ipynb_checkpoints/goal_detection_alt-checkpoint.ipynb | 1f9725886bef7a8d8ebee582691a6bf4ec2206b8 | [] | no_license | robocupmipt/nao_cv | https://github.com/robocupmipt/nao_cv | 2 | 0 | null | 2019-06-03T01:03:55 | 2019-05-28T18:07:16 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 10,840 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Autoencoders
#
# In this notebook we will explore autoencoder models. These are models in which the inputs are *encoded* to some intermediate representation before this representation is then *decoded* to try to reconstruct the inputs. They are example of a model which uses an unsupervised training method and are both interesting as a model in their own right and as a method for pre-training useful representations to use in supervised tasks such as classification. Autoencoders were covered as a pre-training method as additional material in the [sixth lecture slides](http://www.inf.ed.ac.uk/teaching/courses/mlp/2018-19/mlp06-enc.pdf).
# ## Exercise 1: Linear autoencoders
#
# For the first exercise we will consider training a simple 'contractive' autoencoder - that is one in which the hidden representation is smaller in dimension than the input and the objective is to minimise the mean squared error between the original inputs and reconstructed inputs. To begin with we will consider models in which the encoder and decoder are both simple affine transformations.
#
# When training an autoencoder the target outputs for the model are the original inputs. A simple way to integrate this in to our `mlp` framework is to define a new data provider inheriting from a base data provider (e.g. `MNISTDataProvider`) which overrides the `next` method to return the inputs batch as both inputs and targets to the model. A data provider of this form has been provided for you in `mlp.data_providers` as `MNISTAutoencoderDataProvider`.
#
# Use this data provider to train an autoencoder model with a 50 dimensional hidden representation and both encoder and decoder defined by affine transformations. You should use a sum of squared differences error and a basic gradient descent learning rule with learning rate 0.01. Initialise the biases to zero and use a uniform Glorot initialisation for both layers weights. Train the model for 25 epochs with a batch size of 50.
import numpy as np
import logging
import mlp.layers as layers
import mlp.models as models
import mlp.optimisers as optimisers
import mlp.errors as errors
import mlp.learning_rules as learning_rules
import mlp.data_providers as data_providers
import mlp.initialisers as initialisers
import matplotlib.pyplot as plt
# %matplotlib inline
# Using the function defined in the cell below (from the first lab notebook), plot a batch of the original images and the autoencoder reconstructions.
def show_batch_of_images(img_batch, fig_size=(3, 3), num_rows=None):
    """Tile a batch of greyscale images into a grid and display it.

    Args:
        img_batch: Array of shape (batch_size, im_height, im_width) with
            pixel values in [0, 1].
        fig_size: Matplotlib figure size in inches, (width, height).
        num_rows: Number of grid rows; if None, a square(ish) grid is chosen.

    Returns:
        Tuple (fig, ax) of the created matplotlib figure and axis.
    """
    fig = plt.figure(figsize=fig_size)
    batch_size, im_height, im_width = img_batch.shape
    if num_rows is None:
        # calculate grid dimensions to give square(ish) grid
        num_rows = int(batch_size**0.5)
    num_cols = int(batch_size * 1. / num_rows)
    if num_rows * num_cols < batch_size:
        num_cols += 1
    # intialise empty array to tile image grid into
    tiled = np.zeros((im_height * num_rows, im_width * num_cols))
    # iterate over images in batch + indexes within batch
    for i, img in enumerate(img_batch):
        # calculate grid row and column indices (grid fills column-major)
        r, c = i % num_rows, i // num_rows
        # BUG FIX: column offsets must use im_width, not im_height — the
        # original sliced columns by im_height and only worked for square
        # images.
        tiled[r * im_height:(r + 1) * im_height,
              c * im_width:(c + 1) * im_width] = img
    ax = fig.add_subplot(111)
    ax.imshow(tiled, cmap='Greys', vmin=0., vmax=1.)
    ax.axis('off')
    fig.tight_layout()
    plt.show()
    return fig, ax
# ### Optional extension: principal components analysis
#
# *This section is provided for the interest of those also sitting MLPR or otherwise already familiar with eigendecompositions and PCA. Feel free to skip over if this doesn't apply to you (or even if it does).*
#
# For a linear (affine) contractive autoencoder model trained with a sum of squared differences error function there is an analytic solution for the optimal model parameters corresponding to [principal components analysis](https://en.wikipedia.org/wiki/Principal_component_analysis).
#
# If we have a training dataset of $N$ $D$-dimensional vectors $\left\lbrace \boldsymbol{x}^{(n)} \right\rbrace_{n=1}^N$, then we can calculate the empiricial mean and covariance of the training data using
#
# \begin{equation}
# \boldsymbol{\mu} = \frac{1}{N} \sum_{n=1}^N \left[ \boldsymbol{x}^{(n)} \right]
# \qquad
# \text{and}
# \qquad
# \mathbf{\Sigma} = \frac{1}{N}
# \sum_{n=1}^N \left[
# \left(\boldsymbol{x}^{(n)} - \boldsymbol{\mu} \right)
# \left(\boldsymbol{x}^{(n)} - \boldsymbol{\mu} \right)^{\rm T}
# \right].
# \end{equation}
#
# We can then calculate an [eigendecomposition](https://en.wikipedia.org/wiki/Eigendecomposition_of_a_matrix) of the covariance matrix
# \begin{equation}
# \mathbf{\Sigma} = \mathbf{Q} \mathbf{\Lambda} \mathbf{Q}^{\rm T}
# \qquad
# \mathbf{Q} = \left[
# \begin{array}{cccc}
# \uparrow & \uparrow & \cdots & \uparrow \\
# \boldsymbol{q}_1 & \boldsymbol{q}_2 & \cdots & \boldsymbol{q}_D \\
# \downarrow & \downarrow & \cdots & \downarrow \\
# \end{array}
# \right]
# \qquad
# \mathbf{\Lambda} = \left[
# \begin{array}{cccc}
# \lambda_1 & 0 & \cdots & 0 \\
# 0 & \lambda_2 & \cdots & \vdots \\
# \vdots & \vdots & \ddots & 0 \\
# 0 & 0 & \cdots & \lambda_D \\
# \end{array} \right]
# \end{equation}
#
# with $\mathbf{Q}$ an orthogonal matrix, $\mathbf{Q}\mathbf{Q}^{\rm T} = \mathbf{I}$, with columns $\left\lbrace \boldsymbol{q}_d \right\rbrace_{d=1}^D$ corresponding to the eigenvectors of $\mathbf{\Sigma}$ and $\mathbf{\Lambda}$ a diagonal matrix with diagonal elements $\left\lbrace \lambda_d \right\rbrace_{d=1}^D$ the corresponding eigenvalues of $\mathbf{\Sigma}$.
#
# Assuming the eigenvalues are ordered such that $\lambda_1 < \lambda_2 < \dots < \lambda_D$ then the top $K$ principle components of the inputs (eigenvectors with largest eigenvalues) correspond to $\left\lbrace \boldsymbol{q}_d \right\rbrace_{d=D + 1 - K}^D$. If we define a $D \times K$ matrix $\mathbf{V} = \left[ \boldsymbol{q}_{D + 1 - K} ~ \boldsymbol{q}_{D + 2 - K} ~\cdots~ \boldsymbol{q}_D \right]$ then we can find the projections of a (mean normalised) input vector on to the selected $K$ principle components as $\boldsymbol{h} = \mathbf{V}^{\rm T}\left( \boldsymbol{x} - \boldsymbol{\mu}\right)$. We can then use these principle component projections to form a reconstruction of the original input just in terms of the $K$ top principle components using $\boldsymbol{r} = \mathbf{V} \boldsymbol{h} + \boldsymbol{\mu}$. We can see that this is just a sequence of two affine transformations and so is directly analagous to a model with two affine layers and with $K$ dimensional outputs of the first layer / inputs to second.
#
# The function defined in the cell below will calculate the PCA solution for a set of input vectors and a defined number of components $K$. Use it to calculate the top 50 principle components of the MNIST training data. Use the returned matrix and mean vector to calculate the PCA based reconstructions of a batch of 50 MNIST images and use the `show_batch_of_images` function to plot both the original and reconstructed inputs alongside each other. Also calculate the sum of squared differences error for the PCA solution on the MNIST training set and compare to the figure you got by gradient descent based training above. Will the gradient based training produce the same hidden representations as the PCA solution if it is trained to convergence?
def get_pca_parameters(inputs, num_components=50):
    """Return the top principal directions of a data matrix.

    Args:
        inputs: Array of shape (N, D), one row per data point.
        num_components: Number of leading components K to keep.

    Returns:
        Tuple (V, mean) where V has shape (D, K) with the eigenvectors of
        the data scatter matrix as columns (ascending eigenvalue order, so
        the last column is the top component) and mean is the (D,) data mean.
    """
    centre = np.mean(inputs, axis=0)
    centred = inputs - centre
    # Scatter matrix of the centred data (unnormalised covariance; the
    # eigenvectors are unaffected by the missing 1/N factor).
    scatter = np.einsum('ij,ik', centred, centred)
    _, eigvecs = np.linalg.eigh(scatter)
    return eigvecs[:, -num_components:], centre
# ## Exercise 2: Non-linear autoencoders
#
# Those who did the extension in the previous exercise will have just seen that for an autoencoder with both linear / affine encoder and decoders, there is an analytic solution for the parameters which minimise a sum of squared differences error.
#
# In general the advantage of using gradient-based training methods is that it allows us to use non-linear models for which there is no analytic solution for the optimal parameters. The hope is the use of non-linear transformations between the affine transformation layers will increase the representational power of the model (a sequence of affine transformations applied without any interleaving non-linear operations can always be represented by a single affine transformation).
#
# Train a contractive autoencoder with an initial affine layer (output dimension again 50) followed by a rectified linear layer, then an affine transformation projecting to outputs of same dimension as the original inputs, and finally a logistic sigmoid layer at the output. As the only layers with parameters are the two affine layers which have the same dimensions as in the fully affine model above, the overall model here has the same number of parameters as previously.
#
# Again train for 25 epochs with 50 training examples per batch and use a uniform Glorot initialisation for the weights, and zero biases initialisation. Use our implementation of the 'Adam' adaptive moments learning rule (available in `mlp.learning_rules` as `AdamLearningRule`) rather than basic gradient descent here (the adaptivity helps deal with the varying appropriate scale of updates induced by the non-linear transformations in this model).
# Plot batches of the inputs and reconstructed inputs for this non-linear contractive autoencoder model and compare to the corresponding plots for the linear models above.
# ## Exercise 3: Denoising autoencoders
#
# So far we have just considered autoencoders that try to reconstruct the input vector fed into them via some intermediate lower-dimensional 'contracted' representation. The contraction is important as if we were to mantain the input dimensionality in all layers of the model a trivial optima for the model to learn would be to apply an identity transformation at each layer.
#
# It can be desirable for the intermediate hidden representation to be robust to noise in the input. The intuition is that this will force the model to learn to maintain the 'important structure' in the input in the hidden representation (that needed to reconstruct the input). This also removes the requirement to have a contracted hidden representation (as the model can no longer simply learn to apply an identity transformation) though in practice we will still often use a lower-dimensional hidden representation as we believe there is a certain level of redundancy in the input data and so the important structure can be represented with a lower dimensional representation.
#
# Create a new data provider object which adds to noise to the inputs to an autoencoder in each batch it returns. There are various different ways you could introduce noise. The three suggested in the lecture slides are
#
# * *Gaussian*: add independent, zero-mean Gaussian noise of a fixed standard-deviation to each dimension of the input vectors.
# * *Masking*: generate a random binary mask and perform an elementwise multiplication with each input (forcing some subset of the values to zero).
# * *Salt and pepper*: select a random subset of values in each input and randomly assign either zero or one to them.
#
# You should choose one of these noising schemes to implement. It may help to know that the base `DataProvider` object already has access to a random number generator object as its `self.rng` attribute.
# Once you have implemented your chosen scheme, use the new data provider object to train a denoising autoencoder with the same model architecture as in exercise 2.
# Use the `show_batch_of_images` function from above to visualise a batch of noisy inputs from your data provider implementation and the denoised reconstructions from your trained denoising autoencoder.
# ## Exercise 4: Using an autoencoder as an initialisation for supervised training
# As a final exercise we will use the first layer of an autoencoder for MNIST digit images as a layer within a multiple layer model trained to do digit classification. The intuition behind pretraining methods like this is that the hidden representations learnt by an autoencoder should be a more useful representation for training a classifier than the raw pixel values themselves. We could fix the parameters in the layers taken from the autoencoder but generally we can get better performance by letting the whole model be trained end-to-end on the supervised training task, with the learnt autoencoder parameters in this case acting as a potentially more intelligent initialisation than randomly sampling the parameters which can help ease some of the optimisation issues encountered due to poor initialisation of a model.
#
# You can either use one of the autoencoder models you trained in the previous exercises, or train a new autoencoder model for specifically for this exercise. Create a new model object (instance of `mlp.models.MultipleLayerModel`) in which the first layer(s) of the list of layer passed to the model constructor are the trained first layer(s) from your autoencoder model (these can be accessed via the `layers` attribute which is a list of all the layers in a model). Add any additional layers you wish to the pretrained layers - at the very least you will need to add an output layer with output dimension 10 to allow the model to be used to predict class labels. Train this new model on the original MNIST image, digit labels pairs with a cross entropy error.
| 13,864 |
/CIFAR100_Xception_Adagrad.ipynb | fbb684a9b03a2336ce31b692f8acced1309ac26b | [] | no_license | tamal-mallick/Image-Recognition-Using-CNN | https://github.com/tamal-mallick/Image-Recognition-Using-CNN | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 51,550 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="1agGD7uP493Q" colab={"base_uri": "https://localhost:8080/"} outputId="83c61518-c8d6-4dff-a0e1-2994bf6be32b"
#https://medium.com/@kenneth.ca95/a-guide-to-transfer-learning-with-keras-using-Xception-a81a4a28084b
# Running the version as 1.x is optional, without that first line it will run the last version of tensorflow for Colab.
# %tensorflow_version 1.x
import tensorflow as tf
import tensorflow.keras as K
# + id="MUEuZsobBTUF" colab={"base_uri": "https://localhost:8080/"} outputId="332f54d6-499e-46eb-e9f9-90ef60c9ba57"
# Load data
# NOTE(review): the original comment here described Fashion-MNIST, but the
# call below downloads CIFAR-100: 50,000 training and 10,000 test examples
# of 32x32 RGB images labelled with 100 fine-grained classes.
# Alternative datasets, kept commented for reference:
#tf.keras.datasets.cifar10.load_data()
#tf.keras.datasets.cifar100.load_data(label_mode="coarse")
tf.keras.datasets.cifar100.load_data()
# + id="nGf6Tgtz0VTe"
# Preprocess data function
# Now that the data is loaded, we are going to build a preprocess function for the data.
# We have X as a numpy array of shape (m, 32, 32, 3) where m is the number of images,
# 32 and 32 the dimensions, and 3 is because we use color images (RGB).
# We have a set of X for training and a set of X for validation.
# Y is a numpy array of shape (m, ) that we want to be our labels.
# Since we work with 10 different categories, we make use of one-hot encoding with a
# function of Keras that makes our Y into a shape of (m, 10). That also applies for the validation.
def preprocess_data(X,Y):
    """Prepare CIFAR-100 arrays for the Xception model.

    X is normalised with Xception's own preprocessing and Y is one-hot
    encoded over the 100 fine-grained labels.

    Returns the tuple (preprocessed images, one-hot labels).
    """
    images = K.applications.xception.preprocess_input(X)
    labels = K.utils.to_categorical(Y, 100)
    return images, labels
# + id="Q4qZ9aHTEQUp" colab={"base_uri": "https://localhost:8080/"} outputId="a1286dce-bb8c-4925-af1b-22c50aca3d46"
# load and split data
# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = K.datasets.cifar100.load_data()
img_rows, img_cols = 32, 32
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
print('x_test shape:', x_test.shape)
print('y_test shape:', y_test.shape)
# + id="aAs88uCaFMc3" colab={"base_uri": "https://localhost:8080/"} outputId="17404736-65fc-461c-c68e-30d285fa66df"
# Preprocess data
# Next, we are going to call our preprocessing function on the data loaded from the CIFAR-100 database.
x_train, y_train = preprocess_data(x_train, y_train)
x_test, y_test = preprocess_data(x_test, y_test)
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
print('x_test shape:', x_test.shape)
print('y_test shape:', y_test.shape)
# + id="vDEpJYeWGK46" colab={"base_uri": "https://localhost:8080/"} outputId="8b4e57a3-6454-4b33-9be9-44a7f043c2b5"
# Using weights of a trained neural network
# A pretrained model from the Keras Applications has the advantage of allowing you to use weights that
# are already calibrated to make predictions. In this case, we use the weights from Imagenet
# and the network is a Xception. The option include_top=False allows feature extraction by removing
# the last dense layers. This let us control the output and input of the model.
input_t = K.Input(shape=(32,32,3))
Xception_Model = K.applications.Xception(include_top=False,
weights="imagenet",
input_tensor=input_t)
# + id="hlyKjCtPH3m3"
# In this case, we ‘freeze’ all layers except for the last block of the Xception.
for layer in Xception_Model.layers[:100]:
layer.trainable=False
# + id="JdixTKJ7I10V" colab={"base_uri": "https://localhost:8080/"} outputId="6f4205a0-0147-45c8-948b-bedc4e3d630e"
# We can check that we did it correctly with:
# False means that the layer is ‘freezed’ or is not trainable and
# True that when we run our model, the weights are going to be adjusted.
for i, layer in enumerate(Xception_Model.layers):
print(i,layer.name,"-",layer.trainable)
# + id="3PHC3U6xJceX"
# Add Flatten and Dense layers on top of Xception
# Now, we need to connect our pretrained model with the new layers
# of our model. We can use global pooling or a flatten layer to connect
# the dimensions of the previous layers with the new layers.
to_res = (224, 224)
model = K.models.Sequential()
model.add(K.layers.Lambda(lambda image: tf.image.resize(image, to_res)))
model.add(Xception_Model)
model.add(K.layers.Flatten())
model.add(K.layers.Dense(256, activation='relu'))
model.add(K.layers.Dropout(0.5))
model.add(K.layers.BatchNormalization())
model.add(K.layers.Dense(128, activation='relu'))
model.add(K.layers.Dropout(0.5))
model.add(K.layers.BatchNormalization())
# model.add(K.layers.Dense(64, activation='relu'))
# model.add(K.layers.Dropout(0.5))
# model.add(K.layers.BatchNormalization())
model.add(K.layers.Dense(100, activation='softmax'))
# + id="NClKTTPoJwjA" colab={"base_uri": "https://localhost:8080/"} outputId="267fe990-b010-4ff7-ce47-ef9a3738f96d"
# Compile model and train
# Results
# We obtained an accuracy of 94% on training set and 90% on validation with 10 epochs.
# In the 8th epoch, the values are very similar and it is interesting to note that
# in the first validation accuracy is higher than training.
# This is because of dropout use, which in Keras, it has a different behavior
# for training and testing. In testing time, all the features are ready and
# the dropout is turned off, resulting in a better accuracy.
# This readjust on the last epochs since the model continues changing on the training.
model.compile(loss='categorical_crossentropy',
optimizer='Adagrad',
metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=30, epochs=25, verbose=1,
validation_data=(x_test, y_test)
)
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="Qc9XmTGrA7uE" outputId="92a06391-5bfd-4bd7-c913-157cbcc94a67"
model.evaluate(x_test, y_test)
| 6,379 |
/Assignments/Data Visualization Assignment.ipynb | 8bd0c3750a00f3bb1e1fc311ffaa59f508152a3d | [] | no_license | mohdasim98/Python-assignment | https://github.com/mohdasim98/Python-assignment | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 272,394 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Analysis and Machine Learning Using Python
# 
# # Data Visualization Assignment
# ### Submission Date & Time : 21st June 2020 & 15:00
# ## Q.1
# - Import all modules required for Data Visualization
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# ## Q.2
# - Plot a Sine Wave with color code with "black" color and style as dashdot, also mark "x" and "y" label and give a suitable title to it
x=np.linspace(0,10,1000)
plt.plot(x,np.sin(x),'k-.')
plt.title('Sine Wave',size=20)
plt.xlabel('x label',size=20)
plt.ylabel('y label',size=20)
plt.show()
# ## Q.3
# - Plot a line curve with list=[2,3,4,5], use linestyle and color together for the graph and marker style as "Rotated Hexagon Marker"
plt.plot([2,3,4,5],'b-.H',markersize=16)
plt.show()
# ## Q.4
# 
# -Plot Stacked Bar Graph for the above data.
# - Use proper colors and x and y label for the graph
# - Use legend positioned at upper right corner
# - Also mark xticks with an angle of 90 Degree
# +
labels=['Delhi','Mumbai','Chennai','Kolkata','Hyderabad']
attributes=['Clothing','Equipment','Accessories']
Clothing = np.array([82,78,48,46,39])
Equipment = np.array([48,31,51,29,18])
Accessories = np.array([15,20,14,13,9])
ind=[x for x,_ in enumerate(labels)]
plt.bar(ind, Clothing, width=0.5, label='Clothing', color='crimson', bottom=Equipment+Accessories)
plt.bar(ind, Equipment, width=0.5, label='Equipment', color='cyan', bottom=Accessories)
plt.bar(ind, Accessories, width=0.5, label='Accessories', color='yellow')
plt.xlabel('Cities')
plt.xticks(ind,labels,rotation='90',size=20)
plt.ylabel('Stores Range',size=20)
plt.legend(attributes,title="Notation")
plt.title('Stacked bar graph',size=20)
plt.show()
# -
# ## Q.5 For Titanic Dataset
# - Load Dataset using Seaborn
# - Plot a Bar Graph of Survival of Passenger against the Passenger Class.
# - Use proper "hue" attribute, and palette color. Mark 'x' and 'y' label with proper title.
# - Use legend located at upper left corner
import seaborn as ss
data=ss.load_dataset('titanic')
# +
Pclass=list(data.pclass.unique())
Pclass.sort()
cnt0=[data[(data['survived']==0) & (data['pclass']==1)].pclass.count(),data[(data['survived']==0) & (data['pclass']==2)].pclass.count(),data[(data['survived']==0) & (data['pclass']==3)].pclass.count()]
cnt1=[data[(data['survived']==1) & (data['pclass']==1)].pclass.count(),data[(data['survived']==1) & (data['pclass']==2)].pclass.count(),data[(data['survived']==1) & (data['pclass']==3)].pclass.count()]
width=0.40
x=np.arange(len(Pclass))
plt.bar(x-.2,cnt0,width,label='NO',color='r')
plt.bar(x+.2,cnt1,width,label='YES',color='g')
plt.grid(True)
plt.legend(title='Survived',loc='upper left')
plt.xticks(x,Pclass)
plt.xlabel("Passanger Class")
plt.ylabel("Number of passengers")
plt.title("Bar Graph of Survival of Passenger against the Passenger Class",color='c',size=12)
plt.show()
# -
# ## Q.6 For Tips Dataset
# - Load the Dataset Using Seaborn
# - Plot the Horizontal Distribution Graph on Total Bill with color as Green
# - Mark proper 'x' and 'y' label and give a suitable title
tip=ss.load_dataset('tips')
ss.set_style('darkgrid')
ss.distplot(tip['total_bill'],color='g',vertical=True)
plt.title('Total Bill Distribution',color='black',size=20)
plt.ylabel('Bill',color='black',size=20)
plt.xlabel('Mean',color='black',size=20)
plt.show()
# # Q.7
# - Also plot the KDE Graph for the above data
ss.set_style('darkgrid')
ss.distplot(tip['total_bill'],color='b',hist=False)
plt.title('Total Bill Distribution',color='black',size=20)
plt.xlabel('Bill',color='black',size=20)
plt.ylabel('Mean',color='black',size=20)
plt.show()
# ## Q.8 For Cancer Dataset
# - Load the data with the help of Matplotlib (Dataset Provided)
# - Print the first 10 data of the whole dataset
# - Plot Box Plot with column 'texture_mean' , separated by column 'diagnosis'
df=pd.read_csv('cancer.csv')
df.head(10)
ss.boxplot(x='diagnosis',y='texture_mean',data=df)
# ## Q.9
# 
#
# - Plot the Pie Chart for the two colums with some explode for the country "India" and proper shadow
# +
countries=['United States','Great Britain','India','Russia','Germany']
medal=[46,27,26,19,17]
ex=[0,0,0.2,0,0]
color=['magenta','cyan','gold','yellowgreen','lightcoral']
plt.pie(medal,colors=color,autopct='%1.2f%%',pctdistance=0.6,shadow=True,explode=ex,textprops={'fontsize': 12},wedgeprops={"edgecolor":"0",'linewidth': 1,'linestyle': 'solid', 'antialiased': True})
plt.axis('equal')
plt.legend(countries,title="Countries",loc='upper right')
plt.title('Percentage of medal with counteries',color='c',size=20)
plt.show()
# -
# ## Q.10 For Tips Dataset
# - Load the dataset using seaborn
# - Draw the Strip Plot between 'total bill' and 'day'
# - Rotate xticks with an angle of 45 Degree
#
tip=ss.load_dataset('tips')
ss.stripplot(x='day',y='total_bill',data=tip)
plt.xticks(rotation=45)
plt.show()
| 5,298 |
/jupyter_notebooks/week_13_concurrency_transitions/timing_notebooks.ipynb | df3a49d8e8358ca309820ddc63ab7e07ba185bd7 | [] | no_license | umbcdata601/spring2020 | https://github.com/umbcdata601/spring2020 | 11 | 6 | null | null | null | null | Jupyter Notebook | false | false | .py | 1,550 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Второе задание по вычислительной математике. Решение ОДУ и их систем.
#Подключение библиотек
import numpy as np
from scipy.integrate import odeint # 用from语法的,直接调用,不需要前面的包
from mpl_toolkits.mplot3d import Axes3D
import pylab as pl # pylab 和 pyplot 有相近的功能
from scipy import integrate
import matplotlib.pyplot as plt
# # №1.Линейные системы уравнений. Устойчивость численных методов.
# Решить численно задачу о колебаниях в системе, где и возвращающая сила, и коэффициент вязкого трения убывают со временем (уравнение Эйлера):
# x''+ 3x'/t + x/t^2 = 0
# t:[1,T], x(1)=1, x'(1)=1
# А. Найти точное решение системы Указание. Будем искать точное решение в виде x = t, где , вообще говоря, комплексное
# +
#y"+a*y'+b*y=0
#a=3/x, b=1/x^2
from scipy.integrate import odeint
from pylab import *
def deriv(y,x): # returns the array [y', y''] for odeint
    """Right-hand side of the 2nd-order ODE rewritten as a 1st-order system.

    y : sequence (y, y') -- current state
    x : float -- independent variable (time)

    NOTE(review): the stated Euler equation is x'' + 3x'/t + x/t^2 = 0,
    i.e. y'' = -(3/t)*y' - y/t^2, but this RHS computes a*y/x + b*y'/x^2
    with a=3, b=1 -- both the signs and which term carries 1/x vs 1/x^2
    differ from that equation; confirm against the intended problem.
    """
    a = 3
    b = 1
    return array([ y[1], a*y[0]/x+b*y[1]/(x*x) ])
time = linspace(1.0,10.0,900)
yinit = array([1,100]) # 初值
y = odeint(deriv,yinit,time)
figure()
plot(time,y[:,0],label='y') #y[:,0]即返回值的第一列,是y的值。label是为了显示legend用的。
plot(time,y[:,1],label="y'") #y[:,1]即返回值的第二列,是y’的值
xlabel('t')
ylabel('y')
legend()
show()
# -
# Б. Используя численные методы Эйлера (явный, неявный, с центральной точкой), методы трапеций, явные методы Адамса порядка 3, 4 и Дормана-Принса, получить численные решения с шагом 0,1 для Т = 1,10,100,1000. Объяснить полученные результаты. Уменьшить вдвое. Объяснить полученные результаты.
import numpy as np
from scipy.integrate import odeint
#二阶微分方程
def diff_equation(y_list,x):
y,z=y_list
return np.array([z,-(3*z/x+y/(x*x))])
x=np.linspace(1,10,num=90)
y0=[1,1]#y(1)=1,y'(1)=1
result=odeint(diff_equation,y0,x)
plt.plot(x,result[:,0],label='y')#y的图像
plt.plot(x,result[:,1],label='z')#z的图像,也就是y'的图像
plt.legend()
plt.grid()
plt.show()
# +
def euler(f,g,x0,y0,T,h):
    """Explicit (forward) Euler method for the system x' = f(t,x,y), y' = g(t,x,y).

    f, g : right-hand sides, called as f(t, x, y)
    x0, y0 : initial values at t = 0
    T : end time
    h : number of steps (so the step size is T/h)

    Returns two lists of length h+1 with the approximate solution.

    Fixes vs the original:
    * the new x was appended *before* y was updated, so g was evaluated at
      the already-advanced x instead of the previous state; both
      increments now use the same (old) point;
    * the slopes are evaluated at the step's start time (i-1)*T/h, as
      forward Euler requires, not at its end.
    """
    x = [x0]
    y = [y0]
    dt = T / h
    for i in range(1, h + 1):
        t_prev = (i - 1) * dt
        x_prev, y_prev = x[-1], y[-1]
        x.append(x_prev + dt * f(t_prev, x_prev, y_prev))
        y.append(y_prev + dt * g(t_prev, x_prev, y_prev))
    return x, y
v, w = euler(f, g, v0, w0, T, h)
t = np.linspace(0, T, h+1)
plt.xlabel('t')
plt.ylabel('u')
plt.plot(t, v, "red", label="v")
plt.plot(t, true_v(t), "blue", label="Точное решение v")
plt.legend()
plt.show()
plt.xlabel('t')
plt.ylabel('w')
plt.plot(t, w, "red", label="w")
plt.plot(t, true_w(t), "blue", label="Точное решение w")
plt.legend()
plt.show()
# -
# Г. Исследовать методы Адамса порядка 3 и трапеций для данной задачи на устойчивость. Получить априорно оценку для шагов, при которых метод устойчив.
# Д. Проверить полученные оценки границы устойчивости для каждого метода Рунге-Кутты. Опишите процесс развития неустойчивости.
# Е. Устойчив ли метод Эйлера с центральной точкой? Объяснить полученные с его помощью результаты.
# Метод Рунге-Кутты 4 порядка.
# +
def rk4(f, g, x0, y0, T, h):
    """Classical 4th-order Runge-Kutta for the system x' = f(t,x,y), y' = g(t,x,y).

    f, g : right-hand sides called as f(t, x, y)
    x0, y0 : initial values
    T : end time; h : number of steps (step size T/h)

    Returns two lists of length h+1 with the approximate solution.

    NOTE(review): every stage time is based on (i)*T/h, i.e. the *end* of
    the current step rather than its start (i-1)*T/h; for time-dependent
    right-hand sides this shifts all evaluations by one step -- confirm.
    """
    x = [x0]
    y = [y0]
    for i in range(1, h+1):
        # k1..k4 / q1..q4 are the four RK stages for x and y respectively
        k1 = f((i)*T/h, x[-1], y[-1])
        q1 = g((i)*T/h, x[-1], y[-1])
        k2 = f((i)*T/h + T/h/2, x[-1] + T/h/2*k1, y[-1] + T/h/2*q1)
        q2 = g((i)*T/h + T/h/2, x[-1] + T/h/2*k1, y[-1] + T/h/2*q1)
        k3 = f((i)*T/h + T/h/2, x[-1] + T/h/2*k2, y[-1] + T/h/2*q2)
        q3 = g((i)*T/h + T/h/2, x[-1] + T/h/2*k2, y[-1] + T/h/2*q2)
        k4 = f((i)*T/h + T/h, x[-1] + T/h*k3, y[-1] + T/h*q3)
        q4 = g((i)*T/h + T/h, x[-1] + T/h*k3, y[-1] + T/h*q3)
        # standard RK4 weighted combination (1, 2, 2, 1)/6
        x.append(x[-1] + T/h/6*(k1 + 2*k2 + 2*k3 + k4))
        y.append(y[-1] + T/h/6*(q1 + 2*q2 + 2*q3 + q4))
    return x, y
T = 10
h = 1000
v, w = rk4(f, g, v0, w0, T, h)
t = np.linspace(T/h, T, h+1) # , ,
plt.xlabel('t')
plt.ylabel('u')
plt.plot(t, v, "red", label="v")
plt.plot(t, true_v(t), "blue", label="Точное решение v")
plt.legend()
plt.show()
plt.xlabel('t')
plt.ylabel('w')
plt.plot(t, w, "red", label="w")
plt.plot(t, true_w(t), "blue", label="Точное решение w")
plt.legend()
plt.show()
# -
# Метод Рунге-Кутты 3 порядка.
# +
def rk3(f, g, x0, y0, T, h):
    """Three-stage Runge-Kutta scheme for the system x' = f(t,x,y), y' = g(t,x,y).

    f, g : right-hand sides called as f(t, x, y)
    x0, y0 : initial values
    T : end time; h : number of steps (step size T/h)

    Returns two lists of length h+1 with the approximate solution.

    NOTE(review): this tableau does not match the standard 3rd-order Kutta
    method (which uses k3 = f(t+h, x - h*k1 + 2h*k2) and weights
    (k1 + 4*k2 + k3)/6); here k3 reuses a half-step of k2 and the weights
    are (k1 + 2*k2 + k3)/4 -- verify the intended order of accuracy.
    Stage times are also based on (i)*T/h (step end) rather than (i-1)*T/h.
    """
    x = [x0]
    y = [y0]
    for i in range(1, h+1):
        k1 = f((i)*T/h, x[-1], y[-1])
        q1 = g((i)*T/h, x[-1], y[-1])
        k2 = f((i)*T/h + T/h/2, x[-1] + T/h/2*k1, y[-1] + T/h/2*q1)
        q2 = g((i)*T/h + T/h/2, x[-1] + T/h/2*k1, y[-1] + T/h/2*q1)
        k3 = f((i)*T/h + T/h, x[-1] + T/h/2*k2, y[-1] + T/h/2*q2)
        q3 = g((i)*T/h + T/h, x[-1] + T/h/2*k2, y[-1] + T/h/2*q2)
        x.append(x[-1] + T/h/4*(k1 + 2*k2 + k3))
        y.append(y[-1] + T/h/4*(q1 + 2*q2 + q3))
    return x, y
T = 10
h = 1000
v, w = rk3(f, g, v0, w0, T, h)
t = np.linspace(T/h, T, h+1)
plt.xlabel('t')
plt.ylabel('u')
plt.plot(t, v, "red", label="v")
plt.plot(t, true_v(t), "blue", label="Точное решение v")
plt.legend()
plt.show()
plt.xlabel('t')
plt.ylabel('w')
plt.plot(t, w, "red", label="w")
plt.plot(t, true_w(t), "blue", label="Точное решение w")
plt.legend()
plt.show()
# -
# Метод Рунге-Кутты 2 порядка.
# +
def rk2(f,g,u0,v0,T,h):
    """Two-stage Runge-Kutta scheme for the system x' = f(t,x,y), y' = g(t,x,y).

    f, g : right-hand sides called as f(t, x, y)
    u0, v0 : initial values for x and y
    T : end time; h : number of steps (step size T/h)

    Returns two lists of length h+1 with the approximate solution.

    NOTE(review): k2 is evaluated at the midpoint with a half step but then
    averaged as (k1 + k2)/2, which mixes the midpoint method (use k2 alone)
    with Heun's method (evaluate k2 at t+h with a full step) -- confirm
    which 2nd-order scheme was intended. Stage times are also based on
    (i)*T/h (step end) rather than (i-1)*T/h.
    """
    x=[u0]
    y=[v0]
    for i in range(1, h+1):
        k1 = f((i)*T/h, x[-1], y[-1])
        q1 = g((i)*T/h, x[-1], y[-1])
        k2 = f((i)*T/h + T/h/2, x[-1] + T/h/2*k1, y[-1] + T/h/2*q1)
        q2 = g((i)*T/h + T/h/2, x[-1] + T/h/2*k1, y[-1] + T/h/2*q1)
        x.append(x[-1] + T/h/2*(k1 + k2))
        y.append(y[-1] + T/h/2*(q1 + q2))
    return x,y
T = 10
h = 1000
v, w = rk2(f, g, v0, w0, T, h)
t = np.linspace(T/h, T, h+1) # , ,
plt.xlabel('t')
plt.ylabel('u')
plt.plot(t, v, "red", label="v")
plt.plot(t, true_v(t), "blue", label="Точное решение v")
plt.legend()
plt.show()
plt.xlabel('t')
plt.ylabel('w')
plt.plot(t, w, "red", label="w")
plt.plot(t, true_w(t), "blue", label="Точное решение w")
plt.legend()
plt.show()
# -
# Метод Эйлера.
# +
def euler(f,g,x0,y0,T,h):
    """Explicit (forward) Euler method for the system x' = f(t,x,y), y' = g(t,x,y).

    f, g : right-hand sides, called as f(t, x, y)
    x0, y0 : initial values at t = 0
    T : end time
    h : number of steps (so the step size is T/h)

    Returns two lists of length h+1 with the approximate solution.

    Fixes vs the original:
    * the new x was appended *before* y was updated, so g was evaluated at
      the already-advanced x instead of the previous state; both
      increments now use the same (old) point;
    * the slopes are evaluated at the step's start time (i-1)*T/h, as
      forward Euler requires, not at its end.
    """
    x = [x0]
    y = [y0]
    dt = T / h
    for i in range(1, h + 1):
        t_prev = (i - 1) * dt
        x_prev, y_prev = x[-1], y[-1]
        x.append(x_prev + dt * f(t_prev, x_prev, y_prev))
        y.append(y_prev + dt * g(t_prev, x_prev, y_prev))
    return x, y
v, w = euler(f, g, v0, w0, T, h)
t = np.linspace(0, T, h+1)
plt.xlabel('t')
plt.ylabel('u')
plt.plot(t, v, "red", label="v")
plt.plot(t, true_v(t), "blue", label="Точное решение v")
plt.legend()
plt.show()
plt.xlabel('t')
plt.ylabel('w')
plt.plot(t, w, "red", label="w")
plt.plot(t, true_w(t), "blue", label="Точное решение w")
plt.legend()
plt.show()
# -
# Неявный метод Эйлера.
# +
def implicit_euler(h):
    """Implicit (backward) Euler scheme specialised to this problem.

    h : number of steps.
    Relies on the module-level globals v0, w0 (initial values), T (end
    time) and numpy imported as np.

    Returns numpy arrays (v, w) of length h+1.

    NOTE(review): the closed-form update for v was derived by hand by
    solving the implicit step equations -- verify the denominator against
    the intended system. Also note np.append copies the whole array on
    every call, making this loop O(h^2); acceptable for small h.
    """
    v=[v0]
    w=[w0]
    for i in range(h):
        v = np.append(v,(i*T/h*v[-1]-4*w[-1]*T/h)/(i*T/h+(1-i*T/h)*T/h + 4*(T/h)**2))
        w = np.append(w,w[-1]+T/h*v[i+1])
    return v,w
T = 10
h = 1000
t = np.linspace(0, T, h+1)
v, w = implicit_euler(h)
plt.xlabel('t')
plt.ylabel('u')
plt.plot(t, v, "red", label="v")
plt.plot(t, true_v(t), "blue", label="Точное решение v")
plt.legend()
plt.show()
plt.xlabel('t')
plt.ylabel('w')
plt.plot(t, w, "red", label="w")
plt.plot(t, true_w(t), "blue", label="Точное решение w")
plt.legend()
plt.show()
# -
# ### Найдем апостериорные порядки схождения для данных методов
# #### Апостериорный порядок сходимости метода Рунге-Кутты 4 порядка.
# +
step = []
v_error = []
w_error = []
for segm in reversed(range(100, 2000)):
v, w = rk4(f, g, v0, w0, T, segm)
t = np.linspace(0, T, segm+1)
step.append(m.log(segm))
v_error.append(m.log(np.linalg.norm(v - true_v(t), np.inf)))
w_error.append(m.log(np.linalg.norm(w - true_w(t), np.inf)))
#print(np.linalg.norm(v - true_v(t), np.inf))
plt.ylabel('Log(||R||)')
plt.xlabel('Log(Кол-во шагов)')
plt.plot(step, v_error, label="Ошибка v")
plt.plot(step, w_error, label="Ошибка w")
plt.legend()
plt.show()
# -
# #### Апостериорный порядок сходимости метода Рунге-Кутты 3 порядка.
# +
step = []
v_error = []
w_error = []
for segm in reversed(range(1000, 2000)):
v, w = rk3(f, g, v0, w0, T, segm)
t = np.linspace(0, T, segm+1)
step.append(m.log(segm))
v_error.append(m.log(np.linalg.norm(v - true_v(t), np.inf)))
w_error.append(m.log(np.linalg.norm(w - true_w(t), 3)))
#print(np.linalg.norm(v - true_v(t), np.inf))
plt.ylabel('Log(||R||)')
plt.xlabel('Log(Кол-во шагов)')
plt.plot(step, v_error, label="Ошибка v")
plt.plot(step, w_error, label="Ошибка w")
plt.legend()
plt.show()
# -
# #### Апостериорный порядок сходимости метода Рунге-Кутты 2 порядка.
step = []
v_error = []
w_error = []
for segm in reversed(range(10000, 10100)):
v, w = rk2(f, g, v0, w0, T, segm)
t = np.linspace(0, T, segm+1)
step.append(m.log(segm))
v_error.append(m.log(np.linalg.norm(v - true_v(t), np.inf)))
w_error.append(m.log(np.linalg.norm(w - true_w(t), np.inf)))
#print(np.linalg.norm(v - true_v(t), inf))
plt.ylabel('Log(||R||)')
plt.xlabel('Log(Кол-во шагов)')
plt.plot(step, v_error, label="Ошибка v")
plt.plot(step, w_error, label="Ошибка w")
plt.legend()
plt.show()
# # Апостериорный порядок сходимости метода Эйлера.
# +
step = []
v_error = []
w_error = []
for segm in reversed(range(1000, 2000)):
v, w = euler(f, g, v0, w0,T, segm)
t = np.linspace(0, T, segm+1)
step.append(m.log(segm))
v_error.append(m.log(np.linalg.norm(v - true_v(t), 2)))
w_error.append(m.log(np.linalg.norm(w - true_w(t), 2)))
#print(np.linalg.norm(v - true_v(t), np.inf))
plt.ylabel('Log(||R||)')
plt.xlabel('Log(Кол-во шагов)')
plt.plot(step, v_error, label="Ошибка v")
plt.plot(step, w_error, label="Ошибка w")
plt.legend()
plt.show()
# -
# #### Апостериорный порядок сходимости неявного метода Эйлера.
step = []
v_error = []
w_error = []
T=10
for segm in reversed(range(1000,2000)):
v, w = implicit_euler(segm)
t = np.linspace(0, T, segm+1)
step.append(m.log(segm))
v_error.append(m.log(np.linalg.norm(v - true_v(t), 2)))
w_error.append(m.log(np.linalg.norm(w - true_w(t), 2)))
plt.ylabel('Log(||R||)')
plt.xlabel('Log(Кол-во шагов)')
plt.plot(step, v_error, label="Ошибка v")
plt.plot(step, w_error, label="Ошибка w")
plt.legend()
plt.show()
# # №2.Нелинейная система уравнений
# Система Лоренца Конвективные течения в слое жидкости при определенных предположениях можно описывать следующей системой ОДУ (модель Лоренца):
#
#
#
# с начальными условиями
#
# (Здесь х — одна из компонент скорости, y, z — соответствуют членам разложения температуры в ряд Фурье, — число Прандтля, r — число Рэлея, b — положительная константа).
#
# 1:Методами разных порядков аппроксимации численно решить систему Лоренца:
#
#
#
#
# при Считаем, что Объяснить полученные результаты.
# -*- coding: utf8 -*-
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
s,r,b=10,28,8/3
#other value of b:1,2,10,20
def f(y, t):
    """Lorenz-system right-hand side in the form odeint expects.

    y : state vector (y1, y2, y3) -- velocity and temperature modes
    t : time (unused, required by odeint's calling convention)

    Uses the module-level parameters s, r, b defined above.
    Returns [dy1/dt, dy2/dt, dy3/dt].
    """
    y1, y2, y3 = y
    return [s*(y2-y1),
            -y2+(r-y3)*y1,
            -b*y3+y1*y2]
t = np.linspace(0,50,5001)
y0 = [1,1,1]
[y1,y2,y3]=odeint(f, y0, t, full_output=False).T
plt.plot(y1,y3, color='black', linestyle=' ', marker='.',
markersize=2)
plt.xlabel('x')
plt.ylabel('z')
plt.grid(True)
plt.title("Проекция траектории Лоренца на плоскость xz")
plt.show()
# +
def lorenz(w, t, p, r, b):
    """Lorenz system right-hand side for odeint.

    w : state vector (x, y, z)
    t : time (unused, kept for odeint's calling convention)
    p, r, b : Lorenz parameters (Prandtl number, Rayleigh number,
        geometric factor)

    Returns the tuple of derivatives (dx/dt, dy/dt, dz/dt).
    """
    x, y, z = w.tolist()
    dx = p * (y - x)
    dy = x * (r - z) - y
    dz = x * y - b * z
    return dx, dy, dz
t = np.arange(0, 30, 0.02) # 时间点
# 调用 ode 对 lorenz进行求解
track1 = odeint(lorenz, (0.0, 1.00, 0.0), t, args=(10.0, 28.0, 3.0)) # odeint,函数名后面的位置,是传入自定义函数的参数
track2 = odeint(lorenz, (0.0, 1.01, 0.0), t, args=(10.0, 28.0, 3.0))
print(track1)
print(track1[:, 0]) # 取出第0列的意思,因为数组的每一行分别是 x,y,z; 取第0列就是把所有x取出来
# -
# вывод1:
# Решение системы ДУ лучше рассматривать в проекции на одну из трёх плоскостей.
# Рассматривая изображение на графике во времени, можно предположить, что точка P(x(t), y{t), z(t)) совершает случайное число колебаний то справа, то с слева.
# application:
# Для метеорологического приложения системы Лоренца, после случайного числа ясных дней, следует случайное число дождливых дней.
# 2:Проанализировать ( в зависимости от шага численного интегрирования) при каких временах решения, полученные методами Рунге-Кутты разного порядка аппроксимации, совпадают, а при каких начинают расходится. Объяснить это явление. Почему при этом практически не изменяются картины в проекциях сечения фазового пространства на координатные плоскости?
# ???тут не понимаю какие признаки нужно рассмотреть
# -*- coding: utf8 -*-
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#Создаем функцию правой части системы уравнений.
s,r,b=10,25,3
def f(y, t):
y1, y2, y3 = y
return [s*(y2-y1),
-y2+(r-y3)*y1,
-b*y3+y1*y2]
#Решаем систему ОДУ и строим ее фазовую траекторию
t = np.linspace(0,20,2001)
y0 = [1, -1, 10]
[y1,y2,y3]=odeint(f, y0, t, full_output=False).T
fig = plt.figure(facecolor='white')
ax=Axes3D(fig)
ax.plot(y1,y2,y3,linewidth=2)
plt.xlabel('y1')
plt.ylabel('y2')
plt.title("Начальные условия: y0 = [1, -1, 10]")
y0 = [1.0001, -1, 10]
[y1,y2,y3]=odeint(f, y0, t, full_output=False).T
fig = plt.figure(facecolor='white')
ax=Axes3D(fig)
ax.plot(y1,y2,y3,linewidth=2)
plt.xlabel('y1')
plt.ylabel('y2')
plt.title("Начальные условия: y0 = [1.0001, -1, 10]")
plt.show()
# вывод2:
# Из приведенных графиков следует, что изменение начального условия для с 1,0 до 1,0001 резко меняет характер изменения аттрактора Лоренца.
# ## Задача№ 3. Особые точки и особые траектории.
#
# +
#this is an example of solution ODE,SP-s
import matplotlib.pyplot as plt
# show plots in notebook
% matplotlib inline
# define system in terms of separated differential equations
def f(x,y):
    """Prey rate of change: logistic growth minus predation, 2x - x^2 - xy."""
    return x * (2 - x - y)
def g(x,y):
    """Predator rate of change: natural decay plus growth from prey, -y + xy."""
    return y * (x - 1)
# initialize lists containing values
x = []
y = []
#iv1, iv2 = initial values, dt = timestep, time = range
def sys(iv1, iv2, dt, time):
    """Integrate the predator-prey system with forward Euler steps.

    iv1, iv2 : initial values for x and y
    dt : time step
    time : number of steps

    Returns the module-level lists x and y holding the trajectory.

    NOTE(review): this function shadows the stdlib module name ``sys`` and
    appends into the module-level lists x and y, so calling it twice
    accumulates results from both runs -- a fresh-list local would be safer.
    """
    # initial values:
    x.append(iv1)
    y.append(iv2)
    #z.append(iv3)
    # compute and fill lists; x[i]/y[i] index the *previous* state, so both
    # updates correctly use values from the same time level
    for i in range(time):
        x.append(x[i] + (f(x[i],y[i])) * dt)
        y.append(y[i] + (g(x[i],y[i])) * dt)
        #z.append(z[i] + (h(x[i],y[i],z[i])) * dt)
    return x, y
sys(10, 2, 0.01, 1000)
#plot
fig = plt.figure(figsize=(15,5))
fig.subplots_adjust(wspace = 0.5, hspace = 0.3)
ax1 = fig.add_subplot(1,2,1)
ax2 = fig.add_subplot(1,2,2)
ax1.plot(x, 'r-', label='predator')
ax1.plot(y, 'b-', label='prey')
#ax1.plot(z, 'g-', label='prey')
ax1.set_title("Dynamics in time")
ax1.set_xlabel("time")
ax1.grid()
ax1.legend(loc='best')
ax2.plot(x, y, color="blue")
ax2.set_xlabel("x")
ax2.set_ylabel("y")
ax2.set_title("Phase space")
ax2.grid()
# -
# the differences in the plots due to different integration-steps and methods
# +
import numpy as np
from scipy import integrate
# define system in terms of a Numpy array
def Sys(X, t=0):
    """Predator-prey right-hand side for odeint.

    X : state array where X[0] is the prey (x) and X[1] the predator (y)
    t : time (unused, required by odeint's calling convention)

    Returns np.array([dx/dt, dy/dt]).
    """
    prey, predator = X[0], X[1]
    d_prey = 2*prey - prey**2 - prey*predator
    d_predator = -predator + prey*predator
    return np.array([d_prey, d_predator])
# generate 1000 linearly spaced numbers for x-axes
t = np.linspace(0, 20, 1000)
# initial values: x0 = 10, y0 = 2
Sys0 = np.array([10, 2])
# type "help(integrate.odeint)" if you want more information about integrate.odeint inputs and outputs.
X, infodict = integrate.odeint(Sys, Sys0, t, full_output=True)
# infodict['message'] # integration successful
x,y = X.T
#plot
fig = plt.figure(figsize=(15,5))
fig.subplots_adjust(wspace = 0.5, hspace = 0.3)
ax1 = fig.add_subplot(1,2,1)
ax2 = fig.add_subplot(1,2,2)
ax1.plot(x, 'r-', label='predator')
ax1.plot(y, 'b-', label='prey')
ax1.set_title("Dynamics in time")
ax1.set_xlabel("time")
ax1.grid()
ax1.legend(loc='best')
ax2.plot(x, y, color="blue")
ax2.set_xlabel("x")
ax2.set_ylabel("y")
ax2.set_title("Phase space")
ax2.grid()
# -
# +
import sympy as sm
# define the system in this way (asuming a predator-prey-system with no negative values)
# to avoid interference x = r (for resource) and y = c (for consumer)
r, c = sm.symbols('r, c', negative=False)
R = 2*r - r**2 - r*c
C = - c + r*c
# use sympy's way of setting equations to zero
REqual = sm.Eq(R, 0)
CEqual = sm.Eq(C, 0)
# compute fixed points
equilibria = sm.solve( (REqual, CEqual), r, c )
print(equilibria)
# -
# метода Рунге-Кутты 4-го порядка
#
# хочу построить график,
# показывающий как x, так и y как функцию времени от t = 0 до t = 30.
#
# а = альфа = 1
# б = бета = 0,5
# г = гамма = 0,5
# с = сигма = 2
# начальных условия х = у = 2
#
#
# +
import matplotlib.pyplot as plt
import numpy as np
def rk4(r, t, h):
    """One 4th-order Runge-Kutta increment for r' = f(r, t).

    r : current state vector; t : current time; h : step size.
    Relies on the module-level right-hand side f(r, t); the caller
    advances the solution with ``r += rk4(r, t, h)``.
    """
    s1 = h * f(r, t)
    s2 = h * f(r + 0.5 * s1, t + 0.5 * h)
    s3 = h * f(r + 0.5 * s2, t + 0.5 * h)
    s4 = h * f(r + s3, t + h)
    return (s1 + 2 * s2 + 2 * s3 + s4) / 6
def f(r, t):
    """Lotka-Volterra right-hand side for state r = (prey x, predator y).

    Model parameters are fixed inside: alpha=1.0, beta=0.5 (prey terms)
    and gamma=0.5, sigma=2.0 (predator terms). t is unused but kept for
    the integrator's call signature.

    Returns np.array([dx/dt, dy/dt], float).
    """
    alpha, beta = 1.0, 0.5
    gamma, sigma = 0.5, 2.0
    x, y = r[0], r[1]
    dx = x*(alpha - beta*y)
    dy = -y*(gamma - sigma*x)
    return np.array([dx, dy], float)
h=0.001 #edited
tpoints = np.arange(0, 30, h) #edited
xpoints, ypoints = [], []
r = np.array([2, 2], float)
for t in tpoints:
xpoints.append(r[0]) #edited
ypoints.append(r[1]) #edited
r += rk4(r, t, h) #edited; no need for input f
plt.plot(tpoints, xpoints)
plt.plot(tpoints, ypoints)
plt.xlabel("Time")
plt.ylabel("Population")
plt.title("Lotka-Volterra Model")
plt.savefig("Lotka_Volterra.png")
plt.show()
# -
plt.xlabel("Prey")
plt.ylabel("Predator")
plt.plot(xpoints, ypoints)
plt.show()
| 17,896 |
/scripts/generate_deepcell_dirs.ipynb | 707ec26233a3dcb7b9d9e7f0be7f09fed96750bf | [] | no_license | awedwards/bidc | https://github.com/awedwards/bidc | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 3,049 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #!/usr/bin/env python
# coding: utf-8
from mibidata import tiff
import re
from pathlib import Path
import shutil
from os import remove
from sys import argv
rootdir = "../data/HNDysplasia/"
def isempty(dir_path):
    """Return True when the directory at dir_path contains no entries."""
    # next() with a default avoids listing the whole directory.
    return next(Path(dir_path).iterdir(), None) is None
def create_dir(dir_path):
    """Create an empty directory at dir_path.

    If the directory already exists it is accepted only when it is empty;
    otherwise an Exception is raised so a directory with stale data is
    never silently reused.

    Fix: the original except-branch checked the module-level global
    ``input_dir`` instead of the directory actually being created, so a
    non-empty target other than input_dir went undetected. The emptiness
    check is done inline here so the function is self-contained.
    """
    dir_path = Path(dir_path)  # also accepts plain string paths
    try:
        dir_path.mkdir()
    except FileExistsError:
        # Directory already exists -- only acceptable if it has no entries.
        if next(dir_path.iterdir(), None) is not None:
            raise Exception('Directory ' + dir_path.name + ' is not empty.')
#Get tiff file names in this folder (say something if there aren't any)
files = [f for f in Path(rootdir).iterdir() if (f.name.endswith(".tif") or f.name.endswith(".tiff"))]
if len(files) == 0:
raise Exception('No mibi tiff files found. Please place this script in your directory folder.')
#Create required directory structure for DeepCell
input_dir = Path(rootdir,"input_data")
create_dir(input_dir)
single_tiff_dir = Path(input_dir, "single_channel_inputs")
create_dir(single_tiff_dir)
mibitiff_dir = Path(input_dir, "mibitiff_inputs")
create_dir(mibitiff_dir)
deepcell_input_dir = Path(input_dir,"deepcell_input")
create_dir(deepcell_input_dir)
deepcell_output_dir = Path(rootdir, "deepcell_output")
create_dir(deepcell_output_dir)
for f in files:
res = re.match("fov\d+",f.name)[0]
create_dir(Path(single_tiff_dir,res))
fov_dir = Path(single_tiff_dir,res,"TIFs")
create_dir(fov_dir)
mibitf = tiff.read(str(f))
tiff.write(str(Path(fov_dir)), mibitf,multichannel=False)
#Move tiff files to tiff input directory
for f in files:
shutil.move(str(Path(f)), str(Path(mibitiff_dir, f.name)))
# -
| 2,089 |
/ch5_code03_package_py09.ipynb | d4d01b0c3f4ca0c2e9f8b49e5f01c66b45eb98d3 | [] | no_license | hojoooon/py09 | https://github.com/hojoooon/py09 | 0 | 1 | null | null | null | null | Jupyter Notebook | false | false | .py | 3,929 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/hojoooon/py09/blob/master/ch5_code03_package_py09.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="8tRbFmvV5SSc" colab_type="text"
#
#
# ```
# # 코드로 형식 지정됨
# ```
#
# ## 패키지
# - game package
# ```
# ./game/__init__.py
# ./game/sound/__init__.py
# ./game/sound/echo.py
# ./game/graphic/__init__.py
# ./game/graphic/render.py
# ```
# + [markdown] id="APZV3iOx3NX3" colab_type="text"
# ## 패키지 테스트
# ```
# # echo.py
# def echo_test():
# print("echo")
# ```
#
# ```
# # render.py
# from ..sound.echo import echo_test
# def render_test():
# print ("render")
# echo_test()
# ```
#
# ```
# from game.graphic.render import render_test
# render_test()
# ```
# + id="1sdg6ocS3Dmc" colab_type="code" outputId="08d818b2-324c-47da-eab1-6cef991fbf28" colab={"base_uri": "https://localhost:8080/", "height": 34}
from game.sound.echo import echo_test
echo_test()
# + id="vHi4LfR1j-TD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8bdb07ba-9c17-4aa3-c147-1e0de3e12be7"
from game.graphic.render import render_test
render_test()
# + id="oup6ndTX5KAd" colab_type="code" outputId="75e4cf2e-04dc-4590-aeed-47dd108ff68f" colab={"base_uri": "https://localhost:8080/", "height": 34}
from game.graphic.render import render_test
render_test()
# + id="NbMtql0c5N7h" colab_type="code" colab={}
n scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
# + [markdown] deletable=true editable=true
# ## Implement Preprocessing Functions
# The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
# - Lookup Table
# - Tokenize Punctuation
#
# ### Lookup Table
# To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
# - Dictionary to go from the words to an id, we'll call `vocab_to_int`
# - Dictionary to go from the id to word, we'll call `int_to_vocab`
#
# Return these dictionaries in the following tuple `(vocab_to_int, int_to_vocab)`
# + deletable=true editable=true
import numpy as np
import problem_unittests as tests
from collections import Counter
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary.
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Counter keys preserve first-occurrence order, so word ids are
    # stable for a given input text.
    vocabulary = Counter(text).keys()
    int_to_vocab = dict(enumerate(vocabulary))
    vocab_to_int = {word: word_id for word_id, word in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
# + [markdown] deletable=true editable=true
# ### Tokenize Punctuation
# We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
#
# Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
# - Period ( . )
# - Comma ( , )
# - Quotation Mark ( " )
# - Semicolon ( ; )
# - Exclamation mark ( ! )
# - Question mark ( ? )
# - Left Parentheses ( ( )
# - Right Parentheses ( ) )
# - Dash ( -- )
# - Return ( \n )
#
# This dictionary will be used to tokenize the symbols and add the delimiter (space) around them. This separates each symbol into its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
# + deletable=true editable=true
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Each punctuation symbol maps to a stand-alone pseudo-word so the
    # network can treat e.g. "bye" and "bye!" as the same word plus a token.
    symbol_names = [
        ('.', 'PERIOD'),
        (',', 'COMMA'),
        ('"', 'DOUBLE_QUOTE'),
        (';', 'SEMICOLON'),
        ('!', 'EXCLAMATION_MARK'),
        ('?', 'QUESTION_MARK'),
        ('(', 'LEFT_PAREN'),
        (')', 'RIGHT_PAREN'),
        ('--', 'HYPHENS'),
        ('\n', 'NEW_LINE'),
    ]
    return {symbol: '<{}>'.format(name) for symbol, name in symbol_names}
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
# + [markdown] deletable=true editable=true
# ## Preprocess all the data and save it
# Running the code cell below will preprocess all the data and save it to file.
# + deletable=true editable=true
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
# + [markdown] deletable=true editable=true
# # Check Point
# This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
# + deletable=true editable=true
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
# + [markdown] deletable=true editable=true
# ## Build the Neural Network
# You'll build the components necessary to build a RNN by implementing the following functions below:
# - get_inputs
# - get_init_cell
# - get_embed
# - build_rnn
# - build_nn
# - get_batches
#
# ### Check the Version of TensorFlow and Access to GPU
# + deletable=true editable=true
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
# + [markdown] deletable=true editable=true
# ### Input
# Implement the `get_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:
# - Input text placeholder named "input" using the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) `name` parameter.
# - Targets placeholder
# - Learning Rate placeholder
#
# Return the placeholders in the following tuple `(Input, Targets, LearningRate)`
# + deletable=true editable=true
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # Both text placeholders are (batch, sequence) with fully dynamic dims;
    # the names let them be fetched by name from a restored graph.
    text_input = tf.placeholder(tf.int32, shape=[None, None], name='input')
    text_targets = tf.placeholder(tf.int32, shape=[None, None], name='targets')
    lr_placeholder = tf.placeholder(tf.float32, name='learning_rate')
    return text_input, text_targets, lr_placeholder
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
# + [markdown] deletable=true editable=true
# ### Build RNN Cell and Initialize
# Stack one or more [`BasicLSTMCells`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/BasicLSTMCell) in a [`MultiRNNCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell).
# - The Rnn size should be set using `rnn_size`
# - Initalize Cell State using the MultiRNNCell's [`zero_state()`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/MultiRNNCell#zero_state) function
# - Apply the name "initial_state" to the initial state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)
#
# Return the cell and initial state in the following tuple `(Cell, InitialState)`
# + deletable=true editable=true
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    number_of_layers = 1
    # Build a distinct BasicLSTMCell per layer. The previous
    # `[cell] * number_of_layers` reused one cell object, which makes every
    # layer share the same weights and raises an error in TF >= 1.2 as soon
    # as number_of_layers > 1.
    cells = [tf.contrib.rnn.BasicLSTMCell(rnn_size)
             for _ in range(number_of_layers)]
    cell = tf.contrib.rnn.MultiRNNCell(cells)
    zero_state = cell.zero_state(batch_size, tf.float32)
    # Name the state tensor so it can be fetched by name after reloading.
    init_state = tf.identity(zero_state, name='initial_state')
    return cell, init_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
# + [markdown] deletable=true editable=true
# ### Word Embedding
# Apply embedding to `input_data` using TensorFlow. Return the embedded sequence.
# + deletable=true editable=true
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # One trainable embed_dim-wide row per vocabulary word, initialised
    # uniformly in [-1, 1).
    embedding_table = tf.Variable(
        tf.random_uniform((vocab_size, embed_dim), -1, 1))
    return tf.nn.embedding_lookup(embedding_table, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
# + [markdown] deletable=true editable=true
# ### Build RNN
# You created a RNN Cell in the `get_init_cell()` function. Time to use the cell to create a RNN.
# - Build the RNN using the [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn)
# - Apply the name "final_state" to the final state using [`tf.identity()`](https://www.tensorflow.org/api_docs/python/tf/identity)
#
# Return the outputs and final_state state in the following tuple `(Outputs, FinalState)`
# + deletable=true editable=true
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell.
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    # dynamic_rnn unrolls the cell over the time dimension of `inputs`.
    rnn_output, last_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the state tensor so it can be fetched by name after reloading.
    named_state = tf.identity(last_state, name='final_state')
    return rnn_output, named_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
# + [markdown] deletable=true editable=true
# ### Build the Neural Network
# Apply the functions you implemented above to:
# - Apply embedding to `input_data` using your `get_embed(input_data, vocab_size, embed_dim)` function.
# - Build RNN using `cell` and your `build_rnn(cell, inputs)` function.
# - Apply a fully connected layer with a linear activation and `vocab_size` as the number of outputs.
#
# Return the logits and final state in the following tuple (Logits, FinalState)
# + deletable=true editable=true
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    embed = get_embed(input_data, vocab_size, embed_dim)
    outputs, final_state = build_rnn(cell, embed)
    # Linear projection (no activation) to one logit per vocabulary word.
    # The hand-rolled softmax_w/softmax_b experiment that previously sat here
    # as a bare triple-quoted string (evaluated on every call) was removed.
    logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
# + [markdown] deletable=true editable=true
# ### Batches
# Implement `get_batches` to create batches of input and targets using `int_text`. The batches should be a Numpy array with the shape `(number of batches, 2, batch size, sequence length)`. Each batch contains two elements:
# - The first element is a single batch of **input** with the shape `[batch size, sequence length]`
# - The second element is a single batch of **targets** with the shape `[batch size, sequence length]`
#
# If you can't fill the last batch with enough data, drop the last batch.
#
# For example, `get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2)` would return a Numpy array of the following:
# ```
# [
# # First Batch
# [
# # Batch of Input
# [[ 1 2], [ 7 8], [13 14]]
# # Batch of targets
# [[ 2 3], [ 8 9], [14 15]]
# ]
#
# # Second Batch
# [
# # Batch of Input
# [[ 3 4], [ 9 10], [15 16]]
# # Batch of targets
# [[ 4 5], [10 11], [16 17]]
# ]
#
# # Third Batch
# [
# # Batch of Input
# [[ 5 6], [11 12], [17 18]]
# # Batch of targets
# [[ 6 7], [12 13], [18 1]]
# ]
# ]
# ```
#
# Notice that the last target value in the last batch is the first input value of the first batch. In this case, `1`. This is a common technique used when creating sequence batches, although it is rather unintuitive.
# + deletable=true editable=true
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array of shape
             (number of batches, 2, batch size, sequence length)
    """
    words_per_batch = batch_size * seq_length
    n_batches = len(int_text) // words_per_batch
    keep = n_batches * words_per_batch  # drop the partial final batch
    inputs = np.array(int_text[:keep])
    # Targets are the inputs shifted left by one word; np.roll wraps the
    # very first word around to become the final target.
    targets = np.roll(inputs, -1)
    input_rows = inputs.reshape(batch_size, -1)
    target_rows = targets.reshape(batch_size, -1)
    pairs = zip(np.split(input_rows, n_batches, axis=1),
                np.split(target_rows, n_batches, axis=1))
    return np.array([np.array(pair) for pair in pairs])
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
# + [markdown] deletable=true editable=true
# ## Neural Network Training
# ### Hyperparameters
# Tune the following parameters:
#
# - Set `num_epochs` to the number of epochs.
# - Set `batch_size` to the batch size.
# - Set `rnn_size` to the size of the RNNs.
# - Set `embed_dim` to the size of the embedding.
# - Set `seq_length` to the length of sequence.
# - Set `learning_rate` to the learning rate.
# - Set `show_every_n_batches` to the number of batches the neural network should print progress.
# + deletable=true editable=true
# Number of Epochs (full passes over the training batches)
num_epochs = 80
# Batch Size
batch_size = 100
# RNN Size (LSTM units per layer)
rnn_size = 256
# Embedding Dimension Size (width of the learned word vectors)
embed_dim = 200
# Sequence Length (words per training sequence)
seq_length = 20
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 10
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Directory prefix where the trained model checkpoint is written.
save_dir = './save'
# + [markdown] deletable=true editable=true
# ### Build the Graph
# Build the graph using the neural network you implemented.
# + deletable=true editable=true
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
# + [markdown] deletable=true editable=true
# ## Train
# Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the [forums](https://discussions.udacity.com/) to see if anyone is having the same problem.
# + deletable=true editable=true
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
# + [markdown] deletable=true editable=true
# ## Save Parameters
# Save `seq_length` and `save_dir` for generating a new TV script.
# + deletable=true editable=true
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
# + [markdown] deletable=true editable=true
# # Checkpoint
# + deletable=true editable=true
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
# + [markdown] deletable=true editable=true
# ## Implement Generate Functions
# ### Get Tensors
# Get tensors from `loaded_graph` using the function [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graph#get_tensor_by_name). Get the tensors using the following names:
# - "input:0"
# - "initial_state:0"
# - "final_state:0"
# - "probs:0"
#
# Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
# + deletable=true editable=true
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # Fetch the four tensors the generation loop needs, by the names they
    # were given when the training graph was built.
    tensor_names = ("input:0", "initial_state:0", "final_state:0", "probs:0")
    return tuple(loaded_graph.get_tensor_by_name(name) for name in tensor_names)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
# + [markdown] deletable=true editable=true
# ### Choose Word
# Implement the `pick_word()` function to select the next word using `probabilities`.
# + deletable=true editable=true
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text.
    :param probabilities: Probabilities of the next word (must sum to 1)
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Sample a word id proportionally to the network's output distribution;
    # sampling (rather than argmax) keeps the generated text varied.
    # The unreachable threshold-based experiment that previously sat after
    # this return statement has been removed.
    return np.random.choice(list(int_to_vocab.values()), 1, p=probabilities)[0]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
# + [markdown] deletable=true editable=true
# ## Generate TV Script
# This will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
# + deletable=true editable=true
# Number of words to generate.
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)
    # Get Tensors from loaded model
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
    # Sentences generation setup
    gen_sentences = [prime_word + ':']
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
    # Generate sentences
    for n in range(gen_length):
        # Dynamic Input: feed at most the last seq_length generated words.
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])
        # Get Prediction
        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})
        # NOTE(review): probs is (batch, seq, vocab) with batch == 1 here, so
        # indexing by dyn_seq_length-1 looks like it assumes the batch axis was
        # squeezed — verify against probabilities[0][dyn_seq_length-1].
        pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
        gen_sentences.append(pred_word)
    # Remove tokens: turn the pseudo-words back into punctuation.
    tv_script = ' '.join(gen_sentences)
    for key, token in token_dict.items():
        ending = ' ' if key in ['\n', '(', '"'] else ''
        tv_script = tv_script.replace(' ' + token.lower(), key)
    tv_script = tv_script.replace('\n ', '\n')
    tv_script = tv_script.replace('( ', '(')
    print(tv_script)
# + [markdown] deletable=true editable=true
# # The TV Script is Nonsensical
# It's ok if the TV script doesn't make any sense. We trained on less than a megabyte of text. In order to get good results, you'll have to use a smaller vocabulary or get more data. Luckily there's more data! As we mentioned in the beginning of this project, this is a subset of [another dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data). We didn't have you train on all the data, because that would take too long. However, you are free to train your neural network on all the data. After you complete the project, of course.
# # Submitting This Project
# When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_tv_script_generation.ipynb" and save it as a HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission.
# + deletable=true editable=true
| 23,995 |
/Assign1.ipynb | 2a56a767834d4d12bd0ef9e95d5655cfe241e1b1 | [] | no_license | a759116/ML_Python | https://github.com/a759116/ML_Python | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 14,790 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import pathlib
import zipfile
import torch
import torchvision
from torchvision.transforms import transforms
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import torch.nn as nn
import pandas as pd
import seaborn as sns
import os
# ! conda install -y gdown
# !gdown --id 1fOCHmxc1MrXc-AHnvcqYvHu0y9LupOhV
# !gdown --id 1vkTHZqjYynj0-eymcFkPND4GvJvyd8BE
# !unzip -u ./test.zip -d ./test
# !unzip -u ./train.zip -d ./train
# +
train='./train'
test='./test'
classes = os.listdir(test)
print(classes)
def totalSize(path):
    """Return the total number of files anywhere under *path* (recursive)."""
    # A generator over os.walk replaces the old accumulator variable, which
    # shadowed the built-in `sum`.
    return sum(len(files) for _, _, files in os.walk(path))
print("Train Number Of Images : ",totalSize(train))
print("Test Number Of Images : ",totalSize(test))
# -
# +
BATCH_SIZE = 16
IMAGE_SIZE = 224
transform = transforms.Compose([
# torchvision.transforms.ColorJitter(brightness=0.4),
# transforms.RandomRotation(20,expand=True),
transforms.Resize(IMAGE_SIZE), ## image resize
transforms.ToTensor(),
transforms.CenterCrop(IMAGE_SIZE),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
train_loader=DataLoader(
torchvision.datasets.ImageFolder(train,transform=transform),
batch_size=BATCH_SIZE, shuffle=True
)
transform2 = transforms.Compose([
transforms.Resize(IMAGE_SIZE), ## image resize
transforms.ToTensor(), ## array converted into torch tensor and then divided by 255 (1.0/255)
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
test_loader=DataLoader(
torchvision.datasets.ImageFolder(test,transform=transform2),
batch_size=BATCH_SIZE, shuffle=True
)
# +
# from torch.optim.lr_scheduler import StepLR
import torchvision.models as models

vgg19 = models.vgg19(pretrained=True)

num_classes = 4
batch_size = BATCH_SIZE
learning_rate = 0.0001

# VGG keeps its head in `classifier`; `.fc` is a ResNet attribute, so the old
# `vgg19.fc = nn.Linear(4096, 4)` only attached an unused layer and the model
# kept predicting 1000 ImageNet classes. Replace the final classifier layer
# with a num_classes-way output instead.
vgg19.classifier[6] = nn.Linear(4096, num_classes)
vgg19.cuda()

# Build the optimizer *after* swapping the head so the new layer's
# parameters are registered with it.
optimizer = torch.optim.Adam(vgg19.parameters(), lr=learning_rate)
# scheduler = StepLR(optimizer, step_size=5, gamma=0.1)
criterion = torch.nn.CrossEntropyLoss()
print("Model Is Ready To Run ")
# -
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
# +
PATH='./vgg19.pth'
training_losses = []
training_accuracy = []
validation_losses = []
validation_accuracy = []
totalsteps = []
epochs = 10
steps = 0
running_loss = 0
print_every = 1
#epoch iteration
for epoch in range(epochs):
accuracy = 0
for inputs, labels in train_loader:
vgg19.train()
steps += 1
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad() # clears old gradients from the last step (otherwise you’d just accumulate the gradients from all loss.backward() calls).
logps = vgg19.forward(inputs)
loss = criterion(logps, labels)
loss.backward() # computes the derivative of the loss w.r.t. the parameters (or anything requiring gradients) using backpropagation
optimizer.step() # the optimizer to take a step based on the gradients of the parameters.
#Calculate traning accuracy
pred = torch.argmax(logps, dim=1)
correct = pred.eq(labels)
running_loss += loss.item()
accuracy += torch.mean(correct.float())
if steps % print_every == 0:
after_train_accuracy=accuracy/print_every
validation_loss = 0
accuracy = 0
vgg19.eval()
with torch.no_grad():
for inputs, labels in test_loader:
inputs, labels = inputs.to(device), labels.to(device)
logps = vgg19.forward(inputs)
batch_loss = criterion(logps, labels)
validation_loss += batch_loss.item()
pred = torch.argmax(logps, dim=1)
correct = pred.eq(labels)
accuracy += torch.mean(correct.float())
training_losses.append(running_loss/print_every)
training_accuracy.append(after_train_accuracy)
validation_losses.append(validation_loss/len(test_loader))
validation_accuracy.append(accuracy/len(test_loader))
totalsteps.append(steps)
print(f"Device {device} "
f"Epoch {epoch+1}/{epochs} "
f"Step {steps} "
f"Train loss: {running_loss/print_every:f} "
f"Train accuracy: {after_train_accuracy:f} "
f"Validation loss: {validation_loss/len(test_loader):f} "
f"Validation accuracy: {accuracy/len(test_loader):f}")
running_loss = 0
accuracy = 0
vgg19.train()
print('Finish Train')
torch.save(vgg19.state_dict(), PATH)
# -
#display stats in graph
plt.figure(figsize=(50, 10))
plt.plot(totalsteps, training_losses, label='Train Loss')
plt.plot(totalsteps, validation_losses, label='Validation Loss')
plt.legend()
plt.grid()
plt.show()
#display stats in graph
plt.figure(figsize=(50, 10))
plt.plot(totalsteps, training_accuracy, label='Training Accuracy')
plt.plot(totalsteps, validation_accuracy, label='Validation Accuracy')
plt.legend()
plt.grid()
plt.show()
# +
total = 0
correct=0
with torch.no_grad():
vgg19.eval()
for inputs, labels in test_loader:
inputs, labels = inputs.to(device), labels.to(device)
outputs = vgg19(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print(total)
print(correct)
print('Accuracy of the network test images: %d %%' % (100 * correct / total))
# -
y_true_tensor = torch.tensor([]).cuda()
y_pred_tensor = torch.tensor([]).cuda()
vgg19.eval()
for i,(inputs, labels) in enumerate(test_loader):
inputs, labels = inputs.to(device), labels.to(device)
outputs = vgg19(inputs)
_, preds = torch.max(outputs, 1)
y_true_tensor = torch.cat((y_true_tensor,labels))
y_pred_tensor = torch.cat((y_pred_tensor,preds))
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
y_true = y_true_tensor.tolist()
y_pred = y_pred_tensor.tolist()
matrix = confusion_matrix(y_true,y_pred)
matrix
test_set = torchvision.datasets.ImageFolder(test, transform=transform2)
target_names = list(test_set.class_to_idx.keys())
classify_report = classification_report(y_true, y_pred, target_names=target_names)
print(classify_report)
# +
import pandas as pd
import seaborn as sns
df_cm = pd.DataFrame(matrix, index=target_names, columns=target_names).astype(int)
heatmap = sns.heatmap(df_cm, annot=True, fmt="d")
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right',fontsize=15)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right',fontsize=15)
plt.ylabel('True label')
plt.xlabel('Predicted label')
# +
# def test_class_probabilities(googlenet, device, test_loader, which_class):
# googlenet.eval()
# actuals = []
# probabilities = []
# with torch.no_grad():
# for data, target in test_loader:
# data, target = data.to(device), target.to(device)
# output = googlenet(data)
# prediction = output.argmax(dim=1, keepdim=True)
# actuals.extend(target.view_as(prediction) == which_class)
# probabilities.extend(np.exp(output[:, which_class]))
# return [i.item() for i in actuals], [i.item() for i in probabilities]
# which_class = 4
# actuals, class_probabilities = test_class_probabilities(model, device, test_loader, which_class)
# fpr, tpr, _ = roc_curve(actuals, class_probabilities)
# roc_auc = auc(fpr, tpr)
# plt.figure()
# lw = 2
# plt.plot(fpr, tpr, color='darkorange',
# lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
# plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.title('ROC for digit=%d class' % which_class)
# plt.legend(loc="lower right")
# plt.show()
| 9,130 |
/scikit_learn/4_Regression/6_get_scaled_data.ipynb | 6a57be1c2a3b54f2f0d04a036176ba69f567c9b0 | [] | no_license | ragu6963/kfq_python_machine_learning | https://github.com/ragu6963/kfq_python_machine_learning | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 6,006 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import numpy as np
# +
x = [1,2,3,4]
y = [0.1, 0.3, 2, 1]
plt.plot(x,y)
plt.show()
# +
def linear_fit_func(x, a, b):
    """Straight-line model a*x + b, handed to scipy's curve_fit below."""
    return b + a * x
#curve fit takes function, x values and y values
#it will guess until it finds nice line
# c is a list of values for the args the function takes (a and b in this case)
# cov is covariance
c, cov = curve_fit(linear_fit_func, x, y)
# guess = [0.5, -0.5]
new_y = []
for val in x:
new_y.append(linear_fit_func(val, c[0], c[1]))
#can put *c if we dont know number of args for c
plt.plot(x,y)
plt.plot(x,new_y)
plt.show()
plt.show()
print(c)
# -
'o', 20: 'v'}
# Per-set-size and per-zipf plotting palettes (matplotlib color/marker codes),
# indexed by the value of the grouping column they style.
ss_colors = {10: 'b', 20: 'g', 30: 'r', 50: 'c'}
ss_markers = {10: '<', 20: 's', 30: 'o', 50: 'v'}
zipf_markers = {0.2: '<', 0.4: 's', 0.6: 'o', 0.8: 'v'}
zipf_colors = {0.2: 'b', 0.4: 'g', 0.6: 'r', 0.8: 'c'}
lines = [':', '-', ':', '-']
# NOTE(review): 'p' is not a valid single-letter matplotlib color — verify
# before this fifth entry is ever used.
colors = ['b','g','r','y','p']
markers = ['<', 's', 'o', 'v']
def process_data(file, groupby, filter_col=None, values=None, scale=1):
    """Load a benchmark CSV, derive throughput/commit-rate columns, and
    return the per-group means.

    :param file: path of the CSV (needs success, abort, total_time columns)
    :param groupby: column name(s) to average over
    :param filter_col: optional column whose rows matching `values` are dropped
    :param values: values of `filter_col` to exclude
    :param scale: divisor applied to throughput (e.g. 1000 for ktps)
    """
    frame = pd.read_csv(file)
    # total_time is in milliseconds; throughput = committed txns per second.
    seconds = frame.total_time / 1000
    frame['tp'] = frame.success / seconds / scale
    frame['rate'] = frame.success / (frame.success + frame.abort)
    if filter_col is not None:
        frame = frame.loc[~frame[filter_col].isin(values)]
    return frame.groupby(groupby, as_index=False).mean()
def draw(series, x_axis, y_axis, title, xlabel, ylabel, groupby, markers, colors, legend, name):
    """Plot y_axis against x_axis for each dataframe in *series*, drawing one
    line per groupby key.

    Marker and line style are chosen per series; color is chosen per group
    key. `lines` is the module-level line-style table.
    """
    plt.figure(figsize=(20, 10))
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    for serie_idx, serie in enumerate(series):
        for key_idx, (key, grp) in enumerate(serie.groupby(groupby)):
            plt.plot(
                grp[x_axis],
                grp[y_axis],
                label='{}: {} {}'.format(legend[serie_idx], name, key),
                marker=markers[serie_idx],
                linestyle=lines[serie_idx],
                markersize=10,
                color=colors[key_idx],
            )
    plt.legend()
    plt.show()
occ = process_data(occ_res_file, ['thread_num', 'zipf'], 'thread_num', [8, 1, 32])
nvm = process_data(nvm_res_file, ['thread_num', 'zipf'], 'thread_num', [8, 1, 32])
# Benchmark post-processing cell: load PNVM/OCC results and plot
# throughput / commit-rate curves.
# NOTE(review): process_data, draw and the *_pmem_file / *_set_size_file
# path variables are defined in other cells of this notebook -- confirm
# execution order before running this cell standalone.
# NOTE(review): process_data EXCLUDES the rows whose filter column is in
# the given value list (it uses ~isin), so [8, 4, 32] here removes those
# thread counts rather than keeping them.
occ_pmem = process_data(occ_pmem_file, ['thread_num', 'zipf'], 'thread_num', [8, 4, 32])
nvm_pmem = process_data(nvm_pmem_file, ['thread_num', 'zipf'], 'thread_num', [8, 4, 32])
# for tp and commit rate - VOLATILE
#draw([nvm, occ], 'zipf', 'tp', 'Througput', 'zipf', 'Txns / Second', ['thread_num'], markers, colors, ['PNVM Volatile', 'OCC Volatile'],'Threads' )
#draw([nvm, occ], 'zipf', 'rate', 'Commit Rate','zipf', 'Success/Total', ['thread_num'], markers, colors, ['PNVM Volatile', 'OCC Volatile'],'Threads' )
#draw([nvm, nvm_pmem, occ, occ_pmem], 'tp', 'Througput', 'Txns / Second', ['thread_num'], markers, colors, ['PNVM Volatile', 'PNVM Persist', 'OCC Volatile', 'OCC Persist'],'Threads' )
#draw(occ, occ_pmem, 'tp', 'Througput', 'Txns / Second', ['thread_num'], thd_markers, thd_colors, ['PNVM Volatile', 'PNVM Persist'],'Threads' )
#draw(occ, occ_pmem, 'rate', 'Commit Rate', 'success/total', ['thread_num'], thd_markers, thd_colors, ['PNVM Volatile', 'PNVM Persist'], 'Threads')
#occ_pc = process_data(occ_pc_num_file, ['pc_num','zipf'], 'pc_num', [10, 15])
#nvm_pc = process_data(nvm_pc_num_file, ['pc_num','zipf'], 'pc_num', [10, 15])
#draw(occ_pc, nvm_pc, 'tp', 'Throughput, 16 Threads, 30 ops/piece', 'Txns / Second', ['pc_num'], pc_markers, pc_colors,'Piece/Txn')
# Vary the number of operations per piece.
occ_ss = process_data(occ_set_size_file, ['set_size', 'zipf'], 'set_size', [20, 30])
nvm_ss = process_data(nvm_set_size_file, ['set_size', 'zipf'], 'set_size', [20, 30])
# 10 pieces
#draw(occ_ss, nvm_ss, 'tp', 'Throughput, 10 Pieces, 16 Threads', 'Txns/Second', ['set_size'], ss_markers, ss_colors,'Number of Ops/Piece')
# Scaling Results with Threads
nvm_thd_file = '../benchmark/nvm-thd.csv'
occ_thd_file = '../benchmark/occ-thd.csv'
occ_par_thd_file = '../benchmark/occ-par-thd.csv'
nvm_thd = process_data(nvm_thd_file, ['zipf', 'thread_num'], 'thread_num', [32])
occ_thd = process_data(occ_thd_file, ['zipf', 'thread_num'], 'thread_num', [32])
occ_par_thd = process_data(occ_par_thd_file, ['zipf', 'thread_num'], 'thread_num', [32])
#draw([nvm_thd],'thread_num','tp', 'Throughput', 'Thread NUm', 'Txns / Second', ['zipf'], markers, colors, ['PNVM Volatile'], 'Zipf')
#draw([occ_thd],'thread_num','tp', 'Throughput', 'Thread NUm', 'Txns / Second', ['zipf'], markers, colors, ['OCC Volatile'], 'Zipf')
#draw([occ_thd],'thread_num','rate', 'Throughput', 'Thread NUm', 'Txns / Second', ['zipf'], markers, colors, ['OCC Volatile'], 'Zipf')
#draw([occ_par_thd],'thread_num','tp', 'Throughput', 'Thread NUm', 'Txns / Second', ['zipf'], markers, colors, ['OCC-Par Volatile'], 'Zipf')
#draw([occ_par_thd],'thread_num','rate', 'Throughput', 'Thread NUm', 'Txns / Second', ['zipf'], markers, colors, ['OCC-Par Volatile'], 'Zipf')
# Compare OCC-Par vs OCC across zipf skew, one line per thread count.
occ = process_data(occ_thd_file, ['zipf', 'thread_num'], 'thread_num', [1,8,32])
occ_par = process_data(occ_par_thd_file, ['zipf', 'thread_num'], 'thread_num', [1,8,32])
draw([occ_par, occ], 'zipf', 'tp', 'Througput', 'zipf', 'Txns / Second', ['thread_num'], markers, colors, ['PNVM Volatile', 'OCC Volatile'],'Threads' )
draw([occ_par, occ], 'zipf', 'rate', 'Commit Rate','zipf', 'Success/Total', ['thread_num'], markers, colors, ['PNVM Volatile', 'OCC Volatile'],'Threads' )
# +
import re
from scipy import stats
def process(file, field):
    """Summarize the first float on every line of *file* containing *field*.

    Prints a header plus the scipy.stats describe result (nobs, min/max,
    mean, variance, skewness, kurtosis) of the collected values, and also
    returns that result so callers can use it programmatically.

    Lines that match *field* but carry no decimal number are skipped
    (previously such lines raised IndexError).
    """
    with open(file) as fh:  # was a bare open(): the handle was never closed
        matching = [line for line in fh if field in line]
    # raw string for the regex (r"...") avoids the invalid-escape warning
    found = [re.findall(r"\d+\.\d+", line) for line in matching]
    values = [float(groups[0]) for groups in found if groups]
    print('-----------{} -----------\n [{}]'.format(file, field))
    desc = stats.describe(values)
    print(desc)
    return desc
# Summarize the 'data' lines of each nvm profile (4/8/16 threads).
# NOTE(review): these relative paths only resolve when run from the
# notebook directory -- confirm the ../profile files exist.
files =['../profile/nvm.profile.4', '../profile/nvm.profile.8', '../profile/nvm.profile.16']
fields = ['data']
for file in files:
    for field in fields:
        process(file, field)
# +
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
# Shared plotting constants: per-thread-count color/marker maps and
# positional marker/linestyle/color cycles for multi-serie plots.
thd_colors = {4: 'b', 8: 'g', 16:'r', 32:'c'}  # color keyed by thread count
markers = ['<', 's', 'o', 'v']
lines = [':', '-', ':', '-']  # linestyle cycled per plotted serie
# NOTE(review): 'p' is not a valid matplotlib single-letter color; drawing
# a 5th serie would raise -- confirm whether 'm' or 'purple' was intended.
colors = ['b','g','r','y','p']
thd_markers = {4: '<', 8: 's', 16:'o', 32:'v'}  # marker keyed by thread count
def process_data(file, groupby, filter_col=None, values=None, scale=1, piece=False):
    """Load a benchmark CSV and return per-group mean metrics.

    Derived columns (computed per run, before averaging):
      tp      -- throughput: committed txns per second (each run lasts
                 10 seconds), optionally divided by *scale*
      rate    -- commit rate: piece-level counters when *piece* is True,
                 transaction-level counters otherwise
      tp-core -- throughput per worker thread

    Parameters
    ----------
    file : str            path of the CSV produced by the benchmark
    groupby : list[str]   columns to group by; means are taken per group
    filter_col, values :  if given, rows whose *filter_col* value is in
                          *values* are EXCLUDED before grouping
    scale : number        divisor applied to throughput
    piece : bool          use pc_success/pc_abort instead of success/abort

    Returns
    -------
    pandas.DataFrame with one row per group (``as_index=False``).
    """
    df = pd.read_csv(file)
    # Each benchmark run lasts 10 seconds, hence /10 for txns per second.
    df['tp'] = df.success/10 / scale
    if piece:
        df['rate'] = df.pc_success/ (df.pc_success + df.pc_abort)
    else:
        df['rate'] = df.success/ (df.success+df.abort)
    df['tp-core'] = df.tp / df.thread_num
    if filter_col is not None:  # identity test for None, not `!=` (PEP 8)
        df = df.loc[~df[filter_col].isin(values)]
    df_avg = df.groupby(groupby, as_index=False).mean()
    return df_avg
def draw_one_serie(serie,x_axis, y_axis_set, title, xlabel, ylabel, groupby, markers,colors, legend, name):
    """Plot several y-columns of one DataFrame against a shared x column.

    serie      : DataFrame holding the columns to plot
    x_axis     : name of the x column
    y_axis_set : list of y column names; one line is drawn per entry
    markers, colors : per-line marker/color, indexed by position
    groupby, legend, name : accepted for signature parity with
        draw_mul_serie but unused here; plt.legend() is still called
        although no labels are attached to the lines.
    Relies on the module-level `lines` linestyle list.
    """
    plt.figure(figsize=(13,7))
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    for idx, y_axis in enumerate(y_axis_set):
        # (removed dead `idx_key` counter and unused temporaries)
        plt.plot(serie[x_axis], serie[y_axis] ,
                 marker=markers[idx],
                 linestyle=lines[idx],
                 markersize=10,
                 color=colors[idx]
                )
        # annotate every data point with its value in thousands
        for xy in zip(serie[x_axis], serie[y_axis]):
            plt.annotate('---( %.2f K)' % (xy[1]/1000), xy=xy, textcoords='data')
    plt.legend()
    plt.show()
def draw_mul_serie(series,x_axis, y_axis, title, xlabel, ylabel, groupby, markers,colors, legend, name):
    """Plot one y-column from each of several DataFrames on shared axes.

    series  : list of DataFrames; one labelled line is drawn per frame
    legend  : list of per-serie legend prefixes, combined with *name*
    groupby : unused; kept for backward compatibility with existing calls
    Relies on the module-level `lines` linestyle list.
    """
    plt.figure(figsize=(11,6))
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # force integer tick positions taken from the first serie's x column
    plt.xticks(list(map(lambda x: int(x), series[0][x_axis])))
    for idx, serie in enumerate(series):
        # (removed dead `idx_key` counter and unused temporaries)
        plt.plot(serie[x_axis], serie[y_axis] ,
                 label= '{}: {}'.format(legend[idx], name),
                 marker=markers[idx],
                 linestyle=lines[idx],
                 markersize=10,
                 color=colors[idx],
                )
        # annotate every data point with its value in thousands
        for xy in zip(serie[x_axis], serie[y_axis]):
            plt.annotate('---( %.2f K)' % (xy[1]/1000), xy=xy, textcoords='data')
    plt.legend()
    plt.show()
# +
#########################
##### Intel Machine #####
#########################
# Plot styling for the Intel-machine figures.
# NOTE(review): markers/lines/colors re-define identically-valued globals
# from an earlier cell; kept so this cell also runs standalone.
markers = ['<', 's', 'o', 'v']
lines = [':', '-', ':', '-']
colors = ['b','g','r','y','p']
# Low contention
occ_vol = '../benchmark/intel/low-vol-occ-output.csv'
ppnvm_vol = '../benchmark/intel/low-vol-ppnvm-output.csv'
occ_pmem = '../benchmark/intel/low-pmem-occ-output.csv'
ppnvm_pmem = '../benchmark/intel/low-pmem-ppnvm-output.csv'
# Each variable is rebound from its csv path to the averaged DataFrame.
occ_vol = process_data(occ_vol, ['thread_num'])
ppnvm_vol = process_data(ppnvm_vol, ['thread_num'], piece=True)
occ_pmem = process_data(occ_pmem, ['thread_num'])
ppnvm_pmem = process_data(ppnvm_pmem, ['thread_num'], piece=True)
draw_mul_serie([occ_vol, ppnvm_vol, occ_pmem, ppnvm_pmem],
               'thread_num', 'tp', 'Througput at Low Contention', 'thread_num', 'Txns / Second',
               ['thread_num'], markers, colors, ['OCC-DRAM', 'PPNVM-DRAM', 'OCC-PMEM', 'PPNVM-PMEM'],'' )
draw_mul_serie([occ_vol, ppnvm_vol, occ_pmem, ppnvm_pmem],
               'thread_num', 'rate', 'Commit Rate at Low Contention', 'thread_num', 'Rate %',
               ['thread_num'], markers, colors, ['OCC-DRAM', 'PPNVM-DRAM', 'OCC-PMEM', 'PPNVM-PMEM'],'' )
print(ppnvm_pmem)
# High contention (fixed: the original comment repeated "Low contention",
# but the files below are the high-contention outputs)
occ_vol = '../benchmark/intel/high-vol-occ-output.csv'
ppnvm_vol = '../benchmark/intel/high-vol-ppnvm-output.csv'
occ_pmem = '../benchmark/intel/high-pmem-occ-output.csv'
ppnvm_pmem = '../benchmark/intel/high-pmem-ppnvm-output.csv'
occ_vol = process_data(occ_vol, ['thread_num'])
ppnvm_vol = process_data(ppnvm_vol, ['thread_num'], piece=True)
occ_pmem = process_data(occ_pmem, ['thread_num'])
ppnvm_pmem = process_data(ppnvm_pmem, ['thread_num'], piece=True)
draw_mul_serie([occ_vol, ppnvm_vol, occ_pmem, ppnvm_pmem],
               'thread_num', 'tp', 'Througput with High Contention', 'thread_num', 'Txns / Second',
               ['thread_num'], markers, colors, ['OCC-DRAM', 'PPNVM-DRAM', 'OCC-PMEM', 'PPNVM-PMEM'],'' )
# +
# TPCC results: low contention (1 warehouse) vs high contention (suffix 4).
#tpcc_occ_file = '../benchmark/tpcc-1.csv'
# data = process_data(tpcc_occ_file, ['thread_num'])
occ_tpcc_low = '../benchmark/tpcc-occ-1.csv'
pnvm_tpcc_low = '../benchmark/tpcc-pnvm-1.csv'
occ_tpcc_low = process_data(occ_tpcc_low, ['thread_num'])
pnvm_tpcc_low = process_data(pnvm_tpcc_low, ['thread_num'])
#draw_one_serie(data, 'thread_num', ['tp', 'new-order-tp'], 'Througput', 'Threads', 'Txns / Second', ['thread_num'], markers, colors, ['OCC VOlatile'],'Threads' )
#draw_one_serie(data, 'thread_num', ['tp-core', 'new-order-core'], 'Througput', 'Threads', 'Txns / Second', ['thread_num'], markers, colors, ['OCC VOlatile'],'Threads' )
# Low contention
draw_mul_serie([occ_tpcc_low, pnvm_tpcc_low], 'thread_num', 'tp', 'Througput', 'thread_num', 'Txns / Second', ['thread_num'], markers, colors, ['OCC', 'Pieced PP'],'' )
# High contention
occ_tpcc_high = '../benchmark/tpcc-occ-4.csv'
pnvm_tpcc_high = '../benchmark/tpcc-pnvm-4.csv'
occ_tpcc_high = process_data(occ_tpcc_high, ['thread_num'])
pnvm_tpcc_high = process_data(pnvm_tpcc_high, ['thread_num'])
print(occ_tpcc_high)
print(pnvm_tpcc_high)
draw_mul_serie([occ_tpcc_high, pnvm_tpcc_high], 'thread_num', 'tp', 'Througput', 'thread_num', 'Txns / Second', ['thread_num'], markers, colors, ['OCC', 'Pieced PP'],'' )
# -
| 12,120 |
/try_mahjong.ipynb | 5e0e422a8a6864831e25aa5e9c2193863e69df8e | [] | no_license | moemily123819/mahjong_game | https://github.com/moemily123819/mahjong_game | 0 | 0 | null | 2022-12-08T05:19:16 | 2019-07-26T19:52:57 | Python | Jupyter Notebook | false | false | .py | 5,824 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import warnings
warnings.filterwarnings("ignore") # specify to ignore warning messages
import pandas as pd
import matplotlib.pyplot as plt
import itertools as it
import datetime
import math
import statsmodels.api as sm
# +
xl = pd.ExcelFile('input_data.xlsx')
data = {sheet_name: xl.parse(sheet_name) for sheet_name in xl.sheet_names}
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
res = []
for name in xl.sheet_names:
mylist = map(list, zip(*data[name].values))
aux = list(it.chain(*mylist))
del aux[:24]
res.append(aux)
res = list(it.chain(*res))
days = [days for i in range(0, 4)]
days_repeated = days * 4
# +
# Tune Seasonal ARIMA model
# Define the p, d and q parameters to take any value between 0 and 2
p = d = q = range(0,2)
# Generate all different combinations of p, q and q triplets
pdq = list(it.product(p, d, q))
print(pdq)
# Generate all different combinations of seasonal p, q and q triplets
# Seasonality is one week (24*7 = 168 hours)
seasonal_pdq = [(x[0], x[1], x[2], 168) for x in list(it.product(p, d, q))]
print('Examples of parameter combinations for Seasonal ARIMA...')
print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[1]))
print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[2]))
print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[3]))
print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[4]))
# +
# Grid-search SARIMAX(p,d,q)x(P,D,Q,168) over the hourly series `res`
# (defined in an earlier cell) and record each model's AIC.
result_list = []
for param in pdq:
    for param_seasonal in seasonal_pdq:
        try:
            mod = sm.tsa.statespace.SARIMAX(res,
                                            order=param,
                                            seasonal_order=param_seasonal,
                                            enforce_stationarity=False,
                                            enforce_invertibility=False)
            results = mod.fit()
            print('ARIMA{}x{}168 - AIC:{}'.format(param, param_seasonal, round(results.aic,2)))
            # flat list of order / seasonal_order / AIC triples,
            # regrouped three-at-a-time in the next cell
            result_list.extend([param, param_seasonal, round(results.aic,2)])
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate while fit failures are skipped
            print('error')
            continue
print('Done!')
# +
print_result = zip(*[iter(result_list)]*3)
print_result.sort(key=lambda x: x[2])
print('Result summary:\n')
print('((p, d, q), (P, D, Q, S), AIC)')
print('------------------------------')
for item in print_result:
print item
| 2,591 |
/sumOfTwo.ipynb | 81b4fc54f6a9cfa5cf585cf116a7968928f0c249 | [] | no_license | nasolim/coding_questions | https://github.com/nasolim/coding_questions | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 2,464 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# You have two integer arrays, a and b, and an integer target value v. Determine whether there is a pair of numbers, one taken from a and the other from b, that can be added together to get a sum of v. Return true if such a pair exists, otherwise return false.
#
# array can be empty
# array can have "unlimited" size
# values can be positive or negative
#
def sumOfTwo(a, b, v):
    """Return True if some pair (one element of a, one element of b)
    sums to v; otherwise return False.  Either array may be empty."""
    b_values = set(b)
    return any((v - x) in b_values for x in a)
import unittest
class TestsumOfTwoMethod(unittest.TestCase):
    """Unit tests for sumOfTwo (pair-sum existence check)."""
    def test_with_viable_pair(self):
        # -5 (from a) + -3 (from b) == -8, so sumOfTwo must return True
        a = [0,0,-5,30212]
        b = [-10,40,-3,9]
        v = -8
        self.assertTrue(sumOfTwo(a,b,v))
    def test_without_viable_pair(self):
        # no element of a plus an element of b equals -8
        a = [0,0,-5,30212]
        b = [4,3,2]
        v = -8
        self.assertFalse(sumOfTwo(a,b,v))
unittest.main(argv=['first-arg-is-ignored'], exit=False)
| 1,216 |
/gnn_edge_pred-test.ipynb | ab3631588ffa6a1c7c4cbae0984690003aeabd2b | [] | no_license | ptigwe/champs | https://github.com/ptigwe/champs | 1 | 1 | null | null | null | null | Jupyter Notebook | false | false | .py | 15,319 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# __Defination__
#
# Let $\Omega$ be sample space of same random experiment.
#
# $X: \Omega \to \mathbb{R}$ is random variables.
#
# $\omega$ denote result of random experiment. $\omega \in \Omega$
#
# $X(\omega)$ denote value of random variable
# X = number of heads in two coin tosses
# +
from itertools import product
"""
computes the cartesian product of input iterables.
It is equivalent to nested for-loops.
For example, product(A, B) returns the same as ((x,y) for x in A for y in B).
Cartesian product: the Cartesian product of two sets A and B, denoted A × B,
is the set of all ordered pairs (a, b)
where a is in A and b is in B. In terms of set-builder notation, that is
A\times B=\{\,(a,b)\mid a\in A\ {\mbox{ and }}\ b\in B\,\}.
"""
# omega = set(product(['H', 'T'], repeat=2))
omega = list(product(['H', 'T'], repeat=10))
# -
l = len(omega)
print(l)
omega
# $P(X=0) = P({\omega \in \Omega | X(\omega) = 0}) = P({TT}) = \frac{1}{4}$
#
# $P(X=1) = P({HT, TH}) = \frac{2}{4}$
#
# $P(X=2) = \frac{1}{4}$
.csv']
def read_csvs(path):
    """Read every file named in the module-level ``req_files`` list from
    directory *path* and return the DataFrames as a tuple, in order."""
    return tuple(pd.read_csv(osp.join(path, fname)) for fname in req_files)
def get_data_list(path):
    """Load the CHAMPS csvs under *path* and return per-molecule records.

    Adds integer category codes for the coupling type (type_c) and the
    atom symbol (atom_c) before grouping.
    NOTE(review): group_structures is defined in another cell; presumably
    it groups the four frames by molecule name -- confirm.
    """
    train_df, structures, mulliken, potential = read_csvs(path)
    train_df['type'] = train_df['type'].astype('category')
    train_df['type_c'] = train_df['type'].cat.codes
    structures['atom'] = structures['atom'].astype('category')
    structures['atom_c'] = structures['atom'].cat.codes
    return list(group_structures(train_df, structures, mulliken, potential).values())
# -
def to_data(first):
    """Convert one grouped molecule record into a torch_geometric Data.

    Parameters
    ----------
    first : sequence of four DataFrames, presumably
            (atom pairs, structures, mulliken charges, potential energy)
            as produced by group_structures -- TODO confirm ordering.

    Returns
    -------
    Data with every bond duplicated as two directed edges.  The
    scalar_coupling target is zero-filled (test-set variant: the true
    coupling constants are unknown here).
    """
    src, dst = first[0].atom_index_0, first[0].atom_index_1
    # duplicate each undirected pair as two directed edges (src->dst, dst->src)
    src, dst = np.concatenate((src, dst)), np.concatenate((dst, src))
    edge_idx = np.stack((src, dst))
    # zero placeholder target, one value per directed edge
    scalar_coupling = np.zeros((edge_idx.shape[1], 1))
    edge_types = np.concatenate((first[0].type_c.values, first[0].type_c.values))
    # structures frame: columns 3:-1 are x/y/z, last column is the atom code
    # (assumes column order molecule_name, atom_index, atom, x, y, z, atom_c
    #  -- confirm against read_csvs)
    xyz, atom = first[1].iloc[:, 3:-1].values, first[1].iloc[:, -1].values
    mul_charge = first[2].iloc[:, -1].values
    # removed leftover debug `print(first[3])` that spammed stdout once per molecule
    data = Data(pos=torch.FloatTensor(xyz),
                edge_index=torch.LongTensor(edge_idx),
                edge_types=torch.LongTensor(edge_types),
                atom=torch.LongTensor(atom),
                charge=torch.FloatTensor(mul_charge),
                energy=torch.FloatTensor(first[3].potential_energy.values),
                batch_edge_index=torch.zeros(edge_types.shape, dtype=torch.long),
                scalar_coupling=torch.FloatTensor(scalar_coupling))
    return data
class Complete(object):
    """Transform that replaces ``data.edge_index`` with the complete
    directed graph over the data's nodes: every ordered pair of distinct
    nodes, i.e. no self-loops.  Returns the same Data object."""

    def __init__(self):
        pass

    def __call__(self, data):
        node_pairs = itertools.permutations(range(data.num_nodes), 2)
        full_edges = np.array(list(node_pairs)).T
        data.edge_index = torch.LongTensor(full_edges)
        return data
class Squeeze_Edge_Types(object):
    """Transform that drops every singleton dimension from
    ``data.edge_types`` and returns the same Data object."""

    def __init__(self):
        pass

    def __call__(self, data):
        data.edge_types = torch.squeeze(data.edge_types)
        return data
# +
import torch
from torch_geometric.data import InMemoryDataset, Data
import torch_geometric.transforms as T
class MyOwnDataset(InMemoryDataset):
    """In-memory torch_geometric dataset built from the CHAMPS csv files.

    The csvs are read once in process(); the collated tensors are cached
    under processed_paths[0] and simply reloaded on later constructions.
    """
    def __init__(self, root, transform=None, pre_transform=None):
        super(MyOwnDataset, self).__init__(root, transform, pre_transform)
        self.data, self.slices = torch.load(self.processed_paths[0])
    @property
    def raw_file_names(self):
        # csv files expected under root/; their presence skips downloading
        return ['structures.csv', 'mulliken_charges.csv', 'test.csv', 'magnetic_shielding_tensors.csv']
    @property
    def processed_file_names(self):
        # cache file name -- note this is the *test* split variant
        return ['data_test.pt']
    def _download(self):
        # data is provided locally; nothing to fetch
        pass
    def process(self):
        """Build Data objects from the csvs, filter/transform, and cache."""
        data_list = get_data_list(self.root)
        data_list = [to_data(data) for data in data_list]
        if self.pre_filter is not None:
            data_list = [data for data in data_list if self.pre_filter(data)]
        if self.pre_transform is not None:
            data_list = [self.pre_transform(data) for data in data_list]
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
# +
# #!rm data/processed/data.pt
# -
dataset = MyOwnDataset('data', transform=T.Compose([T.Distance()]))#, transform=Complete())
dataset[0]['scalar_coupling']
size = len(dataset)
train = int(.5 * size)
valid = int(.3 * size)
# Normalize targets to mean=0 and std=1
mean = dataset.data.energy.mean(dim=0, keepdim=True)
std = dataset.data.energy.std(dim=0, keepdim=True)
dataset.data.energy = (dataset.data.energy - mean) / std
# Normalize targets to mean=0 and std=1
mean = dataset.data.scalar_coupling.mean(dim=0, keepdim=True)
std = dataset.data.scalar_coupling.std(dim=0, keepdim=True)
dataset.data.scalar_coupling = (dataset.data.scalar_coupling - mean) / std
mean, std = (torch.tensor([15.9159]), torch.tensor([34.9347]))
train_mask = torch.FloatTensor(len(dataset)).uniform_() > 0.3
train_mask.sum() / float(train_mask.size(0))
train_dataset = dataset[train_mask]
valid_dataset = dataset[~train_mask]
# ## Simple Model
# +
import torch
import torch.nn.functional as F
from torch_geometric.nn import NNConv, Set2Set, GCNConv
from torch_geometric.data import DataLoader
from torch.nn import Sequential, Linear, ReLU, GRU, Embedding, LeakyReLU
# -
train_loader = DataLoader(dataset, batch_size=1, shuffle=True)
dim = 64
class Net(torch.nn.Module):
    """Message-passing regressor producing one value per directed edge.

    Node inputs (10-dim): embedded 3-D position (3->5) concatenated with
    an atom-type embedding (5 types -> 5).  Edge inputs (10-dim): bond
    type embedding (8 types -> 5) concatenated with an embedded distance
    (1->5).  Six NNConv+GRU message-passing rounds refine node states;
    the two endpoint states of every edge are then concatenated and
    decoded by a two-layer MLP.  Uses the module-level constant `dim`.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.lin0 = torch.nn.Linear(10, dim)
        self.pos_emb = Linear(3, 5)
        self.atom_emb = Embedding(5, 5)
        self.edge_emb = Embedding(8, 5)
        self.dist_emb = Linear(1, 5)
        # edge-conditioned filter network: maps a 10-dim edge feature to
        # the dim x dim weight matrix used inside NNConv
        nn = Sequential(Linear(10, 128), LeakyReLU(0.1), Linear(128, dim * dim))
        self.conv = NNConv(dim, dim, nn, aggr='mean')
        self.gru = GRU(dim, dim)
        # NOTE(review): set2set is constructed but never used in forward()
        # (its call is commented out below) -- dead parameters, confirm intent
        self.set2set = Set2Set(dim, processing_steps=6)
        self.lin1 = torch.nn.Linear(2 * dim, dim)
        self.lin2 = torch.nn.Linear(dim, 1)
    def forward(self, data):
        # build 10-dim node inputs: embedded xyz position + atom embedding
        pos = self.pos_emb(data.pos)
        atom_emb = self.atom_emb(data.atom)
        x = torch.cat((pos, atom_emb), dim=1)
        out = F.relu(self.lin0(x))
        h = out.unsqueeze(0)
        # build 10-dim edge inputs: bond-type embedding + embedded distance
        # (data.edge_attr is presumably the 1-dim distance added by the
        #  T.Distance() transform -- confirm against the dataset setup)
        edge_emb = self.edge_emb(data.edge_types)
        edge_dist = self.dist_emb(data.edge_attr)
        edge_attr = torch.cat((edge_emb, edge_dist), dim=1)
        # six rounds of message passing with a GRU state update
        for i in range(6):
            m = F.relu(self.conv(out, data.edge_index, edge_attr))
            out, h = self.gru(m.unsqueeze(0), h)
            out = out.squeeze(0)
        # gather [src0,dst0,src1,dst1,...] node states, then reshape so
        # each row is concat(src_state, dst_state) for one edge
        x = torch.index_select(out, 0, data.edge_index.T.contiguous().view(-1))
        x = x.view((data.edge_index.shape[1], -1))
        #out = self.set2set(out, data.batch)
        out = F.relu(self.lin1(x))
        out = self.lin2(out)
        return out.view(-1)
net = Net()
b = next(iter(train_loader))
b
b.batch_edge_index.shape
net(b).shape
from fastprogress.fastprogress import master_bar, progress_bar
from collections import deque
n = torch.load('model_0.0360_0.03757.pt', map_location=torch.device('cpu'))
res = []
import tqdm
with torch.no_grad():
for b in tqdm.tqdm(train_loader):
r = n(b).view(2, -1).mean(dim=0)
res.append(r)
#print(i)
res_ = torch.cat(res)
mean, std
r = res_.cpu().detach() * std + mean
df = pd.read_csv('data/test.csv')
df.shape, r.shape
df['scalar_coupling_constant'] = r
df.head()
df[['id', 'scalar_coupling_constant']].to_csv('sample_submission.csv', index=False)
df.shape
df.molecule_name.nunique()
# !wc -l head data/sample_submission.csv
# !mv /home/tobenna/Downloads/test.csv.zip .
# !unzip test.csv.zip
# !wc -l data/test.csv
sdf = pd.read_csv('data/sample_submission.csv')
sdf.merge(df).shape
| 8,091 |
/DetectSky.ipynb | 1d6c188a7e1e887b4eeb800732048d20d5ba7f82 | [] | no_license | skipperuzumaki/Machiene_Learning | https://github.com/skipperuzumaki/Machiene_Learning | 4 | 1 | null | null | null | null | Jupyter Notebook | false | false | .py | 12,174 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import mne
import numpy as np
import matplotlib.pyplot as plt
import scipy
import os
from collections import OrderedDict
import seaborn as sns
import pandas as pd
import gzip
from scipy.signal import filtfilt, butter
import pickle
data_path = 'ssvep_dataset/'
subj_list = os.listdir(data_path)
records = {k: [] for k in subj_list}
for subj in subj_list:
record_all = os.listdir(data_path+subj+'/')
n = len(record_all)//4#number of records of a given subject
for i in range(n):
records[subj].append(record_all[i*4][:28])
#choose a subject and a record of that subject
chosen_subject_index = 0 #suject12
chosen_subject = subj_list[chosen_subject_index]
print("The chosen subject is",chosen_subject,". He has",len(records[chosen_subject]),"records")
session_index = 0 #the first session of the subject12
print("The chosen session was done on",records[chosen_subject][session_index][8:27])
tmin=2
tmax=5
event_id = dict(resting=1, stim13=2, stim17=3, stim21=4)#define the events
freq_events={'resting':0,'stim13':13,'stim17':17,'stim21':21}
sfreq = 256
channels = np.array(['Oz','O1','O2','PO3','POz','PO7','PO8','PO4'])
names=['resting','stim13','stim21','stim17']
# +
#loading data
fname = chosen_subject+'/'+records[chosen_subject][session_index]
with gzip.open(data_path + fname + '.pz', 'rb') as f:
o = pickle.load(f, encoding='latin1')
raw_signal = o['raw_signal'].T
event_pos = o['event_pos'].reshape((o['event_pos'].shape[0]))
event_type = o['event_type'].reshape((o['event_type'].shape[0]))
# -
raw_signal.shape
t= 0
while event_type[t] != 32779: #begining of first trial
t+=1
start = event_pos[t]
print("The first tial begins at ", start/sfreq, "s ")
# ## Extracting Covariance Matrices
#
# For a given subject and a given session:
# - Step 0 : Let $X \in \mathbb{R}^{C \times M}$ the multichannel raw signal where $C=8$ is the number of electrods/channels and $M =192224$ is the total duration of the recording
# - Step 1 : For each stimulus frequency $f \in \mathcal{F}:=\{13,17,21\}$, compute $X_f \in \mathbb{R}^{C \times M}$ where $X_f$=filtered($X,f$) the result of filtering $X$ with a pass_band filter concentrated on $f$
# - Step 2 : concatenat all to get the extended signal $X_{ext} = [X_f \mbox{ for } f \in \mathcal{F}]^T$. So, $X_{ext} \in \mathbb{R}^{FC \times M}$ where $F=3$ and then $FC=24$
# - Step 3 : keep only interesting time intervels corresponding to epochs to get $X_{ext}^{(i)}$ for $i \in [\![1,I]\!]$ where $I$ is the number of trials (Here $I=32$). So, $X_{ext}^{(i)} \in \mathbb{R}^{FC \times N}$ where $N$ is the length of a trial (Here, $N=f_s \times \Delta \mbox{epoch } = 256 \times 3 =768$). For each trial $i$, we know the associated class $k_i \in \{\mbox{resting,stim13,stim21,stim17}\}$
#The Butterworth filter : band-pass filter, flat in the passband , the passband is concentrated on
def filter_bandpass(signal, fmin, fmax, fs, order=4, filttype='forward-backward'):
    """Band-pass Butterworth filter concentrated on [fmin, fmax] Hz.

    Parameters
    ----------
    signal : array-like, filtered along its last axis
    fmin, fmax : pass-band edges in Hz
    fs : sampling frequency in Hz
    order : Butterworth order (applied twice in forward-backward mode)
    filttype : 'forward-backward' (default) gives zero-phase filtfilt;
               any other value applies a single causal forward pass.
               (Previously this parameter was accepted but ignored.)

    Returns
    -------
    ndarray with the same shape as *signal*.
    """
    nyq = 0.5 * fs
    low = fmin / nyq
    high = fmax / nyq
    b, a = butter(order, [low, high], btype='band')
    if filttype != 'forward-backward':
        # honor the parameter: causal single-pass alternative
        from scipy.signal import lfilter
        return lfilter(b, a, signal, axis=-1)
    # forward-backward: zero phase distortion (default behavior unchanged)
    filtered = filtfilt(b, a, signal, axis=-1)
    return filtered
frequencies = [13., 17., 21.]
freq_band = 0.1
ext_signal = np.empty_like(raw_signal[0,:])
for f in frequencies:
ext_signal = np.vstack((ext_signal, filter_bandpass(raw_signal, f-freq_band, f+freq_band, fs=sfreq)))
ext_signal = ext_signal[1:,:]
ext_signal.shape
ext_trials = list()
for e, t in zip(event_type, event_pos):
if e == 32779: # start of a trial
start = t + tmin*sfreq
stop = t + tmax*sfreq
ext_trials.append(ext_signal[:, start:stop])
ext_trials = np.array(ext_trials)
ext_trials = ext_trials - np.tile(ext_trials.mean(axis=2).reshape(ext_trials.shape[0],
ext_trials.shape[1], 1), (1, 1, ext_trials.shape[2]))
ext_trials.shape
labels = []
for e in event_type:
if e==33024:#resting
labels.append(0)
if e==33025:#stim13
labels.append(1)
if e==33026:#stim21
labels.append(2)
if e==33027:#stim17
labels.append(3)
print(labels)
len(labels)
#visualisation of the extended signals of a given trial
trial = 4
n_seconds = 3
time = np.linspace(0, n_seconds, n_seconds * sfreq).reshape((1, n_seconds * sfreq))
fig, axs = plt.subplots(4,2,figsize=(15,15))
axs =axs.flatten()
for i in range(8):
axs[i].plot(time.T, ext_trials[trial, i, :].T, label=str(frequencies[0])+' Hz')
axs[i].plot(time.T, ext_trials[trial, 8+i, :].T, label=str(frequencies[1])+' Hz')
axs[i].plot(time.T, ext_trials[trial, 16+i, :].T, label=str(frequencies[2])+' Hz')
if i%2==0:
axs[i].set_ylabel("$\\mu$V")
axs[i].set_title(channels[i])
axs[i].legend(loc='upper left')
axs[i-1].set_xlabel('Time (s)')
axs[i].set_xlabel('Time (s)')
# **Question:** Is it possible to extract epochs then filter then then stack them to get the extended trials?
#
# **>> Answer:** NO ! Border Effects
# +
events=[] #stock the starting time of an event and its type (only resting, stim13, stim21 and stim17)
for i in range(len(event_pos)):
if event_type[i]==33024: #resting
events.append([event_pos[i+1],1])
if event_type[i]==33025: #stim13
events.append([event_pos[i+1],2])
if event_type[i]==33026: #stim21
events.append([event_pos[i+1],3])
if event_type[i]==33027: #stim17
events.append([event_pos[i+1],4])
#extract epochs
epochs=[]
for i in range(len(events)):
epochs.append(raw_signal[:, events[i][0]+sfreq*tmin:events[i][0]+sfreq*tmax])
epochs = np.asarray(epochs)
print("Epochs shape =",epochs.shape)
#filter epochs
filtered_epochs= [np.empty_like(epochs) for f in frequencies]
for k in range(len(frequencies)):
for i in range(len(epochs)):
f =frequencies[k]
X= epochs[i,:,:]
X = X * np.hanning(X.shape[-1])[None]
filtered_epochs[k][i,:,:] = filter_bandpass(X, f-freq_band, f+freq_band, fs=sfreq)
extended = np.zeros((epochs.shape[0],epochs.shape[1],len(frequencies),epochs.shape[2]))
for f in range(len(frequencies)):
for i in range(epochs.shape[0]): #32
for j in range(extended.shape[1]): #24
extended[i,j,f,:] = filtered_epochs[f][i,j,:]
extended =extended.reshape((epochs.shape[0],epochs.shape[1]*len(frequencies),epochs.shape[2]))
print("Extended trials shape", extended.shape)
# -
#visualisation of the extended signals of a given trial
trial = 4
n_seconds = 3
time = np.linspace(0, n_seconds, n_seconds * sfreq).reshape((1, n_seconds * sfreq))
fig, axs = plt.subplots(4,2,figsize=(15,15))
axs =axs.flatten()
for i in range(8):
axs[i].plot(time.T, extended[trial, i, :].T, label=str(frequencies[0])+' Hz')
axs[i].plot(time.T, extended[trial, 8+i, :].T, label=str(frequencies[1])+' Hz')
axs[i].plot(time.T, extended[trial, 16+i, :].T, label=str(frequencies[2])+' Hz')
if i%2==0:
axs[i].set_ylabel("$\\mu$V")
axs[i].set_title(channels[i])
axs[i].legend(loc='upper left')
axs[i-1].set_xlabel('Time (s)')
axs[i].set_xlabel('Time (s)')
# ## Spatial covariance estimation
#
# We recall $\overline{x}^{(i)} = \frac{1}{N} \sum_{n=1}^N x_n^{(i)} = \frac{1}{N} X^{(i)} \ U $
# where $U =[1,...,1]^T \in \mathbb{R}^N$.
#
# The SCM is given by:
# \begin{equation}
# \begin{split}
# C_i & = \frac{1}{N-1} \sum_{n=1}^N (x_n^{(i)}-\overline{x})(x_n^{(i)}-\overline{x})^T \\
# & = \frac{1}{N-1} \left(\sum_{n=1}^N x_n^{(i)}x_n^{(i)T }- N \overline{x}\ \overline{x}^T \right)\\
# & = \frac{1}{N-1} \left(X^{(i)}X^{(i)T} - \frac{1}{N} (X^{(i)} \ U) (X^{(i)} \ U)^T \right)\\
# & = \frac{1}{N-1} X^{(i)} \left(I_N- \frac{1}{N} U \ U^T \right) X^{(i)T}
# \end{split}
# \end{equation}
# +
from pyriemann.estimation import Covariances
cov_ext_trials = Covariances(estimator='scm').transform(ext_trials)
cov_ext_trials.shape
#cov_ext_trials
# -
#define scm without pyriemann and compare
cov = np.zeros((32,24,24))
N = ext_trials.shape[2] #768
print('N=',N)
U=np.ones((N,1))
for i in range(32):
cov[i,:,:]= (1/(N-1))*ext_trials[i,:,:]@(np.eye(N)-(1/N)*U@U.T)@ext_trials[i,:,:].T
print(cov.shape)
print('Relative Difference in L^2 norm =', np.linalg.norm(cov-cov_ext_trials)/np.linalg.norm(cov))
#plot the cov for a given trial
plt.figure()
trial = 0
plt.imshow(cov_ext_trials[trial, :, :],
cmap=plt.get_cmap('RdPu'),
interpolation='nearest')
plt.xticks([])
_ = plt.yticks([0, 7, 15, 23])
plt.title('First trial')
# +
covs = [np.zeros(cov.shape[1:]) for i in range(4)]
samples=[0 for i in range(4)]
for i in range(cov.shape[0]):
for k in range(4):
if labels[i]==k:
covs[k] += cov_ext_trials[i,:,:]
samples[k]+=1
covs = [covs[k]/samples[k] for k in range(4) ]
fig,axs = plt.subplots(2,2,figsize=(10,10))
axs = axs.flatten()
for k in range(4):
axs[k].imshow(covs[k], cmap=plt.get_cmap('RdPu'), interpolation='nearest')
cond =np.linalg.cond(covs[k])
axs[k].set_title(names[k]+'\n condition number ='+str(round(cond,3)))
axs[k].set_xticks([])
axs[k].set_yticks([0, 7, 15, 23])
plt.show()
# -
# ## Classifying extended SSVEP covariance matrices
# +
m= 28 #nb of training samples
random_shuffle = True
new_cov_ext_trials = np.empty_like(cov_ext_trials)
new_labels= []
indx = list(range(32))
if random_shuffle:
np.random.shuffle(indx)
for i in range(32):
new_cov_ext_trials[i,:,:] = cov_ext_trials[indx[i],:,:]
new_labels.append(labels[indx[i]])
x_train = new_cov_ext_trials[:m,:,:]
y_train = new_labels[:m]
x_test = new_cov_ext_trials[m:,:,:]
y_test = new_labels[m:]
# -
print(indx)
y_test
x_train.shape , x_test.shape
# +
from pyriemann.utils.mean import mean_riemann
classes = list(range(4))
cov_centers = np.empty((4, 24, 24))
x_trains=[[],[],[],[]]
for i in range(4):
for j in range(m):
if y_train[j]==i:
x_trains[i].append(x_train[j,:,:])
for i in range(4):
x_trains[i]=np.asarray(x_trains[i])
for i in range(4):
cov_centers[i, :, :] = mean_riemann(x_trains[i])
# -
plt.figure(figsize=(7, 7))
for i, l in enumerate(names):
plt.subplot(2, 2, i+1)
plt.imshow(cov_centers[i, :, :], cmap=plt.get_cmap('RdPu'), interpolation='nearest')
cond = np.linalg.cond(cov_centers[i,:,:])
_ = plt.title(l+'/'+str(cond))
# +
from pyriemann.utils.distance import distance_riemann
classes=list(range(4))
accuracy = list()
for sample, true_label in zip(x_train, y_train):
dist = [distance_riemann(sample, cov_centers[m]) for m in range(4)]
if classes[np.array(dist).argmin()] == true_label:
accuracy.append(1)
else: accuracy.append(0)
train_accuracy = 100.*np.array(accuracy).sum()/len(y_train)
print ('Evaluation accuracy on train set is %.2f%%' % train_accuracy)
# +
accuracy = list()
for sample, true_label in zip(x_test, y_test):
dist = [distance_riemann(sample, cov_centers[m]) for m in range(4)]
if classes[np.array(dist).argmin()] == true_label:
accuracy.append(1)
else: accuracy.append(0)
test_accuracy = 100.*np.array(accuracy).sum()/len(y_test)
print ('Evaluation accuracy on test set is %.2f%%' % test_accuracy)
# -
| 11,485 |
/22 June 2019.ipynb | d769d2a4696f1283c393a9db6a5ab5a4cf28b675 | [] | no_license | 230591/Python-June-2019 | https://github.com/230591/Python-June-2019 | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 11,231 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
# Exploratory scratch cell: load Chrome browser history JSON.
import pandas as pd
# NOTE(review): pandas.io.json.json_normalize is a deprecated import path
# (moved to pandas.json_normalize) and is unused in this cell.
from pandas.io.json import json_normalize
data = pd.read_json("Chrome_Data/Chrome/BrowserHistory.json")#, orient='columns')
df = pd.DataFrame()
# NOTE(review): df was just created empty, so df['url'] raises KeyError
# here -- presumably left over from an earlier state of the cell.
print(df['url'])
#c = pd.DataFrame(data).iloc[0]
#print(data)
#print(type(data))
#create dataframe with a column labeled url, column time, column title, write for loop (regex) to extract
#these features, clean url if chrome extension, place into dataframe, -> hash tables relate to series
#single table aka df.iloc[0] is a series
#dataframe operations, how to turn a list of series into a dataframe
#how to access the value given a series and a certain key within that series/dictionary
#what is the definition of a dictionary
#concat(objs[, axis, join, join_axes, ...])
# -
def timeExtract(file):
    """Extract the 'url' field from each record in *file*.

    *file* is expected to be an indexable sequence of mapping-like
    records (e.g. the series making up the browser-history frame).
    Records lacking a 'url' key yield None.

    NOTE(review): the original body was an unfinished stub with a syntax
    error (``df['url'] = #...``); this completes the stated intent of
    pulling the url out of each record.  Extending it to the 'time' and
    'title' columns mentioned in the cell's TODO notes is still pending.
    """
    urls = []
    for i in range(len(file)):
        record = file[i]
        urls.append(record['url'] if 'url' in record else None)
    return urls
| 1,154 |
/2_Docking_analysis/.ipynb_checkpoints/VS_EDA_docking_CDK2 (copia)-checkpoint.ipynb | ae6b30aeddc8d0af20fe6818745961643a3e94ed | [] | no_license | jRicciL/CDK2_notebooks | https://github.com/jRicciL/CDK2_notebooks | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 3,560,562 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # VS Ensemble Docking CDK2
# ## Análisis de datos exploratorio
#
# - 111 ligandos
# - 27 Activos
# - 402 Conformaciones de la porteína CDK2
# - 402 Conformaciones de la proteína CDK2
# - 3 progrmas de acoplamiento molecular
#
# ### Contenido:
# - Pendiente agregar las secciones
import pandas as pd
import numpy as np
# ### Carga de los datos
# Directorio
data_dir = './data'
# Carga de los dataFrames
# Autodock 4. LE (lowest energy), LC (largest Cluster)
df_ad4_results_LE = pd.read_csv(F'{data_dir}/vs_docking_crys_ensemble_AD4_LE.csv', index_col=0)
df_ad4_results_LC = pd.read_csv(F'{data_dir}/vs_docking_crys_ensemble_AD4_LC.csv', index_col=0)
# Autodock Vina
df_vina_results = pd.read_csv(F'{data_dir}/vs_docking_crys_ensemble_VINA.csv', index_col=0)
# Vinardo scoring
df_vinardo_results = pd.read_csv(F'{data_dir}/vs_docking_crys_ensemble_VINARDO.csv', index_col=0)
# ### Datos de Vina y Vinardo
# <div class="p-3 mb-2 bg-danger text-dark">Conformaiones minimizadas de las 402 estructuras.</div>
# Autodock Vina
df_vina_results_MIN = pd.read_csv(F'{data_dir}/vs_docking_CSAR_vs_402_MIN_vina.csv', index_col=0)
# Vinardo scoring
df_vinardo_results_MIN = pd.read_csv(F'{data_dir}/vs_docking_CSAR_vs_402_MIN_vinardo.csv', index_col=0)
# Todas los dataFrames poseen 111 filas (111 ligandos) y 403 columnas, correspondientes a la etiqueta de actividad/inactividad, y a las 402 conformaciones.
#
# ### Datos CDK2 de CSAR
# Las etiquetas de actividad y la información sobre los 111 ligandos se encuentran en el siguiente [enlace](https://drugdesigndata.org/about/datasets/220).
#
# ### Comenzamos el análisis de los datos
# Identificamos a los ligandos que son activos
activos = df_vina_results.ActiveInactive == "Active"
import matplotlib.pyplot as plt
import seaborn as sns
# Definimos una función para graficar boxplots
def plot_boxplot_dock_results(df, titulo=''):
    """Draw per-ligand boxplots of docking scores across all conformations.

    Columns [2:-1] of `df` are taken as the per-conformation scores; grey
    vertical bands are drawn over the same fixed ligand index ranges as
    before. Displays the figure and returns nothing.
    """
    scores = df.iloc[:, 2:-1]
    num_ligs, num_conf = scores.shape
    fig, ax = plt.subplots(figsize=(20, 5))
    sns.set_style("white")
    sns.set_context("paper")
    # Shaded vertical bands over fixed ligand index ranges.
    for band_start, band_end in ((0, 3), (5, 16), (101, 110)):
        ax.axvspan(band_start, band_end, color=sns.xkcd_rgb['grey'], alpha=0.3)
    sns.boxplot(ax=ax, data=scores.T)
    plt.title(F"{titulo}: Boxplots - {num_ligs} ligandos - {num_conf} conformaciones", size=20)
    plt.xticks(rotation=90, size=11)
    ax.set_ylabel(F'{titulo} Score (kcal/mol)', fontsize=18)
    ax.set_xlabel('Ligandos CSAR', fontsize=18)
    plt.show()
plot_boxplot_dock_results(df_ad4_results_LE, titulo="AD4 LE")
plot_boxplot_dock_results(df_vinardo_results, titulo="vinardo")
# En cada gráfica, se muestran los 111 ligandos y sus scores para cada una de las 402 conformaciones de la proteína.
# # Curvas ROC
# Librerías para calcular las ROC y el AUC
from sklearn.metrics import roc_curve, roc_auc_score
# Etiquetas de los ligandos, **1 = Activo**.
# Binary ground-truth labels for the 111 CSAR ligands (1 = Active, 0 = Inactive).
true_values = np.array(df_vina_results.ActiveInactive == "Active", dtype=int)
print(true_values)
# Compute the **AUC** per docking program from the mean score of each ligand
# across the protein conformations ("rank-by-number" consensus).
# NOTE(review): `mean(axis=1)` averages over all columns; this assumes the
# non-numeric ActiveInactive column is skipped by pandas — confirm for the
# pandas version in use.
sc_promedio_vrd = np.array(df_vinardo_results.mean(axis=1))
sc_promedio_vina = np.array(df_vina_results.mean(axis=1))
sc_promedio_ad4_LE = np.array(df_ad4_results_LE.mean(axis=1))
sc_promedio_ad4_LC = np.array(df_ad4_results_LC.mean(axis=1))
# **ROC curves:**
print('AUC del valor PROMEDIO (402 estructuras):')
# Docking scores are "lower is better", hence the sign flip before scoring.
print("AD4 LE:",
      roc_auc_score( y_true = true_values, y_score = (- sc_promedio_ad4_LE)) )
print("AD4 LC:",
      roc_auc_score( y_true = true_values, y_score = (- sc_promedio_ad4_LC)) )
print("Vina 16x:",
      roc_auc_score( y_true = true_values, y_score = (- sc_promedio_vina)) )
print("Vinardo 16x:",
      roc_auc_score( y_true = true_values, y_score = (- sc_promedio_vrd)) )
# **Gráficas de las ROC.**
# +
# Establecemos los parámetros para graficar
import pylab
pylab.rcParams['figure.figsize'] = (7, 7)
sns.set( context = 'talk', style = 'white', palette = "Set2")
def add_plot_roc(predicted_values, true_values, label=''):
    """Add one ROC curve to the current matplotlib axes.

    Docking scores are "lower is better", so they are negated before being
    used as classification scores; the AUC is appended to the legend label.
    """
    scores = np.negative(predicted_values)
    fpr, tpr, _thresholds = roc_curve(true_values, scores)
    auc = roc_auc_score(y_true=true_values, y_score=scores)
    curve_label = label + ' AUC = %0.2f' % auc
    plt.plot(fpr, tpr, label=curve_label, lw=4)
def plot_roc(predicted_values, true_values, label='', titulo="ROC curve"):
    """Plot a single ROC curve with a chance diagonal, then show the figure."""
    sns.color_palette("Paired")
    # Draw the curve itself (AUC computation is delegated to the helper).
    add_plot_roc(predicted_values, true_values, label)
    plt.legend()
    # Reference diagonal for a random classifier.
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlabel("FPR (1 - specificity)")
    plt.ylabel("TPR (sensitivity)")
    plt.grid(linestyle='--', linewidth='0.8')
    plt.title(titulo)
    plt.show()
# -
# ## Probando *scoring consensus* más comunes
#
# **Fuente:** _[Exponential consensus ranking improves the outcome in docking and receptor ensemble docking](https://www.nature.com/articles/s41598-019-41594-3)_
#
# ### ROC con valores promedio (Rank-by-number)
# Gráfica **ROC** con valores promedio.
# > El score final de cada ligando ($R_bN_i$) corresponde al *score* promedio de cada ligando $i$ en todas las $n$ conformaciones:
#
# > $R_bN_i = \frac{1}{n}\sum_js_i^j,$
#
# donde $n$ es el número de conformaciones, y $s_i^j$ es el score de la molécula $i$ con la conformación $j$.
add_plot_roc(sc_promedio_ad4_LE, true_values, "AD4 LE")
add_plot_roc(sc_promedio_ad4_LC, true_values, "AD4 LC")
add_plot_roc(sc_promedio_vina, true_values, "Vina")
#add_plot_roc(sc_promedio_vrd_8x, true_values, "Vinardo 8x")
plot_roc(sc_promedio_vrd, true_values, "Vinardo 16x", "Curvas ROC: Valores Promedio")
# ### ROC con mejores *scores* de cada método
# Gráficas **ROC**, si para el **ranking** final extrajeramos el mejor score de cada ligando, es decir, de los $n$ scores obtenidos para una molécula $i$, nos quedamos con el mínimo:
#
# > $best\ score_i = min(s_i),$
#
# donde $s_i$ es un vector de $n$ scores donde cada posición $j$ corresponde al score entre la molécula $i$ y la conformación $j$.
# Scorings mínimos (mejores) de cada ligando
sc_minimo_vrd = np.array(df_vinardo_results.min(axis=1))
sc_minimo_vina = np.array(df_vina_results.min(axis=1))
sc_minimo_ad4_LE = np.array(df_ad4_results_LE.min(axis=1))
sc_minimo_ad4_LC = np.array(df_ad4_results_LC.min(axis=1))
add_plot_roc(sc_minimo_ad4_LE, true_values, "AD4 LE")
add_plot_roc(sc_minimo_ad4_LC, true_values, "AD4 LC")
add_plot_roc(sc_minimo_vina, true_values, "Vina")
plot_roc(sc_minimo_vrd, true_values, "Vinardo", "Curvas ROC: Mejores scores (valores mínimos)")
# ### ROC *rank-by-rank*
# Gráfica **ROC** con valores de rango por rango.
# > Para cada conformación $j$ las $m$ moléculas son rankeadas (ordenadas) y a la molécula con el mejor scoring se le asigna el valor de 1 mientras que a la molécula en la posición $m$ se le asigna el valor $m$. Posteriormente se obtiene la media de los valores de rango de cada molécula en las $n$ conformaciones.
#
# > $R_bR_i = \frac{1}{n}\sum_jr^i_j,$
#
# donde $n$ es el número de conformaciones, y $r^i_j$ es la posición de la molécula $i$ en el rango de la conformación $j$.
def get_rank_by_rank(df):
    """Rank-by-rank consensus: mean per-conformation rank of each ligand.

    Each column (conformation) is ranked independently with `DataFrame.rank`
    (best score -> rank 1); the returned ndarray holds, for every row
    (ligand), the average of its ranks over all columns.
    """
    per_conformation_ranks = df.rank()
    mean_rank = per_conformation_ranks.mean(axis=1)
    return np.array(mean_rank)
r_by_r_vrd = get_rank_by_rank(df_vinardo_results.drop(['ActiveInactive'], axis = 1))
r_by_r_vina = get_rank_by_rank(df_vina_results.drop(['ActiveInactive'], axis = 1))
r_by_r_ad4_LE = get_rank_by_rank(df_ad4_results_LE.drop(['ActiveInactive'], axis = 1))
r_by_r_ad4_LC = get_rank_by_rank(df_ad4_results_LC.drop(['ActiveInactive'], axis = 1))
add_plot_roc(r_by_r_ad4_LE, true_values, "AD4 LE")
add_plot_roc(r_by_r_ad4_LC, true_values, "AD4 LC")
add_plot_roc(r_by_r_vina, true_values, "Vina")
plot_roc(r_by_r_vrd, true_values, "Vinardo", "Curvas ROC: Rank by Rank")
# ### ROC z-score
# > Estandarización por conformación (columna). El *score* $s_i^j$ de la molécula $i$ en la conformación $j$ es estandarizado. Es decir, usamos la media de los $m$ scores en la conformación j ($\mu^j$) y su desviación estándar ($\sigma^j$). Posteriormente se obtiene la media de todos los scores estandarizados de la molécula $i$:
#
# > $z-score_i = \frac{1}{n}\sum_j\frac{s_i^j - \mu^j}{\sigma^j}$
# +
from sklearn.preprocessing import StandardScaler
def get_zscore(df):
    """Per-conformation z-score consensus.

    Every column (conformation) is standardized to zero mean and unit
    variance, then the standardized scores are averaged per row (ligand).

    Fixes vs. the original: the locals `means`/`stds` were computed but never
    used, and the sklearn `StandardScaler` dependency is replaced by the
    equivalent pandas computation. `ddof=0` (population std) reproduces
    StandardScaler's convention exactly.

    Parameters
    ----------
    df : pandas.DataFrame
        Rows = ligands, columns = conformations (numeric scores only).

    Returns
    -------
    numpy.ndarray
        One consensus z-score per ligand (row-wise mean of column z-scores).
    """
    # ddof=0 matches StandardScaler's population standard deviation.
    standardized = (df - df.mean(axis=0)) / df.std(axis=0, ddof=0)
    return standardized.mean(axis=1).to_numpy()
# -
z_score_vrd = get_zscore(df_vinardo_results.drop(['ActiveInactive'], axis = 1))
z_score_vina = get_zscore(df_vina_results.drop(['ActiveInactive'], axis = 1))
z_score_ad4_LE = get_zscore(df_ad4_results_LE.drop(['ActiveInactive'], axis = 1))
z_score_ad4_LC = get_zscore(df_ad4_results_LC.drop(['ActiveInactive'], axis = 1))
add_plot_roc(z_score_ad4_LE, true_values, "AD4 LE")
add_plot_roc(z_score_ad4_LC, true_values, "AD4 LC")
add_plot_roc(z_score_vina, true_values, "Vina")
plot_roc(z_score_vrd, true_values, "Vinardo", "Curvas ROC: z-Score")
# ## ROC con *ranking* de los scores de una sola conformación
# Curva ROC a partir del *ranking* de una conformación dada: "**1aq1**"
# Curva ROC dada una conformación
pdb_id = "1aq1"
add_plot_roc(df_ad4_results_LE[ pdb_id ], true_values, "AD4 LE")
add_plot_roc(df_ad4_results_LC[ pdb_id ], true_values, "AD4 LC")
add_plot_roc(df_vina_results[ pdb_id ], true_values, "Vina")
plot_roc(df_vinardo_results[ pdb_id ], true_values, "Vinardo", F"Curvas ROC: Conf. {pdb_id.upper()}")
# + active=""
# # Entendiendo la curva ROC
#
# # Los scores de vina y vinardo parecen estar más suavizados debido a que sus scores
# sólo tienen décimas y no centésimas, lo que hace más porbable que diferentes ligandos
# puedan tener el mismo score, esto repercute en el número de umbrales, que es más
# reducido en comparación con AD4, lo que aumenta el número de observaciones por umbral
#
# Para esta evaluación es importante que las etiquetas true_values y los scores estén
# ordenados iguales, es decir, que correspondan al mismo ligando. Ya después la función
# roc_curve se encargará de ordenarlas
# valores_observados_ordenados = np.array(- df.sort_values(by = pdb_id)[ pdb_id] )
# pos_etiquetas_predichos = df.sort_values(by = pdb_id)["ActiveInactive"]
#
# fpr, tpr, thresholds = roc_curve(true_values, valores_observados)
# tabla_roc_ejemplo = pd.DataFrame({"Umbrales": thresholds, "FPR": fpr, "TPR": tpr})
# print(tabla_roc_ejemplo.head(10))
# print(F'Número de umbrales: {tabla_roc_ejemplo.shape[0]}')
# print(pd.DataFrame({"Pos. Etiquetas Predichas": pos_etiquetas_predichos,
# "Scores": valores_observados_ordenados}).T)
# -
# ## Calculamos el AUC para todas las conformaciones
# Se realiza el cálculo del AUC de todas las conformaciones tomando en cuenta los 4 DataFrames de *scores* disponibles.
# Calculo del AUC por conformaión
# carga del dataFrame con la información general de las estructuras cristalográficas
df_cdk2_labels = pd.read_json(F"{data_dir}/TABLA_MTDATA_CDK2_402_crys_LIGS_INFO_LABELS.json")
# **Cálculo de AUC para cada conformación.**
# One AUC per PDB conformation, per docking method.
# NOTE(review): "AUC_ad4_LE" is not pre-initialized like the other three
# columns; `.at` still creates it on first assignment and every index row is
# written inside the loop, so the result is complete either way.
df_cdk2_labels["AUC_vrd_16x"] = 0.0
df_cdk2_labels["AUC_vina_16x"] = 0.0
df_cdk2_labels["AUC_ad4_LC"] = 0.0
for pdb in df_cdk2_labels.index:
    # Vinardo 16x (scores negated: lower docking score = more likely active).
    roc_vrd = roc_auc_score(np.array(true_values), - df_vinardo_results[ pdb ])
    df_cdk2_labels.at[ pdb, "AUC_vrd_16x"] = roc_vrd
    # Vina 16x
    roc_vina = roc_auc_score(np.array(true_values), - df_vina_results[ pdb ])
    df_cdk2_labels.at[ pdb, "AUC_vina_16x"] = roc_vina
    # Autodock 4: LE
    roc_ad4_LE = roc_auc_score(np.array(true_values), - df_ad4_results_LE[ pdb ])
    df_cdk2_labels.at[ pdb, "AUC_ad4_LE"] = roc_ad4_LE
    # Autodock 4: LC
    roc_ad4_LC = roc_auc_score(np.array(true_values), - df_ad4_results_LC[ pdb ])
    df_cdk2_labels.at[ pdb, "AUC_ad4_LC"] = roc_ad4_LC
# ### Guardamos el dataFrame
# <div class="p-3 mb-2 bg-warning text-dark">Descomentar para guardar.</div>
# +
# Guardamos el dataframe
# df_cdk2_labels.to_json(F"{data_dir}/TABLA_MTDATA_CDK2_402_crys_LIGS_INFO_LABELS_AUC_docking.json")
# -
# ## Conformaciones minimizadas
# AUC columns for the minimized-protein runs (Vina / Vinardo only).
df_cdk2_labels["AUC_vrd_402MIN"] = 0.0
df_cdk2_labels["AUC_vina_402MIN"] = 0.0
for pdb in df_cdk2_labels.index:
    # Vinardo 16x, minimized protein conformations.
    roc_vrd = roc_auc_score(np.array(true_values), - df_vinardo_results_MIN[ pdb ])
    df_cdk2_labels.at[ pdb, "AUC_vrd_402MIN"] = roc_vrd
    # Vina 16x, minimized protein conformations.
    roc_vina = roc_auc_score(np.array(true_values), - df_vina_results_MIN[ pdb ])
    df_cdk2_labels.at[ pdb, "AUC_vina_402MIN"] = roc_vina
# ## Tabla Final
df_cdk2_labels.iloc[:, -8:]
# ## Conformaciones con mejor AUC por Método de score
# ### ROC con *ranking* de la MEJOR conformación en Vinardo a 16x
# Curva ROC a partir del *ranking* de la mejor conformación con Vinardo (conformación con mejor AUC de vinardo).
# +
# ROC de la mejor conformación de vinardo
pdb_id = df_cdk2_labels[["AUC_vrd_16x"]].idxmax()[0]
add_plot_roc(df_ad4_results_LE[ pdb_id ], true_values, "AD4 LE")
add_plot_roc(df_ad4_results_LC[ pdb_id ], true_values, "AD4 LC")
add_plot_roc(df_vina_results[ pdb_id ], true_values, "Vina 16x")
plot_roc(df_vinardo_results[ pdb_id ], true_values, "Vinardo 16x", F"Curvas ROC: Conf. {pdb_id.upper()}")
# -
# ### ROC con *ranking* de la MEJOR conformación en Vinardo a 8x
# +
# ROC de la mejor conformación de vinardo a 8x
pdb_id = df_cdk2_labels[["AUC_vrd_16x"]].idxmax()[0]
add_plot_roc(df_ad4_results_LE[ pdb_id ], true_values, "AD4 LE")
add_plot_roc(df_ad4_results_LC[ pdb_id ], true_values, "AD4 LC")
add_plot_roc(df_vina_results[ pdb_id ], true_values, "Vina 16x")
plot_roc(df_vinardo_results[ pdb_id ], true_values, "Vinardo 16x", F"Curvas ROC: Conf. {pdb_id.upper()}")
# -
# ### ROC con ranking de la MEJOR conformación en Vina a 16x
# +
# ROC de la mejor conformación de vina a 16x
pdb_id = df_cdk2_labels[["AUC_vina_16x"]].idxmax()[0]
add_plot_roc(df_ad4_results_LE[ pdb_id ], true_values, "AD4 LE")
add_plot_roc(df_ad4_results_LC[ pdb_id ], true_values, "AD4 LC")
add_plot_roc(df_vina_results[ pdb_id ], true_values, "Vina 16x")
plot_roc(df_vinardo_results[ pdb_id ], true_values, "Vinardo 16x", F"Curvas ROC: Conf. {pdb_id.upper()}")
# -
# ### ROC con ranking de la MEJOR conformación en AD4 LC
# +
# ROC de la mejor conformación de ad4 LC
pdb_id = df_cdk2_labels[["AUC_ad4_LC"]].idxmax()[0]
add_plot_roc(df_ad4_results_LE[ pdb_id ], true_values, "AD4 LE")
add_plot_roc(df_ad4_results_LC[ pdb_id ], true_values, "AD4 LC")
add_plot_roc(df_vina_results[ pdb_id ], true_values, "Vina 16x")
plot_roc(df_vinardo_results[ pdb_id ], true_values, "Vinardo 16x", F"Curvas ROC: Conf. {pdb_id.upper()}")
# -
# ### ROC con ranking de la MEJOR conformación en AD4 LE
# +
# ROC de la mejor conformación de ad4 LC
pdb_id = df_cdk2_labels[["AUC_ad4_LE"]].idxmax()[0]
add_plot_roc(df_ad4_results_LE[ pdb_id ], true_values, "AD4 LE")
add_plot_roc(df_ad4_results_LC[ pdb_id ], true_values, "AD4 LC")
add_plot_roc(df_vina_results[ pdb_id ], true_values, "Vina 16x")
plot_roc(df_vinardo_results[ pdb_id ], true_values, "Vinardo 16x", F"Curvas ROC: Conf. {pdb_id.upper()}")
# -
# ## Distribución de AUC por programa de *docking*
# ### *Violin plots*
# Seleccionamos las columnas que poseen los scores para cada método usado
df_auc_scores = df_cdk2_labels[['AUC_ad4_LE', 'AUC_ad4_LC',
'AUC_vina_16x', 'AUC_vrd_16x']]
# ## Valor de AUC por scoring vs conformación
# **Se comparar únicamente los resultados en la evaluación con el set de CSAR**
# +
fig, axes = plt.subplots()
cmp = sns.set_palette(['#fa9a58', '#cf384d', '#93d067', '#3498db'])
data = df_auc_scores
data = data.melt()
data['Conf Labels'] = np.tile(df_cdk2_labels.Labels_conf, 4)
fig.set_size_inches(16, 9)
sns.swarmplot(x = "variable", y = "value", data= data, ax = axes, alpha=0.8,
palette= 'Set2', s = 6.5)
axes.set_title('AUC: 402 conf, 111 ligs, 3 programas docking')
axes.grid(linestyle='--', linewidth='0.8')
plt.xlabel("Programas de docking")
plt.ylabel("AUC")
plt.ylim(0.3, 1)
plt.show()
# -
# ## Comparación con conformaciones minimizadas
# ### Vina y Vinardo
# Seleccionamos las columnas que poseen los scores para cada método usado
df_auc_scores_CRYS_vs_MIN = df_cdk2_labels[['AUC_ad4_LE', 'AUC_ad4_LC',
'AUC_vina_16x', 'AUC_vrd_16x',
'AUC_vina_402MIN', 'AUC_vrd_402MIN']]
# +
fig, axes = plt.subplots()
data = df_auc_scores_CRYS_vs_MIN
data = data.melt()
data['Conf Labels'] = np.tile(df_cdk2_labels.Labels_conf, 6)
fig.set_size_inches(16, 9)
sns.swarmplot(x = "variable", y = "value", data= data, ax = axes, alpha=0.8,
palette= 'Set2', s = 6.5)
axes.set_title('AUC: 402 conf, 111 ligs, 3 programas docking')
axes.grid(linestyle='--', linewidth='0.8')
plt.xlabel("Programas de docking")
plt.ylabel("AUC")
plt.ylim(0.3, 1)
plt.show()
# +
fig, axes = plt.subplots()
cmp = sns.set_palette(['#fdce7c', '#cf384d', 'green', '#93d067', '#3498db'])
data = df_auc_scores_CRYS_vs_MIN
data = data.melt()
data['Conf Labels'] = np.tile(df_cdk2_labels.AUC_vrd_402MIN > 0.7, 6)
fig.set_size_inches(16, 9)
sns.swarmplot(x = "variable", y = "value", data= data, ax = axes, alpha=0.8,
hue = "Conf Labels", palette= cmp, s = 6.5)
axes.set_title('AUC: 402 conf, 111 ligs, 3 programas docking')
axes.grid(linestyle='--', linewidth='0.8')
plt.xlabel("Programas de docking")
plt.ylabel("AUC")
plt.ylim(0.3, 1)
plt.show()
# -
# ### Mejores conformaciones de Vinardo (AUC de >0.8)
# +
fig, axes = plt.subplots()
cmp = sns.set_palette(['#fdce7c', '#cf384d', 'green', '#93d067', '#3498db'])
data = df_auc_scores
data = data.melt()
data['Conf Labels'] = np.tile(df_cdk2_labels.AUC_vrd_16x > 0.8, 4)
fig.set_size_inches(16, 9)
sns.swarmplot(x = "variable", y = "value", data= data, ax = axes, alpha=0.8,
hue = "Conf Labels", palette= cmp, s = 6.5)
axes.set_title('AUC: 402 conf, 111 ligs, 3 programas docking')
axes.grid(linestyle='--', linewidth='0.8')
plt.xlabel("Programas de docking")
plt.ylabel("AUC")
plt.ylim(0.3, 1)
plt.show()
# -
# ### Etiquetado de las conforamciones según su estado Activo/Inactivo
# +
fig, axes = plt.subplots()
cmp = sns.set_palette(['#fa9a58', '#fdce7c', '#cf384d', 'green', '#93d067', '#3498db'])
data = df_auc_scores
data = data.melt()
data['Conf Labels'] = np.tile(df_cdk2_labels.Labels_conf, 4)
fig.set_size_inches(16, 9)
sns.swarmplot(x = "variable", y = "value", data= data, ax = axes, alpha=0.8,
hue = "Conf Labels", palette= cmp, s = 6.5)
axes.set_title('AUC: 402 conf, 111 ligs, 3 programas docking')
axes.grid(linestyle='--', linewidth='0.8')
plt.xlabel("Programas de docking")
plt.ylabel("AUC")
plt.ylim(0.3, 1)
plt.show()
# -
# ## Comparación por otras propidades de las conformaciones
# ### Conformaciones según la resolución del cristal
df_cdk2_labels.Resolution.describe()
# +
# Por resoluciones de las conformaciones
resolution = ["< 1.0 A" if i <= 1.0
else "1.0 - 2.0 A" if i <= 2
else "2.0 - 3.0 A" if i <= 3
else "> 3.0 A"
for i in df_cdk2_labels.Resolution]
fig, axes = plt.subplots()
cmp = sns.set_palette(['#EAC350', '#D55E5E', 'green', '#3498db'])
data = df_auc_scores
data = data.melt()
data['Resolution'] = np.tile(resolution, 4)
fig.set_size_inches(16, 9)
sns.swarmplot(x = "variable", y = "value", data= data, ax = axes, alpha=0.8,
hue = "Resolution", palette= cmp, s = 7)
axes.set_title('AUC: 402 conf, 111 ligs, 3 programas docking / Conformaciones por Resolución')
axes.grid(linestyle='--', linewidth='0.8')
plt.xlabel("Programas de docking")
plt.ylabel("AUC")
plt.ylim(0.3, 1)
plt.show()
# -
# ### Por cobertura de secuencia en la estructura cristalográfica
df_cdk2_labels.Coverage.describe()
# +
# por covertura de las Conformaciones
# Por resoluciones de las conformaciones
covertura = pd.qcut(df_cdk2_labels.Coverage, 4)
fig, axes = plt.subplots()
cmp = sns.set_palette(['#D55E5E', '#3498db', '#5ED59B', '#EAC350'])
data = df_auc_scores
data = data.melt()
data['Covertura'] = np.tile(covertura, 4)
fig.set_size_inches(16, 9)
sns.swarmplot(x = "variable", y = "value", data= data, ax = axes, alpha=0.8,
hue = "Covertura", palette= cmp, s = 7)
axes.set_title('AUC: 402 conf, 111 ligs, 3 programas docking / Conformaciones por Covertura')
axes.grid(linestyle='--', linewidth='0.8')
plt.xlabel("Programas de docking")
plt.ylabel("AUC")
plt.ylim(0.3, 1)
plt.show()
# -
# ### Por volumen de la cavidad
# Cargamos los valores del volumen del pocket
col_names = ['frame', 'pocket_volume']
volumen_402_stc = pd.read_csv('../ARCHIVOS/CRISTALES/VOLUMEN/CDK2_VOL_RICCI_402/res_volumes.tabbed.txt',
sep = "\t", header=None, names= col_names)
volumen_402_stc[['pocket_volume']].describe()
# +
volumen = pd.qcut(volumen_402_stc.pocket_volume, 3)
fig, axes = plt.subplots()
cmp = sns.set_palette(['#F14B3E', '#21A0C5', '#FFA832'])
data = df_auc_scores
data = data.melt()
data['Pk Volumen (A^3)'] = np.tile(volumen, 4)
fig.set_size_inches(16, 9)
sns.swarmplot(x = "variable", y = "value", data= data, ax = axes, alpha=0.8,
hue = 'Pk Volumen (A^3)', palette= cmp, s = 6.5)
axes.set_title('AUC: 402 conf, 111 ligs, 3 programas docking / Conformaciones por Volumen')
axes.grid(linestyle='--', linewidth='0.8')
plt.xlabel("Programas de docking")
plt.ylabel("AUC")
plt.ylim(0.3, 1)
plt.show()
# -
# ### Conformaciones por cluster según su cavidad
# **Propiedad medida con POVME**
# +
# por cluster de pocket
# Pocket-cluster membership file: one line per cluster, members separated by
# spaces, each token ending in "__<frame-number>" (1-based; see the split/-1).
cluster_members_file = '../ARCHIVOS/CRISTALES/VOLUMEN/CDK2_VOL_PISANI_402/res_frameInfo/cluster_members.csv'
with open(cluster_members_file) as f:
    lines = f.readlines()
dic_cluster_pockets = {}
for i, line in enumerate(lines):
    split_line = line.replace('\n', '').split(' ')
    # Convert the 1-based frame numbers to 0-based row positions.
    dic_cluster_pockets[F'cluster_{i+1}'] = ([ int(element.split('__')[-1]) -1 for element in split_line])
df_cdk2_labels['pocket_cluster'] = 0
# Fill the column: each row receives the numeric id of its cluster.
df_cdk2_labels.reset_index(inplace=True)
for key in dic_cluster_pockets.keys():
    for value in dic_cluster_pockets[key]:
        df_cdk2_labels.at[value, 'pocket_cluster'] = int(key.split('_')[-1])
# +
fig, axes = plt.subplots()
cmp = sns.set_palette(['#F14B3E', '#21A0C5', '#FFA832', 'green', 'blue'])
data = df_auc_scores
data = data.melt()
data['pocket_cluster'] = np.tile(df_cdk2_labels['pocket_cluster'], 4)
fig.set_size_inches(16, 9)
sns.swarmplot(x = "variable", y = "value", data= data, ax = axes, alpha=0.8,
hue = 'pocket_cluster', palette= cmp, s = 6.5)
axes.set_title('AUC: 402 conf, 111 ligs, 3 programas docking / Conformaciones por Volumen')
axes.grid(linestyle='--', linewidth='0.8')
plt.xlabel("Programas de docking")
plt.ylabel("AUC")
plt.ylim(0.3, 1)
plt.show()
# -
# ### Conformaciones por masa del ligando
# **Conformaciones según la masa del ligando acoplado en el sitio activo; las conformaciones sin ligando tienen masa = 0.**
df_cdk2_labels.Inhib_mass.replace("", 0, inplace=True)
pd.to_numeric(df_cdk2_labels.Inhib_mass).describe()
# +
masa_ligando = pd.qcut(pd.to_numeric(df_cdk2_labels.Inhib_mass), 3)
fig, axes = plt.subplots()
cmp = sns.set_palette(['#D55E5E', '#3498db', '#EAC350'])
data = df_auc_scores
data = data.melt()
data['Masa del ligando'] = np.tile(masa_ligando, 4)
fig.set_size_inches(16, 11)
sns.swarmplot(x = "variable", y = "value", data= data, ax = axes, alpha=0.8,
hue = "Masa del ligando", palette= cmp, s = 7)
axes.set_title('AUC: 402 conf, 111 ligs, 3 programas docking / Conformaciones por Masa del ligando')
axes.grid(linestyle='--', linewidth='0.8')
plt.xlabel("Programas de docking")
plt.ylabel("AUC")
plt.ylim(0.3, 1)
plt.show()
# -
# # Análisis estadístico
# ### Análisis estadístico de la distribución de valores AUC por método de docking
# Evaluamos la normalidad de los datos
import pylab
import scipy.stats as stats
import statsmodels.api as sm
df_auc_scores.describe().T
# ## Normalidad de los datos
#
# La prueba de hipótesis para cada muestra $i$ es la siguiente:
#
# > $H_0$: *La muestra $i$ sigue una distribución normal.*
#
# > $H_a$: *La muestra $i$ __NO__ sigue una distribución normal.*
#
# #### *Test* de Shapiro-Wilks con datos crudos
# Normalidad de los datos
for test in df_auc_scores.columns:
norm_t = stats.shapiro(df_auc_scores[test])
print(F'{test}:\t W = {round(norm_t[0], 3)} p = {round(norm_t[1], 5)}')
# #### *Test* de Shapiro-Wilks con datos estandarizados
from sklearn.preprocessing import StandardScaler
# Datos estandarizados (z-score)
df_std_auc_values = pd.DataFrame(StandardScaler().fit_transform(df_auc_scores),
columns= df_auc_scores.columns)
# Normalidad de los datos estandarizados
for test in df_std_auc_values.columns:
norm_t = stats.shapiro( df_std_auc_values[test])
print(F'stdrzd {test}:\t W = {round(norm_t[0], 3)} p = {round(norm_t[1], 5)}')
# #### *Test* de Shapiro-Wilks con transformación logarítmica
# Transformación de los datos, logaritmo base 10
df_log_auc_values = np.log10(df_auc_scores)
for test in df_log_auc_values.columns:
norm_t = stats.shapiro( df_log_auc_values[test])
print(F'log_{test}:\t W = {round(norm_t[0], 3)} p = {round(norm_t[1], 5)}')
# **En cualquiera de los tres casos, para todas las muestras, se obtiene `p-value < 0.05`, con lo cual se rechaza la hipótesis nula y se concluye, con un nivel de significancia del 95\%, que ninguna de las muestras sigue una distribución normal.**
# ### Q-Q plots
# Sólo para visualizar la distribución de los datos.
# +
fig, axes = plt.subplots()
fig.set_size_inches(16, 8)
for i, auc_results in enumerate(df_auc_scores.columns):
plt.subplot(2, 4, i + 1)
stats.probplot(df_auc_scores[auc_results], dist = "norm", plot=pylab)
plt.subplot(2, 4, i + 5)
plt.hist(df_auc_scores[auc_results], bins= 25)
plt.xlim((0.35, 0.9))
plt.ylim((0, 50))
plt.title(auc_results)
fig.tight_layout()
plt.show()
# -
# # Evaluación de la Homocedasticidad
# La prueba de hipótesis es la siguiente:
# > $H_0$: *Todas las muestras son homocedasticas.* $\Longrightarrow \sigma_0 = \sigma_1 ... = \sigma_k$
#
# > $H_0$: *Al menos un par de muestras tienen varianzas distintas.* $\Longrightarrow \sigma_i \neq \sigma_j$
# ### *Test de Levenne* para muestras no normales
# #### Datos crudos
# Test de levene para evaluar homocedasticidad en muestras no normales
stats.levene(*df_auc_scores.T.values, center="median")
# > **Conclusión:** Cuando los **datos crudos** son evaluados, se rechaza la hipótesis nula y se concluye que las muestras no son homocedásticas.
# #### Datos estandarizados por columna (método de *docking*)
stats.levene(*df_std_auc_values.T.values, center="median")
# > **Conclusión:** Cuando los **datos estandarizados por método** son evaluados, se rechaza la hipótesis nula y se concluye que las muestras no son homocedásticas.
#
# ### Visualización de la distribución de los datos
# +
fig, axes = plt.subplots()
fig.set_size_inches(18, 6)
plt.subplot(1, 2, 1)
sns.boxplot(data = df_auc_scores)
plt.title("Datos crudos")
plt.subplot(1, 2, 2)
sns.boxplot(data = df_std_auc_values)
plt.title("Datos estandarizados")
plt.show()
# -
# > ### NOTA:
# A mi entender, deberíamos evaluar las muestras con los datos no estandarizados y que la escala de todos los datos es la misma: valores AUC.
# # Comparación de las muestras
# ## Prueba de Kruskal-Wallis
#
# La prueba de Kruskal-Wallis (KW) es la alternativa no paramétrica a la prueba ANOVA de una vía. KW es un test que emplea rangos para contrastar la hipótesis de que tres o más muestras hayan sido obtenidas de la misma población.
#
# Supuestos de Kruskal-Wallis:
# 1. **Aleatoreidad de las muestras.**
# 2. **Independencia de las muestras.**
# 3. **Homogeneidad de varianzas (sólo si se busca inferir sobre las medianas de los grupos):**
# > #### Este supuesto puede omitirse si la inferencia se hace sobre *dominancia estocástica*:
# 1. *Some authors state unambiguously that there are no distributional assumptions, others that the homogeneity of variances assumption applies just as for parametric ANOVA. The confusion results from how you interpret a significant result. If you wish to compare medians or means, then the Kruskal-Wallis test also assumes that observations in each group are identically and independently distributed apart from location. If you can accept inference in terms of dominance of one distribution over another, then there are indeed no distributional assumptions.*
# *The commonest misuse of Kruskal-Wallis is to accept a significant result as indicating a difference between means or medians, even when distributions are wildly different. Such results should only be interpreted in terms of dominance. When distributions are similar, medians should be reported rather than means since they (in the form of mean ranks) are what the test is actually comparing. In fact, box and whisker plots with median, interquartile range, outliers and extremes should be the minimum requirement for reporting results of a Kruskal-Wallis test.*
# **[Referencia 1](https://influentialpoints.com/Training/Kruskal-Wallis_ANOVA_use_and_misuse.htm)**.
# 2. *Heterogeneous variances will make interpretation of the result more complex*
# **[Referencia 2](https://influentialpoints.com/Training/kruskal-wallis_anova.htm)**.
# 3. *If the distributions differ in shape and/or variance, a significant KW test implies there is at least one group that is stochastically greater (lesser) than the others, but its mean (and median, and first quartile, and eighty-eighth percentile, etc.) is not necessarily higher (lower) than the other groups.*
# **[Referencia 3](https://stats.stackexchange.com/questions/278001/kruskal-wallis-test-assumption-testing-and-interpretation-of-the-results)**.
#
# ### Prueba de hipótesis
# Prueba de hipótesis (interpretada en términos de dominancia y no de las medianas de las muestras). Esto significa que la hipótesis alternativa es que existe una probabilidad significativa de que, al obtener un individuo de al menos una muestra $i$, su valor sea más grande que el de algún individuo de otra muestra $j$:
#
# > **$H_0$: Todas las muestras provienen de la misma población.**
# $H_0$: Para cada par de muestras $ij \rightarrow P(X_i > X_j = 0.5)$
#
# > **$H_a$: Al menos una muestra proviene de una población con una distribución distinta.**
# $H_0$: Para al menos un par de muestras $ij \rightarrow P(X_i > X_j \neq 0.5)$
#
# ### Ejecución de la prueba ([Fuente](https://machinelearningmastery.com/nonparametric-statistical-significance-tests-in-python/))
#
# Realizamos el test de Kruskal-Wallis sobre los datos en crudo (sin estandarizar).
H, p = stats.kruskal(*df_auc_scores.T.values)
print(F'H = %.5f\np = %.5f' % (H, p))
# > #### Conclusión (Usando datos sin estandarizar):
# Se rechaza la hipótesis nula y se concluye, con un *p-value* < 0.001 que al menos una de las muestras es distinta.
# ### Prueba post-hoc
import scikit_posthocs as posthoc
phoc_wilcoxon = posthoc.posthoc_wilcoxon(df_auc_scores.T.values)
phoc_wilcoxon.columns = df_auc_scores.columns
phoc_wilcoxon.index = df_auc_scores.columns
phoc_wilcoxon.round(5)
# > #### Conclusión (Usando datos sin estandarizar):
# Todos los pares de muestras $ij$ son significativamente distintos entre sí.
# ## Prueba de Friedman
#
# Alternativa no paramétrica para el ANOVA de una vía con mediciones repetidas (muestras pareadas). Es usada para evaluar si tres o más muestras son distintas. Por lo tanto, al contrario de Kruskal-Wallis, esta prueba no asume independencia de las muestras. **[Fuente](https://statistics.laerd.com/spss-tutorials/friedman-test-using-spss-statistics.php)**
#
#
# Supuestos del test de Friedman:
# > 1. **Las muestras provienen de un mismo grupo medido en tres o más ocasiones.**
# 2. **Aleatoriedad de las muestras. Cada muestra es una muestra alatoria de la población.**
# 3. **Los valores son continuos o al menos ordinales.**
# 4. **Las muestras NO necesitan seguir una distribución normal.**
#
# Prueba de hipótesis:
#
# > **$H_0$: Todas las muestras provienen de la misma población.**
# $H_0$: Para cada par de muestras $ij \rightarrow P(X_i > X_j = 0.5)$
#
# > **$H_a$: Al menos una muestra proviene de una población con una distribución distinta.**
# $H_0$: Para al menos un par de muestras $ij \rightarrow P(X_i > X_j \neq 0.5)$
#
# ### Ejecución de la prueba Friedman
# Friedman test across the paired AUC samples (one observation per conformation).
f, p = stats.friedmanchisquare(*df_auc_scores.T.values)
# BUG FIX: the original printed `H` (leftover from the Kruskal-Wallis cell
# above) instead of the Friedman statistic `f` computed here.
print(F'F = %.5f\np = %.5f' % (f, p))
# > #### Conclusión (Usando datos sin estandarizar):
# Se rechaza la hipótesis nula y se concluye, con un *p-value* < 0.001 que al menos una de las muestras es distinta.
# ### Prueba post-hoc (Friedman)
phoc_nemenyi = posthoc.posthoc_nemenyi_friedman(df_auc_scores.values)
phoc_nemenyi.columns = df_auc_scores.columns
phoc_nemenyi.index = df_auc_scores.columns
phoc_nemenyi.round(3)
# > #### Conclusión (Usando datos sin estandarizar):
# Sólo el par `AUC_vina` y `AUC_ad4_LE` son similares, el resto de los pares de muestras son significativamente distintos.
# # ¡Terminado!
| 33,959 |
/220_midterm/midterm.ipynb | 76849cc252245cb116edb11ecadc18dea9f1e505 | [] | no_license | RyanYin04/MachineLearningPractice | https://github.com/RyanYin04/MachineLearningPractice | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 3,161,935 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: 'Python 3.7.3 64-bit (''base'': conda)'
# language: python
# name: python37364bitbasecondaaca2d34eaf9e49b7acb3deeb434002cb
# ---
# # STA 220: Midterm
#
# ### Worth 200 Points
#
# Winter 2020
# ## Information
#
# After the colons (in the same line) please write just your first name, last name, and the 9 digit student ID number below.
#
# First Name:
#
# Last Name:
#
# Student ID:
# ## Instructions
#
# Please print your answer notebook to pdf (make sure that it is not unnecessarily long due to long output) and submit as the homework solution with your zip file.
#
# For readability you,
#
# - MUST add cells in between the exercise statements and add answers within them and
# - MUST NOT modify the existing cells, particularly not the problem statement
# - you may add multiple cells between exercise cells
#
# To make markdown, please switch the cell type to markdown (from code) - you can hit 'm' when you are in command mode - and use the markdown language. For a brief tutorial see: https://daringfireball.net/projects/markdown/syntax
# ## Trans-Atlantic Slave Trade
#
# In this homework, we will uncover some of the numbers behind the Trans-atlantic slave trade (TAST), also known as the middle passage, that brought African slaves to the Americas. The middle passage is reported to have forcibly migrated over 10 million Africans to the Americas over a roughly 3 century time span. Many aspects of the TAST is little known by most people, such as the countries that constituted this network of slave ships, the regions from which the slaves were taken, and the number of slaves captured from Africa.
#
# This last number is especially important since the number of slaves taken from Africa can impact other estimates that result from this. For example, when estimating the population of Africa in a given decade, demographers will use population growth models and more recent census data. For example, there are roughly X number of people in Africa and such populations tend to grow at rate M. Then if we want to calculate the population one century ahead then we just apply a simple formula that assumes that the population grows at this rate. But if the population is being drained by the slave trade, then this number will tend to be underestimated because the growth rate is overestimated. To account for this models need to take into account this drain on the population.
#
# Throughout this homework you will need to follow the principles of graphical excellence and the grammar of graphics. **Use only Plotnine for your graphics**, do not use Pyplot, Seaborn, or Plotly since they do not follow closely the grammar of graphics. Be sure to include titles and necessary contextual captions.
#
# **Warning:** The Trans-Atlantic Slave Trade remains one of the most horrific abuses of human rights in history. This homework deals with the numbers behind this forced migration, please be aware that this is a sensitive topic for possibly yourself and others. A suitable amount of respect and seriousness is required when dealing with this data.
# **Exercise 1.** The data.
#
# 1. Read in the Trans-Atlantic Slave Trade database with Pandas. Hint: if you use the unix tool `file` you can find that this CSV is encoded with iso-8859-1 character set. Make sure that all missing values are encoded as NaN.
# 2. Open up the pdf file: TAST_codebook.pdf which is the data dictionary for this and other related datasets. Many of the variables in the codebook are not in this dataset because it is describing an updated dataset. Create a list where you describe the meaning of the columns of your imported dataframe. You can group similar columns together when describing their rough meaning, such as ownera,...,ownerp are owners of the slave ships.
#
# Throughout we will disregard all time variables other than year since they are unreliable.
import pandas as pd
import numpy as np
from plotnine import *
# Load the Trans-Atlantic Slave Trade database.  The file is Latin-1 encoded
# (checked with the unix `file` tool) and uses a single space for missing
# values, which we normalise to NaN here.
data = pd.read_csv('tastdb-2010.csv', encoding='iso-8859-1', na_values= ' ')
data.head()
# Total number of voyage records.
n = data.shape[0]
# There are 98 columns in total.
data.head()
# **Exercise 2.** Estimating the total number of captives.
#
# 1. We will try to estimate the number of people captured into slavery and forced through the middle passage. What variable would you use to estimate the total number of captives taken from Africa? Let me call this variable Var A in this problem statement. How much of the data for Var A is missing?
# 2. Create an initial estimate of the total number of captives taken from Africa by assuming that Var A is Missing Completely at Random. You can simply divide the total count for the non-missing entries by the proportion of non-missing entries.
# 3. What other variables do you expect to be associated with Var A and why, select 2 top possibilities? Visualize these associations using an appropriate plot. Do you trust the answer to 2.2? Why or why not?
# Var A: 'tslavesd' = total number of captives taken on board (departed).
vara = data['tslavesd']
# Number of missing entries.  `Series.isna()` already yields booleans, so
# the redundant `== True` comparison is dropped and `.sum()` counts them.
vara_na = vara.isna().sum()
# Total captives over the voyages where tslavesd is recorded.
vara.sum()
# Indices of the missing entries.  Bug fix: the original wrote
# `np.where(vara.isna == True)` — comparing the *method object* to True,
# which is always False and returned an empty index array.
np.where(vara.isna())
# **Exercise 3.** Hot-deck imputation replaces missing data with similar entries. We will use a stochastic hot-deck imputation method, which replaces the number captured into slavery (tslavesd) with a random voyage from the same decade. (If there is only one voyage in that decade then ignore that record.) Construct a function that performs this random imputation method, and outputs a Series with this new imputed variable. Calculate the total imputed tslavesd variable 100 times by performing this random imputation method 100 times. Report the standard error, which is the standard deviation of the total imputed, along with the mean of the 100 trials.
# Row labels of the dataframe (used later as imputation positions).
data.index.values
# Decade key for hot-deck matching: integer-divide the voyage year by 10.
decade = (data['yearam'] // 10)
decade
def HotDeckImpute(row):
    """Return a random same-decade donor record for hot-deck imputation.

    Picks, uniformly at random, one voyage from the same decade as ``row``
    whose 'tslavesd' value is present.  Returns None when the decade has at
    most one such donor (as the exercise instructs, such records are skipped).
    """
    target_decade = row['yearam'] // 10
    # Candidate donors: voyages of the same decade with a recorded tslavesd.
    donors = data[data['yearam'] // 10 == target_decade]
    donors = donors[donors['tslavesd'].notna()]
    n_donors = donors.shape[0]
    if n_donors <= 1:
        return None
    # Draw one donor uniformly at random and return its whole record.
    pick = np.random.randint(n_donors)
    return donors.iloc[pick, :]
def HotDeckImpute2():
    """Stochastically hot-deck impute 'tslavesd'.

    Each missing value is replaced by the 'tslavesd' of a voyage drawn
    uniformly at random from the same decade; decades with at most one
    non-missing donor are skipped (value stays NaN).  Returns the imputed
    Series.

    NOTE(review): only the first 500 rows are processed here — presumably a
    debugging leftover; confirm whether the whole column was intended.
    """
    # Work on a copy so the global dataframe is left untouched.
    ss = data['tslavesd'][:500].copy()
    # Index labels of the missing entries.
    idxs = ss[ss.isna()].index.values
    print(idxs)
    for i in idxs:
        row = data.loc[i, :]
        # Decade of the voyage to be imputed.
        d = row['yearam']//10
        # Donor pool: same-decade voyages with a recorded tslavesd.
        full = data[data['yearam']//10 == d]
        full = full[full['tslavesd'].isna() == False]
        n = full.shape[0]
        if n <= 1:
            # No usable donor in this decade; leave the value missing.
            continue
        # Draw a donor uniformly at random from the pool.
        r = np.random.randint(n)
        # Bug fix: `r` is a *positional* index into the donor pool `full`,
        # so it must be looked up with `.iloc` on `full`.  The original
        # `data['tslavesd'][r]` indexed the whole dataframe by label,
        # picking an unrelated voyage and possibly re-inserting a NaN.
        ss[i] = full['tslavesd'].iloc[r]
    return ss
# **Exercise 4.** The flag that the ships flew.
#
# 1. We want to understand the trends of the nationality of the slave ships (the flag that they flew under is in the national variable). Subselect the values of `national` that have more than 100 voyages with that value.
# 2. Create a DataFrame that filters out the voyages where `national` does not have one of these values. You should be retaining voyages with only these most common values.
# 3. Create a variable, `flag`, that is a string of easily readable names for these values by looking them up in the pdf codebook.
# 4. Using Plotnine, plot the counts of the voyages by flag as a function of voyage year. Think about how best to display the count of a voyage by year and then how should you be including the flag variable.
# 5. In this plot, what are the geometric elements and aesthetic mappings? What other components of the grammar of graphics are you using?
# 6. Do you observe any abrupt changes in the patterns of these counts for a given flag? Investigate the cause for this change (using Google, etc.).
# 1.Subselect:
# Count voyages per national-flag code and keep codes with > 100 voyages.
nationCount = data['national'].groupby(by = data['national']).count()
nations = nationCount[nationCount > 100].index.values
nations # Spain, Portugal, Brazil, Great Britain, Netherlands, U.S.A, France, Denmark.
# 2.Filtered data:
# Keep only voyages flying one of the common flags.
data_filtered = data[data['national'].isin(nations)]
data_filtered.head()
# +
# 3. Create flag:
flag = ['Sp', 'Po', 'Br', 'GB', 'Ne', 'USA', 'Fr', 'De']
# Generate the dictionary in order to map:
# NOTE(review): this zip assumes the numeric codes in `nations` come out in
# exactly the order of the abbreviations above — verify against the codebook.
mapping = {key:value for (key, value) in zip(nations, flag)}
# Create new variable and map it onto the national code.
# NOTE(review): assigning to a column of `data_filtered` (a slice of `data`)
# may raise pandas' SettingWithCopyWarning.
data_filtered['flag'] = data_filtered['national'].map(mapping)
# -
data_filtered.head()
data_filtered['flag'].head()
# 4. Using Plotnine, plot the counts of the voyages by flag as a function of voyage year. Think about how best to display the count of a voyage by year and then how should you be including the flag variable.
data_filtered.head()
# Voyage counts per (year, flag) pair.
g = data_filtered.groupby(['yearam', 'flag'])
gcount = pd.DataFrame(g['voyageid'].count())
# ggplot(gcount, aes(x = 'yearam', y = ))
# +
# g['voyageid'].count()
# -
gcount.columns = ['count']
gcount.reset_index(inplace = True)
# gcount
# Scatter of voyages-per-year, coloured by flag.
ggplot(gcount, aes(x = 'yearam', y = 'count', color = 'flag')) + geom_point() +ylab('voyages/year') + xlab('year')
# +
# 5. In this plot, what are the geometric elements and aesthetic mappings? What other components of the grammar of graphics are you using?
# +
# Do you observe any abrupt changes in the patterns of these counts for a given flag? Investigate the cause for this change (using Google, etc.).
# -
# **Exercise 5.** Looking at some of these ships.
#
# 1. Search for the slave ship mentioned in the following wikipedia article: https://en.wikipedia.org/wiki/Brookes_(ship) Hint: Look at all records of ships with 'Brook' in the name and try to match the characteristics to those described. How many voyages for this ship are in the data (try to exclude ships with the same name)?
# 2. Create a variable that is True if there was a resistance (like a slave revolt) on the ship. Plot the density of ships as a function of year with and without revolts and compare these distributions.
# 3. The movie Amistad was based on a real slave ship and slave uprising. Read about it here: https://en.wikipedia.org/wiki/La_Amistad Try to find this ship by searching for it by name and also searching for ships in the same 10 year period as this event with a slave resistance. If you think you found it describe it, otherwise describe the events of another voyage that you did find.
# +
# 1. Search for the slave ship mentioned in the following wikipedia article: https://en.wikipedia.org/wiki/Brookes_(ship) Hint: Look at all records of ships with 'Brook' in the name and try to match the characteristics to those described. How many voyages for this ship are in the data (try to exclude ships with the same name)?
# Feature 1: Ship of 18 century: Launched 1781, sold 1804.
# Feature 2: British slave boat
# Feature 3: Allow 454 african slaves(total slaves not more than 454), but at most 609, 638, 744
# Feature 4: Tonnage: 300 tones
# Candidate records: any ship whose name contains 'Brook'.
all_brooke = data[data['shipname'].str.contains('Brook') == True]
# Narrow down by the Brookes' known characteristics: active 1781-1804,
# British flag (code 7), and at most 744 captives on a voyage.
all_brooke[(all_brooke['yearam'] <= 1804) & (all_brooke['yearam'] >= 1781) &
           (all_brooke['national'] == 7) & (all_brooke['tslavesd'] <=744) ]
# 7 Record in total.
# +
# 2. Create a variable that is True if there was a resistance (like a slave revolt) on the ship. Plot the density of ships as a function of year with and without revolts and compare these distributions.
# Make a copy of data:
tt = data.copy()
# Create a new variable:
# `x == x` is False only for NaN, so this flags voyages where 'resistance'
# was recorded at all ('1') vs. missing ('0').
tt['resist_flag'] = tt['resistance'].apply(lambda x: '1' if x == x else '0')
tt.head()
# Group the data
# To calculate the total voyages:
g1 = tt.groupby('yearam')
# To calculate the voyages grouped by year and resis_flag:
g2 = tt.groupby(['yearam', 'resist_flag'])
resist_count = pd.DataFrame(g2['resist_flag'].count())
resist_count.columns = ['rcount']
resist_count.reset_index(inplace = True)
resist_count
year_count = pd.DataFrame(g1['voyageid'].count())
year_count.columns = ['tcount']
year_count.reset_index(inplace = True)
year_count
# Merge 2 dataframe:
resist_density = pd.merge(resist_count, year_count, how = 'left', on = 'yearam')
# Calculate the density:
# Fraction of that year's voyages with/without a recorded revolt.
resist_density['resist_density'] = resist_density['rcount'] / resist_density['tcount']
resist_density['nonresist_density'] = 1 - resist_density['resist_density']
# Get the plot:
ggplot(resist_density, aes(x = 'yearam', y = 'resist_density', color = 'resist_flag')) + geom_point() +\
    ylab('Revolts Density') + xlab('year') + labs(title = 'Density plot')
# -
# **Exercise 6.** Other patterns.
#
# 1. The arrival and departure locations are quite detailed. Look in the appendix of the codebook for the location codes. Make a coarser version of both arrival and departure port variables (select just the last departure and first arrival) so that for example,
# ```
# 30000 Caribbean 36100 Martinique 36101 Fort-Royale
# ```
# is just encoded as '3' or Caribbean.
#
# 2. Plot the trend of voyages as a function of arrival location. What trends do you see?
#
# 3. Do the same for departure location.
#
# 4. Plot the ratio of captives that are men as a function of year. Include a smoother to describe the over all trend. Also include in the plot another possible confounding variable.
#
# 5. Describe the geoms, aesthetic mappings, and other aspects of the plot.
# +
# 1. The arrival and departure locations are quite detailed. Look in the appendix of the codebook for the location codes. Make a coarser version of both arrival and departure port variables (select just the last departure and first arrival) so that for example,30000 Caribbean 36100 Martinique 36101 Fort-Royale is just encoded as '3' or Caribbean.
# Top-level region encoded by the leading digit of a port code (codebook
# appendix).  The labels are displayed in the plots below, so the original
# typos ("Mailand North Aemrica", "Spanish Mailand") are fixed here.
broad_location = {
    '1.0': 'Europe',
    '2.0': 'Mainland North America',
    '3.0': 'Caribbean',
    '4.0': 'Spanish Mainland',
    '5.0': 'Brazil',
    '6.0': 'Africa',
    '8.0': 'Others'
}
tt = data.copy()
# Coarse arrival region: leading digit of the first arrival port code.
tt['arrival_code'] = (tt['sla1port'] // 10000).astype(str)
tt['arrival'] = tt['arrival_code'].map(broad_location)
# Coarse departure region: leading digit of the last departure port code.
tt['departure_code'] = (tt['ptdepimp'] // 10000).astype(str)
tt['departure'] = tt['departure_code'].map(broad_location)
tt.head()
# +
# 2. Plot the trend of voyages as a function of arrival location. What trends do you see?
# Voyage counts per (year, arrival region).
g1 = tt.groupby(['yearam', 'arrival'])
arrival_count = pd.DataFrame(g1['voyageid'].count())
arrival_count.columns = ['count']
arrival_count.reset_index(inplace = True)
# Bug fix: the keyword was misspelled `titile`, so no title was set.
ggplot(arrival_count, aes(x = 'yearam', y = 'count', color = 'arrival')) + geom_point()+\
    xlab('year') + ylab('Voyages') + labs(title = 'Voyages per Year')
# +
# 3. Do the same for departure location.
# Voyage counts per (year, departure region).
g2 = tt.groupby(['yearam', 'departure'])
departure_count = pd.DataFrame(g2['voyageid'].count())
departure_count.columns = ['count']
departure_count.reset_index(inplace = True)
# Bug fix: the keyword was misspelled `titile`, so no title was set.
ggplot(departure_count, aes(x = 'yearam', y = 'count', color = 'departure')) + geom_point()+\
    xlab('year') + ylab('Voyages') + labs(title = 'Voyages per Year')
# +
# 4. Plot the ratio of captives that are men as a function of year. Include a smoother to describe the over all trend. Also include in the plot another possible confounding variable.
# Count of non-missing men-ratio entries (`x == x` is False only for NaN).
np.where(tt['menrat7'] == tt['menrat7'])[0].shape
# Create a new variable holding the estimated number of men:
tt['num_men'] = tt['slaximp'] * tt['menrat7']
tt.head()
# Get total number each year:
g1 = tt.groupby('yearam')
total_slaves = pd.DataFrame(g1['slaximp'].sum())
total_slaves.reset_index(inplace = True)
# Get the men number each year:
g2 = tt.groupby('yearam')
men_slaves = pd.DataFrame(g2['num_men'].sum())
men_slaves.reset_index(inplace = True)
# -
# Yearly male ratio = (sum of estimated men) / (sum of estimated captives).
merged = pd.merge(total_slaves, men_slaves, how = 'outer', on = 'yearam')
merged['ratio'] = merged['num_men'] / merged['slaximp']
ggplot(merged, aes(x='yearam', y = 'ratio')) + geom_point(alpha = 0.6)+ stat_smooth(color = 'red')
# +
# 5. Describe the geoms, aesthetic mappings, and other aspects of the plot.
# -
| 16,226 |
/report.ipynb | 909080e219f860205135d9b57c8f21d96d19e0c3 | [] | no_license | ValeraKaravai/MRI_yolo | https://github.com/ValeraKaravai/MRI_yolo | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 4,211,942 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #%load_ext autoreload
# #%autoreload 2
import os
import numpy as np
from multiprocessing import Pool
import time
import random
import IPython.display as ipd
import pyaudio
import sys
import librosa
import librosa.display
from tqdm import tqdm
from matplotlib import pyplot as plt
sys.path.append('../../')
import pdb
#import utils.audio as a
#import utils.plots as plots
import utils as utils
# -
# ### Load audio files into dataset
# +
# load (fe)male audio sequences without alpha, beta
PATH = '../../datasets/freesound.org/wav/'
SR = 22050
dataset = []
files = os.listdir(PATH)
for f in tqdm(files):
if f.split('.')[-1] != 'wav':
continue
#pdb.set_trace()
audio, sr = utils.audio.loadAudio(PATH + f, sr=SR)
dataset.append(audio)
# -
dataset = np.array(dataset)
len(dataset[1])
# read data
data = utils.misc.slidingWindow(dataset, int(SR*1), int(SR*1))
# create chunks
dataset_chunks = []
for chunk in dataset:
#pdb.set_trace()
w = utils.misc.slidingWindow(chunk, int(SR*1), int(SR*1))
if len(w) != 0:
dataset_chunks.append(w)
# concatenate chunks
chunk_seq = np.array(dataset_chunks)
print(list(filter(lambda x: len(x)==0, chunk_seq)))
chunks = np.concatenate((chunk_seq[:]), axis=0)
chunks.shape
# +
dataset_chunks[0].shape
display(ipd.Audio(dataset_chunks[1][0], rate=SR))
display(ipd.Audio(dataset_chunks[1][1], rate=SR))
display(ipd.Audio(dataset_chunks[1][2], rate=SR))
display(ipd.Audio(dataset_chunks[1][3], rate=SR))
display(ipd.Audio(dataset_chunks[1][4], rate=SR))
display(ipd.Audio(dataset_chunks[1][5], rate=SR))
display(ipd.Audio(dataset_chunks[1][6], rate=SR))
display(ipd.Audio(dataset_chunks[1][:].reshape(-1), rate=22050))
# -
from skimage.transform import resize
# Extract a large per-chunk feature vector: MFCCs, chroma, several spectral
# statistics, tonnetz, a resized tempogram, and a global tempo estimate,
# all flattened and concatenated.
num_points = 1000
num_feats = 6
num_chunks = chunks.shape[0]
hop_length = 256
#X = np.zeros((num_chunks, 44*num_feats))
X = []
for i in tqdm(range(num_chunks)):
    #pdb.set_trace()
    # mfcc
    feat = librosa.feature.mfcc(y=chunks[i, :], sr=SR, n_mfcc=num_feats).flatten() # 13x44
    # chroma
    chroma_cq = librosa.feature.chroma_cqt(y=chunks[i, :], sr=SR, hop_length=hop_length).flatten() #12x44
    # RMS mag spec
    rms = librosa.feature.rms(y=chunks[i, :]).flatten() #1x44
    # Spectral centroid
    cent = librosa.feature.spectral_centroid(y=chunks[i, :], sr=SR).flatten()#1x44
    # Spectral bandwidth
    band = librosa.feature.spectral_bandwidth(y=chunks[i, :], sr=SR).flatten()#1x44
    # Spectral flatness
    flat = librosa.feature.spectral_flatness(y=chunks[i, :]).flatten()#1x44
    # spectral rolloff
    rolloff = librosa.feature.spectral_rolloff(y=chunks[i, :], sr=SR).flatten()#1x44
    # zero crossing rate
    zcross = librosa.feature.zero_crossing_rate(chunks[i, :]).flatten()#1x44
    # Spectral contrast
    S = np.abs(librosa.stft(chunks[i, :]))
    spec_contrast = librosa.feature.spectral_contrast(S=S, sr=SR).flatten() #7x44
    # Tonnetz
    y = librosa.effects.harmonic(chunks[i, :])
    tonnetz = librosa.feature.tonnetz(y=y, sr=SR).flatten() #6x44
    # local and global tempogram
    oenv = librosa.onset.onset_strength(y=chunks[i, :], sr=SR, hop_length=hop_length)
    # Tempograms vary in width, so resize to a fixed 32x44 before flattening.
    tempogram = resize(librosa.feature.tempogram(onset_envelope=oenv, sr=SR, hop_length=hop_length), (32, 44)) #32x44
    # Compute global onset autocorrelation
    ac_global = librosa.autocorrelate(oenv, max_size=tempogram.shape[0])
    ac_global = librosa.util.normalize(ac_global)
    # Estimate the global tempo for display purposes
    # NOTE(review): this uses `sr` (last loaded file's rate), not SR — confirm.
    tempo = librosa.beat.tempo(onset_envelope=oenv, sr=sr, hop_length=hop_length)[0]
    #pdb.set
    X.append(np.concatenate((feat, chroma_cq, rms, cent, band, flat, rolloff, zcross, spec_contrast, tonnetz, tempogram.flatten(), tempo.reshape(1))))
    #X.append(np.array(librosa.feature.mfcc(y=chunks[i, :], sr=SR, n_mfcc=num_feats).flatten()))
# + slideshow={"slide_type": "slide"} tags=["num_p"]
# Simpler alternative feature set: MFCCs only.
# NOTE(review): running this cell overwrites the rich feature matrix X built
# in the previous cell.
num_points = 1000
num_feats = 6
num_chunks = chunks.shape[0]
#X = np.zeros((num_chunks, 44*num_feats))
X = []
for i in tqdm(range(num_chunks)):
    #pdb.set_trace()
    #X[i, :] = librosa.feature.mfcc(y=chunks[i, :], sr=SR, n_mfcc=num_feats).flatten()
    X.append(np.array(librosa.feature.mfcc(y=chunks[i, :], sr=SR, n_mfcc=num_feats).flatten()))
# -
# Per-clip MFCC features, preserving the clip -> chunk hierarchy (used to
# build per-sequence lengths for the HMM below).
chunk_feat = []
chunk_feat_sub = []
for i in tqdm(range(chunk_seq.shape[0])):
    chunk_feat_sub = []
    for j in range(chunk_seq[i].shape[0]):
        chunk_feat_sub.append(librosa.feature.mfcc(y=chunk_seq[i][j], sr=SR, n_mfcc=num_feats))
    chunk_feat.append(chunk_feat_sub)
np.array(X).shape
#len(chunk_feat[1])
# Toy example of hmmlearn's concatenated-sequences + lengths convention.
X1 = [[0.5], [1.0], [-1.0], [0.42], [0.24]]
X2 = [[2.4], [4.2], [0.5], [-0.24]]
X3 = np.concatenate([X1, X2])
lengths = [len(X1), len(X2)]
print(len(lengths))
print(X3.shape)
# Number of chunks per clip (the `lengths` vector for hmmlearn).
len_vec = np.array([len(chunk_feat[x]) for x in range(len(chunk_feat))])
print(len(len_vec))
idx_zero = list(filter(lambda x: x==0, len_vec))
X = np.array(X)
# NOTE(review): np.where on a 1-D array returns a 1-tuple, so this two-target
# unpacking raises ValueError — likely meant `idx_zero = np.where(len_vec==0)[0]`.
idx_zero, _ = np.where(len_vec==0)
np.sum(len_vec[:idx_zero[0]])
print(np.array(X).shape)
# +
import numpy as np
from hmmlearn import hmm
from sklearn.preprocessing import StandardScaler, MinMaxScaler
# Scale features to [0, 1], project to 2-D, and fit a 5-state Gaussian HMM
# over the concatenated chunk sequences (len_vec gives per-clip lengths).
X_scaled = MinMaxScaler(feature_range=(0, 1)).fit_transform(X)
# NOTE(review): PCA is imported only in the next cell — this line relies on
# the cells having been executed out of order.
x_pca = PCA(n_components=2).fit_transform(X_scaled)
np.random.seed(42)
#remodel = hmm.GaussianHMM(n_components=3, covariance_type="diagonal", n_iter=100)
remodel = hmm.GaussianHMM(n_components=5, verbose=True, covariance_type="full", n_iter=20)
# NOTE(review): sum(len_vec) must equal len(x_pca) and zero-length entries
# are not allowed by hmmlearn — verify len_vec was cleaned of zeros.
remodel.fit(np.array(x_pca), len_vec)
# +
from sklearn.decomposition import PCA
# Here x_pca is rebound to the *fitted PCA object* (not transformed data),
# so its explained_variance_ratio_ can be inspected below.
x_pca = PCA(n_components=2).fit(X_scaled)
# -
# summation of PCAs
np.sum(x_pca.explained_variance_ratio_)
# +
# Decode and score one slice of the data with the fitted HMM.
# NOTE(review): the model was fitted on 2-D PCA features, but X here is the
# full feature matrix, and len_vec[0]:len_vec[1] slices by *values* rather
# than cumulative offsets — both look suspect; verify.
Z2 = remodel.predict(np.array(X)[len_vec[0]:len_vec[1]])
ll = remodel.score(np.array(X)[len_vec[0]:len_vec[1]])
print(Z2, ll)
# -
# Visualise the learned state-transition matrix.
plt.imshow(remodel.transmat_)
plt.colorbar()
# Scatter the state means with covariance ellipses (plot_ellipse is defined
# in a later cell — this relies on that cell having been run first).
import matplotlib as mpl
fig1 = plt.figure(figsize=[10, 5])
ax1 = fig1.add_subplot(111)
colors = ['red', 'blue', 'green', 'magenta', 'black', 'darkgreen', 'orange', 'crimson', 'grey']
for i, (m, c) in enumerate(zip(remodel.means_, remodel.covars_)):
    ax1.scatter(m[0], m[1], s=50, c=colors[i], label=str(i))
    plot_ellipse(ax1, m, c, colors[i])
plt.legend()
plt.show()
# Same plot with single-letter matplotlib colour codes.
import matplotlib as mpl
fig1 = plt.figure(figsize=[10, 5])
ax1 = fig1.add_subplot(111)
colors = ['r', 'b', 'g', 'm', 'k', 'c']
for i, (m, c) in enumerate(zip(remodel.means_, remodel.covars_)):
    ax1.scatter(m[0], m[1], s=50, c=colors[i], label=str(i))
    plot_ellipse(ax1, m, c, colors[i])
plt.legend()
plt.show()
# Draw ten 7-step sample sequences (observations + hidden states) from the
# fitted HMM.
i=0
samples_list = []
states_list = []
while i < 10:
    samples, states = remodel.sample(7)
    samples_list.append(samples)
    states_list.append(states)
    i += 1
#remodel.sample(7)
# NOTE(review): `synthesized` is not defined anywhere in this notebook.
print(len(synthesized[0][1]))
len(samples_list[1][0]), len(states_list[1])
# + jupyter={"source_hidden": true}
# NOTE(review): reference-only snippet pasted from another (Python 2) project.
# It is not executable here: `n_states`, `per_data`, `per_lens`, `aic` and
# `self` are undefined, `map(...)` is lazy in Python 3 so the fits would never
# run, and the final `print` statement is a SyntaxError under Python 3.
from hmmlearn.hmm import GaussianHMM
if n_states is None:
    components = [5, 6, 8, 10, 12]
    hmms = [GaussianHMM(n_components=c) for c in components]
    map(lambda g: g.fit(per_data, per_lens), hmms)
    scores = map(lambda g: aic(g, per_data, per_lens), hmms)
    max_score, self.hmm = sorted(zip(scores, hmms))[0]
else:
    self.hmm = GaussianHMM(n_components=n_states)
    self.hmm.fit(per_data, per_lens)
ll = self.hmm.score(per_data, per_lens)
aic_ = aic(self.hmm, per_data, per_lens)
print "Goal HMM n_components", self.hmm.n_components, "Log likelihood", ll, "AIC", aic_
# -
def plot_ellipse(ax, pos, cov, color):
    """Draw a 2-sigma covariance ellipse for a 2-D Gaussian onto *ax*.

    *pos* is the (x, y) mean, *cov* the 2x2 covariance matrix; the ellipse is
    oriented along the covariance eigenvectors with axis lengths proportional
    to the square roots of the eigenvalues.
    """
    def eigsorted(cov):
        # Eigen-decompose and sort by descending eigenvalue so the first
        # eigenvector gives the ellipse orientation.
        vals, vecs = np.linalg.eigh(cov)
        order = vals.argsort()[::-1]
        return vals[order], vecs[:,order]
    vals, vecs = eigsorted(cov)
    theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
    # abs() guards against tiny negative eigenvalues from numerical error.
    width, height = 4 * np.sqrt(np.abs(vals))
    # Bug fix: removed a stray `time.sleep(0.1)` here — a debugging leftover
    # that stalled plotting by 100 ms per ellipse and had no visual effect.
    ellip = mpl.patches.Ellipse(xy=pos, width=width, height=height, angle=theta, lw=1, fill=True, alpha=0.2, color=color)
    ax.add_artist(ellip)
# Render the HMM transition matrix as a directed graph, edge width scaled by
# transition probability; mark the start state and colour final states green.
# NOTE(review): `pgv` (pygraphviz) and `goal_model` are not defined anywhere
# in this notebook — this cell was pasted from another project.
G = pgv.AGraph(strict=False, directed=True, dpi=300)
T = goal_model.hmm.transmat_
visited = []
start = None
for i in range(T.shape[0]):
    for j in range(T.shape[1]):
        t = round(T[i,j], 2)
        if t > 1e-10 and (i,j) not in visited:
            print(t*10)
            # Arrow size proportional to probability, clipped to [0, 1.5].
            s = np.clip(7*t, 0, 1.5)
            G.add_edge(i, j, arrowsize=s)# label=t)
            visited.append((i,j))
    pi = round(goal_model.hmm.startprob_[i], 2)
    if pi == 1:
        start = i
for s in goal_model.final_states:
    n = G.get_node(s)
    n.attr['color'] = 'green'
from sklearn.mixture import GaussianMixture
# Cluster the features into 64 Gaussian components and inspect the label
# distribution.
# NOTE(review): `pca_X` is not defined in the visible cells — presumably a
# PCA projection of X created elsewhere; confirm.
gmm = GaussianMixture(n_components=64, verbose=2).fit(pca_X)
labels = gmm.predict(pca_X)
print(labels.shape)
plt.hist(labels)
#print(labels[1000:2000])
# #%matplotlib inline
#pca_scaled_X = StandardScaler().fit_transform(pca_X)
pca_scaled_X = pca_X
# 3-D scatter of the clusters (colours recycled every 6 clusters).
plot_X = PCA(n_components=3).fit_transform(pca_scaled_X)
# %matplotlib inline
colors = ['r', 'b', 'g', 'm', 'k', 'c']
fig1 = plt.figure(figsize=[20, 10])
ax1 = fig1.add_subplot(111, projection='3d')
for i in range(64):
    ax1.scatter(plot_X[labels==i, 0], plot_X[labels==i, 1], plot_X[labels==i, 2], c=colors[i%6], s=5.);
plt.show()
# #%matplotlib inline
# 2-D scatter of all 64 GMM clusters (colours recycled every 6 clusters).
plot_X2 = PCA(n_components=2).fit_transform(pca_scaled_X)
colors = ['red', 'blue', 'green', 'magenta', 'black', 'darkgreen', 'orange', 'crimson', 'grey'] #'cyan',
fig2 = plt.figure(figsize=[10, 5])
ax2 = fig2.add_subplot(111)
# Bug fix: the original `for` line was corrupted by stray characters
# (`:.-+` and a line of slashes), which made this cell a SyntaxError.
for i in range(64):
    ax2.scatter(plot_X2[labels==i, 0], plot_X2[labels==i, 1], c=colors[i%6], s=5);
plt.show()
#plt.savefig('mfcc_spectral_features.png')
# #%matplotlib inline
# Same 2-D scatter, but only the first 6 clusters with distinct colours.
plot_X2 = PCA(n_components=2).fit_transform(pca_X)
colors = ['red', 'blue', 'green', 'magenta', 'black', 'darkgreen', 'orange', 'crimson', 'grey'] #'cyan',
fig2 = plt.figure(figsize=[10, 5])
ax2 = fig2.add_subplot(111)
for i in range(6):
    ax2.scatter(plot_X2[labels==i, 0], plot_X2[labels==i, 1], c=colors[i], s=5);
plt.show()
# +
# Listen to several chunks assigned to one cluster to judge its coherence.
target = 0
idx = np.where(labels==target)[0][0]
print(labels[idx])
#i = idx // num_chunks
#j = idx % num_chunks
display(ipd.Audio(chunks[idx, :], rate=SR))
idx = np.where(labels==target)[0][5]
display(ipd.Audio(chunks[idx, :], rate=SR))
idx = np.where(labels==target)[0][10]
display(ipd.Audio(chunks[idx, :], rate=SR))
idx = np.where(labels==target)[0][15]
display(ipd.Audio(chunks[idx, :], rate=SR))
idx = np.where(labels==target)[0][20]
display(ipd.Audio(chunks[idx, :], rate=SR))
idx = np.where(labels==target)[0][30]
display(ipd.Audio(chunks[idx, :], rate=SR))
# -
# Individual playback cells for spot-checking single chunks.
idx = np.where(labels==target)[0][10]
print(labels[idx])
#i = idx // num_chunks
#j = idx % num_chunks
ipd.Audio(chunks[idx, :], rate=SR)
idx = np.where(labels==target)[0][20]
print(labels[idx])
#i = idx // num_chunks
#j = idx % num_chunks
ipd.Audio(chunks[idx, :], rate=SR)
idx = np.where(labels==target)[0][30]
print(labels[idx])
#i = idx // num_chunks
#j = idx % num_chunks
ipd.Audio(chunks[idx, :], rate=SR)
| 11,164 |
/main/nb064.ipynb | 64e89a5a51cdb6874dcf480ff7099e30ac88725b | [] | no_license | shu421/google-smartphone-decimeter-challenge | https://github.com/shu421/google-smartphone-decimeter-challenge | 6 | 2 | null | null | null | null | Jupyter Notebook | false | false | .py | 5,543,271 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: 'Python 3.7.4 (''venv_outdoor'': venv)'
# metadata:
# interpreter:
# hash: bd00b75c79969edcf008edd1fd5973862c0c93beffacd004fb7d75ad6fcb357f
# name: python3
# ---
# +
# import library
import os
import pandas as pd
import numpy as np
np.random.seed(71)
import matplotlib.pyplot as plt
from matplotlib_venn import venn2, venn2_circles
import seaborn as sns
from tqdm.notebook import tqdm
import pathlib
import plotly
import plotly.express as px
import simdkalman
from scipy.interpolate import interp1d
from pathlib import Path
import pyproj
from pyproj import Proj, transform # 地理的な位置を示す情報を扱うときに、座標系・測地系変換を行ったり、2点間の距離・方位角を計算したりできる。
from pandarallel import pandarallel
pandarallel.initialize()
import optuna
# +
def calc_haversine(lat1, lon1, lat2, lon2):
    """Great-circle (haversine) distance in metres between two points.

    Inputs are array-like latitudes/longitudes in decimal degrees and are
    combined element-wise; the earth radius used is 6,367 km.
    """
    EARTH_RADIUS_M = 6_367_000
    rlat1, rlon1, rlat2, rlon2 = (np.radians(v) for v in (lat1, lon1, lat2, lon2))
    # Haversine formula: squared half-chord length between the two points.
    half_chord = (np.sin((rlat2 - rlat1) / 2) ** 2
                  + np.cos(rlat1) * np.cos(rlat2) * np.sin((rlon2 - rlon1) / 2) ** 2)
    return 2 * EARTH_RADIUS_M * np.arcsin(half_chord ** 0.5)
def percentile50(x):
    """Median (50th percentile) of *x*."""
    return np.percentile(x, q=50)

def percentile95(x):
    """95th percentile of *x*."""
    return np.percentile(x, q=95)
def get_train_score(df, gt):
    """Competition metric on the training set.

    Joins predictions *df* with ground truth *gt* on (phone,
    millisSinceGpsEpoch), computes the haversine error per point, averages
    the 50th and 95th error percentiles per phone, and returns the mean of
    that quantity over all phones.
    """
    truth = gt.rename(columns={'latDeg': 'latDeg_gt', 'lngDeg': 'lngDeg_gt'})
    merged = df.merge(truth, on=['phone', 'millisSinceGpsEpoch'], how='inner')
    # Per-point distance error in metres.
    merged['err'] = calc_haversine(merged['latDeg_gt'], merged['lngDeg_gt'],
                                   merged['latDeg'], merged['lngDeg'])
    # agg() names the columns after the functions: percentile50/percentile95.
    per_phone = merged.groupby('phone')['err'].agg([percentile50, percentile95])
    per_phone['p50_p90_mean'] = (per_phone['percentile50'] + per_phone['percentile95']) / 2
    return per_phone['p50_p90_mean'].mean()
# +
# directory setting
INPUT = '../input/google-smartphone-decimeter-challenge'
# base_train = pd.read_csv(INPUT + '/' + 'baseline_locations_train.csv')
train_df = pd.read_csv('../output/filtered_nb037_5.csv')
train_df['collectionName'] = train_df['phone'].map(lambda x: x.split('_')[0])
train_df['phoneName'] = train_df['phone'].map(lambda x: x.split('_')[1])
# base_test = pd.read_csv('../output/sub_nb037.csv')
test_df = pd.read_csv('../output/sub_nb037_5.csv')
# test_df = pd.read_csv('../output/sub_nb037_5.csv')
# base_test = pd.read_csv('../output/fixed_base_test.csv')
sample_sub = pd.read_csv(INPUT + '/' + 'sample_submission.csv')
# ground_truth
p = pathlib.Path(INPUT)
gt_files = list(p.glob('train/*/*/ground_truth.csv'))
print('ground_truth.csv count : ', len(gt_files))
gts = []
for gt_file in tqdm(gt_files):
gts.append(pd.read_csv(gt_file))
ground_truth = pd.concat(gts)
ground_truth['phone'] = ground_truth['collectionName'] + '_' + ground_truth['phoneName']
collection_uniq = train_df['collectionName'].unique()
SJC = [i for i in collection_uniq if 'SJC' in i]
MTV = [i for i in collection_uniq if 'MTV' in i]
SVL = [i for i in collection_uniq if 'SVL' in i]
SF = [i for i in collection_uniq if 'SF' in i]
RWC = [i for i in collection_uniq if 'RWC' in i]
test_collection_uniq = test_df['collectionName'].unique()
test_SJC = [i for i in test_collection_uniq if 'SJC' in i]
test_MTV = [i for i in test_collection_uniq if 'MTV' in i]
test_SVL = [i for i in test_collection_uniq if 'SVL' in i]
test_SF = [i for i in test_collection_uniq if 'SF' in i]
test_RWC = [i for i in test_collection_uniq if 'RWC' in i]
# -
get_train_score(train_df, ground_truth)
# # Reject outlier
# - 前と後の距離がそれぞれ50m以上離れていたら削除
def add_distance_diff(df):
    """Annotate *df* (in place) with neighbouring fixes and distances.

    Adds prev/next latitude, longitude and phone columns plus the haversine
    distance to the previous and the next fix.  Neighbour values that belong
    to a different phone trace are blanked out.  Returns the mutated frame.
    """
    df['latDeg_prev'] = df['latDeg'].shift(1)
    df['latDeg_next'] = df['latDeg'].shift(-1)
    df['lngDeg_prev'] = df['lngDeg'].shift(1)
    df['lngDeg_next'] = df['lngDeg'].shift(-1)
    df['phone_prev'] = df['phone'].shift(1)
    df['phone_next'] = df['phone'].shift(-1)
    df['dist_prev'] = calc_haversine(df['latDeg'], df['lngDeg'],
                                     df['latDeg_prev'], df['lngDeg_prev'])
    df['dist_next'] = calc_haversine(df['latDeg'], df['lngDeg'],
                                     df['latDeg_next'], df['lngDeg_next'])
    # A prev/next fix from a different phone is not a real neighbour.
    boundary_prev = df['phone'] != df['phone_prev']
    boundary_next = df['phone'] != df['phone_next']
    df.loc[boundary_prev, ['latDeg_prev', 'lngDeg_prev', 'dist_prev']] = np.nan
    df.loc[boundary_next, ['latDeg_next', 'lngDeg_next', 'dist_next']] = np.nan
    return df
# # Kalman filter
# +
# Constant-acceleration Kalman smoother over (lat, lng).
# State = [lat, lng, lat', lng', lat'', lng''] with a 1-second step.
T = 1.0
state_transition = np.array([[1, 0, T, 0, 0.5 * T ** 2, 0], [0, 1, 0, T, 0, 0.5 * T ** 2], [0, 0, 1, 0, T, 0],
                            [0, 0, 0, 1, 0, T], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])
# Small diagonal noises; the 1e-9 offset keeps the matrices well-conditioned.
process_noise = np.diag([1e-5, 1e-5, 5e-6, 5e-6, 1e-6, 1e-6]) + np.ones((6, 6)) * 1e-9
# Only the two position components are observed.
observation_model = np.array([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0]])
observation_noise = np.diag([5e-5, 5e-5]) + np.ones((2, 2)) * 1e-9
kf = simdkalman.KalmanFilter(
        state_transition = state_transition,
        process_noise = process_noise,
        observation_model = observation_model,
        observation_noise = observation_noise)
def apply_kf_smoothing(df_, kf_=kf):
    """Return a copy of *df_* with latDeg/lngDeg Kalman-smoothed per phone.

    Each (collectionName, phoneName) trace is passed through the smoother
    *kf_* as one sequence; all other columns are left untouched.
    """
    out = df_.copy()
    traces = out[['collectionName', 'phoneName']].drop_duplicates().to_numpy()
    for collection, phone in traces:
        mask = np.logical_and(out['collectionName'] == collection,
                              out['phoneName'] == phone)
        # simdkalman expects shape (n_series, n_timesteps, n_obs).
        coords = out[mask][['latDeg', 'lngDeg']].to_numpy()[np.newaxis, :, :]
        smoothed = kf_.smooth(coords)
        out.loc[mask, 'latDeg'] = smoothed.states.mean[0, :, 0]
        out.loc[mask, 'lngDeg'] = smoothed.states.mean[0, :, 1]
    return out
# -
# # Phone mean prediction
# - to use the average of the predictions of several phones in the same collection as the final prediction.
# +
def make_lerp_data(df):
    '''
    Generate interpolated lat,lng values for different phone times in the same collection.
    '''
    org_columns = df.columns
    # Generate a combination of time x collection x phone and combine it with the original data (generate records to be interpolated)
    time_list = df[['collectionName', 'millisSinceGpsEpoch']].drop_duplicates()
    phone_list =df[['collectionName', 'phoneName']].drop_duplicates()
    tmp = time_list.merge(phone_list, on='collectionName', how='outer')

    lerp_df = tmp.merge(df, on=['collectionName', 'millisSinceGpsEpoch', 'phoneName'], how='left')
    lerp_df['phone'] = lerp_df['collectionName'] + '_' + lerp_df['phoneName']
    # Sorting by (phone, time) is required so the shifts below really look at
    # this phone's previous/next observation.
    lerp_df = lerp_df.sort_values(['phone', 'millisSinceGpsEpoch'])

    # linear interpolation
    lerp_df['latDeg_prev'] = lerp_df['latDeg'].shift(1)
    lerp_df['latDeg_next'] = lerp_df['latDeg'].shift(-1)
    lerp_df['lngDeg_prev'] = lerp_df['lngDeg'].shift(1)
    lerp_df['lngDeg_next'] = lerp_df['lngDeg'].shift(-1)
    lerp_df['phone_prev'] = lerp_df['phone'].shift(1)
    lerp_df['phone_next'] = lerp_df['phone'].shift(-1)
    lerp_df['time_prev'] = lerp_df['millisSinceGpsEpoch'].shift(1)
    lerp_df['time_next'] = lerp_df['millisSinceGpsEpoch'].shift(-1)
    # Leave only records to be interpolated
    # i.e. rows with a missing position whose neighbours belong to the same phone.
    lerp_df = lerp_df[(lerp_df['latDeg'].isnull())&(lerp_df['phone']==lerp_df['phone_prev'])&(lerp_df['phone']==lerp_df['phone_next'])].copy()
    # calc lerp
    # Linear interpolation in time between the previous and next observation.
    lerp_df['latDeg'] = lerp_df['latDeg_prev'] + ((lerp_df['latDeg_next'] - lerp_df['latDeg_prev']) * ((lerp_df['millisSinceGpsEpoch'] - lerp_df['time_prev']) / (lerp_df['time_next'] - lerp_df['time_prev'])))
    lerp_df['lngDeg'] = lerp_df['lngDeg_prev'] + ((lerp_df['lngDeg_next'] - lerp_df['lngDeg_prev']) * ((lerp_df['millisSinceGpsEpoch'] - lerp_df['time_prev']) / (lerp_df['time_next'] - lerp_df['time_prev'])))
    # Leave only the data that has a complete set of previous and next data.
    lerp_df = lerp_df[~lerp_df['latDeg'].isnull()]

    return lerp_df[org_columns]
def calc_mean_pred(df, lerp_df):
    """Average the per-phone predictions of each collection at each epoch.

    *lerp_df* supplies interpolated positions for phones that have no
    prediction at a given timestamp, so that every phone contributes to the
    average.  Returns one averaged (latDeg, lngDeg) per input row of *df*.
    """
    combined = pd.concat([df, lerp_df])
    # Mean position over all phones observed at the same collection/epoch.
    averaged = (combined
                .groupby(['collectionName', 'millisSinceGpsEpoch'])[['latDeg', 'lngDeg']]
                .mean()
                .reset_index())
    result = df[['collectionName', 'phoneName', 'millisSinceGpsEpoch']].copy()
    result = result.merge(
        averaged[['collectionName', 'millisSinceGpsEpoch', 'latDeg', 'lngDeg']],
        on=['collectionName', 'millisSinceGpsEpoch'], how='left')
    return result
# -
# # Remove Device
# +
import pandas as pd
import numpy as np
np.random.seed(71)

def get_removedevice(input_df: pd.DataFrame, device: str) -> pd.DataFrame:
    """Replace *device*'s positions with values interpolated (over time)
    from the other phones of the same collection; positions that cannot be
    interpolated keep their original values.  Returns a frame in the
    original row order.

    NOTE(review): this adds an 'index' column to the caller's frame
    (mutation of the argument) — confirm that is acceptable.
    """
    # Remember the original row order so it can be restored at the end.
    input_df['index'] = input_df.index
    input_df = input_df.sort_values('millisSinceGpsEpoch')
    input_df.index = input_df['millisSinceGpsEpoch'].values # use millisSinceGpsEpoch as the index

    output_df = pd.DataFrame()
    for _, subdf in input_df.groupby('collectionName'):
        phones = subdf['phoneName'].unique()

        # When the collection has a single phone, or does not contain the
        # target device, there is nothing to replace.
        if (len(phones) == 1) or (not device in phones):
            output_df = pd.concat([output_df, subdf])
            continue

        origin_df = subdf.copy()

        # Drop the target device's positions.
        _index = subdf['phoneName']==device
        subdf.loc[_index, 'latDeg'] = np.nan
        subdf.loc[_index, 'lngDeg'] = np.nan

        # If values exist around a NaN, fill it by linear interpolation
        # using the (time-valued) index as the x-axis.
        subdf = subdf.interpolate(method='index', limit_area='inside')

        # Where no interpolated value exists, fall back to the original.
        _index = subdf['latDeg'].isnull()
        subdf.loc[_index, 'latDeg'] = origin_df.loc[_index, 'latDeg'].values
        subdf.loc[_index, 'lngDeg'] = origin_df.loc[_index, 'lngDeg'].values

        output_df = pd.concat([output_df, subdf])

    # Restore the original row order and drop the helper column.
    output_df.index = output_df['index'].values
    output_df = output_df.sort_index()

    del output_df['index']

    return output_df
# -
# # Position Shift
# +
def compute_dist(oof, gt=ground_truth):
    """Score predictions against ground truth with the competition metric.

    The score is the mean over phones of (50th + 95th percentile)/2 of the
    per-sample haversine distance error.  Also returns a per-phone
    DataFrame with 'q50' and 'q95' columns.
    """
    merged = oof.merge(gt, on=['phone', 'millisSinceGpsEpoch'])
    errors = calc_haversine(merged.latDeg_x, merged.lngDeg_x,
                            merged.latDeg_y, merged.lngDeg_y)
    # Per-phone error distribution.
    per_phone = pd.DataFrame({'phone': merged.phone, 'dst': errors}).groupby('phone')
    q50 = per_phone.quantile(.50).reset_index()
    q50.columns = ['phone', 'q50']
    q95 = per_phone.quantile(.95).reset_index()
    q95.columns = ['phone', 'q95']
    score = (per_phone.quantile(.50).mean() + per_phone.quantile(.95).mean()) / 2
    return score, q50.merge(q95)
def WGS84_to_ECEF(lat, lon, alt):
    """Convert geodetic WGS84 coordinates to ECEF x/y/z.

    Parameters: latitude/longitude in degrees, altitude in metres above
    the ellipsoid.  Returns the Cartesian (x, y, z) in metres.
    """
    # degrees -> radians
    phi = lat * (np.pi / 180.0)
    lam = lon * (np.pi / 180.0)
    a = 6378137.0                # WGS84 semi-major axis [m]
    finv = 298.257223563         # inverse flattening
    f = 1 / finv
    e2 = 1 - (1 - f) * (1 - f)   # first eccentricity squared
    # N: prime-vertical radius of curvature at this latitude
    N = a / np.sqrt(1 - e2 * np.sin(phi) * np.sin(phi))
    x = (N + alt) * np.cos(phi) * np.cos(lam)
    y = (N + alt) * np.cos(phi) * np.sin(lam)
    z = (N * (1 - e2) + alt) * np.sin(phi)
    return x, y, z
# pyproj transformer used by ECEF_to_WGS84 below:
# geocentric ECEF (metres) -> geodetic WGS84 (lon/lat degrees, height metres).
transformer = pyproj.Transformer.from_crs(
    {"proj":"geocent", "ellps":"WGS84", "datum":"WGS84"},
    {"proj":'latlong', "ellps":'WGS84', "datum":'WGS84'})
def ECEF_to_WGS84(x,y,z):
    """Inverse of WGS84_to_ECEF: ECEF metres -> (lon, lat, alt) in degrees/metres.

    Uses the module-level pyproj ``transformer`` defined above.
    """
    lon, lat, alt = transformer.transform(x,y,z,radians=False)
    return lon, lat, alt
def position_shift(fname, a):
    """Pull every predicted point back by ``a`` metres along the ECEF
    displacement from its previous sample (same phone only), compensating
    a systematic forward bias of the baseline positions.

    NOTE(review): ``fname`` is a DataFrame despite the name, and it is
    modified in place (helper columns are added).

    Parameters
    ----------
    fname : pd.DataFrame
        Predictions with 'phone', 'millisSinceGpsEpoch', 'latDeg', 'lngDeg'.
    a : float
        Shift distance in metres (tuned externally, see ``objective``).
    """
    d = fname
    # Fixed ellipsoidal height assumed for the ECEF conversion [m].
    d['heightAboveWgs84EllipsoidM'] = 63.5
    d['x'], d['y'], d['z'] = zip(*d.apply(lambda x: WGS84_to_ECEF(x.latDeg, x.lngDeg, x.heightAboveWgs84EllipsoidM), axis=1))
    # a = -0.2
    d.sort_values(['phone', 'millisSinceGpsEpoch'], inplace=True)
    for fi in ['x','y','z']:
        # previous sample's coordinate, kept only where the previous row
        # belongs to the same phone
        d[[fi+'p']] = d[fi].shift(1).where(d['phone'].eq(d['phone'].shift(1)))
        # diff: displacement from the previous point
        d[[fi+'diff']] = d[fi] - d[fi+'p']
    # dist: straight-line (ECEF) distance from the previous point
    d[['dist']] = np.sqrt(d['xdiff']**2 + d['ydiff']**2 + d['zdiff']**2)
    for fi in ['x','y','z']:
        # move the point a/dist of the way back towards the previous sample
        d[[fi+'new']] = d[fi+'p'] + d[fi+'diff']*(1-a/d['dist'])
    lng, lat, alt = ECEF_to_WGS84(d['xnew'].values, d['ynew'].values, d['znew'].values)
    # Keep the original coordinates where no previous point existed.
    lng[np.isnan(lng)] = d.loc[np.isnan(lng),'lngDeg']
    lat[np.isnan(lat)] = d.loc[np.isnan(lat),'latDeg']
    d['latDeg'] = lat
    d['lngDeg'] = lng
    d.sort_values(['phone','millisSinceGpsEpoch'], inplace=True)
    return d[['phone', 'millisSinceGpsEpoch', 'latDeg', 'lngDeg']]
def objective(trial):
    """Optuna objective: tune the ``position_shift`` distance ``a``.

    Relies on the module-level ``filtered`` predictions and ``ground_truth``.
    """
    a = trial.suggest_uniform('a', -1, 1)
    score, scores = compute_dist(position_shift(filtered, a), ground_truth)
    return score
# -
# # remove low Speed
# +
def add_distance_nogt_diff(df):
    """Add previous/next-sample coordinates, coordinate diffs, and
    haversine step distances to every row (no ground truth required).

    Rows at a phone boundary get NaN in their prev/next columns.
    """
    # shift(1): value from the previous row; shift(-1): value from the next row
    df['latDeg_prev'] = df['latDeg'].shift(1)
    df['latDeg_next'] = df['latDeg'].shift(-1)
    df['lngDeg_prev'] = df['lngDeg'].shift(1)
    df['lngDeg_next'] = df['lngDeg'].shift(-1)
    df['phone_prev'] = df['phone'].shift(1)
    df['phone_next'] = df['phone'].shift(-1)
    df['latDeg_prev_diff'] = df['latDeg'] - df['latDeg_prev']
    df['latDeg_next_diff'] = df['latDeg_next'] - df['latDeg']
    df['lngDeg_prev_diff'] = df['lngDeg'] - df['lngDeg_prev']
    df['lngDeg_next_diff'] = df['lngDeg_next'] - df['lngDeg']
    df['dist_prev'] = calc_haversine(df['latDeg'], df['lngDeg'], df['latDeg_prev'], df['lngDeg_prev'])
    df['dist_next'] = calc_haversine(df['latDeg'], df['lngDeg'], df['latDeg_next'], df['lngDeg_next'])
    # Invalidate prev/next info across phone boundaries.
    df.loc[df['phone']!=df['phone_prev'], ['latDeg_prev', 'lngDeg_prev', 'dist_prev',
                                           'latDeg_prev_diff', 'lngDeg_prev_diff']] = np.nan
    df.loc[df['phone']!=df['phone_next'], ['latDeg_next', 'lngDeg_next', 'dist_next',
                                           'latDeg_next_diff', 'lngDeg_next_diff']] = np.nan
    return df
def remove_lowSpeed(_df, dist_thr=0.4):
    """Blank out near-stationary points (step to the previous or next
    sample shorter than ``dist_thr`` metres) and re-fill them per phone
    by linear interpolation.
    """
    df = _df.copy()
    df['latDeg'] = df['latDeg'].astype(float)
    df['lngDeg'] = df['lngDeg'].astype(float)
    df = add_distance_nogt_diff(df)
    # Remove the low-speed jitter points.
    low_speed = (df['dist_prev'] < dist_thr) | (df['dist_next'] < dist_thr)
    df.loc[low_speed, 'latDeg'] = np.nan
    df.loc[low_speed, 'lngDeg'] = np.nan
    # Interpolate the gaps for each phone separately.
    pieces = []
    for _, sub in df.groupby('phone'):
        pieces.append(sub.interpolate(method='linear',
                                      limit=None,
                                      limit_direction='both'))
    df = pd.concat(pieces)
    return df[['phone','millisSinceGpsEpoch','latDeg','lngDeg']]
def objective_rmls(trial):
    """Optuna objective: tune the ``remove_lowSpeed`` distance threshold.

    Relies on the module-level ``filtered`` predictions and ``ground_truth``.
    """
    x = trial.suggest_uniform('x', 0.5, 0.9)
    score = get_train_score(remove_lowSpeed(filtered, x), ground_truth)
    return score
# -
# study = optuna.create_study()
# study.optimize(objective_rmls, n_trials=100)
# # phones mean
def mean_with_other_phones(df_):
    """For every phone, average its track with the time-interpolated
    tracks of the other phones in the same collection.

    For each phone a ``correction`` array is built: column 0 counts the
    contributing phones per sample (starting at 1 for the phone itself),
    columns 1: accumulate the lat/lng sums; dividing by the count gives
    the averaged positions, which are written back into the frame.
    """
    df = df_.copy()
    collections_list = df[['collectionName']].drop_duplicates().to_numpy()
    for collection in collections_list:
        # NOTE(review): comparing a python list against the 1-element
        # numpy array ``collection`` relies on numpy broadcasting; it
        # works but is fragile — confirm before touching.
        phone_list = df[df['collectionName'].to_list() == collection][['phoneName']].drop_duplicates().to_numpy()
        phone_data = {}
        corrections = {}
        for phone in phone_list:
            cond = np.logical_and(df['collectionName'] == collection[0], df['phoneName'] == phone[0]).to_list()
            phone_data[phone[0]] = df[cond][['millisSinceGpsEpoch', 'latDeg', 'lngDeg']].to_numpy()
        for current in phone_data:
            # col 0 = contributor count (self included), cols 1: = lat/lng sums
            correction = np.ones(phone_data[current].shape, dtype=np.float64)
            correction[:,1:] = phone_data[current][:,1:]
            # Phone timestamps don't completely match, so interpolate the
            # other phones onto this phone's sample times.
            for other in phone_data:
                if other == current:
                    continue
                loc = interp1d(phone_data[other][:,0],
                               phone_data[other][:,1:],
                               axis=0,
                               kind='linear',
                               copy=False,
                               bounds_error=None,
                               fill_value='extrapolate',
                               assume_sorted=True)
                # Restrict to the overlap of the two phones' time ranges.
                start_idx = 0
                stop_idx = 0
                for idx, val in enumerate(phone_data[current][:,0]):
                    if val < phone_data[other][0,0]:
                        start_idx = idx
                    if val < phone_data[other][-1,0]:
                        stop_idx = idx
                if stop_idx - start_idx > 0:
                    correction[start_idx:stop_idx,0] += 1
                    correction[start_idx:stop_idx,1:] += loc(phone_data[current][start_idx:stop_idx,0])
            # Average: summed positions / number of contributors.
            correction[:,1] /= correction[:,0]
            correction[:,2] /= correction[:,0]
            corrections[current] = correction.copy()
        for phone in phone_list:
            cond = np.logical_and(df['collectionName'] == collection[0], df['phoneName'] == phone[0]).to_list()
            df.loc[cond, ['latDeg', 'lngDeg']] = corrections[phone[0]][:,1:]
    return df
# # Snap to grid
# +
def make_line_points(df, num_interpolate=5):
    """Densify a trace of lat/lng rows into grid points on the connecting
    line segments.

    New index positions (idx + 0.5) are inserted ``num_interpolate``
    times and the resulting NaN rows are filled by linear interpolation.

    Parameters
    ----------
    df : pd.DataFrame with 'latDeg' and 'lngDeg' columns.
    num_interpolate : int
        Number of midpoint-insertion passes.
    """
    line_points = df[['latDeg','lngDeg']].copy()
    # NOTE(review): boolean-masking a DataFrame keeps the full index, so
    # ``idx`` ends up covering every row, not only the switch points —
    # confirm whether that was the intent.
    switches = line_points.ne(line_points.shift(-1))
    idx = switches[switches].index
    for _ in range(num_interpolate):
        # Empty rows at idx + 0.5 become NaN placeholders to interpolate.
        df_new = pd.DataFrame(index=idx + 0.5)
        line_points = pd.concat([line_points, df_new]).sort_index()
    line_points = line_points.reset_index(drop=True)
    line_points = line_points.interpolate(method='linear')
    return line_points
def snap_to_grid(input_df, line_points, target_collection=None, max_thr=50, min_thr=0, rm_low_thr=2):
    """Snap predicted points to the nearest grid point (see
    ``make_line_points``) when its haversine distance lies within
    [min_thr, max_thr].

    Points whose step to the previous/next sample is below ``rm_low_thr``
    are left untouched.  If ``target_collection`` is given, only that
    collection's rows are snapped; all other rows pass through unchanged.

    NOTE(review): uses pandarallel's ``parallel_apply`` — assumes
    ``pandarallel.initialize()`` was called elsewhere in the notebook.
    """
    if target_collection:
        input_df_ = input_df[input_df['collectionName']==target_collection]
    else:
        input_df_ = input_df.copy()
    def find_closest_point(point, points, max_thr=50, min_thr=0):
        """ Find closest point from a list of points. """
        # Broadcast this single point against every grid point.
        df_ = pd.DataFrame({'latDeg':point['latDeg'].repeat(len(points)),
                            'lngDeg':point['lngDeg'].repeat(len(points))},
                           columns=['latDeg', 'lngDeg'])
        # return minimum distance points
        distance = calc_haversine(points['latDeg'], points['lngDeg'],
                                  df_['latDeg'], df_['lngDeg']).min()
        # Skip low-speed points; rm_low_thr is read from the enclosing scope.
        # Returns None (implicitly) when no acceptable grid point exists.
        if ((point['dist_prev']<rm_low_thr) | (point['dist_next']<rm_low_thr)) == False:
            if min_thr <= distance <= max_thr:
                return list(points.loc[calc_haversine(points['latDeg'], points['lngDeg'],
                                                      df_['latDeg'], df_['lngDeg']).argmin()])
    def apply_grid_point(x, closest_point):
        '''
        input:
            x: train row
            closest_point: closest point or None
        '''
        idx = x.name
        closest_point1 = closest_point[closest_point.index==idx]
        if closest_point1.isnull().values == True:
            # no grid point found for this row -> keep original coordinates
            pass
        else:
            x['latDeg'] = closest_point1.values[0][0]
            x['lngDeg'] = closest_point1.values[0][1]
        return x
    df = input_df_.copy()
    closest_point = df[['latDeg','lngDeg', 'dist_prev', 'dist_next']].parallel_apply(lambda x: find_closest_point(x, line_points[['latDeg', 'lngDeg']], max_thr=max_thr, min_thr=min_thr), axis=1)
    df[['latDeg', 'lngDeg']] = df[['latDeg', 'lngDeg']].parallel_apply(apply_grid_point, closest_point=closest_point, axis=1)
    # Write the snapped rows back into a copy of the full input frame.
    output_df = input_df.copy()
    output_df.loc[output_df.index.isin(df.index)] = df
    return output_df
# -
# # moving
# +
# making ground truth file
def make_gt(path, collectionName, phoneName):
    """Load every train ground-truth CSV, inner-join it with the baseline
    locations ('_gt' / '_bs' suffixes), and optionally restrict the
    result to one (collectionName, phoneName) pair.

    Passing None for either selector returns the full joined table.
    """
    root = pathlib.Path(path)
    gt_files = list(root.glob('train/*/*/ground_truth.csv'))
    ground_truth = pd.concat([pd.read_csv(gt_file) for gt_file in gt_files])
    # Baseline predictions to join against the ground truth.
    cols = ['collectionName', 'phoneName', 'millisSinceGpsEpoch', 'latDeg', 'lngDeg']
    baseline = pd.read_csv(path + '/baseline_locations_train.csv', usecols=cols)
    ground_truth = ground_truth.merge(baseline, how='inner', on=cols[:3], suffixes=('_gt', '_bs'))
    if (collectionName is None) or (phoneName is None):
        return ground_truth
    mask = ((ground_truth['collectionName'] == collectionName)
            & (ground_truth['phoneName'] == phoneName))
    return ground_truth[mask]
def make_tag(df, tag_v):
    """Label rows by speed: tag=1 below ``tag_v`` m/s ("not moving"),
    tag=0 at or above it.  Rows with NaN speed keep a NaN tag.
    """
    slow = df['speedMps'] < tag_v
    fast = df['speedMps'] >= tag_v
    df.loc[slow, 'tag'] = 1
    df.loc[fast, 'tag'] = 0
    return df
def gnss_log_to_dataframes(path):
    """Parse a raw GnssLog text file into one DataFrame per sensor section.

    Header lines of the form '# Raw,col1,col2,...' define the column
    names of a section; data lines 'Raw,v1,v2,...' are collected under
    that section.  Lines for unknown sections are ignored.

    Parameters
    ----------
    path : str
        Path to the *_GnssLog.txt file.

    Returns
    -------
    dict
        section name -> DataFrame, with columns coerced to numeric via
        pd.to_numeric where possible ('CodeType' stays a string).
    """
    print('Loading ' + path, flush=True)
    gnss_section_names = {'Raw', 'UncalAccel', 'UncalGyro', 'UncalMag', 'Fix', 'Status', 'OrientationDeg'}
    with open(path) as f_open:
        datalines = f_open.readlines()
    datas = {k: [] for k in gnss_section_names}
    gnss_map = {k: [] for k in gnss_section_names}
    for dataline in datalines:
        is_header = dataline.startswith('#')
        fields = dataline.strip('#').strip().split(',')
        # Skip over notes, version numbers, etc.
        if is_header and fields[0] in gnss_section_names:
            # fields[0] is guaranteed to be a key of gnss_map here, so no
            # exception guard is needed (the original bare except was dead).
            gnss_map[fields[0]] = fields[1:]
        elif not is_header:
            try:
                datas[fields[0]].append(fields[1:])
            except KeyError:
                # data line for a section we do not track
                pass
    results = {k: pd.DataFrame(v, columns=gnss_map[k]) for k, v in datas.items()}
    # pandas doesn't infer types from these lists of strings by default,
    # so coerce column by column, leaving non-numeric columns as-is.
    for k, df in results.items():
        for col in df.columns:
            if col == 'CodeType':
                continue
            try:
                results[k][col] = pd.to_numeric(results[k][col])
            except (ValueError, TypeError):
                pass
    return results
def add_IMU(df, INPUT, cname, pname):
    """Attach nearest-in-time IMU readings (accelerometer, magnetometer,
    gyroscope) from the phone's GnssLog to every row of ``df``.

    Parameters
    ----------
    df : pd.DataFrame
        Ground-truth/baseline rows for one (collection, phone).
    INPUT : str
        Dataset root directory.
    cname, pname : str
        Collection and phone name selecting the GnssLog file.
    """
    path = INPUT + "/train/"+cname+"/"+pname+"/"+pname+"_GnssLog.txt"
    gnss_dfs = gnss_log_to_dataframes(path)
    acce_df = gnss_dfs["UncalAccel"]
    magn_df = gnss_dfs["UncalMag"]
    gyro_df = gnss_dfs["UncalGyro"]

    def _to_gps_seconds(sensor_df):
        # UTC ms -> ms since the GPS epoch, truncated to whole seconds,
        # +18 to account for leap seconds.
        sensor_df["millisSinceGpsEpoch"] = sensor_df["utcTimeMillis"] - 315964800000
        sensor_df["millisSinceGpsEpoch"] = sensor_df["millisSinceGpsEpoch"]//1000 + 18

    for sensor_df in (acce_df, magn_df, gyro_df):
        _to_gps_seconds(sensor_df)
    # Axis remapping — NOTE(review): accel maps Z->x, X->y, Y->z and mag
    # maps Z->x, Y->y, X->z while gyro is a straight copy; this convention
    # is inherited from the source notebook — confirm before relying on it.
    acce_df["x_f_acce"] = acce_df["UncalAccelZMps2"]
    acce_df["y_f_acce"] = acce_df["UncalAccelXMps2"]
    acce_df["z_f_acce"] = acce_df["UncalAccelYMps2"]
    # magn
    magn_df["x_f_magn"] = magn_df["UncalMagZMicroT"]
    magn_df["y_f_magn"] = magn_df["UncalMagYMicroT"]
    magn_df["z_f_magn"] = magn_df["UncalMagXMicroT"]
    # gyro
    gyro_df["x_f_gyro"] = gyro_df["UncalGyroXRadPerSec"]
    gyro_df["y_f_gyro"] = gyro_df["UncalGyroYRadPerSec"]
    gyro_df["z_f_gyro"] = gyro_df["UncalGyroZRadPerSec"]
    # Merge each sensor onto the positions by nearest timestamp.
    base_cols = ["collectionName", "phoneName", "millisSinceGpsEpoch",
                 "latDeg_gt", "lngDeg_gt", "latDeg_bs", "lngDeg_bs",
                 "heightAboveWgs84EllipsoidM", "speedMps"]
    df = df[base_cols].sort_values('millisSinceGpsEpoch')
    for sensor_df, sensor_cols in ((acce_df, ["x_f_acce", "y_f_acce", "z_f_acce"]),
                                   (magn_df, ["x_f_magn", "y_f_magn", "z_f_magn"]),
                                   (gyro_df, ["x_f_gyro", "y_f_gyro", "z_f_gyro"])):
        df = pd.merge_asof(df,
                           sensor_df[["millisSinceGpsEpoch"] + sensor_cols].sort_values('millisSinceGpsEpoch'),
                           on='millisSinceGpsEpoch', direction='nearest')
    return df
def make_train(INPUT, train_cname, tag_v):
    """Build the labelled table for the moving / not-moving classifier.

    For every phone of every listed collection: take the ground-truth
    rows, attach the IMU features, then tag rows by speed threshold.
    """
    # make ground_truth file
    gt = make_gt(INPUT, None, None)
    frames = [pd.DataFrame()]
    for cname in train_cname:
        phone_list = gt[gt['collectionName'] == cname]['phoneName'].drop_duplicates()
        for pname in phone_list:
            sub = gt[(gt['collectionName'] == cname) & (gt['phoneName'] == pname)]
            frames.append(add_IMU(sub, INPUT, cname, pname))
    train_tag = pd.concat(frames)
    # make tag
    return make_tag(train_tag, tag_v)
import lightgbm as lgb
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
def lgbm(train, test, col, lgb_params):
    """Fit a LightGBM classifier on train[col] -> 'tag' and print the
    confusion matrix / accuracy of its predictions on the test labels.

    Returns the predicted tags for ``test``.
    """
    classifier = lgb.LGBMClassifier(**lgb_params)
    classifier.fit(train[col], train['tag'])
    predictions = classifier.predict(test[col])
    print('confusion matrix : \n', confusion_matrix(predictions, test['tag']))
    print('accuracy score : ', accuracy_score(predictions, test['tag']))
    return predictions
def remove_nmoving(_df, window=100):
    """Smooth points tagged as not moving (tag == 1) by replacing them
    with the mean of a +/- window/2 neighbourhood, then interpolate any
    remaining gaps per phone.

    NOTE(review): the window bounds mix positional length with .loc label
    slicing — correct only for a default integer index.
    """
    df = _df.copy()
    df['latDeg'] = df['latDeg'].astype(float)
    df['lngDeg'] = df['lngDeg'].astype(float)
    half = int(window / 2)
    smoothed = []
    # Process each phone's trace independently.
    for _, sub in df.groupby('phone'):
        for idx in sub[sub['tag'] == 1].index:
            lo = idx - half
            hi = idx + half
            if lo > 0 and hi < len(sub):
                sub.loc[idx, 'latDeg'] = sub.loc[lo:hi, 'latDeg'].mean()
                sub.loc[idx, 'lngDeg'] = sub.loc[lo:hi, 'lngDeg'].mean()
        smoothed.append(sub.interpolate(method='linear',
                                        limit=None,
                                        limit_direction='both'))
    df = pd.concat(smoothed)
    return df[['phone','millisSinceGpsEpoch','latDeg','lngDeg']]
# --- configuration and training of the moving / not-moving classifier ---
INPUT = '../input/google-smartphone-decimeter-challenge'
# Collections used to fit / evaluate the tag classifier.
train_cname = ['2021-04-29-US-SJC-2', '2021-03-10-US-SVL-1']
test_cname = ['2021-04-28-US-SJC-1']
# Speed threshold [m/s]: below this a point is tagged as "not moving".
tag_v = 0.5
# IMU feature columns fed to the classifier.
col = ["x_f_acce", "y_f_acce", "z_f_acce", "x_f_magn", "y_f_magn", "z_f_magn", "x_f_gyro", "y_f_gyro", "z_f_gyro"]
# LightGBM hyper-parameters.
lgb_params = {
    'num_leaves': 90,
    'n_estimators': 125,
}
# Build the labelled train/test tables and predict test tags.
train_tag = make_train(INPUT, train_cname, tag_v)
test_tag = make_train(INPUT, test_cname, tag_v)
test_tag['preds'] = lgbm(train_tag, test_tag, col, lgb_params)
# -
# NOTE(review): this redefines remove_nmoving from the cell above and
# shadows it from here on.  The only difference is that the upper window
# bound check (window_f < len(df)) is commented out, so points near the
# end of a trace are smoothed too (.loc slicing past the last label just
# truncates at the end of the frame).
def remove_nmoving(_df, window=100):
    """Smooth "not moving" (tag == 1) points with a +/- window/2 mean,
    then interpolate remaining gaps per phone."""
    df = _df.copy()
    df['latDeg'] = df['latDeg'].astype(float)
    df['lngDeg'] = df['lngDeg'].astype(float)
    # interpolate per phone
    dfs = []
    for _, df in df.groupby('phone'):
        _index = df[df['tag']==1].index
        for idx in _index:
            window_b = idx-int(window/2)
            window_f = idx+int(window/2)
            if window_b>0: #and window_f<len(df):
                df.loc[idx, 'latDeg'] = df.loc[window_b:window_f, 'latDeg'].mean()
                df.loc[idx, 'lngDeg'] = df.loc[window_b:window_f, 'lngDeg'].mean()
        df = df.interpolate(method='linear',
                            limit=None,
                            limit_direction='both')
        dfs.append(df)
    df = pd.concat(dfs)
    return df[['phone','millisSinceGpsEpoch','latDeg','lngDeg']]
# Quick experiment: merge predicted tags into the filtered predictions and
# score the effect of smoothing the "not moving" points.
# NOTE(review): the merge is on millisSinceGpsEpoch only — rows would
# duplicate if several phones share an epoch; confirm this is safe here.
tmp = filtered.copy()
# tmp['tag'] = 0
tmp1 = pd.merge(tmp, train_tag[['millisSinceGpsEpoch','tag']], on='millisSinceGpsEpoch', how='left')
tmp2 = remove_nmoving(tmp1, window=10)
get_train_score(tmp2, ground_truth)
# # train
# +
# ========================================================================
# Full post-processing pipeline on the training set, scored at each stage.
# ========================================================================
# reject outlier: a jump of more than ``th`` metres to either neighbour
train_ro = add_distance_diff(train_df)
th = 43
train_ro.loc[((train_ro['dist_prev'] > th) | (train_ro['dist_next'] > th)), ['latDeg', 'lngDeg']] = np.nan
# # remove not moving
# filtered = pd.merge(train_ro, train_tag[['millisSinceGpsEpoch','tag']], on='millisSinceGpsEpoch', how='left')
# filtered = remove_nmoving(filtered, window=10)
# print('remove not moving:', get_train_score(filtered, ground_truth))
# filtered['collectionName'] = filtered['phone'].apply(lambda x: x.split('_')[0])
# filtered['phoneName'] = filtered['phone'].apply(lambda x: x.split('_')[1])
# kalman filter
cols = ['collectionName', 'phoneName', 'millisSinceGpsEpoch', 'latDeg', 'lngDeg']
train_ro_kf = apply_kf_smoothing(train_ro[cols])
# phone mean pred
train_lerp = make_lerp_data(train_ro_kf)
train_mean_pred = calc_mean_pred(train_ro_kf, train_lerp)
train_ro_kf['phone'] = train_ro_kf['collectionName'] + '_' + train_ro_kf['phoneName']
train_mean_pred['phone'] = train_mean_pred['collectionName'] + '_' + train_mean_pred['phoneName']
print('reject outlier + kalmanfilter: ', get_train_score(train_ro_kf, ground_truth))
print('phone mean pred : ', get_train_score(train_mean_pred, ground_truth))
train_mean_pred = train_mean_pred.drop('collectionName', axis=1)
train_mean_pred = train_mean_pred.drop('phoneName', axis=1)
train_mean_pred = train_mean_pred.reindex(['phone', 'millisSinceGpsEpoch', 'latDeg', 'lngDeg'], axis='columns')
filtered = train_mean_pred
# remove device
filtered['collectionName'] =filtered['phone'].map(lambda x: x.split('_')[0])
filtered['phoneName'] = filtered['phone'].map(lambda x: x.split('_')[1])
filtered = get_removedevice(filtered, 'SamsungS20Ultra')
filtered = filtered.drop(columns=['collectionName', 'phoneName'], axis=1)
# phones mean
filtered['collectionName'] =filtered['phone'].map(lambda x: x.split('_')[0])
filtered['phoneName'] = filtered['phone'].map(lambda x: x.split('_')[1])
filtered = mean_with_other_phones(filtered)
filtered = filtered.drop(columns=['collectionName', 'phoneName'], axis=1)
print('phones mean :', get_train_score(filtered, ground_truth))
# remove lowSpeed (threshold tuned with optuna, see objective_rmls)
filtered = remove_lowSpeed(filtered, 0.6939300630849313)
print('remove low speed: ', get_train_score(filtered, ground_truth))
# position shift (shift distance tuned with optuna, see objective)
filtered = position_shift(filtered, a=0.6602905068929037)
print('position shift: ', get_train_score(filtered, ground_truth))
# snap to grid
filtered['collectionName'] =filtered['phone'].map(lambda x: x.split('_')[0])
filtered['phoneName'] = filtered['phone'].map(lambda x: x.split('_')[1])
filtered = add_distance_diff(filtered)
# NOTE(review): ``SJC[0] or SJC[1] or SJC[2]`` evaluates to SJC[0] only
# (python ``or`` returns the first truthy operand), so the grid is built
# from the first SJC collection alone — '|'.join(SJC) was probably meant.
line_points = make_line_points(ground_truth[ground_truth['collectionName'].str.contains(SJC[0] or SJC[1] or SJC[2])])
# line_points = make_line_points(ground_truth, num_interpolate=5)
for c in SJC:
    filtered = snap_to_grid(filtered,
                            line_points,
                            target_collection=c,
                            max_thr=50,
                            min_thr=0,
                            rm_low_thr=-1)
    print(f'snap to grid {c}:', get_train_score(filtered, ground_truth))
# # to csv
# filtered = filtered[['phone', 'millisSinceGpsEpoch', 'latDeg', 'lngDeg']]
# filtered.to_csv('../output/filtered_nb056.csv', index=False)
# remove not moving
filtered = pd.merge(filtered, train_tag[['millisSinceGpsEpoch','tag']], on='millisSinceGpsEpoch', how='left')
filtered = remove_nmoving(filtered, window=10)
print('remove not moving:', get_train_score(filtered, ground_truth))
# score
print('ro, kf, pm, rm, psm, rmls, ps, snp: ', get_train_score(filtered, ground_truth))
# -
# Recorded notebook output: final training score of the pipeline above.
3.1466043205766607
# # submission
# +
# Template for the submission file; the same pipeline as on train is
# applied to the test predictions.
submission = sample_sub
# reject outlier
test_ro = add_distance_diff(test_df)
th = 43
test_ro.loc[((test_ro['dist_prev'] > th) | (test_ro['dist_next'] > th)), ['latDeg', 'lngDeg']] = np.nan
# kalman filter
test_kf = apply_kf_smoothing(test_ro)
# phone mean pred
test_lerp = make_lerp_data(test_kf)
test_mean_pred = calc_mean_pred(test_kf, test_lerp)
# NOTE(review): positional assignment — assumes test_mean_pred rows are
# aligned 1:1 with sample_sub; confirm the ordering.
submission['latDeg'] = test_mean_pred['latDeg']
submission['lngDeg'] = test_mean_pred['lngDeg']
# Remove Device
submission['collectionName'] = submission['phone'].map(lambda x: x.split('_')[0])
submission['phoneName'] = submission['phone'].map(lambda x: x.split('_')[1])
submission = get_removedevice(submission, 'SamsungS20Ultra')
submission = submission.drop(columns=['collectionName', 'phoneName'], axis=1)
# phones mean
submission['collectionName'] =submission['phone'].map(lambda x: x.split('_')[0])
submission['phoneName'] = submission['phone'].map(lambda x: x.split('_')[1])
submission = mean_with_other_phones(submission)
submission = submission.drop(columns=['collectionName', 'phoneName'], axis=1)
# remove lowSpeed (same tuned threshold as on train)
submission = remove_lowSpeed(submission, 0.6939300630849313)
# position shift (same tuned shift distance as on train)
submission = position_shift(submission, a=0.6602905068929037)
# snap to grid
submission['collectionName'] =submission['phone'].map(lambda x: x.split('_')[0])
submission['phoneName'] = submission['phone'].map(lambda x: x.split('_')[1])
line_points = make_line_points(ground_truth)
submission = add_distance_diff(submission)
for c in test_SJC:
    submission = snap_to_grid(submission,
                              line_points,
                              target_collection=c,
                              max_thr=50,
                              min_thr=0,
                              rm_low_thr=-1)
# remove not moving
submission = pd.merge(submission, test_tag[['millisSinceGpsEpoch','tag']], on='millisSinceGpsEpoch', how='left')
submission = remove_nmoving(submission, window=10)
# submission: keep only the columns expected by the competition format
submission = submission[['phone', 'millisSinceGpsEpoch', 'latDeg', 'lngDeg']]
# submission.to_csv('../output/sub_nb064.csv', index=False)
print('finish')
# -
# Sanity checks and final write-out.
test_tag['preds'].value_counts()
test_tag['preds'] = lgbm(train_tag, test_tag, col, lgb_params)
submission.to_csv('../output/sub_nb064.csv', index=False)
def visualize_trafic(df, center=None, zoom=9):
    """Plot phone traces on a mapbox scatter plot, colour-coded per phone.

    Parameters
    ----------
    df : pd.DataFrame
        Needs 'latDeg', 'lngDeg' and 'phone' columns.
    center : dict, optional
        {'lat': ..., 'lon': ...} map centre; defaults to the Bay Area.
    zoom : int
        Initial mapbox zoom level.
    """
    # Avoid a mutable default argument: build the default centre per call.
    if center is None:
        center = {"lat": 37.6458, "lon": -122.4056}
    fig = px.scatter_mapbox(df,
                            # Here, plotly gets, (x,y) coordinates
                            lat="latDeg",
                            lon="lngDeg",
                            # Here, plotly detects color of series
                            color="phone",
                            labels="phone",
                            zoom=zoom,
                            center=center,
                            height=600,
                            width=800)
    fig.update_layout(mapbox_style='stamen-terrain')
    fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
    fig.update_layout(title_text="GPS trafic")
    fig.show()
# Render the final submission traces as a visual sanity check.
visualize_trafic(submission)
| 35,116 |
/GFF/compare_matlab_python_community.ipynb | 60c1c717be04f36e7844837c5680b3c12818a9d8 | [] | no_license | johnsonice/imf_gff_metwork | https://github.com/johnsonice/imf_gff_metwork | 0 | 1 | null | null | null | null | Jupyter Notebook | false | false | .py | 19,691 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# ## Export to matlab for community detection
# +
import csv
import networkx as nx
import numpy as np
import pandas as pd
import scipy.io
import community
from itertools import compress
import matplotlib.pyplot as plt
import seaborn as sns
from seaborn import color_palette, set_style, palplot
plt.style.use('ggplot')
# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')
# +
## read pre-processed data from stata
df = pd.read_stata('../data/0_CPIS_CDIS_BIS_USTIC_merged_fixed1.dta')
keep_var = ['countrycode','counterpart_code','country','counterpart','year','CDIS_IAD','CPIS_IAP','loans_dep']
df = df[keep_var] ## keep only used variables
df = df.replace(np.nan,0) ## turn na to zero
num = df._get_numeric_data()
num[num < 0] = 0 ## turn negative to zero
# total bilateral exposure = direct investment + portfolio + bank loans/deposits
df['total'] = df[['CDIS_IAD','CPIS_IAP','loans_dep']].sum(axis=1)
#df.describe()
mata = ['countrycode','counterpart_code','country','counterpart','year']
var_org = ['CDIS_IAD','CPIS_IAP','loans_dep','total']
var_sum_out = ['CDIS_Sum_out','CPIS_Sum_out','loans_dep_Sum_out','total_Sum_out']
var_sum_in = ['CDIS_Sum_in','CPIS_Sum_in','loans_dep_Sum_in','total_Sum_in']
var_weight = ['CDIS_weight','CPIS_weight','loans_dep_weight','total_weight']
df[var_sum_out]= df.groupby(['countrycode','year'])[var_org].transform(sum) ## like stata egen sum
df[var_sum_in]= df.groupby(['counterpart_code','year'])[var_org].transform(sum) ## like stata egen sum
# weight = share of each counterpart in the reporting country's total outflows
df_weight = pd.DataFrame(df[var_org].values / df[var_sum_out].values,columns=[var_weight])
df[var_weight] = df_weight ## create the weight variables
mata.extend(var_weight)
df = df[mata]
df.fillna(0,inplace=True)
def to_undirected(G, weight=None):
    """Symmetrise a weighted directed graph: A_ud = A + A^T, preserving
    the (sorted) node labels.

    Parameters
    ----------
    G : nx.DiGraph
        Directed graph whose edges carry a numeric weight attribute.
    weight : str, optional
        Name of the edge attribute holding the weight.  Defaults to the
        module-level ``var`` for backward compatibility with the
        notebook's original flow.
    """
    if weight is None:
        weight = var  # global set in the notebook cell below
    # sorted() works with both networkx 1.x lists and 2.x NodeViews,
    # unlike the previous in-place node_list.sort().
    node_list = sorted(G.nodes())
    A = nx.to_numpy_matrix(G=G, nodelist=node_list, weight=weight)
    ud_M = A + A.T
    ud_G = nx.from_numpy_matrix(ud_M)
    # relabel the integer nodes back to country names
    maplist = dict(zip(ud_G.nodes(), node_list))
    ud_G = nx.relabel_nodes(ud_G, maplist)
    return ud_G
# ### Test on one graph
# +
## get the undirected graph for a particular graph
# Build the undirected network for one year / one asset-class weight.
year = 2015
var = 'CDIS_weight' ##
df_graph = df[(df['year']==year) & (df[var]>0)]
# NOTE(review): nx.from_pandas_dataframe and list-returning G.nodes()
# below are networkx 1.x APIs (removed/changed in networkx >= 2).
G = nx.from_pandas_dataframe(df_graph, source="country",
                             target="counterpart", edge_attr=[var],
                             create_using=nx.DiGraph())
G = to_undirected(G)
# -
## export to matlab for community detection
node_list = G.nodes()
node_list.sort()
A = nx.to_numpy_matrix(G = G,nodelist=node_list,weight='weight')
save_path = '../result/2015_CDIS.mat'
scipy.io.savemat(save_path,mdict={'A':A,'nodes':node_list})
## read back the result of matlab community detection
# Compare the MATLAB community assignment with python-louvain's
# best_partition on the same graph, and save the side-by-side table.
matlab_community = pd.read_excel('../result/out.xlsx')
l_community = community.best_partition(G,weight='weight',resolution=1)
python_community = pd.DataFrame(list(l_community.items()),columns=['country','python_community'])
matlab_community.columns = ['country','matlab_community']
m_df= pd.merge(matlab_community,python_community, on ='country')
m_df.to_csv('../result/compare.csv')
| 3,486 |
/credit_risk_resampling.ipynb | 56463af01db918637616b979cd0136e19772ae09 | [] | no_license | khao2393/Unit_11_MachineLearning | https://github.com/khao2393/Unit_11_MachineLearning | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 43,446 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
import os
import sncosmo
import tables
from astropy.time import Time
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, FixedLocator
# # Light curve plot
#
# ## Data sources:
# 1. P48/CFH12K
# 2. P60/SEDm
# 3. LCOGT-1m/Sinistro
# 4. Swift/UVOT
#
# ## Filter data:
# 1. CFH12K: _g_ and _R_
# 2. SEDm: _g_, _r_ and _i_
#
# ## Data filename:
# ```iPTF16abc.h5```
# +
# load light curve data
# lc[telescope/instrument][filter] -> structured array of (time, mag, mag_err)
h5file = tables.open_file("iPTF16abc.h5", mode="r")
table = h5file.root.photometry
lc = {"P48/CFH12K": dict(), "P60/SEDm": dict(), "LCO-1m/Sinistro": dict(), "Swift/UVOT": dict(), "SPM-1.5m/RATIR": dict()}
# P48 survey photometry; mag_err < 10 drops clearly bad measurements
for filter_name in ["g", "R"]:
    lc["P48/CFH12K"][filter_name] =\
        np.array([(row["time"], row["mag"], row["mag_err"])
                  for row in table.where(
                      "(telescope == b'P48') & (filter_name == b'%s') & (mag_err < 10)" % filter_name)],
                 dtype=[("time", "f"), ("mag", "f"), ("mag_err", "f")])
for filter_name in ["g", "r", "i"]:
    lc["P60/SEDm"][filter_name] =\
        np.array([(row["time"], row["mag"], row["mag_err"])
                  for row in table.where(
                      "(telescope == b'P60') & (filter_name == b'%s')" % filter_name)],
                 dtype=[("time", "f"), ("mag", "f"), ("mag_err", "f")])
for filter_name in ["B", "V", "g", "r", "i"]:
    lc["LCO-1m/Sinistro"][filter_name] =\
        np.array([(row["time"], row["mag"], row["mag_err"])
                  for row in table.where(
                      "(telescope == b'LCO-1m') & (filter_name == b'%s')" % filter_name)],
                 dtype=[("time", "f"), ("mag", "f"), ("mag_err", "f")])
for filter_name in ["UVW2", "UVM2", "UVW1", "U", "B", "V"]:
    lc["Swift/UVOT"][filter_name] =\
        np.array([(row["time"], row["mag"], row["mag_err"])
                  for row in table.where(
                      "(telescope == b'Swift') & (filter_name == b'%s')" % filter_name)],
                 dtype=[("time", "f"), ("mag", "f"), ("mag_err", "f")])
for filter_name in ["r", "i", "Z", "Y", "J", "H"]:
    lc["SPM-1.5m/RATIR"][filter_name] =\
        np.array([(row["time"], row["mag"], row["mag_err"])
                  for row in table.where(
                      "(telescope == b'SPM-1.5m') & (filter_name == b'%s')" % filter_name)],
                 dtype=[("time", "f"), ("mag", "f"), ("mag_err", "f")])
# epochs at which spectra were taken (for tick marks on the plot)
spec_epochs = [t.attrs.OBS_DATE for t in h5file.root.spectroscopy]
# pre-detection upper limits from P48 g-band: 5x the flux error converted
# to AB magnitude (3631 Jy zero point), for epochs before MJD 57481.2
ul = {}
ul["g"] = np.array([(row["time"], -2.5*np.log10(5*row["flux_err"]/3631))
                    for row in table.where(
                        "(telescope == b'P48') & (filter_name == b'g') & (time < 57481.2)")],
                   dtype=[("time", "f"), ("mag_lim", "f")])
h5file.close()
# +
# Key epochs (MJD) and redshift; T_MAX is used as the SALT2 t0 below,
# T_fl matches the pre-detection cutoff of the upper limits above.
T_MAX = 57499.537694
T_fl = 57481.2
z = 0.0234
# plotting colour per photometric filter
colors = {"UVW2": "#D0E1F9",
          "UVM2": "#008DCB",
          "UVW1": "Cyan",
          "U": "DarkBlue",
          "B": "Blue",
          "V": "#68A225",
          "g": "LightSeaGreen",
          "R": "Crimson",
          "r": "Crimson",
          "i": "#FFDB5C",
          "Z": "#FE0000",
          "Y": "#882426",
          "J": "#C9A66B",
          "H": "#C5001A"}
# marker shape per telescope
markers = {"Swift": "h",
           "P48": "o",
           "P60": "d",
           "LCO-1m": "s",
           "SPM-1.5m": "p"}
# per-filter magnitude offsets used to separate the light curves on the plot
offsets = {"UVW2": -5,
           "UVM2": -5,
           "UVW1": -1,
           "U": 2,
           "B": -1.5,
           "V": 1,
           "g": 0,
           "R": 2,
           "r": 2,
           "i": 3,
           "Z": 4,
           "Y": 5,
           "J": 6,
           "H": 7}
# colours for the colour-curve panels (keys are filter pairs)
AAMcolors = {'Ni_models': "#2A3132",
             'g': "LightSeaGreen",
             'UVW2-V': "#5F9683", # ultramarine
             'UVW1-V': "#00CFAA", # cyan
             'U-V': "#FF0038", # fig
             'B-V': "#012161", # royal blue
             'g-r': "#6599FF", # light blue #F77604", # caviar
             'r-i': "#FF0038", # ripe tomato
             'i-Z': "#AF4425", # cayene
             'Z-Y': "#662225", # red onion
             'J-H': "#C9A66B" # cinnamon
             }
# vertical offsets for the colour curves
AAMoffsets = {'UVW2-V': 1, # midnight blue
              'UVW1-V': 1, # fig or #50312F eggplant
              'U-V': 2, # blueberry
              'B-V': 1, # yellow pepper
              'g-r': 0, # carrot
              'r-i': -1, # tomato
              'i-Z': -3, # cinnamon
              'Z-Y': -4, # red onion
              'J-H': -6 # cayene
              }
# +
# sncosmo
# run sncosmo
# filter data: load P48/P60 bandpass transmission curves from filters.h5
fp = tables.open_file("filters.h5", mode="r")
filters = dict()
for filter_name in ["g", "R"]:
    filters["P48_" + filter_name] =\
        np.array([(row["wavelength"], row["transmission"])
                  for row in fp.root.P48.CFH12K._f_get_child(filter_name)],
                 dtype=[("wavelength", "f"), ("transmission", "f")])
for filter_name in ["g", "r", "i"]:
    filters["P60_" + filter_name] =\
        np.array([(row["wavelength"], row["transmission"])
                  for row in fp.root.P60.SEDm._f_get_child(filter_name)],
                 dtype=[("wavelength", "f"), ("transmission", "f")])
# register each bandpass with sncosmo under its "TEL_filter" name
for key in filters:
    band = sncosmo.Bandpass(filters[key]["wavelength"],
                            filters[key]["transmission"],
                            name=key)
    sncosmo.registry.register(band)
fp.close()
# sncosmo model: SALT2 source with F99 dust applied both in the host
# rest frame and in the observer (Milky Way) frame
dust = sncosmo.F99Dust()
host_dust = sncosmo.F99Dust()
model = sncosmo.Model(source="salt2",
                      effects=[host_dust, dust],
                      effect_names=["host", "mw"],
                      effect_frames=["rest", "obs"])
# Fixed fit parameters for iPTF16abc (t0 = epoch of maximum = T_MAX).
model.set(z=0.0234)
model.set(t0=T_MAX)
model.set(hostebv=0.05)
model.set(mwebv=0.024)
model.set(x0=0.008604)
model.set(x1=0.964324)
model.set(c=0.033302)
# ### Experiment with a log plot for just the g-band light curve
def lc_color(t_blue, m_blue, m_blue_unc, t_red, m_red, m_red_unc, delta_t_merge=1/24.):
    """Return the (merged) color curve for two filters.

    Each blue-filter epoch is matched to the nearest red-filter epoch;
    a pair is merged into a single color measurement when the two epochs
    are closer than ``delta_t_merge``.  Every red epoch is consumed at
    most once (it is masked after being paired).

    Parameters
    ----------
    t_blue : array-like
        Time array for blue filter measurements
    m_blue : array-like, shape = t_blue.shape
        mag array for the blue filter measurements
    m_blue_unc : array-like, shape = t_blue.shape
        mag uncertainties array for the blue filter measurements
    t_red : array-like
        Time array for red filter measurements
    m_red : array-like, shape = t_red.shape
        mag array for the red filter measurements
    m_red_unc : array-like, shape = t_red.shape
        mag uncertainties array for the red filter measurements
    delta_t_merge : float, optional (default = 1/24., i.e. one hour in days)
        Maximum time separation for two epochs to be merged.

    Returns
    -------
    t_color : np.ndarray
        Mean time of each merged epoch pair.
    m_color : np.ndarray
        Blue minus red magnitude for each pair.
    m_color_unc : np.ndarray
        Quadrature sum of the two magnitude uncertainties.
    """
    if len(t_blue) != len(m_blue) or len(t_blue) != len(m_blue_unc):
        raise ValueError('Length of t_blue, m_blue, and m_blue_unc do not match')
    if len(t_red) != len(m_red) or len(t_red) != len(m_red_unc):
        raise ValueError('Length of t_red, m_red, and m_red_unc do not match')
    # Masked arrays let us "consume" a red epoch once it has been paired.
    t_red = np.ma.array(t_red, mask=np.zeros(t_red.shape))
    m_red = np.ma.array(m_red, mask=np.zeros(m_red.shape))
    m_red_unc = np.ma.array(m_red_unc, mask=np.zeros(m_red_unc.shape))
    t_color = np.empty(0)
    m_color = np.empty(0)
    m_color_unc = np.empty(0)
    # BUG FIX: the original re-assigned ``delta_t_merge = 1/24.`` here,
    # silently discarding any caller-supplied value.  The default in the
    # signature is now the float literal ``1/24.`` (the in-body override
    # suggests the integer-division pitfall was the original motivation).
    for tb, mb, mbu in zip(t_blue, m_blue, m_blue_unc):
        if np.min(np.abs(t_red - tb)) < delta_t_merge:
            match_idx = np.argmin(np.abs(t_red - tb))
            t_color = np.append(t_color, (tb + t_red[match_idx]) / 2)
            m_color = np.append(m_color, mb - m_red[match_idx])
            m_color_unc = np.append(m_color_unc,
                                    np.hypot(mbu, m_red_unc[match_idx]))
            t_red.mask[match_idx] = True
    return t_color, m_color, m_color_unc
# +
# Two-panel figure: top panel is the g-band light curve on a symlog time
# axis; bottom panel shows merged color curves from every instrument.
g_filt_dict = {"P48/CFH12K": "$g_\mathrm{PTF}$", "P60/SEDm": "g'", "LCO-1m/Sinistro": "g'"}
fig, (ax, ax2) = plt.subplots(2, 1, sharex = False, figsize=(8,8))
for tel_inst in lc:
    if 'g' in lc[tel_inst].keys():
        tel = tel_inst.split("/")[0]
        inst = tel_inst.split("/")[1]
        curve = lc[tel_inst]['g']
        # Rest-frame days since first light (T_fl) vs observed mag + offset.
        ax.errorbar((curve["time"] - T_fl)/(1+z) ,
                    curve["mag"] + offsets['g'],
                    yerr=curve["mag_err"],
                    marker=markers[tel],
                    color=AAMcolors['g'],
                    linestyle='none',
                    mec = 'k',
                    mew = 0.5,
                    ms = 8,
                    alpha=0.8,
                    label="%s %s" % (tel_inst, g_filt_dict[tel_inst]))
# Overlay the SALT2 model g-band light curve.
t = np.linspace(57489, 57550, 300)
ax.plot((t - T_fl)/(1+z),
        model.bandmag("P60_g", "ab", t) + offsets["g"],
        linestyle='-',
        color=AAMcolors["g"])
# Short vertical ticks marking the spectroscopy epochs.
for epoch in spec_epochs:
    t = (epoch - T_fl)/(1+z)
    ax.plot([t, t], [21.5, 21], color='k', linewidth=1)
# plot the upper limits
ULoffset = 0.25
yerr_g = np.zeros((2, len(ul['g'])))
yerr_g[0,:] += ULoffset
ax.errorbar((ul['g']['time'] - T_fl)/(1+z), ul['g']['mag_lim']+ULoffset, yerr = yerr_g,
            fmt = 'v', color = AAMcolors["g"], ms = 5,
            elinewidth=2, capsize = 0, mec = 'k', mew = 0.5)
ax.plot((ul['g']['time'] - T_fl)/(1+z),ul['g']['mag_lim'], '_',
        color = AAMcolors["g"], mew = 2, ms = 5)
ax.set_xlim(-2,150)
# symlog: linear inside |t| < 3 d, logarithmic beyond.
ax.set_xscale("symlog", linthreshx=3, linscalex = 0.6, subsx = [2,3,4,5,6,7,8,9])
ax.set_ylim(22, 15.75)
ax.set_xticks([-2, -1, 0, 1, 2, 3, 10, 25, 50, 75, 100, 150])
ax.set_xticklabels([-2, -1, 0, 1, 2, 3, 10, 25, 50, 75, 100, 150])
ax.yaxis.set_minor_locator(MultipleLocator(0.25))
# ax.set_xlabel(r"$t - t_0 \; (\mathrm{d})$")
ax.set_ylabel(r"$g \; (\mathrm{mag})$", fontsize = 14)
ax.legend(loc=2)
# plot the color evolution
tel_inst = 'P60/SEDm'
# Blue/red filter pairs to difference, per telescope/instrument.
col_dict = {'P60/SEDm': {'bfilts': ['g', 'r'],
                         'rfilts': ['r','i']
                         },
            'LCO-1m/Sinistro': {'bfilts': ['B', 'g', 'r'],
                                'rfilts': ['V', 'r','i']
                                },
            'Swift/UVOT': {'bfilts': ['UVW2', 'UVW1', 'U', 'B'],
                           'rfilts': ['V','V','V','V']
                           },
            'SPM-1.5m/RATIR': {'bfilts': ['r', 'i', 'Z', 'J'],
                               'rfilts': ['i', 'Z', 'Y', 'H']
                               }
            }
for tel_inst in col_dict:
    for bf, rf in zip(col_dict[tel_inst]['bfilts'], col_dict[tel_inst]['rfilts']):
        # Merge near-simultaneous blue/red epochs into single color points.
        tc, mc, mcu = lc_color(lc[tel_inst][bf]['time'],
                               lc[tel_inst][bf]['mag'],
                               lc[tel_inst][bf]['mag_err'],
                               lc[tel_inst][rf]['time'],
                               lc[tel_inst][rf]['mag'],
                               lc[tel_inst][rf]['mag_err'])
        label_txt = r"$\mathrm{{{:s}}} \; {:s} - {:s}$".format(tel_inst.replace("-","\mathrm{-}"), bf, rf)
        ax2.errorbar((tc - T_fl)/(1+z), mc + AAMoffsets[bf + '-' + rf], mcu,
                     c = AAMcolors[bf + '-' + rf],
                     fmt = markers[tel_inst.split("/")[0]],
                     mec = 'k', mew = 0.5, ms = 8,
                     label=label_txt)
# Annotate the applied offset next to each color curve.
ax2.text(3.1, 4.75, r"$+{}$".format(AAMoffsets['UVW2-V']))
ax2.text(2.5, 3.1, r"$+{}$".format(AAMoffsets['UVW1-V']))
ax2.text(2.5, 2.5, r"$+{}$".format(AAMoffsets['U-V']))
ax2.text(1, 0.8, r"$+{}$".format(AAMoffsets['B-V']))
# ax2.text(1.5, 0, r"${}$".format(AAMoffsets['g-r']))
ax2.text(1, -1.2, r"${}$".format(AAMoffsets['r-i']))
ax2.set_xlim(-2,150)
ax2.set_ylim(-7,6.5)
ax2.set_xscale("symlog", linthreshx=3, linscalex = 0.6, subsx = [2,3,4,5,6,7,8,9])
# ax2.set_xscale('log')
ax2.set_ylabel(r'$\mathrm{color \; (mag) + offset}$', fontsize = 14)
ax2.set_xlabel(r'$t - t_0 \; (\mathrm{d})$', fontsize = 14)
ax2.set_xticks([-2, -1, 0, 1, 2, 3, 10, 25, 50, 75, 100, 150])
ax2.set_xticklabels([-2, -1, 0, 1, 2, 3, 10, 25, 50, 75, 100, 150])
ax2.yaxis.set_minor_locator(MultipleLocator(0.25))
for axis in [ax, ax2]:
    axis.tick_params(axis='both', which='major', labelsize=12)
# Re-order the legend entries so related color curves group together.
handles, labels = ax2.get_legend_handles_labels()
leg = ax2.legend( [handles[x] for x in [5,6,7,8,2,3,0,4,1, 9, 10, 11, 12]],
                  [labels[x] for x in [5,6,7,8,2,3,0,4,1, 9, 10, 11, 12]],
                  loc = 2, ncol = 1)
# fig.tight_layout()
fig.subplots_adjust(left = 0.1, bottom = 0.1, right = 0.97, top = 0.98, hspace = 0.1)
# plt.savefig('logLC_with_colors.pdf')
# +
# Standalone color-evolution figure (same construction as the panel
# above, but without the SPM-1.5m/RATIR pairs and on a plain log axis).
fig, ax2 = plt.subplots(figsize=(8, 4))
tel_inst = 'P60/SEDm'
col_dict = {'P60/SEDm': {'bfilts': ['g', 'r'],
                         'rfilts': ['r','i']
                         },
            'LCO-1m/Sinistro': {'bfilts': ['B', 'g', 'r'],
                                'rfilts': ['V', 'r','i']
                                },
            'Swift/UVOT': {'bfilts': ['UVW2', 'UVW1', 'U', 'B'],
                           'rfilts': ['V','V','V','V']
                           }
            }
for tel_inst in col_dict:
    for bf, rf in zip(col_dict[tel_inst]['bfilts'], col_dict[tel_inst]['rfilts']):
        tc, mc, mcu = lc_color(lc[tel_inst][bf]['time'],
                               lc[tel_inst][bf]['mag'],
                               lc[tel_inst][bf]['mag_err'],
                               lc[tel_inst][rf]['time'],
                               lc[tel_inst][rf]['mag'],
                               lc[tel_inst][rf]['mag_err'])
        # NOTE(review): label_cap is assembled below but never used --
        # presumably it was meant to be appended to label_txt.
        if AAMoffsets[bf + '-' + rf] > 0:
            label_cap = r"$ + {:s}$".format(str(AAMoffsets[bf + '-' + rf]))
        elif AAMoffsets[bf + '-' + rf] < 0:
            label_cap = r"$ - {:s}$".format(str(np.abs(AAMoffsets[bf + '-' + rf])))
        elif AAMoffsets[bf + '-' + rf] == 0:
            label_cap = ""
        # NOTE(review): this replaces "_" while the analogous figure above
        # replaces "-" -- confirm which is intended for TeX-safe names.
        label_txt = r"$\mathrm{{{:s}}} \; {:s} - {:s}$".format(tel_inst.replace("_","\mathrm{-}"), bf, rf)
        ax2.errorbar(tc - T_fl, mc + AAMoffsets[bf + '-' + rf], mcu,
                     c = AAMcolors[bf + '-' + rf],
                     fmt = markers[tel_inst.split("/")[0]],
                     mec = 'k', mew = 0.5, ms = 8,
                     label=label_txt)
ax2.set_xlim(1.5,150)
ax2.set_ylim(-2,9)
# ax2.set_xscale('symlog', linthreshx=3.5, linscalex = 0.6)
ax2.set_xscale('log')
ax2.set_ylabel(r'$\mathrm{color \; (mag) + offset}$')
ax2.set_xlabel(r'$t - t_0 \; (\mathrm{d})$')
ax2.set_xticks([2, 3, 4, 10, 25, 50, 75, 100, 150])
ax2.set_xticklabels([2, 3, 4, 10, 25, 50, 75, 100, 150])
# Re-order the legend entries so related color curves group together.
handles, labels = ax2.get_legend_handles_labels()
ax2.legend( [handles[x] for x in [5,6,7,8,2,3,0,4,1]],
            [labels[x] for x in [5,6,7,8,2,3,0,4,1]],
            loc = 1, ncol = 3)
fig.tight_layout()
# +
# original fig from Yi
# Two stacked panels: optical filters (B V g r R i) on `ax`, everything
# else (UV + NIR) on `ax2`; observed mags with per-filter offsets, and
# the SALT2 model overlaid in g/r/i on the optical panel.
fig, (ax, ax2) = plt.subplots(2,1, figsize=(6, 8))
for tel_inst in lc:
    for filter_name in lc[tel_inst]:
        tel = tel_inst.split("/")[0]
        inst = tel_inst.split("/")[1]
        curve = lc[tel_inst][filter_name]
        if filter_name in ["B", "V", "g", "r", "R", "i"]:
            ax.errorbar((curve["time"] - T_MAX)/(1+z),
                        curve["mag"] + offsets[filter_name],
                        yerr=curve["mag_err"],
                        marker=markers[tel],
                        color=colors[filter_name],
                        linestyle='none',
                        alpha=0.8,
                        mec = "k", mew = 0.5,
                        label="%s %s" % (tel_inst, filter_name) + ("$%+i$" % offsets[filter_name] if filter_name != "g" else ""))
        else:
            ax2.errorbar((curve["time"] - T_MAX)/(1+z),
                         curve["mag"] + offsets[filter_name],
                         yerr=curve["mag_err"],
                         marker=markers[tel],
                         color=colors[filter_name],
                         linestyle='none',
                         alpha=0.8,
                         mec = "k", mew = 0.5, ms = 10,
                         label="%s %s" % (tel_inst, filter_name) + ("$%+i$" % offsets[filter_name] if filter_name != "g" else ""))
# SALT2 model overlay for the P60 g/r/i bands.
t = np.linspace(57485, 57550, 300)
for filter_name in ["g", "r", "i"]:
    ax.plot(t - T_MAX,
            model.bandmag("P60_" + filter_name, "ab", t) + offsets[filter_name],
            linestyle='-',
            color=colors[filter_name])
# for epoch in spec_epochs:
#     t = epoch - T_MAX
#     plt.plot([t, t], [12.3, 12.7], color='k', linewidth=1)
ax.set_ylim(23.2, 14)
ax.set_xlabel("$t - T_{B,\mathrm{max}} \; \mathrm{(d)}$", fontsize=14)
ax.set_ylabel("mag (observed)", fontsize=14)
ax.set_xlim(-20, 125)
ax2.set_ylim(26, 14.6)
ax2.set_xlabel("$t - T_{B,\mathrm{max}} \; \mathrm{(d)}$", fontsize=16)
ax2.set_ylabel("mag (observed)", fontsize=16)
ax2.set_xlim(-17, 42)
ax.legend(loc=1, numpoints=1, ncol=2, prop={'size':7}, fancybox=True, fontsize = 11)
ax2.legend(loc=1, numpoints=1, ncol=2, prop={'size':7}, fancybox=True, labelspacing=1, fontsize = 11)
for axis in [ax, ax2]:
    axis.tick_params(axis='both', which='both', top=True, right=True, labelsize=13)
    axis.yaxis.set_minor_locator(MultipleLocator(0.5))
ax.xaxis.set_major_locator(MultipleLocator(20))
ax.xaxis.set_minor_locator(MultipleLocator(5))
ax2.xaxis.set_major_locator(MultipleLocator(5))
ax2.xaxis.set_minor_locator(MultipleLocator(1))
fig.subplots_adjust(left=0.1, right=0.99,top=0.99,bottom=0.065,hspace=0.17)
# fig.savefig("lightcurve.pdf")
# +
# original fig from Yi
fig, (ax, ax2) = plt.subplots(1,2, figsize=(11, 4.5))
font_scale = 11/6*3.5/7
for tel_inst in lc:
for filter_name in lc[tel_inst]:
tel = tel_inst.split("/")[0]
inst = tel_inst.split("/")[1]
curve = lc[tel_inst][filter_name]
if filter_name in ["B", "V", "g", "r", "R", "i"]:
ax.errorbar((curve["time"] - T_MAX)/(1+z),
curve["mag"] + offsets[filter_name],
yerr=curve["mag_err"],
marker=markers[tel],
color=colors[filter_name],
linestyle='none',
alpha=0.8,
mec = "k", mew = 0.5,
label="%s %s" % (tel_inst, filter_name) + ("$%+i$" % offsets[filter_name] if filter_name != "g" else ""))
else:
ax2.errorbar((curve["time"] - T_MAX)/(1+z),
curve["mag"] + offsets[filter_name],
yerr=curve["mag_err"],
marker=markers[tel],
color=colors[filter_name],
linestyle='none',
alpha=0.8,
mec = "k", mew = 0.5, ms = 10,
label="%s %s" % (tel_inst, filter_name) + ("$%+i$" % offsets[filter_name] if filter_name != "g" else ""))
t = np.linspace(57485, 57550, 300)
for filter_name in ["g", "r", "i"]:
ax.plot(t - T_MAX,
model.bandmag("P60_" + filter_name, "ab", t) + offsets[filter_name],
linestyle='-',
color=colors[filter_name])
# for epoch in spec_epochs:
# t = epoch - T_MAX
# plt.plot([t, t], [12.3, 12.7], color='k', linewidth=1)
ax.set_ylim(23.2, 14)
ax.set_xlabel("$t - T_{B,\mathrm{max}} \; \mathrm{(d)}$", fontsize=16*font_scale)
ax.set_ylabel("mag (observed)", fontsize=16*font_scale)
ax.set_xlim(-20, 125)
ax2.set_ylim(26, 14.6)
ax2.set_xlabel("$t - T_{B,\mathrm{max}} \; \mathrm{(d)}$", fontsize=16*font_scale)
ax2.set_ylabel("mag (observed)", fontsize=16*font_scale)
ax2.set_xlim(-17, 42)
ax.legend(loc=1, numpoints=1, ncol=2, prop={'size':7}, fancybox=True, fontsize = 11*font_scale)
ax2.legend(loc=1, numpoints=1, ncol=2, prop={'size':7}, fancybox=True, labelspacing=1, fontsize = 11*font_scale)
for axis in [ax, ax2]:
axis.tick_params(axis='both', which='both', top=True, right=True, labelsize=13*font_scale)
axis.yaxis.set_minor_locator(MultipleLocator(0.5))
ax.xaxis.set_major_locator(MultipleLocator(20))
ax.xaxis.set_minor_locator(MultipleLocator(5))
ax2.xaxis | 20,480 |
/.ipynb_checkpoints/udacity-slide-deck-checkpoint.ipynb | 1aa7ca0cc6fe567a4be9e701ed6a4a2198401478 | [] | no_license | merna17798/Project_3_Udacity | https://github.com/merna17798/Project_3_Udacity | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 616,901 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # (Prosper Loan Data-set Visualization)
# ## by (Merna Saleh)
# + [markdown] slideshow={"slide_type": "slide"}
#
# # Investigation Overview
# ### We want to get answers for these questions?
# * What is the distribution of loan status?
# * How many recommendations do the borrower have?
# * What is the distribution of term?
# * Are all borrowers currently in group?
# * Is Borrower a home owner?
# * What is the relationship between the borrower rate and his/her income range?
# * What is the relationship between Loan Months Since Origination & Monthly Loan Payment?
# * What is the relationship Stated Monthly Income & Income Verifiable?
# * What is the relationship between Investment From Friends Count & Investment From Friends Amount?
# * What is the relationship among LenderYield and Borrower APR vs ProsperRating?
# + [markdown] slideshow={"slide_type": "slide"}
# # Dataset Overview
# ### Our data consists of 81 columns and there are a lot of missing values; so we had selected 12 features to visualize and find insights through it.
# ### The link: https://www.kaggle.com/justjun0321/prosperloandata
#
#
# + [markdown] slideshow={"slide_type": "skip"}
# # Exploring and Cleaning dataset
# + slideshow={"slide_type": "skip"}
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
# Load the Prosper loan data set (81 columns, many with missing values).
df=pd.read_csv('prosperLoanData.csv')
df.head()
# + slideshow={"slide_type": "skip"}
df.info()
# + slideshow={"slide_type": "skip"}
# Check for duplicated rows.
df.duplicated().sum()
# + slideshow={"slide_type": "skip"}
# Drop identifier, free-text, and mostly-missing columns that are not
# used in this analysis.
df=df.drop({'CreditGrade','LoanFirstDefaultedCycleNumber','ScorexChangeAtTimeOfListing','ProsperPrincipalOutstanding',
            'ClosedDate','GroupKey','ProsperPrincipalBorrowed','ProsperPaymentsOneMonthPlusLate',
            'ProsperPaymentsLessThanOneMonthLate','OnTimeProsperPayments','TotalProsperPaymentsBilled','TotalProsperLoans',
            'ListingKey','ListingNumber','ListingCreationDate', 'DebtToIncomeRatio', 'TradesOpenedLast6Months',
            'ProsperRating (numeric)', 'ProsperRating (Alpha)',
            'BorrowerState', 'Occupation', 'EmploymentStatus', 'EmploymentStatusDuration', 'CreditScoreRangeLower',
            'CreditScoreRangeUpper','FirstRecordedCreditLine', 'CurrentCreditLines', 'OpenCreditLines',
            'TotalCreditLinespast7years', 'InquiriesLast6Months', 'TotalInquiries', 'CurrentDelinquencies',
            'AmountDelinquent', 'DelinquenciesLast7Years', 'PublicRecordsLast10Years', 'PublicRecordsLast12Months',
            'RevolvingCreditBalance','TotalTrades', 'TradesNeverDelinquent (percentage)', 'BankcardUtilization',
            'AvailableBankcardCredit', 'MemberKey', 'LoanKey', 'LoanNumber', 'DateCreditPulled'},axis=1)
# + slideshow={"slide_type": "skip"}
# Impute the missing yield/loss/return estimates with the column mean.
df['EstimatedEffectiveYield'].fillna(df['EstimatedEffectiveYield'].mean(), inplace = True)
# + slideshow={"slide_type": "skip"}
df['EstimatedLoss'].fillna(df['EstimatedLoss'].mean(), inplace = True)
# + slideshow={"slide_type": "skip"}
df['EstimatedReturn'].fillna(df['EstimatedReturn'].mean(), inplace = True)
# + slideshow={"slide_type": "skip"}
df.info()
# + [markdown] slideshow={"slide_type": "slide"}
# # Univariate Exploration
# + slideshow={"slide_type": "skip"}
# Function to plot all univariate plots
def univariate_ploting(data, feature, label, angle, xlabel, ylabel):
    """Draw a vertical countplot of one categorical `feature` of `data`.

    `angle` rotates the x tick labels counter-clockwise, `label` becomes
    the plot title, and `xlabel`/`ylabel` annotate the axes.
    """
    plt.figure(figsize=[12, 10])
    # Single-hue bars: first color of the current seaborn palette.
    sb.countplot(data=data, x=feature, color=sb.color_palette()[0])
    plt.xticks(rotation=angle)
    plt.title(label, fontsize=25)
    plt.xlabel(xlabel, fontsize=16)
    plt.ylabel(ylabel, fontsize=16)
# + [markdown] slideshow={"slide_type": "slide"}
# ## What is the distribution of loan status?
# + slideshow={"slide_type": "subslide"}
univariate_ploting(df, 'LoanStatus', "Distribution of loan status",45, 'Loan Status', 'Count')
# + [markdown] slideshow={"slide_type": "subslide"}
# > We found most loans are among completed, current, Defaulted, and charged off; so we recommend making the categories of loan status in the previous categories to make it easier to be tracked and visualized.
# + [markdown] slideshow={"slide_type": "slide"}
# ## How many recommendations do the borrower have?
# + slideshow={"slide_type": "subslide"}
univariate_ploting(df, 'Recommendations', "Distribution of Recommendations",0, 'Recommendations', 'Count')
# + [markdown] slideshow={"slide_type": "subslide"}
# > It isn't necessary to have recommendations to borrow a loan.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Is Borrower a home owner?
# + slideshow={"slide_type": "subslide"}
univariate_ploting(df, 'IsBorrowerHomeowner', "Is Borrower a Home owner?",0, 'Is Borrower Home Owner', 'Count')
# + [markdown] slideshow={"slide_type": "subslide"}
# > The borrower of loan may have home or not; so the loan confirmation doesn't depend on if the borrower is a home owner or not.
# + [markdown] slideshow={"slide_type": "slide"}
# ## What is the distribution of term?
# + slideshow={"slide_type": "subslide"}
univariate_ploting(df, 'Term', "Distribution of Term (months)",0, 'Term', 'Count')
# + [markdown] slideshow={"slide_type": "subslide"}
# > Most of loans term length are 36 months.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Are all borrowers currently in group?
# + slideshow={"slide_type": "subslide"}
univariate_ploting(df, 'CurrentlyInGroup', "Distribution of Borrowers who are currently in group",0, 'In Group', 'Count')
# + [markdown] slideshow={"slide_type": "subslide"}
# > Most of borrowers aren't in group in a group at the time the listing was created.
# + [markdown] slideshow={"slide_type": "slide"}
# # Bivariate Exploration
# + [markdown] slideshow={"slide_type": "slide"}
# ## What is the relationship between the borrower rate and his/her income range?
# + slideshow={"slide_type": "subslide"}
# Figure size
plt.figure(figsize = [12, 10])
# base color
base_color = sb.color_palette()[0]
sb.boxplot(data = df, x = 'BorrowerRate', y = 'IncomeRange', color = base_color)
# Axis labels are set *after* sb.boxplot on purpose: seaborn labels the
# axes from the DataFrame column names, so labels set beforehand (as the
# original did) are overwritten and never appear.
plt.xlabel('Borrower Rate', fontsize=16)
plt.ylabel('Income Range', fontsize=16)
# Title of plot
plt.title("The relationship Borrower Rate & Income Range", fontsize=25);
# + [markdown] slideshow={"slide_type": "subslide"}
# > If the borrowers are not employed, their borrowing rate are higher than the others.
# + [markdown] slideshow={"slide_type": "slide"}
# ## What is the relationship between Loan Months Since Origination & Monthly Loan Payment?
# + slideshow={"slide_type": "subslide"}
# Figure size
plt.figure(figsize = [12, 9])
# 2-D histogram; cmin=0.5 hides empty bins, 'viridis_r' reverses the map
# so denser bins plot darker.
plt.hist2d(data = df, x = 'LoanMonthsSinceOrigination', y = 'MonthlyLoanPayment', cmin=0.5, cmap='viridis_r')
plt.colorbar()
# x_label, y_label
plt.xlabel('Loan Months Since Origination', fontsize=16)
plt.ylabel('Monthly Loan Payment', fontsize=16)
# Title of plot
plt.title("The relationship Loan Months Since Origination & Monthly Loan Payment", fontsize=25);
# + [markdown] slideshow={"slide_type": "subslide"}
# > We found that borrowers are disciplined in loan payment mostly in the first ten months if the payment range from 100 to 1000 USD.
# + [markdown] slideshow={"slide_type": "slide"}
# ## What is the relationship between Investment From Friends Count & Investment From Friends Amount?
# + slideshow={"slide_type": "subslide"}
# Figure size
plt.figure(figsize = [12, 10])
# Base color
base_color = sb.color_palette()[0]
sb.boxplot(data = df, x = 'InvestmentFromFriendsCount', y = 'InvestmentFromFriendsAmount', color = base_color)
# Axis labels (set after the boxplot so seaborn's automatic column-name
# labels are replaced by these readable ones).
plt.xlabel('Investment From Friends Count', fontsize=16)
plt.ylabel('Investment From Friends Amount', fontsize=16)
# Title of plot
plt.title("The relationship Investment From Friends Count & Investment From Friends Amount", fontsize=25);
# + [markdown] slideshow={"slide_type": "subslide"}
# > The more friends invest, the lower amount of money they invest in the borrower loan and vise versa.
# + [markdown] slideshow={"slide_type": "slide"}
# # Multivariate Exploration
# + slideshow={"slide_type": "skip"}
# Function multivariate:
def multivariate_ploting(feature1, feature2, feature3, xlabel, ylabel, collabel, title):
    """Scatter two columns of the module-level DataFrame ``df`` against
    each other, coloring the points by a third column.

    ``feature3`` drives the 'viridis_r' color mapping and ``collabel``
    labels the colorbar; ``title``/``xlabel``/``ylabel`` annotate the plot.
    """
    plt.figure(figsize=[12, 10])
    plt.scatter(data=df, x=feature1, y=feature2, c=feature3, cmap='viridis_r')
    plt.colorbar(label=collabel)
    plt.title(title, fontsize=25)
    plt.xlabel(xlabel, fontsize=16)
    plt.ylabel(ylabel, fontsize=16)
# + [markdown] slideshow={"slide_type": "slide"}
# ## What is the relationship among LenderYield and Borrower APR vs ProsperRating?
# + slideshow={"slide_type": "subslide"}
multivariate_ploting('LenderYield', 'BorrowerAPR', 'ProsperScore', 'Lender Yield', 'Borrower APR',
'Prosper Score', 'Lender Yield and Borrower APR vs Prosper Score')
# + [markdown] slideshow={"slide_type": "subslide"}
# > This chart confirms the relationship between BorrowerAPR and LenderYield: they have a strong positive relationship (strong correlation). Also, it shows that most loans with lower BorrowerAPR & LenderYield have a higher ProsperScore, and vice versa.
# + [markdown] slideshow={"slide_type": "slide"}
# ## What is the relationship among Borrower Rate and Estimated Loss vs Estimated Effective Yield?
# + slideshow={"slide_type": "subslide"}
multivariate_ploting('BorrowerRate', 'EstimatedLoss', 'EstimatedEffectiveYield', 'Borrower Rate', 'Estimated Loss',
'Estimated Effective Yield', 'Borrower Rate and Estimated Loss vs Estimated Effective Yield')
# + [markdown] slideshow={"slide_type": "subslide"}
# > This chart confirms the relations between Borrower Rate and Estimated Loss, they have positive relationship in most of points and have constant estimated loss at some samples.
# + [markdown] slideshow={"slide_type": "skip"}
# ## What is the relationship among Investors and Loan Origination Quarter vs Percent Funded?
# + slideshow={"slide_type": "skip"}
multivariate_ploting('Investors', 'LoanOriginationQuarter', 'PercentFunded', 'Investors', 'Loan Origination Quarter',
'Percent Funded', 'Investors and Loan Origination Quarter vs Percent Funded')
# + [markdown] slideshow={"slide_type": "skip"}
# > Almost all borrowes are funded.
| 10,802 |
/facebook_data_analysis.ipynb | 624bab60e781eb2f0737e1a885e3bde108722d60 | [] | no_license | oumiao-tj/incubator_project | https://github.com/oumiao-tj/incubator_project | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 320,417 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3.6.9 64-bit
# name: python36964bitdea94710c61b41c487caf1613b98e54d
# ---
# # Barbara Gaspar Chan Tong RA: 11201721332
# - Convolução e correlação
import numpy as np
from scipy import ndimage
# Small demo: correlation vs convolution of a 3x3 array with a 3x3
# mean (box) kernel, then the different boundary-handling modes.
v = np.array([[1,2,3], [4,5,6], [7,8,9]])
weights = np.ones([3,3])/9
correlate = ndimage.correlate(v, weights)
print(correlate)
# Convolution flips the kernel; for this symmetric kernel the result
# matches the correlation.
convolution = ndimage.convolve(v, weights)
print(convolution)
# 'reflect' is the default mode, so this repeats the previous result.
convolution = ndimage.convolve(v, weights, mode='reflect')
print(convolution)
# 'constant' pads the border with cval (0 by default).
convolution = ndimage.convolve(v, weights, mode='constant')
print(convolution)
# 'nearest' repeats the edge pixel outward.
convolution = ndimage.convolve(v, weights, mode='nearest')
print(convolution)
# 'mirror' reflects about the edge pixel (edge not duplicated).
convolution = ndimage.convolve(v, weights, mode='mirror')
print(convolution)
# 'wrap' treats the array as periodic.
convolution = ndimage.convolve(v, weights, mode='wrap')
print(convolution)
# # Análise de artefatos
import cv2 as cv
import imageio
import matplotlib.pyplot as plt
# +
# Load the pig MR image and pull basic metadata from its header.
PIG_IMG = imageio.imread('./PIG_MR')
METADATA = PIG_IMG.meta
PLT_NAME_id = METADATA.PatientID
pxl = METADATA.Columns  # image width; assumed square below -- TODO confirm
# +
# Zero-mean Gaussian noise field with the same size as the image.
mu = 0
sigma = 5
gaussianNoise = np.random.normal(mu, sigma, [pxl, pxl])
# -
# Amplify the noise (x9), add it to the image, then shift so min is 0.
imgNoise = PIG_IMG + (gaussianNoise*9)
imgNoise = imgNoise - imgNoise.min()
# +
# Side-by-side comparison: original vs Gaussian-noised image.
plt.figure(1, figsize=(12,15))
plt.title('Ruído Gaussiano')
plt.subplot(121)
plt.imshow(PIG_IMG, cmap='gray')
plt.axis('off')
plt.gca().set_title('PIG sem ruído')
plt.colorbar(fraction=0.046, pad=0.04)
plt.subplot(122)
plt.imshow(imgNoise, cmap='gray')
plt.axis('off')
plt.gca().set_title('PIG com ruído')
plt.colorbar(fraction=0.046, pad=0.04)
# -
# -- Comentário: A imagem com ruído possui bordas mais evidenciadas e os tons de cinza estão mais equilibrados.
# # Artefatos causados pelo filtro médio
# Crop a 300x300 region and median-filter it with window sizes 3/5/10;
# larger windows suppress more noise but blur more detail.
CORTE_1 = PIG_IMG[0:300, 0:300]
CORTE_1_MEDIAN_a = ndimage.median_filter(CORTE_1,size=3)
CORTE_1_MEDIAN_b = ndimage.median_filter(CORTE_1,size=5)
CORTE_1_MEDIAN_c = ndimage.median_filter(CORTE_1,size=10)
# +
# Original crop next to the three filtered versions.
plt.figure(1, figsize=(15,15))
plt.subplot(141)
plt.imshow(CORTE_1, cmap='gray')
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('Original')
plt.axis('off')
plt.subplot(142)
plt.imshow(CORTE_1_MEDIAN_a, cmap='gray')
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('Median filter, size = 3')
plt.axis('off')
plt.subplot(143)
plt.imshow(CORTE_1_MEDIAN_b, cmap='gray')
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('Median filter, size = 5')
plt.axis('off')
plt.subplot(144)
plt.imshow(CORTE_1_MEDIAN_c, cmap='gray')
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('Median filter, size = 10')
plt.axis('off')
# Second, smaller crop (120x120) with the same window-size study.
CORTE_2 = PIG_IMG[80:200, 80:200]
CORTE_2_MEDIAN_a = ndimage.median_filter(CORTE_2,size=3)
CORTE_2_MEDIAN_b = ndimage.median_filter(CORTE_2,size=5)
CORTE_2_MEDIAN_c = ndimage.median_filter(CORTE_2,size=10)
# +
plt.figure(1, figsize=(15,15))
plt.subplot(141)
plt.imshow(CORTE_2, cmap='gray')
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('Original')
plt.axis('off')
plt.subplot(142)
plt.imshow(CORTE_2_MEDIAN_a, cmap='gray')
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('Median filter, size = 3')
plt.axis('off')
plt.subplot(143)
plt.imshow(CORTE_2_MEDIAN_b, cmap='gray')
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('Median filter, size = 5')
plt.axis('off')
plt.subplot(144)
plt.imshow(CORTE_2_MEDIAN_c, cmap='gray')
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('Median filter, size = 10')
plt.axis('off')
# -
# Third crop (200x200) with the same window-size study.
CORTE_3 = PIG_IMG[200:400, 200:400]
CORTE_3_MEDIAN_a = ndimage.median_filter(CORTE_3,size=3)
CORTE_3_MEDIAN_b = ndimage.median_filter(CORTE_3,size=5)
CORTE_3_MEDIAN_c = ndimage.median_filter(CORTE_3,size=10)
# +
plt.figure(1, figsize=(15,15))
plt.subplot(141)
plt.imshow(CORTE_3, cmap='gray')
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('Original')
plt.axis('off')
plt.subplot(142)
plt.imshow(CORTE_3_MEDIAN_a, cmap='gray')
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('Median filter, size = 3')
plt.axis('off')
plt.subplot(143)
plt.imshow(CORTE_3_MEDIAN_b, cmap='gray')
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('Median filter, size = 5')
plt.axis('off')
plt.subplot(144)
plt.imshow(CORTE_3_MEDIAN_c, cmap='gray')
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('Median filter, size = 10')
plt.axis('off')
# -
# -- Comentário: realizei um estudo sobre como o fator size influencia na imagem. Percebe-se que quanto maior for este fator, mais borrada a imagem fica.
# # Impulsive noise
import numpy as np
import os
import cv2
from skimage.util import random_noise
# +
def spnoise(image):
    """Add salt-and-pepper (impulsive) noise to a copy of ``image``.

    4% of the pixels are corrupted (``amount``), split half-and-half
    (``s_vs_p``) between salt (value 1) and pepper (value 0).  The input
    array is left untouched; a noised copy is returned.
    """
    height = image.shape[0]
    width = image.shape[1]
    print(height,width)
    s_vs_p = 0.5   # salt / pepper split
    amount = 0.04  # Alter the number of "noise pixels"
    out = np.copy(image)
    # Salt mode
    num_salt = np.ceil(amount * image.size * s_vs_p)
    # BUG FIX: randint's upper bound is already exclusive, so use i (not
    # i - 1); the original could never place noise on the last row/column.
    coords = tuple(np.random.randint(0, i, int(num_salt))
                   for i in image.shape)
    # BUG FIX: index with a *tuple* of per-axis arrays.  The original
    # passed a list, which modern NumPy treats as indexing along the
    # first axis only, corrupting whole rows instead of single pixels.
    out[coords] = 1
    # Pepper mode
    num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
    coords = tuple(np.random.randint(0, i, int(num_pepper))
                   for i in image.shape)
    out[coords] = 0
    return out
pig_Noised = spnoise(original)
# +
# Corrupt each crop with salt-and-pepper noise.
Corte_1_noised = spnoise(CORTE_1)
Corte_2_noised = spnoise(CORTE_2)
Corte_3_noised = spnoise(CORTE_3)
# A small (3x3) median filter is the standard remedy for impulsive noise.
filtred_1 = ndimage.median_filter(Corte_1_noised, size=3)
filtred_2 = ndimage.median_filter(Corte_2_noised, size=3)
filtred_3 = ndimage.median_filter(Corte_3_noised, size=3)
# +
# Comparison figures: original / noised / median-filtered for each crop.
# FIX: the noise applied here is salt-and-pepper (impulsive), not
# Gaussian -- the figure titles said 'Ruído Gaussiano' and have been
# corrected to 'Ruído Sal e Pimenta'.
plt.figure(1, figsize=(12,15))
plt.title('Ruído Sal e Pimenta')
plt.subplot(131)
plt.imshow(CORTE_1, cmap='gray')
plt.axis('off')
plt.gca().set_title('PIG sem ruído')
plt.colorbar(fraction=0.046, pad=0.04)
plt.subplot(132)
plt.imshow(Corte_1_noised, cmap='gray')
plt.axis('off')
plt.gca().set_title('PIG com ruído')
plt.colorbar(fraction=0.046, pad=0.04)
plt.subplot(133)
plt.imshow(filtred_1, cmap='gray')
plt.axis('off')
plt.gca().set_title('PIG filtrado')
plt.colorbar(fraction=0.046, pad=0.04)
# +
plt.figure(1, figsize=(12,15))
plt.title('Ruído Sal e Pimenta')
plt.subplot(131)
plt.imshow(CORTE_2, cmap='gray')
plt.axis('off')
plt.gca().set_title('PIG sem ruído')
plt.colorbar(fraction=0.046, pad=0.04)
plt.subplot(132)
plt.imshow(Corte_2_noised, cmap='gray')
plt.axis('off')
plt.gca().set_title('PIG com ruído')
plt.colorbar(fraction=0.046, pad=0.04)
plt.subplot(133)
plt.imshow(filtred_2, cmap='gray')
plt.axis('off')
plt.gca().set_title('PIG filtrado')
plt.colorbar(fraction=0.046, pad=0.04)
# +
plt.figure(1, figsize=(12,15))
plt.title('Ruído Sal e Pimenta')
plt.subplot(131)
plt.imshow(CORTE_3, cmap='gray')
plt.axis('off')
plt.gca().set_title('PIG sem ruído')
plt.colorbar(fraction=0.046, pad=0.04)
plt.subplot(132)
plt.imshow(Corte_3_noised, cmap='gray')
plt.axis('off')
plt.gca().set_title('PIG com ruído')
plt.colorbar(fraction=0.046, pad=0.04)
plt.subplot(133)
plt.imshow(filtred_3, cmap='gray')
plt.axis('off')
plt.gca().set_title('PIG filtrado')
plt.colorbar(fraction=0.046, pad=0.04)
# -
# -- Comentário: não sei responder.
| 7,122 |
/Natuaral_Language_Processing/02_03_Vector_Projection.ipynb | 1bf5c2dc284d3c592105829a8c15666c79e9c2b4 | [] | no_license | ShamimurRahmanShuvo/NLP_learning | https://github.com/ShamimurRahmanShuvo/NLP_learning | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 130,739 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="lTguFckTEDWd"
# # Projecting embeddings on TensorFlow projector
#
# This notebook explains how you can write the vectors of an embeddings into a TSV file to visualise it in a 3D space on the [TensorFlow projector](https://projector.tensorflow.org/)
# + id="9mW3Mt2q5kL2" colab={"base_uri": "https://localhost:8080/"} outputId="f09950b4-efb0-4633-e0b6-5e308037899f"
##import the required libraries and APIs
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
print(tf.__version__)
# + [markdown] id="2rhw0j_s5UZ2"
# ## Downloading the TensorFlow `imdb_review` dataset
#
# > Make sure tensorflow_datasets is installed
# + id="dx_DJfb7EFHh" outputId="923220e4-9310-4c72-bcf6-21e8bd36109d" colab={"base_uri": "https://localhost:8080/", "height": 333, "referenced_widgets": ["77e7bfe8405d488392a8fae74d8072ff", "e666f9c59e76420a85dccc227f1433d5", "f775e858b16f409187f7240e0ca4c1d6", "55980ea6373b4e1e8075561d0dfc784d", "64643009f43643e0b9df7a437317bb07", "fe43875088074bb182c5a6c3515f9f49", "83dfdf8637e8495b9320cfd4cd78235c", "35395a89c0a44debab16445b2e4e1a05", "bff7271d547e490cba6c0c916e0ccf4b", "777d5400f8cc4442ae3f8f221fca1917", "b1dedd6bda3e4c6998ebd7c8648011db", "5d1efc080d1c4bd2b4eab93ad28978d8", "799d80a0e246487abd6132dcc40396f7", "d44c4e9f064e4d2f986424d26a2fa2f0", "007d43288e584a5999a5d026e054874b", "e5fd5a33c597494099551ade36d99024", "75cde6e0466e4f15a272f8ec766a01e7", "4ca321a092d643caac78a092c2839413", "2ca33d84f139483ba0e628df6acd9780", "fb75a416e9ea4b3badb3e99927a42002", "3131ee8bc0504633869ef70e69a5708f", "4d608440d32e412c9149da67e93509e8", "b7e758934cc34543baf17e578cfab86f", "6da7386bcef6481db9486dad8a3ec292", "be57718b5a884ff19fc7d8325e8f626c", "9256cf4cc4c64b0eb29456ba20aa6e6b", "f74a2cb8055b43e7890fd77b18266219", "998ca8f8daec464081cf258614b0569b", "004d1a7fd43b46de86661c01400e4d5c", "29d56278a4b94503b89efaca436a6044", "fdc98d40ddd74d77897369870fb8d078", "15438bdf15c24c77aca0acfc67eca6d3", "81861eb60b3f4ab690f42572374a25f7", "7494fe7cdd0f4577953f2d7662b77ffc", "79dad743b8034824b460ae09ecebafc7", "010e604fc70342b7a6f2d302196ccf3c", "d346cf6e9a5d4b67b3700ab02fd4c44f", "5684e137ec784eceb1b4d2f6a3e8a7bf", "06312e376c7b469780688dd8607e59d2", "53ab9e6c6b444b75b524451580245b6a", "46c1e3ccf906418ba2fdc7620b6768fa", "e894d5422f0e473a9c88396cf1fb0e85", "663d53877bab42f28799a4fb5a71f0f9", "80714a4cfef246fcaf894246b1f702ce", "762cb5632932496c9ff081c13ca834e7", "0dcca32612074aefa2c72fa7359397ca", "fdd21bcbb03944b6b83b7c29074bca66", "deb95d1971194c11a1816488e51b391f", "7c8364e0e4004db49d2fa93e6aa4611b", "f9b2bc618c814c5d9c9e5ebde7ef9162", "9317d47df6d846129bbd17dcc65c0254", 
"42ead25afc244ec1ad21eb733cb5b9bd", "f80bb788a17344e8a26be633fc6afe51", "02b2b7af376d4337bf87baaf4cabba4e", "c7829e39f52b49938a029b443050b4f0", "f5ecb203b59546088db04fd15b4d26cf", "6c612fdb9214435b991f528eefbe8ba3", "0fe818be51664d8fb36b93d4127f0664", "b22dd375fa4e437d835d3c6fed7da91f", "8a1249d75c264f039a066681aa2b49a1", "d2f1a4a44a6e41ec8951ae078b42a4d0", "e6266f10ee0a4f3a8b9619a23acb2d0d", "e6d0179f67b3408ba9dec2f10967a8f9", "3959d266d8f44ee1a1d3ab0f06232f06", "3204db94f92a4c468ae9ec36878d6cf1", "d0fec11a10364729b4697e54ededca84", "ff18cdf47e524951983f7872866fbd5f", "1c1eac8c50ac4f2f90dac7d62bf72e4b", "6b0eec4a74a143f9b5862dd1a8921a22", "d3511d8f48bd406895f20caa78d196bd", "ecb1e2636a72446881cb9d6082fcb886", "8097dea8c9e141c1b5345eb1355ca7b3", "0394aa7e474b45c8a83d8bc0d8c71167", "70ad82686d3940149da08a8bc63f1dbc", "a59088244bfd4976af11986bfae6f39d", "829d424fdc7b40d9a167d04a73c42751", "c65b61205b4a4481ae8db73f1564eb34", "c953a13daeec46d083cccc3ecc6c9603", "dd13e2a5c77c4ce2b035fbf1a61864e6", "a328e4a16ab2444b99fa0049eb77f6e0", "e7f71d51bf724b35b42305acc5898040", "9935df3fd4654b7bac69db186912784d", "baaa8376e84e4150b5bf1b8b794449e1", "c349218d2e8548b6920800578d3f600a", "32b1601494fa4874955868153a607b43", "0b9548a6604e4332abb416b999068d58", "be3f0f0f5c4f4a89bf06e99cdfb6d865", "8cfe67962c2b4b65a068efde15141053"]}
##load the imdb reviews dataset (downloads on first run)
data, info = tfds.load("imdb_reviews", with_info=True, as_supervised=True)

# + [markdown] id="8MBqFTBP6DT4"
# ## Segregating training and testing sets

# + id="GM2X1wLvUb8n"
##split the dataset into its train and test partitions
train_data, test_data = data['train'], data['test']

##accumulators for raw review text and sentiment labels
train_sentences = []
test_sentences = []
train_labels = []
test_labels = []

# + id="rxoAZl0gU_y-"
##materialize the train partition as python lists
for review, sentiment in train_data:
    train_sentences.append(str(review.numpy().decode('utf8')))
    train_labels.append(sentiment.numpy())

##materialize the test partition as python lists
for review, sentiment in test_data:
    test_sentences.append(str(review.numpy().decode('utf8')))
    test_labels.append(sentiment.numpy())

# + id="eDKl0NzBITfe"
##labels must be numpy arrays for model.fit
train_labels = np.array(train_labels)
test_labels = np.array(test_labels)

# + [markdown] id="pLIjftvF6IRZ"
# ## Data preparation - setting up the tokenizer

# + id="6Mqx-tgBVXQz"
##tokenization / padding hyper-parameters
vocab_size = 10000
embedding_dim = 16
max_length = 120
trunc_type = 'post'
oov_tok = "<OOV>"

# + id="nYsZatAaVmfq"
##fit the tokenizer on the training text only
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(train_sentences)
word_index = tokenizer.word_index

##training sequences, padded and post-truncated to a fixed length
train_seqs = tokenizer.texts_to_sequences(train_sentences)
train_padded = pad_sequences(train_seqs, maxlen=max_length, truncating=trunc_type)

##testing sequences (keeps the library's default truncation, as in the original)
test_seqs = tokenizer.texts_to_sequences(test_sentences)
test_padded = pad_sequences(test_seqs, maxlen=max_length)
# + colab={"base_uri": "https://localhost:8080/"} id="U7DbJ3cWV4zC" outputId="431560c7-c732-4346-8f2b-cad18dee8cf8"
# Invert the tokenizer's word -> id mapping so ids can be decoded back to words.
reverse_word_index = {index: word for word, index in word_index.items()}

def decode_review(text):
    """Map a sequence of token ids back to a space-separated string ('?' for unknown ids)."""
    words = (reverse_word_index.get(token, '?') for token in text)
    return ' '.join(words)

print(train_sentences[1])
print(train_padded[1])
print(decode_review(train_padded[1]))
# + [markdown] id="PcvfYesOIo3A"
# ## Define the Neural Network with Embedding layer
#
# 1. Use the Sequential API.
# 2. Add an embedding input layer of input size equal to vocabulary size.
# 3. Add a flatten layer, and two dense layers.
# + colab={"base_uri": "https://localhost:8080/"} id="RF6ict6vWJAV" outputId="89d43a5b-ba39-4bd3-ad89-520ad6b9b525"
##assemble the classifier: embedding -> flatten -> small dense head -> sigmoid
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])

##compile the model with loss function, optimizer and metrics
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# + [markdown] id="hlNRXgJ99Dv9"
# ## Model Training
# + colab={"base_uri": "https://localhost:8080/"} id="2S9zFDyLWZDF" outputId="3c3363d7-2b00-4670-e2f4-ec5ab38eec5c"
##number of passes over the training data
num_epochs = 10

##train, validating against the held-out test split after every epoch
model.fit(train_padded, train_labels, epochs=num_epochs,
          validation_data=(test_padded, test_labels))
# + [markdown] id="QCHKeE8Z9Gm9"
# ## Deriving weights from the embedding layer
# + id="X6aT5Q63gW8j" colab={"base_uri": "https://localhost:8080/"} outputId="6e9bd89d-e9d5-451b-8363-be7a8d562379"
##isolating the first embedding layer
l1 = model.layers[0]
##extracting learned weights (one embedding vector per vocabulary index)
weights = l1.get_weights()[0]
print(weights.shape) # shape: (vocab_size, embedding_dim)
print(weights[0])    # embedding vector of index 0 (the padding token)
# + [markdown] id="iDbASWaV-hqW"
# ## Downloading the vectors and metadata
# + id="_U2cCtIY9bA3"
##import I/O module in python
import io

##write each word and its corresponding embedding vector to the two TSV files
##expected by the TensorFlow projector; the context managers guarantee both
##streams are flushed and closed even if an error occurs mid-loop
with io.open('vectors.tsv', 'w', encoding='utf-8') as vectors, \
     io.open('meta.tsv', 'w', encoding='utf-8') as meta:
    # index 0 is the padding token, so start from 1
    for index in range(1, vocab_size):
        word = reverse_word_index[index]  # word_index flipped to id -> word
        embeddings = weights[index]       # embedding vector for this word
        meta.write(word + "\n")
        vectors.write('\t'.join(str(x) for x in embeddings) + "\n")
# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="VMz_eEKS-koj" outputId="cbc6d159-6dfc-4832-ced8-f7aefae7bec3"
##download the written files to your local machine (silently skipped when the
##google.colab module is unavailable, i.e. outside Colab)
try:
    from google.colab import files
except ImportError:
    pass
else:
    for exported in ('vectors.tsv', 'meta.tsv'):
        files.download(exported)
# + id="SB-_H3ifBSWy"
| 8,936 |
/Train/ConceptBert_Clef_Overall.ipynb | 7e9016a338cc1162e42ee65bccec23818be17c30 | [] | no_license | David9857/VQA | https://github.com/David9857/VQA | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 22,355 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="XjlGELnUciRG" executionInfo={"status": "ok", "timestamp": 1620473827899, "user_tz": -480, "elapsed": 19696, "user": {"displayName": "Li Ding", "photoUrl": "", "userId": "17494963640534841610"}} outputId="e27ce838-386d-4f94-ae64-24fd7f004c5b"
# Mount Google Drive so the dataset archives below can be copied into the VM.
from google.colab import drive
drive.mount('/content/drive')
# + id="XvYQHWy2AfWc"
# !cp /content/drive/MyDrive/data/data_clef.zip /content
# !unzip data_clef.zip -d /content
# + [markdown] id="9NShrTO1Ywbx"
# Move each knowledge-embedding archive (cleftrain, cleftest, clefVal) into the
# training/testing/validation folders respectively, then rename each extracted
# folder to `knowledge_embeddings`. (Translated from Chinese.)
# + id="vFuxuzEIAR4E"
# !cp -r /content/drive/MyDrive/data/knowledge_embeddings_cleftrain.zip /content
# !unzip knowledge_embeddings_cleftrain.zip -d /content/data_clef/training
# + id="PdPw4T0QYBSU"
# !cp -r /content/drive/MyDrive/data/knowledge_embeddings_cleftest.zip /content
# !unzip knowledge_embeddings_cleftest.zip -d /content/data_clef/testing
# + id="7ygB84cWYHYR"
# !cp -r /content/drive/MyDrive/data/knowledge_embeddings_clefVal.zip /content
# !unzip knowledge_embeddings_clefVal.zip -d /content/data_clef/validation
# + id="Jodpx-HNUnHu"
# !mv /content/data_clef/training/knowledge_embeddings_cleftrain /content/data_clef/training/knowledge_embeddings
# !mv /content/data_clef/testing/knowledge_embeddings_cleftest /content/data_clef/testing/knowledge_embeddings
# !mv /content/data_clef/validation/knowledge_embeddings_clefVal /content/data_clef/validation/knowledge_embeddings
# + id="51EhIj6sR-Wd"
# # !pip install pycocoevalcap
# + id="FxgVFKW7cjQ6" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1620473827901, "user_tz": -480, "elapsed": 1573, "user": {"displayName": "Li Ding", "photoUrl": "", "userId": "17494963640534841610"}} outputId="b9edb44e-214e-4647-e5f3-831c158e098a"
# %cd /content/drive/MyDrive/VQACode
# + [markdown] id="dF34gvkhYohK"
# Remember to move check_point_clef into this folder before training. (Translated from Chinese.)
# + id="veOeaYa_ai7I"
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import pandas as pd
import time
import pathlib
from utils.load_data_overall import DataLoader, ClefDataLoader
from utils.evaluation import AnswerEvaluator
from utils.training_toolkit import CustomSchedule, loss_function
from models.Conceptbert.Conceptbert import VQATransformer
from models.Transformer.masks import create_masks
# + id="ahCO3StWai7N"
# Transformer / training hyper-parameters.
num_layers=2                  # encoder/decoder stack depth
d_model=512                   # model (attention) dimension
num_heads=8                   # attention heads
dff=2048                      # feed-forward inner dimension
maximum_position_encoding=10000
EPOCHS = 50
batch_size = 64
cnn_type = 'resnet'           # pretrained image backbone
embedding = 'bioelmo' # choose from ['w2v', 'bioelmo', 'biobert', 'bluebert', 'large_biobert', 'elmo', 'bert']
data_augmentation = True      # enable random flip/rotation on training images
# + id="zABfFsD9ai7N"
####### DO NOT CHANGE VALUES OF THIS BLOCK IF YOU ARE NOT THE DEVELOPER ##########
# Paths for checkpoints and result CSVs, derived from the chosen embedding/backbone.
check_point_path = './check_point_clef/transformer/overall/' + embedding +'/' + cnn_type + '_' + str(num_layers)
saving_folder = './overall_results_clef/transformer/' + embedding + '/'
save_result_path = saving_folder + cnn_type + '_' + str(num_layers) + '.csv'
kn_input = 155          # number of knowledge-embedding rows per sample
emb_size = 1024         # question-embedding width (bioelmo)
pe_output = 36 + 1      # max decoded answer length incl. the <start> token
MAX_LENGTH = pe_output
# Image size depends on the CNN backbone.
if cnn_type == 'inception':
    img_shape = [299, 299]
    img_padding = tf.TensorShape([299, 299, 3])
if cnn_type in ['resnet', 'resnet_v2', 'dense_net', 'vgg19']:
    img_shape = None
    img_padding = tf.TensorShape([224, 224, 3])
# Question-sequence length depends on the text-embedding model; only the
# bioelmo branch is active — the alternatives below were disabled by the author.
if embedding == 'bioelmo':
    pe_input = 11
# elif embedding == 'elmo':
#     pe_input = 42
# elif embedding == 'biobert':
#     pe_input = 72
#     emb_size = 768
# elif embedding == 'bluebert':
#     pe_input = 69
# elif embedding == 'large_biobert':
#     pe_input = 60
# elif embedding == 'w2v':
#     pe_input = 48
#     emb_size = 200
# elif embedding == 'bert':
#     pe_input = 72
#     emb_size = 1024
# else:
#     raise TypeError("Wrong embedding type")
if data_augmentation:
    # Random flip plus a slight (±5% of 2π) rotation, applied to image batches
    # on the fly inside train_step.
    aug = tf.keras.Sequential([
        tf.keras.layers.experimental.preprocessing.RandomFlip(),
        tf.keras.layers.experimental.preprocessing.RandomRotation(0.05),
    ])
# + colab={"base_uri": "https://localhost:8080/"} id="I5om76cqep_S" executionInfo={"status": "ok", "timestamp": 1620396875167, "user_tz": -480, "elapsed": 1698, "user": {"displayName": "Li Ding", "photoUrl": "", "userId": "17494963640534841610"}} outputId="f194ccd6-40b2-41be-e4db-51caa5eeb756"
# CLEF training split: each element is ((image, question embedding, knowledge
# embedding), answer ids, question id), padded to fixed shapes for batching.
train_loader = DataLoader('/content/data_clef/training', embedding)
train_dataset, train_tokenizer = train_loader.create_dataset('open_ended')
batched_train_set = train_dataset.padded_batch(
    batch_size,
    padded_shapes=((img_padding, tf.TensorShape([pe_input, emb_size]), tf.TensorShape([kn_input, 1024])),
                   tf.TensorShape([pe_output-1]), []),
    drop_remainder=True)
# + colab={"base_uri": "https://localhost:8080/"} id="PAAQZnK6fV88" executionInfo={"status": "ok", "timestamp": 1620396876127, "user_tz": -480, "elapsed": 1438, "user": {"displayName": "Li Ding", "photoUrl": "", "userId": "17494963640534841610"}} outputId="7478e93f-ab68-4096-8f9f-389d78f84985"
# CLEF test split, batched one sample at a time for greedy decoding.
test_loader = DataLoader('/content/data_clef/testing', embedding)
test_dataset, test_tokenizer = test_loader.create_dataset('open_ended')
batched_test_set = test_dataset.padded_batch(
    1,
    padded_shapes=((img_padding, tf.TensorShape([pe_input, emb_size]), tf.TensorShape([kn_input, 1024])),
                   tf.TensorShape([pe_output-1]), []),
    drop_remainder=True)
# + id="zMuGfU8_WDlY" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1620396876887, "user_tz": -480, "elapsed": 1073, "user": {"displayName": "Li Ding", "photoUrl": "", "userId": "17494963640534841610"}} outputId="6a6d8e79-7c30-4705-eec6-c1f712ad9c7c"
# CLEF validation split, also batched one sample at a time.
val_loader = DataLoader('/content/data_clef/validation', embedding)
val_dataset, val_tokenizer = val_loader.create_dataset('open_ended')
batch_val_set = val_dataset.padded_batch(
    1,
    padded_shapes=((img_padding, tf.TensorShape([pe_input, emb_size]), tf.TensorShape([kn_input, 1024])),
                   tf.TensorShape([pe_output-1]), []),
    drop_remainder=True)
# + colab={"base_uri": "https://localhost:8080/"} id="wnjBtGPjai7O" executionInfo={"status": "ok", "timestamp": 1620396878863, "user_tz": -480, "elapsed": 799, "user": {"displayName": "Li Ding", "photoUrl": "", "userId": "17494963640534841610"}} outputId="4672c78f-11a2-41c6-fd1d-c420e2a6cacb"
# Vocabulary size = number of known words + 1 for the padding index 0.
vocab_size=len(train_tokenizer.index_word) + 1
print(vocab_size)
# + [markdown] id="vw2ZQSbHai7P"
# ###
# + id="k_nYLXzRai7P" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1620396883599, "user_tz": -480, "elapsed": 3039, "user": {"displayName": "Li Ding", "photoUrl": "", "userId": "17494963640534841610"}} outputId="539a8acc-379e-4f48-f977-da2fc7eb720b"
# Build the ConceptBert VQA model plus its optimizer, metrics and checkpointing.
transformer = VQATransformer(num_layers, d_model, num_heads, dff, vocab_size,
                             pe_input, pe_output, 32, 1024,
                             pretrained_cnn_type=cnn_type)

# Warm-up learning-rate schedule (see utils.training_toolkit.CustomSchedule).
learning_rate = CustomSchedule(d_model)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)

# Running metrics, reset at the start of every epoch in the training loop.
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')

# Checkpoint manager keeps the 5 most recent checkpoints.
ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, check_point_path, max_to_keep=5)
# + id="UkbtWAfmai7Q"
@tf.function()
def train_step(img, question, kn, tar):
    """Run one optimization step on a batch of (image, question, knowledge, answer)."""
    if data_augmentation:
        img = aug(img)  # random flip/rotation on the image batch
    # Teacher forcing: the decoder input is the answer shifted right by one
    # token, and the expected output is the answer shifted left by one token.
    decoder_input = tar[:, :-1]
    expected_output = tar[:, 1:]
    enc_padding_mask, combined_mask, dec_padding_mask = create_masks(question, decoder_input)
    with tf.GradientTape() as tape:
        predictions, _ = transformer(question, img, kn, decoder_input,
                                     True,
                                     enc_padding_mask,
                                     combined_mask,
                                     dec_padding_mask)
        batch_loss = loss_function(expected_output, predictions)
    grads = tape.gradient(batch_loss, transformer.trainable_variables)
    optimizer.apply_gradients(zip(grads, transformer.trainable_variables))
    # Fold this batch into the epoch-level metrics.
    train_loss(batch_loss)
    train_accuracy(expected_output, predictions)
# + id="COJaJ6rbai7Q"
def evaluate(question, img, kn):
    """Greedy-decode an answer for a single (question, image, knowledge) triple.

    Returns a tuple of (decoded token-id sequence, including the leading
    <start> token, attention weights from the final decoding step).
    """
    end_token = tf.constant(train_tokenizer.texts_to_sequences(['<end>']), tf.int32)
    # Decoding starts from <start>; `output` has shape (1, t) and grows one id
    # per step until <end> is produced or MAX_LENGTH steps have run.
    output = tf.expand_dims([train_tokenizer.word_index['<start>']], 0)
    for _ in range(MAX_LENGTH):
        enc_padding_mask, combined_mask, dec_padding_mask = create_masks(
            question, output)
        predictions, attention_weights = transformer(question,
                                                     img,
                                                     kn,
                                                     output,
                                                     False,
                                                     enc_padding_mask,
                                                     combined_mask,
                                                     dec_padding_mask)
        # Only the logits of the most recent position matter for the next token.
        last_step = predictions[:, -1:, :]
        predicted_id = tf.cast(tf.argmax(last_step, axis=-1), tf.int32)
        if predicted_id == end_token:
            return tf.squeeze(output, axis=0), attention_weights
        output = tf.concat([output, predicted_id], axis=-1)
    # Length limit reached without emitting <end>.
    return tf.squeeze(output, axis=0), attention_weights
# + id="OFmQ4nzNJME8"
################ADD#################################################################################
def get_score(batch_data_set, csv_saving_path, target_tokenizer=None, pred_tokenizer=None):
    """Decode a batched dataset, dump (truth, prediction) pairs to CSV and score them.

    Bug fix: the original body referenced an undefined global ``tokenizer`` and
    would raise NameError on the first call.  The tokenizers are now explicit,
    backward-compatible keyword parameters.

    Args:
        batch_data_set: batched tf.data set yielding
            ((image, question embedding, knowledge embedding), answer ids, question id).
        csv_saving_path: destination CSV path for the decoded answers.
        target_tokenizer: tokenizer whose vocabulary decodes the ground-truth ids.
            Defaults to ``val_tokenizer`` to match the (commented) validation call sites.
        pred_tokenizer: tokenizer whose vocabulary decodes the model's predicted ids.
            Defaults to ``train_tokenizer``, the vocabulary the model was trained on.

    Returns:
        The metrics produced by ``AnswerEvaluator(csv_saving_path).evaluate()``.
    """
    if target_tokenizer is None:
        target_tokenizer = val_tokenizer
    if pred_tokenizer is None:
        pred_tokenizer = train_tokenizer
    true_answers_list = []
    predicted_answers_list = []
    ques_id_list = []
    for (batch, (img_question, target, ques_id)) in enumerate(batch_data_set):
        # Decode the ground-truth answer: stop at padding (id 0), drop <start>/<end>.
        target = target.numpy()[0]
        true_answer = []
        for i in target:
            if i == 0:
                break
            true_answer.append(target_tokenizer.index_word[i])
        true_answer = " ".join(true_answer[1: -1])
        # Greedy-decode the model's answer; drop the leading <start> token.
        prediction, attention = evaluate(img_question[1], img_question[0], img_question[2])
        p = prediction.numpy()
        predict_answer = " ".join(pred_tokenizer.index_word[i] for i in p[1:])
        true_answers_list.append(true_answer)
        predicted_answers_list.append(predict_answer)
        ques_id_list.append(ques_id)
    data = {"true answer": true_answers_list, "predicted answer": predicted_answers_list, "ques_id": ques_id_list}
    df = pd.DataFrame(data)
    # exist_ok=True already makes this idempotent; no separate exists() check needed.
    pathlib.Path(saving_folder).mkdir(parents=True, exist_ok=True)
    df.to_csv(csv_saving_path)
    return AnswerEvaluator(csv_saving_path).evaluate()
##################ADD#################################################################################
# + [markdown] id="ennkn6o1ai7Q"
# ###
# + id="yhy3HUU4ai7R"
## restore check point
# ckpt.restore(ckpt_manager.latest_checkpoint)
# + id="BDTMV1bZJdFr" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1620401605492, "user_tz": -480, "elapsed": 1699082, "user": {"displayName": "Li Ding", "photoUrl": "", "userId": "17494963640534841610"}} outputId="d7913df8-b06c-4feb-8345-bbc3220f41c6"
for epoch in range(EPOCHS):
    start = time.time()  # kept for optional epoch timing
    # Reset the running metrics so each epoch is reported independently.
    train_loss.reset_states()
    train_accuracy.reset_states()
    for batch, (img_question, tar, _) in enumerate(batched_train_set):
        # img_question unpacks to (image, question embedding, knowledge embedding).
        train_step(img_question[0], img_question[1], img_question[2], tar)
    print('Epoch {} Loss {:.4f} Accuracy {:.4f}'.format(
        epoch + 1, train_loss.result(), train_accuracy.result()))
    # NOTE: per-epoch validation scoring (get_score on batch_val_set) and
    # checkpoint saving via ckpt_manager were disabled by the original author;
    # re-enable them here if best-model selection is needed.
# + colab={"base_uri": "https://localhost:8080/"} id="1K1bCoBeai7R" executionInfo={"status": "ok", "timestamp": 1620382801370, "user_tz": -480, "elapsed": 234035, "user": {"displayName": "Li Ding", "photoUrl": "", "userId": "17494963640534841610"}} outputId="711de829-c820-4ada-f7e8-3d521821ce1e"
# Greedy-decode every test sample and collect truth/prediction string pairs.
true_answers_list = []
predicted_answers_list = []
ques_id_list = []
print('Start predicting...')
for batch, (img_question, target, ques_id) in enumerate(batched_test_set):
    # Decode the ground-truth answer ids: stop at padding (id 0), then drop
    # the <start>/<end> tokens before joining into a string.
    target_ids = target.numpy()[0]
    decoded = []
    for token_id in target_ids:
        if token_id == 0:
            break
        decoded.append(test_tokenizer.index_word[token_id])
    true_answer = " ".join(decoded[1: -1])
    # Model prediction: ids decoded with the training vocabulary; drop <start>.
    prediction, attention = evaluate(img_question[1], img_question[0], img_question[2])
    predicted_ids = prediction.numpy()
    predict_answer = " ".join(train_tokenizer.index_word[token_id] for token_id in predicted_ids[1:])
    true_answers_list.append(true_answer)
    predicted_answers_list.append(predict_answer)
    ques_id_list.append(ques_id)
    print("predicted answer: " + str(batch), end='\r', flush=True)
# + colab={"base_uri": "https://localhost:8080/"} id="sZ1y_C9pai7S" executionInfo={"status": "ok", "timestamp": 1620382802596, "user_tz": -480, "elapsed": 1204, "user": {"displayName": "Li Ding", "photoUrl": "", "userId": "17494963640534841610"}} outputId="6832ba37-6b95-49d4-843c-db28dd291706"
# Persist ground truth vs. predictions so they can be scored below / offline.
df = pd.DataFrame({
    "true answer": true_answers_list,
    "predicted answer": predicted_answers_list,
    "ques_id": ques_id_list,
})
out_dir = pathlib.Path(saving_folder)
if not out_dir.exists():
    out_dir.mkdir(parents=True, exist_ok=True)
name = save_result_path
df.to_csv(name)
print("complete writing", name)
# + colab={"base_uri": "https://localhost:8080/"} id="NzD5vP2Pai7S" executionInfo={"status": "ok", "timestamp": 1620382803321, "user_tz": -480, "elapsed": 1916, "user": {"displayName": "Li Ding", "photoUrl": "", "userId": "17494963640534841610"}} outputId="5f35786d-47f6-40b0-caf8-8f913a69f84a"
# Score the saved CSV — presumably accuracy/NLP metrics; see utils.evaluation.AnswerEvaluator.
scores = AnswerEvaluator(name).evaluate()
# + id="-YfrYFXsai7S"
| 15,758 |
/Source/05_Feature_Engineering.ipynb | 99440943f75057bc6f1c40513b8986037c68730f | [
"MIT"
] | permissive | acdick/endangered_species_classification | https://github.com/acdick/endangered_species_classification | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 262,413 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Feature Engineering
# +
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from imblearn.over_sampling import SMOTENC
from imblearn.under_sampling import NearMiss
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
import plotly_express as px
# -
# ### Load Data from Fish & Wildlife Service, Forest Service and Environmental Protection Agency
# +
# Load datasets (pre-pickled by the earlier notebooks: Fish & Wildlife Service
# species, Forest Service acreage, EPA air-quality index).
species = pd.read_pickle("../Data/FWS.pkl")
forests = pd.read_pickle("../Data/FS.pkl")
aqi = pd.read_pickle("../Data/EPA.pkl")

# Merge datasets on the state; AQI is left-joined so species in states without
# AQI records are kept (with NaNs).
species = species.merge(forests, on='State')
species = species.merge(aqi, left_on='State', right_index=True, how='left')
# -
species.head()
# +
#px.choropleth(species, locationmode='USA-states', locations='State',
#              scope='usa', color="Good Days",
#              color_continuous_scale=px.colors.sequential.Plasma)
# -
# Drop multicollinear columns (state converted to continuous with total land area)
species = species.drop(['State'], axis = 1)
species.head()
# ### Feature Interactions
# +
# Percentage of forest coverage: forest acreage relative to total land acreage.
species['Total Land Acreage'] = species['Total Land Area (Thousands of Acres)']
species['Total Land Acreage'] = species['Total Land Acreage'].astype(float)
species['State Forest Coverage'] = species['Forest Land Area (Thousands of Acres)'] / species['Total Land Acreage']

# Drop the raw columns now folded into the ratio.
species = species.drop(['Total Land Area (Thousands of Acres)', 'Forest Land Area (Thousands of Acres)'], axis = 1)
# +
# Ratio of good AQI days to total AQI days recorded in 2018
species['Good AQI Days per Year'] = species['Good Days'] / species['Days with AQI']

# Drop the raw AQI columns now folded into the ratio.
species = species.drop(['Days with AQI', 'Good Days'], axis = 1)
species.head()
# +
# Visualize listing status against forest coverage and air quality, with point
# size proportional to the state's land acreage.
fig, aa = plt.subplots(figsize=(10,10))
sns.scatterplot(
    x='State Forest Coverage',
    y='Good AQI Days per Year',
    hue='Federal Listing Status',
    size='Total Land Acreage',
    sizes=(50, 150),
    data=species,
)
# ### Create Dummy Variables for Categorical Data
#create dummy variables
species = pd.get_dummies(data=species, columns=['Family', 'Group', 'Region', 'VIP'])
print(species.shape)
species.head()
# +
#species = species[['Federal Listing Status',
# 'Total Land Acreage','State Forest Coverage','Good AQI Days per Year',
# 'Family_Orchidaceae','Family_Unionidae','Family_Vespertilionidae','Family_Asteraceae','Family_Fabaceae',
# 'Family_Cyperaceae','Family_Cyprinidae','Family_Scrophulariaceae','Family_Brassicaceae','Family_Rallidae',
# 'Family_Poaceae','Family_Accipitridae','Family_Salmonidae','Family_Laridae','Family_Lycaenidae',
# 'Family_Nymphalidae','Family_Ranunculaceae','Family_Liliaceae','Family_Rosaceae','Family_Scolopacidae',
# 'Family_Noctuidae','Family_Caryophyllaceae','Family_Emberizidae',
# 'Group_Amphibians','Group_Annelid Worms','Group_Arachnids','Group_Birds','Group_Clams',
# 'Group_Conifers and Cycads','Group_Corals','Group_Crustaceans','Group_Ferns and Allies','Group_Fishes',
# 'Group_Flatworms and Roundworms','Group_Flowering Plants','Group_Hydroids','Group_Insects','Group_Lichens',
# 'Group_Mammals','Group_Millipedes','Group_Reptiles','Group_Snails','Group_Sponges',
# 'Region_1','Region_2','Region_3','Region_4','Region_5','Region_6',
#'Region_7','Region_8','Region_NMFS',
# 'VIP_I','VIP_P','VIP_V']]
#print(species.shape)
#species.head()
# -
# ### Set Target and Feature Variables
# +
# Create target variable (multi-class federal listing status) and label names.
y = species['Federal Listing Status']
y_labels = list(y.unique())
print(y_labels)

# Create feature variables: everything except the target column.
X = species.drop(['Federal Listing Status'], axis = 1)
X.head()
# -
# ### Create Training and Test Sets
# +
# Hold out 20% of the records for testing.
# NOTE(review): no random_state/stratify is passed, so the split is neither
# reproducible nor class-stratified — confirm this is intended.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)

# Summarize class counts and proportions for both splits.
data_stats = pd.DataFrame({
    'Train': y_train.value_counts(),
    'Test': y_test.value_counts(),
    'Train Normal': y_train.value_counts() / y_train.count(),
    'Test Normal': y_test.value_counts() / y_test.count(),
})
data_stats.loc['Total'] = data_stats.sum().astype(int)
data_stats
# -
# ### Feature Scaling
# Columns holding continuous measurements (everything else is a 0/1 dummy).
non_categorical_cols = ['Total Land Acreage', 'State Forest Coverage', 'Good AQI Days per Year']
X_train.head()
# +
# Fit the min-max scaler on the training data only, then apply the same
# transform to the test set to avoid leakage.
scaler = MinMaxScaler()
X_train[non_categorical_cols] = scaler.fit_transform(X_train[non_categorical_cols])
X_test[non_categorical_cols] = scaler.transform(X_test[non_categorical_cols])
X_train.head()
# -
# ### Class Balancing
# +
# balance classes with SMOTENC oversampling (the dummy columns start at index 3)
smote = SMOTENC(categorical_features=list(range(3, len(X_train.columns))))
X_train_smote, y_train_smote = smote.fit_resample(X_train, y_train)
X_train_smote = pd.DataFrame(X_train_smote, columns=X_train.columns)
y_train_smote = pd.Series(y_train_smote)

# class counts and proportions after oversampling
smote_stats = pd.DataFrame({
    'Train': y_train_smote.value_counts(),
    'Test': y_test.value_counts(),
    'Train Normal': y_train_smote.value_counts() / y_train_smote.count(),
    'Test Normal': y_test.value_counts() / y_test.count(),
})
smote_stats.loc['Total'] = smote_stats.sum().astype(int)
smote_stats
# -
# sanity-check that the synthetic rows kept the categorical columns binary
X_train_smote.describe()
# +
# balance classes with NearMiss undersampling (shrinks all but the minority class)
under = NearMiss(sampling_strategy='not minority')
X_train_under, y_train_under = under.fit_resample(X_train, y_train)
X_train_under = pd.DataFrame(X_train_under, columns=X_train.columns)
y_train_under = pd.Series(y_train_under)

# class counts and proportions after undersampling
under_stats = pd.DataFrame({
    'Train': y_train_under.value_counts(),
    'Test': y_test.value_counts(),
    'Train Normal': y_train_under.value_counts() / y_train_under.count(),
    'Test Normal': y_test.value_counts() / y_test.count(),
})
under_stats.loc['Total'] = under_stats.sum().astype(int)
under_stats
# -
# check categorical features of the class-balanced model
X_train_under.describe()
# ### Scaled and Balanced Datasets
# +
# Compare the class balance and feature distributions of the three training
# sets side by side: one column per sampling strategy.
fig, axes = plt.subplots(3, 3, figsize=(16,16))

column_titles = ['Imbalanced', 'SMOTE Oversampled', 'Near Miss Undersampled']
training_sets = [(X_train, y_train), (X_train_smote, y_train_smote), (X_train_under, y_train_under)]
for col, (X_set, y_set) in enumerate(training_sets):
    # Row 0: class counts (shared y-limit and category order for comparability).
    sns.countplot(y_set, order=y_train.value_counts().index, alpha=0.8, ax=axes[0, col])
    axes[0, col].set_ylim(0, 3200)
    axes[0, col].set_title(label=column_titles[col])
    axes[0, col].set(xlabel='Federal Listing Status', ylabel='Species Records')
    # Rows 1-2: pairwise scatter of the three continuous features.
    sns.scatterplot(x=X_set.columns[0], y=X_set.columns[1], data=X_set, ax=axes[1, col])
    axes[1, col].set_title(label=column_titles[col])
    sns.scatterplot(x=X_set.columns[1], y=X_set.columns[2], data=X_set, ax=axes[2, col])
    axes[2, col].set_title(label=column_titles[col])
# -
# ### Data Loading
# +
# Persist every split (raw, SMOTE-balanced, NearMiss-balanced) plus the label
# names so downstream modeling notebooks can load them without re-running this one.
X_train.to_pickle("../Data/X_train.pkl")
X_test.to_pickle("../Data/X_test.pkl")
y_train.to_pickle("../Data/y_train.pkl")
y_test.to_pickle("../Data/y_test.pkl")

X_train_smote.to_pickle("../Data/X_train_smote.pkl")
y_train_smote.to_pickle("../Data/y_train_smote.pkl")
X_train_under.to_pickle("../Data/X_train_under.pkl")
y_train_under.to_pickle("../Data/y_train_under.pkl")

pd.DataFrame(y_labels).to_pickle('../Data/y_labels.pkl')
| 9,014 |
/Stain_norm.ipynb | 2d6fc8bda5aba022ad4321539d7e76825e80960e | [] | no_license | NeXu7/Hist-Norm | https://github.com/NeXu7/Hist-Norm | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 1,761,395 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="W9HV1kbmS-BL" colab_type="text"
# # Data visualization with Bokeh
# ## Glyphs
# glyphs - visual properties of shapes
# - visual shapes (circles, squares, lines, wedges, etc.)
# - with properties attached to data
# - coordinates
# - size, colour, transparency (alpha)
#
#
# + id="BPTFM16eTxuR" colab_type="code" colab={}
# #!pip install bokeh
# + id="uWVntIVXS9dI" colab_type="code" colab={}
from bokeh.io import output_file, show
# to create a basic empty plot
from bokeh.plotting import figure
from bokeh.io import output_notebook
# + id="hojYTyYMT5DY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 447} outputId="23dbb801-3d66-4462-e895-92c29e0a3969"
# Render Bokeh output inline in the notebook instead of writing an HTML file.
output_notebook()
plot = figure(plot_width=400, plot_height=400, tools='pan, box_zoom')
plot.circle([1, 2, 3, 4, 5], [8, 6, 5, 2, 3]) # x & y coordinates of the circle glyphs
#output_file('circle.html') # save to an HTML file instead of inline output
show(plot) # show
# + [markdown] id="1vHy-8TGVbz7" colab_type="text"
# ### Properties
# - lists, arrays, sequencies of values
# - single fixed values (default values for all)
# + id="Ff5vnFegVmFZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 647} outputId="4e9859cb-10ff-4cf9-af35-afb4fe4651a6"
output_notebook()
plot = figure()
plot.circle(x=10, y=[2, 5, 8, 12], size =[10, 20, 30, 40])
show(plot)
# + [markdown] id="MdKBiVYDWA8j" colab_type="text"
# ### Markers
# - asterisk()
# -circle()
# - circle_cross()
# - circle_x()
# - cross()
# - diamond()
# - diamond_cross()
# - inverted_triangle()
# - square()
# - square_cross()
# - square_x()
# - triangle()
# - x()
# + id="ZLsNTRS-VxGX" colab_type="code" colab={}
# TASK 1
# NOTE(review): fertility/female_literacy (and the *_latinamerica/_africa
# variants below) are supplied by the DataCamp exercise environment — they
# are not defined anywhere in this notebook.
# Import figure from bokeh.plotting
#from bokeh.plotting import figure
# Import output_file and show from bokeh.io
#from bokeh.io import output_file, show
# Create the figure: p
p = figure(x_axis_label='fertility (children per woman)', y_axis_label='female_literacy (% population)')
# Add a circle glyph to the figure p
p.circle(fertility, female_literacy)
# Call the output_file() function and specify the name of the file
output_file('fert_lit.html')
# Display the plot
show(p)
# + id="lqUPYEuKX6Ow" colab_type="code" colab={}
# TASK 2
# Create the figure: p
p = figure(x_axis_label='fertility', y_axis_label='female_literacy (% population)')
# Add a circle glyph to the figure p
p.circle(fertility_latinamerica, female_literacy_latinamerica)
# Add an x glyph to the figure p (different marker distinguishes the regions)
p.x(fertility_africa, female_literacy_africa)
# Specify the name of the file
output_file('fert_lit_separate.html')
# Display the plot
show(p)
# + [markdown] id="Xk25RtfkYPpB" colab_type="text"
# ## Customize scatter glyphs
# - **color**, as hexadecimal strings, tuples of RGB values between 0 and 255, and any of the 147 CSS color names
# - **size**, screen space units with 100 meaning the size of the entire figure
# - **alpha**, controls transparency, floating point numbers between 0.0, meaning completely transparent, and 1.0, meaning completely opaque
# + id="80czO2ucZBmy" colab_type="code" colab={}
# Create the figure: p
# NOTE(review): the fertility/female_literacy arrays are provided by the
# exercise environment, not this notebook.
p = figure(x_axis_label='fertility (children per woman)', y_axis_label='female_literacy (% population)')
# Add a blue circle glyph to the figure p
p.circle(fertility_latinamerica, female_literacy_latinamerica, color='blue', size=10, alpha=0.8)
# Add a red circle glyph to the figure p
p.circle(fertility_africa, female_literacy_africa, color='red', size=10, alpha=0.8)
# Specify the name of the file
output_file('fert_lit_separate_colors.html')
# Display the plot
show(p)
# + [markdown] id="-pWFn9DsZIKK" colab_type="text"
# ## Additional glyphs
# ### Lines + markers
#
# + id="SsoC7u_sZPUY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 447} outputId="c0550c88-dc92-4298-f70a-f1a432672a70"
output_notebook()
x = list(range(1, 6))
y = [8, 6, 5, 2, 3]
plot = figure(plot_width=350, plot_height=400)
# Overlay a line with circle markers at the same data points.
plot.line(x, y, line_width=3)
plot.circle(x, y, fill_color='white', size=10)
show(plot)
# + [markdown] id="WK3_pC_fZ97W" colab_type="text"
# ### Patches
# - useful for showing geographical regions
# - can draw multiple polygons based on a list of patch coordinates -> data given as "list of lists" (multiple coord.)
# + id="QD5j79u8Zd5g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 447} outputId="ff821a6e-d7d0-405a-ab14-e93977139445"
output_notebook()
# Each inner list is the vertex coordinates of one polygon.
xs = [ [1, 1, 2, 2], [2, 2, 4], [2, 2, 3, 3] ]
ys = [ [2, 5, 5, 2], [3, 5, 5], [2, 3, 4, 2] ]
plot = figure(plot_width=350, plot_height=400)
plot.patches(xs, ys,
             fill_color=['red', 'blue', 'green'], # list of three colors
             line_color='white',
             alpha=0.7)
show(plot)
# + [markdown] id="-ppHEbkRbYq9" colab_type="text"
# - annulus()
# - annular_wedge()
# - wedge()
# - rect()
# - quad()
# - vbar()
# - hbar()
# - image()
# - image_rgba()
# - image_url()
# - patch()
# - patches()
# - line()
# - multi_line()
# - circle()
# - oval()
# - ellipse()
# - arc()
# - quadratic()
# - bezier()
# + id="BZZDWm__a7X5" colab_type="code" colab={}
# TASK 3
# NOTE(review): date/price (stock data) and the az/co/nm/ut lons/lats
# (US state outlines) below come from the exercise environment.
# Import figure from bokeh.plotting
from bokeh.plotting import figure
# Create a figure with x_axis_type="datetime": p
p = figure(x_axis_type='datetime', x_axis_label='Date', y_axis_label='US Dollars')
# Plot date along the x axis and price along the y axis
p.line(date, price)
# Specify the name of the output file and show the result
output_file('line.html')
show(p)
# + id="vI7mXeLocTmq" colab_type="code" colab={}
# TASK 4
# Import figure from bokeh.plotting
from bokeh.plotting import figure
# Create a figure with x_axis_type='datetime': p
p = figure(x_axis_type='datetime', x_axis_label='Date', y_axis_label='US Dollars')
# Plot date along the x-axis and price along the y-axis
p.line(date, price)
# With date on the x-axis and price on the y-axis, add a white circle glyph of size 4
p.circle(date, price, fill_color='white', size=4)
# Specify the name of the output file and show the result
output_file('line.html')
show(p)
# + id="ibeXqVQEcqVo" colab_type="code" colab={}
# TASK 5
# Create a list of az_lons, co_lons, nm_lons and ut_lons: x
x = [az_lons, co_lons, nm_lons, ut_lons]
# Create a list of az_lats, co_lats, nm_lats and ut_lats: y
y = [az_lats, co_lats, nm_lats, ut_lats]
# Add patches to figure p with line_color=white for x and y
p.patches(x, y, line_color='white')
# Specify the name of the output file and show the result
show(p)
# + id="DOJYXEIYcwVQ" colab_type="code" colab={}
# + [markdown] id="wmcVuOcDf_fq" colab_type="text"
# ## Data Formats
# ### basic types
# + id="e5qFmZyMgBbQ" colab_type="code" colab={}
# in previous examples
# + [markdown] id="Hi7Tt3vsgGRS" colab_type="text"
# ### NumPy arrays
# + id="wqjb_yulgO1Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 347} outputId="404b5dfa-8f5d-40a3-998d-36cc7b4aa8ab"
import numpy as np
output_notebook()
x = np.linspace(0, 10, 1000) # 1000 values from 0 to 10
y = np.sin(x) + np.random.random(1000) * 0.2 # sine wave + uniform noise
plot = figure(plot_width=400, plot_height=300)
plot.line(x, y)
show(plot)
# + [markdown] id="zfWE7a5Eg-PN" colab_type="text"
# ### Pandas
#
# + id="XA46S3f_hC3w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 347} outputId="67ec58a9-0ebb-4df2-a24e-c780e658a6c4"
# flowers is a Pandas DataFrame (bundled iris sample data); columns can
# be passed to glyph methods directly.
from bokeh.sampledata.iris import flowers
output_notebook()
plot = figure(plot_width=400, plot_height=300)
plot.circle(flowers['petal_length'],
            flowers['sepal_length'],
            size=5)
show(plot)
# + [markdown] id="AHJb9XtojEQb" colab_type="text"
# ### Column Data Source (!)
# - common fundamental data structure for Bokeh
# - maps string names to sequences of data
# - often created automatically
# - can be shared between glyphs to link selections
# - +extra columns with hover tooltips
# + id="DBEr9RyFhcoh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="93038b7f-7af7-4ade-c92f-5520ae925f25"
from bokeh.models import ColumnDataSource
# Build a ColumnDataSource by hand: each key becomes a named column.
source = ColumnDataSource(data={
    'x': [1, 2, 3, 4, 5],
    'y': [8, 6, 5, 2, 3]}) # MUST BE THE SAME LENGTH
source.data
# + id="GRW9i5eakLtL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="05cc7bc3-4a97-44b6-b584-19dac843dd88"
from bokeh.sampledata.iris import flowers as df
df.head()
# + id="IIZt-8-GkWvJ" colab_type="code" colab={}
# A DataFrame converts directly; column names become source columns.
source = ColumnDataSource(df) # now can be passed to any glyph method
# + id="QSJhXv7InKV8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 347} outputId="edec209a-bbeb-40dd-bab7-e327fd40915d"
# TASK 6
output_notebook()
# Create array using np.linspace: x
x = np.linspace(0, 5, 100)
# Create array using np.cos: y
y = np.cos(x)
# Add circles at x and y
p = figure(plot_width=400, plot_height=300)
p.circle(x, y)
# Specify the name of the output file and show the result
show(p)
# + id="SOQ-b4YFnXj5" colab_type="code" colab={}
# TASK 7
import pandas as pd
# Read in the CSV file: df
df = pd.read_csv('auto.csv')
# Import figure from bokeh.plotting
from bokeh.plotting import figure
# Create the figure: p
p = figure(x_axis_label='HP', y_axis_label='MPG')
# Plot mpg vs hp by color
p.circle(df.hp, df.mpg, color=df.color, size=10)
# Specify the name of the output file and show the result
output_file('auto-df.html')
show(p)
# + id="X4JUfqNdrPfb" colab_type="code" colab={}
# TASK 8
# NOTE(review): this cell assumes df has 'Year'/'Time'/'color' columns (the
# exercise's sprint dataset), not the auto.csv frame read above — confirm.
# Import the ColumnDataSource class from bokeh.plotting
from bokeh.plotting import ColumnDataSource
# Create a ColumnDataSource from df: source
source = ColumnDataSource(df)
# Add circle glyphs to the figure p
p.circle('Year', 'Time', color='color', size=8,source=source)
# Specify the name of the output file and show the result
output_file('sprint.html')
show(p)
# + [markdown] id="lRHlXQV1rOK7" colab_type="text"
# ## Customizing Glyphs
# ### Selection appearance
# + id="y33zDZVPtsA5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 347} outputId="f6377052-a256-4730-fef6-ff1dda471cca"
output_notebook()
# Selected points keep full color; unselected ones fade to grey.
plot = figure(tools='box_select, lasso_select', plot_width=400, plot_height=300)
plot.circle(flowers.petal_length, flowers.sepal_length,
            selection_color='red',
            nonselection_fill_alpha=0.2,
            nonselection_fill_color='grey')
show(plot)
# + [markdown] id="6as66WdFuplf" colab_type="text"
# ### Hover appearance
# + id="f2yOolEiuWOe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 347} outputId="376cf5db-cfd3-4a7e-87ec-56fccbf0e106"
from bokeh.models import HoverTool
output_notebook()
# tooltips=None: no popup; hover only restyles glyphs under the cursor line.
hover = HoverTool(tooltips=None, mode='hline') # horiz line
plot = figure(tools=[hover, 'crosshair'], plot_width=400, plot_height=300)
x = np.linspace(0, 10, 1000) # 1000 values from 0 to 10
y = np.sin(x) + np.random.random(1000) * 0.2 # + noise
plot.circle(x, y, size=5, hover_color='red')
show(plot)
# + [markdown] id="IEQsGBLGvY4c" colab_type="text"
# ### Color mapping
# + id="l_ySswZKvWNa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 403} outputId="32287687-df3e-46a7-8460-3cde47650b70"
from bokeh.models import CategoricalColorMapper
output_notebook()
mapper = CategoricalColorMapper(factors=['setosa', 'virginica', 'versicolor'],
                                palette=['red','green', 'blue'])
plot = figure(x_axis_label='petal_length',
              y_axis_label='sepal_length',
              plot_width=400, plot_height=300)
# `source` is the iris ColumnDataSource built earlier; the mapper colors
# each point by its 'species' column value.
plot.circle('petal_length', 'sepal_length',
            size=5, source=source,
            color={'field': 'species',
                   'transform': mapper})
show(plot)
# + id="UM78mROcyPOT" colab_type="code" colab={}
# TASK 9
# NOTE(review): `source` here is the exercise's sprint-data ColumnDataSource.
# Create a figure with the "box_select" tool: p
p = figure(x_axis_label='Year', y_axis_label='Time', tools='box_select')
# Add circle glyphs to the figure p with the selected and non-selected properties
p.circle('Year', 'Time', source=source, selection_color='red', nonselection_alpha=0.1)
# Specify the name of the output file and show the result
output_file('selection_glyph.html')
show(p)
# + id="egSx3gIMyR0I" colab_type="code" colab={}
# TASK 10
# import the HoverTool
from bokeh.models import HoverTool
# Add circle glyphs to figure p
p.circle(x, y, size=10,
         fill_color='grey', alpha=0.1, line_color=None,
         hover_fill_color='firebrick', hover_alpha=0.5,
         hover_line_color='white')
# Create a HoverTool: hover
hover = HoverTool(tooltips=None, mode='vline')
# Add the hover tool to the figure p
p.add_tools(hover)
# Specify the name of the output file and show the result
output_file('hover_glyph.html')
show(p)
# + id="qVkFHIpNz6o9" colab_type="code" colab={}
# TASK 11
#Import CategoricalColorMapper from bokeh.models
from bokeh.models import CategoricalColorMapper
# Convert df to a ColumnDataSource: source
source = ColumnDataSource(df)
# Make a CategoricalColorMapper object: color_mapper
color_mapper = CategoricalColorMapper(factors=['Europe', 'Asia', 'US'],
                                      palette=['red', 'green', 'blue'])
# Add a circle glyph to the figure p; points are colored by car origin.
p.circle('weight', 'mpg', source=source,
         color=dict(field='origin', transform=color_mapper),
         legend='origin')
# Specify the name of the output file and show the result
output_file('colormap.html')
show(p)
s' % (str(cluster), str(np.sum(mask))))
indices = np.arange(len(raw_file))[mask]
raw_samples = raw_file[:, :, :-4][mask]
indices, raw_samples = shuffle(indices, raw_samples)
for i, sample, ax in zip(indices, raw_samples, axes):
ax.plot(sample)
ax.set_title('Sample: %s' % str(i))
plt.legend(columns)
plt.show()
# -------- Plot Dimensions Individually --------
if plot_each_dimension:
for i in range(len(columns[:-3])):
print(columns[i])
for cluster in range(num_clusters):
mask = y_pred == cluster
fig, axes = plt.subplots(1,num_sub_plots, subplot_kw={'xticks': (), 'yticks': ()}, figsize=(15,3))
axes[0].set_ylabel('Cluster-%s: %s' % (str(cluster), str(np.sum(mask))))
indices = np.arange(len(raw_file))[mask]
dimension_data = raw_file[:, :, i][mask]
indices, dimension_data = shuffle(indices, dimension_data)
for j, sample, ax in zip(indices, dimension_data, axes):
ax.set_ylim([np.min(dimension_data),np.max(dimension_data)]) # TODO: Make this min/max of the selected feature
ax.plot(sample if i < 3 else sample[0::25])
ax.set_title('Sample: %s' % str(j))
plt.show()
scores = process_results(dataset_filename, eval_prefixes, score_keys, score_keys_filter, verbose=False)
plot_scores(score_keys_filter, window_lengths, sub_sample_lengths, scores)
plot_random_samples(dataset_filename, 'classical_features', 300, 150, 9, input_columns)
# TODO: Write labels as text next to the plot
| 15,323 |
/.ipynb_checkpoints/mission_simulations-checkpoint.ipynb | 5c4eb028de4b3c1ab69b4305b5c6ac787c0a0433 | [] | no_license | bonarl/Mission-to-Mars | https://github.com/bonarl/Mission-to-Mars | 5 | 2 | null | null | null | null | Jupyter Notebook | false | false | .py | 6,937 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Importing libraries (useful things we'll use in the code later)
# %matplotlib inline
from matplotlib import pyplot as plt
from matplotlib import patches
from matplotlib import animation, rc
import matplotlib
from IPython.display import HTML
import numpy as np
from scipy.integrate import solve_ivp
# Pull in the simulation helpers (rho, gravity_turn, hohmann_transfer, the
# animation functions, ...) defined in the companion notebook.
# %run mars_functions.ipynb
# ## Atmospheric Model
#Exponential model of air density as function of radius
hs = np.linspace(0,100,100)  # altitudes 0-100 km
# rho() expects radius from Earth's center: 6370e3 m (Earth radius) + altitude.
rhos = rho(6370e3+hs*1e3)
plt.plot(rhos,hs)
plt.title("Air Density"),plt.xlabel("$\\rho$ (kg/m^3)"),plt.ylabel("h (km)")
plt.show()
# ## Gravity-turn launch simulation
# The code below will setup and run a simulation of a gravity-turn launch trajectory. The gravity-turn is a type of launch in which a rocket is initially launched vertically to quickly gain altitude, but then pitches over slightly, usually by gimballing the rocket engine. After this small manoeuvre, the rocket will then naturally follow a curved path due to gravity, allowing the rocket to gain horizontal speed and build up the required orbital speed. In our simulation, we have launched vertically and are travelling at 252 m/s when we pitchover to the angle, which you will decide.
#
# The simulation is contained within the "gravity_turn()" function. The inputs to this are $\Delta v$, pitchover angle, and tf - how long we want to run the simulation for (in simulation time, not how long the code will take to run). These inputs are given values in the first few lines of the code, and you can change the numbers here. $\Delta v$ determines how much fuel we need to add, according to the rocket equation. You should first try your answer to Q1a) from the worksheet for $\Delta v$, and thenexperiment with different pitchover angles. After running the cell (press ctrl-enter) plots of the trajectory are displayed, and (after waiting a while) an animation will display below that.
#
# How do different pitchover angles affect the trajectory?
#
# This simulation considers *drag*, given by $D = \frac{1}{2}\rho C_D A v^2$ and using the parameters given below. When is drag at its highest? When can we stop worrying about it? (Check the plot of $\rho$ against altitude above). How will this affect the $\Delta v$ required to get into orbit?
#
# Because of losses to gravity (due to our initial vertical trajectory) and drag, our actual $\Delta v$ requirement is __13.45 km/s__. Try running the simulation with this value, and adjust the pitchover angle to see if you can get into orbit. How close to a perfectly circular orbit can you get? (_HINT: stick to values between 0 and 2$^\circ$)_
#
# With this $\Delta v$, what happens if the pitchover angle is too small? What happens if it's too large?
# +
#inputs: change these numbers
deltav = 10e3  #target delta-v, m/s
pitchover_angle=0  #deg
tf = 10000  #simulation duration, s
#Rocket parameters (don't change these at first)
Cd = 0.75 #coefficient of drag
A = 1 #cross sectional area m^2
ISP = 273.0 #specific impulse s
ttw=1.4 #thrust-to-weight during launch
m_dry = 5000 #dry-mass kg
# Tsiolkovsky rocket equation: fuelled mass needed for the requested delta-v.
m_wet = m_dry*np.exp(deltav/(ISP*9.81)) #wet-mass kg
#run the simulation and plot the results
# gravity_turn/animate_trajectory come from mars_functions.ipynb (%run above).
sol=gravity_turn(deltav,pitchover_angle,tf)
animate_trajectory(sol)
# -
# ## Hohmann Transfer Simulation
# Now that we are in LEO, we have been refueled and are ready to head to Mars. We've already performed a manoeuvre giving us the velocity required to escape from the Earth's gravity well, so now need to perform a burn to put us onto a Hohmann transfer ellipse from the Earth's orbit to Mars'. In the code below, you can specify the $\Delta v_1$ for this burn, and the $\Delta v_2$ for Mars capture. You can also specify how far ahead Mars is in its orbit compared to Earth (essentially this is you choosing the date that we set off). Try putting in your answers from the worksheet and running the code to simulate the trajectory.
#
# Try adjust your $\Delta v$s to improve the transfer.
#
# The plots show the trajectories of Earth, Mars, and our spacecraft. The left plot shows the trajectory for $t_f$ days after the first manoeuvre, and the second plot for the same number of days after manoeuvre two. 259 days is the transfer time given in the worksheet, so stick with that for now. $\Delta v_1$ is initially 0, this means that the spacecraft is travelling around the Sun at the same speed as the Earth - so the plots just show some circles. Try experimenting with different values of $\Delta v$ for both manoeuvres to see how the trajectory changes (stick to the range of a few km/s). What happens when the $\Delta v$ is negative?
# +
#input parameters
dv1 = 0 #m/s, burn onto the transfer ellipse
dv2 = 0 #m/s, Mars-capture burn
mars_leading_angle=0 #deg, how far ahead Mars is at departure
tf = 259 #days, ~Earth-to-Mars Hohmann transfer time (see markdown above)
# hohmann_transfer/animate_hohmann come from mars_functions.ipynb (%run earlier).
full_sol,ts=hohmann_transfer(dv1,dv2,tf,mars_leading_angle)
animate_hohmann(full_sol,ts)
| 5,474 |
/PracticasTDI/P6/.ipynb_checkpoints/Practica6-checkpoint.ipynb | b25b5761e138c03b1f369c63ccbe2e12749ed59e | [] | no_license | RoboticsLabURJC/2019-tfg-ana-cuevas | https://github.com/RoboticsLabURJC/2019-tfg-ana-cuevas | 0 | 0 | null | 2021-09-08T06:49:20 | 2021-09-08T06:48:38 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 1,608,129 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: mogpvenv-3.7.3
# language: python
# name: mogpvenv-3.7.3
# ---
# # Evaluating robustness of cluster assignments with sparse datasets (Figure 3)
#
# This notebook generates barplots for prediction and interpolation (sparsity) experiments - uses calculated error from [this script](sparsity_prediction_process.py)
# +
# %matplotlib inline
from IPython.core.interactiveshell import InteractiveShell
# Display every bare expression in a cell, not just the last one.
InteractiveShell.ast_node_interactivity = "all"
# %load_ext autoreload
# %autoreload 2
# +
from analysis_utils import *
import joblib
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
from statannot import add_stat_annotation
import seaborn as sns
from scipy.stats import wilcoxon
# Global figure theme for all plots in this notebook.
sns.set(font_scale=2.5, style="white", color_codes=False)
# +
def process_err_df(mod_obj):
    """Convert a dict of ModelSum objects to a long-format error DataFrame.

    Parameters
    ----------
    mod_obj : dict
        Maps a key to an object with attributes ``err`` (sequence of error
        values), ``task_num``, ``seed``, ``mod_type`` and ``best_ll``.

    Returns
    -------
    pd.DataFrame
        One row per non-NaN error value, with columns
        ['err', 'task_num', 'seed', 'type', 'best_ll'].
    """
    frames = []
    for mod in mod_obj.values():
        # Drop NaN errors (entries with no held-out evaluation points).
        curdf = pd.DataFrame(mod.err, columns=['err']).dropna()
        curdf['task_num'] = mod.task_num
        curdf['seed'] = mod.seed
        curdf['type'] = mod.mod_type
        curdf['best_ll'] = mod.best_ll
        frames.append(curdf)
    if not frames:
        return pd.DataFrame()
    # pd.concat replaces DataFrame.append (removed in pandas 2.0) and avoids
    # the quadratic cost of appending inside the loop.
    return pd.concat(frames)
def process_num_clust(mod_obj):
    """Summarize cluster count and log-likelihood of each model.

    Parameters
    ----------
    mod_obj : dict
        Maps a key to an object with attributes ``num_clust``, ``best_ll``,
        ``task_num``, ``seed`` and ``mod_type``.

    Returns
    -------
    pd.DataFrame
        One row per model (indexed by the dict key) with columns
        ['num_clust', 'best_ll', 'neg_best_ll', 'task_num', 'seed', 'type'].
    """
    cols = ['num_clust', 'best_ll', 'neg_best_ll', 'task_num', 'seed', 'type']
    rows = {
        key: {
            'num_clust': mod.num_clust,
            'best_ll': mod.best_ll,
            # Negated log-likelihood, kept for plots that want "lower=better".
            'neg_best_ll': -mod.best_ll,
            'task_num': mod.task_num,
            'seed': mod.seed,
            'type': mod.mod_type,
        }
        for key, mod in mod_obj.items()
    }
    # Building all rows at once avoids the O(n^2) cost of per-row .loc
    # inserts; reindex keeps the column order stable even for empty input.
    return pd.DataFrame.from_dict(rows, orient='index').reindex(columns=cols)
def gen_err_merge(mod_obj_dict):
    """For best MAP seeds, merge per-experiment error and cluster-count tables.

    Parameters
    ----------
    mod_obj_dict : dict
        Maps an experiment key to a dict of ModelSum objects (the structure
        consumed by ``process_err_df`` / ``process_num_clust``).

    Returns
    -------
    tuple of pd.DataFrame
        ``(df_err_merge, df_cn_merge)``: stacked error rows and stacked
        cluster-number rows across all experiments.
    """
    err_frames = []
    cn_frames = []
    for mod_obj in mod_obj_dict.values():
        cn_frames.append(process_num_clust(mod_obj))
        err_frames.append(process_err_df(mod_obj))
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0;
    # guard the empty case since concat([]) raises.
    df_err_merge = pd.concat(err_frames) if err_frames else pd.DataFrame()
    df_cn_merge = pd.concat(cn_frames) if cn_frames else pd.DataFrame()
    return (df_err_merge, df_cn_merge)
# -
def plot_bars_cn(ax, df_err_merge, err_col, plot_title, legend_labels, axtitle=None, ylim=None, col_palette=None):
    """Draw a grouped bar plot of cluster counts per task on *ax*.

    Maps raw model-type codes to display names via *legend_labels*, draws
    the seaborn barplot, styles the axis, and strips the per-axes legend so
    the caller can place one shared figure legend.

    Returns
    -------
    tuple
        ``(ax, handles, labels)`` — the axis plus the captured legend
        entries for external legend placement.
    """
    plot_df = df_err_merge.copy()
    plot_df['type'] = plot_df['type'].map(legend_labels)
    sns.barplot(ax=ax, x='task_num', y=err_col, hue='type', data=plot_df,
                palette=col_palette, edgecolor=".2", linewidth=2.5,
                saturation=1)
    sns.despine(top=True)
    ax.set(xlabel=plot_title, ylabel='Number of Clusters')
    if ylim is not None:
        ax.set_ylim(ylim)
    ax.set_title(axtitle, loc='left', pad=60, fontweight="bold")
    # Capture the legend entries, then remove the per-axes legend.
    handles, labels = ax.get_legend_handles_labels()
    ax.get_legend().remove()
    return ax, handles, labels
def calc_wilcoxon(df, box_pairs):
    """Run one-sided Wilcoxon signed-rank tests for each pair of boxes.

    Each entry of *box_pairs* is ``((task_num_a, type_a), (task_num_b,
    type_b))``; the test checks whether group a's errors are stochastically
    smaller than group b's (``alternative='less'``).

    Returns
    -------
    list of str
        One ``'p=...'`` annotation string (scientific notation) per pair.
    """
    def _group_errs(task_num, mod_type):
        # Select the error column for one (task, model-type) group.
        mask = (df['task_num'] == task_num) & (df['type'] == mod_type)
        return df.loc[mask, 'err']

    annotations = []
    for first, second in box_pairs:
        _, pval = wilcoxon(_group_errs(*first), _group_errs(*second), alternative='less')
        annotations.append('p={:.2e}'.format(pval))
    return annotations
def plot_bars_task(ax, df_err_merge, err_col, plot_title, legend_labels, axtitle=None, ylim=None, yticks=None, col_palette=None, axpad=105):
    """Draw a per-task boxplot of errors with Wilcoxon significance annotations.

    Boxes are grouped by task_num and colored by model type (renamed via
    *legend_labels*). For each task, the first model type is compared
    against every other type with a one-sided Wilcoxon test, and the
    precomputed p-values are written above the plot by statannot.
    Returns (ax, handles, labels) so the caller can build one shared legend.
    """
    df_err_merge = df_err_merge.copy()
    df_err_merge['type'] = df_err_merge['type'].map(legend_labels)
    # Earlier barplot version kept for reference; boxplot shows the spread.
    # sns.barplot(ax=ax, x='task_num', y=err_col, hue='type',data=df_err_merge, palette=col_palette, edgecolor=".2", linewidth=2.5, saturation=1)
    sns.boxplot(ax=ax, x='task_num', y=err_col, hue='type',data=df_err_merge, palette=col_palette,linewidth=2.5, saturation=1)
    sns.despine(top=True)
    ax.set_xlabel(plot_title)
    ax.set_ylabel('Error \n(Sq. Rt. ALSFRS-R)', fontsize=30)
    if ylim is not None:
        ax.set_ylim(ylim)
    if yticks is not None:
        ax.set_yticks(yticks)
    # Compare the first model type (reference, e.g. MoGP) against every other
    # type, within each task value.
    kernel_types = list(df_err_merge['type'].dropna().unique())
    box_pairs = []
    for kern_i in range(1, len(kernel_types)):
        box_pairs = box_pairs + [((x, kernel_types[0]), (x, kernel_types[kern_i])) for x in df_err_merge['task_num'].unique()]
    # Precompute one-sided p-values so the annotation text is exact, then
    # let statannot place the brackets/labels above the boxes.
    cust_annot = calc_wilcoxon(df_err_merge, box_pairs)
    test_results = add_stat_annotation(ax, x='task_num', y=err_col, hue='type', data=df_err_merge,
                                    box_pairs=box_pairs, text_annot_custom = cust_annot,
                                    test='Wilcoxon', stats_params={'alternative':'less'}, fontsize=20,
                                    loc='outside', verbose=2, comparisons_correction=None)
    ax.set_title(axtitle, loc='left', pad=axpad, fontweight="bold")
    # Capture legend entries, then drop the per-axes legend (shared legend
    # is placed by the caller).
    handles, labels = ax.get_legend_handles_labels()
    ax.get_legend().remove()
    return ax, handles, labels
# +
# Load all data matrices
# err_store_dict keys look like 'ceft_sparse' / 'proact_predict'; each value
# maps a model type ('rbf', 'slope', ...) to a pickled dict of ModelSum
# objects produced by sparsity_prediction_process.py.
exp_path = Path('data/model_data/2_sparsity_prediction')
# base_mods = ['rbf', 'slope', 'linear']
base_mods = ['rbf', 'slope', 'linear', 'sigmoid', 'gp', 'quad', 'lme']
file_ext_dict = {'prediction': 'predict', 'sparsity':'sparse'}
err_store_dict = {}
for cur_exp in ['prediction', 'sparsity']:
    rmse_err_path = exp_path / cur_exp / 'results' / 'rmse'
    for cur_proj in ['ceft', 'proact']:
        tmp_store_dict = {}
        for cur_mod_type in base_mods: #change to rbf, linear
            tmp_store_dict[cur_mod_type]=joblib.load(rmse_err_path / '{}_{}_{}_rmse_err.pkl'.format(file_ext_dict[cur_exp], cur_proj, cur_mod_type))
        err_store_dict['{}_{}'.format(cur_proj, file_ext_dict[cur_exp])] = tmp_store_dict
# +
# Merge per-experiment error and cluster-number tables for the best MAP seeds.
ceft_sparse_err_merge, ceft_sparse_cn_merge = gen_err_merge(err_store_dict['ceft_sparse'])
ceft_pred_err_merge, ceft_pred_cn_merge = gen_err_merge(err_store_dict['ceft_predict'])
proact_sparse_err_merge, proact_sparse_cn_merge = gen_err_merge(err_store_dict['proact_sparse'])
proact_pred_err_merge, proact_pred_cn_merge = gen_err_merge(err_store_dict['proact_predict'])
# +
# Tag each table with its experiment name, then stack them into the
# Figure 3 source-data table and write it out.
ceft_sparse_err_merge['experiment'] = 'ceft_sparse'
ceft_pred_err_merge['experiment'] = 'ceft_predict'
proact_sparse_err_merge['experiment'] = 'proact_sparse'
proact_pred_err_merge['experiment'] = 'proact_predict'
# pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
df_source = pd.concat([ceft_sparse_err_merge, ceft_pred_err_merge,
                       proact_sparse_err_merge, proact_pred_err_merge])
df_source = df_source[['experiment', 'task_num', 'type', 'seed', 'best_ll', 'err']]
df_source.to_csv('reports/fig_3_source_data.csv', index=False)
# -
# # Figure 3: Boxplots for prediction/interpolation - PROACT & CEFTRIAXONE
# +
# Formatting params
parameters = {'axes.labelsize': 35,
              'axes.titlesize':40,
              'xtick.labelsize':30,
              'ytick.labelsize':30,
              'legend.fontsize':35,
              'font.size': 10,
              'figure.subplot.hspace':0.2,
              'figure.subplot.wspace':0.2,
              'legend.frameon':False,
              'pdf.fonttype':42}  # embed TrueType fonts so PDF text stays editable
plt.rcParams.update(parameters)
col_palette = ['#176D9C', '#DBA137','#86AF49' ,'#df473c','#ae3c60', '#82b4bb', '#ed820e']
legend_labels={'rbf': 'Mixture of Gaussian Processes Model (MoGP)', 'linear':'Linear Kernel Model (LKM)', 'slope':'Slope Model (SM)'}
legend_labels_cn={'rbf': 'Mixture of Gaussian Processes Model (MoGP)', 'linear':'Linear Kernel Model (LKM)'}
# +
# Square Root Version
# err_norm = sqrt(err) so the y-axis reads in ALSFRS-R units
# (matches the 'Sq. Rt. ALSFRS-R' axis label in plot_bars_task).
yticks_5 = [0,1,2,3,4,5]
yticks_10 = [0,1,2,3,4,5,6]  # NOTE(review): named "_10" but tops out at 6 — confirm intent
proact_sparse_err_merge['err_norm']=np.sqrt(proact_sparse_err_merge['err'])
proact_pred_err_merge['err_norm']=np.sqrt(proact_pred_err_merge['err'])
ceft_sparse_err_merge['err_norm']=np.sqrt(ceft_sparse_err_merge['err'])
ceft_pred_err_merge['err_norm']=np.sqrt(ceft_pred_err_merge['err'])
# 2x2 panel: rows = dataset (PRO-ACT / CEFT), cols = interpolation / prediction.
fig, axs = plt.subplots(2, 2, gridspec_kw={'width_ratios': [1, 5/3], 'wspace':0.2, 'hspace':0.9}, figsize=(30, 15))
curax_1, handles, labels = plot_bars_task(axs.flat[0], proact_sparse_err_merge, 'err_norm', 'Percent Included Training Data', axtitle='PRO-ACT: Interpolation', ylim=[0, 5], yticks=yticks_5, legend_labels=legend_labels, col_palette=col_palette)
# Single shared legend placed above the first panel.
_ = curax_1.legend(handles, labels, frameon=False, loc='lower left', bbox_to_anchor=(-0.1, 1.5), ncol=3, fontsize=30)
curax_2, handles, labels = plot_bars_task(axs.flat[1], proact_pred_err_merge, 'err_norm', 'Number of Years Training Data', axtitle='PRO-ACT: Prediction', ylim=[0, 6], yticks=yticks_10, legend_labels=legend_labels, col_palette=col_palette)
_, _, _ = plot_bars_task(axs.flat[2], ceft_sparse_err_merge, 'err_norm', 'Percent Included Training Data', axtitle='CEFT: Interpolation', ylim=[0, 5], yticks=yticks_5, legend_labels=legend_labels, col_palette=col_palette)
_, _, _ = plot_bars_task(axs.flat[3], ceft_pred_err_merge, 'err_norm', 'Number of Years Training Data', axtitle='CEFT: Prediction', ylim=[0, 6], yticks=yticks_10, legend_labels=legend_labels, col_palette=col_palette)
# Save figure
fig.savefig('reports/fig_mogp_pred_sparse_boxplot.pdf', bbox_inches='tight')
# -
# View N at timepoints
# Per-group sample counts; the #A-#D letters match the Figure 3 panels.
# Bare expressions display because ast_node_interactivity='all' is set above.
proact_sparse_err_merge.groupby(['task_num', 'seed', 'type']).size() #A
proact_pred_err_merge.groupby(['task_num', 'seed', 'type']).size() #B
ceft_sparse_err_merge.groupby(['task_num', 'seed', 'type']).size() #C
ceft_pred_err_merge.groupby(['task_num', 'seed', 'type']).size() #D
# ## Supplement Figure 2: Number of clusters in MAP for prediction/interpolation models
# +
# Number of clusters in the MAP model for each prediction/interpolation task.
col_palette_cn = ['#176D9C', '#86AF49'] #Set colors to match prior figure
ylim_sparse_pro = [0,150]
ylim_pred_pro = [0,150]
ylim_sparse_cef = [0,50]
ylim_pred_cef = [0,50]
fig, axs = plt.subplots(2, 2, gridspec_kw={'width_ratios': [1, 5/3], 'wspace': 0.2, 'hspace': 0.6}, figsize=(30, 15))
curax, handles, labels = plot_bars_cn(axs.flat[0], proact_sparse_cn_merge, 'num_clust', 'Percent Included Training Data', legend_labels=legend_labels_cn, axtitle='PRO-ACT: Interpolation', ylim=ylim_sparse_pro, col_palette=col_palette_cn)
# Shared legend above the first panel.
_ = curax.legend(handles, labels, loc='lower left', bbox_to_anchor=(-0.1, 1.3), ncol=2)
curax, handles, labels = plot_bars_cn(axs.flat[1], proact_pred_cn_merge, 'num_clust', 'Number of Years Training Data', legend_labels=legend_labels_cn, axtitle='PRO-ACT: Prediction', ylim=ylim_pred_pro, col_palette=col_palette_cn)
_, _, _ = plot_bars_cn(axs.flat[2], ceft_sparse_cn_merge, 'num_clust', 'Percent Included Training Data', legend_labels=legend_labels_cn, axtitle='CEFT: Interpolation', ylim=ylim_sparse_cef, col_palette=col_palette_cn)
_, _, _ = plot_bars_cn(axs.flat[3], ceft_pred_cn_merge, 'num_clust', 'Number of Years Training Data', legend_labels=legend_labels_cn, axtitle='CEFT: Prediction', ylim=ylim_pred_cef, col_palette=col_palette_cn)
# Save figure
fig.savefig('reports/supp_fig_mogp_pred_sparse_clust_num.pdf', bbox_inches='tight')
# -
# ## Dataframes for prediction/interpolation experiments
#
# Note: Minor variances expected with subsequent runs in GPy predictions
# +
# Mean error per (model type, task); bare expressions display because
# ast_node_interactivity='all' is set in the notebook setup cell.
print('proact-sparsity')
proact_sparse_err_merge.groupby(['type', 'task_num']).mean().round(2)
print('ceft-sparsity')
ceft_sparse_err_merge.groupby(['type', 'task_num']).mean().round(2)
print('difference-sparsity')
# MoGP ('rbf') minus slope-model mean error: negative = MoGP better.
diff_calc = proact_sparse_err_merge.groupby(['type', 'task_num']).mean().loc['rbf']-proact_sparse_err_merge.groupby(['type', 'task_num']).mean().loc['slope']
diff_calc.round(2)
# Dataframes for prediction experiments
print('proact-pred')
proact_pred_err_merge.groupby(['type', 'task_num']).mean().round(2)
print('ceft-pred')
ceft_pred_err_merge.groupby(['type', 'task_num']).mean().round(2)
print('difference-pred')
diff_calc = proact_pred_err_merge.groupby(['type', 'task_num']).mean().loc['rbf']-proact_pred_err_merge.groupby(['type', 'task_num']).mean().loc['slope']
diff_calc.round(2)
# -
# ## Supplement: Vary alpha for prediction results
rmse_err_path = Path('data/model_data/2_sparsity_prediction/prediction/results/rmse')
# +
# Load all data matrices
# Sensitivity analysis: re-load CEFT prediction results fit with different
# Dirichlet-process alpha multipliers (filenames carry the alpha suffix).
# exp_path = Path('data/model_data/2_sparsity_prediction')
base_mods = ['rbf', 'linear']
alph_list = ['0.1', '0.5', '2.0', '10.0']
file_ext_dict = {'prediction':'predict'}
err_store_dict = {}
cur_exp = 'prediction'
cur_proj = 'ceft'
for cur_alpha in alph_list:
    tmp_store_dict = {}
    for cur_mod_type in base_mods: #change to rbf, linear
        tmp_store_dict[cur_mod_type]=joblib.load(rmse_err_path / '{}_{}_{}_{}_rmse_err.pkl'.format(file_ext_dict[cur_exp], cur_proj, cur_mod_type, cur_alpha))
    err_store_dict['{}_{}_alpha{}'.format(cur_proj, file_ext_dict[cur_exp], cur_alpha)] = tmp_store_dict
# +
col_palette_cn = ['#176D9C', '#86AF49'] #Set colors to match prior figure
# Plot Figure: one panel per alpha value; `fig` shows error, `fig_cn`
# shows the MAP cluster counts.
ylim_pred_cef = [0,50]
yticks_10 = [0, 2, 4, 6, 8, 10]
fig, axs = plt.subplots(2, 2, gridspec_kw={'wspace': 0.2, 'hspace': 0.9}, figsize=(30, 15))
fig_cn, axs_cn = plt.subplots(2, 2, gridspec_kw={'wspace': 0.2, 'hspace': 0.9}, figsize=(30, 15))
for j, cur_alpha in enumerate(alph_list):
    a_err_merge, a_sparse_cn_merge = gen_err_merge(err_store_dict['ceft_predict_alpha{}'.format(cur_alpha)])
    _, _, _ = plot_bars_task(axs.flat[j], a_err_merge, 'err', 'Percent Included Training Data', axtitle='CEFT (Alpha: {}x)'.format(cur_alpha), ylim=[0, 11], yticks=yticks_10, legend_labels=legend_labels, col_palette=col_palette_cn)
    _, handles, labels = plot_bars_cn(axs_cn.flat[j], a_sparse_cn_merge, 'num_clust', 'Percent Included Training Data', legend_labels=legend_labels_cn, axtitle='CEFT (Alpha: {}x)'.format(cur_alpha), ylim=ylim_pred_cef, col_palette=col_palette_cn)
# Shared legend uses the handles/labels captured from the last panel.
_ = axs.flat[0].legend(handles, labels, loc='lower left', bbox_to_anchor=(-0.1, 1.5), ncol=2)
# # Save figure
fig.savefig('reports/supp_fig_mogp_pred_varyalpha_err.pdf', bbox_inches='tight')
fig_cn.savefig('reports/supp_fig_mogp_pred_varyalpha_clustnum.pdf', bbox_inches='tight')
# -
# ## Supplement: Add extra baselines
legend_labels_extend={'rbf': 'Mixture of Gaussian Processes Model (MoGP)', 'linear':'Linear Kernel Model (LKM)', 'slope':'Slope Model (SM)',
'sigmoid':'Sigmoidal Model (SG)', 'lme': 'Linear Mixed Model (LME)', 'gp': 'Patient-specific GP (PGP)',
'quad': 'Quadratic Model (Q)'}
# +
# Plot Figure
yticks_12 = [0, 2, 4, 6, 8, 10, 12]
yticks_25 = [0,5,10,15,25]
yticks_6 = [0,1,2,3,4,5,6]
fig, axs = plt.subplots(2, 2, gridspec_kw={'width_ratios': [1, 5/3], 'wspace':0.2, 'hspace':1}, figsize=(30, 30))
curax_1, handles, labels = plot_bars_task(axs.flat[0], proact_sparse_err_merge, 'err_norm', 'Percent Included Training Data', axtitle='PRO-ACT: Interpolation', ylim=[0, max(yticks_12)], yticks=yticks_12, legend_labels=legend_labels_extend, col_palette=col_palette, axpad=375)
_ = curax_1.legend(handles, labels, frameon=False, loc='lower left', bbox_to_anchor=(-0.1, 1.75), ncol=3, fontsize=30, bbox_transform=curax_1.transAxes)
curax_2, handles, labels = plot_bars_task(axs.flat[1], proact_pred_err_merge, 'err_norm', 'Number of Years Training Data', axtitle='PRO-ACT: Prediction', ylim=[0, max(yticks_25)], yticks=yticks_25, legend_labels=legend_labels_extend, col_palette=col_palette, axpad=375)
_, _, _ = plot_bars_task(axs.flat[2], ceft_sparse_err_merge, 'err_norm', 'Percent Included Training Data', axtitle='CEFT: Interpolation', ylim=[0, max(yticks_6)], yticks=yticks_6, legend_labels=legend_labels_extend, col_palette=col_palette, axpad=375)
_, _, _ = plot_bars_task(axs.flat[3], ceft_pred_err_merge, 'err_norm', 'Number of Years Training Data', axtitle='CEFT: Prediction', ylim=[0, max(yticks_25)], yticks=yticks_25, legend_labels=legend_labels_extend, col_palette=col_palette, axpad=375)
# Save figure
fig.savefig('reports/supp_fig_mogp_pred_sparse_addl_baselines_boxplot.pdf', bbox_inches='tight')
# -
# ## Supplement: Additional datasets - AALS, EMORY, NATHIST
# +
# Load all data matrices
exp_path = Path('data/model_data/2_sparsity_prediction')
base_mods = ['rbf', 'slope', 'linear']
file_ext_dict = {'prediction': 'predict', 'sparsity':'sparse'}
err_store_dict = {}
for cur_exp in ['prediction']:
rmse_err_path = exp_path / cur_exp / 'results' / 'rmse'
for cur_proj in ['aals', 'emory', 'nathist']:
tmp_store_dict = {}
for cur_mod_type in base_mods: #change to rbf, linear
tmp_store_dict[cur_mod_type]=joblib.load(rmse_err_path / '{}_{}_{}_rmse_err.pkl'.format(file_ext_dict[cur_exp], cur_proj, cur_mod_type))
err_store_dict['{}_{}'.format(cur_proj, file_ext_dict[cur_exp])] = tmp_store_dict
for cur_exp in ['sparsity']:
rmse_err_path = exp_path / cur_exp / 'results' / 'rmse'
for cur_proj in ['nathist']:
tmp_store_dict = {}
for cur_mod_type in base_mods: #change to rbf, linear
tmp_store_dict[cur_mod_type]=joblib.load(rmse_err_path / '{}_{}_{}_rmse_err.pkl'.format(file_ext_dict[cur_exp], cur_proj, cur_mod_type))
err_store_dict['{}_{}'.format(cur_proj, file_ext_dict[cur_exp])] = tmp_store_dict
# -
aals_pred_err_merge, aals_pred_cn_merge = gen_err_merge(err_store_dict['aals_predict'])
emory_pred_err_merge, emory_pred_cn_merge = gen_err_merge(err_store_dict['emory_predict'])
nathist_pred_err_merge, nathist_pred_cn_merge = gen_err_merge(err_store_dict['nathist_predict'])
nathist_sparse_err_merge, nathist_sparse_cn_merge = gen_err_merge(err_store_dict['nathist_sparse'])
# +
ceft_pred_err_merge.groupby(['task_num', 'seed', 'type']).size()
proact_pred_err_merge.groupby(['task_num', 'seed', 'type']).size()
aals_pred_err_merge.groupby(['task_num', 'seed', 'type']).size()
emory_pred_err_merge.groupby(['task_num', 'seed', 'type']).size()
# +
label_projs = {'aals':'AALS', 'emory':'EMORY', 'nathist':'NATHIST'}
col_palette_cn = ['#176D9C', '#86AF49'] #Set colors to match prior figure
# Plot Figure
ylim_pred_cef = [0,50]
yticks_10 = [0, 2, 4, 6, 8, 10, 12]
fig, axs = plt.subplots(4, 2, gridspec_kw={'hspace': 0.9}, figsize=(40, 35))
# Predict plots
for j, cur_proj in enumerate(['aals', 'emory', 'nathist']):
a_err_merge, a_sparse_cn_merge = gen_err_merge(err_store_dict['{}_predict'.format(cur_proj)])
a_err_merge['err_norm']=np.sqrt(a_err_merge['err'])
_, handles, labels = plot_bars_task(axs[j,0], a_err_merge, 'err_norm', 'Percent Included Training Data', axtitle='{}: Prediction'.format(label_projs[cur_proj]), ylim=[0, 11], yticks=yticks_10, legend_labels=legend_labels, col_palette=col_palette)
_, _, _ = plot_bars_cn(axs[j,1], a_sparse_cn_merge, 'num_clust', 'Percent Included Training Data', legend_labels=legend_labels_cn, axtitle='', ylim=ylim_pred_cef, col_palette=col_palette_cn)
#Sparsity plots - nathist
nathist_sparse_err_merge['err_norm']=np.sqrt(nathist_sparse_err_merge['err'])
_, handles, labels = plot_bars_task(axs[-1,0], nathist_sparse_err_merge, 'err_norm', 'Percent Included Training Data', axtitle='{}: Interpolation'.format(label_projs[cur_proj]), ylim=[0, 11], yticks=yticks_10, legend_labels=legend_labels, col_palette=col_palette)
_, _, _ = plot_bars_cn(axs[-1,1], nathist_sparse_cn_merge, 'num_clust', 'Percent Included Training Data', legend_labels=legend_labels_cn, axtitle='', ylim=ylim_pred_cef, col_palette=col_palette_cn)
_ = axs.flat[0].legend(handles, labels, frameon=False, loc='lower left', bbox_to_anchor=(-0.1, 1.5), ncol=3, fontsize=30)
# # Save figure
fig.savefig('reports/supp_fig_mogp_pred_sparse_addlproj_box.pdf', bbox_inches='tight')
| 19,532 |
/마케팅 데이터 분석 /.ipynb_checkpoints/고객 데이터 분석-checkpoint.ipynb | d3698595e5ef07057c6015f6a5b84aa2fd7f36c1 | [] | no_license | Gracechung-sw/machineLearning-and-DataAnalyst-AtoZ | https://github.com/Gracechung-sw/machineLearning-and-DataAnalyst-AtoZ | 0 | 3 | null | null | null | null | Jupyter Notebook | false | false | .py | 227,640 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="e4etWCIL5vaX" colab_type="text"
# # Colab Stuff
# Pre-setup needed to be done for running the model on Google Colaboratory
# + id="4MoeiGpq5us-" colab_type="code" outputId="5e936362-1d52-46b1-9ab7-ab7963dc8e4a" colab={"base_uri": "https://localhost:8080/", "height": 587}
# !wget https://cdn.skillenza.com/files/5ee0a2e1-bf6d-4173-8244-e75fa2d7bbe2/training.5k.zip
# !wget https://cdn.skillenza.com/files/7da538a3-4db6-46ea-a4f8-87a21368e5f5/testing.40k.zip
# !wget https://cdn.skillenza.com/files/b8f97b6c-70a5-4f00-b748-9f9765c9b17e/sample.csv
# + id="9bJ8oiwm5yjH" colab_type="code" colab={}
# !unzip training.5k.zip
# !mv training/training/* training/
# !rm training/training -rf
# + [markdown] id="fwTtf-Go5zdf" colab_type="text"
# # Import Stuff, Load and preprocess data
# + [markdown] id="VtNe-PFRgaWX" colab_type="text"
# ## Wide Residual Network
# Open Source implementation of Wide Residual Networks (https://arxiv.org/abs/1605.07146) from https://github.com/EricAlcaide/keras-wrn
# + id="vmAdWsXb4Tgn" colab_type="code" outputId="a775f88f-0d04-4eeb-d90e-395073b30ffd" colab={"base_uri": "https://localhost:8080/", "height": 35}
import keras
import keras.backend as K
from keras.models import Model
from keras.layers import Dense, Dropout, Add, Input, BatchNormalization, Activation
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Flatten
from keras.regularizers import l2
def main_block(x, filters, n, strides, dropout):
    """Build one WRN group: a downsampling residual block followed by
    ``n - 1`` pre-activation identity residual blocks.

    Args:
        x: Input tensor.
        filters: Number of convolution filters for every block in the group.
        n: Number of residual blocks in the group (including the first).
        strides: Strides of the first block (spatial downsampling).
        dropout: Dropout rate applied inside the identity blocks, or
            None/0 to disable dropout.

    Returns:
        Output tensor of the group (after the closing BN + ReLU).
    """
    # First (downsampling) block: conv-BN-ReLU-conv on the residual path.
    x_res = Conv2D(filters, (3,3), strides=strides, padding="same")(x)# , kernel_regularizer=l2(5e-4)
    x_res = BatchNormalization()(x_res)
    x_res = Activation('relu')(x_res)
    x_res = Conv2D(filters, (3,3), padding="same")(x_res)
    # Shortcut branch: 1x1 conv so shapes match after the stride/filter change.
    x = Conv2D(filters, (1,1), strides=strides)(x)
    # Merge Branches
    x = Add()([x_res, x])

    for i in range(n-1):
        # Residual connection: pre-activation BN-ReLU-conv.
        x_res = BatchNormalization()(x)
        x_res = Activation('relu')(x_res)
        x_res = Conv2D(filters, (3,3), padding="same")(x_res)
        # Apply dropout if given.
        # BUG FIX: the original applied Dropout to `x` (the block input),
        # which silently discarded the conv output computed just above.
        # Dropout must act on the residual path, i.e. on `x_res`.
        if dropout: x_res = Dropout(dropout)(x_res)
        # Second part
        x_res = BatchNormalization()(x_res)
        x_res = Activation('relu')(x_res)
        x_res = Conv2D(filters, (3,3), padding="same")(x_res)
        # Merge branches
        x = Add()([x, x_res])

    # Inter-block part: closing BN + ReLU before the next group.
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def build_model(input_dims, output_dim, n, k, act= "relu", dropout=None):
    """ Builds the model. Params:
        - n: number of layers. WRNs are of the form WRN-N-K
             It must satisfy that (N-4)%6 = 0
        - k: Widening factor. WRNs are of the form WRN-N-K
             It must satisfy that K%2 = 0
        - input_dims: input dimensions for the model
        - output_dim: output dimensions for the model
        - dropout: dropout rate - default=0 (not recomended >0.3)
        - act: activation function - default=relu. Build your custom
               one with keras.backend (ex: swish, e-swish)
    """
    # Ensure n & k are correct
    assert (n-4)%6 == 0
    assert k%2 == 0
    n = (n-4)//6  # residual blocks per group
    # This returns a tensor input to the model
    inputs = Input(shape=(input_dims))

    # Head of the model
    x = Conv2D(16, (3,3), padding="same")(inputs)
    x = BatchNormalization()(x)
    # FIX: honour the `act` argument — it was accepted and documented but
    # ignored (every activation was hard-coded to 'relu'). Default 'relu'
    # keeps existing behavior. NOTE: main_block() still uses 'relu'
    # internally, so `act` only affects the head here.
    x = Activation(act)(x)

    # 3 Blocks (normal-residual)
    x = main_block(x, 16*k, n, (1,1), dropout) # 0
    x = main_block(x, 32*k, n, (2,2), dropout) # 1
    x = main_block(x, 64*k, n, (2,2), dropout) # 2

    # Final part of the model
    x = AveragePooling2D((8,8))(x)
    x = Flatten()(x)
    outputs = Dense(output_dim, activation="softmax")(x)

    model = Model(inputs=inputs, outputs=outputs)
    return model
# + id="2SLLc-6r4Tgs" colab_type="code" colab={}
# 6, 22, 4 turn out to be optimum values for our model
# build_model(input_dims, output_dim, n, k): 200x200 grayscale input,
# 6 output classes, WRN-22-4 architecture.
model = build_model((200,200,1), 6, 22, 4)
# Compiling the model with adaptive learning rate algorithm, Adam
model.compile("adam", "categorical_crossentropy", ["accuracy"])
# + [markdown] id="eHAh0SXUglCz" colab_type="text"
# ## Preprocessing
# + id="XQbh0HC34Tgv" colab_type="code" colab={}
def preprocess1(img, ksize=2, denom=1, morphsize=(5, 6), sigma=1):
    """
    Performs 2D Convolution filtering + Morphological Opening
    to extract out the central blob in binary format,
    and to reduce the other noise.

    Args:
        img: input RGB image (ndarray).
        ksize: side length of the square averaging kernel (positive int).
        denom: divisor applied to the kernel (must be > 0).
        morphsize: structuring-element shape for the morphological opening.
        sigma: unused; kept for backward compatibility with existing callers.

    Returns:
        uint8 binary mask — 1 where the opened image is darker than its
        mean gray level (the blob), 0 elsewhere.
    """
    assert denom > 0, 'denom should be greater than 0'
    # BUG FIX: the original used `&`, which binds tighter than the
    # comparisons (`a == b & c > 0` parses as `a == (b & c) > 0`) and
    # raised TypeError for float ksize instead of the intended assertion
    # message. `and` gives the intended check; valid int inputs behave
    # identically to before.
    assert ksize == int(ksize) and ksize > 0, 'ksize must be a positive integer.'
    # Normalized box kernel, then opening to erase small noise specks.
    kernel = np.ones((ksize, ksize), np.float32) / denom
    dst = cv2.filter2D(img, -1, kernel)
    dst = cv2.morphologyEx(dst, cv2.MORPH_OPEN, np.ones(morphsize, np.uint8))
    # Threshold at the mean gray level; compute the grayscale image once
    # (the original converted twice).
    gray = color.rgb2gray(dst)
    dst = gray < gray.mean()
    dst = dst.astype(np.uint8)
    return dst
def crop_image(id, data='training',
               ksize=2, denom=1, morphsize=(5, 6),
               rgb=False):
    """
    Crops out only the central blob by:
    1. Running an edge detection filter with high central value
       and division factor to essentially turn the central blob to
       solid black and white everywhere else.
    2. Using OpenCV contours to detect the contours in image and selecting
       the one with the largest area (which will be the central one)
    3. Cropping out the largest contour to get only the central blob,
       without any external noise.

    Args:
        id: integer image id; the file read is ``<data>/<id>.png``.
            (Note: shadows the builtin ``id``.)
        data: directory holding the images.
        ksize, denom, morphsize: forwarded to preprocess1() for the final
            binarisation pass; the contour-detection pass always uses the
            hard-coded ksize=3, denom=10.
        rgb: if True return the raw RGB crop, else the binarised
            (uint8 0/1) crop from preprocess1().
    """
    img = io.imread(f'{data}/{id}.png')
    # Strong box filter + opening turns the central blob into a solid
    # binary mask that findContours can trace.
    edged = preprocess1(img, ksize=3,
                        denom=10, morphsize=morphsize)
    # NOTE(review): 3-tuple unpacking matches the OpenCV 3.x findContours
    # API; OpenCV 4.x returns only (contours, hierarchy) — confirm the
    # installed cv2 version before reusing this code.
    (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
    # Largest-area contour is taken to be the central blob.
    cnts_sorted = sorted(cnts, key=cv2.contourArea, reverse=True)
    x, y, w, h = cv2.boundingRect(cnts_sorted[0])
    cropped = img[y:y+h, x:x+w]
    if not rgb:
        cropped = preprocess1(cropped, ksize=ksize,
                              denom=denom, morphsize=morphsize)
    return cropped
def prepare_image(id, data='training', shape=(200, 200), rgb=False):
    """Crop the central blob from image `id` and resize it to `shape`.

    `rgb` is forwarded to crop_image(): False yields the binarised crop,
    True keeps the raw RGB crop.
    """
    cropped = crop_image(id, data, rgb=rgb)
    return cv2.resize(cropped, shape)
# + [markdown] id="w3I0V9t8grwe" colab_type="text"
# ## Importing Modules and loading data
# + id="ZHf_7KF_4Tg1" colab_type="code" colab={}
import numpy as np
from skimage import feature, io, color, morphology
import cv2
import matplotlib.pyplot as plt
import pandas as pd
from keras import Sequential
from keras.utils import to_categorical
# + id="kgHNDJjX4Tg5" colab_type="code" outputId="7615f396-b8f9-4e27-f0b4-6b8cc39432c8" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Load and preprocess all 5000 training images into a uint8 array:
# each image is cropped to its central blob, resized to 200x200 and
# converted to grayscale by prepare_image() + cvtColor.
X = np.empty((5000, 200, 200), dtype=np.uint8)
import math
# Progress-bar scale for a 25-character bar; loop-invariant, so hoisted
# out of the loop (the original recomputed it every iteration).
step = 25 / 5001
for i in range(1,5001):
    # Images are named 1.png ... 5000.png; store at index i-1.
    X[i-1] = cv2.cvtColor(prepare_image(i, data='training', rgb=True, shape=(200, 200)), cv2.COLOR_RGB2GRAY)
    print('\r' + f'Progress: '
          f"[{'=' * int((i+1) * step) + ' ' * (24 - int((i+1) * step))}]"
          f"({math.ceil((i+1) * 100 /5001)} %)",
          end='')
# + id="uxNkn1srg56D" colab_type="code" colab={}
labels = pd.read_csv('training/solution.csv')
y = to_categorical(labels.category.values-1)
# + [markdown] id="VH5-OEQYgytq" colab_type="text"
# # Training the model
# + id="_lWn9y4Q4Tg9" colab_type="code" outputId="f1897ba1-c29a-4dbc-b648-9b1323563b96" colab={"base_uri": "https://localhost:8080/", "height": 1416}
history = model.fit(
X[..., None], y, epochs=40,
validation_split=.15,
callbacks=[keras.callbacks.ModelCheckpoint(
'Model.val-acc={val_acc:.2f}.val-loss={val_loss:.2f}.h5',
monitor='val_loss',
verbose=0,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1
)]
)
# + [markdown] id="DgUj44anhABo" colab_type="text"
# # Analyzing
# + id="SV8BEDHwe8AE" colab_type="code" outputId="fc9d5a26-ae29-42ae-a8b6-56d8eaa41735" colab={"base_uri": "https://localhost:8080/", "height": 705}
# Get training and test loss histories
training_loss = history.history['loss']
val_loss = history.history['val_loss']
training_acc = history.history['acc']
val_acc = history.history['val_acc']
# Create count of the number of epochs
epoch_count = range(1, len(training_loss) + 1)
# Visualize loss history
plt.plot(epoch_count, training_loss, 'r-')
plt.plot(epoch_count, val_loss, 'b-')
plt.legend(['Training Loss', 'Validation Loss'])
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()
plt.plot(epoch_count, training_acc, 'r--')
plt.plot(epoch_count, val_acc, 'b--')
plt.legend(['Training Accuracy', 'Validation Accuracy'])
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.show()
# + [markdown] id="z5Jzv4gVhUq0" colab_type="text"
# # Loading the best weights obtained from the training and predicting for the test data
# + id="-3Q1imfAe8Cw" colab_type="code" colab={}
from keras.models import load_model
model = load_model('Model.val-acc=1.00.val-loss=0.01.h5')
# + id="KvxOEDr54ThG" colab_type="code" outputId="e1afddbe-fe0d-4dbd-97c9-6fcecc3b64dc" colab={"base_uri": "https://localhost:8080/", "height": 35}
sample_sub = pd.read_csv('sample.csv')
import math
n = len(sample_sub.id)
step = 25 / n
for i, id in enumerate(sample_sub.id):
sample_sub.category[sample_sub.id == id] = np.argmax(model.predict(
cv2.cvtColor(prepare_image(id, data='testing', rgb=True, shape=(200, 200)), cv2.COLOR_RGB2GRAY)[None, ..., None]
)) + 1
print('\r' + f'Predict Progress: '
f"[{'=' * int((i+1) * step) + ' ' * (24 - int((i+1) * step))}]"
f"({math.ceil((i+1) * 100 /n)} %)",
end='')
# + id="G31wzOy464NO" colab_type="code" colab={}
sample_sub.to_csv('pred.csv', index=False)
# + id="diURe2t1hfSq" colab_type="code" colab={}
| 10,073 |
/pretrained-model/stt/alconformer/export/base.ipynb | da242fb1ad42e96ce08ba2bf9d5ca588e2734293 | [
"MIT"
] | permissive | Ariffleng/malaya-speech | https://github.com/Ariffleng/malaya-speech | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 40,474 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
# -
import malaya_speech.train.model.alconformer as conformer
import malaya_speech.train.model.transducer as transducer
import malaya_speech
import tensorflow as tf
import numpy as np
subwords = malaya_speech.subword.load('transducer.subword')
featurizer = malaya_speech.tf_featurization.STTFeaturizer(
normalize_per_feature = True
)
X = tf.compat.v1.placeholder(tf.float32, [None, None], name = 'X_placeholder')
X_len = tf.compat.v1.placeholder(tf.int32, [None], name = 'X_len_placeholder')
# +
# Build the STT feature-extraction graph: per-example featurization
# followed by right-padding to a common length. TensorArrays +
# tf.while_loop are used because each example in the batch has its own
# (ragged) length.
batch_size = tf.shape(X)[0]
features = tf.TensorArray(dtype = tf.float32, size = batch_size, dynamic_size = True, infer_shape = False)
features_len = tf.TensorArray(dtype = tf.int32, size = batch_size)
init_state = (0, features, features_len)
def condition(i, features, features_len):
    # Iterate over every example in the batch.
    return i < batch_size
def body(i, features, features_len):
    # Featurize only the valid (unpadded) prefix of sample i.
    f = featurizer(X[i, :X_len[i]])
    f_len = tf.shape(f)[0]
    return i + 1, features.write(i, f), features_len.write(i, f_len)
_, features, features_len = tf.while_loop(condition, body, init_state)
features_len = features_len.stack()
# Second pass: pad every feature matrix to the batch maximum length.
padded_features = tf.TensorArray(dtype = tf.float32, size = batch_size)
padded_lens = tf.TensorArray(dtype = tf.int32, size = batch_size)
maxlen = tf.reduce_max(features_len)
init_state = (0, padded_features, padded_lens)
# NOTE: condition/body are intentionally redefined here for the padding loop.
def condition(i, padded_features, padded_lens):
    return i < batch_size
def body(i, padded_features, padded_lens):
    f = features.read(i)
    len_f = tf.shape(f)[0]
    # Zero-pad along the time axis only (second axis left untouched).
    f = tf.pad(f, [[0, maxlen - tf.shape(f)[0]], [0,0]])
    return i + 1, padded_features.write(i, f), padded_lens.write(i, len_f)
_, padded_features, padded_lens = tf.while_loop(condition, body, init_state)
padded_features = padded_features.stack()
padded_lens = padded_lens.stack()
padded_lens.set_shape((None))
# Feature dimension is fixed at 80 per the set_shape below; a trailing
# channel axis is added so downstream conv layers see (batch, time, 80, 1).
padded_features.set_shape((None, None, 80))
padded_features = tf.expand_dims(padded_features, -1)
padded_features, padded_lens
# -
padded_features = tf.identity(padded_features, name = 'padded_features')
padded_lens = tf.identity(padded_lens, name = 'padded_lens')
config = malaya_speech.config.conformer_base_encoder_config
config['dropout'] = 0.0
conformer_model = conformer.Model(**config)
decoder_config = malaya_speech.config.conformer_base_decoder_config
decoder_config['embed_dropout'] = 0.0
transducer_model = transducer.rnn.Model(
conformer_model, vocabulary_size = subwords.vocab_size, **decoder_config
)
p = tf.compat.v1.placeholder(tf.int32, [None, None])
z = tf.zeros((tf.shape(p)[0], 1),dtype=tf.int32)
c = tf.concat([z, p], axis = 1)
p_len = tf.compat.v1.placeholder(tf.int32, [None])
c
training = True
logits = transducer_model([padded_features, c, p_len], training = training)
logits
sess = tf.Session()
sess.run(tf.global_variables_initializer())
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
saver = tf.train.Saver(var_list = var_list)
saver.restore(sess, 'asr-base-alconformer-transducer-v2/model.ckpt-700000')
decoded = transducer_model.greedy_decoder(padded_features, padded_lens, training = training)
decoded = tf.identity(decoded, name = 'greedy_decoder')
decoded
# +
encoded = transducer_model.encoder(padded_features, training = training)
encoded = tf.identity(encoded, name = 'encoded')
encoded_placeholder = tf.placeholder(tf.float32, [config['dmodel']], name = 'encoded_placeholder')
predicted_placeholder = tf.placeholder(tf.int32, None, name = 'predicted_placeholder')
t = transducer_model.predict_net.get_initial_state().shape
states_placeholder = tf.placeholder(tf.float32, [int(i) for i in t], name = 'states_placeholder')
ytu, new_states = transducer_model.decoder_inference(
encoded=encoded_placeholder,
predicted=predicted_placeholder,
states=states_placeholder,
training = training
)
ytu = tf.identity(ytu, name = 'ytu')
new_states = tf.identity(new_states, name = 'new_states')
ytu, new_states
# -
initial_states = transducer_model.predict_net.get_initial_state()
initial_states = tf.identity(initial_states, name = 'initial_states')
# +
# sess = tf.Session()
# sess.run(tf.global_variables_initializer())
# +
# var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
# saver = tf.train.Saver(var_list = var_list)
# saver.restore(sess, 'asr-small-conformer-transducer/model.ckpt-325000')
# -
files = [
'speech/record/savewav_2020-11-26_22-36-06_294832.wav',
'speech/record/savewav_2020-11-26_22-40-56_929661.wav',
'speech/record/675.wav',
'speech/record/664.wav',
'speech/example-speaker/husein-zolkepli.wav',
'speech/example-speaker/mas-aisyah.wav',
'speech/example-speaker/khalil-nooh.wav',
'speech/example-speaker/shafiqah-idayu.wav',
'speech/khutbah/wadi-annuar.wav',
]
front_pad = 200
back_pad = 2000
inputs = [malaya_speech.load(f)[0] for f in files]
padded, lens = malaya_speech.padding.sequence_1d(inputs, return_len = True)
back = np.zeros(shape = (len(inputs), back_pad))
front = np.zeros(shape = (len(inputs), front_pad))
padded = np.concatenate([front, padded, back], axis = -1)
lens = [l + front_pad + back_pad for l in lens]
# +
import collections
import numpy as np
import tensorflow as tf
# A single beam-search hypothesis: cumulative log-score `score`, the token
# id sequence `prediction` (seeded with a leading 0), and the
# prediction-network `states` that produced the last token.
BeamHypothesis = collections.namedtuple(
    'BeamHypothesis', ('score', 'prediction', 'states')
)
def transducer(
    enc,
    total,
    initial_states,
    encoded_placeholder,
    predicted_placeholder,
    states_placeholder,
    ytu,
    new_states,
    sess,
    beam_width = 10,
    norm_score = True,
):
    """Beam-search decoding for an RNN-Transducer, one encoder frame at a time.

    Args:
        enc: encoder output for one utterance, indexed by frame.
        total: number of valid encoder frames to decode.
        initial_states: initial prediction-network states.
        encoded_placeholder / predicted_placeholder / states_placeholder:
            feed tensors of the step-wise decoder graph.
        ytu: tensor of scores over the vocabulary for one decode step.
        new_states: tensor of updated prediction-network states.
        sess: tf.Session that owns the graph.
        beam_width: number of hypotheses kept per frame.
        norm_score: if True, rank hypotheses by length-normalised score.

    Returns:
        Token id list of the best hypothesis (including the leading 0 seed).
    """
    kept_hyps = [
        BeamHypothesis(score = 0.0, prediction = [0], states = initial_states)
    ]
    B = kept_hyps
    for i in range(total):
        # A: hypotheses still to expand at this frame; B: hypotheses whose
        # latest expansion was token 0 (treated as the blank symbol below).
        A = B
        B = []
        while True:
            # Always expand the current best-scoring hypothesis in A.
            y_hat = max(A, key = lambda x: x.score)
            A.remove(y_hat)
            # One decoder step: vocabulary scores plus updated RNN states
            # given the current frame and the last emitted token.
            ytu_, new_states_ = sess.run(
                [ytu, new_states],
                feed_dict = {
                    encoded_placeholder: enc[i],
                    predicted_placeholder: y_hat.prediction[-1],
                    states_placeholder: y_hat.states,
                },
            )
            for k in range(ytu_.shape[0]):
                beam_hyp = BeamHypothesis(
                    score = (y_hat.score + float(ytu_[k])),
                    prediction = y_hat.prediction,
                    states = y_hat.states,
                )
                if k == 0:
                    # k == 0: hypothesis moves to B unchanged — no token
                    # emitted and the prediction-network states are kept.
                    B.append(beam_hyp)
                else:
                    # Non-blank: emit token k and advance the RNN states.
                    beam_hyp = BeamHypothesis(
                        score = beam_hyp.score,
                        prediction = (beam_hyp.prediction + [int(k)]),
                        states = new_states_,
                    )
                    A.append(beam_hyp)
            # Stop expanding once enough hypotheses ended in blank.
            if len(B) > beam_width:
                break
        if norm_score:
            # Length normalisation avoids a bias towards short outputs.
            kept_hyps = sorted(
                B, key = lambda x: x.score / len(x.prediction), reverse = True
            )[:beam_width]
        else:
            kept_hyps = sorted(B, key = lambda x: x.score, reverse = True)[
                :beam_width
            ]
    return kept_hyps[0].prediction
# +
# %%time
r = sess.run(decoded, feed_dict = {X: padded, X_len: lens})
for row in r:
print(malaya_speech.subword.decode(subwords, row[row > 0]))
# +
# %%time
encoded_, padded_lens_ = sess.run([encoded, padded_lens], feed_dict = {X: padded, X_len: lens})
padded_lens_ = padded_lens_ // conformer_model.conv_subsampling.time_reduction_factor
s = sess.run(initial_states)
for i in range(len(encoded_)):
r = transducer(
enc = encoded_[i],
total = padded_lens_[i],
initial_states = s,
encoded_placeholder = encoded_placeholder,
predicted_placeholder = predicted_placeholder,
states_placeholder = states_placeholder,
ytu = ytu,
new_states = new_states,
sess = sess,
beam_width = 1,
)
print(malaya_speech.subword.decode(subwords, r))
# -
l = padded_lens // transducer_model.encoder.conv_subsampling.time_reduction_factor
encoded = transducer_model.encoder(padded_features, training = training)
g = transducer_model._perform_greedy(encoded[0], l[0],
tf.constant(0, dtype = tf.int32),
transducer_model.predict_net.get_initial_state())
g
indices = g.prediction
minus_one = -1 * tf.ones_like(indices, dtype=tf.int32)
blank_like = 0 * tf.ones_like(indices, dtype=tf.int32)
indices = tf.where(indices == minus_one, blank_like, indices)
num_samples = tf.cast(X_len[0], dtype=tf.float32)
total_time_reduction_factor = featurizer.frame_step
stime = tf.range(0, num_samples, delta=total_time_reduction_factor, dtype=tf.float32)
stime /= tf.cast(featurizer.sample_rate, dtype=tf.float32)
stime = stime[::tf.shape(stime)[0] // tf.shape(indices)[0]]
stime.set_shape((None,))
non_blank = tf.where(tf.not_equal(indices, 0))
non_blank_transcript = tf.gather_nd(indices, non_blank)
non_blank_stime = tf.gather_nd(stime, non_blank)
non_blank_transcript = tf.identity(non_blank_transcript, name = 'non_blank_transcript')
non_blank_stime = tf.identity(non_blank_stime, name = 'non_blank_stime')
# +
# %%time
r = sess.run([non_blank_transcript, non_blank_stime], feed_dict = {X: padded, X_len: lens})
# -
words, indices = [], []
for no, ids in enumerate(r[0]):
w = subwords._id_to_subword(ids - 1)
if type(w) == bytes:
w = w.decode()
words.extend([w, None])
indices.extend([no, None])
# +
import six
from malaya_speech.utils import text_encoder
def _trim_underscore_and_tell(token):
if token.endswith('_'):
return token[:-1], True
return token, False
def decode(ids):
    """Decode subword ids into display tokens plus, for each token, the
    index of the id it came from (None marks an inserted space).

    Adapted from SubwordTextEncoder.decode: byte-level ids are buffered
    in prev_bytes/prev_ids and flushed whenever a regular subword is
    seen. Returns (subwords_, ids) — parallel lists later consumed by
    combined_indices() to attach timestamps.
    """
    # pad_decr: drop padding and shift ids back to the encoder's internal
    # range (see malaya_speech text_encoder).
    ids = text_encoder.pad_decr(ids)
    subword_ids = ids
    del ids
    subwords_ = []
    # Pending byte-level pieces (already utf-8 decoded) and their source
    # positions; a space byte gets a None position marker.
    prev_bytes = []
    prev_ids = []
    ids = []
    def consume_prev_bytes():
        # Flush buffered byte pieces into the output lists; callers reset
        # the buffers with the returned empty lists.
        if prev_bytes:
            subwords_.extend(prev_bytes)
            ids.extend(prev_ids)
        return [], []
    for no, subword_id in enumerate(subword_ids):
        subword = subwords._id_to_subword(subword_id)
        if isinstance(subword, six.binary_type):
            # Byte-encoded
            prev_bytes.append(subword.decode('utf-8', 'replace'))
            if subword == b' ':
                prev_ids.append(None)
            else:
                prev_ids.append(no)
        else:
            # If there were bytes previously, convert to unicode.
            prev_bytes, prev_ids = consume_prev_bytes()
            # A trailing '_' on a subword marks a word boundary: append the
            # trimmed token, then a space with a None position marker.
            trimmed, add_space = _trim_underscore_and_tell(subword)
            ids.append(no)
            subwords_.append(trimmed)
            if add_space:
                subwords_.append(' ')
                ids.append(None)
    prev_bytes = consume_prev_bytes()
    return subwords_, ids
words, indices = decode(r[0])
len(words), len(indices)
# -
def combined_indices(subwords, ids, l, reduction_factor = 160, sample_rate = 16000):
    """Group decoded subword pieces into words with start/end timestamps.

    Args:
        subwords: list of subword strings (entries at boundary positions
            are ignored).
        ids: for each subword, the index into `l` of its frame time, or
            None to mark a word boundary.
        l: per-frame start times in seconds (indexed by values of `ids`).
        reduction_factor: audio samples per decoder frame; one frame's
            duration is added to each word's end timestamp.
        sample_rate: audio sample rate in Hz.

    Returns:
        List of dicts {'text', 'start', 'end'}, one per word, with the
        timestamps rounded to 4 decimals.

    Fix vs. original: empty groups (consecutive or leading None markers)
    previously raised IndexError on `temp_r[0]`; they are now skipped.
    The duplicated flush code is factored into a helper.
    """
    frame_seconds = reduction_factor / sample_rate

    def _flush(chars, times):
        # Emit one word dict; the end time is padded by one frame length.
        return {'text': ''.join(chars),
                'start': round(times[0], 4),
                'end': round(times[-1] + frame_seconds, 4)}

    result, temp_l, temp_r = [], [], []
    for i in range(len(subwords)):
        if ids[i] is not None:
            temp_l.append(subwords[i])
            temp_r.append(l[ids[i]])
        elif temp_l:  # boundary marker: flush only if a word is pending
            result.append(_flush(temp_l, temp_r))
            temp_l, temp_r = [], []
    if temp_l:
        result.append(_flush(temp_l, temp_r))
    return result
combined_indices(words, indices, r[1])
list(zip([subwords._id_to_subword(row - 1) for row in r[0]], r[1]))
saver = tf.train.Saver()
saver.save(sess, 'output-base-alconformer/model.ckpt')
strings = ','.join(
[
n.name
for n in tf.get_default_graph().as_graph_def().node
if ('Variable' in n.op
or 'gather' in n.op.lower()
or 'placeholder' in n.name
or 'encoded' in n.name
or 'decoder' in n.name
or 'ytu' in n.name
or 'new_states' in n.name
or 'padded_' in n.name
or 'initial_states' in n.name
or 'non_blank' in n.name)
and 'adam' not in n.name
and 'global_step' not in n.name
and 'Assign' not in n.name
and 'ReadVariableOp' not in n.name
and 'Gather' not in n.name
]
)
strings.split(',')
def freeze_graph(model_dir, output_node_names):
    """Freeze the latest checkpoint in ``model_dir`` into a single GraphDef.

    Loads the newest checkpoint's meta-graph, restores its variables,
    converts every variable into a constant, and writes the result to
    ``<model_dir>/frozen_model.pb``.

    Args:
        model_dir: directory containing the TF1 checkpoint files.
        output_node_names: comma-separated node names to keep; anything
            not reachable from these outputs is pruned from the graph.
    """
    if not tf.gfile.Exists(model_dir):
        raise AssertionError(
            "Export directory doesn't exists. Please specify an export "
            'directory: %s' % model_dir
        )
    # Pick the most recent checkpoint recorded in the directory's
    # `checkpoint` state file.
    checkpoint = tf.train.get_checkpoint_state(model_dir)
    input_checkpoint = checkpoint.model_checkpoint_path
    absolute_model_dir = '/'.join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_dir + '/frozen_model.pb'
    # Strip device placements so the frozen graph loads on any machine.
    clear_devices = True
    with tf.Session(graph = tf.Graph()) as sess:
        saver = tf.train.import_meta_graph(
            input_checkpoint + '.meta', clear_devices = clear_devices
        )
        saver.restore(sess, input_checkpoint)
        # Replace every tf.Variable with a Const holding its restored value.
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,
            tf.get_default_graph().as_graph_def(),
            output_node_names.split(','),
        )
        with tf.gfile.GFile(output_graph, 'wb') as f:
            f.write(output_graph_def.SerializeToString())
        print('%d ops in the final graph.' % len(output_graph_def.node))
freeze_graph('output-base-alconformer', strings)
def load_graph(frozen_graph_filename):
    """Load a frozen GraphDef protobuf and import it into a fresh tf.Graph.

    Imported node names carry the default 'import/' prefix.
    """
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:
        serialized = f.read()
    graph_def.ParseFromString(serialized)
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def)
    return graph
g = load_graph('output-base-alconformer/frozen_model.pb')
input_nodes = [
'X_placeholder',
'X_len_placeholder',
'encoded_placeholder',
'predicted_placeholder',
'states_placeholder',
]
output_nodes = [
'greedy_decoder',
'encoded',
'ytu',
'new_states',
'padded_features',
'padded_lens',
'initial_states',
'non_blank_transcript',
'non_blank_stime'
]
inputs = {n: g.get_tensor_by_name(f'import/{n}:0') for n in input_nodes}
outputs = {n: g.get_tensor_by_name(f'import/{n}:0') for n in output_nodes}
test_sess = tf.Session(graph = g)
r = test_sess.run(outputs['greedy_decoder'], feed_dict = {inputs['X_placeholder']: padded,
inputs['X_len_placeholder']: lens})
for row in r:
print(malaya_speech.subword.decode(subwords, row[row > 0]))
# +
encoded_, padded_lens_, s = test_sess.run([outputs['encoded'], outputs['padded_lens'], outputs['initial_states']],
feed_dict = {inputs['X_placeholder']: padded,
inputs['X_len_placeholder']: lens})
padded_lens_ = padded_lens_ // conformer_model.conv_subsampling.time_reduction_factor
# +
i = 0
r = transducer(
enc = encoded_[i],
total = padded_lens_[i],
initial_states = s,
encoded_placeholder = inputs['encoded_placeholder'],
predicted_placeholder = inputs['predicted_placeholder'],
states_placeholder = inputs['states_placeholder'],
ytu = outputs['ytu'],
new_states = outputs['new_states'],
sess = test_sess,
beam_width = 1,
)
malaya_speech.subword.decode(subwords, r)
# -
from tensorflow.tools.graph_transforms import TransformGraph
# +
transforms = ['add_default_attributes',
'remove_nodes(op=Identity, op=CheckNumerics, op=Dropout)',
'fold_batch_norms',
'fold_old_batch_norms',
'quantize_weights(fallback_min=-10, fallback_max=10)',
'strip_unused_nodes',
'sort_by_execution_order']
input_nodes = [
'X_placeholder',
'X_len_placeholder',
'encoded_placeholder',
'predicted_placeholder',
'states_placeholder',
]
output_nodes = [
'greedy_decoder',
'encoded',
'ytu',
'new_states',
'padded_features',
'padded_lens',
'initial_states',
'non_blank_transcript',
'non_blank_stime'
]
pb = 'output-base-alconformer/frozen_model.pb'
input_graph_def = tf.GraphDef()
with tf.gfile.FastGFile(pb, 'rb') as f:
input_graph_def.ParseFromString(f.read())
transformed_graph_def = TransformGraph(input_graph_def,
input_nodes,
output_nodes, transforms)
with tf.gfile.GFile(f'{pb}.quantized', 'wb') as f:
f.write(transformed_graph_def.SerializeToString())
# -
g = load_graph('output-base-alconformer/frozen_model.pb.quantized')
inputs = {n: g.get_tensor_by_name(f'import/{n}:0') for n in input_nodes}
outputs = {n: g.get_tensor_by_name(f'import/{n}:0') for n in output_nodes}
test_sess = tf.Session(graph = g)
r = test_sess.run(outputs['greedy_decoder'], feed_dict = {inputs['X_placeholder']: padded,
inputs['X_len_placeholder']: lens})
for row in r:
print(malaya_speech.subword.decode(subwords, row[row > 0]))
# +
encoded_, padded_lens_, s = test_sess.run([outputs['encoded'], outputs['padded_lens'], outputs['initial_states']],
feed_dict = {inputs['X_placeholder']: padded,
inputs['X_len_placeholder']: lens})
padded_lens_ = padded_lens_ // conformer_model.conv_subsampling.time_reduction_factor
# +
i = 0
r = transducer(
enc = encoded_[i],
total = padded_lens_[i],
initial_states = s,
encoded_placeholder = inputs['encoded_placeholder'],
predicted_placeholder = inputs['predicted_placeholder'],
states_placeholder = inputs['states_placeholder'],
ytu = outputs['ytu'],
new_states = outputs['new_states'],
sess = test_sess,
beam_width = 1,
)
malaya_speech.subword.decode(subwords, r)
| 18,328 |
/ImageObjDetYOLO.ipynb | 9d44a5a456c2df72c378732ab288cf755a83260b | [] | no_license | arjundvn24/ObjectDetYOLO | https://github.com/arjundvn24/ObjectDetYOLO | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 1,074,157 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Stateless per-batch word count over a live socket text stream.
from pyspark.streaming import StreamingContext
# Create a StreamingContext with batch interval of 5 seconds
ssc = StreamingContext(sc, 5)
# Create a DStream that will connect to localhost at port 9999
# Start Netcat server: nc -lk 9999
lines = ssc.socketTextStream('localhost', 9999)
# Split each line into words
words = lines.flatMap(lambda line: line.split(" "))
# Count each word in each batch
pairs = words.map(lambda word: (word, 1))
wordCounts = pairs.reduceByKey(lambda x, y: x + y)
# Print the first ten elements of each RDD generated in this DStream to the console
lines.pprint()
wordCounts.pprint()
ssc.start()  # Start the computation
print("Start")
ssc.awaitTermination(20)  # Wait for the computation to terminate
ssc.stop(stopSparkContext=False)  # Stop the StreamingContext without stopping the SparkContext
print("Finished")
# +
# Simulated stream: feed chunks of a static file through a queueStream
# and report the words sorted by frequency for each batch.
from pyspark.streaming import StreamingContext
# Create a queue of RDDs
rdd = sc.textFile('../data/adj_noun_pairs.txt', 8)
# split the rdd into 5 equal-size parts (seed 123 makes the split reproducible)
rddQueue = rdd.randomSplit([1,1,1,1,1], 123)
# Create a StreamingContext with batch interval of 5 seconds
ssc = StreamingContext(sc, 5)
# Feed the rdd queue to a DStream
lines = ssc.queueStream(rddQueue)
# Do word-counting as before
words = lines.flatMap(lambda line: line.split(" "))
pairs = words.map(lambda word: (word, 1))
wordCounts = pairs.reduceByKey(lambda x, y: x + y)
# Use transform() to access any rdd transformations not directly available in SparkStreaming
topWords = wordCounts.transform(lambda rdd: rdd.sortBy(lambda x: x[1], False))
topWords.pprint()
ssc.start()  # Start the computation
ssc.awaitTermination(25)  # Wait for the computation to terminate
ssc.stop(False)
print("Finished")
# +
# Find the most positive words in windows of 5 seconds from streaming data
from pyspark.streaming import StreamingContext
def parse_line(l):
    """Parse one AFINN lexicon line of the form 'word<TAB>score' into (word, score)."""
    fields = l.split("\t")
    return (fields[0], float(fields[1]))
# Static (word -> sentiment score) lookup table, cached because it is joined
# against every batch below.
word_sentiments = sc.textFile("../data/AFINN-111.txt") \
    .map(parse_line).cache()
ssc = StreamingContext(sc, 5)
rdd = sc.textFile('../data/adj_noun_pairs.txt', 8)
rddQueue = rdd.randomSplit([1,1,1,1,1], 123)
lines = ssc.queueStream(rddQueue)
# Per-batch word frequencies
word_counts = lines.flatMap(lambda line: line.split(" ")) \
    .map(lambda word: (word, 1)) \
    .reduceByKey(lambda a, b: a + b)
# Determine the words with the highest sentiment values by joining the streaming RDD
# with the static RDD inside the transform() method and then multiplying
# the frequency of the words by its sentiment value
happiest_words = word_counts.transform(lambda rdd: word_sentiments.join(rdd)) \
    .map(lambda t:
         (t[1][0] * t[1][1], t[0])) \
    .transform(lambda rdd: rdd.sortByKey(False))
happiest_words.pprint()
ssc.start()
ssc.awaitTermination(25)
ssc.stop(False)
print("Finished")
# +
from pyspark.streaming import StreamingContext
# Stateful word count: cumulative totals across all batches seen so far.
ssc = StreamingContext(sc, 5)
# Provide a checkpointing directory. Required for stateful transformations
ssc.checkpoint("checkpoint")
rdd = sc.textFile('../data/adj_noun_pairs.txt', 8)
# Simulate a stream with 10 equal batches (seed 123 for reproducibility)
rddQueue = rdd.randomSplit([1]*10, 123)
lines = ssc.queueStream(rddQueue)
def updateFunc(newValues, runningCount):
    """State-update function for updateStateByKey: add this batch's counts
    for a key to its running total (the state is None on first sight)."""
    previous = 0 if runningCount is None else runningCount
    return previous + sum(newValues)
# add the new values with the previous running count to get the new count
running_counts = lines.flatMap(lambda line: line.split(" "))\
                      .map(lambda word: (word, 1))\
                      .updateStateByKey(updateFunc)
# Sort the cumulative counts in descending order for reporting
counts_sorted = running_counts.transform(lambda rdd: rdd.sortBy(lambda x: x[1], False))
def printResults(rdd):
    """Print summary statistics of the cumulative word-count RDD for one batch.

    Robustness fix: RDD.lookup() returns a (possibly empty) list of values,
    so guard against the probe word being absent instead of indexing [0]
    unconditionally, which would crash the foreachRDD callback.
    """
    print("Total distinct words: ", rdd.count())
    print(rdd.take(5))  # top-5 words (rdd is sorted descending by the caller)
    counts = rdd.lookup('refinery')
    print('refinery:', counts[0] if counts else 0)
# printResults runs on the driver for every batch's cumulative RDD
counts_sorted.foreachRDD(printResults)
ssc.start()
ssc.awaitTermination(50)
ssc.stop(False)
print("Finished")
# +
# MG (Misra-Gries) algorithm for approximate word count
from pyspark.streaming import StreamingContext
k = 10000            # cap on the number of counters kept in the summary
threshold = 0        # per-key decrement to apply on the next batch
total_decrement = 0  # sum of all past decrements; counter + total_decrement is the upper bound
ssc = StreamingContext(sc, 5)
# Provide a checkpointing directory. Required for stateful transformations
ssc.checkpoint("checkpoint")
rdd = sc.textFile('../data/adj_noun_pairs.txt', 8)
rddQueue = rdd.randomSplit([1]*10, 123)
lines = ssc.queueStream(rddQueue)
def updateFunc(newValues, runningCount):
    """Misra-Gries state update: add this batch's counts, subtract the global
    `threshold` decrement, and drop the key (return None) once it reaches zero."""
    total = sum(newValues) + (0 if runningCount is None else runningCount) - threshold
    if total > 0:
        return total
    return None
# add the new values with the previous running count to get the new count
# (pre-aggregate within the batch, then apply the MG decrement in updateFunc)
running_counts = lines.flatMap(lambda line: line.split(" "))\
                      .map(lambda word: (word, 1))\
                      .reduceByKey(lambda a, b: a + b) \
                      .updateStateByKey(updateFunc)
counts_sorted = running_counts.transform(lambda rdd: rdd.sortBy(lambda x: x[1], False))
def printResults(rdd):
    """Report approximate counts from the Misra-Gries summary and update the
    global decrement threshold for the next batch.

    Each counter in the summary is a lower bound for the true count; adding
    total_decrement gives the matching upper bound. When the summary holds
    more than k entries, the (k+1)-th largest counter becomes the next
    per-key decrement.

    Bug fix: the else-branch previously assigned a misspelled name
    ('threhold'), so threshold was never reset to 0 while the summary fit
    within k entries and stale decrements kept being applied.
    """
    global threshold, total_decrement
    rdd.cache()  # rdd is traversed several times below; cache to avoid recomputation
    print("Total distinct words: ", rdd.count())
    # (word, lower_bound, upper_bound) for the top-5 words
    print(rdd.map(lambda x: (x[0], x[1], x[1] + total_decrement)).take(5))
    lower_bound = rdd.lookup('refinery')
    if len(lower_bound) > 0:
        lower_bound = lower_bound[0]
    else:
        lower_bound = 0
    print('refinery:', lower_bound, ',', lower_bound + total_decrement)
    if rdd.count() > k:
        # (k+1)-th largest counter (rdd is sorted descending by the caller)
        threshold = rdd.zipWithIndex().map(lambda x: (x[1], x[0])).lookup(k)[0][1]
    else:
        threshold = 0  # summary fits within k entries: no decrement needed
    print("Next threshold = ", threshold)
    total_decrement += threshold
    rdd.unpersist()
# Run the MG reporting callback on every batch's summary RDD
counts_sorted.foreachRDD(printResults)
ssc.start()
ssc.awaitTermination(50)
ssc.stop(False)
print("Finished")
# +
from pyspark.streaming import StreamingContext
# Create a queue of RDDs
rddQueue = []
for i in range(5):
    rdd = sc.parallelize([i, i, i, i, i])
    rddQueue += [rdd]
# Create a StreamingContext with batch interval of 3 seconds
ssc = StreamingContext(sc, 3)
ssc.checkpoint("checkpoint")
# Feed the rdd queue to a DStream
nums = ssc.queueStream(rddQueue)
# Compute the sum over a sliding window of 9 seconds for every 3 seconds
# slidingSum = nums.reduceByWindow(lambda x, y: x + y, None, 9, 3)
# The second lambda is the inverse-reduce function ("subtract the batch that
# slid out of the window"), allowing incremental window updates.
slidingSum = nums.reduceByWindow(lambda x, y: x + y, lambda x, y: x - y, 9, 3)
slidingSum.pprint()
ssc.start()  # Start the computation
ssc.awaitTermination(24)  # Wait for the computation to terminate
ssc.stop(False)
print("Finished")
# +
# Word count using structured streaming: Complete mode vs update mode
from pyspark.sql.functions import *
lines = spark\
    .readStream\
    .format('socket')\
    .option('host', 'localhost')\
    .option('port', '9999')\
    .option('includeTimestamp', 'true')\
    .load()
# Split the lines into words, retaining timestamps
# split() splits each line into an array, and explode() turns the array into multiple rows
words = lines.select(explode(split(lines.value, ' ')).alias('word'),
                     lines.timestamp)
word_counts = words.groupBy('word').count()
# Start running the query.
# 'complete' mode re-emits the entire aggregated table on every trigger.
query = word_counts\
    .writeStream\
    .outputMode('complete')\
    .format('console')\
    .option('truncate', 'false')\
    .trigger(processingTime='5 seconds') \
    .start()
query.awaitTermination(25)
query.stop()
print("Finished")
# +
# Append mode with selection condition
# Note: complete mode not supported if no aggregation
from pyspark.sql.functions import *
lines = spark\
    .readStream\
    .format('socket')\
    .option('host', 'localhost')\
    .option('port', '9999')\
    .option('includeTimestamp', 'true')\
    .load()
# Split the lines into words, retaining timestamps
# split() splits each line into an array, and explode() turns the array into multiple rows
words = lines.select(explode(split(lines.value, ' ')).alias('word'),
                     lines.timestamp)
# Keep only words of length >= 3
long_words = words.filter(length(words['word'])>=3)
# Start running the query.
# 'append' mode emits only rows added since the previous trigger.
query = long_words\
    .writeStream\
    .outputMode('append')\
    .format('console')\
    .option('truncate', 'false')\
    .trigger(processingTime='5 seconds') \
    .start()
query.awaitTermination(25)
query.stop()
print("Finished")
# +
from pyspark.sql.functions import *
lines = spark\
    .readStream\
    .format('socket')\
    .option('host', 'localhost')\
    .option('port', '9999')\
    .option('includeTimestamp', 'true')\
    .load()
# Split the lines into words, retaining timestamps
# split() splits each line into an array, and explode() turns the array into multiple rows
words = lines.select(explode(split(lines.value, ' ')).alias('word'),
                     lines.timestamp)
# Count words inside 10-second event-time windows that slide every 5 seconds
windowedCounts = words.groupBy(
    window(words.timestamp, "10 seconds", "5 seconds"),
    words.word)\
    .count()
# Start running the query
query = windowedCounts\
    .writeStream\
    .outputMode('complete')\
    .format('console')\
    .option('truncate', 'false')\
    .trigger(processingTime='5 seconds') \
    .start()
query.awaitTermination(25)
query.stop()
print("Finished")
# -
# Append a column of ones to the feature matrix to account for the y-intercept
X = np.concatenate([features, np.ones((samples, 1))], axis=1)
# Decision variables
norm_0 = regressor.addVar(lb=non_zero, ub=non_zero, name="norm")
beta = regressor.addMVar((dim + 1,), lb=-GRB.INFINITY, name="beta") # Weights
intercept = beta[dim] # Last decision variable captures the y-intercept
regressor.setObjective(beta.T @ X.T @ X @ beta
- 2*response.T @ X @ beta
+ np.dot(response, response), GRB.MINIMIZE)
# Budget constraint based on the L0-norm
regressor.addGenConstrNorm(norm_0, beta[:-1], which=0, name="budget")
if not verbose:
regressor.params.OutputFlag = 0
regressor.params.timelimit = 60
regressor.params.mipgap = 0.001
regressor.optimize()
coeff = np.array([beta[i].X for i in range(dim)])
return intercept.X, coeff
# + id="E6dLWd6KSEzB"
# Define functions necessary to perform hyper-parameter tuning via cross-validation
def split_folds(features, response, train_mask):
    """Partition (features, response) into train/test splits via a boolean mask.

    Rows where train_mask is True go to the training split; the rest form
    the test split. Returns (xtrain, xtest, ytrain, ytest).
    """
    test_mask = ~train_mask
    return (features[train_mask, :],
            features[test_mask, :],
            response[train_mask],
            response[test_mask])
def cross_validate(features, response, non_zero, folds, standardize, seed):
    """
    Train an L0-Regression for each fold and report the cross-validated MSE.

    Parameters
    ----------
    features : (samples, dim) array of predictors.
    response : (samples,) array of targets.
    non_zero : L0 budget (max number of active features) passed to miqp().
    folds : number of cross-validation folds.
    standardize : if True, fit a StandardScaler on each training split only,
        so test-fold statistics never leak into the scaling.
    seed : optional RNG seed making the fold assignment reproducible.
    """
    if seed is not None:
        np.random.seed(seed)
    samples, dim = features.shape
    assert samples == response.shape[0]
    fold_size = int(np.ceil(samples / folds))
    # Randomly assign each sample to a fold
    shuffled = np.random.choice(samples, samples, replace=False)
    mse_cv = 0
    # Exclude folds from training, one at a time,
    # to get out-of-sample estimates of the MSE
    for fold in range(folds):
        # Samples whose shuffled position falls in this fold form the test set
        # (the min() clamp handles the shorter final fold)
        idx = shuffled[fold * fold_size : min((fold + 1) * fold_size, samples)]
        train_mask = np.ones(samples, dtype=bool)
        train_mask[idx] = False
        xtrain, xtest, ytrain, ytest = split_folds(features, response, train_mask)
        if standardize:
            scaler = StandardScaler()
            scaler.fit(xtrain)
            xtrain = scaler.transform(xtrain)
            xtest = scaler.transform(xtest)
        intercept, beta = miqp(xtrain, ytrain, non_zero)
        ypred = np.dot(xtest, beta) + intercept
        mse_cv += mse(ytest, ypred) / folds
    # Report the average out-of-sample MSE
    return mse_cv
def L0_regression(features, response, folds=5, standardize=False, seed=None):
    """Select the best L0-Regression model by grid search on the L0 budget.

    Tries every budget from 1 to the number of features, scores each by
    cross-validated MSE, then refits the winning budget on the full data.
    Returns (intercept, beta) from miqp().
    """
    num_features = features.shape[1]
    best_budget = 0
    lowest_mse = np.inf
    # Grid search over the number of active features
    for budget in range(1, num_features + 1):
        cv_mse = cross_validate(features, response, budget, folds=folds,
                                standardize=standardize, seed=seed)
        if cv_mse < lowest_mse:
            lowest_mse = cv_mse
            best_budget = budget
    if standardize:
        scaler = StandardScaler()
        scaler.fit(features)
        features = scaler.transform(features)
    # Refit the best model on the full (possibly standardized) data set
    return miqp(features, response, best_budget)
# + [markdown] id="5JXo-uw1SEzC"
# ---
# ## Benchmark
#
# We now compare the performance of the aforementioned approach w.r.t. OLS regression on all features and the Lasso. The Boston dataset is used for this purpose. This dataset measures the prices of 506 houses, along with 13 features that provide insights about their neighbourhoods. We will use the original feature terminology, so the interested reader can visit [this website](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html) for more information.
#
# Note that 20% of the samples are reserved for computing the out-of-sample MSE. The resulting metrics are displayed in a bar chart (shown below) to facilitate the comparison between models.
# + id="N-rW7MFHSEzC"
# Define how the bar chart should be displayed
def plot_bar_chart(performance):
    """Display the out-of-sample MSE of the three models as a bar chart.

    Parameters
    ----------
    performance : sequence of three values ordered (OLS, Lasso, L0-Regression).

    Cleanup: the bar-container return value and the y-limits from plt.axis()
    were bound to unused locals; they are discarded now.
    """
    plt.bar([1, 2, 3], performance, color=['r', 'g', 'y'],
            tick_label=['OLS', 'Lasso', 'L0-Regression'])
    plt.title('Out-of-Sample MSE')
    # Tighten the y-axis around the data so the differences are visible
    x1, x2, _, _ = plt.axis()
    plt.axis((x1, x2, np.floor(np.min(performance)),
              np.ceil(np.max(performance))))
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 469} id="GIw53gPCSEzD" outputId="7863c9e3-c861-4959-ba82-363101689433"
# Load data and split into train (80%) and test (20%)
data_url = "http://lib.stat.cmu.edu/datasets/boston"
# The Boston file alternates rows: even lines hold 11 features, odd lines the
# remaining 2 features plus the target (see the [::2] / [1::2] slicing below).
# Fix: use a raw string for the regex separator — a plain "\s+" is an invalid
# escape sequence and raises a SyntaxWarning on recent Python versions.
raw_df = pd.read_csv(data_url, sep=r"\s+", skiprows=22, header=None)
X = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
y = raw_df.values[1::2, 2]
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.20,
                                                random_state=10101)
# OLS regression using all features
lr = linear_model.LinearRegression()
lr.fit(Xtrain, ytrain)
# Lasso with cross-validated penalization (lambda)
lasso = linear_model.LassoCV(cv=5)
lasso.fit(Xtrain, ytrain)
# L0-regression where the best feature subset is selected via cross-validation
intercept, beta = L0_regression(Xtrain, ytrain, seed=10101)
# Compare their performance using a bar chart
performance = []
performance.append(mse(ytest, lr.predict(Xtest)))
performance.append(mse(ytest, lasso.predict(Xtest)))
performance.append(mse(ytest, np.dot(Xtest, beta) + intercept))
plot_bar_chart(performance)
# + [markdown] id="OpothO6VSEzD"
# Notice that the Lasso performs poorly, as we did not standardize the features to be expressed in the same units (with an average of zero and variance of one). Contrary to OLS and L0-Regression, the Lasso is not scale-invariant because the budget constraint is based on the L1-norm. Remember that $\beta_l$ is interpreted as the change in the response per unit-change of feature $l$. Since the L1-norm takes the sum of absolute values, how much of the budget $\beta_l$ consumes depends on the units of measurement of the feature associated to it.
#
# Such preprocessing entails three steps, namely:
#
# For each feature $x_l$:
# 1. Compute its sample average $\mu_l$ and sample standard deviation $\sigma_l$.
# 2. Center by subtracting $\mu_l$ from $x_l$.
# 3. Scale by dividing the resulting difference by $\sigma_l$.
#
# In order to report the performance of the Lasso after applying standardization, we need to perform hyper-parameter tuning on the L1-norm penalty via cross-validation. Unfortunately, we must not use the model class `LassoCV`. This is due to the fact that standardization is not supported, and doing that beforehand over the whole dataset would contaminate the folds. In order to prevent that from happening, we will perform random search as follows:
# + id="Fsnm7IplSEzD"
# Random search for the Lasso penalty: standardization happens inside the
# pipeline so each CV fold is scaled with its own training statistics.
np.random.seed(10101)
num_tries = 500
best_alpha = None
best_score = -np.inf
for i in range(num_tries):
    # log-linear search for alpha in the domain [0.001, 1000]
    exponent = np.random.uniform(-3, 3)
    alpha = np.power(10, exponent)
    pipeline = make_pipeline(StandardScaler(), linear_model.Lasso(alpha=alpha))
    # scoring is negated MSE, hence the "higher is better" comparison below
    scores = cross_val_score(pipeline, Xtrain, ytrain, cv=5, scoring='neg_mean_squared_error')
    avg_score = np.mean(scores)
    if avg_score > best_score:
        best_score = avg_score
        best_alpha = alpha
# + [markdown] id="-uDTjWOuSEzE"
# Let's now compare the performance of the models considered when the features are preprocessed. Notice that our user-defined function `L0-regression` does support standardization of the features:
# + colab={"base_uri": "https://localhost:8080/", "height": 452} id="-1jq76jUSEzE" outputId="1fdb7dc5-ada1-40e9-d6e2-e30d02cb97b5"
# Standardize the features so they have an avg of 0 and a sample var of 1
scaler = StandardScaler()
scaler.fit(Xtrain)
Xtrain_std = scaler.transform(Xtrain)
Xtest_std = scaler.transform(Xtest)
# OLS regression using all features
lr = linear_model.LinearRegression()
lr.fit(Xtrain_std, ytrain)
# Lasso with cross-validated penalization (lambda)
lasso = linear_model.Lasso(alpha=best_alpha)
lasso.fit(Xtrain_std, ytrain)
# L0-regression where the best feature subset is selected via cross-validation
intercept, beta = L0_regression(Xtrain, ytrain, standardize=True, seed=10101)
# Compare their performance using a Bar chart
performance = []
performance.append(mse(ytest, lr.predict(Xtest_std)))
performance.append(mse(ytest, lasso.predict(Xtest_std)))
performance.append(mse(ytest, np.dot(Xtest_std, beta) + intercept))
plot_bar_chart(performance)
# + [markdown] id="U8xiVKMKSEzE"
# As expected, the Lasso is better (although marginally) than OLS. This is due to the fact that the Lasso can retrieve the $\beta_{OLS}$ estimate when the budget $s$ is big enough (alternatively, when $\lambda$ is small enough). On the other hand, it is marginally worse than L0-Regression, mainly because by shrinking $\beta$ we add bias to the estimates. Furthermore, observe that L0-Regression achieved the best performance with the fewest number of features. This is convenient, as it leads to a more interpretable model.
# + colab={"base_uri": "https://localhost:8080/"} id="gqiC7MsBSEzF" outputId="fb68f4d5-112f-4c77-e3bc-5f97b59030c5"
ols_features = np.sum(np.abs(lr.coef_) >= 1e-8)
lasso_features = np.sum(np.abs(lasso.coef_) >= 1e-8)
l0_features = np.sum(np.abs(beta) >= 1e-8)
print("OLS regression kept {0} features.".format(ols_features))
print("The Lasso kept {0} features.".format(lasso_features))
print("L0-Regression kept {0} features.".format(l0_features))
# + [markdown] id="Vk6O3k-PSEzF"
# ### Final Model
#
# The previous analysis indicates that the best candidate is the model suggested by L0-Regression. The resulting equation is as follows:
#
# \begin{equation}
# \text{medv} = 22.56-1.02\text{crim}+1.46\text{zn}+0.49\text{chas}-1.93\text{nox}+2.53\text{rm}
# \end{equation}
#
# \begin{equation}
# -3.48\text{dis}+2.65\text{rad}-2.22\text{tax}-1.87\text{ptratio}+1.00\text{b}-3.69\text{lstat}
# \end{equation}
#
# **Note:** The mean and variance vectors used in the standardization step can be accessed through `scaler.mean_` and `scaler.var_`, respectively.
#
# Since we standardized the data, the intercept represents the estimated median value (in thousands) of a house with mean values across features. Likewise, we can interpret $\beta_1=-1.02$ as the decrease in the house value when the per-capita crime rate increases by one standard deviation from the average value, all other things being equal (similar statements can be made for the rest of the features). Finally, if the main purpose of the analysis is to explain the variability in the response, having 11 features may be too much. However, remember that one can always set the number of active features to a more manageable number to ease the interpretation, perhaps at the expense of predictive power.
#
# ---
# ## Conclusions
#
# It has been shown how mathematical programming can be used to perform feature selection on linear regression problems. It is in fact a good alternative to the Lasso, given that L0-Regression is scale invariant and does not add bias to the weight estimates. Furthermore, this approach is amenable to the specification of additional linear constraints (Bertsimas, 2015), such as:
#
# - Enforcing group sparsity among features.
# - Limiting pairwise multicollinearity.
# - Limiting global multicollinearity.
# - Considering a fixed set of nonlinear transformations.
#
# Nevertheless, take this result with caution, as "there is no free lunch in statistics". That is, no algorithm outperforms all others under all possible datasets. Ultimately, a good data scientist should consider multiple learning algorithms when analyzing a dataset.
#
# ---
# ## References
#
# 1. Bertsimas, D., & King, A. (2015). OR forum—An algorithmic approach to linear regression. Operations Research, 64(1), 2-16.
# 2. Bertsimas, D., King, A., & Mazumder, R. (2016). Best subset selection via a modern optimization lens. The annals of statistics, 44(2), 813-852.
# 3. James, G., Witten, D., Hastie, T., & Tibshirani, R. (2013). An introduction to statistical learning. New York: springer.
# 4. The Boston housing dataset (1996, October 10). Retrieved from https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html
# + [markdown] id="NHabIFYMSEzF"
# Copyright © 2020 Gurobi Optimization, LLC
| 22,118 |
/Python_skill_up/05. 정밀하게 텍스트 포매팅하기.ipynb | 83d82fed1bdc421fd407c480b2875b07d8638311 | [] | no_license | yn-e-si/Algorithm-Study | https://github.com/yn-e-si/Algorithm-Study | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 38,151 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # DS-NYC-45 | Unit Project 2: Exploratory Data Analysis
# In this project, you will implement the exploratory analysis plan developed in Unit Project 1. This will lay the groundwork for our our first modeling exercise in Unit Project 3.
# First, load the python libraries you will need for this project:
# +
import os
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
pd.set_option('display.notebook_repr_html', True)
# %matplotlib inline
plt.style.use('ggplot')
# -
# Then, read in your dataset:
# +
# Load the UCLA admissions dataset (columns: admit, gre, gpa, prestige)
df = pd.read_csv(os.path.join('..', '..', 'dataset', 'ucla-admissions.csv'))
df.head()
# -
# ## Questions
# > ### Question 1. How many observations are in our dataset?
df.info()
# Answer: 399 observations
# > ### Question 2. Create a summary table.
df.describe()
# > ### Question 3. Why would `gre` have a larger variance than `gpa`?
# Answer: GRE has a larger variance than GPA because the two variables are not
# measured on the same scale (GRE spans a much wider numeric range than GPA).
# > ### Question 4. Drop data points with missing data.
df.dropna()
# > ### Question 5. Confirm that you dropped the correct data. How can you tell?
df.isnull()
# Answer:
# > ### Question 6. Create boxplots for `gre` and `gpa`.
df.boxplot('gre');
df.boxplot('gpa');
# > ### Question 7. What do boxplots show?
# Answer: GRE's median is approximately 580, while GPA's median is a score of 3.4 for applicants.
# > ### Question 8. Plot the distribution of each variable.
df.hist('admit');
df.hist('gre');
df.hist('gpa');
df.hist('prestige');
# > ### Question 9. If our model had an assumption of a normal distribution for `gre` and `gpa`, would we meet that requirement? Would their distributions need correction? If so, why, why not? How?
# +
# TODO
# -
# Answer:
# > ### Question 10. Create a correlation matrix for the dataset
df.cov()
# > ### Question 11. What did you find? Which of our variables are potentially colinear?
# Answer: The gre and gpa variables are colinear.
# > ### Question 12. Write an analysis plan for exploring the association between graduate school admission rates and prestige of undergraduate schools.
# Answer: The potential relationship between the "prestige" of an undergraduate school and admissions is interesting in that the prestige factor could be have predictive abilities in determining whether someone will be admitted or not. But, "prestige" is based on the biases of whoever is determining that. Which is why, it is not indicative at all in this set.
# > ### Question 13. What is your hypothesis?
# Answer: GRE scores are more correlated to being admitted into UCLA.
fss = '{0}가 {2}에서 {1}을 만나는 게 언제지?'
else:
fss = "When will {0} meet {1} at {2}'s?"
print(fss.format('Fred', 'Sam', 'Joe'))
# -
# ## 5.7 'Repr' vs. string conversion
# str() gives the human-readable form; repr() gives an unambiguous,
# developer-oriented form with quotes and escape sequences shown explicitly.
print(10)  # prints 10
print(str(10))  # same result!
print(repr(10))  # this also prints 10.
test_str = 'Here is a \n newline! '
print(test_str)
print(repr(test_str))  # shows the quotes and the \n escape
print('{}'.format(test_str))  # str-style conversion
print('{!r}'.format(test_str))  # !r forces repr-style conversion
print('{1!r} loves {0!r}'.format('Joanie', 'ChaCha'))  # positional args with !r
# ## 5.8 'format' 함수와 메서드의 'spec' 필드
s = format(32.3, '<+08.3f')
s
# ### 5.8.1 출력-필드 너비
n1, n2 = 777, 999
print('**{:10}**{:2}**'.format(n1, n2))
n = 111
'{:5}'.format(n)
fss = '{1!r:10} loves {0!r:10}!!'
print(fss.format('Joanie', 'ChaCha'))
# ### 5.8.2 Adjusting text: fill characters and alignment
# In a format spec, an optional fill character precedes the alignment flag:
# '>' right-aligns, '<' left-aligns, '^' centers, '=' pads between sign and digits.
print('{:->24}'.format('Hey Bill G, pick me!'))
print('{:>7}'.format('Tom'))  # prints '    Tom'
print('{:@>7}'.format('Lady'))  # prints '@@@Lady'
print('{:*>7}'.format('Bill'))  # prints '***Bill'
print('{:<7}'.format('Tom'))  # prints 'Tom    '
print('{:@<7}'.format('Lady'))  # prints 'Lady@@@'
print('{:*<7}'.format('Bill'))  # prints 'Bill***'
fss = '{:^10}Jones'
print(fss.format('Tom'))  # 'Tom' centered in 10 columns, followed by 'Jones'
fss = '{:@^10}'
print(fss.format('Lady'))  # prints '@@@Lady@@@'
fss = '{:*^10}'
print(fss.format('Bill'))  # prints '***Bill***'
print('{:=8}'.format(-1250))  # '=' pads between the sign and the digits
print('{:0=8}'.format(-1250))  # prints '-0001250'
print(format('Lady', '@<7'))  # prints 'Lady@@@' via the format() built-in
# ### 5.8.3 '기호' 문자
print('results>{: },{:+},{:-}'.format(25, 25, 25))
print('results>{: },{:+},{:-}'.format(-25, -25, -25))
# ### 5.8.4 0으로 시작하는 문자 (0)
i, j = 125, 25156
print('{:07} {:010}.'.format(i, j))
print('{:08}'.format(375)) # 00000375 출력
fss = '{:0>7} {:0>10}'
print('{:0>+10} {:+010}'.format(25, 25))
# ### 5.8.5 천 단위 위치 구분자
fss1 = 'The USA owes {:,} dollars.'
print(fss1.format(21000000000))
fss2 = 'The sun is {:,} miles away.'
print(fss2.format(93000000))
n = 4500000
print('The amount on the check was ${:*>12,}'.format(n))
print('The amount is {:011,}'.format(13000))
n = 13000
print('The amount is {:012,}'.format(n))
print('The amount is {:0>11,}'.format(n))
# ### 5.8.6 정밀도 제어
# +
pi = 3.14159265
phi = 1.618
fss = '{:.2} + {:.2} = {:.2}'
print(fss.format(pi, phi, pi + phi))
# +
pi = 3.14159265
phi = 1.618
fss = '{:.3} + {:.3} = {:.3}'
print(fss.format(pi, phi, pi + phi))
# -
fss = ' {:10.3f}\n {:10.3f}'
print(fss.format(22.1, 1000.007))
fss = ' {:10,.3f}\n {:10,.3f}'
print(fss.format(22333.1, 1000.007))
fss = ' {:10.2f}'
for x in [22.7, 3.1415, 555.5, 29, 1010.013]:
print(fss.format(x))
# ### 5.8.7 문자열에서 사용한 '정밀도' (잘라내기)
print('{:.5}'.format('Superannuated.')) # 'Super' 출력
print('{:.5}'.format('Excellent!')) # 'Excel' 출력
print('{:.5}'.format('Sam')) # 'Sam' 출력
# +
fss = '{:*<6.6}'
print(fss.format('Tom'))
print(fss.format('Mike'))
print(fss.format('Rodney'))
print(fss.format('Hannibal'))
print(fss.format('Mortimer'))
# -
print(format('Tom', '*<5.10'))
print(format('Mike', '*<5.10'))
print(format('Rodney', '*<5.10'))
print(format('Hannibal', '*<5.10'))
print(format('Mortimer', '*<5.10'))
format(10,'b')
# +
fss = '{:*<5.10}'
print(fss.format('Tom'))
print(fss.format('Rodney'))
print(fss.format('longer than 10'))
# -
# ### 5.8.9 이진 기수로 출력하기
print('{:b} {:b} {:b}'.format(5, 6, 16))
print('{:#b}'.format(7))
# ### 5.8.10 8진수와 16진수 출력하기
print('{:o}, {:x}, {:X}'.format(63, 63, 63))
print('{0:o}, {0:x}, {0:X}'.format(63))
print('{0:#o}, {0:#x}, {0:#X}'.format(63))
# ### 5.8.11 백분율 출력하기
print('You own {:%} of the shares.'.format(.517))
print('{:.2%} of {:.2%} of 40...'.format(0.231, 0.5))
# ### 5.8.12 이진수 예시
# +
def calc_binary():
    """Read two binary numbers interactively and print their sum in binary and decimal."""
    print('Enter values in binary only!')
    first = int(input('Enter b1:'), 2)
    second = int(input('Enter b2:'), 2)
    total = first + second
    print('Total is: {:#b}'.format(total))
    print('{} + {} = {}'.format(first, second, total))
calc_binary()
# -
# ## 5.9 변수-크기 필드
'Here is a num: {:{}.{}}'.format(1.2345, 10, 4)
'Here is a num: {:10.4}'.format(1.2345)
a, b = 10, 4
'Here is a num: {:{}.{}}'.format(1.2345, a, b)
'{:{}} {:{}}!'.format('Hi', 3, 'there', 7)
'{0:{1}} {2:{3}}!'.format('Hi', 3, 'there', 7)
'Pi is approx. {0:{1}.{2}f}'.format(3.141592, 8, 3)
a, b = 8, 3
'Pi is approx. {0:{1}.{2}f}'.format(3.141592, a, b)
'Pi is approx. {0:8.3f}'.format(3.141592)
| 7,273 |
/lite/codelabs/flower_classification/ml/Flower_Classification_with_TFLite_Model_Maker.ipynb | 891444e026c1645d40ae4229dfc32d630ea415c0 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | Samridhi-2303/app_mask | https://github.com/Samridhi-2303/app_mask | 0 | 1 | Apache-2.0 | 2022-12-09T16:57:30 | 2020-09-13T18:27:31 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 9,712 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="h2q27gKz1H20"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="form" id="TUfAcER1oUS6"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="Gb7qyhNL1yWt"
# # Flower classification with TensorFlow Lite Model Maker with TensorFlow 2.0
# + [markdown] id="nDABAblytltI"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/lite/codelabs/flower_classification/ml/Flower_Classification_with_TFLite_Model_Maker.ipynb">
# <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
# Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/examples/blob/master/lite/codelabs/flower_classification/ml/Flower_Classification_with_TFLite_Model_Maker.ipynb">
# <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
# View source on GitHub</a>
# </td>
# </table>
# + [markdown] id="m86-Nh4pMHqY"
# Model Maker library simplifies the process of adapting and converting a TensorFlow neural-network model to particular input data when deploying this model for on-device ML applications.
#
# This notebook shows an end-to-end example that utilizes this Model Maker library to illustrate the adaption and conversion of a commonly-used image classification model to classify flowers on a mobile device.
# + [markdown] id="bcLF2PKkSbV3"
# ## Prerequisites
#
# To run this example, we first need to install serveral required packages, including Model Maker package that in github [repo](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker).
# + id="6cv3K3oaksJv"
# !pip install git+git://github.com/tensorflow/examples.git#egg=tensorflow-examples[model_maker]
# + [markdown] id="Gx1HGRoFQ54j"
# Import the required packages.
# + id="XtxiUeZEiXpt"
import numpy as np
import tensorflow as tf
assert tf.__version__.startswith('2')
from tensorflow_examples.lite.model_maker.core.data_util.image_dataloader import ImageClassifierDataLoader
from tensorflow_examples.lite.model_maker.core.task import image_classifier
from tensorflow_examples.lite.model_maker.core.task.model_spec import mobilenet_v2_spec
from tensorflow_examples.lite.model_maker.core.task.model_spec import ImageModelSpec
import matplotlib.pyplot as plt
# + [markdown] id="KKRaYHABpob5"
# ## Simple End-to-End Example
# + [markdown] id="SiZZ5DHXotaW"
# ### Get the data path
#
# Let's get some images to play with this simple end-to-end example. Hundreds of images are a good start for Model Maker, while more data could achieve better accuracy.
# + cellView="form" id="3jz5x0JoskPv"
image_path = tf.keras.utils.get_file(
'flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
# + [markdown] id="a55MR6i6nuDm"
# You could replace `image_path` with your own image folders. As for uploading data to colab, you could find the upload button in the left sidebar shown in the image below with the red rectangle. Just have a try to upload a zip file and unzip it. The root file path is the current path.
#
# <img src="https://storage.googleapis.com/download.tensorflow.org/models/tflite/screenshots/model_maker_image_classification.png" alt="Upload File" width="800" hspace="100">
# + [markdown] id="NNRNv_mloS89"
# If you prefer not to upload your images to the cloud, you could try to run the library locally following the [guide](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker) in github.
# + [markdown] id="w-VDriAdsowu"
# ### Run the example
# The example just consists of 4 lines of code as shown below, each of which represents one step of the overall process.
#
# + [markdown] id="6ahtcO86tZBL"
# 1. Load input data specific to an on-device ML app. Split it to training data and testing data.
# + id="lANoNS_gtdH1"
data = ImageClassifierDataLoader.from_folder(image_path)
train_data, test_data = data.split(0.9)
# + [markdown] id="Y_9IWyIztuRF"
# 2. Customize the TensorFlow model.
# + id="yRXMZbrwtyRD"
model = image_classifier.create(train_data)
# + [markdown] id="oxU2fDr-t2Ya"
# 3. Evaluate the model.
# + id="wQr02VxJt6Cs"
loss, accuracy = model.evaluate(test_data)
# + [markdown] id="eVZw9zU8t84y"
# 4. Export to TensorFlow Lite model.
# You could download it in the left sidebar same as the uploading part for your own use.
# + id="Zb-eIzfluCoa"
model.export(export_dir='.', with_metadata=True)
# + [markdown] id="pyju1qc_v-wy"
# After this simple 4 steps, we can now download the model and label files, and continue to the next step in the [codelab](https://codelabs.developers.google.com/codelabs/recognize-flowers-with-tensorflow-on-android/#4).
#
# For a more comprehensive guide to TFLite Model Maker, please refer to this [notebook](https://colab.sandbox.google.com/github/tensorflow/examples/blob/master/tensorflow_examples/lite/model_maker/demo/image_classification.ipynb) and its [documentation](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker).
#
| 5,928 |
/notebooks/Aeolian2.ipynb | c3e2c024588c2b2966df68d02fe29f7bb201d01a | [] | no_license | openearth/sandmotor-viewer | https://github.com/openearth/sandmotor-viewer | 0 | 0 | null | 2018-06-12T15:08:51 | 2018-06-12T12:55:45 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 2,267,648 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import netCDF4 as nc
import numpy as np
# import pydap.client
# import geojson
# +
'''import meteo data from opendap'''
# meteo_url = r"http://opendap.tudelft.nl/thredds/dodsC/data2/zandmotor/meteohydro/meteo/meteo/meteo.nc"
# meteo_data = nc.Dataset(meteo_url)
# +
'''import wind data from opendap'''
# Open the remote wind dataset via OPeNDAP (no local download needed).
wind_url = "http://opendap.tudelft.nl/thredds/dodsC/data2/zandmotor/meteohydro/wind/wind.nc"
wind_data = nc.Dataset(wind_url)
# Station coordinates: squeeze drops the singleton dimensions so they can be
# converted to plain Python floats.
lat = float(np.squeeze(wind_data.variables['lat'][:]))
lon = float(np.squeeze(wind_data.variables['lon'][:]))
# Timeseries variables, converted to plain Python lists for JSON export below.
direc = np.squeeze(wind_data.variables['direction'][:]).tolist()
aspeed = np.squeeze( wind_data.variables['average_speed'][:]).tolist()
time = wind_data.variables['time']
# Convert the numeric time axis to datetime objects, then stringify each one
# (datetime is not JSON-serializable).
timeDT = list(map(str, nc.num2date(time[:], units=time.units).tolist()))
# -
print(time)
import json
# Bundle everything into one dict and dump it to a JSON file for the viewer.
jsonlist = dict({'lat': lat, 'lon': lon, 'direc': direc, 'aspeed': aspeed, 'time': timeDT})
with open('wind_data.json', 'w') as f:
    json.dump(jsonlist, f)
print(lat)
print(timeDT)
| 1,271 |
/low_level_api/variables1.ipynb | b8c26b6c3e696158c886f1da1f701d6e9f038157 | [] | no_license | fillipevieira/tensorflow-training | https://github.com/fillipevieira/tensorflow-training | 0 | 0 | null | 2020-03-31T02:31:49 | 2019-06-24T03:39:14 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 2,230 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 作業重點:
#
# (1)以, Adam, 為例, 調整 batch_size, epoch , 觀察accurancy, loss 的變化
#
# (2)以同一模型, 分別驗證 SGD, Adam, Rmsprop 的 accurancy
# # 作業目標:
#
# 取得各種優化器的運算結果
from __future__ import print_function
from tensorflow import keras
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import os
from tensorflow.keras import optimizers
#Blas GEMM launch failed , 避免動態分配GPU / CPU, 出現問題
import tensorflow as tf
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.333)
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
# +
'''
宣告並設定
batch_size:對總的樣本數進行分組,每組包含的樣本數量
epochs :訓練次數
'''
#batch_size = 32
num_classes = 10
#epochs = 20
data_augmentation = True
num_predictions = 20
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'keras_cifar10_trained_model.h5'
# +
# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# -
# 資料正規化
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
import matplotlib.pyplot as plt
# %matplotlib inline
def get_model():
    """Build the CNN used for CIFAR-10 classification.

    Architecture: two convolutional blocks (Conv-ReLU-Conv-ReLU-MaxPool-Dropout)
    followed by a dense classifier head ending in a 10-way softmax.

    Returns:
        An uncompiled ``Sequential`` Keras model; input shape is taken from
        the module-level ``x_train``.
    """
    # Assemble the full layer stack first, then hand it to Sequential in one go.
    layer_stack = [
        # --- first convolutional block ---
        Conv2D(32, (3, 3), padding='same',
               input_shape=x_train.shape[1:]),
        Activation('relu'),
        Conv2D(32, (3, 3)),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        # --- second convolutional block ---
        Conv2D(64, (3, 3), padding='same'),
        Activation('relu'),
        Conv2D(64, (3, 3)),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        # --- classifier head ---
        Flatten(),
        Dense(512),
        Activation('relu'),
        Dropout(0.5),
        Dense(10),              # output dimension is 10: one unit per class
        Activation('softmax'),  # final softmax turns logits into probabilities
    ]
    return Sequential(layer_stack)
model = get_model()
# 模型建立完成後,統計參數總量
print("Total Parameters:%d" % model.count_params())
# 輸出模型摘要資訊
model.summary()
def experience(epochs, batch_size, opt):
    """Train the CIFAR-10 CNN with a given optimizer and plot its history.

    Builds a fresh model via the module-level ``get_model``, compiles it with
    ``opt``, trains it (with real-time image augmentation when the
    module-level ``data_augmentation`` flag is set), prints the test score,
    and plots the accuracy and loss curves.

    Args:
        epochs: number of training epochs.
        batch_size: samples per gradient update.
        opt: optimizer name or instance accepted by ``model.compile``.
    """
    model = get_model()
    model.compile(optimizer = opt, loss = 'categorical_crossentropy', metrics = ['accuracy'])
    if not data_augmentation:
        print('Not using data augmentation.')
        history = model.fit(x_train, y_train,
                            batch_size=batch_size,
                            epochs=epochs,
                            validation_data=(x_test, y_test),
                            shuffle=True)
    else:
        print('Using real-time data augmentation.')
        print('')
        # This will do preprocessing and realtime data augmentation:
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            zca_epsilon=1e-06,  # epsilon for ZCA whitening
            rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
            # randomly shift images horizontally (fraction of total width)
            width_shift_range=0.1,
            # randomly shift images vertically (fraction of total height)
            height_shift_range=0.1,
            shear_range=0.,  # set range for random shear
            zoom_range=0.,  # set range for random zoom
            channel_shift_range=0.,  # set range for random channel shifts
            # set mode for filling points outside the input boundaries
            fill_mode='nearest',
            cval=0.,  # value used for fill_mode = "constant"
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False,  # randomly flip images
            # set rescaling factor (applied before any other transformation)
            rescale=None,
            # set function that will be applied on each input
            preprocessing_function=None,
            # image data format, either "channels_first" or "channels_last"
            data_format=None,
            # fraction of images reserved for validation (strictly between 0 and 1)
            validation_split=0.0)

        # Compute quantities required for feature-wise normalization
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(x_train)

        # BUG FIX: this branch previously called model.fit on the raw arrays,
        # so the augmentation configured above was never actually applied.
        # Train from the augmenting generator instead (flow() shuffles by
        # default, matching the old shuffle=True).
        history = model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
                            epochs=epochs,
                            validation_data=(x_test, y_test))
        print('')

    # Score trained model on the held-out test set.
    print('Scoring trained model.')
    scores = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])

    # Plot training & validation accuracy values
    plt.plot(history.history['accuracy'])
    plt.plot(history.history['val_accuracy'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.show()

    # Plot training & validation loss values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.show()
#Adam epochs=10 batch_size=32
experience(epochs=10, batch_size=32, opt='adam')
#Adam epochs=10 batch_size=128
experience(epochs=10, batch_size=128, opt='adam')
#Adam epochs=20 batch_size=64
experience(epochs=20, batch_size=64, opt='adam')
#SGD
experience(epochs=20, batch_size=64, opt='SGD')
#Rmsprop
experience(epochs=20, batch_size=64, opt='Rmsprop')
# -
# Now we have a `DataFrame` containing all relevant text from the page ready to be
# processed
#
# If you are not familiar with regex, it is a way of specifying searches in text.
# A regex engine takes in the search pattern, in the above case `'\[\d+\]'` and
# some string, the paragraph texts. Then it reads the input string one character
# at a time checking if it matches the search. Here the regex `'\d'` matches
# number characters (while `'\['` and `'\]'` capture the braces on either side).
findNumber = r'\d'
regexResults = re.search(findNumber, 'not a number, not a number, numbers 2134567890, not a number')
regexResults
# In Python the regex package (`re`) usually returns `Match` objects (you can have
# multiple pattern hits in a a single `Match`), to get the string that matched our
# pattern we can use the `.group()` method, and as we want the first one we will
# ask for the 0'th group.
print(regexResults.group(0))
# That gives us the first number, if we wanted the whole block of numbers we can
# add a wildcard `'+'` which requests 1 or more instances of the preceding
# character.
findNumbers = r'\d+'
regexResults = re.search(findNumbers, 'not a number, not a number, numbers 2134567890, not a number')
print(regexResults.group(0))
# Now we have the whole block of numbers, there are a huge number of special
# characters in regex, for the full description of Python's implementation look at
# the [re docs](https://docs.python.org/3/library/re.html) there is also a short
# [tutorial](https://docs.python.org/3/howto/regex.html#regex-howto).
# # <span style="color:red">Section 1</span>
# <span style="color:red">Construct cells immediately below this that describe and download webcontent relating to your anticipated final project. Use beautiful soup and at least five regular expressions to extract relevant, nontrivial *chunks* of that content (e.g., cleaned sentences, paragraphs, etc.) to a pandas `Dataframe`.</span>
#
#
# +
#URLs
my_analysis = 'https://aha.confex.com/aha/2018/webprogram/meeting2018-01-05.html'
my_analysis_save = 'aha_conference_2.html'
#Request AHA page
requests.get(my_analysis)
myContentRequest = requests.get(my_analysis)
myContentSoup = bs4.BeautifulSoup(myContentRequest.text, 'html.parser')
#Save as HTML file
with open(my_analysis_save, mode='w', encoding='utf-8') as f:
f.write(myContentRequest.text)
#Extract session titles
date = myContentSoup.body.find('h4', {'class': 'date'})
titles = myContentSoup.body.find_all('div', {'class' : 'itemtitle'})
titlesList = []
for title in titles:
title_text = title.find('a').text
# Regex 1) The line contains the word "Reception"
if re.search('Reception', title_text):
continue
# Regex 2) The line begins with two or more uppercase letters
elif re.search('^[A-Z][A-Z]', title_text):
continue
# Regex 3) The line begins with the word "Tour"
elif re.search('^Tour', title_text):
continue
# Regex 4) The line contains the word "Workshop"
# (Does not exclude the word "workshopping")
elif re.search(r'\bWorkshop\b', title_text):
continue
# Regex 5) The line contains the word "Room" or "Hall"
# (Nursing Room, Exhibit Hall, etc.)
elif re.search(r'\bRoom\b', title_text) or re.search(r'\bHall\b', title_text):
continue
#Regex 6) The line contains the word "Breakfast"
elif re.search('Breakfast', title_text):
continue
else:
titlesList.append(title_text)
titlesListDF = pandas.DataFrame({'paragraph-text' : titlesList})
print(titlesListDF)
# -
#
# # Spidering
#
# What if we want to to get a bunch of different pages from wikipedia. We would
# need to get the url for each of the pages we want. Typically, we want pages that
# are linked to by other pages and so we will need to parse pages and identify the
# links. Right now we will be retrieving all links in the body of the content
# analysis page.
#
# To do this we will need to find all the `<a>` (anchor) tags with `href`s
# (hyperlink references) inside of `<p>` tags. `href` can have many
# [different](http://stackoverflow.com/questions/4855168/what-is-href-and-why-is-
# it-used) [forms](https://en.wikipedia.org/wiki/Hyperlink#Hyperlinks_in_HTML) so
# dealing with them can be tricky, but generally, you will want to extract
# absolute or relative links. An absolute link is one you can follow without
# modification, while a relative link requires a base url that you will then
# append. Wikipedia uses relative urls for its internal links: below is an example
# for dealing with them.
# +
#wikipedia_base_url = 'https://en.wikipedia.org'
otherPAgeURLS = []
#We also want to know where the links come from so we also will get:
#the paragraph number
#the word the link is in
for paragraphNum, pTag in enumerate(contentPTags):
#we only want hrefs that link to wiki pages
tagLinks = pTag.findAll('a', href=re.compile('/wiki/'), class_=False)
for aTag in tagLinks:
#We need to extract the url from the <a> tag
relurl = aTag.get('href')
linkText = aTag.text
#wikipedia_base_url is the base we can use the urllib joining function to merge them
#Giving a nice structured tupe like this means we can use tuple expansion later
otherPAgeURLS.append((
urllib.parse.urljoin(wikipedia_base_url, relurl),
paragraphNum,
linkText,
))
print(otherPAgeURLS[:10])
# -
# We will be adding these new texts to our DataFrame `contentParagraphsDF` so we
# will need to add 2 more columns to keep track of paragraph numbers and sources.
# +
contentParagraphsDF['source'] = [wikipedia_content_analysis] * len(contentParagraphsDF['paragraph-text'])
contentParagraphsDF['paragraph-number'] = range(len(contentParagraphsDF['paragraph-text']))
contentParagraphsDF
# -
# Then we can add two more columns to our `Dataframe` and define a function to
# parse
# each linked page and add its text to our DataFrame.
# +
contentParagraphsDF['source-paragraph-number'] = [None] * len(contentParagraphsDF['paragraph-text'])
contentParagraphsDF['source-paragraph-text'] = [None] * len(contentParagraphsDF['paragraph-text'])
def getTextFromWikiPage(targetURL, sourceParNum, sourceText):
    """Fetch a wiki page and return its paragraphs as a pandas DataFrame.

    Each row holds one paragraph from the target page (citation markers like
    "[12]" stripped), its position on that page, the page URL, and the
    paragraph on the source page whose link led here.
    """
    response = requests.get(targetURL)
    page = bs4.BeautifulSoup(response.text, 'html.parser')
    # Collect one record per <p> tag; enumerate supplies the paragraph index.
    records = []
    for parIndex, paragraph in enumerate(page.body.findAll('p')):
        records.append({
            'source': targetURL,
            'paragraph-number': parIndex,
            # Same citation-stripping regex used on the seed page.
            'paragraph-text': re.sub(r'\[\d+\]', '', paragraph.text),
            'source-paragraph-number': sourceParNum,
            'source-paragraph-text': sourceText,
        })
    return pandas.DataFrame(records, columns=['source', 'paragraph-number',
                                              'paragraph-text',
                                              'source-paragraph-number',
                                              'source-paragraph-text'])
# -
# And run it on our list of link tags
for urlTuple in otherPAgeURLS[:3]:
#ignore_index means the indices will not be reset after each append
contentParagraphsDF = contentParagraphsDF.append(getTextFromWikiPage(*urlTuple),ignore_index=True)
contentParagraphsDF
#
# # <span style="color:red">Section 2</span>
# <span style="color:red">Construct cells immediately below this that spider webcontent from another site with content relating to your anticipated final project. Specifically, identify urls on a core page, then follow and extract content from them into a pandas `Dataframe`. In addition, demonstrate a *recursive* spider, which follows more than one level of links (i.e., follows links from a site, then follows links on followed sites to new sites, etc.), making sure to define a reasonable endpoint so that you do not wander the web forever :-).</span>
#
#
# +
# It turns out the AHA conference page is not very friendly to spidering, so my makeshift solution was to use a new URL from a Wikipedia entry on Historiography
historiography_content_analysis = 'https://en.wikipedia.org/wiki/Historiography'
historiographyContentRequest = requests.get(historiography_content_analysis)
historiographyContentSoup = bs4.BeautifulSoup(historiographyContentRequest.text, 'html.parser')
historiographyContentPTags = historiographyContentSoup.body.findAll('p')
otherPAgeURLS = []
for paragraphNum, pTag in enumerate(historiographyContentPTags):
tagLinks = pTag.findAll('a', href=re.compile('/wiki/'), class_=False)
for aTag in tagLinks:
relurl = aTag.get('href')
linkText = aTag.text
otherPAgeURLS.append((
urllib.parse.urljoin(wikipedia_base_url, relurl),
paragraphNum,
linkText,
))
#print(otherPAgeURLS[:10])
titlesListDF['source'] = [historiography_content_analysis] * len(titlesListDF['paragraph-text'])
titlesListDF['paragraph-number'] = range(len(titlesListDF['paragraph-text']))
titlesListDF['source-paragraph-number'] = [None] * len(titlesListDF['paragraph-text'])
titlesListDF['source-paragraph-text'] = [None] * len(titlesListDF['paragraph-text'])
for urlTuple in otherPAgeURLS[:5]:
#ignore_index means the indices will not be reset after each append
titlesListDF = titlesListDF.append(getTextFromWikiPage(*urlTuple),ignore_index=True)
titlesListDF
# -
# ## API (Tumblr)
#
# Generally website owners do not like you scraping their sites. If done badly,
# scarping can act like a DOS attack so you should be careful how often you make
# calls to a site. Some sites want automated tools to access their data, so they
# create [application programming interface
# (APIs)](https://en.wikipedia.org/wiki/Application_programming_interface). An API
# specifies a procedure for an application (or script) to access their data. Often
# this is though a [representational state transfer
# (REST)](https://en.wikipedia.org/wiki/Representational_state_transfer) web
# service, which just means if you make correctly formatted HTTP requests they
# will return nicely formatted data.
#
# A nice example for us to study is [Tumblr](https://www.tumblr.com), they have a
# [simple RESTful API](https://www.tumblr.com/docs/en/api/v1) that allows you to
# read posts without any complicated html parsing.
#
# We can get the first 20 posts from a blog by making an http GET request to
# `'http://{blog}.tumblr.com/api/read/json'`, were `{blog}` is the name of the
# target blog. Lets try and get the posts from [http://lolcats-lol-
# cat.tumblr.com/](http://lolcats-lol-cat.tumblr.com/) (Note the blog says at the
# top 'One hour one pic lolcats', but the canonical name that Tumblr uses is in
# the URL 'lolcats-lol-cat').
# +
tumblrAPItarget = 'http://{}.tumblr.com/api/read/json'
r = requests.get(tumblrAPItarget.format('lolcats-lol-cat'))
print(r.text[:1000])
# -
# This might not look very good on first inspection, but it has far fewer angle
# braces than html, which makes it easier to parse. What we have is
# [JSON](https://en.wikipedia.org/wiki/JSON) a 'human readable' text based data
# transmission format based on javascript. Luckily, we can readily convert it to a
# python `dict`.
#We need to load only the stuff between the curly braces
d = json.loads(r.text[len('var tumblr_api_read = '):-2])
print(d.keys())
print(len(d['posts']))
# If we read the [API specification](https://www.tumblr.com/docs/en/api/v1), we
# will see there are a lot of things we can get if we add things to our GET
# request. First we can retrieve posts by their id number. Let's first get post
# `146020177084`.
# +
r = requests.get(tumblrAPItarget.format('lolcats-lol-cat'), params = {'id' : 146020177084})
d = json.loads(r.text[len('var tumblr_api_read = '):-2])
d['posts'][0].keys()
d['posts'][0]['photo-url-1280']
with open('lolcat.gif', 'wb') as f:
gifRequest = requests.get(d['posts'][0]['photo-url-1280'], stream = True)
f.write(gifRequest.content)
# -
# <img src='lolcat.gif'>
#
# Such beauty; such vigor (If you can't see it you have to refresh the page). Now
# we could retrieve the text from all posts as well
# as related metadata, like the post date, caption or tags. We could also get
# links to all the images.
#Putting a max in case the blog has millions of images
#The given max will be rounded up to the nearest multiple of 50
def tumblrImageScrape(blogName, maxImages = 200):
    """Collect photo-post metadata from a Tumblr blog via its v1 JSON API.

    Args:
        blogName: canonical blog name (the subdomain in the blog's URL).
        maxImages: cap on posts to fetch; rounded UP to the nearest multiple
            of 50, since the API returns at most 50 posts per request.

    Returns:
        pandas.DataFrame with columns id, photo-url, date, tags, photo-type.

    Raises:
        KeyError: if a post lacks one of the expected fields or has no
            photo URL at any known size.
    """
    #Restating this here so the function isn't dependent on any external variables
    tumblrAPItarget = 'http://{}.tumblr.com/api/read/json'
    #Photo URLs are published at several fixed widths; we prefer the largest.
    possiblePhotoSuffixes = [1280, 500, 400, 250, 100]
    #These are the pieces of information we will be gathering,
    #at the end we will convert this to a DataFrame.
    #There are a few other datums we could gather like the captions
    #you can read the Tumblr documentation to learn how to get them
    #https://www.tumblr.com/docs/en/api/v1
    postsData = {
        'id' : [],
        'photo-url' : [],
        'date' : [],
        'tags' : [],
        'photo-type' : []
        }
    #BUG FIX: the old loop used floor division (maxImages // 50), so e.g.
    #maxImages=49 issued zero requests, contradicting the documented
    #"rounded up" behaviour. Ceil-divide instead.
    numRequests = -(-maxImages // 50)
    #Tumblr limits us to a max of 50 posts per request
    for requestNum in range(numRequests):
        requestParams = {
            'start' : requestNum * 50,
            'num' : 50,
            'type' : 'photo'
        }
        r = requests.get(tumblrAPItarget.format(blogName), params = requestParams)
        #The response is JavaScript ("var tumblr_api_read = {...};");
        #strip the wrapper to get plain JSON.
        requestDict = json.loads(r.text[len('var tumblr_api_read = '):-2])
        for postDict in requestDict['posts']:
            #We are dealing with uncleaned data, we can't trust it.
            #Specifically, not all posts are guaranteed to have the fields we want
            try:
                postsData['id'].append(postDict['id'])
                postsData['date'].append(postDict['date'])
                postsData['tags'].append(postDict['tags'])
            except KeyError as e:
                raise KeyError("Post {} from {} is missing: {}".format(postDict['id'], blogName, e))
            #Try each size variant from largest to smallest; for/else runs the
            #else clause only when no break occurred (no variant found).
            for suffix in possiblePhotoSuffixes:
                try:
                    photoURL = postDict['photo-url-{}'.format(suffix)]
                except KeyError:
                    continue
                postsData['photo-url'].append(photoURL)
                postsData['photo-type'].append(photoURL.split('.')[-1])
                break
            else:
                #Make sure your error messages are useful
                #You will be one of the users
                raise KeyError("Post {} from {} is missing a photo url".format(postDict['id'], blogName))
    return pandas.DataFrame(postsData)
tumblrImageScrape('lolcats-lol-cat', 50)
# Now we have the urls of a bunch of images and can run OCR on them to gather
# compelling meme narratives, accompanied by cats.
#
# # Files
#
# What if the text we want isn't on a webpage? There are a many other sources of
# text available, typically organized into *files*.
#
# ## Raw text (and encoding)
#
# The most basic form of storing text is as a _raw text_ document. Source code
# (`.py`, `.r`, etc) is usually raw text as are text files (`.txt`) and those with
# many other extension (e.g., .csv, .dat, etc.). Opening an unknown file with a
# text editor is often a great way of learning what the file is.
#
# We can create a text file in python with the `open()` function
# +
#example_text_file = 'sometextfile.txt'
#stringToWrite = 'A line\nAnother line\nA line with a few unusual symbols \u2421 \u241B \u20A0 \u20A1 \u20A2 \u20A3 \u0D60\n'
stringToWrite = 'A line\nAnother line\nA line with a few unusual symbols ␡ ␛ ₠ ₡ ₢ ₣ ൠ\n'
with open(example_text_file, mode = 'w', encoding='utf-8') as f:
f.write(stringToWrite)
# -
# Notice the `encoding='utf-8'` argument, which specifies how we map the bits from
# the file to the glyphs (and whitespace characters like tab (`'\t'`) or newline
# (`'\n'`)) on the screen. When dealing only with latin letters, arabic numerals
# and the other symbols on America keyboards you usually do not have to worry
# about encodings as the ones used today are backwards compatible with
# [ASCII](https://en.wikipedia.org/wiki/ASCII), which gives the binary
# representation of 128 characters.
#
# Some of you, however, will want to use other characters (e.g., Chinese
# characters). To solve this there is
# [Unicode](https://en.wikipedia.org/wiki/Unicode) which assigns numbers to
# symbols, e.g., 041 is `'A'` and 03A3 is `'Σ'` (numbers starting with 0 are
# hexadecimal). Often non/beyond-ASCII characters are called Unicode characters.
# Unicode contains 1,114,112 characters, about 10\% of which have been assigned.
# Unfortunately there are many ways used to map combinations of bits to Unicode
# symbols. The ones you are likely to encounter are called by Python _utf-8_,
# _utf-16_ and _latin-1_. _utf-8_ is the standard for Linux and Mac OS while both
# _utf-16_ and _latin-1_ are used by windows. If you use the wrong encoding,
# characters can appear wrong, sometimes change in number or Python could raise an
# exception. Lets see what happens when we open the file we just created with
# different encodings.
# +
with open(example_text_file, encoding='utf-8') as f:
print("This is with the correct encoding:")
print(f.read())
with open(example_text_file, encoding='latin-1') as f:
print("This is with the wrong encoding:")
print(f.read())
# -
# Notice that with _latin-1_ the unicode characters are mixed up and there are too
# many of them. You need to keep in mind encoding when obtaining text files.
# Determining the encoding can sometime involve substantial work.
# We can also load many text files at once. LEts tart by looking at the Shakespeare files in the `data` directory
with open('../data/Shakespeare/midsummer_nights_dream.txt') as f:
midsummer = f.read()
print(midsummer[-700:])
# Then to load all the files in `../data/Shakespeare` we can use a for loop with `scandir`:
# +
targetDir = '../data/Shakespeare' #Change this to your own directory of texts
shakespearText = []
shakespearFileName = []
for file in (file for file in os.scandir(targetDir) if file.is_file() and not file.name.startswith('.')):
with open(file.path) as f:
shakespearText.append(f.read())
shakespearFileName.append(file.name)
# -
# Then we can put them all in pandas DataFrame
shakespear_df = pandas.DataFrame({'text' : shakespearText}, index = shakespearFileName)
shakespear_df
# Getting your text in a format like this is the first step of most analysis
# ## PDF
#
# Another common way text will be stored is in a PDF file. First we will download
# a pdf in Python. To do that lets grab a chapter from
# _Speech and Language Processing_, chapter 21 is on Information Extraction which
# seems apt. It is stored as a pdf at [https://web.stanford.edu/~jurafsky/slp3/21.
# pdf](https://web.stanford.edu/~jurafsky/slp3/21.pdf) although we are downloading
# from a copy just in case Jurafsky changes their website.
# +
#information_extraction_pdf = 'https://github.com/KnowledgeLab/content_analysis/raw/data/21.pdf'
infoExtractionRequest = requests.get(information_extraction_pdf, stream=True)
print(infoExtractionRequest.text[:1000])
# -
# It says `'pdf'`, so thats a good sign. The rest though looks like we are having
# issues with an encoding. The random characters are not caused by our encoding
# being wrong, however. They are cause by there not being an encoding for those
# parts at all. PDFs are nominally binary files, meaning there are sections of
# binary that are specific to pdf and nothing else so you need something that
# knows about pdf to read them. To do that we will be using
# [`PyPDF2`](https://github.com/mstamy2/PyPDF2), a PDF processing library for
# Python 3.
#
#
# Because PDFs are a very complicated file format pdfminer requires a large amount
# of boilerplate code to extract text, we have written a function that takes in an
# open PDF file and returns the text so you don't have to.
def readPDF(pdfFile):
    """Extract all text from an open PDF file object using pdfminer.

    Args:
        pdfFile: a binary file-like object positioned at the start of a PDF
            (e.g. an open file or an io.BytesIO wrapping downloaded bytes).

    Returns:
        str: the concatenated text of every page.
    """
    #Based on code from http://stackoverflow.com/a/20905381/4955164
    #Using utf-8, if there are a bunch of random symbols try changing this
    codec = 'utf-8'
    rsrcmgr = pdfminer.pdfinterp.PDFResourceManager()
    # Extracted text accumulates in this in-memory buffer via the converter.
    retstr = io.StringIO()
    layoutParams = pdfminer.layout.LAParams()
    device = pdfminer.converter.TextConverter(rsrcmgr, retstr, laparams = layoutParams, codec = codec)
    #We need a device and an interpreter
    interpreter = pdfminer.pdfinterp.PDFPageInterpreter(rsrcmgr, device)
    # Defaults: no password, no page limit, cache resources, process all pages
    # (an empty pagenos set means "every page" to get_pages).
    password = ''
    maxpages = 0
    caching = True
    pagenos=set()
    for page in pdfminer.pdfpage.PDFPage.get_pages(pdfFile, pagenos, maxpages=maxpages, password=password,caching=caching, check_extractable=True):
        interpreter.process_page(page)
    # Close the device first, then read the accumulated text out of the
    # buffer before releasing it.
    device.close()
    returnedString = retstr.getvalue()
    retstr.close()
    return returnedString
# First we need to take the response object and convert it into a 'file like'
# object so that pdfminer can read it. To do this we will use `io`'s `BytesIO`.
infoExtractionBytes = io.BytesIO(infoExtractionRequest.content)
# Now we can give it to pdfminer.
print(readPDF(infoExtractionBytes)[:550])
# From here we can either look at the full text or fiddle with our PDF reader and
# get more information about individual blocks of text.
#
# ## Word Docs
#
# The other type of document you are likely to encounter is the `.docx`, these are
# actually a version of [XML](https://en.wikipedia.org/wiki/Office_Open_XML), just
# like HTML, and like HTML we will use a specialized parser.
#
# For this class we will use [`python-docx`](https://python-
# docx.readthedocs.io/en/latest/) which provides a nice simple interface for
# reading `.docx` files
# +
#example_docx = 'https://github.com/KnowledgeLab/content_analysis/raw/data/example_doc.docx'
r = requests.get(example_docx, stream=True)
d = docx.Document(io.BytesIO(r.content))
for paragraph in d.paragraphs[:7]:
print(paragraph.text)
# -
# This procedure uses the `io.BytesIO` class again, since `docx.Document` expects
# a file. Another way to do it is to save the document to a file and then read it
# like any other file. If we do this we can either delete the file afterwords, or
# save it and avoid downloading the following time.
#
# This function is useful as a part of many different tasks so it and others like it will be added to the helper package `lucem_illud` so we can use it later without having to retype it.
def downloadIfNeeded(targetURL, outputFile, **openkwargs):
    """Return an open handle to outputFile, downloading it first if absent.

    Any extra keyword arguments are forwarded to ``open()`` (e.g. mode='rb'
    for binary files). Acts as a simple download cache: subsequent calls
    skip the network entirely.
    """
    # Cache hit: nothing to download.
    if os.path.isfile(outputFile):
        return open(outputFile, **openkwargs)
    # Ensure the destination directory exists (makedirs is a recursive mkdir).
    parentDir = os.path.dirname(outputFile)
    if len(parentDir) > 0:
        os.makedirs(parentDir, exist_ok = True)
    response = requests.get(targetURL, stream=True)
    # Write the raw bytes; the with-block closes the file before we reopen
    # it below with the caller's requested mode/encoding.
    with open(outputFile, 'wb') as cacheFile:
        cacheFile.write(response.content)
    return open(outputFile, **openkwargs)
# This function will download, save and open `outputFile` as `outputFile` or just
# open it if `outputFile` exists. By default `open()` will open the file as read
# only text with the local encoding, which may cause issues if its not a text
# file.
# Deliberate failure demo: without mode='rb' the handle comes back in text
# mode, so docx.Document raises; print the error instead of crashing the cell.
try:
    d = docx.Document(downloadIfNeeded(example_docx, example_docx_save))
except Exception as e:
    print(e)
# We need to tell `open()` to read in binary mode (`'rb'`), this is why we added
# `**openkwargs`, this allows us to pass any keyword arguments (kwargs) from
# `downloadIfNeeded` to `open()`.
d = docx.Document(downloadIfNeeded(example_docx, example_docx_save, mode = 'rb'))
for paragraph in d.paragraphs[:7]:
    print(paragraph.text)
# Now we can read the file with `docx.Document` and not have to wait for it to be
# downloaded every time.
#
# # <span style="color:red">Section 3</span>
# <span style="color:red">Construct cells immediately below this that extract and organize textual content from text, PDF or Word into a pandas dataframe.</span>
#
# +
#PDF miner
def readPDF(pdfFile):
    """Extract the full text of a PDF and return it as a single string.

    Based on code from http://stackoverflow.com/a/20905381/4955164

    Args:
        pdfFile: a binary file-like object containing the PDF data.

    Returns:
        str: the concatenated text extracted from every page.
    """
    # Using utf-8; if there are a bunch of random symbols try changing this.
    codec = 'utf-8'
    rsrcmgr = pdfminer.pdfinterp.PDFResourceManager()
    retstr = io.StringIO()
    layoutParams = pdfminer.layout.LAParams()
    device = pdfminer.converter.TextConverter(rsrcmgr, retstr, laparams = layoutParams, codec = codec)
    # We need a device and an interpreter
    interpreter = pdfminer.pdfinterp.PDFPageInterpreter(rsrcmgr, device)
    password = ''
    maxpages = 0      # 0 means "no page limit"
    caching = True
    pagenos = set()   # empty set means "all pages"
    # Close the device/buffer even if extraction raises, so a bad PDF does
    # not leak resources (the original closed them only on success).
    try:
        for page in pdfminer.pdfpage.PDFPage.get_pages(pdfFile, pagenos, maxpages=maxpages, password=password, caching=caching, check_extractable=True):
            interpreter.process_page(page)
    finally:
        device.close()
    try:
        returnedString = retstr.getvalue()
    finally:
        retstr.close()
    return returnedString
# Download the 2018 AAS Korea-sessions program PDF and extract its text.
my_example_pdf = 'http://www.asian-studies.org/Portals/55/Conference/2018%20Korea%20Sessions%20-%20AAS.pdf?ver=2017-11-12-140005-333'
exampleExtractionRequest = requests.get(my_example_pdf, stream=True)
exampleExtractionBytes = io.BytesIO(exampleExtractionRequest.content)
aasKoreaProgram = readPDF(exampleExtractionBytes)
# print(aasKoreaProgram)
session_titles = []
organizer_names = []
# State flag: True right after an organizer line, so that a second
# consecutive "Organizer |" line for the same session is skipped.
possible_second_organizer = False
# print(len(aasKoreaProgram)) # 37984 characters in this PDF document
# Split the entire string bylines
aas_lines = aasKoreaProgram.splitlines()
# print(len(aas_lines)) # 675 lines
# NOTE(review): the regex literals below should be raw strings (r'...');
# '\.' and '\s' trigger invalid-escape warnings on newer Pythons.
for line in aas_lines:
    # Extract titles with regex
    # by filtering with the line begins with one or more number)
    if re.search('^[0-9]+\.\s', line):
        possible_second_organizer = False
        # If the line is the name of a session, remove the session number
        session_title = re.sub('^[0-9]+\.\s', '', line)
        session_titles.append(session_title)
    # Extract the organizer with regex
    # by search for lines that contain the word "Organizer"
    # and then only choosing the name
    elif re.search('Organizer \|', line):
        # Logic to remove the second organizer
        if possible_second_organizer:
            possible_second_organizer = False
            continue
        # Only choose the middle element in the line split with '|'
        organizer_list = line.split('|')
        organizer_name = organizer_list[1][1:-1]
        possible_second_organizer = True
        organizer_names.append(organizer_name)
# NOTE(review): this raises ValueError if session_titles and organizer_names
# end up with different lengths (e.g. a session without an organizer line) --
# verify against the actual PDF layout.
aasKoreaDF = pandas.DataFrame({'session-title' : session_titles}, index = organizer_names)
print(aasKoreaDF)
| 33,419 |
/Lab1.ipynb | 378a857b88c7692e98e08cc86f91cba7bff01a39 | [] | no_license | Mineyevaa/Laba | https://github.com/Mineyevaa/Laba | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 597,589 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Первичный анализ и предобработка данных с Pandas
# В этом задании рассмотрим первичный анализ данных на примере реальных данных по шоколадным батончикам. Данные содержат следующие атрибуты: 'company' - компания производитель, 'bar_name' - название шоколадного батончика, 'ref' - номер, 'rew_date' - дата оценки, 'percent' - процент какао бобов, 'company_loc' - местонахождение компании, 'rate' - рейтинг, 'bean_dtype' - тип какао бобов, 'bean_orig' - страна происхождения.
# Лабораторная работа №1
# Вяткина Арина
# Минеева Анна
# РИ-471223
# ## Численные атрибуты
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the chocolate-bar ratings and assign explicit English column names.
best_bar= pd.read_csv('flavors_of_cacao.csv',
                      sep=',', header=0, names=['company', 'bar_name','ref', 'rew_date', 'percent', 'company_loc', 'rate', 'bean_dtype', 'bean_orig'])
type(best_bar)  # sanity check: should be a pandas DataFrame
# Оставим только только местонахождение компании, рейтинг, процент какао бобов и страну происхождения для дальнейшего анализа.
best_bar = best_bar[['company_loc', 'rate', 'percent', 'bean_orig']]
best_bar.head()
# Необходимо построить гистограмму распределения рейтинга батончика через plot(). Количество столбцов гистограммы равно 20.
# На одном графике необходимо построить график плотности вероятности, который строится на основе оценки непрерывного распределения вероятности. Гистограммы аппроксимируют комбинацией ядре, т.е. более простых распределений, например нормалього (гауссова). Поэтому графики плотности еще называют ядерной окенки плотности (KDE-kernel density estimate). Строится kde с помощью функции плот с параметром kind='kde'.
# Строим два графика:
# - гистограмму распределения рейтинга батончика
# - график плотности вероятности
best_bar.plot(kind='hist', color='green',bins=20)
best_bar.plot(kind='kde', color='blue')
# Необходимо построить гистограмму распределения для процента какао бобов. Однако, следует отметить, что данные атрибут имеет тип object. Шистограмму для такого типа построить не получиться.
best_bar.dtypes
# Необходимо преобразовать данный тип в float с помощью функции apply. Причем сначала необходимо убрать знак '%' в конце каждого элемента, а затем преобразовать в тип данных с плавающей точкой. Функция apply проходит по всем значениям данных и применяет указанную функцию.
# Данные из столбца 'percent' преобразуем в тип float
# Convert the cocoa percentage from strings like '70%' to float.
# .str.rstrip('%') is safer than apply(lambda x: x[:-1]): it removes the
# percent sign only when present instead of blindly chopping the last
# character, and it is vectorized rather than a per-row Python call.
best_bar['percent'] = best_bar['percent'].str.rstrip('%').astype(float)
best_bar.head()
best_bar.plot('rate', kind='hist', color='blue', bins=20)
# Постройте scatter plot зависимости рейтинга батончика от процента какао бобов, используя метод plot для Pandas DataFrame с аргументом kind='scatter'. Подпишите картинку.
best_bar.plot('rate', 'percent',kind='scatter', title='Зависисмость рейтинга батончика от процента какао бобов')
# Постройте "ящик с усами" для числовых распределений с помощью функции boxplot. Возпользуйтесь также функцией describe() для каждого распределения.
# Рейтинг
best_bar.boxplot(column='rate')
best_bar.rate.describe()
# Процент какао бобов
best_bar.boxplot(column='percent')
best_bar.percent.describe()
# ## Категориальные атрибуты
# Обратим внимание на категориальные признаки. Построить гистограмму распределения не удасться, но в качестве начального анализа можно воспользоваться функцией value_counts, которая позволяет вывести количество встречающихся значений в атрибуте данных. Обратите внимание внимание на места происхождения бобов, которые редко встречаются в наборе данных. Для вывода начальных и конечных элементов используются функции head() и tail() соответственно.
# Выводим количество повторов каждой страны
value_borig = best_bar.bean_orig.value_counts()
value_borig
# После того, как выведено количество уникальных значений можно представить их визуальное распределение с помощью функции plot(kind='bar').
# Выводим график распредления для данных, полученных выше
value_borig.plot(kind='bar',figsize=(20,10))
# По гистограмме распределения категориальных атрибутов о происхождения какао бобов можно увидеть, что присутствует хаос в данных. Для некоторых объектов названия мест происхождения написаны с ошибками, у некоторых объектов вообще отсутствует место происхождения, а так же есть батончики, состоящие из какао бобов из разных мест происхождения. В этой связи необходимо произвести предобработку данных для дальнейшего анализа.
# Для начала убрать объекты с пропущенными записями, используя функцию dropna. Необходимо отметить, что в качестве параметров функции передается 'axis', где 0 - удалять по строкам (объектам), 1 - удалять по столбцам, 'how', где 'any' - удалять строку или столбец, если есть пустой объект, 'all' - удалять строку или столбец, если все объекты пустые.
# Убираем объекты с пропущенными записями
delete = best_bar.dropna(axis=0)
# Заметим, что пустые записи из атрибута происхождения бобов не удалилсь. Значит "пустые" записи не являются пустыми. Выведите все уникальные значения атрибута 'bean_orig' с помощью функции unique() и найдите значение пустой записи.
# Выводим уникальные значения атрибута 'bean_orig'
delete['bean_orig'].unique()
# Находим значение пустой записи '\xa0'
# Для того, чтобы исключить данный элемент, необходимо использовать слудующую конструкцию: best_bar['bean_orig'] != 'element value', которая вернет бинарный массив элементов, где True - атрибут объекта не принимает значение 'element value', False - атрибут объекта принимает значение 'element value'. Затем полученную бинарную матрицу передаем в качестве маски, по которой будут выбраны объекты из нашего массива данных.
# Заводим переменную, в которую помещаем массив, состоящий из True и False
matrix = delete['bean_orig'] != '\xa0'
matrix.value_counts()
# Передаем нашу маску, по которой будут выбраны объекты без пустых записей
q = delete[matrix]
q
print(q.shape)
# Помимо этого необходимо решить проблему с батончиками из разных какао бобов. Лучшим способом является обработка и разделение этих составных бобов. Но на данном этапе, в виду их малого вклада, мы уберем их и сделаем одним видом сложного состава 'complex'. Будем считать редковстречающимися, если в выборке их не более 5.
# Создаем список, в который записываем название стран, повторяющихся не более 5 раз
# Collect bean origins that occur at most 5 times in the data (rare origins).
stroka = []
k = q['bean_orig'].value_counts()
for i in range(k.count()):
    if (k[i] <= 5):
        stroka.append(k.index[i])
stroka
# Using the list built above, relabel those rare origins as 'complex'.
for i in range(len(stroka)):
    q.loc[q['bean_orig'] == stroka[i], 'bean_orig'] = 'complex'
q
q['bean_orig'].value_counts()
# Постройте круговую гистограмму распределения для стран происзводителей какао бобов с помощью функции pie().
# Строим круговую гистограмму для стран происзводителей какао бобов
q['bean_orig'].value_counts().plot.pie(figsize=(20,15))
# ## Попарные графики распределений
from sklearn import preprocessing
from seaborn import pairplot
import seaborn as sns
# Построить попарные распределения для всех признаков.
# Строим попарные распределения для всех признаков
pairplot(q, vars = ['company_loc', 'rate', 'percent', 'bean_orig'])
# Определите, где растут лучшие какао бобы, вычислив средний значение и медиану рейтинга батончика для каждого места происхождения. Напишите первую тройку лучших в обоих случаях.
# Mean rating per bean origin.  Bug fix: the original cell accidentally
# pasted the same expression twice on one line, which is a SyntaxError.
q.groupby(['bean_orig'])['rate'].mean()
q.groupby(['bean_orig'])['rate'].mean().sort_values(ascending=False).head(3)
# Вычислив среднее значение, можем сделать вывод, что лучшие какао бобы - Гаити, Гондурас, Конго
q.groupby(['bean_orig'])['rate'].median()
q.groupby(['bean_orig'])['rate'].median().sort_values(ascending=False).head(3)
# Вычислив медиану, можем сделать вывод, что лучшие какао бобы - Гондурас, Сан-Томе и Принсипи, Вьетнам
# Определите, где производят лучшие батончики, вычислив средний значение и медиану рейтинга батончика для каждого места происхождения.
q.groupby(['company_loc'])['rate'].mean()
q.groupby(['company_loc'])['rate'].mean().sort_values(ascending=False).head(3)
# Вычислив среднее значение, можем сделать вывод, что лучшие батончики производят в Чили, Амстердаме, Нидерландах
q.groupby(['company_loc'])['rate'].median()
q.groupby(['company_loc'])['rate'].median().sort_values(ascending=False).head(3)
# Вычислив медиану, можем сделать вывод, что лучшие батончики производят в Чили, Амстердаме, Канаде
| 8,627 |
/DZnetology/Вебинар по теме Основы Python/Diplomnaya_rabota_po_python.ipynb | 0e2f3ca5f0e6c507767c79e0b40e0a94a3ec66de | [] | no_license | Bohram/test | https://github.com/Bohram/test | 2 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 229,011 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CSE-221710305047-SAI_D_VETUKURI
# Use the following link for solving the given assignment Questions
#
# https://raw.githubusercontent.com/kotagiri-preeti/Summer-Internship-AI-and-ML/master/german_credit_data.csv
#
import pandas as pd
import numpy as np
df=pd.read_csv("https://raw.githubusercontent.com/kotagiri-preeti/Summer-Internship-AI-and-ML/master/german_credit_data.csv")
df
# If you find Unnamed column in the data remove that by filtering
#
df.drop(['Unnamed: 0'],axis=1,inplace=True)# removing the column which is Unnamed
df
# Display the top seven rows and last Three rows
#
df.head(n=7)# to get top 7 rows
df.tail(n=3)# to get last 3 rows
# Check the total number of entries you have
#
print("The total number of rows for the data is",df.shape[0]) # to get the total number of rows
# Check the No of features,Print features Names(Column Names)
print("The number of features in the data is",df.shape[1])# to get the total no. of columns
print("The Feature for the data is:")
for i in df.columns: # to get all the columns from the data
print(i)
# Print the row indices of the dataframe
#
print(list(df.index)) # print the row indices of the dataframe
# Check the dtype of each and every columns
d=df.dtypes
d
# Description for Numerical Column min,max,count,25%,50%,75%,std
df.describe(include=[np.number]) # to describe the numerical columns
# Description for the categorical Columns count,top,freq
#
# Describe only the categorical (object-dtype) columns: count, top, freq.
# Bug fix: np.object was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `object` is the exact equivalent and works on every version.
df.describe(include=[object])
# Missing value count column wise
df.isnull().sum()
# Check the output for the info function
df.info()
# Access particular rows and columns using iloc,loc
df.iloc[0:3] # access the rows using iloc
df.iloc[:,0:4] # access the columns using iloc
df.loc[0:5] # access the rows using loc
df.loc[:,'Age'] # access the columns using loc
# How many unique values are there in a particular column
for i in df.columns:
print(i,len(df[i].unique()))
е две самые скоррелированные и две наименеескоррелированные переменные. - 10 баллов
import matplotlib.pyplot as plt
import seaborn as sb
# Annotated heatmap of pairwise correlations between the HR features.
# NOTE(review): on pandas >= 2.0, hr_df.corr() raises if non-numeric columns
# exist -- may need hr_df.corr(numeric_only=True); confirm the frame's dtypes.
sb.set(rc={'figure.figsize':(15,5)})
sb.heatmap(hr_df.corr(), annot=True)
# Данная матрица отражает совокупность корреляций. Сделаем на ее примере выводы о взаимосвязях следующих переменных:
#
# Обратная зависимость между удовлетворенностью работой и увольнениями.
# Прямая зависимость между средним отработанным временем и последней оценкой.
# Зависимость между № проекта и последней оценкой, а также номером проекта и средним отработанным временем.
# 4) Рассчитайте сколько сотрудников работает в каждом департаменте - 5 баллов
number_employees=hr_df['department'].value_counts()
number_employees
# Employees per department, sorted ascending so the horizontal bar chart
# reads smallest-to-largest.
department = hr_df.groupby(['department']).agg({'number_project': ['count']})
department.columns = ['people']
department = department.sort_values('people', ascending=True)
# Bug fix: the original plotted an undefined name `names_dep` (NameError);
# the aggregated frame built above is called `department`.
plt.barh(department.index, department['people'])
plt.xlabel('Кол-во сотрудников')
plt.ylabel('Департамент')
plt.title('Кол-во сотрудников в каждом департаменте')
plt.grid()
# 5) Показать распределение сотрудников по зарплатам - 5 баллов
hr_df['salary'].value_counts()
salary_levels = hr_df['salary'].value_counts().reset_index()
plt.bar(salary_levels['index'], salary_levels['salary'])
plt.xlabel('Уровень зарплаты')
plt.ylabel('Кол-во сотрудников')
plt.title('Распределение сотрудников по зарплатам')
plt.grid()
# 6) Показать распределение сотрудников по зарплатам в каждом департаменте по отдельности - 5 баллов
deportation_staf_salary = hr_df.groupby(['department', 'salary']).agg({'salary':['count']})
deportation_staf_salary
plt.figure(figsize=(15, 5))
sb.countplot(x = 'department', hue = 'salary', data = hr_df)
plt.title('Распределение сотрудников по зарплатам и отделам')
plt.xlabel('Уровень зарплаты')
plt.ylabel('Количество сотрудников')
# 7) Проверить гипотезу, что сотрудники с высоким окладом проводят на работе больше времени, чем сотрудники с низким окладом - 10 баллов
# H0 - Сотрудники с высокими и низким окладом работают одинково
#
# H1 - Cотрудники с высоким окладом проводят на работе больше времени, чем сотрудники с низким окладом
# Split employees by salary band for the hypothesis test.
df_high=hr_df[(hr_df['salary']=='high')]
df_low=hr_df[(hr_df['salary']=='low')]
from scipy import stats as st
# Quick look at the group means.
# NOTE(review): DataFrame.mean() on a frame with non-numeric columns raises
# on pandas >= 2.0 -- may need .mean(numeric_only=True); confirm pandas version.
df_high_a = hr_df[hr_df['salary']=='high']
df_high_a.mean()
df_low_a = hr_df[hr_df['salary']=='low']
df_low_a.mean()
# Significance level for the test.
a = 0.05
# Welch's t-test (equal_var=False): monthly hours of high- vs low-salary staff.
result = st.ttest_ind(df_high['average_montly_hours'], df_low['average_montly_hours'], equal_var=False)
print(result)
# Reject H0 when the p-value is below the significance level.
if (result.pvalue < a):
    print('Отвергаем H0, Cотрудники с высоким окладом проводят на работе больше времени, чем сотрудники с низким окладом')
else:
    print('Не отвергаем H0, Сотрудники с высокими и низким окладом работают одинково')
hr_df.groupby(['salary']).agg({'average_montly_hours':['mean']})
data = hr_df[['department','salary','average_montly_hours']]
data.average_montly_hours.hist()
plt.xlabel('Средние кол-во часов в месяц')
plt.ylabel('Кол-во Сотрудников')
plt.title('Распределение рабочего времени')
# 8) Рассчитать следующие показатели среди уволившихся и неуволившихся сотрудников (по отдельности): - 10 баллов
#
# Доля сотрудников с повышением за последние 5 лет
# Средняя степень удовлетворенности
# Среднее количество проектов
# Сформируем таблицу с уволившимися сотрудниками
employees_quit = hr_df[hr_df['left'] == 1]
employees_quit.head()
employees_quit.describe()
#Доля сотрудников с повышением за последние 5 лет
employees_quit.loc[employees_quit['promotion_last_5years'] == 1].count()[0]/employees_quit.count()[0]
print(f' Средняя степень удовлетворенности {employees_quit.satisfaction_level.mean()}')
print(f' Среднее количество проектов {employees_quit.number_project.mean()}')
# +
# Сформируем таблицу по оставшимся в компании сотрудникам
stayed_work = hr_df[hr_df['left'] == 0]
stayed_work.head()
# -
stayed_work.describe()
#Доля сотрудников с повышением за последние 5 лет
stayed_work.loc[stayed_work['promotion_last_5years'] == 1].count()[0]/stayed_work.count()[0]
print(f' Средняя степень удовлетворенности {stayed_work.satisfaction_level.mean()}')
print(f' Среднее количество проектов {stayed_work.number_project.mean()}')
# Итог:
# Доля сотрудников с повышением за последние 5 лет \
# 0.0053 - уволившиеся сотрудники\
# 0.0260 - оставшиеся на работе\
# Средняя степень удовлетворенности\
# 0.44 - уволившиеся сотрудники\
# 0.66 - оставшиеся на работе\
# Среднее количество проектов\
# 3.85 - уволившиеся сотрудники\
# 3.78 - оставшиеся на работе\
#
# Сотрудники которые удовлетворены своей работой, чаще получают свою повышение, хотя проектов выполяют практически так же.
# Тут надо разбираться, может они удовлетворены своей работой потому, что повышали.
# 9) Разделить данные на тестовую и обучающую выборкиПостроить модель LDA, предсказывающую уволился лисотрудник на основе имеющихся факторов (кроме department иsalary)Оценить качество модели на тестовой выборки - 20 баллов
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# Feature frame for the model: all numeric factors plus the target 'left'
# (department and salary are deliberately excluded per the assignment).
hr_df_lda=hr_df[['satisfaction_level','last_evaluation','number_project','average_montly_hours','time_spend_company','Work_accident','left','promotion_last_5years']]
hr_df_lda.head()
# 80/20 train/test split, then separate the target from the features.
data_train, data_test = train_test_split(hr_df_lda, test_size=0.2)
y_train, y_test = data_train['left'], data_test['left']
del data_train['left']
del data_test['left']
data_train
# Fit the linear discriminant analysis classifier on the training features.
lda = LinearDiscriminantAnalysis()
lda.fit(data_train, y_train)
# Column 0 = true labels, column 1 = predictions (side by side for inspection).
result = pd.DataFrame([y_test.values, lda.predict(data_test)]).T
result
y_true = result[0]
y_pred = result[1]
# Confusion matrix on the held-out test set.
confusion_matrix(y_true, y_pred)
# Значительная погрешность модели. Нужно разбираться.
# 10) Загрузить jupyter notebook с решение на github и прислать ссылку - 5 баллов
# ок)
| 8,084 |
/ML-Week2/Lecture5 python data visualization/Seaborn/.ipynb_checkpoints/Grids-checkpoint.ipynb | 5161c0fb50974ca614f3ee78157d50bef838ad67 | [] | no_license | Napat/sidtechtalent | https://github.com/Napat/sidtechtalent | 0 | 0 | null | 2017-09-17T13:56:07 | 2017-09-17T07:05:16 | null | Jupyter Notebook | false | false | .py | 601,357 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import matplotlib.pyplot as plt
# %matplotlib inline
from cipher.keyword_cipher import *
from cipher.vigenere import *
from support.utilities import *
from support.text_prettify import *
c6a = sanitise(open('6a.ciphertext').read())
c6b = sanitise(open('6b.ciphertext').read())
# -
(key_a_word, key_a_wrap), score = keyword_break_mp(c6a)
(key_a_word, key_a_wrap), score
print(keyword_decipher(c6a, key_a_word, wrap_alphabet=key_a_wrap))
c6a
# Hand-recovered letter-substitution key (ciphertext letter -> plaintext).
# Bug fix: the original literal listed 'd': 'R' twice -- a duplicate dict key
# that Python silently collapses.  Note there is no mapping for 'b', so any
# 'b' in the ciphertext passes through str.translate unchanged.
translations = {'c': 'H', 'r': 'A', 'd': 'R', 'p': 'Y', 'i': 'C', 'v': 'L', 's': 'I', 'f': 'E',
                'h': 'T', 'a': 'B', 'g': 'S', 'm': 'W', 'y': 'O', 'z': 'P', 'n': 'G', 'j': 'U', 't': 'J',
                'x': 'N', 'k': 'V', 'l': 'D', 'w': 'M', 'u': 'K', 'e': 'F', 'q': 'Z', 'o': 'X'}
translation_table = ''.maketrans(translations)
plaintext = ' '.join(segment(c6a.translate(translation_table)))
plaintext
''.join(translations[l] for l in sorted(translations))
inverted_translations = {translations[a]: a for a in translations}
''.join(inverted_translations[l] for l in sorted(inverted_translations))
keyword_cipher_alphabet_of('railfences', wrap_alphabet=KeywordWrapAlphabet.from_last)
print(' '.join(segment(keyword_decipher(c6a, 'railfences', wrap_alphabet=KeywordWrapAlphabet.from_last))))
key_b, score = vigenere_frequency_break(c6b)
key_b, score
print(' '.join(segment(sanitise(vigenere_decipher(sanitise(c6b), key_b)))))
simulated_annealing_break(c6a, fitness=Ptrigrams)
keyword_decipher(c6a, 'railfencstuvwxyzbdghjkmopq')
each node $i \in N$, and you shouldn't need any other variables.
# #### Solution:
#
# $$\max \Sigma_i^N x_i$$
# restrict to $$x_i \in \{0,1\}, \forall i\in N$$
# $$x_i+x_j\leq 1,\forall (i,j)\in E$$
# ### Problem 2
#
# Implement a function that solves the integer program given a graph as input.
def independent_set_ip(graph):
    """Computes a maximum independent set of a graph using an integer program.

    Args:
      - graph (nx.Graph): an undirected graph

    Returns:
      (list[(int, int)]) The IP solution as a list of node-value pairs.
    """
    # One binary indicator per node: 1 iff the node belongs to the set.
    indicator = pulp.LpVariable.dicts("node", graph.nodes(), 0, 1, pulp.LpInteger)
    model = pulp.LpProblem("ISP", pulp.LpMaximize)
    # Objective: maximize the number of selected nodes.
    model += pulp.lpSum(indicator[v] for v in graph.nodes())
    # Independence constraint: at most one endpoint of each edge is chosen.
    for (u, v) in graph.edges():
        model += (indicator[u] + indicator[v]) <= 1
    model.solve()
    return [(v, int(pulp.value(indicator[v]))) for v in graph.nodes()]
# The following code outputs the size of the sets computed by your function.
def set_weight(solution):
    """Computes the total weight of the solution of an LP or IP for independent set.

    Args:
      - solution (list[int, float]): the LP or IP solution

    Returns:
      (float) Total weight of the solution
    """
    # Accumulate the per-node values; the node ids themselves are ignored.
    total = 0
    for _node, value in solution:
        total += value
    return total
karate_ind_set = independent_set_ip(karate)
print "Size of karate set = ", set_weight(karate_ind_set)
power_ind_set = independent_set_ip(power)
print "Size of power set = ", set_weight(power_ind_set)
# ### Problem 3
#
# Take the *linear programming relaxation* of your integer program and implement a function to solve it. This simply means that in your integer program, you should replace each constraint $x_i \in \{0,1\}$ with $0 \leq x_i \leq 1$.
def independent_set_lp(graph):
    """Computes the solution to the linear programming relaxation for the
    maximum independent set problem.

    Args:
      - graph (nx.Graph): an undirected graph

    Returns:
      (list[(int, float)]) The LP solution as a list of node-value pairs.
    """
    # Same model as the IP, except the indicators are continuous in [0, 1].
    indicator = pulp.LpVariable.dicts("node", graph.nodes(), 0, 1, pulp.LpContinuous)
    model = pulp.LpProblem("ISP", pulp.LpMaximize)
    model += pulp.lpSum(indicator[v] for v in graph.nodes())
    # At most unit total weight on the endpoints of every edge.
    for (u, v) in graph.edges():
        model += (indicator[u] + indicator[v]) <= 1
    model.solve()
    return [(v, pulp.value(indicator[v])) for v in graph.nodes()]
# Let's see how the LP solutions compare to those of the IP.
karate_ind_set_relax = independent_set_lp(karate)
print "Value of karate set = ", set_weight(karate_ind_set_relax)
power_ind_set_relax = independent_set_lp(power)
print "Value of power set = ", set_weight(power_ind_set_relax)
# A heuristic way to convert a fractional solution to an independent set is as follows. For each node $i$, include the node in the set if $x_i > 1/2$, and discard it otherwise. This will yield a set of $a$ nodes which have $b$ edges between them. By removing at most one node for each edge, this yields an independent set of size at least $a - b$.
# Implement this rounding procedure.
def round_solution(solution, graph):
    """Finds the subgraph corresponding to the rounding of
    a solution to the independent set LP relaxation.

    Args:
      - solution (list[(int, float)]): LP solution
      - graph (nx.Graph): the original graph

    Returns:
      (nx.Graph) The subgraph corresponding to rounded solution
    """
    # Work on a copy so the caller's graph is left untouched; keep a node
    # only when its fractional LP value is strictly greater than 1/2.
    rounded = graph.copy()
    for node, value in solution:
        if value <= 0.5:
            rounded.remove_node(node)
    return rounded
# The following function assesses the quality of the heuristic approach.
def solution_quality(rounded, optimal):
    """Computes the percent optimality of the rounded solution.

    Args:
      - rounded (nx.Graph): the graph obtained from rounded LP solution
      - optimal: size of maximum independent set
    """
    # Deleting one endpoint per remaining edge leaves an independent set of
    # at least (nodes - edges); compare that lower bound to the optimum.
    lower_bound = rounded.number_of_nodes() - rounded.number_of_edges()
    return lower_bound / float(optimal)
# Let's check the quality of this approach compared to the optimal IP solutions.
# +
karate_rounded = round_solution(karate_ind_set_relax, karate)
karate_quality = solution_quality(karate_rounded, set_weight(karate_ind_set))
print "Quality of karate rounded solution = {:.0f}%".format(karate_quality*100)
power_rounded = round_solution(power_ind_set_relax, power)
power_quality = solution_quality(power_rounded, set_weight(power_ind_set))
print "Quality of power rounded solution = {:.0f}%".format(power_quality*100)
# -
| 6,823 |
/nagao3/alatcala.ipynb | 83ea315c15ac36c5ca387b94f54c27c1db98347e | [] | no_license | santoshkumarradha/NagaO2-MoO3 | https://github.com/santoshkumarradha/NagaO2-MoO3 | 2 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 443,206 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from lmf import lmf #load the lmf calculator
import numpy as np
from ase.io import read
from ase.eos import calculate_eos
from ase.io.trajectory import Trajectory
import matplotlib.pyplot as plt
from ase.eos import EquationOfState as eos
from ase.units import Bohr,Rydberg
from ase.io.trajectory import TrajectoryReader as trread
from ase.io.trajectory import TrajectoryReader as trread
import matplotlib.pyplot as plt
plt.plot(eos.getplotdata()[4],eos.getplotdata()[5])
plt.scatter(eos.getplotdata()[6],eos.getplotdata()[7])
from ase.eos import EquationOfState as eos
from ase.units import Bohr,Rydberg
from ase.io.trajectory import TrajectoryReader as trread
def plot_v(v,e,ax,label,c):
data=eos(v,e,eos='birchmurnaghan').getplotdata()
ax.plot(data[4],data[5],c=c,label=label)
ax.scatter(data[6],data[7],alpha=0.5,s=20,c=c,marker="o")
# +
fig,ax=plt.subplots()
v=np.array([1418.665249,1461.652225,1505.498912,1335.236383])* Bohr**3 * 0.25
e=np.array([-18021.3517579,-18021.3420203,-18021.3225567,-18021.3367162])* 0.25 * Rydberg
plot_v(v,e,ax,label="Pn21 LDA",c="k")
ax.legend()
from ase.io.trajectory import Trajectory
traj = Trajectory('nagao2-r3m-lda.traj')
v=[i.get_volume() for i in traj]
e=[i.get_total_energy() * Rydberg for i in traj]
plot_v(v,e,ax,label="r3m LDA",c="r")
def murnaghan(V, E0, B0, BP, V0):
'From PRB 28,5480 (1983'
E = E0 + B0 * V / BP * (((V0 / V)**BP) / (BP - 1) + 1) - V0 * B0 / (BP - 1)
return E
data=eos(v,e,eos='murnaghan')
data.fit()
V=np.linspace(38,43,100)
E=murnaghan(V,data.eos_parameters[0],data.eos_parameters[1],data.eos_parameters[2],data.eos_parameters[3])
ax.plot(V,E,c="r")
ax.legend()
# +
from ase.io.trajectory import Trajectory
def plot_v(v,e,ax,label,c,sub=0):
data=eos(v,e-sub,eos='birchmurnaghan').getplotdata()
ax.plot(data[4],data[5],c=c,label=label)
ax.scatter(data[6],data[7],alpha=0.5,s=20,c=c,marker="o")
fig,ax=plt.subplots()
traj = Trajectory('nagao2-r3m-gga.traj')
v=[i.get_volume() for i in traj]
e1=np.array([i.get_total_energy() * Rydberg for i in traj])
plot_v(v,e1,ax,label="r-3m GGA",c="r")
eos_data=eos(v,e1,eos='murnaghan')
eos_data.fit()
V=np.linspace(40,47,100)
E=murnaghan(V,eos_data.eos_parameters[0],eos_data.eos_parameters[1],eos_data.eos_parameters[2],eos_data.eos_parameters[3])
ax.plot(V,E,c="r")
ax.legend()
from ase.io.trajectory import Trajectory
traj = Trajectory('nagao2-p2n1_gga.traj')
v=[i.get_volume()*0.25 for i in traj]
e2=np.array([i.get_total_energy() * Rydberg *0.25 for i in traj])
plot_v(v,e2,ax,label="Pn21 GGA",c="k")
eos_data=eos(v,e2,eos='murnaghan')
eos_data.fit()
V=np.linspace(52,59,100)
E=murnaghan(V,eos_data.eos_parameters[0],eos_data.eos_parameters[1],eos_data.eos_parameters[2],eos_data.eos_parameters[3])
ax.plot(V,E,c="k")
ax.legend()
# -
# +
def plot_gga(ax):
traj = Trajectory('nagao2-r3m-gga.traj')
v=[i.get_volume() for i in traj]
e1=np.array([i.get_total_energy() * Rydberg for i in traj])
eos_data=eos(v,e1,eos='murnaghan')
eos_data.fit()
V=np.linspace(35,54,100)
E=murnaghan(V,eos_data.eos_parameters[0],eos_data.eos_parameters[1],eos_data.eos_parameters[2],eos_data.eos_parameters[3])
r3m_e=E;r3m_v=V
traj = Trajectory('nagao2-p2n1_gga.traj')
v=[i.get_volume()*0.25 for i in traj]
e1=np.array([i.get_total_energy() * Rydberg*0.25 for i in traj])
eos_data=eos(v,e1,eos='murnaghan')
eos_data.fit()
V=np.linspace(45,65,100)
E=murnaghan(V,eos_data.eos_parameters[0],eos_data.eos_parameters[1],eos_data.eos_parameters[2],eos_data.eos_parameters[3])
p2n1_e=E;p2n1_v=V
minimum=np.min([r3m_e,p2n1_e])
c="k";label="R$\\bar{3}$m"
ax.plot(r3m_v,r3m_e-minimum,c=c,label=label)
c="r";label="Pna$2_1$"
ax.plot(p2n1_v,p2n1_e-minimum,c=c,label=label)
ax.legend()
# +
fig,ax1=plt.subplots(1,2,figsize=(8,4))
ax=ax1[0]
ax.set_title("LDA")
import pickle
with open('data_r3m.pickle', 'rb') as handle:
data = pickle.load(handle)
i='r-3m-lda'
v=data[i][0]
e=data[i][1]
c="k";label="R$\\bar{3}$m"
eos_data=eos(v,e,eos='birchmurnaghan').getplotdata()
# ax.plot(eos_data[4],eos_data[5],c=c,label=label)
# ax.scatter(eos_data[6],eos_data[7],alpha=0.5,s=20,c=c,marker="o")
def murnaghan(V, E0, B0, BP, V0):
    """Murnaghan equation of state, from PRB 28, 5480 (1983).

    Parameters
    ----------
    V : float or array-like
        Volume(s) at which to evaluate the energy (vectorizes via numpy).
    E0 : float
        Equilibrium energy.
    B0 : float
        Bulk modulus at equilibrium.
    BP : float
        Pressure derivative of the bulk modulus (must not equal 1).
    V0 : float
        Equilibrium volume.

    Returns
    -------
    Energy at volume V; equals E0 at V == V0.
    """
    E = E0 + B0 * V / BP * (((V0 / V)**BP) / (BP - 1) + 1) - V0 * B0 / (BP - 1)
    return E
# Murnaghan fit of the R-3m LDA data and baseline-shifted curve (black).
eos_data=eos(v,e,eos='murnaghan')
eos_data.fit()
V=np.linspace(34,52,100)
E=murnaghan(V,eos_data.eos_parameters[0],eos_data.eos_parameters[1],eos_data.eos_parameters[2],eos_data.eos_parameters[3])
# Reference minimum taken from the R-3m curve; the Pna2_1 curve below is
# shifted by this same value so the two are directly comparable.
minimum=np.min(E)
ax.plot(V,E-minimum,c=c,label=label)
# Pna2_1 LDA data; 0.25 rescales supercell values (assumed 4 f.u./cell).
with open('data_pn21.pickle', 'rb') as handle:
    data = pickle.load(handle)
i='pn21-lda'
v=np.array(data[i][0])*0.25
e=np.array(data[i][1])*0.25
c="r";label="Pna$2_1$"
eos_data=eos(v,e,eos='birchmurnaghan').getplotdata()
# Skip the first n fitted points (presumably outside the trusted range).
n=8
ax.plot(eos_data[4][n:],eos_data[5][n:]-minimum,c=c,label=label)
# ax.scatter(eos_data[6],eos_data[7],alpha=0.5,s=20,c=c,marker="o")
ax.legend()
ax.set_xlim(32,65)
ax.set_ylim(-.1,1.2)
# Right panel: PBE curves drawn by plot_gga (defined earlier).
ax=ax1[1]
ax.set_title("PBE")
plot_gga(ax)
ax.set_ylim(-.1,1.2)
ax.set_xlim(35,65)
for i in ax1:
    i.set_ylabel("Energy (eV)")
    i.set_xlabel("Volume (A$^{o3}$)")
    i.grid()
plt.tight_layout()
plt.savefig("E-V.png",dpi=300)
# +
def plot_gga(ax,ax1):
    """Plot PBE energy-volume curves on `ax` and enthalpy-vs-pressure
    curves on `ax1` for the R-3m and Pna2_1 phases.

    Returns (h1, h2, p1, p2): enthalpy/pressure arrays for R-3m (h1, p1)
    and Pna2_1 (h2, p2), computed from the fitted curves.
    """
    def murnaghan(V, E0, B0, BP, V0):
        'From PRB 28,5480 (1983)'
        E = E0 + B0 * V / BP * (((V0 / V)**BP) / (BP - 1) + 1) - V0 * B0 / (BP - 1)
        return E
    # --- R-3m: read trajectory, fit Murnaghan EOS, sample dense grid ---
    traj = Trajectory('nagao2-r3m-gga.traj')
    v=[i.get_volume() for i in traj]
    e1=np.array([i.get_total_energy() * Rydberg for i in traj])
    eos_data=eos(v,e1,eos='murnaghan')
    eos_data.fit()
    V=np.linspace(35,54,100)
    E=murnaghan(V,eos_data.eos_parameters[0],eos_data.eos_parameters[1],eos_data.eos_parameters[2],eos_data.eos_parameters[3])
    r3m_e=E;r3m_v=V
    # --- Pna2_1: 0.25 rescales supercell values (assumed 4 f.u./cell) ---
    traj = Trajectory('nagao2-p2n1_gga.traj')
    v=[i.get_volume()*0.25 for i in traj]
    e1=np.array([i.get_total_energy() * Rydberg*0.25 for i in traj])
    eos_data=eos(v,e1,eos='murnaghan')
    eos_data.fit()
    V=np.linspace(45,65,100)
    E=murnaghan(V,eos_data.eos_parameters[0],eos_data.eos_parameters[1],eos_data.eos_parameters[2],eos_data.eos_parameters[3])
    p2n1_e=E;p2n1_v=V
    # Energy-volume panel, shifted so the global minimum sits at 0.
    minimum=np.min([r3m_e,p2n1_e])
    c="k";label="R$\\bar{3}$m"
    ax.plot(r3m_v,r3m_e-minimum,c=c,label=label)
    c="r";label="Pna$2_1$"
    ax.plot(p2n1_v,p2n1_e-minimum,c=c,label=label)
    ax.legend()
    # Enthalpy panel. Pressure from the numerical derivative p = -dE/dV
    # (uniform grid spacing), enthalpy as coded: h = E - p*V.
    c="k";label="R$\\bar{3}$m"
    p=-1*np.gradient(r3m_e,np.diff(r3m_v)[0])
    h=r3m_e-p*r3m_v
    h1=h
    p1=p
    ax1.plot(p,h,c=c,label=label)
    c="r";label="Pna$2_1$"
    p=-1*np.gradient(p2n1_e,np.diff(p2n1_v)[0])
    h=p2n1_e-p*p2n1_v
    h2=h
    p2=p
    ax1.plot(p,h,c=c,label=label)
    ax1.legend()
    return h1,h2,p1,p2
fig,ax=plt.subplots(1,2)
h1,h2,p1,p2=plot_gga(ax[0],ax[1])
plt.tight_layout()
# -
# Locate the phase-transition pressure: fit degree-4 polynomials to both
# enthalpy(pressure) curves and plot their difference; the zero crossing
# (marked at p ≈ -0.009) is where the two phases have equal enthalpy.
p=np.linspace(-.1,.15,200)
h1p = np.poly1d(np.polyfit(p1, h1, 4))
h2p = np.poly1d(np.polyfit(p2, h2, 4))
plt.plot(p,h1p(p)-h2p(p))
plt.axhline(0,c="k")
plt.axvline(-.009,c="k")
plt.xlim(-.05,.05)
# Convert the crossing pressure to GPa (160.2 is presumably the
# eV/Angstrom^3 -> GPa conversion factor — TODO confirm units).
-.009*160.2
# +
def plot_gga(ax,ax1):
    """Plot PBE energy-volume curves on `ax` and enthalpy-vs-pressure
    curves on `ax1` for the R-3m and Pna2_1 phases (400-point grids).

    Returns
    -------
    tuple : (h1, h2, p1, p2, p2n1_e, p2n1_v, r3m_e, r3m_v)
        Enthalpy/pressure arrays for R-3m (h1, p1) and Pna2_1 (h2, p2),
        plus the sampled energy/volume grids of both phases.
    """
    def murnaghan(V, E0, B0, BP, V0):
        'From PRB 28,5480 (1983)'
        E = E0 + B0 * V / BP * (((V0 / V)**BP) / (BP - 1) + 1) - V0 * B0 / (BP - 1)
        return E
    def fitted_curve(traj_file, scale, vmin, vmax):
        # Fit a Murnaghan EOS to the (volume, energy) points of an ASE
        # trajectory and sample it on a dense 400-point volume grid.
        # `scale` rescales supercell values (0.25 assumed = 4 f.u./cell).
        traj = Trajectory(traj_file)
        vols = [atoms.get_volume() * scale for atoms in traj]
        enes = np.array([atoms.get_total_energy() * Rydberg * scale for atoms in traj])
        fit = eos(vols, enes, eos='murnaghan')
        fit.fit()
        V = np.linspace(vmin, vmax, 400)
        E = murnaghan(V, fit.eos_parameters[0], fit.eos_parameters[1],
                      fit.eos_parameters[2], fit.eos_parameters[3])
        return E, V
    def enthalpy_curve(E, V):
        # Numerical pressure p = -dE/dV on the uniform grid, and the
        # enthalpy as coded in the original analysis: h = E - p*V.
        p = -1 * np.gradient(E, np.diff(V)[0])
        h = E - p * V
        return h, p
    r3m_e, r3m_v = fitted_curve('nagao2-r3m-gga.traj', 1, 35, 54)
    p2n1_e, p2n1_v = fitted_curve('nagao2-p2n1_gga.traj', 0.25, 45, 65)
    # Energy-volume panel, shifted so the global minimum sits at 0.
    minimum = np.min([r3m_e, p2n1_e])
    ax.plot(r3m_v, r3m_e - minimum, c="k", label="R$\\bar{3}$m")
    ax.plot(p2n1_v, p2n1_e - minimum, c="r", label="Pna$2_1$")
    ax.legend()
    # Enthalpy-pressure panel for both phases.
    h1, p1 = enthalpy_curve(r3m_e, r3m_v)
    ax1.plot(p1, h1, c="k", label="R$\\bar{3}$m")
    h2, p2 = enthalpy_curve(p2n1_e, p2n1_v)
    ax1.plot(p2, h2, c="r", label="Pna$2_1$")
    ax1.legend()
    return h1, h2, p1, p2, p2n1_e, p2n1_v, r3m_e, r3m_v
fig,ax=plt.subplots(1,3,figsize=(15,5))
h1,h2,p1,p2,p2n1_e,p2n1_v,r3m_e,r3m_v=plot_gga(ax[0],ax[1])
ax[0].set_xlabel("Vol (A$^3$)")
ax[0].set_ylabel("Energy (eV)")
ax[1].set_xlabel("Presure")
ax[1].set_ylabel("Enthalpy")
ax[1].set_xlim(-.05,.05)
# NOTE(review): p1 belongs to R-3m and p2 to Pna2_1, but here p1 is plotted
# against p2n1_v and p2 against r3m_v — the pairings (and colors) look
# swapped; verify before trusting this panel.
ax[2].plot(p2n1_v,p1,c="r")
ax[2].plot(r3m_v,p2,c="k")
ax[2].axhline(0,c="b")
ax[2].set_ylabel("Presure")
ax[2].set_xlabel("Volume")
plt.tight_layout()
# -
# Standalone fit of the Pna2_1 trajectory to inspect the EOS parameters.
traj = Trajectory('nagao2-p2n1_gga.traj')
v=[i.get_volume()*0.25 for i in traj]
e1=np.array([i.get_total_energy() * Rydberg*0.25 for i in traj])
eos_data=eos(v,e1,eos='murnaghan')
eos_data.fit()
eos_data.eos_parameters
# Enthalpy-difference curve (degree-4 polynomial fits) to locate the
# transition pressure at the zero crossing.
p=np.linspace(-.1,.15,200)
h1p = np.poly1d(np.polyfit(p1, h1, 4))
h2p = np.poly1d(np.polyfit(p2, h2, 4))
plt.plot(p,h1p(p)-h2p(p),c="r")
plt.axhline(0)
# h1,h2,p1,p2,p2n1_e,p2n1_v,r3m_e,r3m_v
# NOTE(review): p has 200 points while p2n1_e/p2n1_v/h1 have 400; also this
# pairs the polynomial grid p with Pna2_1 energies and the R-3m enthalpy h1
# — the printed rows do not correspond to a single consistent curve.
print("pres ene vol ent")
for i,j in enumerate(p):
    print("{:3f} {:3f} {:3f} {:3f}".format(p[i],p2n1_e[i],p2n1_v[i],h1[i]))
# dE/dV for both phases. NOTE(review): the second curve uses p2n1 energies
# but is plotted against r3m_v — likely meant to be p2n1_v.
y=np.gradient(r3m_e,np.diff(r3m_v)[0])
plt.plot(r3m_v,y,c="r")
y=np.gradient(p2n1_e,np.diff(p2n1_v)[0])
plt.plot(r3m_v,y,c="k")
plt.axvline(42)
#r3m_e,r3m_v
| 10,080 |
/_posts/python-v3/fundamentals/colorscales/colorscales.ipynb | da2b4961209837ad2b0694ee0020765d64edc2a4 | [
"CC-BY-3.0"
] | permissive | plotly/graphing-library-docs | https://github.com/plotly/graphing-library-docs | 54 | 82 | NOASSERTION | 2023-09-02T23:12:54 | 2023-06-10T20:45:50 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 17,085 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### New to Plotly?
# Plotly's Python library is free and open source! [Get started](https://plotly.com/python/getting-started/) by downloading the client and [reading the primer](https://plotly.com/python/getting-started/).
# <br>You can set up Plotly to work in [online](https://plotly.com/python/getting-started/#initialization-for-online-plotting) or [offline](https://plotly.com/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plotly.com/python/getting-started/#start-plotting-online).
# <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
# #### Version Check
# Plotly's python package is updated frequently. Run `pip install plotly --upgrade` to use the latest version.
import plotly
plotly.__version__
# NOTE(review): `plotly.plotly` (online mode) was split out into the
# chart-studio package in plotly 4 — this notebook targets plotly 3.x.
# ### Custom Discretized Heatmap Colorscale
# +
import plotly.plotly as py
# Repeating each normalized stop with two colors (e.g. 0.1 -> black, then
# 0.1 -> grey) produces hard boundaries, i.e. a discrete banded colorscale.
py.iplot([{
    'z': [
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    ],
    'type': 'heatmap',
    'colorscale': [
        # Let first 10% (0.1) of the values have color rgb(0, 0, 0)
        [0, 'rgb(0, 0, 0)'],
        [0.1, 'rgb(0, 0, 0)'],
        # Let values between 10-20% of the min and max of z
        # have color rgb(20, 20, 20)
        [0.1, 'rgb(20, 20, 20)'],
        [0.2, 'rgb(20, 20, 20)'],
        # Values between 20-30% of the min and max of z
        # have color rgb(40, 40, 40)
        [0.2, 'rgb(40, 40, 40)'],
        [0.3, 'rgb(40, 40, 40)'],
        [0.3, 'rgb(60, 60, 60)'],
        [0.4, 'rgb(60, 60, 60)'],
        [0.4, 'rgb(80, 80, 80)'],
        [0.5, 'rgb(80, 80, 80)'],
        [0.5, 'rgb(100, 100, 100)'],
        [0.6, 'rgb(100, 100, 100)'],
        [0.6, 'rgb(120, 120, 120)'],
        [0.7, 'rgb(120, 120, 120)'],
        [0.7, 'rgb(140, 140, 140)'],
        [0.8, 'rgb(140, 140, 140)'],
        [0.8, 'rgb(160, 160, 160)'],
        [0.9, 'rgb(160, 160, 160)'],
        [0.9, 'rgb(180, 180, 180)'],
        [1.0, 'rgb(180, 180, 180)']
    ],
    'colorbar': {
        'tick0': 0,
        'dtick': 1
    }
}], filename='heatmap-discrete-colorscale')
# -
# ### Colorscale for Scatter Plots
# +
import plotly.plotly as py
import plotly.graph_objs as go
# 40 markers on a flat line; marker color sweeps 0..39 mapped through the
# Viridis colorscale between cmin and cmax, with a labeled colorbar.
data = [
    go.Scatter(
        y=[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
        marker=dict(
            size=16,
            cmax=39,
            cmin=0,
            color=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
            colorbar=dict(
                title='Colorbar'
            ),
            colorscale='Viridis'
        ),
        mode='markers')
]
fig = go.Figure(data=data)
py.iplot(fig)
# -
# ### Colorscale for Contour Plot
# +
import plotly.plotly as py
import plotly.graph_objs as go
# Contour plot using a named built-in colorscale ('Jet').
data = [
    go.Contour(
        z=[[10, 10.625, 12.5, 15.625, 20],
           [5.625, 6.25, 8.125, 11.25, 15.625],
           [2.5, 3.125, 5., 8.125, 12.5],
           [0.625, 1.25, 3.125, 6.25, 10.625],
           [0, 0.625, 2.5, 5.625, 10]],
        colorscale='Jet',
    )
]
py.iplot(data, filename='simple-colorscales-colorscale')
# -
# ### Custom Heatmap Colorscale
# +
import plotly.plotly as py
import plotly.graph_objs as go
import six.moves.urllib
import json
# Fetch sample z-data and render it with a hand-built RdYlBu-style
# colorscale (list of [normalized_stop, color] pairs).
response = six.moves.urllib.request.urlopen('https://raw.githubusercontent.com/plotly/datasets/master/custom_heatmap_colorscale.json')
dataset = json.load(response)
data = [
    go.Heatmap(
        z=dataset['z'],
        colorscale=[[0.0, 'rgb(165,0,38)'], [0.1111111111111111, 'rgb(215,48,39)'], [0.2222222222222222, 'rgb(244,109,67)'], [0.3333333333333333, 'rgb(253,174,97)'], [0.4444444444444444, 'rgb(254,224,144)'], [0.5555555555555556, 'rgb(224,243,248)'], [0.6666666666666666, 'rgb(171,217,233)'], [0.7777777777777778, 'rgb(116,173,209)'], [0.8888888888888888, 'rgb(69,117,180)'], [1.0, 'rgb(49,54,149)']]
    )
]
py.iplot(data, filename='custom-colorscale')
# -
# ### Custom Contour Plot Colorscale
# +
import plotly.plotly as py
import plotly.graph_objs as go
# Same contour data as above, but with unevenly spaced custom stops.
data = [
    go.Contour(
        z=[[10, 10.625, 12.5, 15.625, 20],
           [5.625, 6.25, 8.125, 11.25, 15.625],
           [2.5, 3.125, 5., 8.125, 12.5],
           [0.625, 1.25, 3.125, 6.25, 10.625],
           [0, 0.625, 2.5, 5.625, 10]],
        colorscale=[[0, 'rgb(166,206,227)'], [0.25, 'rgb(31,120,180)'], [0.45, 'rgb(178,223,138)'], [0.65, 'rgb(51,160,44)'], [0.85, 'rgb(251,154,153)'], [1, 'rgb(227,26,28)']],
    )
]
py.iplot(data, filename='colorscales-custom-colorscale')
# -
# ### Custom Colorbar
# +
import plotly.plotly as py
import plotly.graph_objs as go
import six.moves.urllib
import json
# Same heatmap as before, now with a customized colorbar: explicit tick
# positions (tickvals) replaced by text labels (ticktext).
response = six.moves.urllib.request.urlopen('https://raw.githubusercontent.com/plotly/datasets/master/custom_heatmap_colorscale.json')
dataset = json.load(response)
data = [
    go.Heatmap(
        z=dataset['z'],
        colorscale=[[0.0, 'rgb(165,0,38)'], [0.1111111111111111, 'rgb(215,48,39)'], [0.2222222222222222, 'rgb(244,109,67)'],
                    [0.3333333333333333, 'rgb(253,174,97)'], [0.4444444444444444, 'rgb(254,224,144)'], [0.5555555555555556, 'rgb(224,243,248)'],
                    [0.6666666666666666, 'rgb(171,217,233)'],[0.7777777777777778, 'rgb(116,173,209)'], [0.8888888888888888, 'rgb(69,117,180)'],
                    [1.0, 'rgb(49,54,149)']],
        colorbar = dict(
            title = 'Surface Heat',
            titleside = 'top',
            tickmode = 'array',
            tickvals = [2,50,100],
            ticktext = ['Hot','Mild','Cool'],
            ticks = 'outside'
        )
    )
]
py.iplot(data, filename='custom-colorscale-colorbar')
# -
# ### Dash Example
# [Dash](https://plotly.com/products/dash/) is an Open Source Python library which can help you convert plotly figures into a reactive, web-based application. Below is a simple example of a dashboard created using Dash. Its [source code](https://github.com/plotly/simple-example-chart-apps/tree/master/dash-colorscaleplot) can easily be deployed to a PaaS.
# Embed the hosted Dash demo app and its source code as notebook IFrames.
from IPython.display import IFrame
IFrame(src= "https://dash-simple-apps.plotly.host/dash-colorscaleplot/" ,width="100%" ,height="650px", frameBorder="0")
from IPython.display import IFrame
IFrame(src= "https://dash-simple-apps.plotly.host/dash-colorscaleplot/code" ,width="100%" ,height=500, frameBorder="0")
# ### Reference
# See https://plotly.com/python/reference/ for more information and chart attribute options!
# +
# Documentation-site boilerplate: inject CSS and publish this notebook to
# the plotly docs via the internal `publisher` package.
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
# ! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
    'colorscales.ipynb', 'python/colorscales/', 'Colorscales',
    'How to set colorscales and heatmap colorscales in Python and Plotly. Divergent, sequential, and qualitative colorscales.',
    title = 'Colorscales in Python | Plotly',
    has_thumbnail='true', thumbnail='thumbnail/heatmap_colorscale.jpg',
    language='python',
    display_as='file_settings',
    order=22,
    ipynb= '~notebook_demo/187')
| 7,698 |
/exercise2/conv.ipynb | 427943ef8ba7543a0ce6dbb95e6571df9eca6900 | [] | no_license | StandardWobin/dl-lab-2018 | https://github.com/StandardWobin/dl-lab-2018 | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 146,685 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Imports
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
from time import time
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import json
import os
import datetime
from scipy import interpolate
from time import sleep
# # lenet tensorflow class build to be called from extern classes
class LeNetTensor:
    """Small LeNet-style CNN on MNIST using the TensorFlow 1.x graph/session
    API, with live matplotlib plotting of validation accuracy during
    training and JSON result dumps per run.
    """
    def __init__(self, mnistdata):
        # `mnistdata` is the object returned by
        # tensorflow.examples.tutorials.mnist input_data.read_data_sets
        # (has .train / .validation / .test splits).
        self.mnist = mnistdata
        # input_data.read_data_sets('MNIST_data', one_hot=True)
    def plottrainlifesetup(self, n_epoch, title):
        # Prepare a live-updating accuracy-vs-epoch figure.
        # NOTE(review): the if/else bodies below are jupytext-commented
        # IPython magics (%matplotlib notebook / inline); as a plain .py
        # file this is a syntax error — the cell only runs as a notebook.
        if float(mpl.__version__[:3]) >= 1.5:
            # %matplotlib notebook
        else:
            # %matplotlib inline
        self.fig, self.ax = plt.subplots(1, 1)
        self.ax.set_title(title)
        self.ax.set_xlim([0, n_epoch])
        self.ax.set_ylim([0, 1])
        self.ax.set_xlabel('Epoch')
        self.ax.set_ylabel('Validation Accurancy')
    def plottrainlifeupdate(self, dataC, csize, labels):
        # Redraw the live plot: update existing lines if present, otherwise
        # create `csize` lines (NaN placeholders for curves not yet run).
        # stolen from ml lecture
        if self.ax.lines:
            for i, data in enumerate(dataC):
                self.ax.lines[i].set_ydata(data)
            self.ax.set_ylim([0, 1])
            self.fig.canvas.draw()
        else:
            for i in range(0,csize):
                if i <= len(dataC) -1:
                    self.ax.plot(dataC[i], label=labels[i])
                else:
                    a = np.empty((len(dataC),1))
                    a[:] = np.nan
                    self.ax.plot(a, label=labels[i])
            self.ax.legend(loc='upper right')
            self.fig.canvas.draw()
    def validation_accuracy(self, loaded_mnist, accuracy, x, y_, batch_size=64):
        """Average the `accuracy` tensor over mini-batches of a split.

        NOTE(review): despite the name, this iterates loaded_mnist.test,
        not .validation — it actually measures test-set accuracy.
        """
        batch_num = int(loaded_mnist.test.num_examples / batch_size)
        test_accuracy = 0
        for i in range(batch_num):
            batch = loaded_mnist.test.next_batch(batch_size)
            test_accuracy += accuracy.eval(feed_dict={x: batch[0],
                                                      y_: batch[1]})
        test_accuracy /= batch_num
        return test_accuracy
    def convNN(self, x, dev, num_filters, biasvalue, filtersize):
        """
        Build the LeNet-style graph: two conv+maxpool stages, a 128-unit
        dense layer, and a 10-unit logits readout. Returns the logits
        tensor. NOTE(review): `biasvalue` is accepted but never used.
        """
        # reshape image for input layer
        with tf.device(dev), tf.name_scope('input'):
            input_layer = tf.reshape(x, [-1, 28, 28, 1])
        # convolutional 1
        with tf.device(dev), tf.name_scope('conv1'):
            conv1 = tf.layers.conv2d(inputs=input_layer, filters=num_filters, kernel_size=[filtersize, filtersize], padding="SAME", activation=tf.nn.relu)
        # max pool layer 1 (stride 1 + SAME padding keeps the 28x28 size)
        with tf.device(dev), tf.name_scope('pool1'):
            pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=1, padding='SAME')
        # convolutional 2
        with tf.device(dev), tf.name_scope('conv2'):
            conv2 = tf.layers.conv2d(inputs=pool1, filters=num_filters, kernel_size=[filtersize, filtersize], padding="SAME", activation=tf.nn.relu)
        # max pool layer 2
        with tf.device(dev), tf.name_scope('pool2'):
            pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=1, padding='SAME')
        # flatten input and FC layer 1
        with tf.device(dev), tf.name_scope('dense1'):
            # fully connected layer
            pool2_flat = tf.reshape(pool2, [-1, 28*28*num_filters])
            dense1 = tf.layers.dense(inputs=pool2_flat, units=128, activation=tf.nn.relu)
        # readout layer, fully connected
        with tf.device(dev), tf.name_scope('output'):
            logits = tf.layers.dense(inputs=dense1, units=10)
        # output probs
        return logits
    def fill_nan(self, A):
        '''
        Linearly interpolate over NaN entries of a 1-D array (training
        curves only record accuracy every `calcstep` epochs).
        stolen from https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
        '''
        inds = np.arange(A.shape[0])
        good = np.where(np.isfinite(A))
        f = interpolate.interp1d(inds[good], A[good],bounds_error=False)
        B = np.where(np.isfinite(A),A,f(inds))
        return B
    def testArchitecture(self, learning_rate=0.1, num_filters=16, device='cpu', biasvalue=0.1, numepoch=2000, filtersize=2, batchsize=50, calcstep=100):
        """
        Builds a conv NN with the given parameters and trains it with SGD,
        evaluating accuracy every `calcstep` epochs, dumping the results to
        a timestamped JSON file, and returning (accuracy list, final loss).

        NOTE(review): `val_accuracy` and `loss` are only assigned on
        epochs where epoch % calcstep == 0 — if numepoch is not a multiple
        of calcstep the saved values are stale (and both would raise
        NameError if calcstep > numepoch). `batch_val` is fetched but
        never used; `start_time` and `merged_summary_op` are also unused.
        """
        dev = '/GPU:0' if device == 'gpu' else '/cpu:0'
        print('Train of convolutional NN with {} filters, learning rate {}, using the {}.'.format(num_filters, learning_rate, device))
        with tf.device(dev), tf.name_scope('alloc'):
            # DATA ALLOC: flattened 28x28 images and one-hot labels
            x = tf.placeholder(tf.float32, [None, 784])
            y_ = tf.placeholder(tf.float32, [None, 10])
        print("* Set up NN * ")
        y = self.convNN(x, dev, num_filters, biasvalue, filtersize)
        print("* Setup succesful *")
        with tf.device(dev), tf.name_scope('loss'):
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
            cross_entropy = tf.reduce_mean(cross_entropy)
        tf.summary.scalar('cross_entropy', cross_entropy)
        acc = []
        with tf.device(dev), tf.name_scope("SGD"):
            train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
        with tf.device(dev), tf.name_scope('accuracy'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
            correct_prediction = tf.cast(correct_prediction, tf.float32)
            accuracy = tf.reduce_mean(correct_prediction)
        tf.summary.scalar('accuracy', accuracy)
        merged_summary_op = tf.summary.merge_all()
        with tf.device(dev), tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
            sess.run(tf.global_variables_initializer())
            start_time = time()
            for epoch in range(numepoch+1):
                batch = self.mnist.train.next_batch(batchsize)
                batch_val = self.mnist.validation.next_batch(batchsize)
                if epoch % calcstep == 0:
                    # evaluate accuracy (see validation_accuracy note: this
                    # uses the test split) and append it; other epochs get
                    # None placeholders that fill_nan interpolates later
                    val_accuracy = self.validation_accuracy(self.mnist, accuracy, x, y_)
                    loss = 1 - val_accuracy
                    acc.append(val_accuracy)
                    print('epoch {}, validation accuracy {:.2f}%'.format(epoch, val_accuracy*100), end='\n')
                else:
                    acc.append(None)
                with tf.device(dev):
                    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
        # save run configuration and learning curve as JSON
        results = dict()
        results["lr"] = learning_rate
        results["num_filters"] = num_filters
        results["batch_size"] = batchsize
        results["learning_curve"] = acc
        results["test_error"] = 1-val_accuracy
        path = "./output/"
        runid = str(datetime.datetime.now())
        os.makedirs(path, exist_ok=True )
        fname = os.path.join(path, "results_run_%s.json" % runid)
        fh = open(fname, "w")
        json.dump(results, fh)
        fh.close()
        return acc, loss
# # Init run
# +
# Sweep 1: vary the learning rate at fixed filter size, plotting each
# interpolated learning curve live. The graph is reset between runs.
ln = LeNetTensor(input_data.read_data_sets('MNIST_data', one_hot=True))
numepoch = 2000
calcstep = 50
dataC = []
ln.plottrainlifesetup(numepoch, "learning rate test")
for testing_learning_rate in [0.1, 0.01, 0.001, 0.0001]:
    data, loss = ln.testArchitecture(learning_rate=testing_learning_rate, num_filters=16, device='cpu', numepoch=numepoch, filtersize=3, batchsize=50, calcstep=calcstep)
    data = ln.fill_nan(np.array(data, dtype=float))
    dataC.append(data)
    tf.reset_default_graph()
    ln.plottrainlifeupdate(np.array(dataC, dtype=float), 4, ["0.1", "0.01", "0.001", "0.0001"])
# NOTE(review): the bare time() call has no effect; sleep(1) just pauses
# before the second figure is created.
time()
sleep(1)
# Sweep 2: vary the conv filter size at fixed learning rate 0.1.
dataC = []
ln.plottrainlifesetup(numepoch, "filtersize test")
for size_of_filters in [1, 3, 5, 7]:
    data, loss = ln.testArchitecture(learning_rate=0.1, num_filters=16, device='cpu', numepoch=numepoch, filtersize=size_of_filters, batchsize=50, calcstep=calcstep)
    data = ln.fill_nan(np.array(data, dtype=float))
    dataC.append(data)
    tf.reset_default_graph()
    ln.plottrainlifeupdate(np.array(dataC, dtype=float), 4, ["1", "3", "5", "7"])
| 8,989 |
/test_bowlers_analysis.ipynb | 33bd6bd4b0d1ae3b86d0ca09e33542ad8fa84299 | [] | no_license | Israt055/test_bowlers_analysis | https://github.com/Israt055/test_bowlers_analysis | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 89,851 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reference:
# # actual data source: https://stats.espncricinfo.com/ci/content/records/93276.html
# **import required libraries**
import pandas as pd
import numpy as np
pip install openpyxl
# +
# Read the bowling-records sheet from the Excel workbook.
df = pd.read_excel("test_cricket.xlsx" , sheet_name='wickets')
# Naming the DataFrame - df
# Reading the .xlsx file using pandas: pd.read_excel
# Reading the sheet named 'wickets'
display(df.head(11))
# df.head(11) shows the first 11 rows of the DataFrame.
# -
# **Observations:**
#
# In this data set there are 14 columns .
#
# * player = player name
#
#
# * span = the staring year and the ending year of a player to play/ career duration
#
#
# * Mat = number of matches
#
#
#
# * inns = number of innings
#
#
#
# * balls = total number of bowling. a bowler delivers the ball from his or her end of the pitch towards the batsman
#
#
#
# * Runs = The number of runs conceded
#
#
# * Wkts= total number wicket taken of a bowler.
#
#
#
# * BBI = BBI stands for Best Bowling in Innings and only gives the score for one innings.
#
#
#
# * BBM = BBM stands for Best Bowling in Match and gives the combined score over 2 or more innings in one match.
#
#
#
# * average = bowling average is the number of runs they have conceded per wicket taken.
#
#
#
# * Econ = Economy rate. a player's economy rate is the average number of runs they have conceded per over bowled.
#
#
#
# * SR = strike rate. Bowling Strike Rate is a measurement of a bowler’s average number of balls bowled for every wicket
# taken.
#
#
#
# * 5 = number of times the player took 5 wickets in a match
#
#
#
# * 10 = number of times the player took 10 wickets in a match
# +
# number of rows
print("number of rows = ", df.shape[0])
# number of columns
print("number of columns = ", df.shape[1] )
# -
# **Observations:**
#
# The DataFrame contains 79 rows (players) and 14 columns (attributes).
# +
df.describe()
#checking the data statistic
#The describe () function is used to generate descriptive statistics
# -
# # Descriptive Statistics:
#
# shows the details of all numerical attributes in the dataset:
#
# 1. count = number of observations
# 2. mean = average of all values
# 3. std = standard deviation
# 4. min = minimum value among all the observations
# 5. 25% = the value at 25th percentile
# 6. 50% = median value
# 7. 75% = the value at 75th percentile.
# 8. max = maximum value among all the observations.
#
#
# **we can see that 25%,50%,75% values or observation has less than or falls below that point**
#
#
# **Observations:**
#
#
# 1. The average number of matches played by the bowlers is 80 and minimum is 37 matches and maximum is 166. 25% bowler played approximately or less than 60 matches. 50% bowler played around or less than 71 matches, 75% bowler played nearly or less than 93 matches.
#
#
# 2. The average number of innings played by the bowlers is 144 and minimum is 67 ,maximum is 301. 25% bowler played approximately or less than 110 innings. 50% bowler played around or less than 129 innings, 75% bowler played nearly or less than 169 innings.
#
#
#
# 3. The average number of runs conceded by the bowlers is 8595. While the highest runs is 18355, only 25% bowler scored less than 6456 runs.
#
#
#
#
# 4. The average number of wickets taken by the bowlers is 317, with 75% of bowlers scoring less than 375.
#
#
#
# 5. The average number of 5 wickets taken by the bowlers is 16 while 75 % bowlers scored less than 20.
# +
print(df.info())
# checking for missing values and data types of each column
# -
# **Observations:**
#
#
# * we can see that there is no missing values present in the dataset
#
# * there is 4 object data type (player, span, BBI, BBM)
#
# * there is 3 float data type ( Strike Rate, Economy Rate, Average) and others are integer value.
# # Renaming the columns
# +
# Rename the abbreviated stat columns to descriptive names.
# NOTE(review): the last two keys are the integers 5 and 10; this only
# works if the Excel headers were parsed as ints, not the strings "5"/"10"
# — confirm against df.columns.
df = df.rename(columns={'Mat':'Matches',
                         'Inns':'Innings',
                         'Wkts':'Wickets',
                         'Ave': 'Bowling Average',
                         'Econ' : 'Economy Rate',
                         'SR' : 'Strike Rate',
                         'BBI' : 'Best Bowling in Innings',
                         'BBM' : 'Best Bowling in Match',
                         5: '5_wickets',
                         10:'10_wickets'})
df.head()
# -
# **Observations:**
#
# Muralitharan recorded the best bowling figures in a match, conceding 220 runs while taking 16 wickets.
#
# His bowling average is 22.72, his economy rate was 2.47, and he took 5 wickets in a match 67 times.
# # Remove the columns BBI and BBM
# we can see that the data in the column BBI and BBM was bugged and some of the entries got turned into datetime
# +
df.drop(["Best Bowling in Match", "Best Bowling in Innings"], axis=1 , inplace= True )
display(df.head())
# -
# # Splitting Player and Country from 'Player' Column
# in the player column ,there is information in the braket sign about the player played for which country and ICC
# +
# Strip the closing bracket, then split "Name (COUNTRY)" into two columns.
# NOTE(review): replacing ")" with " " leaves a trailing space on the
# Country values, and str.replace may treat the pattern as a regex
# depending on the pandas version — regex=False would be safer here.
df['Player'] = df['Player'].str.replace(")"," ")
df_player = df['Player'].str.split("(", expand=True)
df = df.drop('Player', axis=1)
df = pd.concat([df, df_player], axis=1)
# Column 0 is the name, column 1 the country code(s), e.g. "ICC/SL".
df = df.rename(columns={0: 'Player',1: 'Country'})
column_sequence = ['Player' , 'Country' , 'Span' , 'Matches' , 'Innings', 'Balls', 'Runs' , 'Wickets', 'Bowling Average' , 'Economy Rate' , 'Strike Rate' , '5_wickets' , '10_wickets']
df = df[column_sequence]
display(df.head())
# -
# # Finding how many players played for ICC
def ICC(x):
    """Return "Yes" when the country string mentions ICC, otherwise "No"."""
    return "Yes" if "ICC" in x else "No"
# +
# Flag ICC players first (before the prefix is stripped from Country).
df['Played for ICC'] = df['Country'].apply(ICC)
#display(df.head(10))
# removing "ICC/" from string of country column
df['Country'] = df['Country'].str.replace("ICC/","", regex=True)
display(df.head(5))
# -
print (df['Played for ICC'].value_counts())
# **only 5 players played for ICC**
# # Checking how many different countries are present in the dataset
print (df['Country'].value_counts())
# **Observations:**
#
# **We can see that there are 18 Australian bowlers and 1 Bangladeshi bowler present in the dataset.**
# # Splitting the span column
# +
# splitting 'span' column into start/end years on the hyphen
df_span = df['Span'].str.split("-", expand=True)
# concatenating the new dataframe with the main dataframe
df = pd.concat([df, df_span], axis=1)
# renaming new splitted column
df = df.rename(columns={0: "Starting Year",
                        1: "Ending Year"})
# removing 'span' column
df = df.drop("Span", axis=1)
# new column sequence as a list
column_sequence = ['Player', 'Country','Starting Year', 'Ending Year', 'Matches', 'Innings', 'Balls', 'Runs','Wickets' , 'Bowling Average','Economy Rate','Strike Rate', '5_wickets', '10_wickets' , 'Played for ICC']
# implementing the new column sequence
df = df[column_sequence]
display(df.head(5))
# -
df.info()
# # Changing Starting and Ending Year from object type to integer
# The starting and ending years were parsed as object (string) type, so
# convert them to numeric before doing arithmetic on them.
df['Ending Year']=pd.to_numeric(df['Ending Year'])
df['Starting Year']=pd.to_numeric(df['Starting Year'])
# +
# total years of span of the players (career length in years)
df['Span']=df['Ending Year']-df['Starting Year']
# rearranging columns
column_sequence = ['Player', 'Country','Starting Year', 'Ending Year', 'Span' , 'Matches', 'Innings', 'Balls', 'Runs','Wickets' , 'Bowling Average','Economy Rate','Strike Rate', '5_wickets', '10_wickets' , 'Played for ICC']
df = df[column_sequence]
display(df.head(5))
# -
# # Players with Longest to Shortest Span
# +
display(df.sort_values(by='Span', ascending = False).head(1))
display(df.sort_values(by='Span', ascending = True).head(1))
# -
# **Observations:**
#
# * Imran Khan had the longest career span: he played for the longest period of time, 21 years.
# * GP Swann had the shortest career span, of 5 years.
# # Finding the player that had a lowest economy rate
display(df.sort_values(by='Economy Rate', ascending = True).head(5))
# **Observations:**
# * LR Gibbs had the lowest economy rate of 1.98
# # Finding the player that had the lowest strike rate
display(df.sort_values(by='Strike Rate', ascending = True).head(5))
# **Observations:**
#
# K Rabada had the lowest strike rate of 41.2
# # Finding the player that had the lowest bowling average
display(df.sort_values(by='Bowling Average', ascending = True).head(5))
# **Observations:**
#
# MD Marshall had the lowest bowling average of 20.94
| 8,768 |
/docs/Cruises.ipynb | b7ab1e8e028cea8177b72b096ffdc2e6eb1dbde0 | [
"MIT"
] | permissive | simonscmap/pycmap | https://github.com/simonscmap/pycmap | 5 | 1 | null | null | null | null | Jupyter Notebook | false | false | .py | 2,446 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--NAVIGATION-->
# < [Is Climatology Product](Climatology.ipynb) | [Index](Index.ipynb) | [Cruise Details by Name](CruiseByName.ipynb) >
#
# <a href="https://colab.research.google.com/github/simonscmap/pycmap/blob/master/docs/Cruises.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
#
# <a href="https://mybinder.org/v2/gh/simonscmap/pycmap/master?filepath=docs%2FCruises.ipynb"><img align="right" src="https://mybinder.org/badge_logo.svg" alt="Open in Colab" title="Open and Execute in Binder"></a>
# ## *cruises()*
#
# Returns a dataframe containing the details of all cruise expeditions stored at Simons CMAP database.
# <br />This method requires no input.
#
# >**Returns:**
# >> Pandas dataframe.
# ### Example
# +
# #!pip install pycmap -q #uncomment to install pycmap, if necessary
import pycmap
# Replace the placeholder with a real Simons CMAP API key before running.
api = pycmap.API(token='<YOUR_API_KEY>')
# Returns a pandas DataFrame listing all cruise expeditions in the database.
api.cruises()
# -
# <img src="figures/sql.png" alt="SQL" align="left" width="40"/>
# <br/>
# ### SQL Statement
# Here is how to achieve the same results using a direct SQL statement. Please refere to [Query](Query.ipynb) for more information.
# <code>EXEC uspCruises<code/>
| 1,515 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.