path
stringlengths
8
399
content_id
stringlengths
40
40
detected_licenses
list
license_type
stringclasses
2 values
repo_name
stringlengths
6
109
repo_url
stringlengths
25
128
star_events_count
int64
0
52.9k
fork_events_count
int64
0
7.07k
gha_license_id
stringclasses
9 values
gha_event_created_at
timestamp[us]
gha_updated_at
timestamp[us]
gha_language
stringclasses
28 values
language
stringclasses
1 value
is_generated
bool
1 class
is_vendor
bool
1 class
conversion_extension
stringclasses
17 values
size
int64
317
10.5M
script
stringlengths
245
9.7M
script_size
int64
245
9.7M
/Machine_learning/2022-Machine-Learning-Specialization/Advanced Learning Algorithms/week1/work/betaVersions/C2_W1_Lab03_CoffeeRoasting_Numpy.ipynb
7992943700b1d75d2cf4621b174504ce64736a75
[]
no_license
Jamie33/learngit
https://github.com/Jamie33/learngit
15
5
null
2023-02-16T01:27:32
2022-10-31T07:53:04
Jupyter Notebook
Jupyter Notebook
false
false
.py
9,989
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt x = np.linspace(-np.pi,np.pi,100) plt.plot(x,np.sin(x)) theta= np.linspace(-np.pi,np.pi,100) #generating a circle radius = 10 x=radius*np.sin(theta) y=radius*np.cos(theta) #equation=>x=rsin(t),y=rcos(t) plt.figure(figsize=(6,6)) plt.plot(x,y) plt.show() theta1=np.linspace(0,40*np.pi,100) rad= np.linspace ort tensorflow as tf from lab_utils_common import dlc, sigmoid from lab_coffee_utils import load_coffee_data, plt_roast, plt_prob, plt_layer, plt_network, plt_output_unit import logging logging.getLogger("tensorflow").setLevel(logging.ERROR) tf.autograph.set_verbosity(0) # ## DataSet # This is the same data set as the previous lab. X,Y = load_coffee_data(); print(X.shape, Y.shape) # Let's plot the coffee roasting data below. The two features are Temperature in Celsius and Duration in minutes. [Coffee Roasting at Home](https://www.merchantsofgreencoffee.com/how-to-roast-green-coffee-in-your-oven/) suggests that the duration is best kept between 12 and 15 minutes while the temp should be between 175 and 260 degrees Celsius. Of course, as the temperature rises, the duration should shrink. plt_roast(X,Y) # ### Normalize Data # To match the previous lab, we'll normalize the data. 
Refer to that lab for more details print(f"Temperature Max, Min pre normalization: {np.max(X[:,0]):0.2f}, {np.min(X[:,0]):0.2f}") print(f"Duration Max, Min pre normalization: {np.max(X[:,1]):0.2f}, {np.min(X[:,1]):0.2f}") norm_l = tf.keras.layers.Normalization(axis=-1) norm_l.adapt(X) # learns mean, variance Xn = norm_l(X) print(f"Temperature Max, Min post normalization: {np.max(Xn[:,0]):0.2f}, {np.min(Xn[:,0]):0.2f}") print(f"Duration Max, Min post normalization: {np.max(Xn[:,1]):0.2f}, {np.min(Xn[:,1]):0.2f}") # ## Numpy Model (Forward Prop in NumPy) # <center> <img src="./images/C2_W1_RoastingNetwork.PNG" width="200" /> <center/> # Let's build the "Coffee Roasting Network" described in lecture. There are two layers with sigmoid activations. # As described in lecture, it is possible to build your own dense layer using NumPy. This can then be utilized to build a multi-layer neural network. # # <img src="images/C2_W1_dense2.PNG" width="600" height="450"> # # In the first optional lab, you constructed a neuron in NumPy and in Tensorflow and noted their similarity. A layer simply contains multiple neurons/units. As described in lecture, one can utilize a for loop to visit each unit (`j`) in the layer and perform the dot product of the weights for that unit (`W[:,j]`) and sum the bias for the unit (`b[j]`) to form `z`. An activation function `g(z)` can then be applied to that result. Let's try that below to build a "dense layer" subroutine. def my_dense(a_in, W, b, g): """ Computes dense layer Args: a_in (ndarray (n, )) : Data, 1 example W (ndarray (n,j)) : Weight matrix, n features per unit, j units b (ndarray (j, )) : bias vector, j units g activation function (e.g. sigmoid, relu..) Returns a_out (ndarray (j,)) : j units| """ units = W.shape[1] a_out = np.zeros(units) for j in range(units): w = W[:,j] z = np.dot(w, a_in) + b[j] a_out[j] = g(z) return(a_out) # The following cell builds a three-layer neural network utilizing the `my_dense` subroutine above. 
def my_sequential(x, W1, b1, W2, b2): a1 = my_dense(x, W1, b1, sigmoid) a2 = my_dense(a1, W2, b2, sigmoid) return(a2) # We can copy trained weights and biases from the previous lab in Tensorflow. W1_tmp = np.array( [[-8.93, 0.29, 12.9 ], [-0.1, -7.32, 10.81]] ) b1_tmp = np.array( [-9.82, -9.28, 0.96] ) W2_tmp = np.array( [[-31.18], [-27.59], [-32.56]] ) b2_tmp = np.array( [15.41] ) # ### Predictions # <img align="left" src="./images/C2_W1_RoastingDecision.PNG" style=" width:380px; padding: 10px 20px; " > # # Once you have a trained model, you can then use it to make predictions. Recall that the output of our model is a probability. In this case, the probability of a good roast. To make a decision, one must apply the probability to a threshold. In this case, we will use 0.5 # Let's start by writing a routine similar to Tensorflow's `model.predict()`. This will take a matrix $X$ with all $m$ examples in the rows and make a prediction by running the model. def my_predict(X, W1, b1, W2, b2): m = X.shape[0] p = np.zeros((m,1)) for i in range(m): p[i,0] = my_sequential(X[i], W1, b1, W2, b2) return(p) # We can try this routine on two examples: X_tst = np.array([ [200,13.9], # postive example [200,17]]) # negative example X_tstn = norm_l(X_tst) # remember to normalize predictions = my_predict(X_tstn, W1_tmp, b1_tmp, W2_tmp, b2_tmp) # To convert the probabilities to a decision, we apply a threshold: yhat = np.zeros_like(predictions) for i in range(len(predictions)): if predictions[i] >= 0.5: yhat[i] = 1 else: yhat[i] = 0 print(f"decisions = \n{yhat}") # This can be accomplished more succinctly: yhat = (predictions >= 0.5).astype(int) print(f"decisions = \n{yhat}") # ## Network function # This graph shows the operation of the whole network and is identical to the Tensorflow result from the previous lab. # The left graph is the raw output of the final layer represented by the blue shading. This is overlaid on the training data represented by the X's and O's. 
# The right graph is the output of the network after a decision threshold. The X's and O's here correspond to decisions made by the network. netf= lambda x : my_predict(norm_l(x),W1_tmp, b1_tmp, W2_tmp, b2_tmp) plt_network(X,Y,netf) # ## Congratulations! # You have built a small neural network in NumPy. # Hopefully this lab revealed the fairly simple and familiar functions which make up a layer in a neural network.
6,224
/ngram-model.ipynb
76dd458df757ad51c2da8ae6f564badcfc8db784
[]
no_license
TracyMRohlin/Trolling_RNN
https://github.com/TracyMRohlin/Trolling_RNN
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
42,905
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + deletable=true editable=true import keras import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import random import urllib from keras.layers.core import Dense, Activation, Flatten from keras.layers.embeddings import Embedding from keras.models import Sequential from keras.preprocessing.text import Tokenizer from sklearn import metrics # + deletable=true editable=true # Read the data into a Pandas dataframe. comments = pd.read_csv('attack_annotated_comments.tsv', sep = '\t', index_col = 0) annotations = pd.read_csv('attack_annotations.tsv', sep = '\t') # Label a comment as an attack if over half of annotators did so. # We can tinker with this threshold later. labels = annotations.groupby('rev_id')['attack'].mean() > 0.5 # Join labels and comments comments['attack'] = labels # Preprocess the data -- remove newlines, tabs, quotes (following Wulczyn) comments['comment'] = comments['comment'].apply(lambda x: x.replace("NEWLINE_TOKEN", " ")) comments['comment'] = comments['comment'].apply(lambda x: x.replace("TAB_TOKEN", " ")) comments['comment'] = comments['comment'].apply(lambda x: x.replace("`", " ")) # + deletable=true editable=true train_data = comments.loc[comments['split'] == 'train'] dev_data = comments.loc[comments['split'] == 'dev'] test_data = comments.loc[comments['split'] == 'test'] # The list of gold-standard labels for the data train_labels = train_data["attack"].tolist() dev_labels = dev_data["attack"].tolist() test_labels = test_data["attack"].tolist() print(len([x for x in train_labels if x])) # Put all the training data (comments) into a list train_texts = train_data["comment"].tolist() dev_texts = dev_data["comment"].tolist() test_texts = test_data["comment"].tolist() # + deletable=true editable=true # 
Put all the comments into lists train_texts = train_data["comment"].tolist() dev_texts = dev_data["comment"].tolist() test_texts = test_data["comment"].tolist() # + deletable=true editable=true # A set of character unigrams. char_unigrams = set(''.join(train_texts)) # Note: there are 1557 unique characters. len(char_unigrams) # + deletable=true editable=true def char_ngram(text, n): """Create ngrams for a single line.""" z = [] text2 = '*'+text+'*' for k in range(n, n+1): new_info = [text2[i:i+k] for i in range(len(text2)-k+1)] z.append(new_info) z = [ngram for ngrams in z for ngram in ngrams] return " ".join(z) def create_ngrams(text, n=1): """Create ngrams for a whole matrix/list of texts.""" ngrammed_text = [] for t in text: ngrams = char_ngram(t, n) ngrammed_text.append(ngrams) return ngrammed_text # + deletable=true editable=true train_texts = create_ngrams(train_texts, 1) dev_texts = create_ngrams(dev_texts, 1) test_texts = create_ngrams(test_texts, 1) # + deletable=true editable=true print(train_texts[0]) # + deletable=true editable=true def even_split(comments, labels): """Return a slice of the data with randomly selected False (non-attack) data to create an even split with True (attack) data""" # grab all the attacks to see how many we need to match attack_indices = [i for i in range(len(comments)) if labels[i] == True] new_training = [comments[i] for i in attack_indices] new_labels = [labels[i] for i in attack_indices] # grab all the ones that are not attacks, shuffle them # select the same number of non-attacks non_attack_indices = [i for i in range(len(comments)) if labels[i] == False] random.shuffle(non_attack_indices) for i in range(len(attack_indices)): new_training.append(comments[i]) new_labels.append(labels[i]) return new_training, new_labels # + deletable=true editable=true # AUC measure def auc_score(y_true, y_pred): return metrics.roc_auc_score(y_true, y_pred) # Plot the AUROC def plot_ROC(fpr, tpr, roc_auc, name): plt.figure() 
plt.title('Receiver Operating Characteristic') plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc) plt.legend(loc = 'lower right') plt.plot([0, 1], [0, 1],'r--') plt.xlim([0, 1]) plt.ylim([0, 1]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.savefig("{0}.png".format(name)) # + deletable=true editable=true # The char level tokenizer tokenizer = keras.preprocessing.text.Tokenizer() tokenizer.fit_on_texts(train_texts) vocab_size = len(tokenizer.word_index) + 1 print(vocab_size) # + deletable=true editable=true tokenizer.fit_on_texts(dev_texts) tokenizer.fit_on_texts(test_texts) # + deletable=true editable=true tokenizer.word_counts # + deletable=true editable=true def texts_to_matrix(texts, tokenizer): """Given a section of the data, return a matrix representing comments""" matrix = tokenizer.texts_to_matrix(texts) return matrix # + deletable=true editable=true # evenly split the data to have equal amounts of attacks train_texts, train_labels = even_split(train_texts, train_labels) dev_texts, dev_labels = even_split(dev_texts, dev_labels) test_texts, test_labels = even_split(test_texts, test_labels) # + deletable=true editable=true train_matrix = texts_to_matrix(train_texts, tokenizer) dev_matrix = texts_to_matrix(dev_texts, tokenizer) test_matrix = texts_to_matrix(test_texts, tokenizer) # + deletable=true editable=true print(train_matrix[0]) # + deletable=true editable=true # Dimensions of our training matrix train_matrix.shape # + deletable=true editable=true # Make a model. model = Sequential() # Add embedding layer. # Recall from earlier that char unigram vocab size is 1557. # Train_matrix shape is 69526 x 1558, use that as input dimension. model.add(Embedding(vocab_size, 10, input_length=train_matrix.shape[1])) #model.add(Dense(train_matrix.shape[1], input_dim=train_matrix.shape[1])) model.add(Flatten()) # Commented out for now bc... 
the example I was following only had one Dense layer model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) # + deletable=true editable=true model.fit(train_matrix, train_labels, batch_size=100, epochs=4, validation_data=(dev_matrix, dev_labels)) # + deletable=true editable=true # Output predictions so that we can run AUC eval # Remember to recompile the model first before running this y_pred = model.predict(test_matrix, batch_size=2) roc_auc = auc_score(test_labels, y_pred) print('AUROC:{}'.format(roc_auc)) # + deletable=true editable=true fpr, tpr, _ = metrics.roc_curve(test_labels, y_pred) plot_ROC(fpr, tpr, roc_auc, "unigram-model") # + deletable=true editable=true o! Ja inserimos os registros de fabricante na tabela no banco. Agora vamos ler um sql da tabela do banco e adiciona-lo em um dataframe. df_fabricante = pd.read_sql('fabricante', con=conn) # Lê o dado na tabela do banco de dados df_fabricante # Perceba que agora temos o código associado a um fabricante. Esse código será utilizado para popular a tabela de modelo_veiculo. # # ![image.png](attachment:54f7a90a-4336-4c88-a180-042e9a5e12ef.png) # # Perceba que na tabela de modelo_veiculo possui uma dependencia da tabela fabricante. Essa dependência esta relacionada ao código do fabricante. Dessa forma, temos que buscar esse código na tabela de Fabricante e inseri-lo na tabela de modelo_veiculo. # # ![image.png](attachment:f5c9e441-bfaa-4de5-90c4-1306c188432d.png) # # Para fazer isso vamos utilizar o dataframe df_fabricante criado a partir do insert no banco e o dataframe df_modelos. Então o que fazer? # > *Vamos ler todo o dado contido na coluna de fabricante do dataframe **df_modelos** e buscar a sua equivalencia no dataframe **df_fabricante*** # # # Primeiro vamos setar a coluna nome de fabricante como index. Essa coluna vai ser utilizada como index para buscar os dados no dataframe df_fabricante. 
Após isso, vamos transformar em um dicionário # + dict_fabricante = df_fabricante.set_index(df_fabricante.nome_fabricante)['cod_fabricante'].to_dict() dict_fabricante # - # Nesse momento vamos buscaros nomes dos fabricantes e associamos a seu respectivo código. df_modelos.fabricante = df_modelos.fabricante.map(dict_fabricante) df_modelos # Feito isso, vamos alterar o nome da coluna. df_modelos = df_modelos.rename(columns={'fabricante' : 'cod_fabricante'}) df_modelos # Pronto! Agora é so fazer o insert na tabela do banco. for modelo in df_modelos.itertuples(): nome_modelo = modelo.modelo cod_fabricante = modelo.cod_fabricante try: query = f"""insert into modelo_veiculo (descricao_modelo, cod_fabricante) values ('{nome_modelo}',{cod_fabricante})""" conn.execute(query) print(f"Registro inserido com sucesso: {nome_modelo}") except Exception as e: print(f'Não foi possível inserir o registro: {nome_modelo} Erro: {e}') # ## Criando tabela Staging no banco de dados # # Uma outra forma de popular dados no banco de dados é através da criação de uma tabela de preparação temporária. Uma tabela Staging é uma area intermediária utilizada para preparar os dados para serem utilizados em um ambiente de análise. # # Deste modo, criamos uma cópia da tabela com os dados coletados e persistimos no banco sem nenhum tratamento. # cria uma tabela no banco SQL com o nome df_vendas.to_sql('stg_venda_veiculos', con=conn, schema='coletaIGTI') # Após criar a tabela de staging no banco de dados, vamos executar a seguinte consulta em SQL abaixo: # # ![image.png](attachment:3006f2f5-7863-4398-a336-74c2837947fa.png) # # Na tabela principal **stg_venda_veiculos** vamos selecionar os campos: data_compra, valor e cod_pessoa. Além disso, temos que buscar a informação de cod_modelo que esta na tabela **modelo_veiculo**. Para fazer isso vamos realizar uma junção entre as duas tabelas e indicar o campo de descricao como parâmetro. 
Feito isso, vamos os dados necessários para inserir na tabela **venda_veiculo**. # Pronto! Agora temos todos os dados coletados e armazenados normalizados no banco de dados. \0/ # <li>By inspection, two components could potentially classify 10 digits since digits seem to be clustered at different areas</li> # <ul><li>See scree plot towards the end of the document</li></ul> # </ul> # + # I use linear discriminant on ziptrain over 256 original pixels (no PCA preprocessing) # I build the confusion matrix of this model over ziptrain ld = LinearDiscriminantAnalysis() ld.fit(train[:, 1:], train[:, 0]) y_predict = ld.predict(train[:, 1:]) accuracy_raw = [] # binarization of digits for confusion matrix: # - all y that equal to digit i are set to 1, else 0 # - all y_predict that equal to digit i are set to 1, else 0 # - then accuracy is computed on the prediction of i only. # - i is 0 to 9 for i in range(0,10): y = (train[:,0] == i).astype(int) y_pred = np.array(y_predict == i).astype(int) y_prob = ld.predict_proba(train[:, 1:])[:,i] accuracy_raw.append(accuracy_metric(y, y_pred)) print("\n------------------ LD on TRAIN data on digit: {} -------------------".format(i)) print(classification_performance(y, y_pred, y_prob, False)); plt.legend(list(range(0,10))); print('\nMean accuracy: ', np.array(accuracy_raw).mean()) # - # # <ul> # <li>The mean accuracy is high (about 99%), which is not a surprise given that accuracy is measured on the train data.</li> # <li>Interestingly, overall accuracy is also high on <u>test data</u> (about 98%), as shown below.</li> # <li>Digit 8 and 2 have the lowest sensitivity scores</li> # </ul> # + # I do the same thing as above, but on test data and get very good overall accuracy of 98.2%, # which suggests that the algorithm does not overfit y_predict = ld.predict(test[:, 1:]) # Performance on test data for i in range(0,10): y = (test[:,0] == i).astype(int) y_pred = np.array(y_predict == i).astype(int) y_prob = ld.predict_proba(test[:, 1:])[:,i] 
accuracy_raw.append(accuracy_metric(y, y_pred)) print("\n------------------ LD on TEST data on digit: {} -------------------"\ .format(i)) print(classification_performance(y, y_pred, y_prob, False)); plt.legend(list(range(0,10))); print('\nMean accuracy: ', np.array(accuracy_raw).mean()) # + # I use linear disciminant over "m" projected principal components, # with the appropriate choice of "m" = 156 # The accuracy of test data is slighly lower this time (97.7%) components = list(range(1, train[0,:].shape[0])) accuracy_m = [] for m in components: pca = PCA(n_components = m) pca.fit(train[:, 1:]) Z_train = pca.transform(train[:,1:]) ld.fit(Z_train, train[:, 0]) Z_test = pca.transform(test[:,1:]) temp_digit_accuracy = [] # binarization of digits for confusion matrix: for i in range(1,10): y = (test[:,0] == i).astype(int) y_pred = np.array(ld.predict(Z_test) == i).astype(int) temp_digit_accuracy.append(accuracy_metric(y, y_pred)) accuracy_m.append(np.array(temp_digit_accuracy).mean(axis=0)) plt.subplots(figsize=(20,4)) plt.plot(components, accuracy_m) plt.title('Accuracy of LR as a function of PCA components (on SCALED test data)',size=20) plt.xlabel('PCA components',size=16) plt.ylabel('Accuracy',size=16) plt.xlim(0,256) plt.tick_params(labelsize=16) print('Highest accuracy of {} with {} component'\ .format(np.max(accuracy_m),np.where(accuracy_m == np.max(accuracy_m))[0][0]+1)) # - pd.DataFrame(accuracy_m, columns = ['Accuracy']).iloc[150:157] # <ul> # <li>The graph above suggests that best accuracy is obtained when m = 156, with 97.6% accuracy</li> # <li>In contrast, with only 10 components we get an accuracy of 95.5%</li> # <li>I printed the confusion matrices below when m=10 vs. 
when m = 156.</li> # <ul><li>The scree plot below suggests using only one component for an accuracy of 87.6%</li></ul> # </ul> # + m = 10 pca = PCA(n_components = m) pca.fit(train[:, 1:]) Z_train = pca.transform(train[:,1:]) ld.fit(Z_train, train[:, 0]) Z_test = pca.transform(test[:,1:]) accuracy = [] for i in range(1,10): y = (test[:,0] == i).astype(int) y_pred = np.array(ld.predict(Z_test) == i).astype(int) y_prob = ld.predict_proba(Z_test)[:,i] print("\n------------------ LD on TRAIN data on digit: {} -------------------"\ .format(i)) accuracy.append(accuracy_metric(y, y_pred)) print(classification_performance(y, y_pred, y_prob, False)); print('\nMean accuracy: ', np.array(accuracy).mean()) fig, ax = plt.subplots(figsize=(20, 4)) ax.plot(pca.explained_variance_/np.sum(pca.explained_variance_) ) ax.set_xlabel('Sorted Components', size=16) ax.set_ylabel('Proportion of Variance Explained',size=16) ax.set_xlim([0, 10]) ax.set_title('Scree Plot',size=20) ax.tick_params(labelsize=16) # + m = 156 pca = PCA(n_components = m) pca.fit(train[:, 1:]) Z_train = pca.transform(train[:,1:]) ld.fit(Z_train, train[:, 0]) Z_test = pca.transform(test[:,1:]) accuracy = [] for i in range(1,10): y = (test[:,0] == i).astype(int) y_pred = np.array(ld.predict(Z_test) == i).astype(int) y_prob = ld.predict_proba(Z_test)[:,i] print("\n------------------ LD on TRAIN data on digit: {} -------------------"\ .format(i)) accuracy.append(accuracy_metric(y, y_pred)) print(classification_performance(y, y_pred, y_prob, False)); #plt.legend(list(range(0,10))); print('\nMean accuracy: ', np.array(accuracy).mean())
15,791
/1091101大葉-AI人工智慧/109.11.01 填充與複製.ipynb
9cf0224f0308e673ce81d1c5022936f217cabe19
[]
no_license
BrewTC/AI_Python
https://github.com/BrewTC/AI_Python
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
4,701
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="L2uCo9UD9u2r" executionInfo={"status": "ok", "timestamp": 1604220470261, "user_tz": -480, "elapsed": 706, "user": {"displayName": "\u6c5f\u5ef7\u6615", "photoUrl": "", "userId": "10806022464571103271"}} outputId="c8a97be3-9831-4239-bed2-0b6bf64cc591" colab={"base_uri": "https://localhost:8080/"} import tensorflow as tf a=tf.range(1,13) tf.pad(a, [[5,5]]) a=tf.reshape(a, [2,2,3]) a, tf.pad(a, [[1,1],[2,2],[3,3]]) # + id="1L40BBkeBZyB" executionInfo={"status": "ok", "timestamp": 1604221057981, "user_tz": -480, "elapsed": 628, "user": {"displayName": "\u6c5f\u5ef7\u6615", "photoUrl": "", "userId": "10806022464571103271"}} outputId="6175bbf2-b05d-45b9-a359-0b8bab137648" colab={"base_uri": "https://localhost:8080/"} a=tf.range(12) a, tf.tile(a, [2]) # + id="9Mg2mIrOBv5s" executionInfo={"status": "ok", "timestamp": 1604221142365, "user_tz": -480, "elapsed": 646, "user": {"displayName": "\u6c5f\u5ef7\u6615", "photoUrl": "", "userId": "10806022464571103271"}} outputId="6f6f701f-d4a6-404a-e1d3-7b30d7d8279e" colab={"base_uri": "https://localhost:8080/"} b=tf.range(5) b, tf.reshape(a,[3,4]) # + id="f-2XNy4VD1nk" executionInfo={"status": "ok", "timestamp": 1604221822395, "user_tz": -480, "elapsed": 695, "user": {"displayName": "\u6c5f\u5ef7\u6615", "photoUrl": "", "userId": "10806022464571103271"}} outputId="b98450a6-22f7-4a7f-a7d4-ebcf981a5a08" colab={"base_uri": "https://localhost:8080/"} a=tf.range(10) a, tf.maximum(a, 4), tf.minimum(a,4)
1,710
/ELITEDS/Project 1 Workbook Bundle/.ipynb_checkpoints/Module 1 - The Briefing Room-checkpoint.ipynb
daf870ff7cfa2a7ed69f2a43c5273a37fde38fa6
[]
no_license
sirkells/ML-Projects
https://github.com/sirkells/ML-Projects
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
9,859
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1 style="font-size:42px; text-align:center; margin-bottom:30px;"><span style="color:SteelBlue">Module 1:</span> The Briefing Room</h1> # <hr> # # Welcome to your first **Companion Workbook** for the course! # # Companion Workbooks are Jupyter notebooks set up for you to do 3 things: # 1. Practice writing key lesson code for yourself. # 2. Run any "mission-critical" code needed to complete the project. # 3. Complete the exercises throughout this course. # # As you'll notice, some workbooks will be longer and others will be shorter. Some modules won't even have Companion Workbooks. For those, the focus is on introducing concepts you'll apply later. # # ### Instructions # # As you go through the online lessons, follow along in the Companion Workbooks. We recommend reading the lesson first, then completing its accompanying section in the workbook. # # In this first Companion Workbook, for <span style="color:royalblue">Module 1: The Briefing Room</span>, you won't be writing a lot of code. # # Instead, we'll mainly be introducing you to some key functionality in Jupyter notebooks, and then confirming you have all the required libraries properly installed. # # <br><hr id="toc"> # # ### In this module... # # In this module, we'll arm you with the best tools for tackling machine learning. # # We'll cover... # # 1. [Intro to Practical Machine Learning](#intro) # 2. [The Machine Learning Masterclass](#masterclass) # 3. [Your Arsenal](#arsenal) # 4. [How to Crush this Course](#crush) (like the HULK) # # Alright... Let's dive right in! # # <hr> # <br id="intro"> # # 1. Intro to Practical Machine Learning # # Nothing to do for this lesson. # <br id="masterclass"> # # 2. 
The Machine Learning Masterclass # # Running Python code in Jupyter Notebooks is very simple. # # <hr> # # **1.) First, make sure the cell type is a <code style="color:steelblue">Code</code> cell.** # * You can look at the toolbar at the top of the notebook to confirm. # * Code cells also have gray backgrounds. # # **2.) Next, click the code cell to place your cursor inside it.** # # **3.) Finally, press <code style="color:steelblue">Shift + Enter<code> on your keyboard to run the cell.** # * If there's 1 shortcut you remember, please make it this one. It will save you a lot of time. # # <hr> # # Give it a try with the cell below. It should display the number <code style="color:steelblue">2</code> right below the cell. # 1. This cell has a gray background, so it's a Code cell # 2. Click anywhere in this cell to place your cursor in it # 3. Press Shift + Enter on your keyboard 1 + 1 # **Tip:** You can insert more code cells using the **Insert > Insert Cell Above / Below** dropdown in the toolbar. # # **Tip:** You can also delete cells by clicking the cell you'd like to delete, then using the **Edit > Delete Cells** dropdown in the toolbar. # # **Try inserting a new code cell below this one, running some code in it (any code), and then deleting the cell.** # <br id="arsenal"> # # 3. Your Arsenal # # Now it's time to introduce the incredible suite of Python libraries for machine learning. # # You'll practice using them over the entire course, so right now we'll just introduce them and check that they are installed correctly. # First, run the cell below if you're using **Python 2**. # * If you're using **Python 3**, running the cell won't do anything. # * Remember, to run a code cell, click anywhere in the cell, then press <code style="color:steelblue">Shift + Enter</code> on your keyboard. # from __future__ import print_function # Compatability with Python 3 print( 'print() function ready to serve' ) # # # Next, let's import NumPy and Pandas and check their versions. 
# + import numpy as np print( 'NumPy version:', np.__version__, ' (At least 1.11.0 is recommended)' ) import pandas as pd print( 'Pandas version:', pd.__version__, ' (At least 0.19.0 is recommended)' ) # - # If you run into an <code style="color:crimson">ImportError</code>, or if your version is outdated, please check the Quickstart Guide included in this course for instructions. # Next, we have 2 essential libraries for data visualization. # + import matplotlib print( 'Matplotlib version:', matplotlib.__version__, ' (At least 1.5.0 is recommended)' ) import seaborn as sns print( 'Seaborn version:', sns.__version__, ' (At least 0.7.0 is recommended)' ) # - # If you run into an <code style="color:crimson">ImportError</code>, or if your version is outdated, please check the Quickstart Guide included in this course for instructions. # And finally, **Scikit-Learn**, "the Aston Marton of Python machine learning." library is the same. import sklearn print( 'Scikit-Learn version:', sklearn.__version__, ' (At least 0.18.0 is recommended)' ) # If you run into an <code style="color:crimson">ImportError</code>, or if your version is outdated, please check the Quickstart Guide included in this course for instructions. # <br id="crush"> # # 4. How to Crush this Course # # Your Companion Workbook will note when you need to run lesson code. # # For example, let's print if 5 is divisible by 2: # Print if 5 is divisible by 2 if 5 % 2 == 0: print( '5 is divisible by 2.' ) else: print( '5 is not divisible by 2. The remainder is {}.'.format(5 % 2) ) # <br> # ## Next Steps # # Hey, you reached the end of the first module... Nice job! # # Here are a few of things you did in this module: # * You learned the difference between practical and academic machine learning. # * You saw our mantra: # # > <em style="color:royalblue">Algorithms are commodities.</em> # # * You imported the "Aston Martin of Python machine learning"... Scikit-Learn. 
# * And you got a few tips on how to crush this course like the <span style="color:forestgreen; font-weight:bold">HULK</span> would. # # The next module contains the Python Crash Course, so get ready to roll up your sleeves and write some code. umber of rooms among homes in the neighborhood. # - `'LSTAT'` is the percentage of homeowners in the neighborhood considered "lower class" (working poor). # - `'PTRATIO'` is the ratio of students to teachers in primary and secondary schools in the neighborhood. # # # ** Using your intuition, for each of the three features above, do you think that an increase in the value of that feature would lead to an **increase** in the value of `'MEDV'` or a **decrease** in the value of `'MEDV'`? Justify your answer for each.** # # **Hint:** This problem can phrased using examples like below. # * Would you expect a home that has an `'RM'` value(number of rooms) of 6 be worth more or less than a home that has an `'RM'` value of 7? # * Would you expect a neighborhood that has an `'LSTAT'` value(percent of lower class workers) of 15 have home prices be worth more or less than a neighborhood that has an `'LSTAT'` value of 20? # * Would you expect a neighborhood that has an `'PTRATIO'` value(ratio of students to teachers) of 10 have home prices be worth more or less than a neighborhood that has an `'PTRATIO'` value of 15? # **Answer: ** # * 'RM' - Increase in 'RM' should lead to an increase in the value of 'MEDV'. Large number of rooms would mean a larger home, larger land and construction costs, thus a higher 'MEDV'. # * 'LSTAT' - Increase in 'LSTAT' should lead to a decrease in the value of 'MEDV'. If the ratio of working poor in a neighborhood is large, that would mean that they would only be able to afford home prices with lower prices, resulting in lower 'MEDV'. # * 'PTRATIO' - Increase in 'PTRATIO' should lead to a decrease in the value of 'MEDV'. 
Higher PTRATIO would mean more students per teacher, resulting in lower quality schools and thus low desirability for those schools and neighborhoods.

# ----
#
# ## Developing a Model
# In this second section of the project, you will develop the tools and techniques necessary for a model to make a prediction. Being able to make accurate evaluations of each model's performance through the use of these tools and techniques helps to greatly reinforce the confidence in your predictions.

# ### Implementation: Define a Performance Metric
# It is difficult to measure the quality of a given model without quantifying its performance over training and testing. This is typically done using some type of performance metric, whether it is through calculating some type of error, the goodness of fit, or some other useful measurement. For this project, you will be calculating the [*coefficient of determination*](http://stattrek.com/statistics/dictionary.aspx?definition=coefficient_of_determination), R<sup>2</sup>, to quantify your model's performance. The coefficient of determination for a model is a useful statistic in regression analysis, as it often describes how "good" that model is at making predictions.
#
# The values for R<sup>2</sup> range from 0 to 1, which captures the percentage of squared correlation between the predicted and actual values of the **target variable**. A model with an R<sup>2</sup> of 0 is no better than a model that always predicts the *mean* of the target variable, whereas a model with an R<sup>2</sup> of 1 perfectly predicts the target variable. Any value between 0 and 1 indicates what percentage of the target variable, using this model, can be explained by the **features**.
# _A model can be given a negative R<sup>2</sup> as well, which indicates that the model is **arbitrarily worse** than one that always predicts the mean of the target variable._
#
# For the `performance_metric` function in the code cell below, you will need to implement the following:
# - Use `r2_score` from `sklearn.metrics` to perform a performance calculation between `y_true` and `y_predict`.
# - Assign the performance score to the `score` variable.

# +
# TODO: Import 'r2_score'
from sklearn.metrics import r2_score

def performance_metric(y_true, y_predict):
    """ Calculates and returns the performance score between
        true and predicted values based on the metric chosen.

        Here the metric is the coefficient of determination (R^2):
        1.0 is a perfect fit, 0.0 is no better than predicting the
        mean, and negative values are arbitrarily worse than that.

        y_true    : array-like of observed target values.
        y_predict : array-like of predicted target values
                    (same length as y_true).
        Returns the R^2 score as a float.
    """

    # TODO: Calculate the performance score between 'y_true' and 'y_predict'
    score = r2_score(y_true, y_predict)

    # Return the score
    return score
# -

# ### Question 2 - Goodness of Fit
# Assume that a dataset contains five data points and a model made the following predictions for the target variable:
#
# | True Value | Prediction |
# | :-------------: | :--------: |
# | 3.0 | 2.5 |
# | -0.5 | 0.0 |
# | 2.0 | 2.1 |
# | 7.0 | 7.8 |
# | 4.2 | 5.3 |
#
# Run the code cell below to use the `performance_metric` function and calculate this model's coefficient of determination.

# Calculate the performance of this model on the five hand-made points above
score = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3])
print("Model has a coefficient of determination, R^2, of {:.3f}.".format(score))

# * Would you consider this model to have successfully captured the variation of the target variable?
# * Why or why not?
#
# ** Hint: ** The R2 score is the proportion of the variance in the dependent variable that is predictable from the independent variable. In other words:
# * R2 score of 0 means that the dependent variable cannot be predicted from the independent variable.
# * R2 score of 1 means the dependent variable can be predicted from the independent variable.
# * R2 score between 0 and 1 indicates the extent to which the dependent variable is predictable. An R2 score of 0.40 means that 40 percent of the variance in Y is predictable from X.

# **Answer:**
# The R2 score here is 0.923, which is close to 1. A score of 1 indicates that the output can be perfectly predicted from the independent variables. A score close to 1 indicates that the dependent variable is highly predictable.

# ### Implementation: Shuffle and Split Data
# Your next implementation requires that you take the Boston housing dataset and split the data into training and testing subsets. Typically, the data is also shuffled into a random order when creating the training and testing subsets to remove any bias in the ordering of the dataset.
#
# For the code cell below, you will need to implement the following:
# - Use `train_test_split` from `sklearn.cross_validation` to shuffle and split the `features` and `prices` data into training and testing sets.
#   - Split the data into 80% training and 20% testing.
#   - Set the `random_state` for `train_test_split` to a value of your choice. This ensures results are consistent.
# - Assign the train and testing splits to `X_train`, `X_test`, `y_train`, and `y_test`.

# +
# TODO: Import 'train_test_split'
from sklearn.model_selection import train_test_split

# TODO: Shuffle and split the data into training and testing subsets.
# FIX: the instructions above require a fixed `random_state` so the split is
# reproducible; the original call omitted it, so every run produced a
# different split (and different downstream scores).
X_train, X_test, y_train, y_test = train_test_split(features, prices, test_size=0.2, shuffle=True, random_state=42)

# Success
print("Training and testing split was successful.")
# -

# ### Question 3 - Training and Testing
#
# * What is the benefit to splitting a dataset into some ratio of training and testing subsets for a learning algorithm?
#
# **Hint:** Think about how overfitting or underfitting is contingent upon how splits on data is done.

# **Answer: **
# When training a model, we want the model to be good at predicting examples which it has not seen, i.e. how well it generalizes to unseen examples.
# # If we dont split a training set, and train on the entire sample, we might think the model is good even if it just memorizes the examples of training set. This is seen in models like decision trees which are prone to over fitting. A complex model can overfit the training set but do terribly on unseen data. For example, if we have ten training examples, a polynomial of degree 10 can fit those points with zero training error but if the points had been generated by a second order polynomial, the 10th degree polynomial would be a bad approximation to points which have not been seen. # ---- # # ## Analyzing Model Performance # In this third section of the project, you'll take a look at several models' learning and testing performances on various subsets of training data. Additionally, you'll investigate one particular algorithm with an increasing `'max_depth'` parameter on the full training set to observe how model complexity affects performance. Graphing your model's performance based on varying criteria can be beneficial in the analysis process, such as visualizing behavior that may not have been apparent from the results alone. # ### Learning Curves # The following code cell produces four graphs for a decision tree model with different maximum depths. Each graph visualizes the learning curves of the model for both training and testing as the size of the training set is increased. Note that the shaded region of a learning curve denotes the uncertainty of that curve (measured as the standard deviation). The model is scored on both the training and testing sets using R<sup>2</sup>, the coefficient of determination. # # Run the code cell below and use these graphs to answer the following question. # Produce learning curves for varying training set sizes and maximum depths vs.ModelLearning(features, prices) # ### Question 4 - Learning the Data # * Choose one of the graphs above and state the maximum depth for the model. 
# * What happens to the score of the training curve as more training points are added? What about the testing curve? # * Would having more training points benefit the model? # # **Hint:** Are the learning curves converging to particular scores? Generally speaking, the more data you have, the better. But if your training and testing curves are converging with a score above your benchmark threshold, would this be necessary? # Think about the pros and cons of adding more training points based on if the training and testing curves are converging. # **Answer: ** # * Maximum depth = 3. # * As more training points are added, the training score decreases and then flattens. Initially, the training error is low because for smaller number of points, the model is able to fit the training set well and score is high. As more and more points are used to train the model, the training score decreases. However its opposite for the test set. Initially, the test score is low as the model trained on very few data points is not a good model. As more points are added, the model improves and the test score increases. # * After 350 training examples, the training and test scores both flatten indicating that the score would not improve much. # ### Complexity Curves # The following code cell produces a graph for a decision tree model that has been trained and validated on the training data using different maximum depths. The graph produces two complexity curves — one for training and one for validation. Similar to the **learning curves**, the shaded regions of both the complexity curves denote the uncertainty in those curves, and the model is scored on both the training and validation sets using the `performance_metric` function. # # ** Run the code cell below and use this graph to answer the following two questions Q5 and Q6. 
# **

# Plot training vs. validation R^2 as a function of tree depth (complexity curve).
vs.ModelComplexity(X_train, y_train)

# ### Question 5 - Bias-Variance Tradeoff
# * When the model is trained with a maximum depth of 1, does the model suffer from high bias or from high variance?
# * How about when the model is trained with a maximum depth of 10? What visual cues in the graph justify your conclusions?
#
# **Hint:** High bias is a sign of underfitting(model is not complex enough to pick up the nuances in the data) and high variance is a sign of overfitting(model is by-hearting the data and cannot generalize well). Think about which model(depth 1 or 10) aligns with which part of the tradeoff.

# **Answer: **
# * For model depth of 1, the model suffers from high bias. Both the training and validation scores are low (and close to each other). A sign of high bias is that the model performs poorly even on the training set. This is true for depth=1 as the training score of about 0.4 is much lower than scores which can be achieved using higher depths.
# * The model with depth 10 suffers from high variance. The model performs very well on the training set with a score close to 1.0 but does poorly on the validation set with a score close to 0.7. The big gap between training and test shows the overfitting on the training set.

# ### Question 6 - Best-Guess Optimal Model
# * Which maximum depth do you think results in a model that best generalizes to unseen data?
# * What intuition lead you to this answer?
#
# ** Hint: ** Look at the graph above Question 5 and see where the validation scores lie for the various depths that have been assigned to the model. Does it get better with increased depth? At what point do we get our best validation score without overcomplicating our model? And remember, Occams Razor states "Among competing hypotheses, the one with the fewest assumptions should be selected."

# **Answer: **
# Depth 3 produces the model with the best performance on the test set.
Increasing the depth further improves the training score but reduces the test score, indicating the start of overfitting. Lower depths (2 or lower) have lower test and training scores, indicating under fitting. # ----- # # ## Evaluating Model Performance # In this final section of the project, you will construct a model and make a prediction on the client's feature set using an optimized model from `fit_model`. # ### Question 7 - Grid Search # * What is the grid search technique? # * How it can be applied to optimize a learning algorithm? # # ** Hint: ** When explaining the Grid Search technique, be sure to touch upon why it is used, what the 'grid' entails and what the end goal of this method is. To solidify your answer, you can also give an example of a parameter in a model that can be optimized using this approach. # **Answer: ** # Grid search is a technique which is used to find the best set of values for hyper parameters. Models have parameters and hyper-parameters. For example in tree models, the decision points and the values at the leaves are parameters. The maximum depth of trees to consider is a hyper parameter. # # Given a training set, the optimization algorithms find the values of the parameters which best optimizes the metric. However, to find the best values of hyper parameters, the best way is to split the training set into a training and validation set. Different models are trained for different values of hyper-paremeters on the training set and their performance is compared on how well they do on the validation set. The set of hyper parameters which gives the best performance on the validation set is then picked. # # Grid search is an easy way to try out different hyper-parameter values. For each hyper parameter a set of values is specified. Grid search then trains a model for each unique combination of hyper parameter values and reports the combination which performs best on the validation set. 
# ### Question 8 - Cross-Validation # # * What is the k-fold cross-validation training technique? # # * What benefit does this technique provide for grid search when optimizing a model? # # **Hint:** When explaining the k-fold cross validation technique, be sure to touch upon what 'k' is, how the dataset is split into different parts for training and testing and the number of times it is run based on the 'k' value. # # When thinking about how k-fold cross validation helps grid search, think about the main drawbacks of grid search which are hinged upon **using a particular subset of data for training or testing** and how k-fold cv could help alleviate that. You can refer to the [docs](http://scikit-learn.org/stable/modules/cross_validation.html#cross-validation) for your answer. # **Answer: ** # # Generating trainig data is expensive. Before k-fold cross validation, typically the data was split up into training and test sets. The data was trained on the training set and evaluated on the test set. The test set was kept separate and only used at the end for final evaluation. However, this did not make best use of all available data as the test set examples never helped us during training. # # To make the best use of all available data, k-fold cross validation is adopted. The training set is divided into k different parts. For example k=10, we split data into 10 parts. Then we train k models on this training set. During each model training we pick a different part as the test set and training on the remaining k-1 sets. # # The average of the k models is used for evaluation and prediction purposes. # ### Implementation: Fitting a Model # Your final implementation requires that you bring everything together and train a model using the **decision tree algorithm**. To ensure that you are producing an optimized model, you will train the model using the grid search technique to optimize the `'max_depth'` parameter for the decision tree. 
The `'max_depth'` parameter can be thought of as how many questions the decision tree algorithm is allowed to ask about the data before making a prediction. Decision trees are part of a class of algorithms called *supervised learning algorithms*. # # In addition, you will find your implementation is using `ShuffleSplit()` for an alternative form of cross-validation (see the `'cv_sets'` variable). While it is not the K-Fold cross-validation technique you describe in **Question 8**, this type of cross-validation technique is just as useful!. The `ShuffleSplit()` implementation below will create 10 (`'n_splits'`) shuffled sets, and for each shuffle, 20% (`'test_size'`) of the data will be used as the *validation set*. While you're working on your implementation, think about the contrasts and similarities it has to the K-fold cross-validation technique. # # Please note that ShuffleSplit has different parameters in scikit-learn versions 0.17 and 0.18. # For the `fit_model` function in the code cell below, you will need to implement the following: # - Use [`DecisionTreeRegressor`](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html) from `sklearn.tree` to create a decision tree regressor object. # - Assign this object to the `'regressor'` variable. # - Create a dictionary for `'max_depth'` with the values from 1 to 10, and assign this to the `'params'` variable. # - Use [`make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html) from `sklearn.metrics` to create a scoring function object. # - Pass the `performance_metric` function as a parameter to the object. # - Assign this scoring function to the `'scoring_fnc'` variable. # - Use [`GridSearchCV`](http://scikit-learn.org/0.17/modules/generated/sklearn.grid_search.GridSearchCV.html) from `sklearn.grid_search` to create a grid search object. 
# - Pass the variables `'regressor'`, `'params'`, `'scoring_fnc'`, and `'cv_sets'` as parameters to the object.
#   - Assign the `GridSearchCV` object to the `'grid'` variable.

# +
# TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV'
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import make_scorer
# FIX: 'sklearn.grid_search' was deprecated in scikit-learn 0.18 and removed in
# 0.20. Import GridSearchCV — and ShuffleSplit, which this cell uses but never
# imported — from 'sklearn.model_selection', consistent with the
# 'train_test_split' import used earlier in this notebook.
from sklearn.model_selection import GridSearchCV, ShuffleSplit

def fit_model(X, y):
    """ Performs grid search over the 'max_depth' parameter for a
        decision tree regressor trained on the input data [X, y].

        X : array-like of shape (n_samples, n_features), training features.
        y : array-like of shape (n_samples,), target prices.
        Returns the DecisionTreeRegressor refit with the best 'max_depth'
        found by cross-validated grid search (grid.best_estimator_).
    """

    # Create cross-validation sets from the training data.
    # sklearn version 0.18+: ShuffleSplit(n_splits=10, test_size=0.1, train_size=None, random_state=None)
    # sklearn version 0.17: ShuffleSplit(n, n_iter=10, test_size=0.1, train_size=None, random_state=None)
    # FIX: the original used the removed 0.17 positional signature
    # (X.shape[0], n_iter=...); the modern keyword form below is equivalent.
    cv_sets = ShuffleSplit(n_splits = 10, test_size = 0.20, random_state = 0)

    # TODO: Create a decision tree regressor object
    regressor = DecisionTreeRegressor()

    # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10
    params = {'max_depth': list(range(1, 11))}

    # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer'
    scoring_fnc = make_scorer(performance_metric)

    # TODO: Create the grid search cv object --> GridSearchCV()
    # Make sure to include the right parameters in the object:
    # (estimator, param_grid, scoring, cv) which have values 'regressor', 'params', 'scoring_fnc', and 'cv_sets' respectively.
    grid = GridSearchCV(regressor, param_grid=params, scoring=scoring_fnc, cv=cv_sets)

    # Fit the grid search object to the data to compute the optimal model
    grid = grid.fit(X, y)

    # Return the optimal model after fitting the data
    return grid.best_estimator_
# -

# ### Making Predictions
# Once a model has been trained on a given set of data, it can now be used to make predictions on new sets of input data.
# In the case of a *decision tree regressor*, the model has learned *what the best questions to ask about the input data are*, and can respond with a prediction for the **target variable**. You can use these predictions to gain information about data where the value of the target variable is unknown — such as data the model was not trained on.

# ### Question 9 - Optimal Model
#
# * What maximum depth does the optimal model have? How does this result compare to your guess in **Question 6**?
#
# Run the code block below to fit the decision tree regressor to the training data and produce an optimal model.

# +
# Fit the training data to the model using grid search
reg = fit_model(X_train, y_train)

# Produce the value for 'max_depth'
print("Parameter 'max_depth' is {} for the optimal model.".format(reg.get_params()['max_depth']))
# -

# ** Hint: ** The answer comes from the output of the code snippet above.
#
# **Answer:** 4

# ### Question 10 - Predicting Selling Prices
# Imagine that you were a real estate agent in the Boston area looking to use this model to help price homes owned by your clients that they wish to sell. You have collected the following information from three of your clients:
#
# | Feature | Client 1 | Client 2 | Client 3 |
# | :---: | :---: | :---: | :---: |
# | Total number of rooms in home | 5 rooms | 4 rooms | 8 rooms |
# | Neighborhood poverty level (as %) | 17% | 32% | 3% |
# | Student-teacher ratio of nearby schools | 15-to-1 | 22-to-1 | 12-to-1 |
#
# * What price would you recommend each client sell his/her home at?
# * Do these prices seem reasonable given the values for the respective features?
#
# **Hint:** Use the statistics you calculated in the **Data Exploration** section to help justify your response.
# Of the three clients, client 3 has the biggest house, in the best public school neighborhood with the lowest poverty level; while client 2 has the smallest house, in a neighborhood with a relatively high poverty rate and not the best public schools.
#
# Run the code block below to have your optimized model make predictions for each client's home.

# +
# Produce a matrix for client data: one row per client, columns are
# [rooms, poverty level %, student-teacher ratio] — the features the model was trained on.
client_data = [[5, 17, 15], # Client 1
               [4, 32, 22], # Client 2
               [8, 3, 12]]  # Client 3

# Show predictions
for i, price in enumerate(reg.predict(client_data)):
    print("Predicted selling price for Client {}'s home: ${:,.2f}".format(i+1, price))
# -

# **Answer: **
# * Predicted selling price for Client 1's home: $415,872.41
#
# * Predicted selling price for Client 2's home: $234,514.29
#
# * Predicted selling price for Client 3's home: $892,850.00

# ### Sensitivity
# An optimal model is not necessarily a robust model. Sometimes, a model is either too complex or too simple to sufficiently generalize to new data. Sometimes, a model could use a learning algorithm that is not appropriate for the structure of the data given. Other times, the data itself could be too noisy or contain too few samples to allow a model to adequately capture the target variable — i.e., the model is underfitted.
#
# **Run the code cell below to run the `fit_model` function ten times with different training and testing sets to see how the prediction for a specific client changes with respect to the data it's trained on.**

vs.PredictTrials(features, prices, fit_model, client_data)

# ### Question 11 - Applicability
#
# * In a few sentences, discuss whether the constructed model should or should not be used in a real-world setting.
#
# **Hint:** Take a look at the range in prices as calculated in the code snippet above. Some questions worth answering:
# - How relevant today is data that was collected from 1978? How important is inflation?
# - Are the features present in the data sufficient to describe a home?
Do you think factors like quality of appliances in the home, square feet of the plot area, presence of a pool or not etc. should factor in?
# - Is the model robust enough to make consistent predictions?
# - Would data collected in an urban city like Boston be applicable in a rural city?
# - Is it fair to judge the price of an individual home based on the characteristics of the entire neighborhood?

# **Answer: **
# This model is too simplistic to be used in a real-world setting for the following reasons:
# * Data is about 40 years old. This might not reflect the housing data today due to demographic and economic changes.
# * Model is too simplistic - homes are also influenced by individual factors - not all homes in the same neighborhood are priced the same. Appliances, square footage, access to public transport or freeways, types of flooring etc. can play an important role.
# * The sensitivity of the model as shown in the previous section is about 15%. This is a substantial variation and better models should have lower variability.
# * This model is trained on urban data and would not generalize to rural areas or suburbs well.
# * This model is too simplistic as individual features of homes have a big impact on the price of the home.

# > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to
# **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
32,554
/Practical-06-3. Correlation.ipynb
c74d3677ca50a69ddf421f7423b924689922cf45
[ "MIT" ]
permissive
dersteppenwolf/applied_gsa
https://github.com/dersteppenwolf/applied_gsa
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
10,095
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %matplotlib inline import matplotlib.pyplot as plt import numpy as np # ## Kernel Density Estimation # + from sklearn.datasets import make_blobs X, y = make_blobs(n_features=1, n_samples=30, random_state=1) # - plt.hist(X); plt.scatter(X.ravel(), -np.ones(len(X))) plt.yticks(()) # + from sklearn.neighbors import KernelDensity kde = KernelDensity(bandwidth=1).fit(X) # - line = np.linspace(X.min() - 2, X.max() + 2, 100) line_scores = np.exp(kde.score_samples(line[:, np.newaxis])) plt.plot(line, line_scores) plt.twinx().scatter(X.ravel(), np.ones(len(X))) kde.score(X) kde = KernelDensity(bandwidth=0.2).fit(X) line = np.linspace(X.min() - 2, X.max() + 2, 1000) line_scores = np.exp(kde.score_samples(line[:, np.newaxis])) plt.plot(line, line_scores) plt.twinx().scatter(X.ravel(), np.ones(len(X))) kde.score(X) from sklearn.grid_search import GridSearchCV param_grid = {'bandwidth': np.logspace(-1, 1, 20)} grid = GridSearchCV(KernelDensity(), param_grid, cv=10) grid.fit(X) grid.best_params_ line_scores = np.exp(grid.best_estimator_.score_samples(line[:, np.newaxis])) plt.plot(line, line_scores) plt.twinx().scatter(X.ravel(), -np.ones(len(X))) # ## PCA as probabilistic model # + from sklearn.datasets import make_low_rank_matrix from sklearn.decomposition import PCA X = make_low_rank_matrix(n_features=100, effective_rank=10, random_state=0) # - pca = PCA(n_components=20).fit(X) pca.score(X) pca = PCA(n_components=50).fit(X) pca.score(X) from sklearn.learning_curve import validation_curve param_range = range(2, 40, 2) training_scores, validation_scores = validation_curve(PCA(), X, None, param_name="n_components", param_range=param_range, cv=10) from figures import plot_validation_curve plot_validation_curve(param_range, training_scores, validation_scores) 
his one](https://towardsdatascience.com/why-feature-correlation-matters-a-lot-847e8ba439c4). We could also use Principal Components Analysis (PCA) to perform dimensionality reduction whilst also dealing with correlation between the variables. # - # Here's an output table which gives you nice, specific # numbers but is hard to read so I'm only showing the # first ten rows and columns... scdf.corr().iloc[1:7,1:7] # ### Finding Strong Correlations Visually # + # And here's a correlation heatmap... which is easier to read but has # less detail. What it *does* highlight is high levels of *negative* # correlation as well as positive, so you'll need absolute difference, # not just whether something is more than 0.x correlated. # # From https://seaborn.pydata.org/examples/many_pairwise_correlations.html cdf = scdf.corr() # Generate a mask for the upper triangle mask = np.zeros_like(cdf, dtype=np.bool) mask[np.triu_indices_from(mask)] = True # Set up the matplotlib figure f, ax = plt.subplots(figsize=(10, 10)) # Generate a custom diverging colormap cm = sns.diverging_palette(240, 10, as_cmap=True) # Draw the heatmap with the mask and correct aspect ratio sns.heatmap(cdf, mask=mask, cmap=cm, vmax=1.0, vmin=-1.0, center=0, square=True, linewidths=.1, cbar_kws={"shrink": .5}) # - # <div style="padding:5px;margin-top:5px;margin-bottom:5px;border:dotted 1px red;background-color:rgb(255,233,233);color:red">STOP. Make sure that you understand what the figure above is showing before proceeding to the next stage.</div> # ### Finding Strong Correlations Numerically # Generate the matrix but capture the output this time cdf = scdf.corr() cdf['name'] = cdf.index # We need a copy of the index # + corrh = 0.66 # Specify threshold for highly correlated? print("! 
High correlation threshold is {0}.".format(corrh))

# For each variable, list the other variables whose absolute correlation with
# it meets the threshold, and record how many such partners it has.
num_corrs = []
hi_corrs = []
for c in cdf.name.unique():
    if c != 'name':
        # Some formatting
        print("=" * 10 + f" {c} " + "=" * 10)

        # Find highly correlated variables (absolute value: negative
        # correlations count too), then drop the variable's self-correlation.
        hits = cdf.loc[(abs(cdf[c]) >= corrh), c]
        hits.drop(c, inplace=True)

        if hits.size == 0:
            print("+ Not highly correlated with other variables.")
        else:
            num_corrs.append(hits.size)
            print("- High correlations ({0}) with other variables:".format(hits.size))
            print("   " + "\n   ".join(hits.index.values))
            hi_corrs.append(hits.size)
# -

sns.distplot(hi_corrs, bins=range(0,20), kde=False).set_title(
    "Number of Strong Correlations (> " + str(corrh) + ") with Other Variables")

# ### Stripping Out 'Redundant' Variables
#
# Let's remove any variable that has a '*lot*' of strong correlations with other variables, though we need to define what is 'a lot'. This will reduce the dimensionality of our data and make clustering a bit easier. An alternative approach to dimensionality reduction -- which can be more 'robust' if we ensure that all of the data has unit variance (which we've done using the MinMaxScaler), though harder for many to understand -- would be to apply Principal Components Analysis (PCA) to the data set and to work with the eigenvalues afterwards. PCA is also available in `sklearn`.
#
# We'll set our threshold at 5.0 based on a visual inspection of the chart above.

# +
# NOTE(review): the prose above says the threshold is 5.0 but the code below
# sets maxcorrs = 4.0 — confirm which value was intended.
corrh = 0.66      # Specify threshold for highly correlated?
maxcorrs = 4.0    # What's our threshold for too many strong correlations?
threshold = 0.5*maxcorrs # What's our threshold for too many strong correlations with columns we keep!
print("! 
High correlation threshold is {0}.".format(corrh)) to_drop = [] # Columns to drop to_keep = [] # Columns to keep num_corrs = [] hi_corrs = [] for c in cdf.columns: if c != 'name': # Find highly correlated variables, but let's # keep the focus on *positive* correlation now hits = cdf.loc[(cdf[c] >= corrh), c] hits.drop(c, inplace=True) multi_vals = False # Remove ones with many correlations if hits.size >= maxcorrs: print(f"- {c} exceeds maxcorr ({maxcorrs}) correlation threshold (by {hits.size-threshold}).") s1 = set(to_keep) s2 = set(hits.index.values) #print("Comparing to_keep (" + ", ".join(s1) + ") to hits (" + ", ".join(s2) + ")") s1 &= s2 #print("Column found in 'many correlations' :" + str(s1)) if len(s1) >= threshold: multi_vals = True print(f" - Dropping b/c exceed {threshold} correlations with retained cols: \n -" + "\n -".join(s1)) else: print(f" + Keeping b/c fewer than {threshold} correlations with retained columns.") else: print(f"+ {c} falls below maxcorr ({maxcorrs}) correlation threshold (by {abs(threshold-hits.size)}).") if multi_vals==True: to_drop.append(c) else: to_keep.append(c) print(" ") print("To drop ({0}): ".format(len(to_drop)) + ", ".join(to_drop)) print(" ") print("To keep ({0}): ".format(len(to_keep)) + ", ".join(to_keep)) # - to_save = scdf.drop(to_drop, axis=1, errors='raise') print("Retained variables: " + ", ".join(to_save.columns.values)) to_save.to_pickle(os.path.join('data','LSOA_2Cluster.pickle')) del(to_save) or(m, b, datapoints) < smallest_error: best_m = m best_b = b smallest_error = calculate_all_error(m, b, datapoints) print (best_m, best_b, smallest_error) # - # ## Part 3: What does our model predict? # # Now we have seen that for this set of observations on the bouncy balls, the line that fits the data best has an `m` of 0.3 and a `b` of 1.7: # # ``` # y = 0.3x + 1.7 # ``` # # This line produced a total error of 5. 
# # Using this `m` and this `b`, what does your line predict the bounce height of a ball with a width of 6 to be? # In other words, what is the output of `get_y()` when we call it with: # * m = 0.3 # * b = 1.7 # * x = 6 print(get_y(0.3, 1.7, 6)) # Our model predicts that the 6cm ball will bounce 3.5m. # # Now, Reggie can use this model to predict the bounce of all kinds of sizes of balls he may choose to include in the ball pit!
8,481
/Telecom/Telecom.ipynb
74d7e7272b8ba71e2421cebbabd889355f690588
[]
no_license
voleka-ocrv/Praktikum-DA
https://github.com/voleka-ocrv/Praktikum-DA
0
1
null
null
null
null
Jupyter Notebook
false
false
.py
365,486
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Определение перспективного тарифа для телеком компании # **Содержание** # <ul> # <li><a href='#description'>Описание проекта</a></li> # <li><a href='#step1'>Шаг 1 Изучение данных</a></li> # <li><a href='#step2'>Шаг 2 Подготовка данных</a></li> # <ul> # <li><a href='#step21'>Поиск и удаление выбросов</a></li> # </ul> # <li><a href='#step3'>Шаг 3 Анализ данных</a></li> # <li><a href='#step4'>Шаг 4 Проверка гипотезы</a></li> # </ul> # ## Описание проекта # Клиентам «Мегалайн» предлагают два тарифных плана: «Смарт» и «Ультра». <br> # Необходимо определить какой тариф приносит больше денег и какой выгоднее продвигать на рынке.<br> # Анализ выполняется выборке из 500 клиентов. # # ### Описание данных # <section> # Таблица <b>users</b> (информация о пользователях): # <ul> # <li>user_id — уникальный идентификатор пользователя</li> # <li>churn_date — дата прекращения пользования тарифом (если значение пропущено, то тариф ещё действовал на момент выгрузки данных)</li> # <li>city — город проживания пользователя</li> # <li>tariff — название тарифного плана</li> # </ul> # </section> # <section> # Таблица <b>calls</b> (информация о звонках): # <ul> # <li>id — уникальный номер звонка</li> # <li>call_date — дата звонка</li> # <li>duration — длительность звонка в минутах</li> # <li>user_id — идентификатор пользователя, сделавшего звонок</li> # </ul> # </section> # <section> # Таблица <b>messages</b> (информация о сообщениях): # <ul> # <li>id — уникальный номер сообщения</li> # <li>message_date — дата сообщения</li> # <li>user_id — идентификатор пользователя, отправившего сообщение</li> # </ul> # </section> # <section> # Таблица <b>internet</b> (информация об интернет-сессиях): # <ul> # <li>id — уникальный номер сессии</li> # <li>mb_used — объём потраченного за 
сессию интернет-трафика (в мегабайтах)</li> # <li>session_date — дата интернет-сессии</li> # <li>user_id — идентификатор пользователя</li> # </ul> # </section> # Таблица <b>tariffs</b> (информация о тарифах)<br> # </section> # <a id='step1'></a> # ## Шаг 1 Изучение данных # Откроем файлы с данными и изучим общую информацию import pandas as pd import numpy as np import math from matplotlib import pyplot as plt from math import factorial from scipy.stats import norm from scipy import stats as st def file_info(dataset): display(dataset.head()) display(dataset.info()) display(dataset.describe()) # ### Звонки # Прочитаем файл, содержащий данные о звонках и изучим общую информацию. # # ---- # `call_date`, содержащий дату звонка прочитался с типом `object`. Потребуется привести его к типу `datetime`, чтобы можно было выполнять операции с этим полем.<br> # В некоторых строках `duration` равно 0. Необходимо будет решить как обрабатывать такие значения. calls = pd.read_csv('/datasets/calls.csv') file_info(calls) # ### Интернет # Прочитаем файл, содержащий данные об интернет сессиях и изучим общую информацию. # # ---- # В файле содержится столбец `Unnamed: 0`, который не требуется для выполнения анализа, удалим этот столбец из датасета.<br> # Дату `session_date` приведем к типу `datetime`. internet = pd.read_csv('/datasets/internet.csv') file_info(internet) # ### Сообщения # Прочитаем файл, содержащий данные о смс и изучим общую информацию. # # ---- # Приведем `message_date` к типу `datetime`. messages = pd.read_csv('/datasets/messages.csv') file_info(messages) # ### Параметры тарфиных планов tariffs = pd.read_csv('/datasets/tariffs.csv') tariffs # ### Данные о пользователях # Прочитаем файл, содержащий данные о пользователях и изучим общую информацию. # # ---- # Поле `user_id` является естественным индекстом таблицы. Назначим эту колонку индексом таблицы вместо текущего индекса.<br> # Приведем `reg_date`, `churn_date` к типу `datetime`. 
users = pd.read_csv('/datasets/users.csv') file_info(users) # <a id='step2'></a> # ## Шаг 2. Подготовка данных # **Примечание:** «Мегалайн» всегда округляет вверх значения минут и мегабайтов. Если пользователь проговорил всего 1 секунду, в тарифе засчитывается целая минута. Поэтому округлим вверх до целого длительность каждого звонка и интернет сессии. # ### Звонки # - Приведем дату к типу datetime. # - Округлим вверх до целого длительность каждого звонка. Если пользователь проговорил всего 1 секунду, в тарифе засчитывается целая минута. calls['call_date'] = pd.to_datetime(calls['call_date'], format='%Y-%m-%d') calls['duration'] = calls['duration'].transform(math.ceil) calls.head() # ### Интернет # - Приведем дату к типу datetime. # - Округлим вверх до целого длительность каждой интернет сессии. Если пользователь потратил 1 Kb, то в тарифе засчитывается 1 Mb. # - Удалим столбец `Unnamed: 0` internet['session_date'] = pd.to_datetime(internet['session_date'], format='%Y-%m-%d') internet['mb_used'] = internet['mb_used'].transform(math.ceil) internet = internet.drop('Unnamed: 0', axis=1) internet.head() # ### Сообщения # - Приведем дату к типу datetime. messages['message_date'] = pd.to_datetime(messages['message_date'], format='%Y-%m-%d') messages.head() # ### Данные о пользователях # - Прочитаем файл, содержащий данные о пользователях и изучим общую информацию. # - Приведем дату к типу datetime. # - Установим колонку `user_id` в качестве индекса таблицы. users = pd.read_csv('/datasets/users.csv') users['reg_date'] = pd.to_datetime(users['reg_date'], format='%Y-%m-%d') users['churn_date'] = pd.to_datetime(users['churn_date'], format='%Y-%m-%d') users = users.set_index('user_id') users.head() # Объединим данных о расходах на звонки, смс и интернет в одной таблице и сгруппируем их по месяцам.<br><br> # Нулевая длительность звонков может означать, что пользователь не дозвонился до абонента. 
Поскольку это не влияет на расчет общей длительности звонков за месяц, то обрабатывать эти строки не будем. При группировке и подсчете общей длительности звонков за месяц, это не повлияет на итоговое значение. calls_duration = calls.groupby(['user_id', 'call_date']).sum().rename_axis(['user_id', 'date'], axis='index') messages_count = messages.groupby(['user_id', 'message_date']).count().rename_axis(['user_id', 'date'], axis='index') internet_traffic = internet.groupby(['user_id', 'session_date']).sum().rename_axis(['user_id', 'date'], axis='index') incomes = calls_duration.merge(messages_count, left_index=True, right_index=True, how='outer').merge(internet_traffic, left_index=True, right_index=True, how='outer') incomes.head() incomes.set_axis(['calls', 'sms', 'internet'], axis='columns', inplace=True) incomes = incomes.reset_index(level='date') incomes incomes['month'] = incomes['date'].dt.month incomes incomes['tariff'] = users['tariff'] incomes.head() incomes = incomes.groupby(['tariff', 'user_id', 'month']).sum() incomes.head() incomes = incomes.reset_index() # <a id='step21'></a> # ### Поиск и удаление выбросов # Boxplot # Разделим пользователей по тарифам и проанализируем в них пользование звонками, смс, интернетом на наличие выбросов. Удалим выбросы как нетипичных представителей тарифа. incomes_smart = incomes.loc[(incomes.loc[:, 'tariff'] == 'smart')] incomes_smart.describe() incomes_ultra = incomes.loc[(incomes.loc[:, 'tariff'] == 'ultra')] incomes_ultra.describe() # **Тариф "Смарт"** <br> # На диаграмме boxplot для звонков видим, что есть выбросы выше, чем 1.5 межквартильных размаха вверх. Выделим их и посмотрим какую долю от общего количества данных они занимают. Если значение не слишком велико, то стоит избавитья от них. plt.title('Тариф "Смарт"') plt.ylabel('Минуты') plt.grid() data = incomes_smart['calls'] plt.boxplot(data, labels=['Звонки']) # Определим границу, выше которой значения будем считать выбросами. 
lim_up_calls = incomes_smart['calls'].describe()['75%']+st.iqr(incomes_smart['calls']) lim_up_calls # Рассчитаем количествотаких значений incomes_smart[incomes_smart['calls'] > lim_up_calls]['calls'].count() # На диаграмме boxplot для смс видим, что есть выбросы выше, чем 1.5 межквартильных размаха вверх. plt.title('Тариф "Смарт"') plt.ylabel('шт') plt.grid() data = incomes_smart['sms'] plt.boxplot(data, labels=['SMS']) limit_up_sms = incomes_smart['sms'].describe()['75%']+1.5*st.iqr(incomes_smart['sms']) limit_up_sms incomes_smart[incomes_smart['sms'] > limit_up_sms]['sms'].count() # На диаграмме boxplot для интернета видим, что есть выбросы больше, чем 1.5 межквартильных размаха вверх и вниз. Уберем выбросы. plt.title('Тариф "Смарт"') plt.ylabel('Mb') plt.grid() data = incomes_smart['internet'] plt.boxplot(data, labels=['Интернет']) # Определим верхнюю и нижнюю границы. limit_up_internet = incomes_smart['internet'].describe()['75%']+1.5*st.iqr(incomes_smart['internet']) limit_up_internet limit_down_internet = incomes_smart['internet'].describe()['25%']-1.5*st.iqr(incomes_smart['internet']) limit_down_internet # Рассчитаем количество incomes_smart[(incomes_smart['internet'] > limit_up_internet) | (incomes_smart['internet'] < limit_down_internet)]['internet'].count() # Сделаем выборку значений, исключив выбросы. incomes_smart = incomes_smart.query('calls < @lim_up_calls and sms < @limit_up_sms and (internet < @limit_up_internet and internet > @limit_down_internet)').copy() incomes_smart = incomes_smart.reset_index(drop=True) incomes_smart.describe() # Количество записей после удаления выбросов уменьшилось незначительно. # **Тариф "Ультра"** <br> # Из данных исключим выбросы за пределами 1.5 межквартильных размаха. 
plt.title('Тариф "Ультра"') plt.ylabel('Минуты') plt.grid() data = incomes_ultra['calls'] plt.boxplot(data, labels=['Звонки']) lim_up_calls = incomes_ultra['calls'].describe()['75%']+st.iqr(incomes_ultra['calls']) lim_up_calls incomes_ultra[incomes_ultra['calls'] > lim_up_calls]['calls'].count() # На диаграмме boxplot для смс видим, что есть выбросы выше, чем 1.5 межквартильных размаха вверх. plt.title('Тариф "Ультра"') plt.ylabel('шт') plt.grid() data = incomes_ultra['sms'] plt.boxplot(data, labels=['SMS']) limit_up_sms = incomes_ultra['sms'].describe()['75%']+1.5*st.iqr(incomes_ultra['sms']) limit_up_sms incomes_ultra[incomes_ultra['sms'] > limit_up_sms]['sms'].count() plt.title('Тариф "Ультра"') plt.ylabel('Mb') plt.grid() data = incomes_ultra['internet'] plt.boxplot(data, labels=['Интернет']) limit_up_internet = incomes_ultra['internet'].describe()['75%']+1.5*st.iqr(incomes_ultra['internet']) limit_up_internet incomes_ultra[(incomes_ultra['internet'] > limit_up_internet)]['internet'].count() # Сделаем выборку значений, исключив выбросы. incomes_ultra = incomes_ultra.query('calls < @lim_up_calls and sms < @limit_up_sms and internet < @limit_up_internet').copy() incomes_ultra = incomes_ultra.reset_index(drop=True) incomes_ultra.describe() # Добавим данные о тарифе пользователя. incomes_smart = incomes_smart.merge(tariffs.rename(columns={'tariff_name': 'tariff'}), on='tariff', how='left') incomes_smart.head() incomes_ultra = incomes_ultra.merge(tariffs.rename(columns={'tariff_name': 'tariff'}), on='tariff', how='left') incomes_ultra.head() # Приведем значения минут, смс и трафика к целому типу. 
incomes_smart = incomes_smart.astype({'calls':'int32', 'sms':'int32', 'internet':'int32'}) incomes_smart.head() incomes_ultra = incomes_ultra.astype({'calls':'int32', 'sms':'int32', 'internet':'int32'}) incomes_ultra.head() # Посчитаем для каждого пользователя: # - количество сделанных звонков и израсходованных минут разговора по месяцам `calls`; # - количество отправленных сообщений по месяцам `sms`; # - объем израсходованного интернет-трафика по месяцам (Мб) `internet`; # - помесячную выручку с каждого пользователя (вычтите бесплатный лимит из суммарного количества звонков, сообщений и интернет-трафика; остаток умножьте на значение из тарифного плана; прибавьте абонентскую плату, соответствующую тарифному плану) `revenue`. # Рассчитаем помесячную выручку: # - Для звонков рассчитаем разницу между количеством минут, включенных в тариф и количеством использованных минут. Если получается отрицательное число, значит был перерасход. Перерасход умножим на стоимость минуты по тарифу получим стоимость звонков сверх пакета. # - Аналогично рассчитаем стоимость смс сверх пакета. # - Расход интернета и интернет, включенный в пакет, указаны в Mb. Стоимость интернета сверх лимита указана за 1 Gb. Для рассчета стоимости интернета сверх пакета рассчитаем разность между трафиком в пакете и трафиком использованным (Mb). Полученное значение разделим на 1024, округлим до 2 знаков и умножим на стоимость интернета сверх пакета. # - Просуммируем расходы на звонки, смс и интернет сверх пакета и прибавим абонентскую плату по тарифу. # - Реализуем расчет с помощью функции. 
def revenue(row): fee = row['rub_monthly_fee'] minutes_tariff = row['minutes_included'] minutes_income = row['calls'] minutes_price = row['rub_per_minute'] sms_tariff = row['messages_included'] sms_income = row['sms'] sms_price = row['rub_per_message'] internet_tariff = row['mb_per_month_included'] internet_income = row['internet'] internet_price = row['rub_per_gb'] minutes_over = -(minutes_tariff - minutes_income) if (minutes_tariff - minutes_income) < 0 else 0 sms_over = -(sms_tariff - sms_income) if (sms_tariff - sms_income) < 0 else 0 internet_over = -(internet_tariff - internet_income) if (internet_tariff - internet_income) < 0 else 0 internet_over = round(internet_over/1024, ndigits=2) return fee + minutes_over*minutes_price + sms_over*sms_price + internet_over*internet_price incomes_ultra['revenue'] = incomes_ultra.apply(revenue, axis=1) incomes_ultra = incomes_ultra.astype({'revenue':'int32'}) incomes_ultra.head() incomes_ultra.info() incomes_smart['revenue'] = incomes_smart.apply(revenue, axis=1) incomes_smart = incomes_smart.astype({'revenue':'int32'}) incomes_smart.head() incomes_smart.info() # <a id='step3'></a> # ## Шаг 3. Анализ данных # Опишите поведение клиентов оператора, исходя из выборки.<br> # Сколько минут разговора, сколько сообщений и какой объём интернет-трафика требуется пользователям каждого тарифа в месяц? <br> # Посчитайте среднее количество, дисперсию и стандартное отклонение. Постройте гистограммы. Опишите распределения. incomes_smart.describe() incomes_ultra.describe() # ## Распределение # ### Звонки # Звонки имеют нормальное распределение. Среднее значение для тарифа "Смарт" составляет 410 мин/мес, для тарифа "Ультра" 510 мин/мес. <br> # На тарифе "Смарт" включено 500 минут, на тарифе "Ультра" 3000 минут.<br> # Чаще всего пользователи тарифа "Смарт" полностью расходуют пакет минут, при этом около четверти продолжают совершать звонки, оплачивая их поминутно. 
Максимальный перерасход составил 300 минут (900 руб.)<br> # Половина пользователей тарифа "Ультра" расходуют пакет минут примерно на 20%, 3/4 расходуют примерно 25% пакета, полностью минуты пакета не расходуются.<br> # Пользователи на тарифе "Ультра" имеют большую вариативность поведения. Присутствуют как те, кто тратит совсем мало минут, так и те, кто расходует почти половину пакета. ax = incomes_smart.plot(kind='kde', y='calls', figsize=(10, 5), linewidth=5, alpha=0.7, label='smart') incomes_ultra.plot(kind='kde', y='calls', figsize=(10, 5), linewidth=5, alpha=0.7, label='ultra', ax=ax, grid=True, legend=True, title='Calls') # ### SMS # Смс имеют скошенность вправо, т.е. дополнительные значения находятся справа. Это говорит о большом разбросе значений, превышающих медиану. # 75% пользователей тарифа "Смарт" полностью расходуют пакет смс и только 25% продолжают использовать платные смс. # Пользователи тарифа "Ультра" не расходуют лимит смс в пакете полностью, большая часть тратит меньше 8%. # Пользователи тарифа "Ультра" имеют большую вариативность. ax = incomes_smart.plot(kind='kde', y='sms', figsize=(10, 5), linewidth=5, alpha=0.7, label='smart') incomes_ultra.plot(kind='kde', y='sms', figsize=(10, 5), linewidth=5, alpha=0.7, label='ultra', ax=ax, grid=True, legend=True, title='SMS') # <div class="alert alert-block alert-warning"> # <b>КОММЕНТАРИЙ ОТ НАСТАВНИКА:</b> # # Тут также рекомендую расширить вывод и описать абоненты какого тарифа чаще выходять за пределы лимитов, или очень много не добирают до него и не расходуют свой пакет услуг. Также какой из тарифов обладает больше вариативностью по признаку смс. # # </div> # ### Internet # Датасет по тарифу "Ультра" скошен влево, значит дополнительные значения расположены слева от медианы. <br> # Половина пользователей тарифа "Смарт" превышают пакет интернета, 25% пользователей превышают пакет примерно на 3,5 Gb (700 руб.).<br> # Пользователи тарифа "Ультра" в основном не используют полностью пакет интернета. 
Половина пользователей использует около 60% пакета.<br> # Пользователи тарифа "Ультра" обладают большей вариативностью. ax = incomes_smart.plot(kind='kde', y='internet', figsize=(10, 5), linewidth=5, alpha=0.7, label='smart') incomes_ultra.plot(kind='kde', y='internet', figsize=(10, 5), linewidth=5, alpha=0.7, label='ultra', ax=ax, grid=True, legend=True, title='Internet') # ### Выручка # Выручка от пользователей тарифа "Ультра" стабильнее и выше, чем от пользователей тарифа "Смарт". ax = incomes_smart.plot(kind='kde', y='revenue', figsize=(10, 5), linewidth=5, alpha=0.7, label='smart') incomes_ultra.plot(kind='kde', y='revenue', figsize=(10, 5), linewidth=5, alpha=0.7, label='ultra', ax=ax, grid=True, legend=True, title='revenue') incomes = incomes_smart.append(incomes_ultra).reset_index() incomes.tail() incomes_stat = round(incomes.groupby('tariff')[['calls', 'sms', 'internet', 'revenue']].agg([np.mean, np.median, lambda x: np.var(x, ddof=1), lambda x: np.std(x, ddof=1)]).reset_index()) incomes_stat.columns = (['tariff', 'calls_mean', 'calls_median', 'calls_var', 'calls_std', 'sms_mean', 'sms_median', 'sms_var', 'sms_std', 'internet_mean', 'internet_median', 'internet_var', 'internet_std', 'revenue_mean', 'revenue_median', 'revenue_var', 'revenue_std']) # Из таблицы видно какое среднее количество звонков/смс/интернета используют представители каждого тарифа. # Пользователи тарифа "Ультра" по всем услугам расходуют больше, при этом все-таки находятся в рамках пакета и незначительно превышая расход пользователей тарифа "Смарт" (в среднем на 20-30%). incomes_stat # Пользователи тарифа "Смарт" чаще всего полностью используют пакет включенных услуг, при этом незначительно выходят за пределы лимита, оплачивая услуги по тарифу.<br> # Пользователи тарифа "Ультра" довольно редко полностью используют пакет включенных услуг, <br> # У пользователей тарифа "Ультра" больше размах, вариативность потребления услуг. 
При том, что абонентская плата почти в 3,5 раза превышает плату на тарифе "Смарт", средние значения потребления услуг отличаются незначительно.<br> # # ---- # **Вывод:** Для оператора выгоднее продвигать тариф "Ультра". Клиенты тарифа приносят стабильно больше выручки, при этом ресурсы сети на обеспечение услуг расходуются примерно такие же как на тарифе "Смарт". incomes_pt_sum = incomes.pivot_table(index = 'tariff', values = 'revenue', aggfunc = 'sum').sort_values('revenue', ascending = False) incomes_pt_sum round(incomes_pt_sum.loc['smart', 'revenue']/incomes_pt_sum.loc['ultra', 'revenue'], ndigits=2) # <a id='summary'></a> # Для Смарта 2529853/2136 # Для Ультры 1958137/954 incomes_pt_count = incomes.pivot_table(index = 'tariff', values = 'revenue', aggfunc = 'count').sort_values('revenue', ascending = False) incomes_pt_count round(incomes_pt_count.loc['smart', 'revenue']/incomes_pt_count.loc['ultra', 'revenue'], ndigits=2) incomes_pt_median = incomes.pivot_table(index = 'tariff', values = 'revenue', aggfunc = 'median').sort_values('revenue', ascending = False) incomes_pt_median round(incomes_pt_median.loc['smart', 'revenue']/incomes_pt_median.loc['ultra', 'revenue'], ndigits=2) # <a id='step4'></a> # ## Шаг 4. Проверка гипотезы # средняя выручка пользователей тарифов «Ультра» и «Смарт» различается;<br> # средняя выручка пользователей из Москвы отличается от выручки пользователей из других регионов;<br> # Пороговое значение alpha задайте самостоятельно.<br> # Поясните: # - как вы формулировали нулевую и альтернативную гипотезы; # - какой критерий использовали для проверки гипотез и почему. 
# Типичная гипотеза "средние двух генеральных совокупностей равны между собой" # # ### Гипотеза 1 # Проверка гипотезы **средняя выручка пользователей тарифов «Ультра» и «Смарт» различается**<br> # # ---- # **Нулевая гипотеза (H0):**<br> # Средняя выручка от пользователей тарифов "Ультра" и "Смарт" одинаковая<br> # # **Альтернативная гипотеза (H1):**<br> # Средняя выручка от пользователей тарифов "Ультра" и "Смарт" НЕ одинаковая. sample_1 = incomes_smart['revenue'].array sample_2 = incomes_ultra['revenue'].array from scipy.stats import mannwhitneyu mannwhitneyu(sample_1, sample_2) # + from scipy.stats import shapiro gauss_data = incomes_ultra['revenue'] stat, p = shapiro(gauss_data) # interpret alpha = 0.05 if p > alpha: msg = 'Sample looks Gaussian (fail to reject H0)' else: msg = 'Sample does not look Gaussian (reject H0)' result_mat = [ ['Length of the sample data', 'Test Statistic', 'p-value', 'Comments'], [len(gauss_data), stat, p, msg] ] # - result_mat # + sample_1 = incomes_smart['revenue'].array sample_2 = incomes_ultra['revenue'].array alpha = .05 # критический уровень статистической значимости # если p-value окажется меньше него - отвергнем гипотезу results = st.ttest_ind( sample_1, sample_2) print('p-значение:', results.pvalue) if (results.pvalue < alpha): print("Отвергаем нулевую гипотезу") else: print("Не получилось отвергнуть нулевую гипотезу") # - # ### Вывод # Таким образом гипотеза о равенстве выручки на тарифах "Смарт" и "Ультра" не подтвердилась - среднаяя выручка на тарифах разная. # ### Гипотеза 2 # Проверка гипотезы **средняя выручка пользователей из Москвы отличается от выручки пользователей из других регионов** # - Сформируем датасет из пользователей из Москвы и пользователей из других регионов. 
incomes_city = incomes.merge(users.loc[:, 'city'], left_on='user_id', right_index=True) incomes_moscow = incomes_city.loc[incomes_city.loc[:, 'city'] == 'Москва'] incomes_moscow.head() incomes_regions = incomes_city.loc[incomes_city.loc[:, 'city'] != 'Москва'] incomes_regions.head() # - Выполним проверку гипотезы. # ---- # **Нулевая гипотеза (H0):**<br> # Средняя выручка от пользователей Москвы и регионов одинаковая.<br> # # **Альтернативная гипотеза (H1):**<br> # Средная выручка от пользователей Москвы и регионов НЕ одинаковая. # + sample_1 = incomes_moscow['revenue'].array sample_2 = incomes_regions['revenue'].array alpha = .05 # критический уровень статистической значимости # если p-value окажется меньше него - отвергнем гипотезу results = st.ttest_ind( sample_1, sample_2) print('p-значение:', results.pvalue) if (results.pvalue < alpha): print("Отвергаем нулевую гипотезу") else: print("Не получилось отвергнуть нулевую гипотезу") # - # #### Вывод # Гипотеза о том, что средняя выручка от пользователей Москвы и регионов одинаковая не опровергнута. <br> # Однако это не не говорит о равенстве выручки в Москве и регионах. Т.к. такая проверка позволяет только сказать есть ли данные, противоречащие гипотезе, но не позволяет сказать, что есть данные подтверждающие гипотезу. 
# ### **Чек-лист по заданиям** # # - [x] Открыты все файлы # - [x] Файлы изучены (выведены первые строки, метод info()) # - [x] Данные приведены к нужным типам # - [x] Выполнена предобработка данных # # *Посчитано:* # # - [x] Количество сделанных звонков и израсходованных минут разговора по месяцам # - [x] Количество отправленных сообщений по месяцам # - [x] Объем израсходованного интернет-трафика по месяцам # - [x] Помесячная выручка с каждого пользователя (вычтите бесплатный лимит из суммарного количества звонков, сообщений и интернет-трафика; остаток умножьте на значение из тарифного плана) # - [x] Найдены и удалены выбросы # # <b>Укажи название осей для графиков</b> # - [x] Посчитано среднее количество, дисперсия и стандартное отклонение # - [x] Есть ответ на вопрос "Сколько минут разговора, сколько сообщений и какой объём интернет-трафика требуется пользователям каждого тарифа в месяц?" # - [x] Построены и описаны гистограммы # - [x] Проверена гипотеза "Средняя выручка пользователей тарифов «Ультра» и «Смарт» различаются" # - [x] Проверена гипотеза "Средняя выручка пользователи из Москвы отличается от выручки пользователей из других регионов" # - [x] Есть пояснения к проверке гипотез "Как вы формулировали нулевую и альтернативную гипотезы" # - [x] Есть пояснения к проверке гипотез "Какой критерий использовали для проверки гипотез и почему" # - [x] В каждом этапе есть выводы # - [x] Есть общий вывод <b>Не могу согласиться с общим выводом.</b>
25,950
/amazon_reviews/import product reviews to mongodb.ipynb
f48c7c98f509d8429b242db6c574aaa7224cbae6
[]
no_license
rainerenglisch/y2buy_alpha1
https://github.com/rainerenglisch/y2buy_alpha1
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
3,463
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext autotime # **RETURN MOST FREQUENT CHAR FROM STRING. DONT INCLUDE SPACES** # + s='Tu hi hai mujhe mey ya nahi hai tuu' def yo(s): o=[] for i in s: if i !=' ': o.append(i) #return o freq=[] for i in o: freq.append(o.count(i)) if o.count(i)== max(freq): ans=i return ans #for i in o: #if o.count(i) == max(freq): #return i yo(s) # - # ** MERGE TWO STRINGS. ALTERNATING CHARACTERS BETWEEN A & B ** a='hello' b='bye' def alt(s, t): if not (s and t): return s + t return s[0] + t[0] + alt(s[1:], t[1:]) alt(a,b) ## SOLUTION FROM THE WEB ## # **TWO SUM. HOW MANY NUMBER OF PAIRS WHICH HAVE A SUM IN THE ARRAY** # *Numbers will be unique, any length of array can be given* # + arr=[1,2,3,5,10,14,18,20,25] def full(arr): m=[] def rec(arr): length=len(arr)*(len(arr)-1) if len(m)==length: return m else: i=0 for j in range(i+1,len(arr)): m.append(arr[i]+arr[j]) return rec(arr[1:]+arr[0:1]) sums=rec(arr) counter=0 for i in range(len(sums)): for j in range(len(arr)): if sums[i]==arr[j]: counter+=1 if counter < 1: return "Sorry, no pairs mate" else: return int(counter/2) full(arr) # - # ** HOW MANY CONSECUTIVE FOR EACH NUMBER? # INPUT STRING= 111233 # OUTPUT TUPLES= (3,1),(2,3),--> (number of consecutive times, number) # IF NUMBER HAS NO CONSECUTIVE AFTER IT. 
NO NEED TO OUTPUT (1,2)** s='111225566778888115555555551' count=1 l=[] for i in range(1,len(s)): if s[i-1]==s[i]: count+=1 if i==len(s)-1: l.append((count,int(s[i]))) else: l.append((count,int(s[i-1]))) count=1 #l.pop(-2) for j in tuple(l): print(j) # **Given an array of numbers, return array of products of all other numbers (no division)** # # **Input : [1, 2, 3, 4, 5] # Output: [(2x3x4x5), (1x3x4x5), (1x2x4x5), (1x2x3x5), (1x2x3x4)] # = [120, 60, 40, 30, 24] ** # + u= [1,2,3,4,5] m=[] def rec(arr): multi=1 length=len(arr) if len(m)==length: return m else: i=0 for i in range(i+1,len(arr)): multi*=arr[i] m.append(multi) return rec(arr[1:]+arr[0:1]) rec(u) # - # **2D ARRAY TO DICTIONARY TO 2D ARRAY ** - Practice # + arr=[[12,14],[34,5],[56],[78],[910],[77]] d={} l=[] for i in range(len(arr)): l.append([]) d["arr{0}".format(i)]=arr[i] for j in list(d.values())[i]: if j%2==0: l[i].append(j) l=[x for x in l if len(x)!=0] l # - # ** REMOVE DUPLICATES FROM A LIST/ARRAY. ONLY KEEP FIRST APPEARANCE OF VALUE ** # + arr=[2,5,5,5,4,3,2,1,5,5,5] def ayo(d): for i in range(len(d)): count=1 if d.count(d[i])!=1: for j in range(len(d)): if (d[i]==d[j]) and (count>1): d[j]='*' elif d[i]==d[j]: count+=1 return [x for x in d if x!='*'] ayo(arr) # - # ** TRY REMOVING DUPLICATES RECURSIVELY ** def gen(a): b=[] def recursive(a): if len(a)==0: return b elif a[0] in b: return recursive(a[1:]) elif a[0] not in b: b.append(a[0]) return recursive(a[1:]) return recursive(a) gen([2,5,5,5,4,3,2,1,5,5,7]) # + d={} for i in c: s= ''.join(sorted(i)) d[s]= d.get(s,[])+i [x for x in d.values()] # - # ** IF INPUT = 15,5. 
OUTPUT SHOULD BE 15,10,5,0,5,10,15 <br> # IF INPUT = 16,4 OUTPUT SHOULD BE 16,12,8,4,0,4,8,12,16 <br> # IF INPUT = 32,8 OUPUT SHOULD BE 32,24,16,8,0,8,16,24,32** # + def recurs1(a,b): m=[] m.append(a) if a%b==0: end = ((a/b)*2)+1 mid = (end+1)/2 def recurs(m): if len(m)==end: return m elif len(m)<mid: n= m[-1]-b m.append(n) return recurs(m) elif len(m)>mid-1: n=m[-1]+b m.append(n) return recurs(m) for i in recurs(m): print(i) else: print("Number is Not fully divisible") recurs1(1044,18) # - # ** GIVEN A LIST, RETURN MAXIMUM INTEGER. ONLY RECURSIVE FUNCTION ALLOWED ** # + l=[101,14,1,105,2,1118,3,104,5,6,7,100,101,110,-1]*2000 def recurs(l): i=1 maximum=l[0] if len(l) == 1: return maximum else: if l[i]<l[i-1]: maximum=l[i-1] return recurs(l[0:1]+l[2:]) else: return recurs(l[1:]) print(recurs(l)) # %timeit ## THIS IS AN EXAMPLE OF BINARY RECURSION OR AKA TREE RECURSION ## TWO RECURSIVE CALLS MADE ## # - max(l) # in built max function is still signficantly faster. wonder why # ** INPUT = LIST OF INPUTS, OUTPUT= EVERY COMBINATION OF THOSE INPUTS. MAX INPUTS=3 ( RECURSIVE FUNCTION ONLY)** import sys sys.setrecursionlimit(2000000000) # + arr=[1,2,3] def full(arr): m=[] n=[] for i in range(1,len(arr)+1): #----> CHECK TWO BOXES DOWN FOR RECURSIVE FUNCTION OF FACTORIALS n.append(i) result=1 for i in n: result= result * i a= result + 1 b= int(result/2) def rec(arr): if len(m)==a: return m if len(m)==b: m.append([]) return rec(arr[::-1]) else: m.append([arr]) return rec(arr[1:]+arr[0:1]) return [x for x in rec(arr) if x!=[]] full(arr) # - # **BONUS ( ill come back later) - what if more than 3 inputs are given? ** # ** SUM OF NATURAL NUMBERS. INPUT=5 OUTPUT=1+2+3+4+5=15 ** def natural(n): if n==0: return n else: return n + natural((n-1)) natural(3) # ** FACTORIAL. INPUT=3 OUTPUT= 3x2x1 = 6 ** def natural(n): if n==1: return n else: return n * natural((n-1)) natural(3) # ** SUM OF DIGITS. INPUT=12345, OUTPUT=1+2+3+4+5= 15. 
ONLY RECURSIVE ALLOWED ** def summer(n): if n==0: return 0 else: return n%10 + summer(int(n/10)) summer(123456789) # ** SUM OF DIGITS. USING FOR LOOP. ** def summer1(n): sum=0 for i in str(n): sum+=int(i) return sum summer1(123456789) import sys sys.setrecursionlimit(2000000) # ** INPUT= ANY NUMBER. OUTPUT IF PRIME= TRUE, OUTPUT IF NOT PRIME= FALSE, RECURSION ONLY ** # + def gen(n): if n<1: return False b=n-1 try: def recursive(n,b): if b==1: return True elif n%b==0: return False elif n%b!=0: return recursive(n,b-1) return gen(recursive(n,b)) except ZeroDivisionError as error: return True gen(18797) ## STACK LIMIT REACHED WHEN TRYING INPUT ABOVE 5 DIGITS, EVEN AFTER SPECIFYING HIGH RECURSION LIMIT ABOVE ## ## BONUS: TRY TO FIX for input=1. It says its True, when it should be False. # - # ** TRY ABOVE PROBLEM USING ITERATION ** # + def prime(n): b=n-1 if n<=1: return False else: for i in range(n,0,-1): if b==1: return True elif n%b==0: return False elif n%b!=0: b=b-1 prime(18797) # - # ** WRITE AN IMPLIMENTATION FOR THE IN-BUILT COUNT FUNCTION IN PYTHON ** # + y=[1,2,9,9,9,1,2,3,4,5,7,8,9,9,10,1,2,3,4,5,6,7,9,8,10,11,12,13,14,15,16,17]*500 c=2 def count(y,c,coun): if len(y)==0: return coun else: if c==y[0]: coun+=1 return count(y[1:],c,coun) else: return count(y[1:],c,coun) count(y,c,0) # - def count(y,c): count=0 for i in y: if i==c: count+=1 return count count(y,c) # + ## USING FOR LOOP instead of recursion is insanely faster ## --> at scale(500 * list) # - # ** FIND EVEN WORD WITH MOST CHARs. 
# (markdown fragment, continued) IF NO EVEN WORDS, RETURN FALSE **

# +
def even(s):
    """Return the longest even-length word in ``s``, or False if there is none.

    Ties go to the first such word.  Uses ``str.split()`` with no argument so
    runs of whitespace cannot produce empty 'words' (the original split on a
    single space and could return '' for input containing doubled spaces).
    """
    evens = [word for word in s.split() if len(word) % 2 == 0]
    if not evens:
        return False
    return max(evens, key=len)   # max() keeps the first word on length ties

even('tuuaa tua hia tohayya')
# -

# ** REVERSE INTEGER VALUE **  ### CATCH ANYTHING ABOVE 32 BIT INT ####

def reverse(x):
    """Return ``x`` with its decimal digits reversed, or 0 on 32-bit overflow.

    The signed 32-bit range is [-2**31, 2**31 - 1]; the original compared
    against 2**31 on both sides, wrongly accepting a positive result of
    exactly 2**31.
    """
    sign = -1 if x < 0 else 1
    reversed_magnitude = int(str(abs(x))[::-1])   # int() drops leading zeros
    result = sign * reversed_magnitude
    if result < -2**31 or result > 2**31 - 1:
        return 0
    return result

reverse(153)

x = '351'
int(''.join([i for i in str(x)][::-1]))

# ** MEDIAN OF 2 ARRAYS **

# +
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [10, 11, 12, 13, 14, 15]

def median(a, b):
    """Return the median of the merged, sorted contents of ``a`` and ``b``."""
    merged = sorted(a + b)
    n = len(merged)
    if n % 2 == 0:
        # Even count: mean of the two middle elements.
        return (merged[n // 2] + merged[n // 2 - 1]) / 2
    return merged[n // 2]

median(a, b)
# -

# ** GIVEN AN UNSORTED ARRAY, FIND THE SMALLEST MISSING POSITIVE INTEGER **

# +
def firstpos(d):
    """Return the smallest positive integer not present in ``d``.

    Set membership makes each probe O(1); the original probed a list, which
    is O(n) per candidate.  Empty and all-negative inputs fall out naturally:
    the first candidate, 1, is missing.
    """
    seen = set(d)
    candidate = 1
    while candidate in seen:
        candidate += 1
    return candidate

firstpos([-5, -4, 2, 3, 4, 1, 6, 8])
# -

# ** RETURN TWO SUM--> FALSE/TRUE **

# +
c = [3, 4, 5, 4]
targ = 8

def twosum(c, targ):
    """Return indices [i, j] of two distinct elements summing to ``targ``, else False.

    Kept O(n^2) on purpose so it returns exactly the same index pair as the
    original; the solved O(n) dictionary version appears in the next cell of
    the notebook.
    """
    for i in range(len(c)):
        for j in range(len(c)):
            if j != i and c[i] + c[j] == targ:
                return [i, j]
    return False

twosum(c, targ)
### WORKS, but too time intensive for larger list.
Try using dictionaries # - # **SOLVED** c=[3,4,4,3,4] targ=8 def twosum(nums, target): dic = {} for i,n in enumerate(nums): if target - n in dic: return [dic[target-n], i] dic[n] = i return False twosum(c,targ) # **FIRST NON REPEATING ELEMNT IN A LIST ** # + c=[-1,-1] def repeat(c): d=[c.count(x) for x in c] for i in range(len(d)): if d[i]==1: return c[i] return False repeat(c) # - # ** FIND MISSING NUMBER IN SORTED ARRAY ** # *Recursive* # + c=[1, 2, 3, 4, 5, 6, 7, 9] def missing(c): if len(c)==1: return 'no missing number' else: if c[0]==c[1]-1: return missing(c[1:]) else: return c[1]-1 missing(c) # - # *LOOPING* # + c=[1, 2, 3, 4, 5, 6, 7, 9] def missing(c): for i in range(1,len(c)): if c[i-1]!=c[i]-1: return c[i]-1 return 'no missing number' missing(c) # - # ** Reverse a string ** c='mey' ''.join([c[x] for x in range(len(c)-1,-1,-1)]) # ** OR ** c[::-1] # ** Given an array of strings, group anagrams together. ** <br> # *INPUT=['eat','tea','tan','ate','nat','bat']* <br> # *OUTPUT=[['ate','eat','tea'],['nat','tan'],['bat']]* # + c=['eat','tea','tan','ate','nat','bat','tab','tae','tna'] def anagram(c): d=[] for i in range(0,len(c)): d.append([c[i]]) if c[i]=='*': continue for j in range(len(c)): if j!=i: if sorted(c[i])==sorted(c[j]): d[i].append(c[j]) c[j]='*' return [l for l in d if l[0]!='*'] anagram(c) # - # ** TRY SOLVING ABOVE PROBLEM USING DICTIONARY FOR BETTER SPEED ** # *what an elegant solution*-> # *set key to sorted value of iteration. lookup that same key to add new values or if no value* # *there then create empty list to add [i] to* # + c=['eat','tea','tan','ate','nat','bat','tab','tae','tna'] d={} for i in c: s=''.join(sorted(i)) d[s]= d.get(s,[])+[i] [x for x in d.values()] # - for i in c: print(''.join(sorted(i))) #a=[1,2,3,4,5,6,7,8] a='12345678' for i in range(len(a),0,-1): print(i) a[::-1] on of unhappy customers for telecommunication service providers. 
The goal is to identify customers who may cancel their service soon so that you can entice them to stay. This is known as customer churn prediction. # # The dataset we use is publicly available and was mentioned in the book [Discovering Knowledge in Data](https://www.amazon.com/dp/0470908742/) by Daniel T. Larose. It is attributed by the author to the University of California Irvine Repository of Machine Learning Datasets. # + project_name = 'ml_deploy' data_source = S3Uploader.upload(local_path='./data/customer-churn.csv', desired_s3_uri='s3://{}/{}'.format(bucket, project_name), sagemaker_session=session) train_prefix = 'train' val_prefix = 'validation' train_data = 's3://{}/{}/{}/'.format(bucket, project_name, train_prefix) validation_data = 's3://{}/{}/{}/'.format(bucket, project_name, val_prefix) # - # ## Create Resources # In the following steps we'll create the Glue job and Lambda function that are called from the Step Functions workflow. # ### Create the AWS Glue Job # + glue_script_location = S3Uploader.upload(local_path='./code/glue_etl.py', desired_s3_uri='s3://{}/{}'.format(bucket, project_name), sagemaker_session=session) glue_client = boto3.client('glue') response = glue_client.create_job( Name=job_name, Description='PySpark job to extract the data and split in to training and validation data sets', Role=glue_role, # you can pass your existing AWS Glue role here if you have used Glue before ExecutionProperty={ 'MaxConcurrentRuns': 2 }, Command={ 'Name': 'glueetl', 'ScriptLocation': glue_script_location, 'PythonVersion': '3' }, DefaultArguments={ '--job-language': 'python' }, GlueVersion='1.0', WorkerType='Standard', NumberOfWorkers=2, Timeout=60 ) # - # ### Create the AWS Lambda Function # + import zipfile zip_name = 'query_training_status.zip' lambda_source_code = './code/query_training_status.py' zf = zipfile.ZipFile(zip_name, mode='w') zf.write(lambda_source_code, arcname=lambda_source_code.split('/')[-1]) zf.close() 
S3Uploader.upload(local_path=zip_name, desired_s3_uri='s3://{}/{}'.format(bucket, project_name), sagemaker_session=session) # + lambda_client = boto3.client('lambda') response = lambda_client.create_function( FunctionName=function_name, Runtime='python3.7', Role=lambda_role, Handler='query_training_status.lambda_handler', Code={ 'S3Bucket': bucket, 'S3Key': '{}/{}'.format(project_name, zip_name) }, Description='Queries a SageMaker training job and return the results.', Timeout=15, MemorySize=128 ) # - # ### Configure the AWS SageMaker Estimator # + container = sagemaker.image_uris.retrieve('xgboost', region, '1.2-1') xgb = sagemaker.estimator.Estimator(container, sagemaker_execution_role, train_instance_count=1, train_instance_type='ml.m4.xlarge', output_path='s3://{}/{}/output'.format(bucket, project_name)) xgb.set_hyperparameters(max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.8, objective='binary:logistic', eval_metric='error', num_round=100) # - # # ## Build a Machine Learning Workflow # You can use a state machine workflow to create a model retraining pipeline. The AWS Data Science Workflows SDK provides several AWS SageMaker workflow steps that you can use to construct an ML pipeline. In this tutorial you will create the following steps: # # * [**ETLStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/compute.html#stepfunctions.steps.compute.GlueStartJobRunStep) - Starts an AWS Glue job to extract the latest data from our source database and prepare our data. # * [**TrainingStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TrainingStep) - Creates the training step and passes the defined estimator. # * [**ModelStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.ModelStep) - Creates a model in SageMaker using the artifacts created during the TrainingStep. 
# * [**LambdaStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/compute.html#stepfunctions.steps.compute.LambdaStep) - Creates the task state step within our workflow that calls a Lambda function. # * [**ChoiceStateStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/states.html#stepfunctions.steps.states.Choice) - Creates the choice state step within our workflow. # * [**EndpointConfigStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.EndpointConfigStep) - Creates the endpoint config step to define the new configuration for our endpoint. # * [**EndpointStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.EndpointStep) - Creates the endpoint step to update our model endpoint. # * [**FailStateStep**](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/states.html#stepfunctions.steps.states.Fail) - Creates fail state step within our workflow. # SageMaker expects unique names for each job, model and endpoint. # If these names are not unique the execution will fail. execution_input = ExecutionInput(schema={ 'TrainingJobName': str, 'GlueJobName': str, 'ModelName': str, 'EndpointName': str, 'LambdaFunctionName': str }) # ### Create an ETL step with AWS Glue # In the following cell, we create a Glue step thats runs an AWS Glue job. The Glue job extracts the latest data from our source database, removes unnecessary columns, splits the data in to training and validation sets, and saves the data to CSV format in S3. Glue is performing this extraction, transformation, and load (ETL) in a serverless fashion, so there are no compute resources to configure and manage. 
See the [GlueStartJobRunStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/compute.html#stepfunctions.steps.compute.GlueStartJobRunStep) Compute step in the AWS Step Functions Data Science SDK documentation. etl_step = steps.GlueStartJobRunStep( 'Extract, Transform, Load', parameters={"JobName": execution_input['GlueJobName'], "Arguments":{ '--S3_SOURCE': data_source, '--S3_DEST': 's3a://{}/{}/'.format(bucket, project_name), '--TRAIN_KEY': train_prefix + '/', '--VAL_KEY': val_prefix +'/'} } ) # ### Create a SageMaker Training Step # # In the following cell, we create the training step and pass the estimator we defined above. See [TrainingStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TrainingStep) in the AWS Step Functions Data Science SDK documentation to learn more. training_step = steps.TrainingStep( 'Model Training', estimator=xgb, data={ 'train': TrainingInput(train_data, content_type='text/csv'), 'validation': TrainingInput(validation_data, content_type='text/csv') }, job_name=execution_input['TrainingJobName'], wait_for_completion=True ) # ### Create a Model Step # # In the following cell, we define a model step that will create a model in Amazon SageMaker using the artifacts created during the TrainingStep. See [ModelStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.ModelStep) in the AWS Step Functions Data Science SDK documentation to learn more. # # The model creation step typically follows the training step. The Step Functions SDK provides the [get_expected_model](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.TrainingStep.get_expected_model) method in the TrainingStep class to provide a reference for the trained model artifacts. 
Please note that this method is only useful when the ModelStep directly follows the TrainingStep. model_step = steps.ModelStep( 'Save Model', model=training_step.get_expected_model(), model_name=execution_input['ModelName'], result_path='$.ModelStepResults' ) # ### Create a Lambda Step # In the following cell, we define a lambda step that will invoke the previously created lambda function as part of our Step Function workflow. See [LambdaStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/compute.html#stepfunctions.steps.compute.LambdaStep) in the AWS Step Functions Data Science SDK documentation to learn more. lambda_step = steps.compute.LambdaStep( 'Query Training Results', parameters={ "FunctionName": execution_input['LambdaFunctionName'], 'Payload':{ "TrainingJobName.$": '$.TrainingJobName' } } ) # ### Create a Choice State Step # In the following cell, we create a choice step in order to build a dynamic workflow. This choice step branches based off of the results of our SageMaker training step: did the training job fail or should the model be saved and the endpoint be updated? We will add specfic rules to this choice step later on in section 8 of this notebook. check_accuracy_step = steps.states.Choice( 'Accuracy > 90%' ) # ### Create an Endpoint Configuration Step # In the following cell we create an endpoint configuration step. See [EndpointConfigStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.sagemaker.EndpointConfigStep) in the AWS Step Functions Data Science SDK documentation to learn more. 
endpoint_config_step = steps.EndpointConfigStep( "Create Model Endpoint Config", endpoint_config_name=execution_input['ModelName'], model_name=execution_input['ModelName'], initial_instance_count=1, instance_type='ml.m4.xlarge' ) # ### Update the Model Endpoint Step # In the following cell, we create the Endpoint step to deploy the new model as a managed API endpoint, updating an existing SageMaker endpoint if our choice state is sucessful. endpoint_step = steps.EndpointStep( 'Update Model Endpoint', endpoint_name=execution_input['EndpointName'], endpoint_config_name=execution_input['ModelName'], update=False ) # ### Create the Fail State Step # In addition, we create a Fail step which proceeds from our choice state if the validation accuracy of our model is lower than the threshold we define. See [FailStateStep](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/states.html#stepfunctions.steps.states.Fail) in the AWS Step Functions Data Science SDK documentation to learn more. fail_step = steps.states.Fail( 'Model Accuracy Too Low', comment='Validation accuracy lower than threshold' ) # ### Add Rules to Choice State # In the cells below, we add a threshold rule to our choice state. Therefore, if the validation accuracy of our model is below 0.90, we move to the Fail State. If the validation accuracy of our model is above 0.90, we move to the save model step with proceeding endpoint update. See [here](https://github.com/dmlc/xgboost/blob/master/doc/parameter.rst) for more information on how XGBoost calculates classification error. # # For binary classification problems the XGBoost algorithm defines the model error as: # # \begin{equation*} # \frac{incorret\:predictions}{total\:number\:of\:predictions} # \end{equation*} # # To achieve an accuracy of 90%, we need error <.10. 
# + threshold_rule = steps.choice_rule.ChoiceRule.NumericLessThan(variable=lambda_step.output()['Payload']['trainingMetrics'][0]['Value'], value=.1) check_accuracy_step.add_choice(rule=threshold_rule, next_step=endpoint_config_step) check_accuracy_step.default_choice(next_step=fail_step) # - # ### Link all the Steps Together # Finally, create your workflow definition by chaining all of the steps together that we've created. See [Chain](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/sagemaker.html#stepfunctions.steps.states.Chain) in the AWS Step Functions Data Science SDK documentation to learn more. endpoint_config_step.next(endpoint_step) workflow_definition = steps.Chain([ etl_step, training_step, model_step, lambda_step, check_accuracy_step ]) # ## Run the Workflow # Create your workflow using the workflow definition above, and render the graph with [render_graph](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.render_graph): workflow = Workflow( name='MyInferenceRoutine_{}'.format(id), definition=workflow_definition, role=workflow_execution_role, execution_input=execution_input ) workflow.render_graph() # Create the workflow in AWS Step Functions with [create](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.create): workflow.create() # Run the workflow with [execute](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.execute): execution = workflow.execute( inputs={ 'TrainingJobName': 'regression-{}'.format(id), # Each Sagemaker Job requires a unique name, 'GlueJobName': job_name, 'ModelName': 'CustomerChurn-{}'.format(id), # Each Model requires a unique name, 'EndpointName': 'CustomerChurn', # Each Endpoint requires a unique name 'LambdaFunctionName': function_name } ) # Render workflow progress with the 
[render_progress](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Execution.render_progress). This generates a snapshot of the current state of your workflow as it executes. This is a static image therefore you must run the cell again to check progress: execution.render_progress() # Use [list_events](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Execution.list_events) to list all events in the workflow execution: execution.list_events(html=True) # Use [list_executions](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.list_executions) to list all executions for a specific workflow: workflow.list_executions(html=True) # Use [list_workflows](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/workflow.html#stepfunctions.workflow.Workflow.list_workflows) to list all workflows in your AWS account: Workflow.list_workflows(html=True) # ## Clean Up # When you are done, make sure to clean up your AWS account by deleting resources you won't be reusing. Uncomment the code below and run the cell to delete the Glue job, Lambda function, and Step Function. # + #lambda_client.delete_function(FunctionName=function_name) #glue_client.delete_job(JobName=job_name) #workflow.delete() # - # ---
28,243
/seminar2018_justify.ipynb
3a2cf6767f4d1287d8f6e30d7adb0dca86049b31
[]
no_license
nelsondelimar/Masters
https://github.com/nelsondelimar/Masters
0
1
null
null
null
null
Jupyter Notebook
false
false
.py
446,182
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## Notebook desenvolvido para exemplificar o uso da correlação entre o Gradiente Total e o Gradiente Vertical import numpy from matplotlib import pyplot from codes.auxiliars import rotation_z # Define area e numero de pontos nx, ny = 80, 80 area = [-3000., 3000., -3000., 3000.] from codes.grids import regular_grid xp, yp, zp = regular_grid(area, (nx, ny), -100.) xyzp = numpy.vstack([xp, yp, zp]).T rotxyz = numpy.zeros_like(xyzp) for k, i in enumerate(xyzp): rotxyz[k,:] = numpy.dot(rotation_z(45.), i) xo = rotxyz[:,0] yo = rotxyz[:,1] zo = rotxyz[:,2] from codes.prism import prism_tf from codes.plot import draw_prism modelo1 = [-1500., 1500., -500., 500., 200., 300., 1.] tf = prism_tf(xo, yo, zo, modelo1, -20., 59.) rtp = prism_tf(xo, yo, zo, modelo1, 90., 0.) xp = xp.reshape((nx,ny)) yp = yp.reshape((nx,ny)) tf = tf.reshape((nx,ny)) rtp = rtp.reshape((nx,ny)) from codes.derivative import zderiv, totalgrad from codes.filtering import reduction gt = totalgrad(xp, yp, tf) gv = zderiv(xp, yp, tf) # + #rtp = reduction(xp, yp, tf, 45., -40.) 
# + pyplot.figure(figsize=(20,5)) pyplot.subplot(1,3,1) pyplot.contourf(yp, xp, tf, 25, cmap = pyplot.cm.jet) #draw_prism(newmodel, linewidth = 1., style = 'k.--' , label = 'Soleira', xy2ne = True) pyplot.text(-2800., 2500., '(A)', fontsize = 15., bbox=dict(facecolor='white', edgecolor='w', alpha = 0.8)) pyplot.title('Anomalia de campo total (nT)', fontsize = 15) pyplot.xlabel('Leste (metros)', fontsize = 12) pyplot.ylabel('Norte (metros)', fontsize = 12) pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5)) pyplot.yticks(numpy.linspace(yp.min(), yp.max(), 5)) #pyplot.legend(loc = 'lower center', fontsize = 'x-large') pyplot.colorbar() pyplot.subplot(1,3,2) pyplot.contourf(yp, xp, gt, 40, cmap = pyplot.cm.jet) pyplot.plot(yp[:,40], xp[:,40], 'k--', label = 'Perfil 1 (N-S)') pyplot.plot(yp[40,:], xp[40,:], 'r--', label = 'Perfil 2 (E-W)') pyplot.text(-2800., 2500., '(B)', fontsize = 15., bbox=dict(facecolor='white', edgecolor='w', alpha = 0.8)) pyplot.title('Gradiente total calculado (nT/m)', fontsize = 15) pyplot.xlabel('Leste (metros)', fontsize = 12) pyplot.ylabel('Norte (metros)', fontsize = 12) pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5)) pyplot.yticks(numpy.linspace(yp.min(), yp.max(), 5)) pyplot.legend(loc = 'lower left', fontsize = 'large') pyplot.colorbar() pyplot.subplot(1,3,3) pyplot.contourf(yp, xp, gv, 40, cmap = pyplot.cm.jet) pyplot.plot(yp[:,40], xp[:,40], 'k--', label = 'Perfil 1 (N-S)') pyplot.plot(yp[40,:], xp[40,:], 'r--', label = 'Perfil 2 (E-W)') pyplot.text(-2800., 2500., '(C)', fontsize = 15., bbox=dict(facecolor='white', edgecolor='w', alpha = 0.8)) pyplot.title('Gradiente vertical calculado (nT/m)', fontsize = 15) pyplot.xlabel('Leste (metros)', fontsize = 12) pyplot.ylabel('Norte (metros)', fontsize = 12) pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5)) pyplot.yticks(numpy.linspace(yp.min(), yp.max(), 5)) pyplot.legend(loc = 'lower left', fontsize = 'large') pyplot.colorbar() 
pyplot.savefig('figs-seminario2018/justificativa1.png', dpi = 300, bbox_inches = 'tight') pyplot.show() # - pyplot.figure(figsize=(6, 5)) pyplot.contourf(yp, xp, tf, 25, cmap = pyplot.cm.jet) #draw_prism(modelo1[:4], linewidth = 1., style = 'k.--' , label = 'Soleira', xy2ne = True) pyplot.title('Anomalia de campo total (nT)', fontsize = 15) pyplot.xlabel('Leste (metros)', fontsize = 12) pyplot.ylabel('Norte (metros)', fontsize = 12) pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5)) pyplot.yticks(numpy.linspace(yp.min(), yp.max(), 5)) #pyplot.legend(loc = 'lower center', fontsize = 'x-large') pyplot.colorbar() pyplot.savefig('figs-seminario2018/justificativa0.png', dpi = 300, bbox_inches = 'tight') pyplot.show() # + pyplot.figure(figsize=(18, 12)) pyplot.subplot(2,2,1) pyplot.plot(xp[:,40], tf[:,40], 'b-', linewidth = 3) pyplot.text(-2900., 20., '(A)', fontsize = 15., bbox=dict(facecolor='black', edgecolor='k', alpha = 0.25)) pyplot.title('Perfil 1 (N-S): Anomalia de campo total', fontsize = 15) pyplot.xlabel('Norte (metros)', fontsize = 12) pyplot.ylabel('nanoTesta (nT))', fontsize = 12) #pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5)) #pyplot.yticks(numpy.linspace(tf[:,44].min(), tf[:,44].max(), 5) , fontsize = 10) pyplot.xlim(xp.min(), xp.max()) pyplot.subplot(2,2,3) pyplot.plot(xp[:,40], gv[:,40], 'k-', linewidth = 2, label = 'Gradiente total') pyplot.plot(xp[:,40], gt[:,40], 'r-', linewidth = 2, label = 'Gradiente vertical') pyplot.text(-2900., 0.13, '(C)', fontsize = 15., bbox=dict(facecolor='black', edgecolor='k', alpha = 0.25)) pyplot.title('Gradientes Total e Vertical', fontsize = 15) pyplot.xlabel('Norte (metros)', fontsize = 12) pyplot.ylabel('(nT/metro)', fontsize = 12) #pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5)) #pyplot.yticks(fontsize = 10) pyplot.xlim(xp.min(), xp.max()) pyplot.legend(loc = 'lower left', fontsize = 'x-large') pyplot.subplot(2,2,2) pyplot.plot(yp[40,:], tf[40,:], 'b-', linewidth = 3) pyplot.text(-2900., 26., '(B)', 
fontsize = 15., bbox=dict(facecolor='black', edgecolor='k', alpha = 0.25)) pyplot.title('Perfil 2 (E-W): Anomalia de campo total', fontsize = 15) pyplot.xlabel('Leste (metros)', fontsize = 12) pyplot.ylabel('nanoTesta (nT))', fontsize = 12) #pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5) #pyplot.yticks(numpy.linspace(tf[33,:].min(), tf[33,:].max(), 5), fontsize = 10) pyplot.xlim(xp.min(), xp.max()) pyplot.subplot(2,2,4) pyplot.plot(yp[40,:], gv[40,:], 'k-', linewidth = 2, label = 'Gradiente total') pyplot.plot(yp[40,:], gt[40,:], 'r-', linewidth = 2, label = 'Gradiente vertical') pyplot.text(-2900., 0.13, '(D)', fontsize = 15., bbox=dict(facecolor='black', edgecolor='k', alpha = 0.25)) pyplot.title('Gradientes Total e Vertical', fontsize = 15) pyplot.xlabel('Leste (metros)', fontsize = 12) pyplot.ylabel('(nT/metro)', fontsize = 12) #pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5), fontsize = 10) #pyplot.yticks(fontsize = 10) pyplot.xlim(xp.min(), xp.max()) pyplot.legend(loc = 'lower right', fontsize = 'x-large') pyplot.savefig('figs-seminario2018/justificativa2-anomalia.png', dpi = 300, bbox_inches = 'tight') pyplot.show() # - pyplot.figure(figsize=(6, 5)) pyplot.contourf(yp, xp, rtp, 25, cmap = pyplot.cm.jet) #draw_prism(modelo1[:4], linewidth = 1., style = 'k.--' , label = 'Soleira', xy2ne = True) pyplot.title('Anomalia reduzida ao Polo (nT)', fontsize = 15) pyplot.xlabel('Leste (metros)', fontsize = 12) pyplot.ylabel('Norte (metros)', fontsize = 12) pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5)) pyplot.yticks(numpy.linspace(yp.min(), yp.max(), 5)) #pyplot.legend(loc = 'lower center', fontsize = 'x-large') pyplot.colorbar() pyplot.savefig('figs-seminario2018/justificativa0-rtp.png', dpi = 300, bbox_inches = 'tight') pyplot.show() gt = totalgrad(xp, yp, rtp) gv = zderiv(xp, yp, rtp) # + pyplot.figure(figsize=(20,5)) pyplot.subplot(1,3,1) pyplot.contourf(yp, xp, rtp, 25, cmap = pyplot.cm.jet) #draw_prism(modelo1[:4], linewidth = 1., style = 'k.--' , 
label = 'Soleira', xy2ne = True) pyplot.text(-2800., 2500., '(A)', fontsize = 15., bbox=dict(facecolor='white', edgecolor='w', alpha = 0.8)) pyplot.title('Anomalia reduzida ao Polo (nT)', fontsize = 15) pyplot.xlabel('Leste (metros)', fontsize = 12) pyplot.ylabel('Norte (metros)', fontsize = 12) pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5)) pyplot.yticks(numpy.linspace(yp.min(), yp.max(), 5)) #pyplot.legend(loc = 'lower center', fontsize = 'x-large') pyplot.colorbar() pyplot.subplot(1,3,2) pyplot.contourf(yp, xp, gt, 40, cmap = pyplot.cm.jet) pyplot.plot(yp[:,40], xp[:,40], 'k--', label = 'Perfil 1 (N-S)') pyplot.plot(yp[40,:], xp[40,:], 'r--', label = 'Perfil 2 (E-W)') pyplot.text(-2800., 2500., '(B)', fontsize = 15., bbox=dict(facecolor='white', edgecolor='w', alpha = 0.8)) pyplot.title('Gradiente total calculado (nT/m)', fontsize = 15) pyplot.xlabel('Leste (metros)', fontsize = 12) pyplot.ylabel('Norte (metros)', fontsize = 12) pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5)) pyplot.yticks(numpy.linspace(yp.min(), yp.max(), 5)) pyplot.legend(loc = 'lower left', fontsize = 'large') pyplot.colorbar() pyplot.subplot(1,3,3) pyplot.contourf(yp, xp, gv, 40, cmap = pyplot.cm.jet) pyplot.plot(yp[:,40], xp[:,40], 'k--', label = 'Perfil 1 (N-S)') pyplot.plot(yp[40,:], xp[40,:], 'r--', label = 'Perfil 2 (E-W)') pyplot.text(-2800., 2500., '(C)', fontsize = 15., bbox=dict(facecolor='white', edgecolor='w', alpha = 0.8)) pyplot.title('Gradiente vertical calculado (nT/m)', fontsize = 15) pyplot.xlabel('Leste (metros)', fontsize = 12) pyplot.ylabel('Norte (metros)', fontsize = 12) pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5)) pyplot.yticks(numpy.linspace(yp.min(), yp.max(), 5)) pyplot.legend(loc = 'lower left', fontsize = 'large') pyplot.colorbar() pyplot.savefig('figs-seminario2018/justificativa1-rtp.png', dpi = 300, bbox_inches = 'tight') pyplot.show() # + pyplot.figure(figsize=(18, 12)) pyplot.subplot(2,2,1) pyplot.plot(xp[:,44], rtp[:,44], 'b-', linewidth = 
3) pyplot.text(-2900., 55., '(A)', fontsize = 15., bbox=dict(facecolor='black', edgecolor='k', alpha = 0.25)) pyplot.title('Perfil 1 (N-S): Anomalia reduzida ao Polo', fontsize = 15) pyplot.xlabel('Norte (metros)', fontsize = 12) pyplot.ylabel('nanoTesta (nT))', fontsize = 12) #pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5)) #pyplot.yticks(numpy.linspace(tf[:,44].min(), tf[:,44].max(), 5) , fontsize = 10) pyplot.xlim(xp.min(), xp.max()) pyplot.subplot(2,2,3) pyplot.plot(xp[:,44], gv[:,44], 'k-', linewidth = 2, label = 'Gradiente total') pyplot.plot(xp[:,44], gt[:,44], 'r-', linewidth = 2, label = 'Gradiente vertical') pyplot.text(-2900., 0.13, '(C)', fontsize = 15., bbox=dict(facecolor='black', edgecolor='k', alpha = 0.25)) pyplot.title('Gradientes Total e Vertical', fontsize = 15) pyplot.xlabel('Norte (metros)', fontsize = 12) pyplot.ylabel('(nT/metro)', fontsize = 12) #pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5)) #pyplot.yticks(fontsize = 10) pyplot.xlim(xp.min(), xp.max()) pyplot.legend(loc = 'lower left', fontsize = 'large') pyplot.subplot(2,2,2) pyplot.plot(yp[33,:], rtp[33,:], 'b-', linewidth = 3) pyplot.text(-2900., 55., '(B)', fontsize = 15., bbox=dict(facecolor='black', edgecolor='k', alpha = 0.25)) pyplot.title('Perfil 2 (E-W): Anomalia reduzida ao Polo', fontsize = 15) pyplot.xlabel('Leste (metros)', fontsize = 12) pyplot.ylabel('nanoTesta (nT))', fontsize = 12) #pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5) #pyplot.yticks(numpy.linspace(tf[33,:].min(), tf[33,:].max(), 5), fontsize = 10) pyplot.xlim(xp.min(), xp.max()) pyplot.subplot(2,2,4) pyplot.plot(yp[33,:], gv[33,:], 'k-', linewidth = 2, label = 'Gradiente total') pyplot.plot(yp[33,:], gt[33,:], 'r-', linewidth = 2, label = 'Gradiente vertical') pyplot.text(-2900., 0.125, '(D)', fontsize = 15., bbox=dict(facecolor='black', edgecolor='k', alpha = 0.25)) pyplot.title('Gradientes Total e Vertical', fontsize = 15) pyplot.xlabel('Leste (metros)', fontsize = 12) 
pyplot.ylabel('(nT/metro)', fontsize = 12) #pyplot.xticks(numpy.linspace(xp.min(), xp.max(), 5), fontsize = 10) #pyplot.yticks(fontsize = 10) pyplot.xlim(xp.min(), xp.max()) pyplot.legend(loc = 'lower right', fontsize = 'large') pyplot.savefig('figs-seminario2018/justificativa2-rtp.png', dpi = 300, bbox_inches = 'tight') pyplot.show() # -
11,625
/Walker2DBulletEnv-v0_TD3/Walker2DBulletEnv_std0.02.ipynb
f01ac5b2221dae523686a40c9551072fb5124741
[]
no_license
jorditorresBCN/Deep-Reinforcement-Learning-Udacity
https://github.com/jorditorresBCN/Deep-Reinforcement-Learning-Udacity
1
1
null
2020-08-18T14:23:41
2020-08-15T17:03:55
null
Jupyter Notebook
false
false
.py
174,849
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.15.2
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# cd C:/Users\Julius\Downloads\Bing

# +
# Scrape black-and-white thumbnail images from Bing image search, then
# project the pixel data to 2-D with PCA and plot it.  Python 2 notebook.
from bs4 import BeautifulSoup
import requests
import re
import urllib2
import os
from PIL import Image
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline

def get_soup(url):
    """Fetch ``url`` and return its parsed HTML."""
    return BeautifulSoup(requests.get(url).text)

image_type = "check"
query = "check"
url = "http://www.bing.com/images/search?q=" + query + \
      "&qft=+filterui:color2-bw+filterui:imagesize-large&FORM=R5IR3"
soup = get_soup(url)
# Bing serves thumbnails from mm.bing.net.
images = [a['src'] for a in soup.find_all("img", {"src": re.compile("mm.bing.net")})]

for img in images:
    # Close the HTTP response explicitly -- the original leaked one socket
    # per downloaded image by never closing the urlopen handle.
    response = urllib2.urlopen(img)
    try:
        raw_img = response.read()
    finally:
        response.close()
    cntr = len([i for i in os.listdir("images") if image_type in i]) + 1
    # ``with`` guarantees the output file is closed even if write() fails.
    with open("images/" + image_type + "_" + str(cntr), 'wb') as f:
        f.write(raw_img)
# +
#setup a standard image size; this will distort some images but will get everything into the same shape
STANDARD_SIZE = (300, 167)

def img_to_matrix(filename, verbose=False):
    """
    takes a filename and turns it into a numpy array of RGB pixels
    """
    img = Image.open(filename)
    if verbose == True:
        # print() with %-formatting behaves identically on Python 2 and 3;
        # the original used the Python-2-only print statement.
        print("changing size from %s to %s" % (str(img.size), str(STANDARD_SIZE)))
    img = img.resize(STANDARD_SIZE)
    img = list(img.getdata())
    img = map(list, img)
    img = np.array(img)
    return img

def flatten_image(img):
    """
    takes in an (m, n) numpy array and flattens it
    into an array of shape (1, m * n)
    """
    s = img.shape[0] * img.shape[1]
    img_wide = img.reshape(1, s)
    return img_wide[0]
# +
import numpy as np

img_dir = "images/"
images = [img_dir + f for f in os.listdir(img_dir)]
# Label by filename prefix: anything containing "check" is a check.
labels = ["check" if "check" in f.split('/')[-1] else "drivers_license" for f in images]

data = []
for image in images:
    img = img_to_matrix(image)
    img = flatten_image(img)
    data.append(img)

data = np.array(data)
data
# +
#Define your training set (random ~70/30 split)
is_train = np.random.uniform(0, 1, len(data)) <= 0.7
y = np.where(np.array(labels) == "check", 1, 0)

train_x, train_y = data[is_train], y[is_train]
test_x, test_y = data[is_train == False], y[is_train == False]
# -

#now use PCA to reduce the number of features
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
X = pca.fit_transform(data)
df = pd.DataFrame({"x": X[:, 0], "y": X[:, 1],
                   "label": np.where(y == 1, "Check", "Driver's License")})
colors = ["red", "yellow"]
for label, color in zip(df['label'].unique(), colors):
    mask = df['label'] == label
    plt.scatter(df[mask]['x'], df[mask]['y'], c=color, label=label)
plt.legend()
plt.show()
2,858
/1 Multiple Regression_50_Startups_Jj.ipynb
5cbfee2e626b7567b21000608b4c120f1c919724
[]
no_license
vitthalkcontact/MACHINE-LEARNING-2020
https://github.com/vitthalkcontact/MACHINE-LEARNING-2020
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
21,362
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- import sys import re import os import csv import urllib import urllib3 import ssl from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter from pdfminer.pdfpage import PDFPage from pdfminer.converter import XMLConverter, HTMLConverter, TextConverter from pdfminer.layout import LAParams from cStringIO import StringIO from compiler.ast import flatten def cases(data): check = data.find('"rel" : "documents"') start_link = data.find('"https:', check) end_link = data.find('"', start_link + 1) link = data[start_link + 1:end_link] return link, end_link def get_all_cases(patentnumber): webaddress = "https://ptabdata.uspto.gov/ptab-api/trials?patentNumber=" url = webaddress+patentnumber http = urllib3.PoolManager() x = http.request('GET', url) webdata = x.data caseslinks = [] while True: link, position = cases(webdata) if link: caseslinks.append(link) webdata = webdata[position:] else: break return caseslinks def pdfparser(data): fp = file(data, 'rb') rsrcmgr = PDFResourceManager() retstr = StringIO() codec = 'utf-8' laparams = LAParams() device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams) # Create a PDF interpreter object. interpreter = PDFPageInterpreter(rsrcmgr, device) # Process each page contained in the document. 
for page in PDFPage.get_pages(fp): interpreter.process_page(page) data = retstr.getvalue() fp.close() return data def downloadpdf(patentnumber, web): inval_type = None ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE f = urllib.urlopen(web, context=ctx) with open("temp.pdf", "wb") as code: code.write(f.read()) text = pdfparser("temp.pdf") searchObj = re.compile( r'\b\d,\d\d\d,\d\d\d',re.IGNORECASE) numbers = searchObj.findall(text) numbers = list(set(numbers)) patentnumber = patentnumber[:1] + ',' + patentnumber [1:4] + ',' + patentnumber[4:] similar_numbers = [z for z in numbers if z != patentnumber] os.remove("temp.pdf") return similar_numbers def numbers_lookup(patentnumber, multiplelinks): numbersfound = [] for link in multiplelinks: http = urllib3.PoolManager() y = http.request('GET', link) casedata = y.data start_pdf = casedata.find('"type" : "final decision"') if start_pdf != -1: rel_download = casedata.find('download', start_pdf) start_pdflink = casedata.find('"https:', rel_download) end_pdflink = casedata.find('"', start_pdflink + 1) pdflink = casedata[start_pdflink + 1:end_pdflink] tempvalue = downloadpdf(patentnumber, pdflink) if tempvalue: numbersfound.append(tempvalue) patentnumberfound = list(set(flatten(numbersfound))) patentnumber = patentnumber[:1] + ',' + patentnumber [1:4] + ',' + patentnumber[4:] patentnumberfound.insert(0, patentnumber) return patentnumberfound def func(PN): patentnumber = PN a = get_all_cases(patentnumber) b = numbers_lookup(patentnumber, a) return b lines=range(643,719) i=0 f = open('UniqInvalidatedPatent.txt') for line in f: if i in lines: line = line.strip() data = func(line) results = [j.replace(',','') for j in data] with open('ResponsiblePatents.csv', 'a') as csvfile: writer = csv.writer(csvfile, delimiter=",", lineterminator='\n') writer.writerow(results) i+=1 f.close() print func('5478650')
3,980
/.ipynb_checkpoints/Final_Module-checkpoint.ipynb
90aff2ed6c998ad268521f4d87229188c9ce2b10
[]
no_license
sumanthvrao/We_R_Pythons
https://github.com/sumanthvrao/We_R_Pythons
1
2
null
2018-11-23T08:53:50
2018-11-22T17:22:11
Jupyter Notebook
Jupyter Notebook
false
false
.py
563,217
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Deep Learning # language: python # name: deepl # --- #importing all necessary librarys import import_ipynb import Content_based_filtering as cf import Collaborative_filtering_RBM as rbm from scipy.io import loadmat import pandas as pd import numpy as np from IPython.display import display import Front_End_Widget as few from IPython.display import display from IPython.html import widgets import tensorflow as tf import matplotlib.pyplot as plt # # Front End for user # + top10ids=[] dic=loadmat('top10.mat') u_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code'] user_df = pd.read_csv('ml-100k/u.user', sep='|', names=u_cols, encoding='latin-1', parse_dates=True) movies_df=pd.read_csv('ml-100k/item_cc.csv') #Movie details movies_df2=pd.read_csv('ml-100k/item_cc_genre.csv') matrix=pd.read_csv('ml-100k/user_item_matrix.csv') new_prediction=pd.read_csv('ml-100k/new_predicted_colab.csv') # - m=np.zeros(shape=(943+1,1682+1)).astype(int) p=pd.read_csv("ml-100k/new_predicted_colab.csv") p=p.values for i in p: try: m[int(i[0])][int(i[1])]=int(i[2]) except: print(i) np.savetxt("ml-100k/only_predicted.csv", m.astype(int), fmt='%i', delimiter=",") only_predicted=pd.read_csv('ml-100k/only_predicted.csv') # + flag=0 def getusers(uname,movieid): #Suggest users who would rate this movie the same as current user rating=matrix.iloc[uname][movieid] print("you're going to rate this movie: ",rating) print("---------------------------------------------------------------------") print("your partners are (They also rated",rating,")") h=only_predicted.loc[only_predicted.iloc[:,movieid]==rating] l=h.index.tolist() with pd.option_context('display.max_rows', None, 'display.max_columns', None): final_user_frame = user_df.loc[user_df['user_id'].isin(l)] few.filter_data(final_user_frame) 
print("---------------------------------------------------------------------") #driver(196,242) # + def gettop(uname): #Top 10 movies which the user is going to rate high print("You may like:") ll=dic[str(uname)][0] with pd.option_context('display.max_rows', None, 'display.max_columns', None): display(movies_df2.loc[movies_df['movie_id'].isin(ll)]) print("---------------------------------------------------------------------") # + def driver(uname, movieid=""): gettop(uname) if(movieid!=""): movietitle=movies_df2.loc[movies_df['movie_id']==int(movieid),'movie_title'].iloc[0] if(m[uname][movieid]!=0): getusers(uname,movieid) else: print("You have already watched this movie") print("Similar movies are") ans=cf.get_recommendations(movietitle) lll=ans[0].tolist() df2=movies_df2['movie_title'].str.lower().to_frame() df2=df2['movie_title'].str.strip().to_frame() df3=movies_df2[['genre','year','director','actor1','actor2']] df3=pd.concat([df2, df3], axis=1) with pd.option_context('display.max_rows', None, 'display.max_columns', None): display(df3.loc[df3['movie_title'].isin(lll)]) print("---------------------------------------------------------------------") # - driver(4,1) # + from ipywidgets import Button, Layout movieid="" uname = int(input("Enter User ID: ")) movieid = input("Enter Movie ID (Optional): ") button1 = widgets.Button(description="Contest Based Movie Recommendation",layout=Layout(width='50%', height='40px')) button2 = widgets.Button(description="Collaborative Based Recommendation for users and movies",layout=Layout(width='50%', height='40px')) button3 = widgets.Button(description="RBM Based Movie Recommendation",layout=Layout(width='50%', height='40px')) display(button1) display(button2) display(button3) list_of_rated = rbm.list_of_rated(uname) display(list_of_rated.head()) movietitle="" if(movieid!=""): global movietitle movietitle=movies_df2.loc[movies_df['movie_id']==int(movieid),'movie_title'].iloc[0] def content_based(b): if(movieid!=""): 
print("Similar movies are") ans=cf.get_recommendations(movietitle) lll=ans[0].tolist() df2=movies_df2['movie_title'].str.lower().to_frame() df2=df2['movie_title'].str.strip().to_frame() df3=movies_df2[['genre','year','director','actor1','actor2']] df3=pd.concat([df2, df3], axis=1) with pd.option_context('display.max_rows', None, 'display.max_columns', None): display(df3.loc[df3['movie_title'].isin(lll)]) print("---------------------------------------------------------------------") def collaborative_based(b): gettop(uname) if(movieid!=""): movietitle=movies_df2.loc[movies_df['movie_id']==int(movieid),'movie_title'].iloc[0] if(m[uname][int(movieid)]!=0): getusers(uname,int(movieid)) else: print("You have already watched this movie") def RBM_based(b): rbm_op = rbm.suggested_to_watch(uname) rbm_op = rbm_op.sort_values(['Generated Score'], ascending=False).head(10) display(rbm_op) button1.on_click(content_based) button2.on_click(collaborative_based) button3.on_click(RBM_based)
5,491
/homework1.ipynb
269347e7810d8cff028089769c9e6bc7e290500c
[]
no_license
zainabaderinmola/Logic
https://github.com/zainabaderinmola/Logic
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
3,627
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- import pandas as pd import matplotlib.pyplot as plt import numpy as np import sklearn as sk lifeExpectancyPrediction_Train=pd.read_csv('train.csv') lifeExpectancyPrediction_Test=pd.read_csv('test.csv') lifeExpectancyPrediction_Train # + X = lifeExpectancyPrediction_Train.drop(['ID','TARGET_LifeExpectancy'], axis=1) Y = lifeExpectancyPrediction_Train[['TARGET_LifeExpectancy']] X_Train_Poly = lifeExpectancyPrediction_Train.drop(['TARGET_LifeExpectancy', 'ID'], axis=1) Y_Train_Poly = lifeExpectancyPrediction_Train[['TARGET_LifeExpectancy']] X_Test_Poly = lifeExpectancyPrediction_Test.drop('ID', axis=1) # - from sklearn import linear_model linReg = linear_model.LinearRegression(normalize=False) # <span style="color:blue; font-weight:bold">If i do degree=4 all the majority prediction comes to be nearly 68,which is not diverse, but given the degree 3, prediction coming from 50 to 80, which is pretty much diverse</span> from sklearn.preprocessing import PolynomialFeatures polyFeat = PolynomialFeatures(degree=2, include_bias=True) polyTrainX = polyFeat.fit_transform(X_Train_Poly) polyTestX = polyFeat.fit_transform(X_Test_Poly) linReg.fit(polyTrainX, Y_Train_Poly) print(linReg.intercept_) print(linReg.coef_) predYRm_Poly = linReg.predict(polyTestX) predictedResults=pd.DataFrame(predYRm_Poly, columns=['Predicted Life Expectancy_Polynomial']).to_csv('polynomial_prediction.csv') # ## K-FOLD CROSS VALIDATION from sklearn import model_selection number_splits = 10 kFold = model_selection.KFold(n_splits=number_splits, shuffle=True) lRegPara_ridge = [0, 0.01, 0.1, 0.5, 0.8, 1] # + from sklearn import linear_model from sklearn.metrics import mean_squared_error plt.figure() lResults = np.zeros((number_splits,len(lRegPara))) nsplit = 0 for trainIndex, 
validIndex in kFold.split(X_Train_Poly): # Get the training and validation data trainX_Regularization = np.array(X_Train_Poly.loc[trainIndex]) trainY_Regularization = np.array(Y_Train_Poly.loc[trainIndex]) validX_Regularization = np.array(X_Train_Poly.loc[validIndex]) validY_Regularization = np.array(Y_Train_Poly.loc[validIndex]) # This is where you're polynomial model is used! polyFitTrainX = polyFeat.fit_transform(trainX_Regularization) polyFitValidX = polyFeat.fit_transform(validX_Regularization) for j , regPara in enumerate(lRegPara_ridge): polyRidgeReg = linear_model.Ridge(alpha=regPara, normalize=True) polyRidgeReg.fit(polyFitTrainX, trainY_Regularization) predY = polyRidgeReg.predict(polyFitValidX) mse = mean_squared_error(validY_Regularization, predY) lResults[nsplit, j] = (mse) plt.plot(lRegPara_ridge, lResults[nsplit, :], label='Fold '+str(nsplit+1)) nsplit = nsplit + 1 plt.xlabel("Alpha") plt.ylabel("MSE") plt.legend() plt.figure() plt.errorbar(lRegPara, np.mean(lResults,axis=0), yerr=np.std(lResults,axis=0),capsize=3) plt.xlabel("Alpha") plt.ylabel("Average MSE") # + from sklearn import linear_model from sklearn.metrics import mean_squared_error # print('R2 ', r2_score(testY, predYRm)) plt.figure() lResults = np.zeros((number_splits,len(lRegPara))) nsplit = 0 for trainIndex, validIndex in kFold.split(X_Train_Poly): # Get the training and validation data trainX_Regularization = np.array(X_Train_Poly.loc[trainIndex]) trainY_Regularization = np.array(Y_Train_Poly.loc[trainIndex]) validX_Regularization = np.array(X_Train_Poly.loc[validIndex]) validY_Regularization = np.array(Y_Train_Poly.loc[validIndex]) # This is where you're polynomial model is used! 
polyFitTrainX = polyFeat.fit_transform(trainX_Regularization) polyFitValidX = polyFeat.fit_transform(validX_Regularization) for j , regPara in enumerate(lRegPara): lasso=linear_model.Lasso(alpha=regPara, normalize=True) polyRidgeReg = lasso polyRidgeReg.fit(polyFitTrainX, trainY_Regularization) # train_score=lasso.score(trainX_Regularization,trainY_Regularization) predY = polyRidgeReg.predict(polyFitValidX) mse = mean_squared_error(validY_Regularization, predY) lResults[nsplit, j] = (mse) plt.plot(lRegPara, lResults[nsplit, :], label='Fold '+str(nsplit+1)) nsplit = nsplit + 1 plt.xlabel("Alpha") plt.ylabel("MSE") plt.legend() plt.figure() plt.errorbar(lRegPara, np.mean(lResults,axis=0), yerr=np.std(lResults,axis=0),capsize=3) plt.xlabel("Alpha") plt.ylabel("Average MSE") # - # ## Investigating the model from sklearn.metrics import mean_squared_error trainX, validX, trainY, validY = model_selection.train_test_split(X_Train_Poly, Y_Train_Poly, test_size=0.20) LassoReg = linear_model.Lasso(alpha=0.05, normalize=True) LassoReg.fit(trainX, trainY) predY = LassoReg.predict(validX) mse = mean_squared_error(validY, predY) print("MSE : ", mse) coef = pd.Series(np.squeeze(LassoReg.coef_), index = X.columns) imp_coef = coef.sort_values() imp_coef.plot(kind = "barh") plt.title("Feature importance using Lasso Linear Model") plt.show()
5,327
/002_wordcount.ipynb
e1d919130340c9bf4ec57be70232d8f1049eadd9
[]
no_license
idontdomath/datos-spark-lab
https://github.com/idontdomath/datos-spark-lab
1
0
null
2015-08-27T02:03:34
2015-08-27T01:41:45
null
Jupyter Notebook
false
false
.py
42,349
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Ejemplos con Wordcount # ## RDD paralelizando datos data = [1, 2, 3, 4, 5] distData = sc.parallelize(data) print distData # ## Uso el contenido de un archivo de texto como fuente de datos textFile = sc.textFile("/usr/local/bin/spark-1.3.1-bin-hadoop2.6/README.md") # ## Cuento palabras (flatMap, map, reduceByKey) wordCounts = textFile.flatMap(lambda line: line.split()).map(lambda word: (word, 1)).reduceByKey(lambda a, b: a+b) # ## Muestro todas las tuplas (collect) wordCounts.collect() # ## Las 5 palabras que más aparecen (takeOrdered) wordCounts.takeOrdered(5, key = lambda x: -x[1]) # ## Las 5 palabras que más aparecen pero calculandolo afuera de Spark words = wordCounts.collect() words.sort(key = lambda x: -x[1]) print words[0:10] # ## Las 5 palabras que más aparecen usando grupos (groupByKey) textFile.flatMap(lambda line: line.split()).map(lambda word: (word, 1)).groupByKey().takeOrdered(5, lambda (key, value): -1 * len(value)) # ## La palabra más larga (reduce) textFile.flatMap(lambda line: line.split()).reduce(lambda a, b: a if (len(a) > len(b)) else b) # ## Palabras que empiezan con a (filter) textFile.flatMap(lambda line: line.split()).filter(lambda word: word.startswith('a')).collect() # ## Palabras únicas que empiezan con a (distinct) textFile.flatMap(lambda line: line.split()).filter(lambda word: word.startswith('a')).distinct().collect() # ## Cantidad de palabras por frecuencia de repetición ordenados (sortByKey) rangeCounts = wordCounts.map(lambda set: (set[1], 1)).reduceByKey(lambda a, b: a+b).sortByKey().collect() rangeCounts # ## Graficando cantidad de palabras por frecuencia de repetición # + import numpy as np import matplotlib.pyplot as plt ranges = [t[0] for t in rangeCounts] y_pos = np.arange(len(ranges)) words = [t[1] for t 
in rangeCounts] plt.barh(y_pos, words) plt.yticks(y_pos, ranges) plt.xlabel('Words') plt.title('Word count by word frequency') plt.show()
2,210
/R2_ Linear Regression_Mobile Price_Nilotpal.ipynb
225d3ca133a965a839774e2c15954efebfc94a95
[]
no_license
nilotpalkumarnk/Machine-Learning-Projects
https://github.com/nilotpalkumarnk/Machine-Learning-Projects
0
0
null
2021-08-24T13:50:47
2021-08-12T11:39:58
Jupyter Notebook
Jupyter Notebook
false
false
.py
1,290,235
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np from xgboost import XGBRegressor from sklearn.model_selection import StratifiedKFold from sklearn.utils import shuffle from sklearn.metrics import mean_squared_log_error from sklearn.model_selection import train_test_split from sklearn import preprocessing from sklearn import ensemble import sklearn from sklearn.svm import SVR from sklearn import impute import tsfresh import seaborn as sns sns.set(rc={'figure.figsize':(12,8.27)}) train=pd.read_csv('../input/train.csv') test=pd.read_csv('../input/test.csv') product_id=test.Product_id # train.head() def ncat(x): if x<200: return 0 if x>=200 and x<300: return 1 if x>=300 and x<400: return 2 if x>=400: return 3 # train["Selling_Price"]=train["Selling_Price"].fillna(train["Selling_Price"].mean()) train=train[train["Selling_Price"]>0] train=train[train["Minimum_price"]<=22000] train=train[train["Maximum_price"]<=33000] train=train[((train["Selling_Price"]<train["Minimum_price"]) & (train["Discount_avail"]==1.0)) | ((train["Selling_Price"]>train["Minimum_price"]) & (train["Discount_avail"]==0.0))] # train["Selling_Price"]=train["Selling_Price"].fillna(train["Selling_Price"].mean()) test.loc[:, "Selling_Price"] = -1000000000 # concatenate both training and test data data = pd.concat([train, test]).reset_index(drop=True) data=data.drop(columns=['Customer_name','Product_id']) # data=data.drop('instock_date') # data=data.drop(' Product_id') a=data["Minimum_price"].mean() b=data["Maximum_price"].mean() c=b-a data['Grade_chg2']=data.Grade.astype(str) + '_' + data['charges_2 (%)'].astype(str) # data['stall_prod']=data.Product_Category + '_' + data.Stall_no.astype(str) data['prod_grd']=data.Product_Category + '_' + data.Grade.astype(str) # 
data['prod_demand']=data.Product_Category + '_' + data.Demand.astype(str) data["charges_1"]=data["charges_1"].fillna(data["charges_1"].mean()) data["charges_range"]=data["charges_1"].apply(ncat) # data["charges_2 (%)"]=data["charges_2 (%)"].fillna(data["charges_2 (%)"].mean()) data["Minimum_price"]=data["Minimum_price"].fillna(data["Maximum_price"]-c) data["Maximum_price"]=data["Maximum_price"].fillna(data["Minimum_price"]+c) data["Minimum_price"]=data["Minimum_price"].fillna(data["Minimum_price"].mean()) data["Maximum_price"]=data["Maximum_price"].fillna(data["Maximum_price"].mean()) # "charges_1","charges_2 (%)"","Minimum_price","Maximum_price","Selling_Price" data['mean_p']=(data['Minimum_price']+data['Maximum_price'])//2 # data.instock_date=pd.to_datetime(data.instock_date) # data['year']=data.instock_date.dt.year # data['month']=data.instock_date.dt.month # data['weekday']=data.instock_date.dt.dayofweek data.drop('instock_date',axis=1,inplace=True) num_col=['Maximum_price','Minimum_price','Selling_Price','charges_1','mean_p','Demand'] cat_col=[col for col in data.columns if col not in num_col] for feat in cat_col: lbl_enc = preprocessing.LabelEncoder() temp_col = data[feat].fillna("None").astype(str).values data.loc[:, feat] = lbl_enc.fit_transform(temp_col) train2= data[data.Selling_Price != -1000000000].reset_index(drop=True) test2 = data[data.Selling_Price == -1000000000].reset_index(drop=True) # print(train2.info()) # print(test2.info()) # print(train2.describe()) # print(test2.describe()) x_test=test2.drop('Selling_Price',axis=1) x_train=train2.drop('Selling_Price',axis=1) y_train=train2["Selling_Price"].astype(float) # x_train, x_vld, y_train, y_vld = train_test_split(x_train,y_train test_size=0.2, random_state=42) # trainx=np.array(x_train) # valx=np.array(x_test) # testx=np.array(testx) model=ensemble.GradientBoostingRegressor(n_estimators=160,max_depth=7) # model=XGBRegressor(max_depth=15,n_estimators=155) # 
model=ensemble.GradientBoostingRegressor(n_estimators=370,max_depth=8,min_samples_split=2,learning_rate=0.035,loss='ls') # model=ensemble.GradientBoostingRegressor(n_estimators=305,max_depth=5,min_samples_split=2,learning_rate=0.05,loss='ls') # model=ensemble.GradientBoostingRegressor(n_estimators=290,max_depth=5,min_samples_split=2,learning_rate=0.04,loss='ls') # model=sklearn.ensemble.AdaBoostRegressor(base_estimator=None, n_estimators=200, learning_rate=0.1, loss='square', random_state=2) # model=sklearn.neighbors.KNeighborsRegressor(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None) # model=sklearn.linear_model.Ridge() # model=sklearn.linear_model.Lasso() # model=sklearn.linear_model.ElasticNet() # model=sklearn.linear_model.LinearRegression() # model=sklearn.linear_model.LogisticRegression() # model=SVR(kernel='rbf') # model=sklearn.linear_model.SGDRegressor(loss='squared_loss', alpha=0.001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=0.01, shuffle=True, verbose=2, epsilon=0.1, random_state=2, learning_rate='optimal', eta0=0.01, power_t=0.25, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, warm_start=False, average=False) # fit model on training data (ohe) print("training your model...") model.fit(x_train,y_train) print(model.score(x_train,y_train)) # print(model.score(x_vld,y_vld)) # a=model.predict(trainx) # # print(a) # b=model.predict(valx) # # print(b) result=model.predict(x_test) # def root_mean_squared_error(y_true, y_pred): # return np.sqrt(np.mean(np.square(y_pred - y_true))) # print("training loss: ",root_mean_squared_error(np.array(y_train),a)) # # print("val loss: ",root_mean_squared_error(np.array(y_test),b)) submission=pd.Series(data=result,index=product_id,name='Selling_Price') submission[submission<0]=submission[submission<0]*(-1) submission.to_csv('../prediction/3.csv') # - y 'Black' in the 'approx_price_EUR' column. 
# As 'approx_price_EUR' is a target column so we should drop this row rather than imputing the value. DF_mobile[DF_mobile.approx_price_EUR == 'Black'] DF_mobile[DF_mobile["approx_price_EUR"].str.isdigit()==False] # Another way of implemting the above logic DF_mobile = DF_mobile[DF_mobile['approx_price_EUR'] != 'Black'] # Dropping the column where 'approx_price_EUR == Black' DF_mobile.shape # We can see 1 row is dropped DF_mobile['approx_price_EUR'] = DF_mobile['approx_price_EUR'].astype('float64') # Now changing the dtype = float64 DF_mobile['approx_price_EUR'].dtypes # We can see the dtype has changed for 'approx_price_EUR' column # + [markdown] colab_type="text" id="H5jiA_uo_xJD" # ### 3. Data Cleansing # + [markdown] colab_type="text" id="bY7sHjyYe2zC" # #### Select instances having brand as 'Samsung' and report the final shape of the dataset # + [markdown] colab_type="text" id="2BI7FK3ngMIJ" # #### Check for missing values and drop the columns having more than 30% os missing values. 
And impute the remaining columns with their mode value # + colab={} colab_type="code" id="pbi9C7oEYC21" DF_Samsung = DF_mobile[DF_mobile['brand'] == 'Samsung'] # Creating new DF_Samsung with dataset of only Samsung brand # - DF_Samsung.shape # Cheacking the shape of DF_Samsung DF_Samsung.info() # Checking not-null value for all attributes in DF_Samsung # + # Checking columns which consist more than 30% of null values cols_to_drop = DF_Samsung.columns[DF_Samsung.isnull().sum()/len(DF_Samsung) > 0.3] cols_to_drop # - # Drop the columns which consist more than 30% of null values and assigning it to new df_Samsung df_Samsung = DF_Samsung.drop(cols_to_drop,axis=1) # + [markdown] colab_type="text" id="2bWLUwKdfQMg" # #### Report the final shape of the dataset # + colab={} colab_type="code" id="-ueJqOtEfVJ9" df_Samsung.shape # Now df_Samsung consists of 1103 rows and 30 attributes...i.e 10 columns have been dropped # - df_Samsung.columns # Columns in df_Samsung df_Samsung.isnull().sum() # Null values count in the remaining columns in df_Samsung # + colab={} colab_type="code" id="Vciq2eE8fv2I" df_Samsung = df_Samsung[~df_Samsung['approx_price_EUR'].isnull()] # dropping rows where 'approx_price_EUR' = null # + # Another way : #df_Samsung.dropna(subset = ['approx_price_EUR'], inplace=True) # - df_Samsung.shape # Now df_Samsung has 870 rows and 30 attributes df_Samsung['approx_price_EUR'].isnull().any() # No numm values present in the 'approx_price_EUR' column # + # Imputing the null values in the datafrmae with the mode() for i in df_Samsung.columns: if df_Samsung[i].isnull().any(): df_Samsung[i] = df_Samsung[i].fillna(df_Samsung[i].mode()[0]) # - df_Samsung.isnull().sum() # Now we can see that all the null values in the dataset has been imputed with mode() value df_Samsung.columns # Columns present in the df_Samsung df_Samsung['model'].value_counts() # We should drop this column as it consist of all unique values df_Samsung['2G_bands'].value_counts() # We cannot encode this 
column so we can delete this column df_Samsung['GPRS'].value_counts() # We can encode it as 0 and 1 as there are 133 datapoints with no GPRS which can be encoded as 1 # and rest all can be encoded as 0 which means GPRS is present. df_Samsung['GPS'].value_counts() # We can encode it as 0 and 1 as there are 369 datapoints with no GPS which can be encoded as 1 # and rest all can be encoded as 0 which means GPS is present. df_Samsung['EDGE'].value_counts() # We can encode it as 0 and 1 as there are 209 datapoints with no EDGE which can be encoded as 1 # and rest all can be encoded as 0 which means EDGE is present. df_Samsung['USB'].value_counts() # We can encode it as 0 and 1 as there are 43 datapoints with no USB which can be encoded as 1 # and rest all can be encoded as 0 which means USB is present. df_Samsung['primary_camera'].value_counts() # + [markdown] colab_type="text" id="tauuH5qNfVyL" # #### Let's select a few important columns for model building. We will encode 'network_technology', 'GPS', 'USB', 'primary_camera', 'display_resolution', 'internal_memory' and use 'approx_price_EUR' as target columns. # # We will do the following encoding and check final frequency counts # - # Creating DF_Final with the columns ['network_technology', 'GPS', 'USB', 'primary_camera', 'display_resolution', 'internal_memory'] DF_Final = df_Samsung[['network_technology', 'GPS', 'USB', 'primary_camera', 'display_resolution', 'internal_memory']] Y = df_Samsung['approx_price_EUR'] # Target Column DF_Final.shape # DF_Final consists of 870 rows and 6 attributes # + [markdown] colab_type="text" id="xi-fuSYnfxOP" # #### Encode 'GPS', 'USB' column into two categories (whether GPS/USB is YES or NO) # + colab={} colab_type="code" id="ffhFiqMlgFKb" DF_Final['GPS'].value_counts() # We can encode it as 0 and 1 as there are 369 datapoints with no GPS which can be encoded as 1 # and rest all can be encoded as 0 which means GPS is present. 
# - DF_Final['GPS'].nunique() # Unique enteries in GPS column # Label encoding Logic for GPS column for i in range(len(DF_Final['GPS'])): if DF_Final['GPS'].iloc[i] == 'No': DF_Final['GPS'].iloc[i] = 0 else: DF_Final['GPS'].iloc[i] = 1 DF_Final['GPS'].value_counts() DF_Final['USB'].value_counts() # We can encode it as 0 and 1 as there are 43 datapoints with no USB which can be encoded as 1 # and rest all can be encoded as 0 which means usb is present. DF_Final['USB'].nunique() # Unique enteries for USB column # Label encoding for USB column for i in range(len(DF_Final['USB'])): if DF_Final['USB'].iloc[i] == 'No': DF_Final['USB'].iloc[i] = 0 else: DF_Final['USB'].iloc[i] = 1 DF_Final['USB'].value_counts() # + [markdown] colab_type="text" id="m8UPKX8cgFfH" # #### Encode "network_technology". Create two categories one having value as "No cellular connectivity " and another having all otrher types of connectivity # - df_Samsung['network_technology'].value_counts() # There are 36 cellular phone with 'No cellular Connectivity' in df_Samsung # + colab={} colab_type="code" id="jYEYYQUPgbDH" DF_Final['network_technology'].nunique() # Unique elements in the network_technologies column # - # Label encoding for network_technologies column for i in range(len(DF_Final['network_technology'])): if DF_Final['network_technology'].iloc[i] == 'No cellular connectivity': DF_Final['network_technology'].iloc[i] = 0 else: DF_Final['network_technology'].iloc[i] = 1 DF_Final['network_technology'].value_counts() # # ### A RegEx, or Regular Expression , is a sequence of characters that forms a seacrh pattern. # # RegEx can be used to check if a string contains the specified pattern. 
# # - findall : Retuns a list containing all matches # - search : Returns a match object if there is a match in the string # - split : Returns a list where the string has been split at each match # - sub : Replaces one or many matches with a string # # # Returns a match where the string contains digit(numbers 0-9) "/d" text = '2...2 inches (~27.111% screen-to-body ratio) 25' import re re.findall("\d+\.\d", text) text = '2...2 inches (~27.111% screen-to-body ratio) 25' import re re.findall("\d+\.?\d*", text) text = '2...2 inches (~27.111% screen-to-body ratio) 25' import re re.findall("\d+\.*\d*", text) # + [markdown] colab_type="text" id="yHWJXcIugbVA" # #### Encode "display_resolution". Find the resolution number using regex # - X = DF_Final.copy() # Creating new data frame X as the copy of DF_Final dataset # + colab={} colab_type="code" id="gfDFnSImg7m7" # Regular expression for extracting the numbers from each enteries in the display_resolution column and replacing it in the same column import re for i in range(len(X['display_resolution'])): txt=X['display_resolution'].iloc[i] x = re.findall("\d+\.*\d*", txt) X['display_resolution'].iloc[i]=x[0] # - X['display_resolution'].head() # Displaying top 5 rows of display_resolution column # Converting the datatype of the display_resolution column to float64 X['display_resolution'] = X['display_resolution'].astype('float64') X.dtypes # Checking the datatype has been changed from object to float64 for display_resolution column # + [markdown] colab_type="text" id="ZnL36YVqhENc" # #### Encode "internal_memory". 
Create two categories having values in MB, or GB using regex # + [markdown] colab_type="text" id="DtL4j8SmhcXy" # ### Hint - # mb = re.search('MB',txt) # if mb: # X['internal_memory'].iloc[i]=1 # else: # X['internal_memory'].iloc[i]=0 # - X['internal_memory'].head() # Selecting top 5 rows of the internal_memory column # + colab={} colab_type="code" id="iABIy5XQhXDR" # Label encoding for internal_memory column import re for i in range(len(X['internal_memory'])): txt=X['internal_memory'].iloc[i] x = re.search('GB',txt) if x: X['internal_memory'].iloc[i]=1 else: X['internal_memory'].iloc[i]=0 # - X['internal_memory'].value_counts() # + [markdown] colab_type="text" id="ImOxZ6_ihfcB" # #### Encode "primary_camera". Create 2-3 categories having no camera, camera in M, or VGA using regex # # - X['primary_camera'].nunique() # Checking counts of unique elements in the primary_camera column # + colab={} colab_type="code" id="BGTuoHs6h8XR" # Label encoding for primary_camera column import re for i in range(len(X['primary_camera'])): txt=X['primary_camera'].iloc[i] type1 = re.search('VGA',txt) type2 = re.search('MP',txt) if type1: X['primary_camera'].iloc[i]=0 elif type2: X['primary_camera'].iloc[i]=1 else : X['primary_camera'].iloc[i]=2 # - X['primary_camera'].value_counts() # + [markdown] colab_type="text" id="G322DWvw_xKq" # ### 4. Data Preparation # + [markdown] colab_type="text" id="nULMYAS4wVW7" # Segregate X and y # + colab={} colab_type="code" id="QOMKamuXiMxn" X.head() # Checking data in X data set # - y = Y # Creating target dataset X.count() y.count() # + [markdown] colab_type="text" id="t6lzckJU_xLY" # ### 5. 
EDA # + [markdown] colab_type="text" id="OStGnzHgiVmv" # #### Perform univariate analysis by checking frequency distribution of all the features # + colab={} colab_type="code" id="jwgcNlrgiej2" sns.pairplot(X); # + [markdown] colab_type="text" id="diBuBs7Digjt" # #### Perform bivariate analysis by checking doing crosstab between independent and dependent features # + colab={} colab_type="code" id="XZIMCOC_iqaV" pd.crosstab(X['GPS'],y).plot(figsize=(20,20)); # - pd.crosstab(y,X['GPS']).plot(); pd.crosstab(y,X['USB']).plot(); pd.crosstab(y,X['network_technology']).plot(); pd.crosstab(y,X['display_resolution']).plot(figsize=(20,20)); # + [markdown] colab_type="text" id="EarZ-KHT_xL3" # ### 6. Split data into Train and Test set and build a Supervised Learning model using Linear Regression # + colab={} colab_type="code" id="iclEMxL5ixtd" # Splitting data into the train and test set from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X,y,test_size= 0.3, random_state = 10) # - model = LinearRegression() # Instantiating LinearRegression Model model.fit(X_train,y_train) # Training the model model.score(X_train,y_train) # Checking score for training set model.score(X_test,y_test) # Checking score for testing set # + [markdown] colab_type="text" id="GGvaqpVC_xL7" # ### 7. Train a KNN model and check its performance # + colab={} colab_type="code" id="I_XRryiPi7mL" from sklearn.neighbors import KNeighborsRegressor knn = KNeighborsRegressor(n_neighbors=8) # Instantiating KNN Regressor model # - knn.fit(X_train,y_train) # Training the model knn.score(X_train,y_train) # Checking score for training set knn.score(X_test,y_test) # Checking score for testing set # + [markdown] colab_type="text" id="OGCvdjnO_xMY" # End of case study
18,388
/Lab_4.ipynb
276bbcae5ea50f43f11e2790bc1ca699470dd223
[]
no_license
lisaVolkova/data_analysys
https://github.com/lisaVolkova/data_analysys
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
36,014
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="KU7GJIVdiCbJ" from pandas import read_csv from datetime import datetime from google.colab import files import io import pandas as pd from pandas import read_csv from matplotlib import pyplot as plt from pandas import read_csv from pandas import DataFrame from pandas import concat from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import LabelEncoder from sklearn.metrics import mean_squared_error from keras.preprocessing.sequence import pad_sequences from keras.models import Model, Sequential from keras.layers import LSTM, Dense import numpy as np import plotly.graph_objects as go import tensorflow as tf from sklearn.metrics import classification_report # + id="7Ai2njoQiZdT" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": 
"Ly8gQ29weXJpZ2h0IDIwMTcgR29vZ2xlIExMQwovLwovLyBMaWNlbnNlZCB1bmRlciB0aGUgQXBhY2hlIExpY2Vuc2UsIFZlcnNpb24gMi4wICh0aGUgIkxpY2Vuc2UiKTsKLy8geW91IG1heSBub3QgdXNlIHRoaXMgZmlsZSBleGNlcHQgaW4gY29tcGxpYW5jZSB3aXRoIHRoZSBMaWNlbnNlLgovLyBZb3UgbWF5IG9idGFpbiBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKLy8KLy8gICAgICBodHRwOi8vd3d3LmFwYWNoZS5vcmcvbGljZW5zZXMvTElDRU5TRS0yLjAKLy8KLy8gVW5sZXNzIHJlcXVpcmVkIGJ5IGFwcGxpY2FibGUgbGF3IG9yIGFncmVlZCB0byBpbiB3cml0aW5nLCBzb2Z0d2FyZQovLyBkaXN0cmlidXRlZCB1bmRlciB0aGUgTGljZW5zZSBpcyBkaXN0cmlidXRlZCBvbiBhbiAiQVMgSVMiIEJBU0lTLAovLyBXSVRIT1VUIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4KLy8gU2VlIHRoZSBMaWNlbnNlIGZvciB0aGUgc3BlY2lmaWMgbGFuZ3VhZ2UgZ292ZXJuaW5nIHBlcm1pc3Npb25zIGFuZAovLyBsaW1pdGF0aW9ucyB1bmRlciB0aGUgTGljZW5zZS4KCi8qKgogKiBAZmlsZW92ZXJ2aWV3IEhlbHBlcnMgZm9yIGdvb2dsZS5jb2xhYiBQeXRob24gbW9kdWxlLgogKi8KKGZ1bmN0aW9uKHNjb3BlKSB7CmZ1bmN0aW9uIHNwYW4odGV4dCwgc3R5bGVBdHRyaWJ1dGVzID0ge30pIHsKICBjb25zdCBlbGVtZW50ID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnc3BhbicpOwogIGVsZW1lbnQudGV4dENvbnRlbnQgPSB0ZXh0OwogIGZvciAoY29uc3Qga2V5IG9mIE9iamVjdC5rZXlzKHN0eWxlQXR0cmlidXRlcykpIHsKICAgIGVsZW1lbnQuc3R5bGVba2V5XSA9IHN0eWxlQXR0cmlidXRlc1trZXldOwogIH0KICByZXR1cm4gZWxlbWVudDsKfQoKLy8gTWF4IG51bWJlciBvZiBieXRlcyB3aGljaCB3aWxsIGJlIHVwbG9hZGVkIGF0IGEgdGltZS4KY29uc3QgTUFYX1BBWUxPQURfU0laRSA9IDEwMCAqIDEwMjQ7CgpmdW5jdGlvbiBfdXBsb2FkRmlsZXMoaW5wdXRJZCwgb3V0cHV0SWQpIHsKICBjb25zdCBzdGVwcyA9IHVwbG9hZEZpbGVzU3RlcChpbnB1dElkLCBvdXRwdXRJZCk7CiAgY29uc3Qgb3V0cHV0RWxlbWVudCA9IGRvY3VtZW50LmdldEVsZW1lbnRCeUlkKG91dHB1dElkKTsKICAvLyBDYWNoZSBzdGVwcyBvbiB0aGUgb3V0cHV0RWxlbWVudCB0byBtYWtlIGl0IGF2YWlsYWJsZSBmb3IgdGhlIG5leHQgY2FsbAogIC8vIHRvIHVwbG9hZEZpbGVzQ29udGludWUgZnJvbSBQeXRob24uCiAgb3V0cHV0RWxlbWVudC5zdGVwcyA9IHN0ZXBzOwoKICByZXR1cm4gX3VwbG9hZEZpbGVzQ29udGludWUob3V0cHV0SWQpOwp9CgovLyBUaGlzIGlzIHJvdWdobHkgYW4gYXN5bmMgZ2VuZXJhdG9yIChub3Qgc3VwcG9ydGVkIGluIHRoZSBicm93c2VyIHlldCksCi8vIHdoZXJlIHRoZXJlIGFyZSBtdWx0aXBsZSBhc3luY2hyb25vdXMgc3RlcHMgYW5kIHRoZSBQeXRob24
gc2lkZSBpcyBnb2luZwovLyB0byBwb2xsIGZvciBjb21wbGV0aW9uIG9mIGVhY2ggc3RlcC4KLy8gVGhpcyB1c2VzIGEgUHJvbWlzZSB0byBibG9jayB0aGUgcHl0aG9uIHNpZGUgb24gY29tcGxldGlvbiBvZiBlYWNoIHN0ZXAsCi8vIHRoZW4gcGFzc2VzIHRoZSByZXN1bHQgb2YgdGhlIHByZXZpb3VzIHN0ZXAgYXMgdGhlIGlucHV0IHRvIHRoZSBuZXh0IHN0ZXAuCmZ1bmN0aW9uIF91cGxvYWRGaWxlc0NvbnRpbnVlKG91dHB1dElkKSB7CiAgY29uc3Qgb3V0cHV0RWxlbWVudCA9IGRvY3VtZW50LmdldEVsZW1lbnRCeUlkKG91dHB1dElkKTsKICBjb25zdCBzdGVwcyA9IG91dHB1dEVsZW1lbnQuc3RlcHM7CgogIGNvbnN0IG5leHQgPSBzdGVwcy5uZXh0KG91dHB1dEVsZW1lbnQubGFzdFByb21pc2VWYWx1ZSk7CiAgcmV0dXJuIFByb21pc2UucmVzb2x2ZShuZXh0LnZhbHVlLnByb21pc2UpLnRoZW4oKHZhbHVlKSA9PiB7CiAgICAvLyBDYWNoZSB0aGUgbGFzdCBwcm9taXNlIHZhbHVlIHRvIG1ha2UgaXQgYXZhaWxhYmxlIHRvIHRoZSBuZXh0CiAgICAvLyBzdGVwIG9mIHRoZSBnZW5lcmF0b3IuCiAgICBvdXRwdXRFbGVtZW50Lmxhc3RQcm9taXNlVmFsdWUgPSB2YWx1ZTsKICAgIHJldHVybiBuZXh0LnZhbHVlLnJlc3BvbnNlOwogIH0pOwp9CgovKioKICogR2VuZXJhdG9yIGZ1bmN0aW9uIHdoaWNoIGlzIGNhbGxlZCBiZXR3ZWVuIGVhY2ggYXN5bmMgc3RlcCBvZiB0aGUgdXBsb2FkCiAqIHByb2Nlc3MuCiAqIEBwYXJhbSB7c3RyaW5nfSBpbnB1dElkIEVsZW1lbnQgSUQgb2YgdGhlIGlucHV0IGZpbGUgcGlja2VyIGVsZW1lbnQuCiAqIEBwYXJhbSB7c3RyaW5nfSBvdXRwdXRJZCBFbGVtZW50IElEIG9mIHRoZSBvdXRwdXQgZGlzcGxheS4KICogQHJldHVybiB7IUl0ZXJhYmxlPCFPYmplY3Q+fSBJdGVyYWJsZSBvZiBuZXh0IHN0ZXBzLgogKi8KZnVuY3Rpb24qIHVwbG9hZEZpbGVzU3RlcChpbnB1dElkLCBvdXRwdXRJZCkgewogIGNvbnN0IGlucHV0RWxlbWVudCA9IGRvY3VtZW50LmdldEVsZW1lbnRCeUlkKGlucHV0SWQpOwogIGlucHV0RWxlbWVudC5kaXNhYmxlZCA9IGZhbHNlOwoKICBjb25zdCBvdXRwdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQob3V0cHV0SWQpOwogIG91dHB1dEVsZW1lbnQuaW5uZXJIVE1MID0gJyc7CgogIGNvbnN0IHBpY2tlZFByb21pc2UgPSBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gewogICAgaW5wdXRFbGVtZW50LmFkZEV2ZW50TGlzdGVuZXIoJ2NoYW5nZScsIChlKSA9PiB7CiAgICAgIHJlc29sdmUoZS50YXJnZXQuZmlsZXMpOwogICAgfSk7CiAgfSk7CgogIGNvbnN0IGNhbmNlbCA9IGRvY3VtZW50LmNyZWF0ZUVsZW1lbnQoJ2J1dHRvbicpOwogIGlucHV0RWxlbWVudC5wYXJlbnRFbGVtZW50LmFwcGVuZENoaWxkKGNhbmNlbCk7CiAgY2FuY2VsLnRleHRDb250ZW50ID0gJ0NhbmNlbCB1cGxvYWQnOwogIGNvbnN0IGNhbmNlbFByb21pc2UgPSBuZXcgUHJvbWlzZSgocmV
zb2x2ZSkgPT4gewogICAgY2FuY2VsLm9uY2xpY2sgPSAoKSA9PiB7CiAgICAgIHJlc29sdmUobnVsbCk7CiAgICB9OwogIH0pOwoKICAvLyBXYWl0IGZvciB0aGUgdXNlciB0byBwaWNrIHRoZSBmaWxlcy4KICBjb25zdCBmaWxlcyA9IHlpZWxkIHsKICAgIHByb21pc2U6IFByb21pc2UucmFjZShbcGlja2VkUHJvbWlzZSwgY2FuY2VsUHJvbWlzZV0pLAogICAgcmVzcG9uc2U6IHsKICAgICAgYWN0aW9uOiAnc3RhcnRpbmcnLAogICAgfQogIH07CgogIGNhbmNlbC5yZW1vdmUoKTsKCiAgLy8gRGlzYWJsZSB0aGUgaW5wdXQgZWxlbWVudCBzaW5jZSBmdXJ0aGVyIHBpY2tzIGFyZSBub3QgYWxsb3dlZC4KICBpbnB1dEVsZW1lbnQuZGlzYWJsZWQgPSB0cnVlOwoKICBpZiAoIWZpbGVzKSB7CiAgICByZXR1cm4gewogICAgICByZXNwb25zZTogewogICAgICAgIGFjdGlvbjogJ2NvbXBsZXRlJywKICAgICAgfQogICAgfTsKICB9CgogIGZvciAoY29uc3QgZmlsZSBvZiBmaWxlcykgewogICAgY29uc3QgbGkgPSBkb2N1bWVudC5jcmVhdGVFbGVtZW50KCdsaScpOwogICAgbGkuYXBwZW5kKHNwYW4oZmlsZS5uYW1lLCB7Zm9udFdlaWdodDogJ2JvbGQnfSkpOwogICAgbGkuYXBwZW5kKHNwYW4oCiAgICAgICAgYCgke2ZpbGUudHlwZSB8fCAnbi9hJ30pIC0gJHtmaWxlLnNpemV9IGJ5dGVzLCBgICsKICAgICAgICBgbGFzdCBtb2RpZmllZDogJHsKICAgICAgICAgICAgZmlsZS5sYXN0TW9kaWZpZWREYXRlID8gZmlsZS5sYXN0TW9kaWZpZWREYXRlLnRvTG9jYWxlRGF0ZVN0cmluZygpIDoKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgJ24vYSd9IC0gYCkpOwogICAgY29uc3QgcGVyY2VudCA9IHNwYW4oJzAlIGRvbmUnKTsKICAgIGxpLmFwcGVuZENoaWxkKHBlcmNlbnQpOwoKICAgIG91dHB1dEVsZW1lbnQuYXBwZW5kQ2hpbGQobGkpOwoKICAgIGNvbnN0IGZpbGVEYXRhUHJvbWlzZSA9IG5ldyBQcm9taXNlKChyZXNvbHZlKSA9PiB7CiAgICAgIGNvbnN0IHJlYWRlciA9IG5ldyBGaWxlUmVhZGVyKCk7CiAgICAgIHJlYWRlci5vbmxvYWQgPSAoZSkgPT4gewogICAgICAgIHJlc29sdmUoZS50YXJnZXQucmVzdWx0KTsKICAgICAgfTsKICAgICAgcmVhZGVyLnJlYWRBc0FycmF5QnVmZmVyKGZpbGUpOwogICAgfSk7CiAgICAvLyBXYWl0IGZvciB0aGUgZGF0YSB0byBiZSByZWFkeS4KICAgIGxldCBmaWxlRGF0YSA9IHlpZWxkIHsKICAgICAgcHJvbWlzZTogZmlsZURhdGFQcm9taXNlLAogICAgICByZXNwb25zZTogewogICAgICAgIGFjdGlvbjogJ2NvbnRpbnVlJywKICAgICAgfQogICAgfTsKCiAgICAvLyBVc2UgYSBjaHVua2VkIHNlbmRpbmcgdG8gYXZvaWQgbWVzc2FnZSBzaXplIGxpbWl0cy4gU2VlIGIvNjIxMTU2NjAuCiAgICBsZXQgcG9zaXRpb24gPSAwOwogICAgd2hpbGUgKHBvc2l0aW9uIDwgZmlsZURhdGEuYnl0ZUxlbmd0aCkgewogICAgICBjb25zdCBsZW5ndGggPSBNYXRoLm1pbihmaWxlRGF0YS5ieXRlTGV
uZ3RoIC0gcG9zaXRpb24sIE1BWF9QQVlMT0FEX1NJWkUpOwogICAgICBjb25zdCBjaHVuayA9IG5ldyBVaW50OEFycmF5KGZpbGVEYXRhLCBwb3NpdGlvbiwgbGVuZ3RoKTsKICAgICAgcG9zaXRpb24gKz0gbGVuZ3RoOwoKICAgICAgY29uc3QgYmFzZTY0ID0gYnRvYShTdHJpbmcuZnJvbUNoYXJDb2RlLmFwcGx5KG51bGwsIGNodW5rKSk7CiAgICAgIHlpZWxkIHsKICAgICAgICByZXNwb25zZTogewogICAgICAgICAgYWN0aW9uOiAnYXBwZW5kJywKICAgICAgICAgIGZpbGU6IGZpbGUubmFtZSwKICAgICAgICAgIGRhdGE6IGJhc2U2NCwKICAgICAgICB9LAogICAgICB9OwogICAgICBwZXJjZW50LnRleHRDb250ZW50ID0KICAgICAgICAgIGAke01hdGgucm91bmQoKHBvc2l0aW9uIC8gZmlsZURhdGEuYnl0ZUxlbmd0aCkgKiAxMDApfSUgZG9uZWA7CiAgICB9CiAgfQoKICAvLyBBbGwgZG9uZS4KICB5aWVsZCB7CiAgICByZXNwb25zZTogewogICAgICBhY3Rpb246ICdjb21wbGV0ZScsCiAgICB9CiAgfTsKfQoKc2NvcGUuZ29vZ2xlID0gc2NvcGUuZ29vZ2xlIHx8IHt9OwpzY29wZS5nb29nbGUuY29sYWIgPSBzY29wZS5nb29nbGUuY29sYWIgfHwge307CnNjb3BlLmdvb2dsZS5jb2xhYi5fZmlsZXMgPSB7CiAgX3VwbG9hZEZpbGVzLAogIF91cGxvYWRGaWxlc0NvbnRpbnVlLAp9Owp9KShzZWxmKTsK", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 72} outputId="67d702f1-a95f-455f-e9b9-4a7de0f650a3" # load data uploaded = files.upload() # + id="l7-CmQt1if13" #Read CSV Input = read_csv(io.BytesIO(uploaded['PETR4_INPUT.csv']), index_col = 'Date') Train = Input[:int(0.7*len(Input))].copy() Test = Input[int(0.7*len(Input)):].copy() # + id="GFbOWGdxjHxy" # Put your inputs into a single list input_cols = ['LOG_RETURN','VOLUME','MA', 'ADX', 'MACD', 'MFI'] Train['single_input_vector'] = Train[input_cols].apply(tuple, axis=1).apply(list) Test['single_input_vector'] = Test[input_cols].apply(tuple, axis=1).apply(list) # + id="GdhrIyRKgBPH" # Double-encapsulate list so that you can sum it in the next step and keep time steps as separate elements Train['single_input_vector'] = Train.single_input_vector.apply(lambda x: [list(x)]) Test['single_input_vector'] = Test.single_input_vector.apply(lambda x: [list(x)]) # + id="3MfD1medgjLa" #Include Output output_cols = ['OUTPUT'] 
Train['output_vector'] = Train[output_cols].apply(tuple, axis=1).apply(list) Test['output_vector'] = Test[output_cols].apply(tuple, axis=1).apply(list) # + id="ywtmiVBKLNt_" # Extract your training data X_train_init = np.asarray(Train.single_input_vector) # Use hstack to and reshape to make the inputs a 3d vector X_train = np.hstack(X_train_init).reshape(len(Train),1,len(input_cols)) y_train = np.hstack(np.asarray(Train.OUTPUT)).reshape(len(Train),len(output_cols)) # + id="nfOZQ2iGMX7R" # Extract your training data X_test_init = np.asarray(Test.single_input_vector) # Use hstack to and reshape to make the inputs a 3d vector X_test = np.hstack(X_test_init).reshape(len(Test),1,len(input_cols)) y_test = np.hstack(np.asarray(Test.OUTPUT)).reshape(len(Test),len(output_cols)) # + id="AC4YCIc6M7YZ" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="042f7b50-87ca-49d1-afe6-70db695a2b9b" print(X_train.shape, y_train.shape, X_test.shape, y_test.shape) # + id="F28uZVdC1Pg0" preds = [] for i in range(10): model = Sequential() model.add(LSTM(8,input_shape=(1, 6),return_sequences=True)) model.add(LSTM(4,return_sequences=True)) model.add(LSTM(2,return_sequences=False)) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, y_train, epochs=750, batch_size=100,verbose=0,shuffle=True) y_pred = model.predict_classes(X_test) clas =classification_report(y_test, y_pred, output_dict=True) preds.append(clas) # + id="Bs35V9ebxcQJ" list_of_df = list() for df in preds: list_of_df.append(pd.DataFrame(df).transpose()) # + id="4MLBK6aLwRju" Final_df = sum(list_of_df)/10 # + id="kYWNWTnvx3EY" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="f8ba357e-37d3-4400-8938-17b48b1c9d36" Final_df # + id="Dm9gYvpOx4PZ" 080/", "height": 204} id="1zOnvFC27gTz" outputId="958f635e-b707-46e9-e847-b60464f6d659" # 3 df = 
pd.read_csv(io.StringIO(uploaded['insta_comments.csv'].decode('utf-8'))) df = df.fillna(df.mean()) df.head() # + colab={"base_uri": "https://localhost:8080/"} id="o3jnG0i58mAE" outputId="9d56eabf-ebae-42b5-8b65-3e7b8dfad9a3" import nltk nltk.download('stopwords') def remove_stopwords(str_x): words = str_x.split(' ') neutral_words = ['edit', 'got', 'thing', 'want', 'make', 'is', 'do'] new_words = list() for word in words: if word not in stopwords.words('english') or word not in neutral_words: new_words.append(word) return ' '.join(new_words) df["comment_text"] = df["comment_text"].map(lambda x: re.sub(r'[^\w]', ' ', x)) df["comment_text"] = df["comment_text"].map(lambda x: x.lower()) df["comment_text"] = df["comment_text"].map(remove_stopwords) # + colab={"base_uri": "https://localhost:8080/"} id="9su_qy1y9rpJ" outputId="8586a18a-927b-4ec6-ee3e-35c160784f63" vectorizer = TfidfVectorizer() X = vectorizer.fit_transform(df["comment_text"].values).toarray() Y = df.iloc[:, 3:].values num_samples = X.shape[0] train_samples = int(0.75*num_samples) indexes = np.arange(num_samples) random.shuffle(indexes) X, Y = X[indexes], Y[indexes] x_train, y_train = X[:train_samples, :], Y[:train_samples] x_test, y_test = X[train_samples:, :], Y[train_samples:] print(x_train.shape, y_train.shape) print(x_test.shape, y_test.shape) # + colab={"base_uri": "https://localhost:8080/"} id="SdPQ33NP9z7p" outputId="fde7756d-840a-4e90-bc1f-5a29fa5668a2" inputs = keras.Input(shape=(9372, 1)) x = keras.layers.LSTM(128)(inputs) outputs = keras.layers.Dense(6, activation="softmax")(x) rnn_model = keras.Model(inputs=inputs, outputs=outputs, name="rnn_model") rnn_model.summary() # + colab={"base_uri": "https://localhost:8080/"} id="23npkfQ392Ul" outputId="6fb40431-63b2-4e66-ddd7-774ed5591d5f" rnn_model.compile( loss=keras.losses.CategoricalCrossentropy(from_logits=True), optimizer=keras.optimizers.RMSprop(), metrics=["accuracy"], ) rnn_model.fit(x_train, y_train, batch_size=4, epochs=1, 
validation_split=0.2) # + colab={"base_uri": "https://localhost:8080/"} id="Zsazs05m-CGs" outputId="13606fbd-6c04-4d48-fd42-dfa87a617dcb" test = rnn_model.evaluate(x_test, y_test, verbose=2) print("Loss test:", test[0]) print("Accuracy test:", test[1])
13,408
/main_FIR.ipynb
4902312c3acca6a6f900d5921b7eeea57e9f01f4
[]
no_license
NaOsugi1987/EEG-ECoG
https://github.com/NaOsugi1987/EEG-ECoG
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
9,814
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # FIR EEG-ECoG 共通 EEG 予測 共通 # # 1. FIR 0, IIR 1, LSTM 2 # 2. EEG-ECoG間でフィルター共通 0, バラバラ 1 # 3. 予測対象 EEG 0, ECoG 1 # 4. 予測の仕方 共通 0, NNをわける(フォルター特性が変わる) 1 # # # + # -*- coding: utf_8 -*- # %matplotlib inline import matplotlib.pyplot as plt import seaborn import copy import json import pandas as pd import numpy as np import datetime import math import time import chainer from chainer import cuda, Function, gradient_check, utils, Variable from chainer import optimizers, serializers from chainer import Link, Chain, ChainList import chainer.functions as F import chainer.links as L from scipy import signal import pickle import sys import random import h5py import fir_class # + test=h5py.File('Ma/DeepANES.mat') ECoG = np.array(test['ECoG']) EEG = np.array(test['EEG']) EEG = np.delete(EEG, 14, axis=1) EEG = EEG.astype('float32') ECoG = ECoG.astype('float32') # + order = 5 time_range = 100 train_time = [0, time_range] hamming = signal.hamming(train_time[1]-train_time[0]) #training ぶんだけにする #EEG_t = np.zeros((EEG.shape[0]-order+1, EEG.shape[1], order)) #ECoG_t = np.zeros((ECoG.shape[0]-order+1, ECoG.shape[1], order)) EEG_t = np.zeros((train_time[1]-train_time[0], EEG.shape[1], order)) ECoG_t = np.zeros((train_time[1]-train_time[0], ECoG.shape[1], order)) EEG_tmp = EEG[train_time[0]:train_time[1], :] *np.array([hamming]*EEG.shape[1]).T ECoG_tmp = ECoG[train_time[0]:train_time[1], :] *np.array([hamming]*ECoG.shape[1]).T for t in range(order): for ch in range(EEG.shape[1]): #EEG_t[:,ch,t] = EEG[:EEG.shape[0]-order+1, ch] EEG_t[t:,ch,t] = EEG_tmp[t:, ch] for ch in range(ECoG.shape[1]): #ECoG_t[:,ch,t] = ECoG[:ECoG.shape[0]-order+1, ch] ECoG_t[t:,ch,t] = ECoG_tmp[t:, ch] EEG_t = Variable((EEG_t[train_time[0]:train_time[1], :, :]).astype('float32')) ECoG_t = 
Variable((ECoG_t[train_time[0]:train_time[1], :, :]).astype('float32')) # - def calNN(timeSpacePerceptron, EEG_t, ECoG_t, EEG_filtered_prev, ECoG_filtered_prev, loop=200): loss_history = np.zeros((loop, 1)) #optimizer = optimizers.RMSprop() optimizer = optimizers.AdaDelta() optimizer.setup(timeSpacePerceptron) optimizer.add_hook(chainer.optimizer.WeightDecay(0.05)) #optimizer.add_hook(chainer.optimizer.Lasso(0.5)) optimizer.add_hook(chainer.optimizer.GradientClipping(0.5)) #timeSpacePerceptron.zerograds() for i in range(loop): timeSpacePerceptron.zerograds() x1, x2, EEG_filtered_prev, ECoG_filtered_prev = timeSpacePerceptron(EEG_t, ECoG_t, EEG_filtered_prev, ECoG_filtered_prev, train=True) loss = F.mean_squared_error(x1, x2) loss.backward() optimizer.prepare() optimizer.update() loss_history[i] = loss.data return timeSpacePerceptron, loss_history, EEG_filtered_prev, ECoG_filtered_prev def loop_calNN(EEG_t, ECoG_t, data_type, loop=200, loop_num=20): EEG_filtered_prev = [Variable(np.zeros((time_range, 1)).astype('float32')) for j in range(EEG.shape[1])] #前の出力をいれるところ (RNN) ECoG_filtered_prev = [Variable(np.zeros((time_range, 1)).astype('float32')) for j in range(ECoG.shape[1])] #前の出力をいれるところ saved_data = dict() outout = [] out_loss_history = [] for n in range(loop_num): out = [] loss_history = np.zeros((loop, 1)) timeSpacePerceptron = fir_class.__dict__["TimeSpacePerceptron{0}".format(data_type)](time_range) timeSpacePerceptron, loss_history, EEG_filtered_prev, ECoG_filtered_prev = calNN(timeSpacePerceptron, EEG_t, ECoG_t, EEG_filtered_prev, ECoG_filtered_prev, loop=loop) out_loss_history.append(loss_history) outout.append(timeSpacePerceptron) saved_data['loss_history'] = out_loss_history saved_data['NN'] = outout return saved_data # + saved_data = dict() loop = 50 loop_num = 1 st = time.time() # channle をわけない data_type ='0000' saved_data[data_type] = loop_calNN(EEG_t, ECoG_t, data_type, loop=loop, loop_num=loop_num) print(time.time()-st) data_type ='0100' 
saved_data[data_type] = loop_calNN(EEG_t, ECoG_t, data_type, loop=loop, loop_num=loop_num) print(time.time()-st) data_type ='0010' saved_data[data_type] = loop_calNN(EEG_t, ECoG_t, data_type, loop=loop, loop_num=loop_num) print(time.time()-st) data_type ='0110' saved_data[data_type] = loop_calNN(EEG_t, ECoG_t, data_type, loop=loop, loop_num=loop_num) print(time.time()-st) # + # channle をわける # EEG data_type ='0001' tmp_data = [] for ch in range(EEG.shape[1]): tmp_data.append(loop_calNN(EEG_t[:, ch, :], ECoG_t, data_type, loop=loop, loop_num=loop_num)) saved_data[data_type] = tmp_data print(time.time()-st) data_type ='0101' tmp_data = [] for ch in range(EEG.shape[1]): tmp_data.append(loop_calNN(EEG_t[:, ch, :], ECoG_t, data_type, loop=loop, loop_num=loop_num)) saved_data[data_type] = tmp_data print(time.time()-st) # ECoG data_type ='0011' tmp_data = [] for ch in range(ECoG.shape[1]): tmp_data.append(loop_calNN(EEG_t, ECoG_t[:, ch, :], data_type, loop=loop, loop_num=loop_num)) saved_data[data_type] = tmp_data print(time.time()-st) data_type ='0111' tmp_data = [] for ch in range(ECoG.shape[1]): tmp_data.append(loop_calNN(EEG_t, ECoG_t[:, ch, :], data_type, loop=loop, loop_num=loop_num)) saved_data[data_type] = tmp_data print(time.time()-st) # - pickle.dump(saved_data, open('result_FIR_{0}{1}.pkl'.format(loop, loop_num), 'wb')) # + #train_time[1]-train_time[0] # + #eeg_reconstruct, eeg_filtered = outout[2][99](EEG_t, ECoG_t, train=False) # + #for i in range(16): # print(np.corrcoef(eeg_reconstruct.data[:,i], eeg_filtered.data[:,i])[0, 1]) # -
5,874
/Dates_and_Times/calendar - Work with Dates.ipynb
e11efb8f79e813094dc130d7913b378ec65eabc5
[]
no_license
alvinstars/Py3MOTW_Practice
https://github.com/alvinstars/Py3MOTW_Practice
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
12,107
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # calendar - Work with Dates # # Purpose: The calendar module implements classes for working with dates to manage year/month/week oriented values. # # The calendar module defines the Calendar class, which encapsulates calculations for values such as the dates of the weeks in a given month or year. In addition, the TextCalendar and HTMLCalendar classes can produce pre-formatted output. # ## Formatting Examples # # The prmonth() method is a simple function that produces the formatted text output for a month. # # The example configures TextCalendar to start weeks on Sunday, following the American convention. The default is to use the European convention of starting a week on Monday. # + # calendar_textcalendar.py import calendar c = calendar.TextCalendar(calendar.SUNDAY) c.prmonth(2017,4) # - # A similar HTML table can be produced with HTMLCalendar and formatmonth(). The rendered output looks roughly the same as the plain text version, but is wrapped with HTML tags. Each table cell has a class attribute corresponding to the day of the week, so the HTML can be styled through CSS. # # To produce output in a format other than one of the available defaults, use calendar to calculate the dates and organize the values into week and month ranges, then iterate over the result. The weekheader(), monthcalendar(), and yeardays2calendar() methods of Calendar are especially useful for that. # # Calling yeardays2calendar() produces a sequence of “month row” lists. Each list includes the months as another list of weeks. The weeks are lists of tuples made up of day number (1-31) and weekday number (0-6). Days that fall outside of the month have a day number of 0. # # Calling yeardays2calendar(2017, 3) returns data for 2017, organized with three months per row. 
# + # calendar_yeardays2calendar.py import calendar import pprint cal = calendar.Calendar(calendar.SUNDAY) cal_data = cal.yeardays2calendar(2017,3) print('len(cal_data) :', len(cal_data)) top_months = cal_data[0] print('len(top_months) :', len(top_months)) first_month = top_months[0] print('len(first_month) :', len(first_month)) print('first_month:') pprint.pprint(first_month, width=65) # - # This is equivalent to the data used by formatyear(). # + # calendar_formatyear.py import calendar cal = calendar.TextCalendar(calendar.SUNDAY) print(cal.formatyear(2017, 2, 1, 1, 3)) # - # The day_name, day_abbr, month_name, and month_abbr module attributes useful for producing custom formatted output (i.e., to include links in the HTML output). They are automatically configured correctly for the current locale. # ## Locales # # To produce a calendar formatted for a locale other than the current default, use LocaleTextCalendar or LocaleHTMLCalendar. # # The first day of the week is not part of the locale settings, and the value is taken from the argument to the calendar class just as with the regular TextCalendar. # + # calendar_locale.py import calendar c = calendar.LocaleTextCalendar(locale='en_US') c.prmonth(2017, 4) print() c = calendar.LocaleTextCalendar(locale='fr_FR') c.prmonth(2017, 4) # - # ## Calculating Dates # # Although the calendar module focuses mostly on printing full calendars in various formats, it also provides functions useful for working with dates in other ways, such as calculating dates for a recurring event. For example, the Python Atlanta User’s Group meets on the second Thursday of every month. To calculate the dates for the meetings for a year, use the return value of monthcalendar(). # # Some days have a 0 value. Those are days of the week that overlap with the given month, but that are part of another month. 
# + # calendar_monthcalendar.py import calendar import pprint pprint.pprint(calendar.monthcalendar(2017,4)) # - # The first day of the week defaults to Monday. It is possible to change that by calling setfirstweekday(), but since the calendar module includes constants for indexing into the date ranges returned by monthcalendar(), it is more convenient to skip that step in this case. # # To calculate the group meeting dates for a year, assuming they are always on the second Thursday of every month, look at the output of monthcalendar() to find the dates on which Thursdays fall. The first and last week of the month are padded with 0 values as placeholders for the days falling in the preceding or subsequent month. For example, if a month starts on a Friday, the value in the first week in the Thursday position will be 0. # + # calendar_secondthursday.py import calendar year = 2017 # show every month for month in range(1, 13): # compute the dates for each week that overlaps the month c = calendar.monthcalendar(year, month) first_week = c[0] second_week = c[1] thrid_week = c[2] if first_week[calendar.THURSDAY]: meeting_date = second_week[calendar.THURSDAY] else: meeting_date = thrid_week[calendar.THURSDAY] print('{:>3}: {:>2}'.format(calendar.month_abbr[month], meeting_date))
5,291
/420/BonusAssignment.ipynb
d45071b28385895e2b337d1ec7365c5c3bedaf78
[ "MIT" ]
permissive
knightman/nu-msds
https://github.com/knightman/nu-msds
0
1
null
null
null
null
Jupyter Notebook
false
false
.py
572,449
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import re import math from collections import defaultdict from operator import itemgetter all_words = set() words_per_article = [] themes = [] stop_words = [""] # + run_control={"marked": false} with open("news_train.txt", "r", encoding="utf-8") as train_text: for number_of_rows, article in enumerate(train_text, 1): split = article.split('\t') theme = split[0] themes.append(theme) text = split[1] + " " + split[2] words = set(re.split("(\w[\w']*\w|\w)",text)) words_per_article.append(words) all_words |= words # - themes_enc = dict((k,i) for i,k in enumerate(set(themes))) themes_transformed = [themes_enc[theme] for theme in themes] themes_transformed[:5] columns = dict((k,i) for i,k in enumerate(all_words)) def get_most_common_item(array): count_dict = defaultdict(int) for key in array: count_dict[key] += 1 key, count = max(count_dict.items(), key=itemgetter(1)) return key def dot(A,B): return (sum(a*b for a,b in zip(A,B))) def cosine_similarity(a,b): return dot(a,b) / ( (dot(a,a) **.5) * (dot(b,b) ** .5) ) def euclidean_dist(words_train, words_test): #return len(words_train) + len(words_test) - 2 * len(words_train & words_test) train_row = [0] * len(columns) print("RAZ") for word_train in words_train: train_row[columns[word_train]] = 1 print("DVA") test_row = [0] * len(columns) for word_test in words_test: test_row[columns[word_test]] = 1 print("TRI!") return cosine_similarity(train_row, test_row)#math.sqrt(sum([(train_row[i]-test_row[i])**2 for i, _ in enumerate(train_row)]) ) def knn(k=1): y_test = [] with open("news_test.txt", "r", encoding="utf-8") as test_text: for test_article in test_text: split = article.split('\t') text = split[1] + " " + split[2] test_words = set(re.split("(\w[\w']*\w|\w)", text)) test_words &= all_words eucl_dist = 
[euclidean_dist(train_words, test_words) for train_words in words_per_article] sorted_eucl_dist = sorted(eucl_dist) closest_knn = [eucl_dist.index(sorted_eucl_dist[i]) for i in range(0, k)] if k > 1 else [eucl_dist.index(min(eucl_dist))] closest_labels_knn = [themes_transformed[x] for x in closest_knn] y_test.append(get_most_common_item(closest_labels_knn)) print(get_most_common_item(closest_labels_knn)) return y_test knn(k=100) erent cities/zip codes # # The data set is saved in plainntext format in the file CustomerReviews.txt which is available with this assignment. # # Different reviews, in the file, are separated by BLANK LINEs. # - # + import os import cPickle as pickle import pandas as pd import numpy as np from pandas import DataFrame, Series # + # 1. Open the fiel to read it f = open('CustomerReviews.txt') # 2. Read the entire file in one step as a single GIANT string raw_giant_string_data = f.read() # - # Verify the datatype you got? type(raw_giant_string_data) # 3. Split the GIANT string you read into LIST of lines raw_list_of_strings_data = raw_giant_string_data.splitlines() # Verify the datatype you got? type(raw_list_of_strings_data) # 4. Print the raw_list_data # How does the data look like in the raw_list_data? # Every Line will be represented as an item of type (String) in the resulting LIST raw_list_of_strings_data[:10] # + # Does the data list you got have similar patterns? # Did you get a list of strings, where the PATTERN of the string could be either # (a) 'parameterName: parameterValue' # (b) '' # Note that '' string is the BLANK line in the textfile # + # 5. Create the LOL - a List Of two-item Lists # So we can have something like this: # [['Product/ModelName', 'Samsung TV 60 LED'], # ['Product/Category', 'TV'], # ['Product/Price', '1200'], # ........................ # ........................ # - # + # Empty list: The [] characters denote an empty list. # Python evaluates zero-element collections to False. 
# In our data list, the blank line is represented by Empty List [] raw_list_of_lists_data = [] for row in raw_list_of_strings_data: if row : raw_list_of_lists_data = raw_list_of_lists_data + [row.split(': ')] # - type (raw_list_of_lists_data) raw_list_of_lists_data[:1] raw_list_of_lists_data[:32] # + # 6. Create a generator Method for our partitions of the reviews in the raw_list_of_lists_data def partition_generator(reviews_list, n): def reviews_partitions(): for i in xrange(0, len(reviews_list), n): yield reviews_list[i:i+n] return [i for i in reviews_partitions()] # + partitioned_list_of_reviews = partition_generator(raw_list_of_lists_data, 17) partitioned_list_of_reviews[:3] # + # 7. Create Column Headers # Read the FIRST list ONLY in the partitioned_list_of_reviews # and extract from it column_headers for our review_table # All other lists have the SAME header/pattern names keys = [row[0] for row in partitioned_list_of_reviews[0]] review_row = [row[1] for row in partitioned_list_of_reviews[0]] # - keys # + # 8. Create List of Doctionaries for the reviews number_of_rows = len(partitioned_list_of_reviews) number_of_cols = len(keys) list_of_reviews_dictionaries = [] for row in xrange(number_of_rows): review_dict = {} for col in xrange(number_of_cols): review_dict[keys[col]] = partitioned_list_of_reviews[row][col][1] list_of_reviews_dictionaries.append(review_dict) # + # Sanity Test # Verify you got healthy data by printing 3 reviews from teh doctionary list_of_reviews_dictionaries[:3] # - # Sanity Test # Verify you got the right number of reviews print number_of_rows # Sanity Test # Verify you got right number of fields for reviews print number_of_cols # Sanity Test # Verify you got the right number of reviews in the list of dictionaries len(list_of_reviews_dictionaries) # + # 9. Import pymongo package from pymongo import MongoClient # + # 10. 
Connect to MongoDB running instance # MongoClient defaults to the MongoDB instance that runs on the localhost interface on port 27017 client = MongoClient() # + # You can specify a complete MongoDB URI to define the connection: # client = MongoClient("mongodb://localhost:27017") # + # Databases hold groups of logically related collections. # MongoDB creates new databases implicitly upon their first use # + # 11. Create your database db = client.reviews # + active="" # A collection is a group of documents stored in MongoDB, and can be thought of as roughly the equivalent of a table in a relational database. Getting a collection in PyMongo works the same as getting a database: # # + active="" # Data in MongoDB is represented (and stored) using JSON-style documents. In PyMongo we use dictionaries to represent documents. # - reviews = db['reviews'] # + #list_of_reviews_dictionaries[0] # + active="" # To insert a document into a collection we can use the insert() method: # - # review_id = reviews.insert(list_of_reviews_dictionaries[1]) for review in list_of_reviews_dictionaries: review_id = reviews.insert(review) # + active="" # After inserting the first document, the reviews collection has actually been created on the server. We can verify this by listing all of the collections in our database # - db.collection_names(include_system_collections=False) # + active="" # The most basic type of query that can be performed in MongoDB is find_one(). This method returns a single document matching a query (or None if there are no matches). It is useful when you know there is only one matching document, or are only interested in the first match. Here we use find_one() to get the first document from the reviews collection: # - reviews.find_one() # + active="" # The result is a dictionary matching the one that we inserted previously. # + active="" # The returned document contains an "_id", which was automatically added on insert. 
# + active="" # find_one() also supports querying on specific elements that the resulting document must match. To limit our results to a document with retailer “Bestbuy” we do: # - reviews.find_one({"RetailerName": "Bestbuy"}) review_id # + active="" # We can also find a post by its _id, which in our example is an ObjectId: # - reviews.find_one({"_id": review_id}) # + active="" # To get more than a single document as the result of a query we use the find() method. find() returns a Cursor instance, which allows us to iterate over all matching documents. For example, we can iterate over every document in the reviews collection: # # + # uncomment the following lines if you want to print the 2000 reviews in the database #for review in reviews.find(): # print review # - type(reviews) # + # uncomment the following lines if you want to print ALL of Bestbuy reviews in the database # for review in reviews.find({"RetailerName": "Bestbuy"}): # print review reviews.find_one({"RetailerName": "Bestbuy"}) # + active="" # If we just want to know how many documents match a query we can perform a count() operation instead of a full query. We can get a count of all of the documents in a collection: # # - reviews.count() reviews.find({"RetailerName": "Bestbuy"}).count() # + active="" # To remove all documents from a collection, pass an empty query document {} to the remove() method. The remove() method does not remove the indexes. 
# + # Clean up the database while you are testing your code # db.reviews.remove({}) # - # sanity test reviews.count() # + active="" # Couple of status commands about the database/collection that you could use # - db.command({'dbstats': 1}) db.command({'collstats': 'reviews'}) # # +++++++++++++++++++++++++++++++++++++++++++ # # +++++++++++++++++++++++++++++++++++++++++++ # # +++++ Now lets execute different queries for our analysis ++++++ # # +++++++++++++++++++++++++++++++++++++++++++ # # +++++++++++++++++++++++++++++++++++++++++++ # ## Query 1: Print the list of reviews for the retailer Target for review in reviews.find({"RetailerName": "Target"}): print review # ## Query 2: Print a list of reviews where rating less than 3 and the list is sorted by ProductModelName # + # import re when you want to do regex query import re for review in reviews.find({ 'ReviewRating': {'$lt': '3'}} ).sort('ProductModelName') : print review # - # # Query 3: Get a list of reviews where reviewer age greater than 50 and the list is sorted by age for review in reviews.find({ 'UserAge': {'$gt': '50'}} ).sort('UserAge') : print review # # Query 4: Get a list of reviews where customers mentioned bluetooth or overheat in the review text and the list is sorted by ProductModelName # + for review in reviews.find({'ReviewText': re.compile('bluetooth|overheat')}).sort('ProductModelName') : print review # - # # Query 5: Get a list of reviews where customers mentioned bluetooth or overheat in the review text and gave a ReviewRating less than 3 # + for review in reviews.find({'ReviewText': re.compile('bluetooth|overheat'), 'ReviewRating': {'$lt': '3'}}) : print review # - # # Query 6: Get a list of 5 reviews where customers mentioned bluetooth or overheat in the review text and gave a ReviewRating less than 3 # + for review in reviews.find({'ReviewText': re.compile('bluetooth|overheat'), 'ReviewRating': {'$lt': '3'}}).limit(5) : print review # - # # +++++++++++++++++++++++++++++++++++++++++++ # # 
+++++++++++++++++++++++++++++++++++++++++++ # # +++++++++++++++++ Deliverable ++++++++++++++++++ # # +++++++++++++++++++++++++++++++++++++++++++ # # +++++++++++++++++++++++++++++++++++++++++++ # # After you create the reviews collection in MongoDB, Write Python code (one-cell-for-every-requirement)for the following requirements : # # Requirement #1: # + active="" # Get the list of reviewes for products sold in Chicago # - # # Requirement #2: # + active="" # Get the list of reviews for products sold in chicago and got review rating highr than 3 # - # # Requirement #3: # + active="" # Get the list of products sold at walmart and reviewed in zip code 90033 # - # # Requirement #4: # + active="" # Get the list of reviews that got rating less than 3 and wifi or battery issues/problems mentioned in the review text # - # # Requirement #5: # + active="" # Get the list of reviews got rating greater than 3 and the resulting list to be sorted by the ManufacturerName # -
12,896
/DataScienceAcademy/PythonFundamentos/Cap08/Notebooks/DSA-Python-Cap08-08-Seaborn.ipynb
773cbbbe2e75a06de9a932781c3dce9338cbd9d7
[ "MIT" ]
permissive
andrecontisilva/Python-aprendizado
https://github.com/andrecontisilva/Python-aprendizado
0
0
MIT
2021-02-10T21:45:00
2021-02-10T21:26:08
Jupyter Notebook
Jupyter Notebook
false
false
.py
629,165
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Without baseline # + import gym import numpy as np import matplotlib.pyplot as plt from agent import Agent # %matplotlib inline #Setup env = gym.make('CartPole-v0') input_dim, output_dim = env.observation_space.shape[0], env.action_space.n agent = Agent(input_dim, output_dim) EPISODES = 1000 scores = [] for e in range(1,EPISODES+1): state = env.reset() state = np.reshape(state,[1, input_dim]) reward_sum = 0 done = False for time in range(500): action = agent.act(state) next_state, reward, done, _ = env.step(action) next_state = np.reshape(next_state, [1, input_dim]) agent.remember(state[0], action, reward) state = next_state reward_sum += reward if done: break agent.learn() scores.append(reward_sum) if e % 100 == 0: print('episode, reward = {}, {}'.format(e,reward_sum)) plt.plot(scores) # - # ### With baseline # + import gym import numpy as np import matplotlib.pyplot as plt from agent_with_baseline import Agent # %matplotlib inline #Setup env = gym.make('CartPole-v0') input_dim, output_dim = env.observation_space.shape[0], env.action_space.n agent = Agent(input_dim, output_dim) EPISODES = 1000 scores_baseline = [] for e in range(1,EPISODES+1): state = env.reset() state = np.reshape(state,[1, input_dim]) reward_sum = 0 done = False for time in range(500): action = agent.act(state) next_state, reward, done, _ = env.step(action) next_state = np.reshape(next_state, [1, input_dim]) agent.remember(state[0], action, reward) state = next_state reward_sum += reward if done: break agent.learn() scores_baseline.append(reward_sum) if e % 100 == 0: print('episode, reward = {}, {}'.format(e,reward_sum)) plt.plot(scores) plt.plot(scores_baseline) plt.legend(['regular', 'baseline']) rn summation def sum_dx(n,nodes,x): summation=0 for i in range(0,n): 
summation = summation + lagrange_coef_dx(i,nodes,n,x) return summation # + def lagrange_coef_dx_dx(j,nodes,n,x): summation1 = 0 for l in range(0,n): if l != j: summation2 = 0 for m in range(0,n): if m != l and m != j: product = 1 for k in range(0,n): if k != m and k != j and k != l: product = product * (x-nodes[k]) / (nodes[j] - nodes[k]) summation2 = summation2 + product / (nodes[j] - nodes[m]) summation1 = summation1 + np.sign(lagrange_coef_0(j,nodes,n,x)) * summation2 / (nodes[j] - nodes[l]) return summation1 def sum_dx_dx(n,nodes,x): summation=0 for i in range(0,n): summation = summation + lagrange_coef_dx_dx(i,nodes,n,x) return summation # - def newton_raphson(x0,nodes,eps,error,n): while(error>eps): error = np.absolute(x0 - (x0 - sum_dx(n,nodes,x0) / sum_dx_dx(n,nodes,x0))) x0 = x0 - sum_dx(n,nodes,x0) / sum_dx_dx(n,nodes,x0) return x0 # + n=50 plotting1 = [] plotting2 = [] for i in range(3,n): maximum = newton_raphson((2+(2-1/(i-1)))/2,equidistant_nodes(0,2,i),0.001,np.inf,i) delta_max = np.sum(np.absolute(lagrange_coef(equidistant_nodes(0,2,i),i,maximum))) plotting1 = plotting1 + [delta_max] plotting2 = plotting2 + [i] plt.title('$||L_n||$ in function of $n$') plt.xlabel('$n$') plt.ylabel('$||L_n||$') plt.plot(plotting2,plotting1,'blue',label="$||L_n||$") plt.plot(np.linspace(1,n),np.power(2,np.linspace(1,n)),'red',label="$2^n$") plt.yscale('log', basey=2) plt.legend() plt.show() # - # We clearly see that $||L_n|| \sim 2^n$. I used the Newton-Raphson method to maximize the $x$ of $\lambda_n(x)$. 
# # QUESTION 3 def fun_sin (n,x): return np.absolute(np.sin((n+1/2)*x)/(2*np.sin(x/2))) # + n=150 number_points = 10000 nodes = equidistant_nodes(0,np.pi,number_points) plotting1= [] plotting2 = [] for j in range(2,n): summation = 0 for i in range(1,number_points): if i == 1 or i == (number_points - 1): summation = summation + fun_sin(j,nodes[i]) * (np.pi/number_points)/2 else: summation = summation + fun_sin(j,nodes[i]) * (np.pi/number_points) plotting1 = plotting1 + [summation] plotting2 = plotting2 + [j] plt.title('$||S_n||$ in function of $n$') plt.xlabel('$n$') plt.ylabel('$||S_n||$') plt.plot(plotting2,plotting1,'blue',label="$||S_n||$") plt.plot(np.linspace(2,n),0.43*np.log2(np.linspace(2,n)),'red',label="$c*\log_2{n}$") plt.xscale('log', basex=2) plt.legend() plt.show() # - # So in the assumed dependance $||S_n||=c*\log_2{n}$, we see that $c\approx 0.43$, as in question $1$. # # QUESTION 4 # + def legendre_coef_0(n,x): a = 1 b = x c = 0 if n == 0: return 1.0 if n == 1: return x for i in range(1,n): c = ((2 * i + 1) * x * b - i * a) / (i + 1) a = b b = c return c def fun_abs_sum(n,x): summation = 0 for i in range(0,n+1): summation = summation + (i + 0.5) * legendre_coef_0(i,x) return np.absolute(summation) # + n=50 number_points = 10000 nodes = equidistant_nodes(-1,1,number_points) plotting1= [] plotting2 = [] for j in range(2,n): summation = 0 for i in range(1,number_points): if i == 1 or i == (number_points - 1): summation = summation + fun_abs_sum(j,nodes[i]) * (2/number_points)/2 else: summation = summation + fun_abs_sum(j,nodes[i]) * (2/number_points) plotting1 = plotting1 + [summation] plotting2 = plotting2 + [j] plt.title('$||S_n||$ in function of $n$') plt.xlabel('$n$') plt.ylabel('$||S_n||$') plt.plot(plotting2,plotting1,'blue',label="$||S_n||$") plt.plot(np.linspace(2,n),1.55*np.power(np.linspace(2,n),1/2),'red',label="$c*\sqrt{n}$") plt.legend() plt.show() # - # So in the assumed dependance $||S_n||=c*\sqrt{n}$, we see that $c\approx 0.55$. 
# # QUESTION 5 def lagrange_coef_0(j,nodes,n,x): product = 1 for i in range(0,n): if i != j: product = product * (x - nodes[i]) / (nodes[j] - nodes[i]) return product lagrange_coef_0(4,equidistant_nodes(0,1,11),11,0.67) # + most=20 number_points = 10000 nodes = equidistant_nodes(0,1,number_points) plotting1= [] plotting2 = [] for n in range(1,most): summation = 0 for k in range(0,n+1): integral = 0 for i in range(0,number_points): if i == 0 or i == (number_points - 1): integral = integral + lagrange_coef_0(k,nodes,n,nodes[i]) * (1/number_points)/2 else: integral = integral + lagrange_coef_0(k,nodes,n,nodes[i]) * (1/number_points) summation = summation + np.absolute(integral) plotting1 = plotting1 + [summation] plotting2 = plotting2 + [n] plt.title('$||Q_n||$ in function of $n$') plt.xlabel('$n$') plt.ylabel('$||Q_n||$') plt.plot(plotting2,plotting1,'blue',label="$||Q_n||$") plt.plot(np.linspace(0,n),np.linspace(0,n),'red',label="$n$") plt.legend() plt.show() # -
7,420
/ch05/.ipynb_checkpoints/05_01_BinarySearch-checkpoint.ipynb
385c25b6b9841b2e07aee5dcb2b2928d83617de1
[]
no_license
lijingxin6/Algorithms
https://github.com/lijingxin6/Algorithms
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
14,174
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3.6 (tensorflow) # language: python # name: tensorflow # --- # + [markdown] run_control={"frozen": false, "read_only": false} # # Binary Search # - # In this lecture, you will learn: # # <a href='#Ex1'>Ex.1 Binary Search Review</a> # # <a href='#Ex2'>Ex.2 Binary Search Template</a> # # <a href='#Ex3'>Ex.3 Find Min in Rotated Sorted Array</a> # # <a href='#Ex4'>Ex.4 Find in Rotated Array</a> # # <a href='#Ex5'>Ex.5 Search Insert Position </a> # # <a href='#Ex6'>Ex.6 Find Range</a> # # <a href='#Ex7'>Ex.7 Search in Sorted Array with Empty Strings</a> # # <a href='#Ex8'>Ex.8 Search 1st Position of element in Infinite Array</a> # + [markdown] run_control={"frozen": false, "read_only": false} # ### Ex.1: Binary Search Review # # Find 1st position of target, return -1 if not found # # How about last position, any position? # + [markdown] run_control={"frozen": false, "read_only": false} # Binary Search (iterative) # + run_control={"frozen": false, "read_only": false} def bi_search_iter(alist, item): left, right = 0, len(alist) - 1 while left <= right: mid = (left + right) // 2 if alist[mid] < item: left = mid + 1 elif alist[mid] > item: right = mid - 1 else: # alist[mid] = item return mid return -1 # + run_control={"frozen": false, "read_only": false} num_list = [1,2,3,5,7,8,9] print(bi_search_iter(num_list, 7)) print(bi_search_iter(num_list, 4)) # - # ### Ex.2: Binary Search Template # # Remember? ** Template! 
** def binarysearch(alist, item): if len(alist) == 0: # 注意边界条件,判断input size是否为0,如果是0,return -1。 这行程序最好写上,对运行时间无影响。 return -1 left, right = 0, len(alist) - 1 while left + 1 < right: # 跳出循环:① LR相邻的时候 ② L和R指向同一个 ③ R L mid = left + (right - left) // 2 if alist[mid] == item: right = mid # 不是return而是直接等于mid, 因为如果要找第一个肯定在mid前面,不可能在mid后面,所以让right=mid 而不是left elif alist[mid] < item: left = mid # 不是 mid + 1 elif alist[mid] > item: right = mid # 不是 mid - 1 if alist[left] == item: # 跳出while循环了。所以 可能 L 是 2 return left if alist[right] == item: # 可能 R 是 2 return right return -1 # ### Ex.3 Find Min in Rotated Sorted Array # # Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand. Find the minimum element. # + # O(nlgn) def searchlazy(alist): alist.sort() return alist[0] # O(n) def searchslow(alist): mmin = alist[0] for i in alist: mmin = min(mmin, i) return mmin # O(lgn) def search(alist): if len(alist) == 0: # ① return -1 left, right = 0, len(alist) - 1 while left + 1 < right: # ② if (alist[left] < alist[right]): # ③ return alist[left]; mid = left + (right - left) // 2 if (alist[mid] >= alist[left]): # 前半部分排好序了 left = mid + 1 # 去后半部分找 else: # 后半部分找不到,则去前半部分找 right = mid return alist[left] if alist[left] < alist[right] else alist[right] # ④ 看L小还是R小 # - # ### Ex.4 Find in Rotated Array # O(lgn) def search(alist, target): if len(alist) == 0: return -1 left, right = 0, len(alist) - 1 while left + 1 < right: mid = left + (right - left) // 2 if alist[mid] == target: return mid if (alist[left] < alist[mid]): if alist[left] <= target and target <= alist[mid]: right = mid else: left = mid else: if alist[mid] <= target and target <= alist[right]: left = mid else: right = mid if alist[left] == target: return left if alist[right] == target: return right return -1 # ### Ex.5 Search Insert Position # # Given a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order. 
You may assume no duplicates in the array def search_insert_position(alist, target): if len(alist) == 0: return 0 left, right = 0, len(alist) - 1 while left + 1 < right: mid = left + (right - left) // 2 if alist[mid] == target: return mid if (alist[mid] < target): left = mid else: right = mid if alist[left] >= target: return left if alist[right] >= target: return right return right + 1 # 都不是的话那就在最后面加上 # ### Ex.6 Find the starting and ending position of a given target value. # + def search_range(alist, target): if len(alist) == 0: return (-1, -1) lbound, rbound = -1, -1 # search for left bound left, right = 0, len(alist) - 1 while left + 1 < right: mid = left + (right - left) // 2 if alist[mid] == target: right = mid elif (alist[mid] < target): left = mid else: right = mid if alist[left] == target: lbound = left elif alist[right] == target: lbound = right else: return (-1, -1) # search for right bound left, right = 0, len(alist) - 1 while left + 1 < right: mid = left + (right - left) // 2 if alist[mid] == target: left = mid elif (alist[mid] < target): left = mid else: right = mid if alist[right] == target: rbound = right elif alist[left] == target: rbound = left else: return (-1, -1) return (lbound, rbound) # - # ### Ex.7 Search in Sorted Array with Empty Strings # # Given a sorted array of strings which is interspersed with empty strings, write a meth­od to find the location of a given string. 
# 最差O(n) 可以用in def search_empty(alist, target): if len(alist) == 0: return -1 left, right = 0, len(alist) - 1 while left + 1 < right: while left + 1 < right and alist[right] == "": # 从右边开始找 找到第一个非空字符串 right -= 1 if alist[right] == "": right -= 1 if right < left: return -1 mid = left + (right - left) // 2 # 在上面的基础上在去求mid point while alist[mid] == "": #中间为空字符串,会往后面去找 mid += 1 if alist[mid] == target: return mid if alist[mid] < target: left = mid + 1 else: right = mid - 1 if alist[left] == target: return left if alist[right] == target: return right return -1 # ### Ex.8 Search 1st Position of element in Infinite Array def search_first(alist): left, right = 0, 1 while alist[right] == 0: left = right right *= 2 if (right > len(alist)): right = len(alist) - 1 break return left + search_range(alist[left:right+1], 1)[0] alist = [0, 0, 0, 0, 0, 1] r = search_first(alist)
7,427
/first_non.ipynb
2b9420bf14e512d17c8046e0bc1c33e99638cd12
[]
no_license
songxiang666/PostGraduate
https://github.com/songxiang666/PostGraduate
3
0
null
null
null
null
Jupyter Notebook
false
false
.py
313,068
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd from matplotlib import pyplot as plt # %matplotlib inline # - data = open('three_kind_andMark/G_in/first_non.csv', encoding = 'utf-8') df = pd.read_csv(data, skiprows = 0) print('data size:', df.shape) df.head() # ### 处理数据进入CCA data1 = open('cleaned_data/G-ex/thir_non_excludeG_Cleaned.csv', encoding = 'utf-8') df1 = pd.read_csv(data1, skiprows = 0) df1.head() #按照X进行排序 处理进入CCA之前的数据 a = df1.sort_values(by = 'X') a.head() #保存数据 a.to_csv('cleaned_data/CCA/thir_non_excludeG_Cleaned.csv', index = False, header = True, encoding = 'utf-8') #输出各个类别的数量 import os filenames = os.listdir('cleaned_data/G-ex') for filename in filenames: data1 = open('cleaned_data/G-ex' + filename, encoding = 'utf-8') df1 = pd.read_csv(data1, skiprows = 0) # ID:病例编号 # X:分类目标 # 其余特征表示一线标志物 # # 数据的直观分布 # # ## 数据构成比例 # # ### 时间与患病数 df['Year'] = df['ID']. 
apply(lambda row : row.split('-')[0] ) a = df[['Year', 'X', 'ID']].groupby(['X']).count() a plt.style.use('seaborn') b = a.unstack() b (b.T).plot(kind = 'pie', subplots = True) # ## 特征的分布规律 #复制除编号以及病状的所有特征 features = [x for x in df.columns if x not in['ID', 'X'] ] plt.figure() ax = plt.subplot(111) df.hist(column = features, bins = 20, ax = ax) # 正相关的特征分布 df.hist(column = ['','',''], bins = 20) # 负相关的特征分布 df.hist(column = ['','',''], bins = 20) # ## 数据的正态化变换 # # # T-01和T-04两个特征的分布图不符合正态性 # + df['T-04'].plot(kind = 'kde', xlim = [0,80], label = 'T-04-old') np.log10(df['T-04']).plot(kind = 'kde', xlim = [0,80], label = 'T-04-new') plt.legend() # - np.log10(df['T-04']).hist(bins = 25) # + df['T-01'].plot(kind = 'kde', xlim = [0,50], label = 'T-01-old') np.log10(df['T-01']).plot(kind = 'kde', xlim = [0,50], label = 'T-01-new') plt.legend() # - np.log10(df['T-01']).hist(bins = 25) # # 数据的浓缩 # # 五数概括法: # 最小值、最大值、中位数、上四分位数、下四分位数 print('数据的概况总览') df.describe() # ## 样本的矩 # # # 1阶原点矩——数学期望、均值 # 2阶中心矩——方差 # 3阶中心矩——偏度系数 # 4阶中心矩——峰度系数 df['T-05'].describe() print('以特征T-05为例:') print('均值 = ', df['T-05'].mean()) print('方差 = ', df['T-05'].var()) print('偏度 = ', df['T-05'].skew()) print('峰度 = ', df['T-05'].kurt()) # ## 箱型图和异常值 import seaborn as sns # 使用seaborn的箱线图展示 sns.boxplot(x = 'X', y = 'T-05', data = df) print('同样以T-05为例,看各分类的箱型图:') # 异常值分析: # 1) 大于Q3+1.5*IQR或小于Q1-1.5IQR的值,其中IQR为四分位距 # 2) 3西格玛原则,数值分布在(μ—3σ,μ+3σ)中的概率为0.9974 # ## 数据相关性 df[['X', 'T-05']].plot(x = 'X', y = 'T-05', kind = 'scatter') # 患病和特征T-05不具有相关性 df[['X','T-01']].plot(x = 'X', y = 'T-01', kind = 'scatter') df[['X','T-04']].plot(x = 'X', y = 'T-04', kind = 'scatter') df[['T-01', 'T-05']].plot(x='T-01', y='T-05', c=df['X'], kind='scatter') print('T-01和T-05线性无关') # T-01、T-04、T-05、T-06两两之间线性无关 # ## 相关系数矩阵 # + #def process(x): # if 0<= x <= 30: # return 0 # elif 30< x < 60: # return 1 # else: # return 2 #df['T-05-new'] = df['T-05'].apply(lambda x : process(x)) #df['T-05-new'] # - # 皮尔逊相关系数: # 值域为[-1,1], 1表示完全正相关, 
-1表示完全负相关,0表示无关, 绝对值越大表明相关性越强 df.corr(method = 'spearman') corr_mat = df.corr(method = 'pearson') corr_mat['X'].abs().sort_values(ascending = False) #得到性别与其他特征的相关系数 倒序 # # 预处理和降维 # # ## 异常值的处理 # # ### 缺失值处理 print("观察缺失值的比例,以决定采用填补还是丢弃") df.isnull().describe() # 这里直接删除包含缺失的样本 # 实际应用中可能需要适合的fill none算法 df.dropna(inplace = True) #数据清洗 print('剩余样本:', df.shape) # ## 离群点处理 # # 这里直接用3西格玛原则删除了异常的年龄 # 实际应用中需要适合的outlier detect算法 # 3西格玛原则:|采样点与均值的差| > 3倍标准差 --》 则剔除 # 变形为: # Xi< X - 3σ or Xi > X + 3σ cols = df.columns for i in range(2,df.shape[1]): up_bound = df[cols[i]].mean() + 3*df[cols[i]].std() low_bound = df[cols[i]].mean() - 3*df[cols[i]].std() df = df[(df[cols[i]]<=up_bound) & (df[cols[i]]>=low_bound)] print("处理特征%s, 剩余样本:%d" % (cols[i],df.shape[0])) df.isnull().describe() # ## 数据降维 # # ### 主成分分析 # # #### to maximize Var(c1X1 + c2X2 + c3X3 + ...) 其中 c1^2 + c2^2 +....+ cp^2 = 1 # PCA算法流程: # 1) 0均值化:将X的每一维数据分别减去其平均值,即 X=X-Mean(X) # 2) 协方差矩阵:协方差矩阵C可以描述m维属性之间的相关关系,即 C=(X'X)/m # 3) 数据降维:协方差矩阵C是m*m阶,选取的特征向量矩阵P是m*k阶。通过 Y=XP 即得到投影后的k维样本。 from sklearn.decomposition import PCA # 直接调包实现了,手写也不复杂 features = [i for i in df.columns if i not in ['ID', 'X', 'Year']] #feature中去除了 'ID', 'X', 'Year' X, y = df[features], df['X'] #PCA算法所要保留下来的主成分个数 pca = PCA(n_components=4) pca.fit(X) #降维后 各主成分的方差值 print(pca.explained_variance_ratio_) #返回各主成分的方差百分比(贡献率) print(pca.explained_variance_) # n_component PCA算法中要保留的成分个数 pca = PCA(n_components=3) #训练模型 用数据X来训练 pca.fit(X) #返回降维后的数据 X_new = pca.transform(X) #画出散点图 plt.scatter(X_new[:, 0], X_new[:, 1], c=y, marker='o') # y = 0 /1 /2 plt.show() # # 参数检测和方差分析 # + from scipy.stats.mstats import kruskalwallis import scipy.stats as stats import numpy as np import scipy as sp #啥叫显著性差异 # p>0.05 表示差异不显著 # 0.01 < p < 0.05 表示差异性显著 # p< 0.01 表示差异性极显著 #三组非正太分布数据检验 def Kruskawallis_test(list1, list2, list3): # Perform the Kruskal-Wallis test,返回True表示有显著差异,返回False表示无显著差异 print("Use kruskawallis test:") h, p = kruskalwallis(list1, list2, list3) print("H value:",h) 
print("p value:",p) # Print the results if p<0.05: print('There is a significant difference.') return True else: print('No significant difference.') return False #两组非正态分布数据检验 有关于统计学的知识 def Mannwhitneyu(group1, group2): #以下处理sp版本问题 if np.int(sp.__version__.split('.')[1]) > 16: u, p_value = stats.mannwhitneyu(group1, group2, alternative='two-sided') else: u, p_value = stats.mannwhitneyu(group1, group2, use_continuity=True) p_value *= 2 # because the default was a one-sided p-value print(("Mann-Whitney test", p_value)) if p_value<0.05: print("there is significant difference") else: print("there is no significant difference") # - # ## 单因素方差分析 # + #检验T-06和患病是否有关 list_1 = df[df['T-06'] == 1]['X'].tolist() list_2 = df[df['T-06'] == 2]['X'].tolist() #独立T检验 print(stats.ttest_ind(list_1, list_2)) #Mannwhitneyu 检验 print(Mannwhitneyu(list_1, list_2)) # - # 结果说明,T-06和患病与否有显著性相关 # ## 多因素方差分析 # 连续型的属性,需要采样后进行多因子的方差分析 # 例如T-05,可以分成多段bins与其他因素结合 # + def process(x): if 0<= x <=30: return 0 elif 30< x< 60: return 1 else: return 2 df['T-05-new'] = df['T-05'].apply(lambda x : process(x)) df['T-05-new'].head() # + list_T05_0 = df[df['T-05-new']==0]['X'].tolist() list_T05_1 = df[df['T-05-new']==1]['X'].tolist() list_T05_2 = df[df['T-05-new']==2]['X'].tolist() list_group = [list_T05_0, list_T05_1, list_T05_2] #Kruskawallis_test 三组非正太分布数据检验 Kruskawallis_test(list_T05_0, list_T05_1, list_T05_2) # - # 结果说明,T-05和患病与否具有显著相关 # # 完整的处理流程 # ## 数据分析 # # ## 数据清洗 # ### 删除缺失> 15%的列 isnull_df = df.isnull().describe() isnull_df isnull_df.loc[:, isnull_df.loc['freq'] < isnull_df.loc['count']*0.85] # + #删除缺失值比例大于0.85的列 #df.drop(columns = [ ''] ,inplace = True) #df.head() # - # ### 删除相关性<0.01的列 corr_mat = df.corr(method = 'pearson') corr_mat['X'].abs().sort_values(ascending = False) corr = corr_mat['X'].abs() print(corr[corr < 0.01].index) df.drop(columns = ['Year'] , inplace = True) df.head() # ## 数据预处理 # # ### 删除6sigma离群点 print('原始样本', df.shape) des_df = df.describe() des_df # 
离群点大于Q3+1.5*IQR或小于Q1-1.5IQR的值,其中IQR为四分位距 箱线图中上限和下限 des_df.loc['25%', 'T-05'] # + #删除离群点 改为正常值 #得到目前为止 经过清洗后的数据的特征名 除 ‘ID’ 'X':病情 features = [x for x in df.columns if x not in['ID', 'X']] #提取出除X外 与x相关系数大于0.1的索引 importances = [x for x in corr[corr>0.1].index.tolist() if x != 'X'] for x in features: # upper_bound = des_df.loc['75%', x] + 1.5*(des_df.loc['75%', x]-des_df.loc['25%', x]) # lower_bound = des_df.loc['25%', x] - 1.5*(des_df.loc['75%', x]-des_df.loc['25%', x]) upper_bound = des_df.loc['mean', x] + 6*des_df.loc['std', x] lower_bound = des_df.loc['mean', x] - 6*des_df.loc['std', x] if x in importances: df = df[(df[x]<=upper_bound) & (df[x]>=lower_bound)] else: df.loc[df[x]>upper_bound, x] = upper_bound df.loc[df[x]<lower_bound, x] = lower_bound print("剩余样本:", df.shape) # - decf = df.isnull().describe() decf # ### 分段mean 补全缺失值 df[['X', 'ID']].groupby('X').count() # 分段 按x=0, 1, 2不同情况 把每列进行均值填充 print(df['X'].unique().tolist()) for x in df['X'].unique().tolist(): #复制所有df['X'] = 当前遍历值 的行 tmp = df[df['X']==x].copy(deep=False) #tmp.fillna(tmp.mean(axis=0, skipna=True), inplace=True) #print(tmp.mean()) #以'X' 为分组, NAN均以每列的平均值填充 df.loc[df['X']==x] = df[df['X']==x].fillna(tmp.mean()) df.isnull().describe() # ### 数据标准化 features = [x for x in df.columns if x not in['ID', 'X']] for x in features: delta = 0.000001 df[x] = (df[x] - df[x].mean()) / (df[x].std() + delta) df.describe() # 计算协方差 cov_mat = df.cov() cov_mat['X'].abs().sort_values(ascending = False) cov = cov_mat['X'] print('正向:', cov[cov > 0].index.tolist()) print('负向: ', cov[cov < 0].index.tolist()) # ### 保存cleaned数据 df.to_csv('first_non_Cleaned.csv', index = False, header = True, encoding = 'utf-8') df.head() # ## 数据建模 # # ### 训练逻辑回归LR模型 from sklearn.linear_model import LogisticRegressionCV,LinearRegression from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.linear_model.coordinate_descent import ConvergenceWarning #划分训练集与测试集 随机 target = 'X' features = 
[x for x in df.columns if x not in['ID', 'X']] X, Y = df[features], df[target]# X = dataSet and Y = labels X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0) # + #构建并训练模型 ## multi_class:分类方式选择参数,有"ovr(默认)"和"multinomial"两个值可选择,在二元逻辑回归中无区别 ## cv:几折交叉验证 ## solver:优化算法选择参数,当penalty为"l1"时,参数只能是"liblinear(坐标轴下降法)" ## "lbfgs"和"cg"都是关于目标函数的二阶泰勒展开 ## 当penalty为"l2"时,参数可以是"lbfgs(拟牛顿法)","newton_cg(牛顿法变种)","seg(minibactch随机平均梯度下降)" ## 维度<10000时,选择"lbfgs"法,维度>10000时,选择"cs"法比较好,显卡计算的时候,lbfgs"和"cs"都比"seg"快 ## penalty:正则化选择参数,用于解决过拟合,可选"l1","l2" ## tol:当目标函数下降到该值是就停止,叫:容忍度,防止计算的过多 lr = LogisticRegressionCV( multi_class="multinomial", fit_intercept=True, Cs=np.logspace(-2,2,20), cv=2, penalty="l2", solver="lbfgs", tol=0.01, max_iter = 1000, class_weight='balanced', ) re = lr.fit(X_train,Y_train) # - #模型效果获取 r = re.score(X_train,Y_train) print("R值(准确率):",r) print("参数:",re.coef_) print("截距:",re.intercept_) print("稀疏化特征比率:%.2f%%" %(np.mean(lr.coef_.ravel()==0)*100)) print("=========sigmoid函数转化的值,即:概率p=========") print(re.predict_proba(X_test)) #sigmoid函数转化的值,即:概率p # ### 模型的持久化 #模型的保存与持久化 from sklearn.externals import joblib # joblib.dump(ss,"logistic_ss.model") #将标准化模型保存 joblib.dump(lr,"first_non_logistic_lr.model") #将训练后的线性模型保存 # joblib.load("logistic_ss.model") #加载模型,会保存该model文件 joblib.load("first_non_logistic_lr.model") # ### 预测结果评价 # + import matplotlib as mpl #解决中文显示问题 mpl.rcParams['font.sans-serif']=[u'simHei'] mpl.rcParams['axes.unicode_minus']=False # + #预测 Y_predict = lr.predict(X_test) #画图对预测值和实际值进行比较 x = range(len(X_test)) plt.figure(figsize=(14,7),facecolor="w") plt.ylim(0,6) plt.plot(x,Y_test,"ro",markersize=8,zorder=3,label=u"Groud Truth") plt.plot(x,Y_predict,"go",markersize=14,zorder=2,label=u"Predict Value,$R^2$=%.3f" %lr.score(X_test,Y_test)) plt.legend(loc="upper left") plt.xlabel(u"sample",fontsize=18) plt.ylabel(u"label",fontsize=18) plt.title(u"Logistic Model",fontsize=20) plt.show() print("=============Y_test==============") 
print(Y_test.ravel()) print("============Y_predict============") print(Y_predict) # + from sklearn import metrics print("预测准确率:", metrics.accuracy_score(Y_test, Y_predict)) print("精度:", metrics.precision_score(Y_test, Y_predict, average="weighted")) print("召回率:", metrics.recall_score(Y_test, Y_predict, average="weighted")) # + from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import numpy as np # + #from sklearn.linear_model.coordinate_descent import ConvergenceWarning from sklearn import tree from matplotlib import pyplot as plt #划分训练集与测试集 随机 target = 'X' features = [x for x in df.columns if x not in['ID', 'X']] X, Y = df[features], df[target]# X = dataSet and Y = labels X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0) bestTestingScore = 0.0 bestDepth = 0 bestriterion = '' maxdepth = 40 depths = np.arange(2, maxdepth) criterions = ['gini','entropy'] for criterion in criterions: training_scores = [] testing_scores = [] for depth in depths: clf = tree.DecisionTreeClassifier(criterion = criterion ,max_depth = depth) clf.fit(X_train, Y_train) trSc = clf.score(X_train, Y_train) teSc = clf.score(X_test, Y_test) training_scores.append(trSc) testing_scores.append(teSc) if bestTestingScore< teSc: bestTestingScore = teSc bestDepth = depth bestriterion = criterion print('best score %.2f' % bestTestingScore) print('depth %d' % bestDepth) print('criterion %s' % bestriterion) # + from sklearn import datasets from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import LinearSVC from sklearn.svm import SVC target = 'X' features = [x for x in df.columns if x not in['ID', 'X']] X, Y = df[features], df[target]# X = dataSet and Y = labels X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0) svm_clf = Pipeline(( ('scaler', StandardScaler()), ('linear_svc', LinearSVC(C = 1, loss = 'hinge')) )).fit(X_train, 
Y_train) print('linear_svc training score:%f' % svm_clf.score(X_train, Y_train)) print('linear_svc testing score:%f' % svm_clf.score(X_test, Y_test)) rbf_kernel_svm_clf = Pipeline((('scaler', StandardScaler()), ('svm_clf', SVC(kernel = 'rbf', gamma = 5, C = 5 )) #当C为0.001 时 预测非1 )).fit(X_train, Y_train) print('rbf_kernel_svc training score:%f' % rbf_kernel_svm_clf.score(X_train, Y_train)) print('rbf_kernel_svc testing score:%f' % rbf_kernel_svm_clf.score(X_test, Y_test)) # - # ## CCA 特征 ## CCA 数据 没有 """ CCA_FILES = ["CCA.csv", "DMPCCA.csv", "LDCCA.csv", "LPCCA.csv", "rank-CCA.csv", "semi-CCA.csv"] for fi in CCA_FILES: df = pd.read_csv(open(fi, encoding='utf-8'), skiprows=0) #划分训练集与测试集 target = 't' features = [x for x in df.columns if x not in['t']] X, Y = df[features], df[target] X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0) lr = LogisticRegressionCV( multi_class="multinomial", fit_intercept=True, Cs=np.logspace(-2,2,20), cv=2, penalty="l2", solver="lbfgs", tol=0.01, max_iter = 1000, class_weight='balanced', ) re = lr.fit(X_train,Y_train) Y_predict = lr.predict(X_test) print("===== %s =====" % fi) print("训练准确率:", re.score(X_train,Y_train)) print("预测准确率:", metrics.accuracy_score(Y_test, Y_predict)) print("精度:", metrics.precision_score(Y_test, Y_predict, average="weighted")) print("召回率:", metrics.recall_score(Y_test, Y_predict, average="weighted")) print("==========") """
15,501
/Costa Rica Poverty/Costa Rica.ipynb
19e8680533fb6bc862a987dafa4db07f3d741c28
[ "MIT" ]
permissive
JinLi711/Predicting-House-Hold-Poverty-Levels-in-Costa-Rica
https://github.com/JinLi711/Predicting-House-Hold-Poverty-Levels-in-Costa-Rica
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
70,869
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # # # Project: Investigate a Gapminder Dataset # # ## Table of Contents # <ul> # <li><a href="#intro">Introduction</a></li> # <li><a href="#wrangling">Data Wrangling</a></li> # <li><a href="#eda">Exploratory Data Analysis</a></li> # <li><a href="#conclusions">Conclusions</a></li> # </ul> # <a id='intro'></a> # ## Introduction # > **The data set**: I choose a data set from the Gapminder foundation website, for my analysis, I choose six indicators the first is employment rate in the population aged 15+, the second one is child mortality under 5 per 100 born, the third is GDP per capita en-us dollar which is has been adjusted for inflation, the fourth is life expectancy in years, the fifth one is mean years in school for men and women aged between 15 and 34, the last one is population growth. My analysis is going to focus on the period 1991 to 2013. # # ## Questions # <ol> # <li> The first question I am going to ask how these indicators have changed over time for my home country Morroco, and my continent Africa, and in our world. # <li> The second question is how different regions in the world doing in some of these factors. # <li> The third question is there a relationship between some of these indicators? # </ol> #importing packages import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np import country_converter as pc # %matplotlib inline # <a id='wrangling'></a> # ## Data Wrangling # # > ** Clean **: In the next few cells, I will clean the data by first getting rid of the column I don't need to fit the period I choose then I will fill the missing data. 
# # # loading data employement = pd.read_csv('aged_15plus_employment_rate_percent.csv') child_mortality = pd.read_csv('child_mortality_0_5_year_olds_dying_per_1000_born.csv') gdp = pd.read_csv('gdppercapita_us_inflation_adjusted.csv') life_expectancy = pd.read_csv('life_expectancy_years.csv') school_years = pd.read_csv('mean_years_in_school_women_percent_men_25_to_34_years.csv') pop = pd.read_csv('population_growth_annual_percent.csv') pop.head(2) cols = ['country'] + child_mortality.loc[: , '1991':'2013'].columns.to_list() # ### Data Cleaning : Select columns and filing missing data # > ** Missing data **: In the next cell, I will select the desired columns and filing the missing data. I am going to use bfill() and ffill() function which I think is more appropriate as opposed to the mean or the median considering that the data is collected over a timeline, but this method, in this case, has a drawback especially if we have a row with multiple consecutive missing values a better way or missing values I guess would be a regression. 
# Selecting columns and filing missing values employement = employement[cols] child_mortality = child_mortality[cols] gdp = gdp[cols] gdp = gdp.fillna(method='bfill', axis=1) gdp = gdp.fillna(method='ffill', axis=1) life_expectancy = life_expectancy[cols] school_years = school_years[cols] pop = pop[cols] pop = pop.fillna(method='bfill', axis=1) pop = pop.fillna(method='ffill', axis=1) pop.head(2) # ## More cleaning # <p> # In the next cell, I will create a function to add a column called continent to each of the datasets which will hold for each country which continent they are in, so I can compare region later # </p> # + # This function will add column that specifie the continent for each country def convert(data): codes_iso3 = pc.convert(names=list(data['country']), to='ISO3') data['continent'] = pc.convert(names=codes_iso3, src = 'ISO3', to = 'continent') return data # add the continent column to each datasets pop = convert(pop) employement= convert(employement) child_mortality = convert(child_mortality) gdp = convert(gdp) life_expectancy = convert(life_expectancy) school_years = convert(school_years) # - pop.describe() life_expectancy.describe() school_years.describe() # <a id='eda'></a> # ## Exploratory Data Analysis # # <h6>Question 1</h6> # <p>The first question I am going to address is how these indicators have changed over time for my home country Morroco and my continent Africa and in our world. </p> # this function will select the data for my country from each of the datasets. 
def transform_m(data): data.set_index(['country', 'continent'], inplace=True) data1 = data.loc['Morocco', 'Africa'] data.reset_index(inplace=True) return (data1 - np.min(data1))/(np.max(data1) - np.min(data1)) # + # ploting each factor in diffrent plot datasets = [pop, employement, school_years, child_mortality, gdp, life_expectancy] labels = ['population growth', 'employment rate', 'years in school', 'child mortality', 'gdp per capita','life expectancy'] plt.figure(figsize=(14,8)) gs = plt.GridSpec(3, 3) for data, i , label in zip(datasets,range(6),labels): plt.subplot(gs[i//2, i%2]) plt.plot(range(1991, 2014),transform_m(data)) plt.ylabel(label) # - # ## Morroco plots analysis # <p> # I can see from this charts above that, in general, getting better in my home country. for example child mortality dropped significantly since 1991, on the other hand, life expectancy rose steadily in the period 1991 through 2013, the same thing could be said about GDP per capita, as for population growth we see a significant drop between 1991 and 2005 then it starts rising again, the chart with the most fluctuation is the employment rate, the employment rate had ups and downs in the last decade of the last century and had nice steady growth since the start of the new century, but since 2007 it begins to fluctuate again which to be expected given the last recession. 
# </p> def transform_a(data): data1 = data.groupby('continent').mean().loc['Africa',:] return (data1 - np.min(data1))/(np.max(data1) - np.min(data1)) plt.figure(figsize=(14,8)) gs = plt.GridSpec(3, 3) for data, i, label in zip(datasets, range(6), labels): plt.subplot(gs[i//2, i%2]) plt.plot(range(1991, 2014),transform_a(data)) plt.ylabel(label) # ## Africa plots analysis # <p> # In Africa things also getting better, in this plots we can see that population growth change a lot it drops at the beginning of the 1990s, one possible reason for this it's the AIDS pandemic, then it rose again until it hits the pick in this period around 1997 then it fell until around 2003 when it starts rising again and fluctuate, as of child mortality it drops steadily since the 1990s. The number of years in school for Africans is rising since 1991 the same things could be said about GDP per capita and life expectancy, as of employment rate it did badly since 1991 through around 2002, but things get better from there where we see a steady growth of employment. # </p> def transform_w(data): return (data.mean() - np.min(data.mean()))/(np.max(data.mean()) - np.min(data.mean())) # + plt.figure(figsize=(14,8)) gs = plt.GridSpec(3, 3) for data, i, label in zip(datasets, range(6), labels): plt.subplot(gs[i//2, i%2]) plt.plot(range(1991, 2014),transform_w(data)) plt.ylabel(label) plt.xlabel('Year') # - # ## World plots analysis # <p> # In these plots about the world, we see also that things generally getting better. the number of years in school for the citizen of the world between 1991 and 2013 rose significantly the same thing could be said about life expectancy and gdb per capita except around the last recession, on the other hand, we observe a steady fall of child mortality. the plots that fluctuate are population growth and employment rate. As of population growth, it generally fell from 1991 until 2004 when it starts raising again but it fell again since 2009. 
For the employment rate, it starts by falling at the beginning of the 1990s the rose a little bit, then it took off until the recession, but it recovers when the economy starts recovering. # </p> # ## Question 2 # <p> # The second question is how different regions in the world doing in some of these factors. # </p> # + # plotting each continent child mortality against that's of the world plt.figure(figsize=(14,8)) #gs = plt.GridSpec(3, 3) continent = list(pop.groupby('continent').mean().index) for i, cont in zip(range(6), continent): plt.subplot(gs[i//2, i%2]) plt.plot(range(1991,2014), child_mortality.groupby('continent').mean().loc[cont], label=cont) plt.plot(range(1991,2014), child_mortality.mean(), label='world') plt.title(cont + ' vs world') plt.xlabel('year') plt.rcParams['axes.axisbelow'] = True plt.legend() # - # ## Compare continent to the world in child mortality # <p> # I can see from charts that Africa have higher child mortality rate compared to the rest of the world, so a lot need to be done to improve our continent # </p> plt.figure(figsize=(14,8)) gs = plt.GridSpec(3, 3) #fig, axs = plt.subplots(3,2, figsize=(15, 8), sharex=True, sharey=True) for data, i, label, in zip(datasets, range(6), labels): plt.subplot(gs[i//2, i%2]) plt.plot(range(1991,2014), data.groupby('continent').mean().loc['America'], label='America') plt.plot(range(1991,2014), data.groupby('continent').mean().loc['Europe'], label='Europe') plt.ylabel(label) plt.xlabel('Year') plt.legend() # ## comarison between amireca and europe # <p> # In this comparison I can see that in America the growth of the population is declining, on the other hand, there is more fluctuation in Europe. The employment rate the observer can see that America has a more or less steady growth than Europe. 
As of years in school, we observe that at the beginning of the 1990s Europe was better but around 2004 America overtook Europe, a child is much better in Europe than in America, also the life expectancy factor is much better in Europe than in America. # </p> # # + pop1 = pop.loc[:, 'country':'2011'] pop1['mean'] = np.mean(pop1, axis=1) pop1.groupby('continent').mean()['mean'].plot(kind='bar'); plt.ylabel('population grouth') plt.title('mean poplulation grouth'); # - # ## Mean population grouth # <p> # in the plot above w can see that Africa had the highest mean population growth with Asia close behind, and Europe had the lowest population growth. # </p> # ## Question 3 # <p> # How many countries had an above averege life expectancy in the year 2013 # </p> is_above_averge = life_expectancy['2013'] >= life_expectancy['2013'].mean() ax = is_above_averge.value_counts().plot(kind='bar' ) ax.xaxis.set_ticklabels(['Above averge', 'Under averge']); # <p> # The bar plot above shows that in the world of 2013 the maojorty of countries had an above averge life expectancy # </p> # ## Question 4 # <p> # The third question is there a relationship between these indicators # </p> # + # This function will make the three datasets the same shape countries = set(life_expectancy['country']) & set(child_mortality['country']) & set(gdp['country']) def redauce(data): data.set_index('country',inplace=True) data = data.loc[countries] return data gdp = redauce(gdp) life_expectancy = redauce(life_expectancy) child_mortality = redauce(child_mortality) # - #plot gdp vs child mortality plt.scatter(gdp.mean(),child_mortality.mean()) plt.xlabel('Gdp per capita') plt.ylabel('Child mortality') plt.title('Gdp per capita vs child mortality '); # ## Child mortality and GDP # <p> # In this scatter plot there is a negative correlation between child mortality and gdb per capita as the observer can see that as the GDP grows the child mortality decline so maybe there is a relationship between GDP growth and 
child mortality. # </p> plt.scatter(gdp.mean(), life_expectancy.mean()) plt.xlabel('Gdp per capita') plt.ylabel('Life expectancy') plt.title('Gdp per capita vs Life expectancy'); # ## Life expectancy and GDP # <p> # In this scatter plot we can observe that there is a positive correlation between life expectancy and gdb per capita as the observer can see that as the GDP grows the life expectancy grows also. # </p> # ## Limitations # <p> # <ul> # <li>Some data sets have missing values especially one, where there are like 10 consecutive values missing in the same row which could be problematic luckily its just one row. # <li> Another limitation is that most of the datasets have just about 190 countries, which could impact the analysis. # <ul> # </p> # # <a id='conclusions'></a> # ## Conclusions # # <p> # Generally, the world is getting better in most of this indicatiors child mortality and population growth are generally in decline, on the other hand, the world saw steady growth in life expectancy, employment rate, GDP per capita and the number of years in school. there is also a correlation between life expectancy and child mortality and GDP. The life expectancy in most countries is above the averge of the wolrd which could indicate that most of the world is geeting better in that area. # </p> # ## Ressources # <ul> # <li> # <a href ="https://stackoverflow.com/">stackoverflow </a> # </li> # <li> # <a href ="https://nbviewer.jupyter.org/github/konstantinstadler/country_converter/blob/master/doc/country_converter_examples.ipynb"> This note book </a> # </li> # <li> # <a href ="https://github.com/konstantinstadler/country_converter"> This repository </a> # </li> # </ul>
13,656
/NikeVsAdidas.ipynb
ebc59bc54d1203f90b26a0961eb56772aee46e61
[]
no_license
smj007/Nike-Adidas-Classifier
https://github.com/smj007/Nike-Adidas-Classifier
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
114,458
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import gmaps import numpy as np import pandas as pd import requests import time job_df=pd.read_csv("../01. Prospective Project Data/1. 72199_158097_compressed_indeed_job_dataset.csv") A_wjMkyX8wh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="d850cf99-34ee-4f38-f89b-fb13b06f5aee" from google.colab import drive drive.mount('/content/drive') # + id="UPxAUm5GYAva" colab_type="code" colab={} os.chdir('/content/drive/My Drive/NIKE_vs_ADIDAS-master') # + id="hzMr9b9AYM39" colab_type="code" colab={} train_dir = './TRAIN' test_dir = './TEST' size = 120 lr = 1e-3 # + id="g2nL2-TvYObE" colab_type="code" colab={} def label_img(img): word_label = img.split('_')[0] #gets the name of the brand if word_label == 'NIKE': return [1,0] elif word_label == 'ADIDAS': return [0,1] # + id="t9IxER2qYQAB" colab_type="code" colab={} def create_train_data(): train_dataset = [] for image in tqdm(os.listdir(train_dir)): label = label_img(image) path = os.path.join(train_dir, image) image = Image.open(path) #Grayscale conversion using PIL - channel L is for single channel #channel P is for mode or palettized image = image.convert('L') image = image.resize((size, size), Image.ANTIALIAS) #using this lib instead of cv2 #eqvt of cv2 is cv2.cvtColor and cv2.resize( ,(bicubic, NN interpolation arg, etc)) train_dataset.append([np.array(image), np.array(label)]) shuffle(train_dataset) np.save('train_data.npy', train_dataset) return train_dataset # + id="mLLGdF4KYRqf" colab_type="code" colab={} def process_test_data(): test_dataset = [] for image in tqdm(os.listdir(test_dir)): path = os.path.join(test_dir, image) if "DS_Store" not in path: image_idx = image.split('_')[1] #images are like NIKE_7 etc image = Image.open(path) image = 
image.convert('L') image = image.resize((size, size), Image.ANTIALIAS) test_dataset.append([np.array(image), np.array(image_idx)]) shuffle(test_dataset) np.save('test_data.npy', test_dataset) return test_dataset # + id="fEfyh3eIYqe1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 106} outputId="475267ca-cb8d-43ff-9c4c-8759a7016a76" # !pip install tflearn # + id="UuX2pWNEY8fv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 402} outputId="31b76001-d7aa-4157-bca1-3565065c131c" pip install tensorflow==1.13.2 # + id="Q7bPPzsEYTgd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="80a20fa2-831c-42a0-9b14-63a87b3b84fa" #using tflearn instead of tensorflow/keras import tflearn from tflearn.layers.conv import conv_2d, max_pool_2d from tflearn.layers.core import input_data, dropout, fully_connected from tflearn.layers.estimator import regression import tensorflow as tf tf.reset_default_graph() convnet = input_data(shape=[None, size, size, 1], name = 'input') convnet = conv_2d(convnet, 32, 5, activation = 'relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 64, 5, activation = 'relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 32, 5, activation = 'relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 64, 5, activation = 'relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 32, 5, activation = 'relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 64, 5, activation = 'relu') convnet = max_pool_2d(convnet, 5) convnet = fully_connected(convnet, 1024, activation = 'relu') convnet = dropout(convnet, 0.8) convnet = fully_connected(convnet, 2, activation = 'softmax') convnet = regression(convnet, optimizer = 'adam', learning_rate = lr, loss = 'categorical_crossentropy', name = 'targets') model = tflearn.DNN(convnet, tensorboard_verbose=3) # + id="sEWCbCkabnK-" colab_type="code" colab={"base_uri": 
"https://localhost:8080/", "height": 303} outputId="26bb8366-f706-4014-ef81-68f9596ff4d1" train_dataset = create_train_data() plt.imshow(train_dataset[43][0], cmap = 'gray') print(train_dataset[43][1]) # + id="_AEI4KgBYXgw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c3781655-da7e-4c23-9640-e82af1de64d4" train = train_dataset[-90:] print(len(train)) # + id="k5NUkla2bdFu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="05160bd1-99c0-4dd7-cff8-756adfb5b8f6" val = train_dataset[:-90] print(len(val)) # + id="JdZ7NzT7cI8t" colab_type="code" colab={} X = np.array([i[0] for i in train]).reshape(-1, size, size, 1) y = [i[1] for i in train] val_X = np.array([i[0] for i in val]).reshape(-1, size, size, 1) val_y = [i[1] for i in val] # + id="1ID9gkGaeIQp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 124} outputId="7a42e45a-ba49-4574-9ffe-803f4c705fd0" model.fit({'input':X}, {'targets':y}, n_epoch = 100, validation_set=({'input': val_X}, {'targets': val_y}), snapshot_step=50, show_metric=True) # + id="ws2oB_tTg3-U" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 472} outputId="9fe2848e-64f6-40fa-9b1c-e29d2e20d660" test_data = process_test_data() fig = plt.figure() for num, data in enumerate(test_data[:10]): image_idx = data[1] image_data = data[0] y = fig.add_subplot(3, 4, num + 1) original = image_data data = image_data.reshape(size, size, 1) output = model.predict([data])[0] print(output) if np.argmax(output) == 1: label = 'ADIDAS' else: label = 'NIKE' y.imshow(original, cmap = 'gray') plt.title(label) plt.show() # + id="zEgIcOLNm8Cy" colab_type="code" colab={}
6,085
/Assignment 2-Fall+2018++Exploratory+Data+Analysis+of+US+flights.ipynb
92a07a24131d7f51ad9bdd733d9b4cc14f31e9a7
[]
no_license
KlavdiaZ/for-USML
https://github.com/KlavdiaZ/for-USML
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
615,382
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd # pip install seaborn import seaborn as sns import matplotlib.pyplot as plt sns.set() import matplotlib import matplotlib.pyplot as plt import matplotlib.ticker # %matplotlib inline # + # Tune the visual settings for figures in `seaborn` sns.set_context( "notebook", font_scale=1.5, rc={ "figure.figsize": (11, 8), "axes.titlesize": 18 } ) from matplotlib import rcParams rcParams['figure.figsize'] = 11, 8 # - dtype = {'DayOfWeek': np.uint8, 'DayofMonth': np.uint8, 'Month': np.uint8 , 'Cancelled': np.uint8, 'Year': np.uint16, 'FlightNum': np.uint16 , 'Distance': np.uint16, 'UniqueCarrier': str, 'CancellationCode': str, 'Origin': str, 'Dest': str, 'ArrDelay': np.float16, 'DepDelay': np.float16, 'CarrierDelay': np.float16, 'WeatherDelay': np.float16, 'NASDelay': np.float16, 'SecurityDelay': np.float16, 'LateAircraftDelay': np.float16, 'DepTime': np.float16} # %%time # change the path if needed flights_df = pd.read_csv("2008.csv.bz2", usecols=dtype.keys(), dtype=dtype) print(flights_df.shape) print(flights_df.columns) flights_df.head() flights_df.head().T flights_df.info() flights_df.describe().T flights_df['UniqueCarrier'].nunique() flights_df.groupby('UniqueCarrier').size().plot(kind='bar'); flights_df.groupby(['UniqueCarrier','FlightNum'])['Distance'].sum().sort_values(ascending=False).iloc[:3] flights_df.groupby(['UniqueCarrier','FlightNum'])\ .agg({'Distance': [np.mean, np.sum, 'count'], 'Cancelled': np.sum})\ .sort_values(('Distance', 'sum'), ascending=False)\ .iloc[0:3] pd.crosstab(flights_df.Month, flights_df.DayOfWeek) plt.imshow(pd.crosstab(flights_df.Month, flights_df.DayOfWeek), cmap='seismic', interpolation='none'); flights_df.hist('Distance', bins=20); flights_df['Date'] = 
pd.to_datetime(flights_df.rename(columns={'DayofMonth': 'Day'})[['Year', 'Month', 'Day']]) num_flights_by_date = flights_df.groupby('Date').size() num_flights_by_date.plot(); num_flights_by_date.rolling(window=7).mean().plot(); # # 1. Find top-10 carriers in terms of the number of completed flights (UniqueCarrier column)? # Which of the listed below is not in your top-10 list? # # DL # AA # OO # EV # flights_df["UniqueCarrier"].value_counts().sort_values(ascending=False).iloc[:10] flights_df["Cancelled"].unique() flights_df[flights_df["Cancelled"]==1]["UniqueCarrier"].value_counts() completed=flights_df["UniqueCarrier"].value_counts() - flights_df[flights_df["Cancelled"]==1]["UniqueCarrier"].value_counts() completed.sort_values(ascending=False).iloc[:10] #EV - answer (DL AA OO EV) # # 2. Plot distributions of flight cancellation reasons (CancellationCode). # What is the most frequent reason for flight cancellation? (Use this link to translate codes into reasons) # # carrier # weather conditions # National Air System # security reasons sns.countplot(x="CancellationCode", data=flights_df); # + # CancellationCode # B (Weather)- answer (A Carrier B Weather C National Air System D Security) # - # # 3. Which route is the most frequent, in terms of the number of flights? # (Take a look at 'Origin' and 'Dest' features. Consider A->B and B->A directions as different routes) # # New-York – Washington # San-Francisco – Los-Angeles # San-Jose – Dallas # New-York – San-Francisco flights_df["Route"]=flights_df["Origin"] + "-" + flights_df["Dest"] flights_df.head() flights_df["Route"].value_counts().sort_values(ascending=False).head() # + # SFO-LAX - answer (San-Francisco – Los-Angeles) # - # # 4. Find top-5 delayed routes (count how many times they were delayed on departure). From all flights on these 5 routes, count all flights with weather conditions contributing to a delay. 
# # 449 # 539 # 549 # 668 delayed=flights_df[flights_df["DepDelay"] > 0]["Route"].value_counts().sort_values(ascending=False).head().index.tolist() #delayed.drop([:,1]) # не получается дропнуть print(delayed) flights_df["WeatherDelay"].unique() flights_df[(flights_df["Route"].isin(delayed)) & (flights_df["WeatherDelay"] >0)] # + # 668-answer # - # # 5. Examine the hourly distribution of departure times. For that, create a new series from DepTime, removing missing values. # Choose all correct statements: # # Flights are normally distributed within time interval [0-23] (Search for: Normal distribution, bell curve). # Flights are uniformly distributed within time interval [0-23]. # In the period from 0 am to 4 am there are considerably less flights than from 7 pm to 8 pm. flights_df["DepTime"].describe() flights_df["DepTime_1"]=round(flights_df["DepTime"].dropna()/100,0) _, axes = plt.subplots(figsize=(20,12)) sns.countplot(x="DepTime_1", data=flights_df); # + # In the period from 0 am to 4 am there are considerably less flights than at 7pm. (answer) # - # # 6. Show how the number of flights changes through time (on the daily/weekly/monthly basis) and interpret the findings. # Choose all correct statements: # # The number of flights during weekends is less than during weekdays (working days). # The lowest number of flights is on Sunday. # There are less flights during winter than during summer. 
sns.countplot(x="Month",data=flights_df); sns.countplot(x="DayofMonth",data=flights_df); sns.countplot(x="DayOfWeek",data=flights_df); # + _, axes = plt.subplots(1, 3, sharex=True, figsize=(30, 8)) sns.countplot(x="Month",data=flights_df, ax=axes[0]); sns.countplot(x="DayOfWeek",data=flights_df, ax=axes[1]); sns.countplot(x="DayofMonth",data=flights_df, ax=axes[2]); # - sns.heatmap(pd.crosstab(flights_df["Month"], flights_df["DayofMonth"])) sns.heatmap(pd.crosstab(flights_df["Month"], flights_df["DayOfWeek"])) # + # The number of flights during weekends is less than during weekdays (working days). YES # The lowest number of flights is on Sunday. NO (Saturday) # There are less flights during winter than during summer. YES # - # # 7. Examine the distribution of cancellation reasons with time. Make a bar plot of cancellation reasons aggregated by months. # Choose all correct statements: # # December has the highest rate of cancellations due to weather. # The highest rate of cancellations in September is due to Security reasons. # April's top cancellation reason is carriers. # Flights cancellations due to National Air System are more frequent than those due to carriers. flights_df["CancellationCode"]=flights_df["CancellationCode"].map({"A":"Carrier", "B":"Weather", "C":"National Air System", "D":"Security"}) flights_df["CancellationCode"].unique() sns.countplot(x="Month",data=flights_df, hue = "CancellationCode"); flights_df["CancellationCode"].unique() flights_df[flights_df["CancellationCode"]=="Weather"]["Month"].value_counts().sort_values(ascending=False) # + # answer (A Carrier B Weather C National Air System D Security) #December has the highest rate of cancellations due to weather. (YES) #The highest rate of cancellations in September is due to Security reasons.(NO) #April's top cancellation reason is carriers. (YES) #Flights cancellations due to National Air System are more frequent than those due to carriers.(NO) # - # # 8. 
Which month has the greatest number of cancellations due to Carrier? # # May # January # September # April flights_df[flights_df["CancellationCode"]=="Carrier"]["Month"].value_counts().sort_values(ascending=False) # + # sns.countplot(x="Month",data=flights_df, hue = flights_df[flights_df["CancellationCode"]=="Carrier"]); #?? # + # April - answer # - # # 9. Identify the carrier with the greatest number of cancellations due to carrier in the corresponding month from the previous question. # # 9E # EV # HA # AA flights_df[(flights_df["CancellationCode"]=="Carrier") & (flights_df["Month"]==4)]["UniqueCarrier"].value_counts() #.max() # change MONTH!!! # + # AA - answer # - # # 10. Examine median arrival and departure delays (in time) by carrier. Which carrier has the lowest median delay time for both arrivals and departures? Leave only non-negative values of delay times ('ArrDelay', 'DepDelay'). Boxplots can be helpful in this exercise, as well as it might be a good idea to remove outliers in order to build nice graphs. You can exclude delay time values higher than a corresponding .95 percentile. 
# # EV # OO # AA # AQ # + #flights_df["ArrDelay0"]=flights_df[flights_df["ArrDelay"] >0]["ArrDelay"] #print(flights_df.head()) #flights_df.groupby("UniqueCarrier")["ArrDelay0"].median() #mask1 = flights_df.apply(lambda row: row["ArrDelay"] > 0, axis=1) #mask2 = flights_df.apply(lambda row: row["DepDelay"] > 0, axis=1) #flights_df[(mask1) & (mask2)].head() # - flights_df10=flights_df[(flights_df["ArrDelay"] >0) & (flights_df["DepDelay"] >0) & (flights_df["DepDelay"] < flights_df["DepDelay"].quantile(q=0.95)) & (flights_df["ArrDelay"] < flights_df["ArrDelay"].quantile(q=0.95))] _, axes = plt.subplots(figsize=(20,12)) df1 = pd.melt(flights_df10, value_vars=["ArrDelay", "DepDelay"], id_vars="UniqueCarrier") #, id_vars="Month" or "UniqueCarrier" sns.boxplot(x="variable", y="value", hue="UniqueCarrier", data=df1) #hue="Month", or "UniqueCarrier" plt.show() df1 = pd.melt(flights_df10, value_vars=["ArrDelay", "DepDelay"]) #, id_vars="Month" or "UniqueCarrier" sns.boxplot(x="variable", y="value", data=df1) #hue="Month", or "UniqueCarrier" plt.show() flights_df10.groupby("UniqueCarrier")["ArrDelay"].median().sort_values(ascending=False) flights_df10.groupby("UniqueCarrier")["DepDelay"].median().sort_values(ascending=False) # + # AQ - answer
9,905
/Course_2/Week4/C2_W4_Assignment.ipynb
6790752807ade65599c41517f490cee77297f1b2
[]
no_license
kotsonis/NLP_Specialization
https://github.com/kotsonis/NLP_Specialization
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
121,983
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline import warnings warnings.filterwarnings('ignore') from lightgbm import LGBMClassifier # - df = pd.read_csv("Desktop\Cristano_Ronaldo_Final_v1\data.csv") print("Total number of dataset is {}".format(df.shape[0])) df.drop_duplicates(keep='first', inplace=True) # Remove duplicate entries if any print("Total number of dataset after removing duplicates is {}".format(df.shape[0])) df.head(2) df['match_event_id'] = df['match_event_id'].interpolate() df.drop(['Unnamed: 0','match_id','team_name','team_id','shot_id_number'], inplace = True, axis = 1) df[['game_season', 'knockout_match','home/away','power_of_shot','lat/lng','date_of_game']] = df[['game_season','knockout_match','home/away','power_of_shot','lat/lng','date_of_game']].fillna(method='ffill') df['type_of_shot'] = df['type_of_shot'].replace(np.nan, '', regex=True) df['type_of_combined_shot'] = df['type_of_combined_shot'].replace(np.nan, '', regex=True) df['shot'] = df['type_of_shot'] + df['type_of_combined_shot'] df.drop(['type_of_shot','type_of_combined_shot'],axis=1,inplace=True) # + # remaining_sec makes more sense as number of seconds should be <=60 df.drop('remaining_sec.1', axis=1, inplace=True) # for continous values in following cases let us use mean df.remaining_sec.fillna(value=df.remaining_sec.mean(), inplace=True, limit=None) # - df['opp_team'] = df['home/away'].str[-3:] df['home/away'] = df['home/away'].str[5:-3].map(lambda x: x.strip()) df['home/away'].replace({'@': 'away','vs.': 'home'},inplace=True) # + # On the same lines of remaining_sec for remainig_min df.drop('remaining_min.1', axis=1, inplace=True ) # Fill missing values using mean 
df.remaining_min.fillna(value=df.remaining_min.mean(), inplace=True, limit=None) # - df.drop(['power_of_shot.1','knockout_match.1'], axis=1, inplace=True) df["date_of_game"] = pd.to_datetime(df["date_of_game"]) df["year"] = df["date_of_game"].apply(lambda x:x.year) df["day"] = df["date_of_game"].apply(lambda x:x.day) df["month"] = df["date_of_game"].apply(lambda x:x.month) df.drop(['date_of_game'],axis=1,inplace=True) # + # both of them is almost same, we can take one df['distance_of_shot'] and drop other df.drop('distance_of_shot.1', axis = 1, inplace = True) # Fill missing value in distance_of_shot #df.distance_of_shot.fillna(value = df['distance_of_shot'].mean(), inplace = True) # - df['area_of_shot'][df['shot_basics'] == 'Left Corner']=df['area_of_shot'][df['shot_basics'] == 'Left Corner'].fillna('Left Side(L)') df['area_of_shot'][df['shot_basics'] == 'Right Corner']=df['area_of_shot'][df['shot_basics'] == 'Right Corner'].fillna('Right Side(R)') df['area_of_shot'][df['shot_basics'] == 'Goal Area']=df['area_of_shot'][df['shot_basics'] == 'Goal Area'].fillna('Center(C)') df['area_of_shot'][df['shot_basics'] == 'Mid Ground Line']=df['area_of_shot'][df['shot_basics'] == 'Mid Ground Line'].fillna('Mid Ground(G)') df['location_y'] = df['location_y'].apply(lambda x: "{:.0f}".format(x) if not pd.isnull(x) else x) df['location_x'] = df['location_x'].apply(lambda x: "{:.0f}".format(x) if not pd.isnull(x) else x) # + d = df[['location_x','location_y','area_of_shot']] d1 = d.dropna(subset = ['location_x','location_y']) d2 = d.dropna(subset = ['location_y','area_of_shot']) d3 = d.dropna(subset = ['area_of_shot','location_x']) # + from sklearn.preprocessing import LabelEncoder label_encoder = LabelEncoder() label_encoder.fit(d2['area_of_shot']) d2['area_of_shot'] = label_encoder.transform(d2['area_of_shot']) # - label_encoder.fit(d3['area_of_shot']) d3['area_of_shot'] = label_encoder.transform(d3['area_of_shot']) # + nonempty_area = d1.dropna(axis=0, how='any', 
subset=['area_of_shot']) empty_area = d1[~d1.index.isin(nonempty_area.index)] nonempty_x = d2.dropna(axis=0, how='any', subset=['location_x']) empty_x = d2[~d2.index.isin(nonempty_x.index)] nonempty_y = d3.dropna(axis=0, how='any', subset=['location_y']) empty_y = d3[~d3.index.isin(nonempty_x.index)] # - empty_area.drop(['area_of_shot'],inplace=True,axis=1) empty_x.drop(['location_x'],inplace=True,axis=1) empty_y.drop(['location_y'],inplace=True,axis=1) # + from sklearn.linear_model import LogisticRegression lr = LogisticRegression () from sklearn.ensemble import RandomForestRegressor rfr = RandomForestRegressor() # + nonempty_area_labels = nonempty_area['area_of_shot'] nonempty_area.drop(['area_of_shot'],axis=1,inplace=True) nonempty_x_labels = nonempty_x['location_x'] nonempty_x.drop(['location_x'],axis=1,inplace=True) nonempty_y_labels = nonempty_y['location_y'] nonempty_y.drop(['location_y'],axis=1,inplace=True) # + lr.fit(nonempty_area,nonempty_area_labels) pred1 = lr.predict(empty_area) rfr.fit(nonempty_x,nonempty_x_labels) pred2 = rfr.predict(empty_x) rfr.fit(nonempty_y,nonempty_y_labels) pred3 = rfr.predict(empty_y) # + empty_area['area_of_shot'] = pred1 d1.drop(['area_of_shot'],axis=1,inplace=True) empty_x['location_x'] = pred2 d2.drop(['location_x'],axis=1,inplace=True) empty_y['location_y'] = pred3 d3.drop(['location_y'],axis=1,inplace=True) # + final1 = pd.merge(d, empty_area, left_index=True, right_index=True,how='outer') final1 = final1.replace(np.nan, '', regex=True) final1 = final1['area_of_shot_y'] final2 = pd.merge(d, empty_x, left_index=True, right_index=True,how='outer') final2 = final2.replace(np.nan, '', regex=True) final2 = final2['location_x_y'] final3 = pd.merge(d, empty_y, left_index=True, right_index=True,how='outer') final3 = final3.replace(np.nan, '', regex=True) final3 = final3['location_y_y'] # - df = pd.merge(df, final1, left_index=True, right_index=True,how='outer') #df = pd.merge(df, final1, left_index=True, 
right_index=True,how='outer') df['area_of_shot'] = df['area_of_shot'].replace(np.nan, '', regex=True) df['area_of_shot'] = df['area_of_shot']+df['area_of_shot_y'] df['area_of_shot'] = df['area_of_shot'].replace(r'^\s*$', np.nan, regex=True) df.drop(['area_of_shot_y'],axis=1,inplace=True) df = pd.merge(df, final2, left_index=True, right_index=True,how='outer') df['location_x'] = df['location_x'].replace(np.nan, '', regex=True) # + df['location_x'] = df['location_x'].astype(str) df['location_x_y'] = df['location_x_y'].astype(str) df['location_x'] = df[['location_x', 'location_x_y']].apply(lambda x: ''.join(x), axis=1) # - df['location_x'] = df['location_x'].replace(r'^\s*$', np.nan, regex=True) df['location_x'] = df['location_x'].astype(float) df['location_x'] = round(df['location_x']) df.drop(['location_x_y'],axis=1,inplace=True) # + df = pd.merge(df, final3, left_index=True, right_index=True,how='outer') df['location_y'] = df['location_y'].replace(np.nan, '', regex=True) df['location_y'] = df['location_y'].astype(str) df['location_y_y'] = df['location_y_y'].astype(str) df['location_y'] = df[['location_y', 'location_y_y']].apply(lambda x: ''.join(x), axis=1) df['location_y'] = df['location_y'].replace(np.nan, '', regex=True) df['location_y'] = df['location_y'].replace(r'^\s*$', np.nan, regex=True) df['location_y'] = df['location_y'].astype(float) df['location_y'] = round(df['location_y']) df.drop(['location_y_y'],axis=1,inplace=True) # - def segregate(x): l = x["lat/lng"].split(", ") return pd.Series(l) df[["lat","long"]] = df.apply(segregate, axis=1) df["lat"] = pd.to_numeric(df["lat"]) df["long"] = pd.to_numeric(df["long"]) def _split_columns(array): if array.ndim == 1: return array[0], array[1] # just a single row else: return array[:,0], array[:,1] # + R = 6378137.0 R_km = R/1000 def haversine(lat,lon): """ Calculate the great-circle distance bewteen points_a and points_b points_a and points_b can be a single points or lists of points """ points_a=[42.982923, 
-71.446094] lat1, lon1 = _split_columns(np.radians(points_a)) lat2, lon2 = (np.radians(lat)),(np.radians(lon)) # calculate haversine lat = lat2 - lat1 lon = lon2 - lon1 d = np.sin(lat * 0.5) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(lon * 0.5) ** 2 h = 2 * R_km * np.arcsin(np.sqrt(d)) return h # - df['dist'] = haversine(df['lat'],df['long']) df['dist'] = round(df['dist']) df.loc[ df['dist'] == 0, 'dist_group'] = 0 df.loc[(df['dist'] > 0) & (df['dist'] <= 1000), 'dist_group'] = 1 df.loc[(df['dist'] > 1000) & (df['dist'] <= 2000), 'dist_group'] = 2 df.loc[(df['dist'] > 2000) & (df['dist'] <= 3000), 'dist_group'] = 3 df.loc[ df['dist'] > 3000, 'dist_group'] = 4 ; # + d = df[['distance_of_shot','shot_basics','range_of_shot']] d1 = d.dropna(subset = ['distance_of_shot','shot_basics']) d2 = d.dropna(subset = ['range_of_shot','shot_basics']) d3 = d.dropna(subset = ['range_of_shot','distance_of_shot']) # + label_encoder.fit(d1['shot_basics']) d1['shot_basics'] = label_encoder.transform(d1['shot_basics']) label_encoder.fit(d2['shot_basics']) d2['shot_basics'] = label_encoder.transform(d2['shot_basics']) label_encoder.fit(d2['range_of_shot']) d2['range_of_shot'] = label_encoder.transform(d2['range_of_shot']) label_encoder.fit(d3['range_of_shot']) d3['range_of_shot'] = label_encoder.transform(d3['range_of_shot']) # + nonempty_range = d1.dropna(axis=0, how='any', subset=['range_of_shot']) empty_range = d1[~d1.index.isin(nonempty_range.index)] nonempty_dist = d2.dropna(axis=0, how='any', subset=['distance_of_shot']) empty_dist = d2[~d2.index.isin(nonempty_dist.index)] nonempty_shot = d3.dropna(axis=0, how='any', subset=['shot_basics']) empty_shot = d3[~d3.index.isin(nonempty_shot.index)] # - empty_range.drop(['range_of_shot'],inplace=True,axis=1) empty_dist.drop(['distance_of_shot'],inplace=True,axis=1) empty_shot.drop(['shot_basics'],inplace=True,axis=1) # + nonempty_range_labels = nonempty_range['range_of_shot'] nonempty_range.drop(['range_of_shot'],axis=1,inplace=True) 
nonempty_dist_labels = nonempty_dist['distance_of_shot'] nonempty_dist.drop(['distance_of_shot'],axis=1,inplace=True) nonempty_shot_labels = nonempty_shot['shot_basics'] nonempty_shot.drop(['shot_basics'],axis=1,inplace=True) # + lr.fit(nonempty_range,nonempty_range_labels) pred1 = lr.predict(empty_range) rfr.fit(nonempty_dist,nonempty_dist_labels) pred2 = rfr.predict(empty_dist) lr.fit(nonempty_shot,nonempty_shot_labels) pred3 = lr.predict(empty_shot) # + empty_range['range_of_shot'] = pred1 d1.drop(['range_of_shot'],axis=1,inplace=True) empty_dist['distance_of_shot'] = pred2 d2.drop(['distance_of_shot'],axis=1,inplace=True) empty_shot['shot_basics'] = pred3 d3.drop(['shot_basics'],axis=1,inplace=True) # + final1 = pd.merge(d, empty_range, left_index=True, right_index=True,how='outer') final1 = final1.replace(np.nan, '', regex=True) final1 = final1['range_of_shot_y'] final2 = pd.merge(d, empty_dist, left_index=True, right_index=True,how='outer') final2 = final2.replace(np.nan, '', regex=True) final2 = final2['distance_of_shot_y'] final3 = pd.merge(d, empty_shot, left_index=True, right_index=True,how='outer') final3 = final3.replace(np.nan, '', regex=True) final3 = final3['shot_basics_y'] # - df = pd.merge(df, final1, left_index=True, right_index=True,how='outer') df['range_of_shot'] = df['range_of_shot'].replace(np.nan, '', regex=True) df['range_of_shot'] = df['range_of_shot']+df['range_of_shot_y'] df['range_of_shot'] = df['range_of_shot'].replace(r'^\s*$', np.nan, regex=True) df.drop(['range_of_shot_y'],axis=1,inplace=True) df = pd.merge(df, final3, left_index=True, right_index=True,how='outer') df['shot_basics'] = df['shot_basics'].replace(np.nan, '', regex=True) df['shot_basics'] = df['shot_basics']+df['shot_basics_y'] df['shot_basics'] = df['shot_basics'].replace(r'^\s*$', np.nan, regex=True) df.drop(['shot_basics_y'],axis=1,inplace=True) df = pd.merge(df, final2, left_index=True, right_index=True,how='outer') df['distance_of_shot'] = 
df['distance_of_shot'].replace(np.nan, '', regex=True) # + df['distance_of_shot'] = df['distance_of_shot'].astype(str) df['distance_of_shot_y'] = df['distance_of_shot_y'].astype(str) df['distance_of_shot'] = df[['distance_of_shot', 'distance_of_shot_y']].apply(lambda x: ''.join(x), axis=1) # - df['distance_of_shot'] = df['distance_of_shot'].replace(r'^\s*$', np.nan, regex=True) df['distance_of_shot'] = df['distance_of_shot'].astype(float) df['distance_of_shot'] = round(df['distance_of_shot']) df.drop(['distance_of_shot_y'],axis=1,inplace=True) df.drop(['lat/lng'],axis=1,inplace=True) # + m1 = df['distance_of_shot'].mean() df['distance_of_shot'] = df['distance_of_shot'].fillna(m1) m2 = df['area_of_shot'].mode() df['area_of_shot'] = df['area_of_shot'].fillna(m2[0]) m3 = df['range_of_shot'].mode() df['range_of_shot'] = df['range_of_shot'].fillna(m3[0]) m4 = df['location_x'].mean() df['location_x'] = df['location_x'].fillna(m4) m5 = df['location_y'].mean() df['location_y'] = df['location_y'].fillna(m5) m6 = df['shot_basics'].mode() df['shot_basics'] = df['shot_basics'].fillna(m6[0]) # - df["add_loc"] = df["location_x"] + df["location_y"] df["sub_loc"] = df["location_x"] - df["location_y"] df = df.reset_index() df['index'] +=1 df = df.rename(columns={"index": "shot_id_number"}) from sklearn.preprocessing import LabelEncoder # + columns = ['shot','opp_team','home/away','area_of_shot','shot_basics','range_of_shot','year','month','game_season'] def encoder(df): for col in columns: label_encoder = LabelEncoder() label_encoder.fit(df[col]) df[col] = label_encoder.transform(df[col]) return df # - df = encoder(df) df.drop(['game_season'],axis=1,inplace=True) df.to_csv('df.csv',index=False) # !jupyter nbconvert --to script zs.ipynb your code) ### # Compute l1 as W2^T (Yhat - Y) # Re-use it whenever you see W2^T (Yhat - Y) used to compute a gradient l1 = np.dot(W2.T,(yhat-y)) # Apply relu to l1 l1 = (l1>0)*l1 # Compute the gradient of W1 grad_W1 = np.dot(l1,x.T)/batch_size # 
Compute the gradient of W2 grad_W2 = np.dot(yhat-y,h.T)/batch_size # Compute the gradient of b1 # print(f'batch size = {batch_size} and l1 has shape {l1.shape}') grad_b1 = np.sum(l1,axis=1,keepdims=True)/batch_size # Compute the gradient of b2 grad_b2 = np.sum(yhat-y,axis=1,keepdims=True)/batch_size ### END CODE HERE ### return grad_W1, grad_W2, grad_b1, grad_b2 # + # Test the function tmp_C = 2 tmp_N = 50 tmp_batch_size = 4 tmp_word2Ind, tmp_Ind2word = get_dict(data) tmp_V = len(word2Ind) # get a batch of data tmp_x, tmp_y = next(get_batches(data, tmp_word2Ind, tmp_V,tmp_C, tmp_batch_size)) print("get a batch of data") print(f"tmp_x.shape {tmp_x.shape}") print(f"tmp_y.shape {tmp_y.shape}") print() print("Initialize weights and biases") tmp_W1, tmp_W2, tmp_b1, tmp_b2 = initialize_model(tmp_N,tmp_V) print(f"tmp_W1.shape {tmp_W1.shape}") print(f"tmp_W2.shape {tmp_W2.shape}") print(f"tmp_b1.shape {tmp_b1.shape}") print(f"tmp_b2.shape {tmp_b2.shape}") print() print("Forwad prop to get z and h") tmp_z, tmp_h = forward_prop(tmp_x, tmp_W1, tmp_W2, tmp_b1, tmp_b2) print(f"tmp_z.shape: {tmp_z.shape}") print(f"tmp_h.shape: {tmp_h.shape}") print() print("Get yhat by calling softmax") tmp_yhat = softmax(tmp_z) print(f"tmp_yhat.shape: {tmp_yhat.shape}") tmp_m = (2*tmp_C) tmp_grad_W1, tmp_grad_W2, tmp_grad_b1, tmp_grad_b2 = back_prop(tmp_x, tmp_yhat, tmp_y, tmp_h, tmp_W1, tmp_W2, tmp_b1, tmp_b2, tmp_batch_size) print() print("call back_prop") print(f"tmp_grad_W1.shape {tmp_grad_W1.shape}") print(f"tmp_grad_W2.shape {tmp_grad_W2.shape}") print(f"tmp_grad_b1.shape {tmp_grad_b1.shape}") print(f"tmp_grad_b2.shape {tmp_grad_b2.shape}") # - # ##### Expected output # # ```CPP # get a batch of data # tmp_x.shape (5778, 4) # tmp_y.shape (5778, 4) # # Initialize weights and biases # tmp_W1.shape (50, 5778) # tmp_W2.shape (5778, 50) # tmp_b1.shape (50, 1) # tmp_b2.shape (5778, 1) # # Forwad prop to get z and h # tmp_z.shape: (5778, 4) # tmp_h.shape: (50, 4) # # Get yhat by calling softmax 
# tmp_yhat.shape: (5778, 4) # # call back_prop # tmp_grad_W1.shape (50, 5778) # tmp_grad_W2.shape (5778, 50) # tmp_grad_b1.shape (50, 1) # tmp_grad_b2.shape (5778, 1) # ``` # <a name='2.5'></a> # ## Gradient Descent # # <a name='ex-05'></a> # ### Exercise 05 # Now that you have implemented a function to compute the gradients, you will implement batch gradient descent over your training set. # # **Hint:** For that, you will use `initialize_model` and the `back_prop` functions which you just created (and the `compute_cost` function). You can also use the provided `get_batches` helper function: # # ```for x, y in get_batches(data, word2Ind, V, C, batch_size):``` # # ```...``` # # Also: print the cost after each batch is processed (use batch size = 128) # UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION: gradient_descent def gradient_descent(data, word2Ind, N, V, num_iters, alpha=0.03): ''' This is the gradient_descent function Inputs: data: text word2Ind: words to Indices N: dimension of hidden vector V: dimension of vocabulary num_iters: number of iterations Outputs: W1, W2, b1, b2: updated matrices and biases ''' W1, W2, b1, b2 = initialize_model(N,V, random_seed=282) batch_size = 128 iters = 0 C = 2 for x, y in get_batches(data, word2Ind, V, C, batch_size): ### START CODE HERE (Replace instances of 'None' with your own code) ### # Get z and h z, h = forward_prop(x, W1, W2, b1, b2) # Get yhat yhat = softmax(z) # Get cost cost = compute_cost(y, yhat, batch_size) if ( (iters+1) % 10 == 0): print(f"iters: {iters + 1} cost: {cost:.6f}") # Get gradients grad_W1, grad_W2, grad_b1, grad_b2 = back_prop(x, yhat, y, h, W1, W2, b1, b2, batch_size) # Update weights and biases W1 = W1-alpha*grad_W1 W2 = W2 - alpha*grad_W2 b1 = b1 - alpha*grad_b1 b2 = b2 - alpha*grad_b2 ### END CODE HERE ### iters += 1 if iters == num_iters: break if iters % 100 == 0: alpha *= 0.66 return W1, W2, b1, b2 # test your function C = 2 N = 50 word2Ind, Ind2word = get_dict(data) V = 
len(word2Ind) num_iters = 150 print("Call gradient_descent") W1, W2, b1, b2 = gradient_descent(data, word2Ind, N, V, num_iters) # ##### Expected Output # # # ```CPP # iters: 10 cost: 0.789141 # iters: 20 cost: 0.105543 # iters: 30 cost: 0.056008 # iters: 40 cost: 0.038101 # iters: 50 cost: 0.028868 # iters: 60 cost: 0.023237 # iters: 70 cost: 0.019444 # iters: 80 cost: 0.016716 # iters: 90 cost: 0.014660 # iters: 100 cost: 0.013054 # iters: 110 cost: 0.012133 # iters: 120 cost: 0.011370 # iters: 130 cost: 0.010698 # iters: 140 cost: 0.010100 # iters: 150 cost: 0.009566 # ``` # # Your numbers may differ a bit depending on which version of Python you're using. # <a name='3'></a> # ## 3.0 Visualizing the word vectors # # In this part you will visualize the word vectors trained using the function you just coded above. # + # visualizing the word vectors here from matplotlib import pyplot # %config InlineBackend.figure_format = 'svg' words = ['king', 'queen','lord','man', 'woman','dog','wolf', 'rich','happy','sad'] embs = (W1.T + W2)/2.0 # given a list of words and the embeddings, it returns a matrix with all the embeddings idx = [word2Ind[word] for word in words] X = embs[idx, :] print(X.shape, idx) # X.shape: Number of words of dimension N each # - result= compute_pca(X, 2) pyplot.scatter(result[:, 0], result[:, 1]) for i, word in enumerate(words): pyplot.annotate(word, xy=(result[i, 0], result[i, 1])) pyplot.show() # You can see that man and king are next to each other. However, we have to be careful with the interpretation of this projected word vectors, since the PCA depends on the projection -- as shown in the following illustration. result= compute_pca(X, 4) pyplot.scatter(result[:, 3], result[:, 1]) for i, word in enumerate(words): pyplot.annotate(word, xy=(result[i, 3], result[i, 1])) pyplot.show()
20,498
/SVM_Assignment_1.ipynb
0b7f00c801ec47a2ed16d0198bb1f8a73befc5ec
[]
no_license
Sankha1998/machine_learning
https://github.com/Sankha1998/machine_learning
4
0
null
null
null
null
Jupyter Notebook
false
false
.py
42,523
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Sankha1998/machine_learning/blob/master/SVM_Assignment_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="xxO9tyeJW4uh" colab_type="code" colab={} from sklearn import datasets import pandas as pd # + id="mjcU_PeeW8eU" colab_type="code" colab={} X, y = datasets.make_blobs(n_samples=50, n_features=2, centers=2, cluster_std=1.05, random_state=40) # + id="YFUgOrbIW-eF" colab_type="code" colab={} import numpy as np import matplotlib.pyplot as plt import seaborn as sns # + id="WyUdIxRDX-pH" colab_type="code" colab={} x=np.linspace(-10,10,100) # + id="J9MZCZj-YDgH" colab_type="code" colab={} y1=-5*x+1 # + id="nGM1xxeMYy9n" colab_type="code" colab={} y2=-4*x+1 # + id="nZogsr9CXCra" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 284} outputId="08ec3670-ed12-4463-9976-598ad2e05814" plt.plot(x,y1) plt.plot(x,y2) sns.scatterplot(X.T[0],X.T[1],hue=y) # + [markdown] id="U2pT8YqeZGPf" colab_type="text" # Equation of the orange line: # # y=-4x+1 # # Equation of the blue line: # # y=-5x+1 # # You have to find out which line will act as a better classifier according to the SVM alsorithm and why? 
# + id="u3KywixYZcYl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 954} outputId="52b91e90-570a-4b42-bbfd-b313f79d8da5" X # + id="0XilR3SpdfUP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 73} outputId="26aa150b-b9ba-40c2-e9c9-f65331358ec8" y # + id="Xo81Lx4adgCm" colab_type="code" colab={} y1=[] for i in y: if i == 0: y1.append(-1) else: y1.append(i) y = np.array(y1) # + id="DT5XEOird7mg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 73} outputId="f0911749-4496-4c6a-98f4-96c3b78b4473" y # + id="LdwtriYWejMO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 954} outputId="8d3cd432-cfa1-4b91-f9bb-ccd5f43c43ca" X # + id="XiZr0Uq5fJ6D" colab_type="code" colab={} df=pd.DataFrame() # + id="vpwYOpFzddje" colab_type="code" colab={} df['x']=X[:,0] df['y']=X[:,1] df['label']=y # + id="wo-v1IwwfdHK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="185a5bae-19fe-4779-e2e6-264e44574638" df.head() # + id="DOyFC5JFd8WF" colab_type="code" colab={} ##For line y=-4x+1 : ##4x+y-1=0 (rearranged) #now comparing the eqution with plane, w1=4 w2=1 b=-1 # + id="QiBHROwR6eCn" colab_type="code" colab={} lable=df.label x=df.x y=df.y # + id="EbQLu4C059Dg" colab_type="code" colab={} ## hard margin condition def condition(lable,w1,w2,x,y,b): values=lable*((w1*x)+(w2*y)+b) for i in values: if i<=0: print("eqution (label)*wi.xi+b>=0 is not satisfied") else: pass else : print('eqution (label)*wi.xi+b>=0 is satisfied') # + id="JxM6XatU93km" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="dc244e20-d8d8-4e58-a3a4-cf1cad2a4dbc" condition(lable,w1,w2,x,y,b) ## condition cheak for line1 # + id="ObOezPg3pcki" colab_type="code" colab={} ## Margin calculation for line 1: margin_line1=2/np.sqrt(w1**2+w2**2) # + id="AyH0UTZd9exH" colab_type="code" colab={} ##For line y=-5x+1: ##5x+y-1=0 (rearranged) #now comparing the 
eqution with plane, w1=5 w2=1 b=-1 # + id="PYqie1vn-l-1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="9a7ddc79-7dc2-4e07-aab3-f36ec3ced35f" condition(lable,w1,w2,x,y,b) ## condition cheak for line2 # + id="2nlZeoNr-xK8" colab_type="code" colab={} ## Margin calculation for line 2: margin_line2=2/np.sqrt(w1**2+w2**2) # + id="guAKhWNz-zrW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="e6f81ac8-cb54-4b25-da31-625d36910de3" if (margin_line1>margin_line2): print('line y=-4x+1 is better') else: print('line y=-5x+1 is better') # + id="FHyfFKYn_4LT" colab_type="code" colab={} ## as y=-4x+1 is larger in case of seperation so it's the best for spliting worker in big_df['Worker'].unique(): cut = big_df.loc[big_df['Worker'] == worker] cut = cut.loc[cut['mask-typ'] == 'original'] if sum(cut['model_wrong']) > 0: bad_workers.append(worker) print(bad_workers) # - big_df.groupby(['mask-typ', 'flip-class']).agg({'model_wrong': ['mean', 'count', 'sum']}) # ## Trying on Less Pure Splits of the Data big_bad_df = pd.merge(cum_df, gen_df, left_on = 'hypoID', right_on = 'Unnamed: 0') big_bad_df['model_wrong'] = big_bad_df.response != big_bad_df['new-label'] big_bad_df.groupby(['mask-typ', 'flip-class']).agg({'model_wrong': ['mean', 'count', 'sum']}).T big_horrible_df = pd.merge(s_df, gen_df, left_on = 'hypoID', right_on = 'Unnamed: 0') big_horrible_df['model_wrong'] = big_horrible_df.response != big_horrible_df['new-label'] big_horrible_df.groupby(['mask-typ', 'flip-class']).agg({'model_wrong': ['mean', 'count', 'sum']}).T
5,282
/notebooks/NSGAIINotebook.ipynb
a97d99bdb668c6b8b3a8cf8c997b98ce8f40cc56
[ "MIT", "LicenseRef-scancode-unknown-license-reference" ]
permissive
GitHubChengPeng/jMetalPy
https://github.com/GitHubChengPeng/jMetalPy
0
0
MIT
2020-05-03T10:00:48
2020-04-26T14:35:13
null
Jupyter Notebook
false
false
.py
55,834
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/SamiEzz/MineDor/blob/main/Quantitative_Trading_with_Python_Loading_Data_Using_Python.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="zKqx599kbLGD" # # Environment Setup # Before we dive deep into quantitative trading, we should have the required data that we want to work with. In this scenario, we'll focus on the entire process of collecting high-quality data from different sources and load them into pandas dataframes. # # Some of the important sources of data you can explore: # # - Yahoo Finance # - Quandl # - Intrinio # # We are only going to cover Yahoo finance and pandas-datareader in this scenario, but you can check out documentation of the other two. It's pretty simple and straight forward. # # So, let's set up our environment first. # # Let's first execute installation command in the terminal: # # The next step is to install the required libraries that will facilitate data from Yahoo Finance and FRED (Federal Reserve Economic Data). # # + id="z2WE2xtMa_1i" # !pip install yfinance pandas-datareader # + [markdown] id="U80pVHO3brUT" # And our environment is all set up and we are ready to start gathering data now. # + [markdown] id="soe_tzDDbqQv" # Extracting data from Yahoo Finance # With the environment all set now, let's load the end-of-day stock pricing data. For this entire series, we'll be working with the Apple stock prices from 2011-2020. 
# + [markdown] id="7qDKLvAFcA0V" # Now, import the required libraries: # # + id="j1p-7AG5cDtu" import pandas as pd import yfinance as yf # + [markdown] id="Qmp8KUh8cEz3" # To download the data from Yahoo Finance, you need to pass the stock symbol (AAPL in our case), starting date, ending date, and progress to set the progress bar visibility: # # + id="xLD5XnF0cH-a" df_apple = yf.download('AAPL', start='2011-01-01', end='2020-09-30', progress=False) # + [markdown] id="osiUwoV1cJt4" # This will give us the data in a pandas dataframe which we can inspect using: # # + colab={"base_uri": "https://localhost:8080/"} id="MPMtZAfucMhJ" outputId="ed72dfb1-6e9c-4034-8d1a-473c73d23b1c" print(df_apple.head()) # + [markdown] id="8AQZUQJ4c98A" # We have the Apple EOD stock pricing data in a dataframe containing daily Open, High, Low, and Close (OHLC) prices, as well as the adjusted close price and volume. # # Let's now look at another possible method of downloading the historical stock pricing data. # + [markdown] id="ZYRJ8OLHddyo" # # Extracting data from pandas-datareader # # Another way we can download the same data is using the pandas-datareader library that we installed. # # In fact, pandas-datareader provides functions to extract data from various internet sources in a pandas dataframe. Currently, the following sources are supported by them: # # - Tiingo # - IEX # - Alpha Vantage # - Enigma # - Quandl # - St.Louis FED (FRED) # - Kenneth French’s data library # - World Bank # - OECD # - Eurostat # - Thrift Savings Plan # - Nasdaq Trader symbol definitions # - Stooq # - MOEX # - Naver Finance # - For this example, let's try to extract data from Quandl. # # Similar to what we did in the previous step, here's how we'll download data here: # # # # # We have the same Apple stock prices for the same time period with same columns(features). 
# + colab={"base_uri": "https://localhost:8080/"} id="FpzChsAMd4ay" outputId="443a2e7d-d8d1-47e0-fabd-a2b019054ba4" import pandas_datareader.data as pdr df_apple = pdr.DataReader('AAPL', 'quandl', '2011-01-01', '2020-09-30', api_key='yuqp72Y_-GpAsrjQEXfL') # his will give us the data in a pandas dataframe which we can inspect using: # First five rows print(df_apple.head()) # Note: # Incase if an error arises with pdr.DataReader due to the api_key argument, # please create an account on quandl to obtain your own personal API key. # + colab={"base_uri": "https://localhost:8080/"} id="UqjjWGbMeHid" outputId="44bec3cc-b78b-4130-85b4-9f5887d3f701" # Information about the dataframe print(df_apple.info()) # + [markdown] id="GX-71a3Geh32" # # Initial Exploration of Data # We have the data loaded into dataframes but we don't know much about what it contains and what each column represents. # # We saw we have the following columns in the previous step: # # - Date: specifies trading date # - Open: opening price # - High: maximum price during the day # - Low: minimum price during the day # - Close: close price adjusted for splits # - Volume: number of shares traded on a particular day # - Split: number of shares after split / number of shares before # - Dividend: number of shares after dividend / number of shares before # - AdjOpen: adjusted opening price for that day # - AdjHigh: adjusted maximum price for that day # - AdjLow: adjusted lowest price for that day # - AdjClose: adjusted close price adjusted for both dividends and splits # - AdjVolume: adjusted volume of shares traded # # To get a slightly deeper understanding of the prices and volumes, we should have a good understanding of the summary statistics of each column: # + colab={"base_uri": "https://localhost:8080/"} id="AmVJGEtaewY2" outputId="7352c3f9-af8d-40ef-f311-b6b1aa984f05" print(df_apple.describe()) # + [markdown] id="wgSUN6B2e0ep" # Next, we should explore the movement of prices of the Apple stock over the 
years. # # Let's plot the adjusted closing prices on the chart: # + colab={"background_save": true} id="inQylB5Pew02" outputId="7b3fea32-5932-4601-f25d-807c09653dd3" import matplotlib.pyplot as plt fig = plt.figure() plt = df_apple['AdjClose'].plot() fig.savefig('close_plot.png') # + [markdown] id="4FJvL-pbfEz_" # Looking at the summary statistics and movement of prices give us a high-level picture of how the apple stock has been performing for the past ~10 years. # + [markdown] id="6Pw_5S8RfJqX" # # Here's a quick summary of key takeaways from this notebook : # # - We saw how we can get access to historical stock pricing datasets from different credible sources. # - We learned to peek at the data using the head() method and look at its information using the info() method. # - Captured the descriptive statistics option using the describe() method # - and plotted the prices using matplotlib library. ics import confusion_matrix,classification_report print(confusion_matrix(df['Cluster'],kmeans.labels_)) print(classification_report(df['Cluster'],kmeans.labels_)) # Não é tão ruim, considerando o algoritmo está puramente usando as várias para tentar agrupar as universidades em dois grupos distintos. Espero que você possa começar a ver como o K-Means é útil para agrupar dados não rotulados. # ## O Valor ótimo de K # # Muitas vezes no aprendizado supervisionado, temos o interesse de saber qual o melhor valor para o número de grupos. Embora tenhamos escolhido k=2 no exemplo anterior, isso foi apenas para testar se o particionamento tinha sido feito em coerência com as classes já pre-estabelecidas. Como vimos nas métricas, não foi bem assim e não será assim necessáriamente. Os algoritmos de agrupemento irão procurar por estruturas escondidas nos dados, e nem sempre isso será de acordo uma primeira suposição nossa e nem de acordo com rotulos pre-estabelecidos. 
# # Por isso, uma das perguntas que se faz é: "Qual a maneira de encontrar um valor apropriado para o número de grupos?". Para isso podemos usar a regra do joelho ao aplicarmos uma métrica de estabilização de agrupamentos de forma iterativa em várias simulações do algoritmo. # + import numpy as np import matplotlib.pyplot as plt from sklearn import metrics from sklearn.cluster import KMeans from scipy.spatial.distance import cdist mean = [] std = [] X = df.drop('Private',axis=1).values SIMULATION = 3 N_CLUSTERS = 10 for k in range(2, N_CLUSTERS): distortions = [] for i in range(SIMULATION): kmeanModel = KMeans(n_clusters=k) kmeanModel.fit(X) distortions.append(sum(np.min(cdist(X, kmeanModel.cluster_centers_, 'euclidean'), axis=1)) / X.shape[0]) mean.append(np.mean(distortions)) std.append(np.std(distortions)) # - plt.figure(figsize=(12, 7)) plt.errorbar(range(2, N_CLUSTERS), mean, yerr=std) plt.xlabel("Number of Clusters") plt.ylabel("Distortion") plt.title("Choosing the Best K") # Pelo método do joelho, tentamos escolher o menor valor de K que não atrapalha no processo de otimização ou o menor valor de K a partir do qual o processo de otimização fica estagnado. Nesse caso, valores aceitáveis de K poderiam ser K= 6 ou K = 7. accuracy history') plt.xlabel('Epoch') plt.ylabel('Classification accuracy') plt.legend() plt.show() # + from cs231n.vis_utils import visualize_grid # Visualize the weights of the network def show_net_weights(net): W1 = net.params['W1'] W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2) plt.imshow(visualize_grid(W1, padding=3).astype('uint8')) plt.gca().axis('off') plt.show() show_net_weights(net) # - # # Tune your hyperparameters # # **What's wrong?**. Looking at the visualizations above, we see that the loss is decreasing more or less linearly, which seems to suggest that the learning rate may be too low. 
Moreover, there is no gap between the training and validation accuracy, suggesting that the model we used has low capacity, and that we should increase its size. On the other hand, with a very large model we would expect to see more overfitting, which would manifest itself as a very large gap between the training and validation accuracy. # # **Tuning**. Tuning the hyperparameters and developing intuition for how they affect the final performance is a large part of using Neural Networks, so we want you to get a lot of practice. Below, you should experiment with different values of the various hyperparameters, including hidden layer size, learning rate, numer of training epochs, and regularization strength. You might also consider tuning the learning rate decay, but you should be able to get good performance using the default value. # # **Approximate results**. You should be aim to achieve a classification accuracy of greater than 48% on the validation set. Our best network gets over 52% on the validation set. # # **Experiment**: You goal in this exercise is to get as good of a result on CIFAR-10 as you can (52% could serve as a reference), with a fully-connected Neural Network. Feel free implement your own techniques (e.g. PCA to reduce dimensionality, or adding dropout, or adding features to the solver, etc.). # + [markdown] tags=["pdf-inline"] # **Explain your hyperparameter tuning process below.** # # $\color{blue}{\textit Your Answer:}$ # + tags=["code"] best_net = None # store the best model into this ################################################################################# # TODO: Tune hyperparameters using the validation set. Store your best trained # # model in best_net. # # # # To help debug your network, it may help to use visualizations similar to the # # ones we used above; these visualizations will have significant qualitative # # differences from the ones we saw above for the poorly tuned network. 
# # # # Tweaking hyperparameters by hand can be fun, but you might find it useful to # # write code to sweep through possible combinations of hyperparameters # # automatically like we did on the previous exercises. # ################################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # Constants input_size = 32 * 32 * 3 num_classes = 10 # Hyperparameters hidden_size = 81 reg = 0.35 learning_rate = 1e-3 num_iters = 2000 net = TwoLayerNet(input_size, hidden_size, num_classes) # Train the network stats = net.train(X_train, y_train, X_val, y_val, num_iters=num_iters, batch_size=200, learning_rate=learning_rate, learning_rate_decay=0.95, reg=reg, verbose=True) # Predict on the validation set val_acc = (net.predict(X_val) == y_val).mean() print('Validation accuracy: ', val_acc) best_net = net # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # - # visualize the weights of the best network show_net_weights(best_net) # # Run on the test set # When you are done experimenting, you should evaluate your final trained network on the test set; you should get above 48%. test_acc = (best_net.predict(X_test) == y_test).mean() print('Test accuracy: ', test_acc) # + [markdown] tags=["pdf-inline"] # **Inline Question** # # Now that you have trained a Neural Network classifier, you may find that your testing accuracy is much lower than the training accuracy. In what ways can we decrease this gap? Select all that apply. # # 1. Train on a larger dataset. # 2. Add more hidden units. # 3. Increase the regularization strength. # 4. None of the above. # # $\color{blue}{\textit Your Answer:}$ # # $\color{blue}{\textit Your Explanation:}$ # #
13,680
/jupyter notebook/visualization.ipynb
fbb0399a4b6ac805e204dac71735ce3774c4877a
[]
no_license
shakenovdev/Apartment-price-prediction
https://github.com/shakenovdev/Apartment-price-prediction
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
42,803
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Simple Linear Regression, Predict the marks # obtained by a student based on hours of study # Import Pandas for data processing import pandas as pd # Read the CSV file dataset = pd.read_csv('Students01.csv') # display first 5 rows dataset.head() # Split into X (Independent) and Y (predicted) X = dataset.iloc[:, :-1] Y = dataset.iloc[:, -1] # Create the Training and Test datasets from sklearn.model_selection import train_test_split # split the data into training and test 30% test size x_train, x_test, y_train, y_test = \ train_test_split (X, Y, test_size = 0.3, random_state=12) # Train the Simple Linear Regression #import the linear regression lib from sklearn.linear_model import LinearRegression lin_reg = LinearRegression() lin_reg.fit(x_train, y_train) # Predict the results y_predict = lin_reg.predict(x_test) # Get the R-Squared slr_score = lin_reg.score(x_test, y_test) # Coefficient and Intercept lr_coefficient = lin_reg.coef_ lr_intercept = lin_reg.intercept_ # Equation of the line # y = 34.27 + 5.02 * X # Calculate the errors using RMSE from sklearn.metrics import mean_squared_error import math lr_rmse = math.sqrt(mean_squared_error(y_test, y_predict)) # plot the result using matplotlib import matplotlib.pyplot as plt plt.scatter(x_test, y_test) plt.plot(x_test, y_predict) plt.ylim(ymin=0) plt.show()
1,622
/ENSTA/old/TD_correction/td2_tree_and_forest_correction.ipynb
46bb96ac30587d886140f81009d5d852d6b4d275
[]
no_license
stepherbin/teaching
https://github.com/stepherbin/teaching
4
8
null
null
null
null
Jupyter Notebook
false
false
.py
64,744
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="ksmC-Zwx0_tS" colab_type="text" # # TD: prédiction du vote 2016 aux Etats-Unis par arbres de décisions et méthodes ensemblistes # # La séance d'aujourd'hui porte sur la prévision du vote en 2016 aux États-Unis. Précisément, les données d'un recensement sont fournies avec diverses informations par comté à travers les États-Unis. L'objectif est de construire des prédicteurs de leur couleur politique (républicain ou démocrate) à partir de ces données. # + [markdown] id="kbPqcu_Lhfjp" colab_type="text" # Exécuter les commandes suivantes pour charger l'environnement. # + id="X-foHxal1Ops" colab_type="code" colab={} # %matplotlib inline from pylab import * import numpy as np import os import random import matplotlib.pyplot as plt # + [markdown] id="W6D1aUcuQ9ff" colab_type="text" # # Accès aux données # # * Elles sont disponibles: https://github.com/stepherbin/teaching/tree/master/ENSTA/TD2 # # * Charger le fichier the combined_data.csv sur votre drive puis monter le depuis colab # # + id="HnKt3tylQ8Qp" colab_type="code" outputId="932bd9fe-cf4b-467f-c671-388146400ea5" executionInfo={"status": "ok", "timestamp": 1579869986698, "user_tz": -60, "elapsed": 2693, "user": {"displayName": "St\u00e9phane Herbin", "photoUrl": "", "userId": "09364083353920886752"}} colab={"base_uri": "https://localhost:8080/", "height": 35} USE_COLAB = True UPLOAD_OUTPUTS = False if USE_COLAB: # mount the google drive from google.colab import drive drive.mount('/content/drive', force_remount=True) # download data on GoogleDrive data_dir = "/content/drive/My Drive/teaching/ENSTA/TD_tree/" else: data_dir = "data/" # + id="ISKHiOpYuiWR" colab_type="code" colab={} import pandas as pd census_data = pd.read_csv( os.path.join(data_dir, 'combined_data.csv') ) # + [markdown] 
id="v3QJtATomOgi" colab_type="text" # # Analyse préliminaire des données # # Les données sont organisées en champs: # * fips = code du comté à 5 chiffres, le premier ou les deux premiers chiffres indiquent l'état. # * votes = nombre de votants # * etc.. # # Regarder leur structure, quantité, nature. # # Où se trouvent les informations pour former les ensembles d'apprentissage et de test? # # Où se trouvent les classes à prédire? # # Visualiser quelques distributions. # # Le format de données python est décrit ici: # https://pandas.pydata.org/pandas-docs/stable/reference/frame.html # # + id="yAk8Da4lvlQq" colab_type="code" outputId="176e8a26-4cbc-443c-d73c-38a4de670c76" executionInfo={"status": "ok", "timestamp": 1579869986702, "user_tz": -60, "elapsed": 2683, "user": {"displayName": "St\u00e9phane Herbin", "photoUrl": "", "userId": "09364083353920886752"}} colab={"base_uri": "https://localhost:8080/", "height": 917} # Exemples de moyens d'accéder aux caractéristiques des données print(census_data.shape ) print(census_data.columns.values) print(census_data['fips']) print(census_data.head(3)) iattr = 10 attrname = census_data.columns[iattr] print("Mean of {} is {:.1f}".format(attrname,np.array(census_data[attrname]).mean())) ######################### ## METTRE VOTRE CODE ICI ######################### print("Nombre de données = {}".format(census_data.shape[0])) print("Nombre d'attributs utiles = {}".format(census_data.shape[1]-2)) #hist.... # + [markdown] id="cFcoVcNqDL-P" colab_type="text" # La classe à prédire ('Democrat') n'est décrite que par un seul attribut binaire. # Calculer la répartition des couleurs politiques (quel est a priori la probabilité qu'un comté soit démocrate vs. 
républicain) # + id="vks9-75MrFQT" colab_type="code" outputId="b554b066-02b9-4ece-c6b3-487f2e792b85" executionInfo={"status": "ok", "timestamp": 1579869986705, "user_tz": -60, "elapsed": 2677, "user": {"displayName": "St\u00e9phane Herbin", "photoUrl": "", "userId": "09364083353920886752"}} colab={"base_uri": "https://localhost:8080/", "height": 35} ######################### ## METTRE VOTRE CODE ICI ######################### results = np.array( census_data['Democrat'] ) proba_dem = results.mean() proba_rep = 1 - proba_dem print("La probabilité qu'un comté soit démocrate est de {:.2f}%%".format(100*proba_dem)) # + [markdown] id="7zrNDQFDc4uB" colab_type="text" # # Préparation du chantier d'apprentissage # # On va préparer les ensembles d'apprentissage et de test. # # Pour éviter des problèmes de format de données, on choisit une liste d'attributs utiles dans la liste "feature_cols" ci dessous. # # L'ensemble de test sera constitué des comtés d'un seul état. # # Info: https://scikit-learn.org/stable/model_selection.html # # Liste des états et leurs codes FIPS code (2 digits): # https://en.wikipedia.org/wiki/Federal_Information_Processing_Standard_state_code # # # + id="SOrtFFPHT_Pi" colab_type="code" colab={} ## Sous ensembles d'attributs informatifs pour la suite feature_cols = ['BLACK_FEMALE_rate', 'BLACK_MALE_rate', 'Percent of adults with a bachelor\'s degree or higher, 2011-2015', 'ASIAN_MALE_rate', 'ASIAN_FEMALE_rate', '25-29_rate', 'age_total_pop', '20-24_rate', 'Deep_Pov_All', '30-34_rate', 'Density per square mile of land area - Population', 'Density per square mile of land area - Housing units', 'Unemployment_rate_2015', 'Deep_Pov_Children', 'PovertyAllAgesPct2014', 'TOT_FEMALE_rate', 'PerCapitaInc', 'MULTI_FEMALE_rate', '35-39_rate', 'MULTI_MALE_rate', 'Percent of adults completing some college or associate\'s degree, 2011-2015', '60-64_rate', '55-59_rate', '65-69_rate', 'TOT_MALE_rate', '85+_rate', '70-74_rate', '80-84_rate', '75-79_rate', 'Percent of 
adults with a high school diploma only, 2011-2015', 'WHITE_FEMALE_rate', 'WHITE_MALE_rate', 'Amish', 'Buddhist', 'Catholic', 'Christian Generic', 'Eastern Orthodox', 'Hindu', 'Jewish', 'Mainline Christian', 'Mormon', 'Muslim', 'Non-Catholic Christian', 'Other', 'Other Christian', 'Other Misc', 'Pentecostal / Charismatic', 'Protestant Denomination', 'Zoroastrian'] filtered_cols = ['Percent of adults with a bachelor\'s degree or higher, 2011-2015', 'Percent of adults completing some college or associate\'s degree, 2011-2015', 'Percent of adults with a high school diploma only, 2011-2015', 'Density per square mile of land area - Population', 'Density per square mile of land area - Housing units', 'WHITE_FEMALE_rate', 'WHITE_MALE_rate', 'BLACK_FEMALE_rate', 'BLACK_MALE_rate', 'ASIAN_FEMALE_rate', 'Catholic', 'Christian Generic', 'Jewish', '70-74_rate', 'D', 'R'] # + id="C1UqAsUQg1_a" colab_type="code" outputId="a6cf2595-3b7d-4db5-bc79-85e7e3607640" executionInfo={"status": "ok", "timestamp": 1579869987046, "user_tz": -60, "elapsed": 3006, "user": {"displayName": "St\u00e9phane Herbin", "photoUrl": "", "userId": "09364083353920886752"}} colab={"base_uri": "https://localhost:8080/", "height": 164} ## 1-state test split def county_data(census_data, fips_code=17): #fips_code 48=Texas, 34=New Jersey, 31=Nebraska, 17=Illinois, 06=California, 36=New York mask = census_data['fips'].between(fips_code*1000, fips_code*1000 + 999) census_data_train = census_data[~mask] census_data_test = census_data[mask] XTrain = census_data_train[feature_cols] yTrain = census_data_train['Democrat'] XTest = census_data_test[feature_cols] yTest = census_data_test['Democrat'] return XTrain, yTrain, XTest, yTest STATE_FIPS_CODE = 17 X_train, y_train, X_test, y_test = county_data(census_data, STATE_FIPS_CODE) print(X_train.head(2)) print(y_test.head(2)) # + [markdown] id="TaLx1whAl2oS" colab_type="text" # # # Apprentissage d'un arbre de décision # # On utilisera la bibliothèque scikit learn # # * 
Construire l'arbre sur les données d'entrainement # * Prédire le vote sur les comtés de test # * Calculer l'erreur et la matrice de confusion # # Faire varier certains paramètres (profondeur max, pureté, critère...) et visualisez leur influence. # # # Info: https://scikit-learn.org/stable/modules/tree.html # # Info: https://scikit-learn.org/stable/modules/model_evaluation.html # # + id="iY-WE9EQenH8" colab_type="code" colab={} from sklearn import tree ######################### ## METTRE VOTRE CODE ICI ######################### clf = tree.DecisionTreeClassifier(criterion="entropy",max_depth=4,min_samples_split=10) clf = clf.fit( X_train, y_train ) # + [markdown] id="UqWBzMtwc2K8" colab_type="text" # Les instructions suivantes permettent de visualiser l'arbre. # Interpréter le contenu de la représentation. # + id="aaqZalJ6mOya" colab_type="code" outputId="311725d0-1f3d-4ae6-a72c-e21b6eb40a04" executionInfo={"status": "ok", "timestamp": 1579869987050, "user_tz": -60, "elapsed": 2997, "user": {"displayName": "St\u00e9phane Herbin", "photoUrl": "", "userId": "09364083353920886752"}} colab={"base_uri": "https://localhost:8080/", "height": 776} import graphviz dot_data = tree.export_graphviz(clf, out_file=None) graph = graphviz.Source(dot_data) dot_data = tree.export_graphviz(clf, out_file=None, feature_names=X_train.columns.values, class_names=["R","D"], filled=True, rounded=True, special_characters=True) graph = graphviz.Source(dot_data) graph # + id="2p0aaQkMm1pV" colab_type="code" outputId="2dc512f3-5696-41cb-c391-5652109a6490" executionInfo={"status": "ok", "timestamp": 1579869987051, "user_tz": -60, "elapsed": 2990, "user": {"displayName": "St\u00e9phane Herbin", "photoUrl": "", "userId": "09364083353920886752"}} colab={"base_uri": "https://localhost:8080/", "height": 166} # Prédiction et évaluation ######################### ## METTRE VOTRE CODE ICI ######################### preds = clf.predict(X_test) print('Predictions per county in state #'+ str(STATE_FIPS_CODE) 
+ ' are ' + str(preds ) ) print('Votes per county in state #'+ str(STATE_FIPS_CODE) + ' are ' + str(list(y_test) ) ) import sklearn.metrics as perf oa = perf.accuracy_score(y_test, preds) bas = perf.balanced_accuracy_score(y_test, preds) print(oa,bas) cm = perf.confusion_matrix(y_test, preds) print(cm) # + [markdown] id="nx-UmaBEtrFN" colab_type="text" # # --- # # # Bagging # # L'objectif de cette partie est de construire **à la main** une approche de bagging. # # Le principe de l'approche est de: # # * Apprendre et collecter plusieurs arbres sur des échantillonnages aléatoires des données d'apprentissage # * Agréger les prédictions par vote # * Evaluer: Les prédictions agrégées # * Comparer avec les arbres individuels et le résultat précédent # # # Utiliser les fonctions de construction d'ensemble d'apprentissage/test de scikit-learn https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html pour générer les sous-esnembles échantillonnés. # # **Comparer après le cours** les fonctions de scikit-learn: https://scikit-learn.org/stable/modules/ensemble.html # # Numpy tips: [np.arange](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.arange.html), [numpy.sum](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.sum.html), [numpy.mean](https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.mean.html), [numpy.where](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.where.html) # + id="S0Haw1inuPMs" colab_type="code" colab={} from sklearn.model_selection import train_test_split # Données d'apprentissage: X_train, y_train, idx_train # Données de test: X_test, y_test, idx_test # Les étapes de conception du prédicteur (apprentissage) sont les suivantes: # - Construction des sous-ensembles de données # - Apprentissage d'un arbre # - Agrégation de l'arbre dans la forêt # # Pour le test def learn_forest(XTrain, yTrain, nb_trees, depth=15): ######################### ## METTRE VOTRE CODE 
ICI ######################### forest = [] singleperf=[] for ss in range(nb_trees): # bagging for subset X_train_sub, X_test_sub, y_train_sub, y_test_sub = train_test_split( XTrain, yTrain, test_size=0.2 ) # single tree training clf = tree.DecisionTreeClassifier(max_depth=depth, splitter="random") clf = clf.fit( X_train_sub, y_train_sub ) # grow the forest forest.append( clf ) # single tree evaluation curr_train_pred=clf.predict(X_train_sub) curr_test_pred=clf.predict(X_test_sub) singleperf.append([perf.balanced_accuracy_score( y_train_sub, curr_train_pred ), perf.balanced_accuracy_score( y_test_sub,curr_test_pred)]) return forest,singleperf # + id="qyT6zrLRvl-x" colab_type="code" colab={} def predict_forest(forest, XTest, yTest = None): ######################### ## METTRE VOTRE CODE ICI ######################### singleperf=[] all_preds=[] nb_trees = len(forest) for ss in forest:# nb_trees test_pred=ss.predict(XTest) all_preds.append(test_pred) if (yTest is not None): singleperf.append(perf.balanced_accuracy_score( yTest, test_pred )) all_preds=np.array(all_preds) #print(all_preds) # Vote gloup = np.sum(all_preds, axis=0) final_pred = np.where(gloup > nb_trees/2, 1, 0) if (yTest is not None): return final_pred,singleperf else: return final_pred # + id="Bs8kgN1XvnGa" colab_type="code" outputId="41702c02-44d1-4cdd-c416-9b821d7c97df" executionInfo={"status": "ok", "timestamp": 1579870061460, "user_tz": -60, "elapsed": 1011, "user": {"displayName": "St\u00e9phane Herbin", "photoUrl": "", "userId": "09364083353920886752"}} colab={"base_uri": "https://localhost:8080/", "height": 54} ######################### ## METTRE VOTRE CODE ICI ######################### X_train, y_train, X_test, y_test = county_data(census_data, 6) F,singleperf = learn_forest(X_train, y_train, 20, depth=15) pred, singleperftest = predict_forest(F, X_test, y_test) acc = perf.balanced_accuracy_score( y_test, pred ) print("Taux de bonne prédiction final = {:.2f}%".format(100*acc)) print("Moyenne des 
bonnes prédictions individuelles = {:.2f}%".format(100*mean(singleperftest))) #print(singleperftest) #print(singleperf)
15,188
/submission_report.ipynb
c515e73d795c8694bfd63eac230e195344fc4544
[]
no_license
joon1230/Mask_Detection
https://github.com/joon1230/Mask_Detection
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
353,230
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt import seaborn as sns import numpy as np import pandas as pd res = pd.read_csv("../input/data/eval/##BEST##43th_ensemble.csv") res.head() tmp = res.ans.value_counts() print('best') sns.barplot(x=tmp.index, y=tmp.values) def count_labels(path): res = pd.read_csv(path) tmp = res.ans.value_counts() sns.barplot(x=tmp.index, y=tmp.values) plt.show() print('uniform_dist_40th') count_labels('../input/data/eval/40th_EfficientNet_b2500300_train_0.8_ver12_uniformdist_epoch_7.csv') print('best2') count_labels("../input/data/eval/##BEST2##13&26th_submission_ResNet34_oversampling_36.csv") print('best3') count_labels("../input/data/eval/##BEST3##23th_EfficientNet_b2oversample_size_400300_loss_label_weight_totaltwo_14.csv") print('best4') count_labels("../input/data/eval/##BEST4##8th_submission_Inception_size224_31.csv") print('14th sub') count_labels("../input/data/eval/14th_submission_Inception_size224_oversampling_29.csv") print('38th sub') count_labels("../input/data/eval/38th_EfficientNet_b2500300_train_0.8_ver10_pixblur.csv") print('43th sub') count_labels("../input/data/eval/##BEST##43th_ensemble.csv") print('44th sub') count_labels("../input/data/eval/44thResNet50500300_train_0.8_ver16epoch_3.csv") print('56th sub') count_labels("../input/data/eval/56th_EfficientNet_b4350350_train_0.8_ver22epoch_15.csv") '##BEST2##42thEfficientNet_b2500300_train_0.4_ver14_LowTrainRatio/epoch_15.csv', '##BEST3##28th_EfficientNet_b2oversample_size_500300_loss_label_weight_/epoch_20.csv' '##BEST4##13&26th_submission_ResNet34_oversampling_36.csv' '##BEST5##23th_EfficientNet_b2oversample_size_400300_loss_label_weight_totaltwo/epoch14.csv' '38th_EfficientNet_b2500300_train_0.8_ver10_pixblur.csv' 
'47th_EfficientNet_b2400300_train_0.7_ver17/epoch_8.csv' '##BEST5##8th_submission_Inception_size224_31.csv' '53th_EfficientNet_b2350350_train_0.8_ver20epoch_11' '49th_EfficientNet_b4500300_train_0.7_augmentationepoch_12' import os, glob base_path = "../input/data/eval/*.csv" res = glob.glob(base_path) for f in [i for i in res if not('info.csv' in i )]: print(f.split('/')[-1]) count_labels(f) model.fc s[0].flat.measures.stream() # + zs = [] for i in range(len(notes)): if i is 0: zs.append([0, notes[i].quarterLength]) else: zs.append([getIntervalInTones(notes[i-1], notes[i]), notes[i].quarterLength]) #print(zs) d = {} r = range(8,10) for j in r: d[j] = {} for i in range(len(zs)-j): z = str(zs[i:i+j]) if z in d[j]: d[j][z] += 1 else: d[j][z] = 1 #for j in range(2,10): #for i, v in d.items(): #print(v) max(d[j][i] for j in r for (i, v) in d[j].items()) # + phrases = [] phrase_lenght = 5 for i in range(len(zs) - phrase_lenght): phrases.append(zs[i : i + phrase_lenght]) inter = [] dur = [] for ind, phrase in enumerate(phrases): inter.append([]) dur.append([]) for n in phrase: inter[ind].append(n[0]) dur[ind].append(n[1]) print(len(inter)) print(len(dur)) # dist = cdist(inter, dur, 'euclidean') dist_inter = pdist(inter, 'euclidean') dist_dur = pdist(dur, 'euclidean') # - plt.matshow(squareform(dist_inter)) plt.savefig('inter.png', dpi=1000) plt.matshow(squareform(dist_dur)) plt.savefig('dur.png', dpi=1000) # + # print(phrases) # - for i in range(100): x = np.random.choice(notes, 2, replace = False) a, b = x[0], x[1] print(a, b) print(getIntervalInTones(a, b)) print('\n') floor(21.123 * 100) / 100 d=[[1,2], [3,4], [5,6]] str(d)
3,862
/Indexing_Selecting_Assigning.ipynb
49751bdfe56187be9179f23c0fa8fdf9adb80454
[]
no_license
isotop786/Pandas-101
https://github.com/isotop786/Pandas-101
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
113,921
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <div align="center" class="alert alert-info" role="alert"> # <h1> # Sibur Challenge 2020 # <br> # <small class="text-muted">онлайн-чемпионат по анализу данных</small> # </h1> # </div> # <div align="right" class="alert alert-info" role="alert"> # <h3>Евгения Хомякова (в составе команды "+-3сигмы"), 2020/12</h3> # </div> # ## Сопоставление названий # # При поиске новых клиентов СИБУРу приходится обрабатывать информацию о миллионах новых компаний из различных источников. Названия компаний при этом могут иметь разное написание, содержать сокращения или ошибки, быть аффилированными с компаниями, уже известными СИБУРу. # # Для более эффективной обработки информации о потенциальных клиентах, СИБУРу необходимо знать, связаны ли два названия (т.е. принадлежат одной компании или аффилированным компаниям). # # В этом случае СИБУР сможет использовать уже известную информацию о самой компании или об аффилированных компаниях, не дублировать обращения в компанию или не тратить время на нерелевантные компании или дочерние компании конкурентов. # # Тренировочная выборка содержит пары названий из разных источников (в том числе, пользовательских) и разметку. # # Разметка получена частично вручную, частично - алгоритмически. Кроме того, разметка может содержать ошибки. Вам предстоит построить бинарную модель, предсказывающую, являются ли два названия связанными. Метрика, используемая в данной задаче - F1. 
# + import numpy as np import pandas as pd pd.set_option('display.max_rows', 500) import warnings warnings.filterwarnings("ignore", 'This pattern has match groups') import re from tqdm import tqdm import pycountry from transliterate import translit import dask.bag as db import dask.dataframe as dd from dask.diagnostics import ProgressBar ProgressBar().register() import textdistance as td from fuzzywuzzy import fuzz from fuzzywuzzy import process # - # # Загрузка данных train = pd.read_csv("train.csv", index_col="pair_id") test = pd.read_csv("test.csv", index_col="pair_id") train_raw = pd.read_csv("train.csv", index_col="pair_id") test_raw = pd.read_csv("test.csv", index_col="pair_id") train.head() train.info(memory_usage='deep') test.head() test.info(memory_usage='deep') # # Статистика таргета train.is_duplicate.value_counts() train[train.is_duplicate==1].sample(10) train[train.is_duplicate==0].sample(10) # убираем неодинарные пробелы и пробелы в начале и конце строк def clean_spaces(): for df in [train, test]: for name in ['name_1', 'name_2']: df[name] = df[name].str.replace('\s+', ' ', regex=True) df[name] = df[name].str.strip() df[name] = df[name].str.lstrip() # # Очистка данных # ### Убираем аномалии # нижний регистр for df in [train, test]: for name in ['name_1', 'name_2']: df[name] = df[name].str.lower() train.duplicated().sum() test.duplicated().sum() train = train.drop_duplicates() train.sample(15) anomalies = [ # убрать китайское и арабское '[\u4E00-\u9FFF]+|[\u0621-\u064A]+|[\u0627-\u064a]+', # биржевые идентификаторы '\(?tse:\d+\)?', '\(?nyse:.+\)?', '\(?nasdaq.+\)?', '\(?bse:\d+\)?', # 'tax\s?id\s?\d+', # убрать кавычки, апострофы, запятые, звездочки, скобки ',', '/', '\.', '`','`s', '\*', '\+', '\[', '\]', '\(.*\)', '#', ':', '"', '\?', "'s", "'", '-', '«', '»', '&', ] # %%time for a in tqdm(anomalies): for df in [train, test]: df['name_1'] = df['name_1'].str.replace(a, ' ', n=-1, regex=True) df['name_2'] = df['name_2'].str.replace(a, ' ', n=-1, 
regex=True) clean_spaces() # опечатки некоторые исправим for df in [train, test]: for series_name in ['name_1', 'name_2']: df[series_name] = df[series_name].str.replace(r'g\s?m\s?b\s?h', 'gmbh', regex=True) df[series_name] = df[series_name].str.replace('mexico', ' ', regex=True) #очень много где есть, мешает чистке # Сложный случай с ё: в трейне она одна, но заменим везде "ё" ее на "о": train[train.name_1.str.contains('ё') | train.name_2.str.contains('ё')] # А также другие символы с точечками: dots_replace_dict = { 'ё': 'о', "é": "e", "ę": "e", "è": "e", "í": "i", "ú": "u", "ü": "u", "ű": "u", "ö": "o", "ó": "o", "õ": "o", "ő": "o", "ä": "a", "á": "a", "ä": "a", "ã": "a", "ç": "c", "ł": "l", "ñ": "n", "ş": "s" } for key, value in tqdm(dots_replace_dict.items()): for df in [train, test]: df['name_1'] = df['name_1'].str.replace(key, value, regex=True) df['name_2'] = df['name_2'].str.replace(key, value, regex=True) clean_spaces() train.loc[24723] train.duplicated().sum() train = train.drop_duplicates() # ## Проверка полных дубликатов 1 train['full_duplicate'] = train.apply(lambda x: set(x.name_1.split()) == set(x.name_2.split()), axis=1).astype('int') train[(train.full_duplicate == 1) & (train.is_duplicate == 0)].shape # сделаем одинаковый порядок слов train.loc[train.full_duplicate == 1, 'name_1'] = train.loc[train.full_duplicate == 1, 'name_2'].values # проверим полные дубликаты в тесте test['full_duplicate'] = test.apply(lambda x: set(x.name_1.split()) == set(x.name_2.split()), axis=1).astype('int') # сделаем одинаковый порядок слов test.loc[test.full_duplicate == 1, 'name_1'] = test.loc[test.full_duplicate == 1, 'name_2'].values # Отсечем от первого имени одно слово с конца: def del1_word_n1(row): n1 = row.name_1.split() n2 = row.name_2.split() if len(n1) > 1: return set(n1[:-1]) == set(n2) else: return False train['full_duplicate_1'] = train.apply(del1_word_n1, axis=1).astype('int') train[(train.full_duplicate_1 == 1) & (train.is_duplicate == 0)].shape # сделаем 
одинаковый порядок слов train.loc[train.full_duplicate_1 == 1, 'name_1'] = train.loc[train.full_duplicate_1 == 1, 'name_2'].values # отметим такие же дубликаты в тесте test['full_duplicate_1'] = test.apply(del1_word_n1, axis=1).astype('int') # сделаем одинаковый порядок слов test.loc[test.full_duplicate_1 == 1, 'name_1'] = test.loc[test.full_duplicate_1 == 1, 'name_2'].values test[test['full_duplicate_1'] == 1].shape # Класс! # То же самое, но от второго названия уберем одно слово: def del1_word_n2(row): n1 = row.name_1.split() n2 = row.name_2.split() if len(n1) > 1: return set(n1) == set(n2[:-1]) else: return False train['full_duplicate_2'] = train.apply(del1_word_n2, axis=1).astype('int') train[(train.full_duplicate_2 == 1) & (train.is_duplicate == 0)].shape # сделаем одинаковый порядок слов train.loc[train.full_duplicate_2 == 1, 'name_2'] = train.loc[train.full_duplicate_2 == 1, 'name_1'].values # удалим эти столбцы train['label'] = train[['full_duplicate','full_duplicate_1','full_duplicate_2']].max(axis=1) train = train.drop(['full_duplicate', 'full_duplicate_1', 'full_duplicate_2'], axis=1) # отметим такие же дубликаты в тесте test['full_duplicate_2'] = test.apply(del1_word_n2, axis=1).astype('int') # сделаем одинаковый порядок слов test.loc[test.full_duplicate_2 == 1, 'name_2'] = test.loc[test.full_duplicate_2 == 1, 'name_1'].values # в тесте сделаем столбец разметки на основе сравнения test['label'] = test[['full_duplicate','full_duplicate_1','full_duplicate_2']].max(axis=1) test = test.drop(['full_duplicate', 'full_duplicate_1', 'full_duplicate_2'], axis=1) train[(train.label == 1) & (train.is_duplicate == 0)].shape test[test.label==1].shape # ### Продолжаем чистку # Некоторые названия выглядят как 'буква( буква) буква': reg = re.compile('^(\w)\s(\w\s)') train[(train.name_1.str.contains(reg) | train.name_2.str.contains(reg))] # Объединим их: for df in [train, test]: df['name_1'] = df['name_1'].str.replace('^(\w)\s(\w)\s(\w\s?)', '\\1\\2\\3', regex=True) 
df['name_2'] = df['name_2'].str.replace('^(\w)\s(\w)\s(\w\s?)', '\\1\\2\\3', regex=True) df['name_1'] = df['name_1'].str.replace('^(\w)\s(\w\s?)', '\\1\\2', regex=True) df['name_2'] = df['name_2'].str.replace('^(\w)\s(\w\s?)', '\\1\\2', regex=True) train.duplicated().sum() train = train.drop_duplicates() # ## Удаление legal entities # # https://en.wikipedia.org/wiki/List_of_legal_entity_types_by_country # # https://en.wikipedia.org/wiki/Private_limited_company enity_list = [ # все в этом словаре определено исключительно на трейне 'san\s?v?e?\s?tic', # + 'co', #private limited company => ltd in UK and Commonwealth 'private ltd', 'private limited', 'pvt\s?ltd', 'pvt', 'gmbh', #germany 'kft', #hungary 'pty\s?l?t?d?', #australia 'l\s?l\s?c', #usa 'sp\s?z\s?o\s?o', #poland 'pte\s?l?t?d?', #singapore 'lda', #portugal 'pt', #indonesia 'p\s?m\s?e', 'w\s?l\s?l', 'ооо', #russia 'общество\sс\sогр\S*\sответ\S*', 's\s?r\s?o', #slovakia/ceska_republika 'd\s?o\s?o', #west_europe 's\s?i\s?a', #latvia '(e\s?i\s?r)?\s?l\s?t\s?d\s?a', #south_america 'e\s?i\s?r\s?l', #south_america, #eirl 's\s?[pac]\s?r\s?l', #belgium, france/africa 's\s?a\s?s', #sas 'a\s?s', #turkey '(rls)?\s?s?\s?de\s?r\s?l\s?(de)?\s?c\s?v', #mexico (rls) (s) de rl de cv 's?\s?de\s?r\s?l', #mexico '(de)?\s?s?s\s?a de cv', #mexico 's\s?a\s?p\s?i\s?de\s?c\s?v', #mexico #public limited company => plc in UK and Commonwealth 'public ltd', 'p\s?l\s?c', #uk 's\s?a\d?\s?de\s?c\s?v', #mexico + 's\s?a', 'sociedad', 'anonima', '[зо]?ао', #russia 'ag', #germany 's\s?p\s?a', #italy # если после всего этого останется ltd еще 'l\s?t\s?d', 'limited', 'sti', 'l\s?p', 's\s?l\s?u', 'c\s?a', 'b\s?v', 'n\s?v', 'sp\s?k?', '(de)?\s?c\s?v', ] # %%time for le in tqdm(enity_list): le_reg = re.compile('(^|\s+)'+le+'(\s+|$)') for df in [train, test]: df['name_1'] = df['name_1'].str.replace(le_reg, ' ', regex=True) df['name_2'] = df['name_2'].str.replace(le_reg, ' ', regex=True) clean_spaces() train.sample(5) train.duplicated().sum() train = 
train.drop_duplicates() # ## Стоп-слова stop_words = [ 'the', 'of', 'do', 'and', "to", 'e', 've', 'de', 'automotive', 'bank', 'chemicals?', 'company','corporation', 'corp', 'co', 'comerci\S*', "com", 'city', 'distribution', 'equipment', 'exp', 'enterprise\S*', 'electronic\S*','engineering', 'global', 'general', 'group', 'imp', 'importadora', 'international', 'industr\S*', 'inds', 'inc', 'kg', 'logistic\S*', 'lojistik', 'mfg','material\S*', 'plastic\S*', 'products','polymer\S*', 'rubber', 'ram', "supply", "systems", 'solutions', 'sports', 'service\S*', "synthetic", 'sociedad', 'shoes', 'textile', 'trad\S*', 'technolog\S*', 'tech', "transport\S*", 't[iy]re', "united", '\w', #любая одиночная буква 'компания', 'филиал\sкомпании', 'филиал', 'снг', 'рус', ] for stop_word in tqdm(stop_words): stop_word_reg = re.compile('(^|\s+)'+stop_word+'(\s+|$)') for df in [train, test]: for name in ['name_1', 'name_2']: df[name] = df[name].str.replace(stop_word_reg, ' ', regex=True) clean_spaces() train.duplicated().sum() train = train.drop_duplicates() # ### Транслитерация русского russian = re.compile(r'[А-Яа-я]+') (train.name_1.str.contains(russian) | train.name_2.str.contains(russian)).sum() train[train.name_1.str.contains(russian) | train.name_2.str.contains(russian)].sample(5) # Теперь сделаем транслитерацию на строках, содержащих русские слова: # %%time train.loc[train.name_1.str.contains(russian) | train.name_2.str.contains(russian), 'name_1'] = train.loc[train.name_1.str.contains(russian) | train.name_2.str.contains(russian), 'name_1'].apply(lambda x: translit(x, "ru", reversed=True)) train.loc[train.name_1.str.contains(russian) | train.name_2.str.contains(russian), 'name_2'] = train.loc[train.name_1.str.contains(russian) | train.name_2.str.contains(russian), 'name_2'].apply(lambda x: translit(x, "ru", reversed=True)) test.loc[test.name_1.str.contains(russian) | test.name_2.str.contains(russian), 'name_1'] = test.loc[test.name_1.str.contains(russian) | 
test.name_2.str.contains(russian), 'name_1'].apply(lambda x: translit(x, "ru", reversed=True)) test.loc[test.name_1.str.contains(russian) | test.name_2.str.contains(russian),'name_2'] = test.loc[test.name_1.str.contains(russian) | test.name_2.str.contains(russian), 'name_2'].apply(lambda x: translit(x, "ru", reversed=True)) (train.name_1.str.contains(russian) | train.name_2.str.contains(russian)).sum() train.loc[24723] # При транслитерации могли произойти неоднозначные замены, поправим их: # + train['name_1'] = train['name_1'].str.replace('w', 'v', regex=True) train['name_2'] = train['name_2'].str.replace('w', 'v', regex=True) test['name_1'] = test['name_1'].str.replace('w', 'v', regex=True) test['name_2'] = test['name_2'].str.replace('w', 'v', regex=True) train['name_1'] = train['name_1'].str.replace('ks', 'x', regex=True) train['name_2'] = train['name_2'].str.replace('ks', 'x', regex=True) test['name_1'] = test['name_1'].str.replace('ks', 'x', regex=True) test['name_2'] = test['name_2'].str.replace('ks', 'x', regex=True) # - # убираем неодинарные пробелы и пробелы в начале и конце строк clean_spaces() train.sample(10) train.query('is_duplicate == 1').sample(10) train.duplicated().sum() train = train.drop_duplicates() # ## Удаление названий стран # есть имена стран на других языках for df in [train, test]: for series_name in ['name_1', 'name_2']: df[series_name] = df[series_name].str.replace('brasil', 'brazil') df[series_name] = df[series_name].str.replace('czechy', 'czechia') df[series_name] = df[series_name].str.replace('polska', 'poland') df[series_name] = df[series_name].str.replace('mexic\S*', 'mexico', regex=True) df[series_name] = df[series_name].str.replace('deutschland', 'germany') df[series_name] = df[series_name].str.replace('turk', 'turkey') df[series_name] = df[series_name].str.replace('nederland', 'netherlands') countries = [country.name.lower() for country in pycountry.countries] + ['usa?', 'uk', 'america\S*', 'north', 'south', 'europe', 'asia'] # 
%%time for country in tqdm(countries): country_reg = re.compile('(^|\s+)'+country+'(\s+|$)') for df in [train, test]: df['name_1'] = df['name_1'].str.replace(country_reg, ' ', regex=True) df['name_2'] = df['name_2'].str.replace(country_reg, ' ', regex=True) clean_spaces() # ### Убираем названия городов # + active="" # # по-хорошему надо так # from allcities import cities # cities_list = ([city.name.lower() for city in cities.filter(country_code='US')] + # [city.name.lower() for city in cities.filter(country_code='RU')] + # [city.name.lower() for city in cities.filter(country_code='DE')] + # [city.name.lower() for city in cities.filter(country_code='CN')] + # [city.name.lower() for city in cities.filter(country_code='MX')] + # [city.name.lower() for city in cities.filter(country_code='IN')] + # [city.name.lower() for city in cities.filter(country_code='ES')] + # [city.name.lower() for city in cities.filter(country_code='IT')] # ) # # # print(len(cities_list)) # print(cities_list[:10]) # + active="" # cities_list_new = [] # for city in cities_list: # city = city.replace('ā','a').replace('ū','u').replace('ī','i') # cities_list_new.append(city) # - # Названия китайских провинций из википедии https://wikitravel.org/en/List_of_Chinese_provinces_and_regions: ch_prov = ['Anhui', 'Fujian', 'Gansu', 'Guangdong', 'Guizhou','Hainan','Hebe', 'Heilongjiang','Henan','Hubei','Hunan','Jiangsu','Jiangxiv','Jilin', 'Liaoning','Qinghai','Shaanxi','Shanxi','Sichuan','Yunnan','Zhejiang',] ch_prov = [i.lower() for i in ch_prov] cities_list = ['shenzhen', 'shanghai', 'guangzhou', 'guangdong', 'huizhou', #эти названия есть в трейне 'shenyang', 'dongguan', 'qingdao', 'shenzhen', 'zhongshan', 'hangzhou', 'tianjin', 'zhuhai', 'xiamen', 'changshu', "ningbo", 'suzhou', 'uchkurgan' ] + ch_prov + ['hindustan', 'khawaja', 'jindal'] print(len(cities_list)) train['name_1'].str.contains('shanghai').sum() # %%time for city in tqdm(cities_list): train['name_1'] = train['name_1'].str.replace(city, ' ') 
train['name_2'] = train['name_2'].str.replace(city, ' ') test['name_1'] = test['name_1'].str.replace(city, ' ') test['name_2'] = test['name_2'].str.replace(city, ' ') clean_spaces() train['name_1'].str.contains('shanghai').sum() #удалим все цифры (которые отдельным "словом") for df in [train, test]: for col_name in ['name_1', 'name_2']: df[col_name] = df[col_name].str.replace('(^|\s+)\d+(\s+|$)', '', regex=True) train.duplicated().sum() train = train.drop_duplicates() train.to_csv('train_clean.csv', index=True) test.to_csv('test_clean.csv', index=True) # ### Проверка полных дубликатов 2 train['full_duplicate'] = train.apply(lambda x: set(x.name_1.split()) == set(x.name_2.split()), axis=1).astype('int') train[(train.full_duplicate == 1) & ((train.is_duplicate == 0) | (train.label == 0))].shape # сделаем одинаковый порядок слов train.loc[train.full_duplicate == 1, 'name_1'] = train.loc[train.full_duplicate == 1, 'name_2'].values # проверим полные дубликаты в тесте test['full_duplicate'] = test.apply(lambda x: set(x.name_1.split()) == set(x.name_2.split()), axis=1).astype('int') # сделаем одинаковый порядок слов test.loc[test.full_duplicate == 1, 'name_1'] = test.loc[test.full_duplicate == 1, 'name_2'].values # Отсечем от первого имени одно слово с конца: train['full_duplicate_1'] = train.apply(del1_word_n1, axis=1).astype('int') train[(train.full_duplicate_1 == 1) & ((train.is_duplicate == 0) | (train.label == 0))].shape # сделаем одинаковый порядок слов train.loc[train.full_duplicate_1 == 1, 'name_1'] = train.loc[train.full_duplicate_1 == 1, 'name_2'].values # отметим такие же дубликаты в тесте test['full_duplicate_1'] = test.apply(del1_word_n1, axis=1).astype('int') # сделаем одинаковый порядок слов test.loc[test.full_duplicate_1 == 1, 'name_1'] = test.loc[test.full_duplicate_1 == 1, 'name_2'].values test[test['full_duplicate_1'] == 1].shape # То же самое, но от второго названия уберем одно слово: train['full_duplicate_2'] = train.apply(del1_word_n2, 
axis=1).astype('int') train[(train.full_duplicate_2 == 1) & ((train.is_duplicate == 0) | (train.label == 0))].shape # сделаем одинаковый порядок слов train.loc[train.full_duplicate_2 == 1, 'name_2'] = train.loc[train.full_duplicate_2 == 1, 'name_1'].values # удалим эти столбцы train['label'] = train[['label', 'full_duplicate','full_duplicate_1','full_duplicate_2']].max(axis=1) train = train.drop(['full_duplicate', 'full_duplicate_1', 'full_duplicate_2'], axis=1) # отметим такие же дубликаты в тесте test['full_duplicate_2'] = test.apply(del1_word_n2, axis=1).astype('int') # сделаем одинаковый порядок слов test.loc[test.full_duplicate_2 == 1, 'name_2'] = test.loc[test.full_duplicate_2 == 1, 'name_1'].values # в тесте сделаем столбец разметки на основе сравнения test['label'] = test[['label','full_duplicate','full_duplicate_1','full_duplicate_2']].max(axis=1) test = test.drop(['full_duplicate', 'full_duplicate_1', 'full_duplicate_2'], axis=1) train[(train.label == 1) & (train.is_duplicate == 0)].shape test[test.label==1].shape # ### Проверка дубликатов первого слова def check_first_words(row): s1 = row['name_1'] s2 = row['name_2'] try: fw1 = s1.split()[0] except Exception: fw1 = s1 try: fw2 = s2.split()[0] except Exception: fw2 = s2 return fw1 == fw2 # %%time train['first_word_duplicate'] = train.apply(check_first_words, axis=1).astype("int") test['first_word_duplicate'] = test.apply(check_first_words, axis=1).astype("int") train.head() train['label'] = train[['label', 'first_word_duplicate']].max(axis=1) train = train.drop(['first_word_duplicate'], axis=1) train[(train.label == 1) & (train.is_duplicate == 0)].shape test['label'] = test[['label', 'first_word_duplicate']].max(axis=1) test = test.drop(['first_word_duplicate'], axis=1) test[test.label==1].shape # + train["org_name_1"] = train_raw["name_1"] train["org_name_2"] = train_raw["name_2"] test["org_name_1"] = test_raw["name_1"] test["org_name_2"] = test_raw["name_2"] # - # # Метрики сходства строк # ### 
Расстояния # + # textdistance td_names = [ 'mlipns', 'hamming', 'hamming_norm', 'levenshtein', 'levenshtein_norm', 'damerau_levenshtein', 'jaro_winkler', 'strcmp95', 'tanimoto', 'monge_elkan', 'lcsseq', 'lcsstr', 'needleman_wunsch', 'needleman_wunsch_norm', 'gotoh', 'smith_waterman', 'smith_waterman_norm', 'ratcliff_obershelp', 'cosine', 'jaccard', 'sorensen' ] td_methods = [ td.mlipns.normalized_similarity, td.hamming.similarity, td.hamming.normalized_similarity, td.levenshtein.similarity, td.levenshtein.normalized_similarity, td.damerau_levenshtein.normalized_similarity, td.jaro_winkler.normalized_similarity, td.strcmp95.normalized_similarity, td.tanimoto.normalized_similarity, td.monge_elkan.normalized_similarity, td.lcsseq.normalized_similarity, td.lcsstr.normalized_similarity, td.needleman_wunsch.similarity, td.needleman_wunsch.normalized_similarity, td.gotoh.normalized_similarity, td.smith_waterman.normalized_similarity, td.ratcliff_obershelp.similarity, td.cosine.similarity, td.jaccard.similarity, td.sorensen.similarity ] # - def td_distance_count(row, method, name): n1, n2 = row['name_1'], row['name_2'] if n1 == "" and n2 == "": n1, n2 = row['org_name_1'], row['org_name_2'] if name in ['jaccard', 'sorensen']: return method(n1.split(' '), n2.split(' ')) return method(''.join(n1.split(' ')), ''.join(n2.split(' '))) # + # fuzz fuzz_names = [ 'ratio', 'partial_ratio', 'token_sort_ratio', 'token_set_ratio' ] fuzz_methods = [ fuzz.ratio, fuzz.partial_ratio, fuzz.token_sort_ratio, fuzz.token_set_ratio ] # - def fuzz_distance_count(row, method, name): n1, n2 = row['name_1'], row['name_2'] if n1 == "" and n2 == "": n1, n2 = row['org_name_1'], row['org_name_2'] return method(''.join(n1.split(' ')), ''.join(n2.split(' ')))/100 # ddf для ускорения apply train_ddf = dd.from_pandas(train, npartitions=4) test_ddf = dd.from_pandas(test, npartitions=2) # %%time for name, method in zip(td_names, td_methods): print(name) train[name] = train_ddf.apply(td_distance_count, 
axis=1, args=(method, name), meta=(name, 'float32')) test[name] = test_ddf.apply(td_distance_count, axis=1, args=(method, name), meta=(name, 'float32')) # %%time for name, method in zip(fuzz_names, fuzz_methods): print(name) train[name] = train_ddf.apply(fuzz_distance_count, axis=1, args=(method, name), meta=(name, 'float32')) test[name] = test_ddf.apply(fuzz_distance_count, axis=1, args=(method, name), meta=(name, 'float32')) train.to_csv('train_with_features.csv') test.to_csv('test_with_features.csv')
24,298
/docs/extras/modules/data_connection/vectorstores/integrations/singlestoredb.ipynb
1276a8213cb1c274e5aaf1c742507c0f75ae21f0
[ "MIT" ]
permissive
hwchase17/langchain
https://github.com/hwchase17/langchain
52,928
7,074
null
null
null
null
Jupyter Notebook
false
false
.py
4,404
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # SingleStoreDB # >[SingleStoreDB](https://singlestore.com/) is a high-performance distributed SQL database that supports deployment both in the [cloud](https://www.singlestore.com/cloud/) and on-premises. It provides vector storage, and vector functions including [dot_product](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/dot_product.html) and [euclidean_distance](https://docs.singlestore.com/managed-service/en/reference/sql-reference/vector-functions/euclidean_distance.html), thereby supporting AI applications that require text similarity matching. # # This tutorial illustrates how to [work with vector data in SingleStoreDB](https://docs.singlestore.com/managed-service/en/developer-resources/functional-extensions/working-with-vector-data.html). # Establishing a connection to the database is facilitated through the singlestoredb Python connector. # Please ensure that this connector is installed in your working environment. # !pip install singlestoredb # + import os import getpass # We want to use OpenAIEmbeddings so we have to get the OpenAI API Key. 
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") # - from langchain.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter from langchain.vectorstores import SingleStoreDB from langchain.document_loaders import TextLoader # + # Load text samples loader = TextLoader("../../../state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() # - # There are several ways to establish a [connection](https://singlestoredb-python.labs.singlestore.com/generated/singlestoredb.connect.html) to the database. You can either set up environment variables or pass named parameters to the `SingleStoreDB constructor`. Alternatively, you may provide these parameters to the `from_documents` and `from_texts` methods. # + # Setup connection url as environment variable os.environ["SINGLESTOREDB_URL"] = "root:pass@localhost:3306/db" # Load documents to the store docsearch = SingleStoreDB.from_documents( docs, embeddings, table_name="notebook", # use table with a custom name ) # - query = "What did the president say about Ketanji Brown Jackson" docs = docsearch.similarity_search(query) # Find documents that correspond to the query print(docs[0].page_content)
2,770
/text_cluster/.ipynb_checkpoints/文本聚类之数据预处理-checkpoint.ipynb
9c0410f80445a5ce5c0ef019fc8c8f9ce3138c4b
[]
no_license
ap1024/nlp-demo
https://github.com/ap1024/nlp-demo
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
258,945
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # TCGA Random Forest Machine Learning # + # Import Libraries import gzip import math import numpy as np import pandas as pd import seaborn as sns import matplotlib import matplotlib.pyplot as plt import random from scipy import stats from pprint import pprint # Import PCA libraries from sklearn.decomposition import PCA from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from IPython.display import Image import umap from sklearn.manifold import TSNE from sklearn.cluster import KMeans from sklearn.preprocessing import StandardScaler from sklearn.metrics import silhouette_score # Import ML libraries from sklearn.model_selection import cross_val_score from sklearn.metrics import classification_report, confusion_matrix from sklearn.model_selection import train_test_split,KFold, StratifiedKFold from sklearn.ensemble import RandomForestClassifier from sklearn import model_selection from sklearn.model_selection import cross_val_score from sklearn.preprocessing import LabelEncoder from sklearn.pipeline import Pipeline from sklearn.metrics import confusion_matrix from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline from sklearn.metrics import roc_curve, auc print("sucess!") # - # ## Load in our data # + # Load in Data + Trnasform Direction df=pd.read_csv('log2transformed_and_normalized_gene_expression_data.csv') # load df=df.transpose() genes=df.loc['Unnamed: 0'].to_list() # saves all genes as list df.columns=genes samples=df.index.to_list() samples=samples[1:] # removes the "New_Gene" Label df=df.drop(['Unnamed: 0']).reset_index(drop=True) # remove gene column df.index=samples # set index names df=df.astype(float) # weird nonsense step thats needed ~sometimes~ df.head() # show results # - # ### Using Gene 
Signature # + df_gene_sig=df[['TREML2', 'PTPRN', 'PGLYRP1', 'NOG', 'VIP',\ 'RIMKLB', 'NKAIN4', 'FAM171B', 'ZNF417', 'GLRA2', 'HOXA7', 'FABP6', 'MUSK',\ 'HTR6', 'GRIP2', 'VEGFA', 'AKAP12', 'RHEB', 'PMEPA1','GLTP', 'METTL7A',\ 'CITED2', 'SCARA5', 'CDH3','IL6R', 'PKIB', 'GLP2R', 'EPB41L3', 'NR3C2']] # Genes not found from gene signature (possible naming issues) # PADI4, NCKIPSD C5orf53 TREML3 HES5 OR8D2 KLRK1 NCRNA00152 PPAP2A LINC00974 df_gene_sig.head(10) # - # ### Using DESeq2 # + df_deseq=pd.read_csv('top_genes.csv') # load df_deseq.drop(labels=['baseMean','lfcSE','stat','pvalue'],axis=1,inplace=True) df_deseq.rename(columns={'Unnamed: 0':'genes'},inplace=True) #num_gens=100; #df_deseq=df_deseq[:num_gens].copy() print(np.shape(df_deseq)) df_deseq.head() df_SEQ=df[df_deseq['genes'].to_list()] df_SEQ.head() # - # ### Using Random Genes num_genes=100 genes=df.columns.to_list() # generate random genes random.seed(32) n = random.sample(range(0, len(genes)), num_genes) rand_genes=[genes[i] for i in n] # subset dataset rand_df=df[rand_genes].copy() # ## Extract tissue sample from columns (healthy or tumor) # + # Create Target vector target=[] sample_names=[] # loop through and extract tumor + healthy for i in samples: temp=i.split('_')[1] # Split string using "_" #remove numbers at end (if they have it) if temp[-1].isnumeric(): temp=temp[:-1] target.append(temp)# save tissues type to list (in order) # Convert to Panda Series target=pd.Series(target,name='Tissue') # + # Convert to binary tumor=(1) + healthy=(0) target_binary=[] for i in target: if i=='Healthy': target_binary.append(0) else: target_binary.append(1) # Convert to Panda Series target_binary=pd.Series(target_binary) # - # create colors labels for PCA colors=[] for i in target_binary: if i ==1: colors.append('orange') else: colors.append('blue') # ## Vizualize data in a PCA Plots # https://towardsdatascience.com/explaining-k-means-clustering-5298dc47bad6 # ### PCA Plot Using All Genes # + # Generate Colors + 
numbers num_list=range(0,54) # PCA pca = PCA(n_components=2) X_r = pca.fit(df).transform(df) # Plot for color, i, target_name in zip(colors, num_list, samples): plt.scatter(X_r[i,0], X_r[i,1], color=color, alpha=.8, lw=2, label=target_name) plt.title('PCA of Samples Using All Genes', fontsize=18, fontweight='black', color = '#333F4B') plt.axvline(x=0,linestyle='--',c='k') plt.axhline(y=0,linestyle='--',c='k') plt.tick_params(axis='y', which='major', labelsize=14) plt.tick_params(axis='x', which='major', labelsize=14) plt.grid(True) plt.xlabel('Component 1', fontsize=14, fontweight='black', color = '#333F4B') plt.ylabel('Component 2', fontsize=14, fontweight='black', color = '#333F4B') # - # ### PCA Plot Using Gene Signature # + # Generate Colors + numbers num_list=range(0,54) # PCA pca = PCA(n_components=2) X_r = pca.fit(df_gene_sig).transform(df_gene_sig) # Plot for color, i, target_name in zip(colors, num_list, samples): plt.scatter(X_r[i,0], X_r[i,1], color=color, alpha=.8, lw=2, label=target_name) plt.title('PCA of Samples Using Gene Signature', fontsize=18, fontweight='black', color = '#333F4B') plt.axvline(x=0,linestyle='--',c='k') plt.axhline(y=0,linestyle='--',c='k') plt.tick_params(axis='y', which='major', labelsize=14) plt.tick_params(axis='x', which='major', labelsize=14) plt.grid(True) plt.xlabel('Component 1', fontsize=14, fontweight='black', color = '#333F4B') plt.ylabel('Component 2', fontsize=14, fontweight='black', color = '#333F4B') # - # ### PCA Plot Using DESeq2 Signature # + # Generate Colors + numbers num_list=range(0,54) # PCA pca = PCA(n_components=2) X_r = pca.fit(df_SEQ).transform(df_SEQ) # Plot for color, i, target_name in zip(colors, num_list, samples): plt.scatter(X_r[i,0], X_r[i,1], color=color, alpha=.8, lw=2, label=target_name) plt.title('PCA of Samples Using DESeq2 Signature', fontsize=18, fontweight='black', color = '#333F4B') plt.axvline(x=0,linestyle='--',c='k') plt.axhline(y=0,linestyle='--',c='k') plt.tick_params(axis='y', 
which='major', labelsize=14) plt.tick_params(axis='x', which='major', labelsize=14) plt.grid(True) plt.xlabel('Component 1', fontsize=14, fontweight='black', color = '#333F4B') plt.ylabel('Component 2', fontsize=14, fontweight='black', color = '#333F4B') # - # ## Random Forest Model # https://towardsdatascience.com/hyperparameter-tuning-the-random-forest-in-python-using-scikit-learn-28d2aa77dd74 # to make ROC plots def plot_roc_curve(fpr, tpr, data_type, test_or_train, auc_k): plt.plot(fpr, tpr, color='orange', label=('ROC ' + data_type +' (AUC = '+str(auc_k)+")")) plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--', label='Random Guess') plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC Curve' + ' ' + test_or_train + ' ' + data_type) plt.legend() plt.show() # ### Create, Train, and Test Model with Gene Signature # #### Split Into Training and Test Data # + # independent dataset : df_gene_sig : features that determine if patient has cancer # dependent dataset : target_bianry : diagnosis x_train, x_test, y_train, y_test = train_test_split(df_gene_sig, target_binary, test_size=15,shuffle=True, random_state=12) # display size of print('Our Training Data Set has %d patient samples'%(np.shape(x_train)[0])) print('Our Testing Data Set has %d patient samples'%(np.shape(x_test)[0])) # - # #### Train Data # + # X =df_gene_sig, y = target_binary, # x_train, x_test, y_train, y_test print('\nTrain Data:') # Initialize random forest mode rfc = RandomForestClassifier() # Train model on training data rfc.fit(x_train, y_train) # Get the reults of the training data results = rfc.predict(x_train) # Create Confusion matrix using prediction + truth cm=confusion_matrix(y_train, results) print(cm) # Generate ROC Curve fpr_rfc, tpr_rfc, thresholds_rfc = roc_curve(y_train, results) # Calculate AUC (Area under the ROC Curve ) auc_k = auc(fpr_rfc, tpr_rfc) print('AUC: %.4f'%(auc_k)) #predict probs for test data probs = rfc.predict_proba(x_test) # keep 
probs of positive calss only probs = probs[:, 1] # plot ROC curve usinf function plot_roc_curve(fpr_rfc, tpr_rfc, 'Gene Signature', 'Train', auc_k) # - # #### Test Data # + # Test Set - now that we've trained, we can now use test set to print('\nTest Data:') # run it foward - to predict using TEST results=rfc.predict(x_test) # Create Confusion matrix using prediction + truth cm=confusion_matrix(y_test, results) print(cm) # Generate ROC Curve fpr_rfc, tpr_rfc, thresholds_rf = roc_curve(y_test, results) # Calculate AUC (Area under the ROC Curve ) auc_k = auc(fpr_rfc, tpr_rfc) print('AUC: %.4f'%(auc_k)) # label for plot plt.plot(fpr_rfc, tpr_rfc) #predict probs for test data probs = rfc.predict_proba(x_test) # keep probs of positive calss only probs = probs[:, 1] # plot ROC curve usinf function plot_roc_curve(fpr_rfc, tpr_rfc, 'Gene Signature', 'Test', auc_k) # - # ### Create, Train, and Test Model with DESeq2 Signature # #### Split into trainnig and testing data # + # Re-split Data - this doesn't chnage rows but since we're using more genes we need to re-split to capture # Split Into Training and Test Data x_train, x_test, y_train, y_test = train_test_split(df_SEQ, target_binary, test_size=15,shuffle=True, random_state=12) print('Our Training Data Set has %d patient samples'%(np.shape(x_train)[0])) print('Our Testing Data Set has %d patient samples'%(np.shape(x_test)[0])) # - # #### Train Data # + # X =df_gene_sig, y = target_binary, # x_train, x_test, y_train, y_test print('\nTrain Data:') # Initialize random forest mode rfc = RandomForestClassifier() # Train model on training data rfc.fit(x_train, y_train) # Get the reults of the training data results = rfc.predict(x_train) # Create Confusion matrix using prediction + truth cm=confusion_matrix(y_train, results) print(cm) # Generate ROC Curve fpr_rfc, tpr_rfc, thresholds_rfc = roc_curve(y_train, results) # Calculate AUC (Area under the ROC Curve ) auc_k = auc(fpr_rfc, tpr_rfc) print('AUC: %.4f'%(auc_k)) # plot ROC 
curve usinf function plot_roc_curve(fpr_rfc, tpr_rfc, 'DESeq2 Genes', 'Train', auc_k) # - # #### Test Data # + # Test Set - now that we've trained, we can now use test set to print('\nTest Data:') # run it foward - to predict using TEST results=rfc.predict(x_test) # Create Confusion matrix using prediction + truth cm=confusion_matrix(y_test, results) print(cm) # Generate ROC Curve fpr_rfc, tpr_rfc, thresholds_rf = roc_curve(y_test, results) # Calculate AUC (Area under the ROC Curve ) auc_k = auc(fpr_rfc, tpr_rfc) print('AUC: %.4f'%(auc_k)) # label for plot plt.plot(fpr_rfc, tpr_rfc) # plot ROC curve usinf function plot_roc_curve(fpr_rfc, tpr_rfc, 'DESeq2', 'Test', auc_k) # - # ### Create, Train, and Test Model with Random Genes # #### Split Into Training and Test Data # + # Re-split Data - this doesn't chnage rows but since we're using more genes we need to re-split to capture # Split Into Training and Test Data x_train, x_test, y_train, y_test = train_test_split(rand_df, target_binary, test_size=15,shuffle=True, random_state=12) print('Our Training Data Set has %d patient samples'%(np.shape(x_train)[0])) print('Our Testing Data Set has %d patient samples'%(np.shape(x_test)[0])) # - # #### Train Data # + # X =df_gene_sig, y = target_binary, # x_train, x_test, y_train, y_test print('\nTrain Data:') # Initialize random forest mode rfc = RandomForestClassifier() # Train model on training data rfc.fit(x_train, y_train) # Get the reults of the training data results = rfc.predict(x_train) # Create Confusion matrix using prediction + truth cm=confusion_matrix(y_train, results) print(cm) # Generate ROC Curve fpr_rfc, tpr_rfc, thresholds_rfc = roc_curve(y_train, results) # Calculate AUC (Area under the ROC Curve ) auc_k = auc(fpr_rfc, tpr_rfc) print('AUC: %.4f'%(auc_k)) # plot ROC curve usinf function plot_roc_curve(fpr_rfc, tpr_rfc, 'Random Genes', 'Train', auc_k) # - # #### Test Data # + # Test Set - now that we've trained, we can now use test set to print('\nTest Data:') 
# run it foward - to predict using TEST results=rfc.predict(x_test) # Create Confusion matrix using prediction + truth cm=confusion_matrix(y_test, results) print(cm) # Generate ROC Curve fpr_rfc, tpr_rfc, thresholds_rf = roc_curve(y_test, results) # Calculate AUC (Area under the ROC Curve ) auc_k = auc(fpr_rfc, tpr_rfc) print('AUC: %.4f'%(auc_k)) # label for plot plt.plot(fpr_rfc, tpr_rfc) # plot ROC curve usinf function plot_roc_curve(fpr_rfc, tpr_rfc, 'Random Genes', 'Test', auc_k) # - # ## Plot of Combined Results # ## Cross Validation
12,948
/sales_analysis/Notebooks/Consulting Data Analysis-Copy1.ipynb
3d363bcf21b9de74f17c9b032880d90f264b2023
[]
no_license
cg1122/bip-python-unh
https://github.com/cg1122/bip-python-unh
0
0
null
2021-03-03T01:45:07
2021-01-31T19:04:56
null
Jupyter Notebook
false
false
.py
31,642
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd pd.__version__ # + contracts = pd.read_csv (r'../data/contracts.csv') pd.Index([u'contract_id',u'customer_id',u'contract_start',u'contract_term',u'deposit',u'monthly_amt'], dtype = 'object') print(contracts) customers = pd.read_csv (r'../data/customers.csv') pd.Index([u'id',u'company',u'sector',u'industry',u'email',u'address',u'city',u'state',u'zip'], dtype = 'object') print(customers) merged_df = pd.merge(left=customers, right=contracts, how='left', left_on='id', right_on='customer_id') print(merged_df) # Total Sales merged_df.groupby('state').sum('monthly_amt') # + # Changing Data Types customers['id'] = customers['id'].astype(str) contracts['customer_id'] = contracts['customer_id'].astype(str) # Merge/Join to one DataFrame df = pd.merge(customers, contracts, left_on='id', right_on='customer_id') # Fix Dollar Signs & Commas & Convert to Float df['deposit'] = df['deposit'].replace('[$,()]', '', regex=True) df['deposit'] = df['deposit'].astype(float) # Calculate Total Sales By State total_sales = df.groupby(['state'])['deposit'].agg('sum') total_sales = df.groupby(['state'])['deposit'].agg('mean', 'sum') pd.set_option('display.max_columns', None) total_sales.columns = ['state', 'total sales'] print(total_sales) # + import pandas as pd unique_companies = df.drop_duplicates(subset=['company']) pd.Series.nunique(unique_companies['company']) # - print(df.value_counts('state')) unique_companies.value_counts('state') # + avg_sales = df.groupby(['state'])['deposit'].agg('mean','sum') avg_sales / df.value_counts('state') # - After analyzing the data, I think the best place for a headquarters would be TX. It has the highest value count and also the highest unique companies
2,003
/Decision Tree/Decision Tree(health care stroke dataset).ipynb
a7abf82eddeed8e9d0b62cb8f2aa713e8b331b8f
[]
no_license
sanjoysutradhar/Machine-Learning-alogoritms
https://github.com/sanjoysutradhar/Machine-Learning-alogoritms
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
1,459
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import graphviz from sklearn.tree import DecisionTreeClassifier, export_graphviz from sklearn import tree from sklearn.model_selection import train_test_split,GridSearchCV from sklearn.preprocessing import StandardScaler from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve, roc_auc_score from sklearn.externals.six import StringIO from IPython.display import Image from sklearn.tree import export_graphviz import pydotplus data = pd.read_csv("healthcare-dataset-stroke-data.csv") data data.describe()
813
/Py3 - Functions, Files, and Dictionaries.ipynb
4bdb514d0d608792c8c0f02b1e4a631366d79233
[]
no_license
bgitar/Python-3
https://github.com/bgitar/Python-3
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
80,234
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + ####################################################################################################################### # # # # Week 1 - files and CSV # # # ####################################################################################################################### # + #The textfile, travel_plans.txt, contains the summer travel plans for someone with some commentary. #Find the total number of characters in the file and save to the variable num. travel_plans = open("travel_plans.txt", 'r') lines = travel_plans.read() num = len(lines) print(num) # + #We have provided a file called emotion_words.txt that contains lines of words that describe emotions. #Find the total number of words in the file and assign this value to the variable num_words. num_words = 0 with open("emotion_words.txt", 'r') as emotions_file: for line in emotions_file: words = line.split() num_words += len(words) print(num_words) # + #Assign to the variable num_lines the number of lines in the file school_prompt.txt. num_lines = 0 with open("school_prompt.txt", 'r') as school_file: for line in school_file: num_lines += 1 print(num_lines) # + #Assign the first 30 characters of school_prompt.txt as a string to the variable beginning_chars. with open("school_prompt.txt", 'r') as school_file: beginning_chars = (school_file.read()[:30]) print(beginning_chars) # + #Challenge: Using the file school_prompt.txt, assign the third word of every line to a list called three. three = [] with open("school_prompt.txt", 'r') as school_file: three = [line.split()[2] for line in school_file] print(three) # + #Challenge: Create a list called emotions that contains the first word of every line in emotion_words.txt. 
emotions = [] emotion_file = open('emotion_words.txt', 'r') lines = emotion_file.readlines() for line in lines: a = line.split() emotions.append(a[0]) print(emotions) # + #Assign the first 33 characters from the textfile, travel_plans.txt to the variable first_chars. travel_file = open('travel_plans.txt', 'r') first_chars = (travel_file.read()[:33]) print(first_chars) # + #Challenge: Using the file school_prompt.txt, if the character ‘p’ is in a word, then add the word to a list called p_words. p_words = [] school_file = open('school_prompt.txt', 'r') words = school_file.read().split() #print(words) for word in words: if 'p' in word: p_words.append(word) print(p_words) # + ####################################################################################################################### # # # # Week 2 - Dictionaries # # # ####################################################################################################################### # + #Dictionary Methods Assesment #At the halfway point during the Rio Olympics, the United States had 70 medals, #Great Britain had 38 medals, China had 45 medals, Russia had 30 medals, and Germany had 17 medals. #Create a dictionary assigned to the variable medal_count with the country names as the keys and #the number of medals the country had as each key’s value. medal_count = {'United States': 70, 'Great Britain': 38, 'China': 45, 'Russia': 30, 'Germany': 17} # + #Given the dictionary swimmers, add an additional key-value pair to the dictionary with "Phelps" as the key and the integer 23 as the value. #Do not rewrite the entire dictionary. swimmers = {'Manuel':4, 'Lochte':12, 'Adrian':7, 'Ledecky':5, 'Dirado':4} swimmers['Phelps'] = 23 # + #Add the string “hockey” as a key to the dictionary sports_periods and assign it the value of 3. #Do not rewrite the entire dictionary. 
sports_periods = {'baseball': 9, 'basketball': 4, 'soccer': 4, 'cricket': 2} sports_periods['hockey'] = 3 # + #The dictionary golds contains information about how many gold medals each country won in the 2016 Olympics. #But today, Spain won 2 more gold medals. Update golds to reflect this information. golds = {"Italy": 12, "USA": 33, "Brazil": 15, "China": 27, "Spain": 19, "Canada": 22, "Argentina": 8, "England": 29} golds["Spain"] = golds["Spain"] + 2 # + #Create a list of the countries that are in the dictionary golds, and assign that list to the variable name countries. #Do not hard code this. golds = {"Italy": 12, "USA": 33, "Brazil": 15, "China": 27, "Spain": 19, "Canada": 22, "Argentina": 8, "England": 29} countries = [] for key in golds: countries.append(key) print(countries) # + #Provided is the dictionary, medal_count, which lists countries and their respective medal count at the halfway point in the 2016 Rio Olympics. #Using dictionary mechanics, assign the medal count value for "Belarus" to the variable belarus. #Do not hardcode this. medal_count = {'United States': 70, 'Great Britain':38, 'China':45, 'Russia':30, 'Germany':17, 'Italy':22, 'France': 22, 'Japan':26, 'Australia':22, 'South Korea':14, 'Hungary':12, 'Netherlands':10, 'Spain':5, 'New Zealand':8, 'Canada':13, 'Kazakhstan':8, 'Colombia':4, 'Switzerland':5, 'Belgium':4, 'Thailand':4, 'Croatia':3, 'Iran':3, 'Jamaica':3, 'South Africa':7, 'Sweden':6, 'Denmark':7, 'North Korea':6, 'Kenya':4, 'Brazil':7, 'Belarus':4, 'Cuba':5, 'Poland':4, 'Romania':4, 'Slovenia':3, 'Argentina':2, 'Bahrain':2, 'Slovakia':2, 'Vietnam':2, 'Czech Republic':6, 'Uzbekistan':5} belarus = medal_count['Belarus'] # + #The dictionary total_golds contains the total number of gold medals that countries have won over the course of history. #Use dictionary mechanics to find the number of golds Chile has won, and assign that number to the variable name chile_golds. #Do not hard code this! 
total_golds = {"Italy": 114, "Germany": 782, "Pakistan": 10, "Sweden": 627, "USA": 2681, "Zimbabwe": 8, "Greece": 111, "Mongolia": 24, "Brazil": 108, "Croatia": 34, "Algeria": 15, "Switzerland": 323, "Yugoslavia": 87, "China": 526, "Egypt": 26, "Norway": 477, "Spain": 133, "Australia": 480, "Slovakia": 29, "Canada": 22, "New Zealand": 100, "Denmark": 180, "Chile": 13, "Argentina": 70, "Thailand": 24, "Cuba": 209, "Uganda": 7, "England": 806, "Denmark": 180, "Ukraine": 122, "Bahamas": 12} chile_golds = total_golds.get("Chile") # + #Provided is a dictionary called US_medals which has the first 70 metals that the United States has won in 2016, and in which category they have won it in. #Using dictionary mechanics, assign the value of the key "Fencing" to a variable fencing_value. #Remember, do not hard code this. US_medals = {"Swimming": 33, "Gymnastics": 6, "Track & Field": 6, "Tennis": 3, "Judo": 2, "Rowing": 2, "Shooting": 3, "Cycling - Road": 1, "Fencing": 4, "Diving": 2, "Archery": 2, "Cycling - Track": 1, "Equestrian": 2, "Golf": 1, "Weightlifting": 1} fencing_value = US_medals.get("Fencing") # + #Dictionary accumulation patterns #The dictionary Junior shows a schedule for a junior year semester. #The key is the course name and the value is the number of credits. #Find the total number of credits taken this semester and assign it to the variable credits. #Do not hardcode this – use dictionary accumulation! Junior = {'SI 206':4, 'SI 310':4, 'BL 300':3, 'TO 313':3, 'BCOM 350':1, 'MO 300':3} credits = 0 for val in Junior.values(): credits += val print(credits) # + #Create a dictionary, freq, that displays each character in string str1 as the key and its frequency as the value. str1 = "peter piper picked a peck of pickled peppers" freq = {} for c in str1: if c not in freq: freq[c] = 0 freq[c] = freq[c] + 1 print(freq) # + #Provided is a string saved to the variable name s1. 
Create a dictionary named counts that contains each letter in s1 and the number of times it occurs. s1 = "hello" counts = {} for c in s1: if c not in counts: counts[c] = 0 counts[c] = counts[c] + 1 print(counts) # + #Create a dictionary, freq_words, that contains each word in string str1 as the key and its frequency as the value. str1 = "I wish I wish with all my heart to fly with dragons in a land apart" freq_words = {} words = str1.split() for word in words: if word not in freq_words: freq_words[word] = 0 freq_words[word] += 1 print(freq_words) # + #Create a dictionary called wrd_d from the string sent, so that the key is a word and the value is how many times you have seen that word. sent = "Singing in the rain and playing in the rain are two entirely different situations but both can be good" wrd_d = {} words = sent.split() for word in words: if word not in wrd_d: wrd_d[word] = 0 wrd_d[word] += 1 print(wrd_d) # + #Create the dictionary characters that shows each character from the string sally and its frequency. #Then, find the most frequent letter based on the dictionary. Assign this letter to the variable best_char. sally = "sally sells sea shells by the sea shore" characters = {} for c in sally: if c not in characters: characters[c] = 0 characters[c] += 1 keys = list(characters.keys()) best_char = keys[0] for key in keys: if characters[key] > characters[best_char]: best_char = key print(best_char) # + #Find the least frequent letter. #Create the dictionary characters that shows each character from string sally and its frequency. #Then, find the least frequent letter in the string and assign the letter to the variable worst_char. 
sally = "sally sells sea shells by the sea shore and by the road" characters = {} for c in sally: if c not in characters: characters[c] = 0 characters[c] += 1 keys = list(characters.keys()) worst_char = keys[0] for key in keys: if characters[key] < characters[worst_char]: worst_char = key print(worst_char) # + #Create a dictionary named letter_counts that contains each letter and the number of times it occurs in string1. #Challenge: Letters should not be counted separately as upper-case and lower-case. #Intead, all of them should be counted as lower-case. string1 = "There is a tide in the affairs of men, Which taken at the flood, leads on to fortune. Omitted, all the voyage of their life is bound in shallows and in miseries. On such a full sea are we now afloat. And we must take the current when it serves, or lose our ventures." string_lower = string1.lower() #print(string_lower) letter_counts = {} for c in string_lower: if c not in letter_counts: letter_counts[c] = 0 letter_counts[c] += 1 print(letter_counts) # + #Create a dictionary called low_d that keeps track of all the characters in the string p and notes how many times each character was seen. #Make sure that there are no repeats of characters as keys, such that “T” and “t” are both seen as a “t” for example. p = "Summer is a great time to go outside. You have to be careful of the sun though because of the heat." p_low = p.lower() low_d = {} for c in p_low: if c not in low_d: low_d[c] = 0 low_d[c] += 1 print(low_d) # + ####################################################################################################################### # # # # Week 3 - Functions # # # ####################################################################################################################### # + #Write a function called int_return that takes an integer as input and returns the same integer. 
def int_return(x): return x # + #Write a function called add that takes any number as its input and returns that sum with 2 added. def add(x): return x + 2 # + #Write a function called change that takes any string, adds “Nice to meet you!” to the end of the argument given, and returns that new string. def change(str): return str + "Nice to meet you!" # + #Write a function, accum, that takes a list of integers as input and returns the sum of those integers. def accum(int_lst): sum = 0 for int in int_lst: sum = sum + int return sum # + #Write a function, length, that takes in a list as the input. #If the length of the list is greater than or equal to 5, return “Longer than 5”. If the length is less than 5, return “Less than 5”. def length(lst): if len(lst) >= 5: return "Longer than 5" return "Less than 5" # + #You will need to write two functions for this problem. #The first function, divide that takes in any number and returns that same number divided by 2. #The second function called sum should take any number, divide it by 2, and add 6. #It should return this new number. You should call the divide function within the sum function. #Do not worry about decimals. def divide(n): return n / 2 def sum(x): return divide(x) + 6 # + ####################################################################################################################### # # # # Week 3 - Tuples # # # ####################################################################################################################### # + #Create a tuple called olympics with four elements: “Beijing”, “London”, “Rio”, “Tokyo”. olympics = "Beijing", "London", "Rio", "Tokyo" # + #The list below, tuples_lst, is a list of tuples. #Create a list of the second elements of each tuple and assign this list to the variable country. 
tuples_lst = [('Beijing', 'China', 2008), ('London', 'England', 2012), ('Rio', 'Brazil', 2016, 'Current'), ('Tokyo', 'Japan', 2020, 'Future')] country = [] for cntry in tuples_lst: country.append(cntry[1]) # + #With only one line of code, assign the variables city, country, and year to the values of the tuple olymp. olymp = ('Rio', 'Brazil', 2016) city, country, year = olymp # + #Define a function called info with five parameters: name, gender, age, bday_month, and hometown. #The function should then return a tuple with all five parameters in that order. def info(name, gender, age, bday_month, hometown): return name, gender, age, bday_month, hometown # + #Given is the dictionary, gold, which shows the country and the number of gold medals they have earned so far in the 2016 Olympics. #Create a list, num_medals, that contains only the number of medals for each country. #You must use the .items() method. Note: The .items() method provides a list of tuples. Do not use .keys() method. gold = {'USA':31, 'Great Britain':19, 'China':19, 'Germany':13, 'Russia':12, 'Japan':10, 'France':8, 'Italy':8} num_medals = [] for contry, medals in gold.items(): num_medals.append(medals) # + ####################################################################################################################### # # # # Week 4 while statement # # # ####################################################################################################################### # + #Write a function, sublist, that takes in a list of numbers as the parameter. #In the function, use a while loop to return a sublist of the input list. #The sublist should contain the same values of the original list up until it reaches the number 5 ( #it should not contain the number 5). 
def sublist(nums): num_list=[] i=0 while i < len(nums): if nums[i] == 5: break num_list.append(nums[i]) i=i+1 return num_list sublist([1,2,3,4,5]) # + #Write a function called check_nums that takes a list as its parameter, and contains a while loop that only stops once the element of the list is the number 7. #What is returned is a list of all of the numbers up until it reaches 7. def check_nums(lst): new_lst = [] i = 0 while i < len(lst): if lst[i] == 7: break new_lst.append(lst[i]) i = i + 1 return new_lst # + #Write a function, sublist, that takes in a list of strings as the parameter. #In the function, use a while loop to return a sublist of the input list. #The sublist should contain the same values of the original list up until it reaches the string “STOP” (it should not contain the string “STOP”). def sublist(str): new_lst = [] i = 0 while i < len(str): if str[i] == "STOP": break new_lst.append(str[i]) i += 1 return new_lst # + #Write a function called stop_at_z that iterates through a list of strings. #Using a while loop, append each string to a new list until the string that appears is “z”. #The function should return the new list. def stop_at_z(str): new_lst = [] i = 0 while i < len(str): if str[i] == "z": break new_lst.append(str[i]) i += 1 return new_lst # + #Below is a for loop that works. Underneath the for loop, rewrite the problem so that it does the same thing, but using a while loop instead of a for loop. #Assign the accumulated total in the while loop code to the variable sum2. Once complete, sum2 should equal sum1. sum1 = 0 lst = [65, 78, 21, 33] for x in lst: sum1 = sum1 + x sum2 = 0 i = 0 while i < len(lst): sum2 += lst[i] i += 1 print(sum1, sum2) # + #Challenge: Write a function called beginning that takes a list as input and contains a while loop that only stops once the element of the list is the string ‘bye’. #What is returned is a list that contains up to the first 10 strings, regardless of where the loop stops. 
#(i.e., if it stops on the 32nd element, the first 10 are returned. #If “bye” is the 5th element, the first 4 are returned.) #If you want to make this even more of a challenge, do this without slicing def beginning(lst): i = 0 new_lst = [] while i < len(lst): if lst[i] == "bye": break elif len(new_lst) >= 10: break new_lst.append(lst[i]) i += 1 return new_lst # + ####################################################################################################################### # # # # Week 4 - Advanced Functions # # # ####################################################################################################################### # + #Create a function called mult that has two parameters, the first is required and should be an integer, #the second is an optional parameter that can either be a number or a string but whose default is 6. #The function should return the first parameter multiplied by the second. def mult(x, y = 6): return x * y # + #The following function, greeting, does not work. #Please fix the code so that it runs without error. This only requires one change in the definition of the function. #Make name the first parameter passed to the function def greeting(name, greeting="Hello ", excl="!"): return greeting + name + excl print(greeting("Bob")) print(greeting("")) print(greeting("Bob", excl="!!!")) # + #Below is a function, sum, that does not work. Change the function definition so the code works. #The function should still have a required parameter, intx, and an optional parameter, intz with a defualt value of 5. #make intx the first parameter def sum(intx, intz=5): return intz + intx # + #Write a function, test, that takes in three parameters: #a required integer, an optional boolean whose default value is True, and an optional dictionary, #called dict1, whose default value is {2:3, 4:5, 6:8}. #If the boolean parameter is True, the function should test to see if the integer is a key in the dictionary. 
#The value of that key should then be returned. If the boolean parameter is False, return the boolean value “False”. def test(int1, b = True, dict1 = {2:3, 4:5, 6:8}): if b == True: if int1 in dict1: return dict1[int1] elif b == False: return False # + #Write a function called checkingIfIn that takes three parameters. #The first is a required parameter, which should be a string. #The second is an optional parameter called direction with a default value of True. #The third is an optional parameter called d that has a default value of {'apple': 2, 'pear': 1, 'fruit': 19, 'orange': 5, 'banana': 3, 'grapes': 2, 'watermelon': 7}. #Write the function checkingIfIn so that when the second parameter is True, it checks to see if the first parameter is a key in the third parameter; #if it is, return True, otherwise return False. #But if the second paramter is False, then the function should check to see if the first parameter is not a key of the third. #If it’s not, the function should return True in this case, and if it is, it should return False. def checkingIfIn (str1, direction = True, d = {'apple': 2, 'pear': 1, 'fruit': 19, 'orange': 5, 'banana': 3, 'grapes': 2, 'watermelon': 7}): if direction == True: if str1 in d: return True else: return False elif direction == False: if str1 not in d: return True else: return False # + #We have provided the function checkingIfIn such that if the first input parameter is in the third, dictionary, #input parameter, then the function returns that value, and otherwise, it returns False. #Follow the instructions in the active code window for specific variable assignmemts. 
def checkingIfIn(a, direction = True, d = {'apple': 2, 'pear': 1, 'fruit': 19, 'orange': 5, 'banana': 3, 'grapes': 2, 'watermelon': 7}): if direction == True: if a in d: return d[a] else: return False else: if a not in d: return True else: return d[a] # Call the function so that it returns False and assign that function call to the variable c_false c_false = checkingIfIn("Ice cream") # Call the fucntion so that it returns True and assign it to the variable c_true c_true = checkingIfIn("Pizza", direction = False) # Call the function so that the value of fruit is assigned to the variable fruit_ans fruit_ans = checkingIfIn("fruit") # Call the function using the first and third parameter so that the value 8 is assigned to the variable param_check param_check = checkingIfIn("mcnuggets", direction = False , d = {'Big Macs': 2, 'mcnuggets': 8, 'fries': 4, 'hot cakes': 29000}) # + ####################################################################################################################### # # # # Week 5 - Sorting Basics # # # ######################################################################################################################## # + #Sort the following string alphabetically, from z to a, and assign it to the variable sorted_letters. letters = "alwnfiwaksuezlaeiajsdl" sorted_letters = sorted(letters, reverse=True) # + #Sort the list below, animals, into alphabetical order, a-z. Save the new list as animals_sorted. animals = ['elephant', 'cat', 'moose', 'antelope', 'elk', 'rabbit', 'zebra', 'yak', 'salamander', 'deer', 'otter', 'minx', 'giraffe', 'goat', 'cow', 'tiger', 'bear'] animals_sorted = sorted(animals) # + #The dictionary, medals, shows the medal count for six countries during the Rio Olympics. #Sort the country names so they appear alphabetically. Save this list to the variable alphabetical. 
medals = {'Japan':41, 'Russia':56, 'South Korea':21, 'United States':121, 'Germany':42, 'China':70} alphabetical = sorted(medals) # + #Given the same dictionary, medals, now sort by the medal count. #Save the three countries with the highest medal count to the list, top_three. medals = {'Japan':41, 'Russia':56, 'South Korea':21, 'United States':121, 'Germany':42, 'China':70} sorted_medals = sorted(medals.keys(), key=lambda k: medals[k], reverse=True) top_three = sorted_medals[0:3] print(top_three) # + #We have provided the dictionary groceries. #You should return a list of its keys, but they should be sorted by their values, from highest to lowest. #Save the new list as most_needed. groceries = {'apples': 5, 'pasta': 3, 'carrots': 12, 'orange juice': 2, 'bananas': 8, 'popcorn': 1, 'salsa': 3, 'cereal': 4, 'coffee': 5, 'granola bars': 15, 'onions': 7, 'rice': 1, 'peanut butter': 2, 'spinach': 9} most_needed = sorted(groceries.keys(), key=lambda k: groceries[k], reverse=True) # + #Create a function called last_four that takes in an ID number and returns the last four digits. #For example, the number 17573005 should return 3005. #Then, use this function to sort the list of ids stored in the variable, ids, from lowest to highest. #Save this sorted list in the variable, sorted_ids. Hint: Remember that only strings can be indexed, so conversions may be needed. def last_four(x): str_x = str(x) return str_x[-4:] ids = [17573005, 17572342, 17579000, 17570002, 17572345, 17579329] sorted_ids = sorted(ids, key=last_four) # + #Sort the list ids by the last four digits of each id. Do this using lambda and not using a defined function. #Save this sorted list in the variable sorted_id. ids = [17573005, 17572342, 17579000, 17570002, 17572345, 17579329] def last_four(x): str_x = str(x) return str_x[-4:] sorted_id = sorted(ids, key=lambda id: last_four(id)) print(sorted_id) # + #Sort the following list by each element’s second letter a to z. Do so by using lambda. 
#Assign the resulting value to the variable lambda_sort. ex_lst = ['hi', 'how are you', 'bye', 'apple', 'zebra', 'dance'] lambda_sort = sorted(ex_lst, key=lambda word: word[1]) print(lambda_sort) # + ####################################################################################################################### # # # # Course 2 Project # # # ####################################################################################################################### # + '''We have provided some synthetic (fake, semi-randomly generated) twitter data in a csv file named project_twitter_data.csv which has the text of a tweet, the number of retweets of that tweet, and the number of replies to that tweet. We have also words that express positive sentiment and negative sentiment, in the files positive_words.txt and negative_words.txt. Your task is to build a sentiment classifier, which will detect how positive or negative each tweet is. You will create a csv file, which contains columns for the Number of Retweets, Number of Replies, Positive Score (which is how many happy words are in the tweet), Negative Score (which is how many angry words are in the tweet), and the Net Score for each tweet. At the end, you upload the csv file to Excel or Google Sheets, and produce a graph of the Net Score vs Number of Retweets. To start, define a function called strip_punctuation which takes one parameter, a string which represents a word, and removes characters considered punctuation from everywhere in the word. (Hint: remember the .replace() method for strings.)''' def strip_punctuation(str1): punctuation_chars = ["'", '"', ",", ".", "!", ":", ";", '#', '@'] for char in punctuation_chars: str1 = str1.replace(char, '') return str1 # + '''Next, copy in your strip_punctuation function and define a function called get_pos which takes one parameter, a string which represents one or more sentences, and calculates how many words in the string are considered positive words. 
Use the list, positive_words to determine what words will count as positive. The function should return a positive integer - how many occurrences there are of positive words in the text. Note that all of the words in positive_words are lower cased, so you’ll need to convert all the words in the input string to lower case as well.''' def strip_punctuation(str1): punctuation_chars = ["'", '"', ",", ".", "!", ":", ";", '#', '@'] for char in punctuation_chars: str1 = str1.replace(char, '') return str1 punctuation_chars = ["'", '"', ",", ".", "!", ":", ";", '#', '@'] # list of positive words to use positive_words = [] with open("positive_words.txt") as pos_f: for lin in pos_f: if lin[0] != ';' and lin[0] != '\n': positive_words.append(lin.strip()) def get_pos(str_pos): str_pos_lower = str_pos.lower() str_pos_punc = strip_punctuation(str_pos_lower) str_pos_split = str_pos_punc.split() pos_cnt = 0 for word in str_pos_split: if word in positive_words: pos_cnt += 1 return pos_cnt # + '''Next, copy in your strip_punctuation function and define a function called get_neg which takes one parameter, a string which represents one or more sentences, and calculates how many words in the string are considered negative words. Use the list, negative_words to determine what words will count as negative. The function should return a positive integer - how many occurrences there are of negative words in the text. 
Note that all of the words in negative_words are lower cased, so you’ll need to convert all the words in the input string to lower case as well.''' def strip_punctuation(str1): punctuation_chars = ["'", '"', ",", ".", "!", ":", ";", '#', '@'] for char in punctuation_chars: str1 = str1.replace(char, '') return str1 punctuation_chars = ["'", '"', ",", ".", "!", ":", ";", '#', '@'] negative_words = [] with open("negative_words.txt") as pos_f: for lin in pos_f: if lin[0] != ';' and lin[0] != '\n': negative_words.append(lin.strip()) def get_neg(str_neg): str_neg_lower = str_neg.lower() str_neg_punc = strip_punctuation(str_neg_lower) str_neg_split = str_neg_punc.split() neg_cnt = 0 for word in str_neg_split: if word in negative_words: neg_cnt += 1 return neg_cnt # + '''Finally, copy in your previous functions and write code that opens the file project_twitter_data.csv which has the fake generated twitter data (the text of a tweet, the number of retweets of that tweet, and the number of replies to that tweet). Your task is to build a sentiment classifier, which will detect how positive or negative each tweet is. Copy the code from the code windows above, and put that in the top of this code window. Now, you will write code to create a csv file called resulting_data.csv, which contains the Number of Retweets, Number of Replies, Positive Score (which is how many happy words are in the tweet), Negative Score (which is how many angry words are in the tweet), and the Net Score (how positive or negative the text is overall) for each tweet. The file should have those headers in that order. Remember that there is another component to this project. You will upload the csv file to Excel or Google Sheets and produce a graph of the Net Score vs Number of Retweets. 
Check Coursera for that portion of the assignment, if you’re accessing this textbook from Coursera.''' def strip_punctuation(str1): punctuation_chars = ["'", '"', ",", ".", "!", ":", ";", '#', '@'] for char in punctuation_chars: str1 = str1.replace(char, '') return str1 def get_pos(str_pos): str_pos_lower = str_pos.lower() str_pos_punc = strip_punctuation(str_pos_lower) str_pos_split = str_pos_punc.split() pos_cnt = 0 for word in str_pos_split: if word in positive_words: pos_cnt += 1 return pos_cnt def get_neg(str_neg): str_neg_lower = str_neg.lower() str_neg_punc = strip_punctuation(str_neg_lower) str_neg_split = str_neg_punc.split() neg_cnt = 0 for word in str_neg_split: if word in negative_words: neg_cnt += 1 return neg_cnt punctuation_chars = ["'", '"', ",", ".", "!", ":", ";", '#', '@'] # lists of words to use positive_words = [] with open("positive_words.txt") as pos_f: for lin in pos_f: if lin[0] != ';' and lin[0] != '\n': positive_words.append(lin.strip()) negative_words = [] with open("negative_words.txt") as pos_f: for lin in pos_f: if lin[0] != ';' and lin[0] != '\n': negative_words.append(lin.strip()) #open twitter file fileref=open("project_twitter_data.csv","r") tweets = fileref.readlines() print(tweets) #check out how tweets are formatted #Write tweets to csv file outfile=open("resulting_data.csv","w") outfile.write("Number of Retweets, Number of Replies, Positive Score, Negative Score, Net Score") outfile.write("\n") for tweet in tweets[1:]: tweet_data = "" clean_tweet=tweet.strip().split(",") #print(clean_tweet) tweet_data = ("{},{},{},{},{}".format(clean_tweet[1], clean_tweet[2], get_pos(clean_tweet[0]), get_neg(clean_tweet), (get_pos(clean_tweet)-get_neg(clean_tweet)))) outfile.write(tweet_data) outfile.write("\n") outfile.close() # + #Peer-graded Assignment: Project - Part 2: Sentiment Analysis #make scatterplot of resulting_csv showing net score and number of retweets. 
import pandas as pd import seaborn as sns import matplotlib.pyplot as plt df=pd.read_csv(r"C:\Users\BRG4142\Desktop\Py3 Specialization\resulting_csv.csv") plt.figure(figsize=(16, 8)) sns.scatterplot(data=df, x="Net Score", y="Number of Retweets", s=91) plt.xlabel("Net Score", size=20) plt.ylabel("Number of Retweets", size=20) plt.title("Twitter Sentiment Analysis: Net Score by Number of Retweets", size=24) # -
33,260
/1_Input/.ipynb_checkpoints/Test-checkpoint.ipynb
b749050ee5454609ccc6d7189ef4448513da2b81
[]
no_license
write2bobby/GroupProject_PoxyServer
https://github.com/write2bobby/GroupProject_PoxyServer
0
0
null
2019-03-18T23:54:42
2019-03-15T00:00:15
null
Jupyter Notebook
false
false
.py
33,294
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Setup # %load_ext autoreload # %autoreload 2 # + from pathlib import Path import matplotlib.pyplot as plt from lung_cancer_detection.data.nodule import ClassificationDataModule from lung_cancer_detection.utils import load_config, load_json # - config_path = Path().absolute().parents[0] / "configs/test.yaml" config_path.exists() config = load_config(config_path) data_dir = Path().absolute().parents[0] / config["data"]["data_dir"] split_dir = Path().absolute().parents[0] / config["data"]["split_dir"] cache_dir = Path().absolute().parents[0] / config["data"]["cache_dir"] splits = (load_json(split_dir/"train.json"), load_json(split_dir/"valid.json")) # ## Helper functions def print_shapes(dataset): for item in dataset: print(item["image"].shape, item["label"].shape) def preview_dataset(ds, z=None): plt.figure("nodules", (12, 12)) for i, item in enumerate(ds[:20], start=1): img = item["image"].numpy()[0] plt.subplot(4, 5, i) if z: plt.imshow(img[:,:,z], cmap="gray") else: plt.imshow(img[:,:,int(img.shape[2]/2)], cmap="gray") plt.title(f"Label: {item['label'].numpy()[0]}") plt.show() # ## Inititalize DataModule dm = ClassificationDataModule(data_dir, cache_dir, splits, batch_size=2) dm.setup() print_shapes(dm.train_ds) print_shapes(dm.val_ds) elem = dm.train_ds[0] for key in elem: print(key, type(elem[key])) from monai.data.utils import list_data_collate dl = dm.train_dataloader() for batch in dl: print(batch["image"].shape, batch["label"].shape) dl = dm.val_dataloader() for batch in dl: print(batch["image"].shape, batch["label"].shape) preview_dataset(dm.train_ds) preview_dataset(dm.val_ds)
2,010
/02. Train-Validation-Test Data Split.ipynb
fab8f71577b8c91bf221dc7b0ebbc17fb30e8219
[]
no_license
RafalPikula/faces
https://github.com/RafalPikula/faces
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
32,331
# Converted notebook: train/validation/test split of the face-photo metadata,
# followed by image homogenization (resize to 320x320, 2D -> 3D conversion).

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import skimage
from sklearn.utils import shuffle

# %matplotlib inline

# Read in the processed metadata.
photo_data = pd.read_csv('processed_photo_metadata.csv')
photo_data.head()

# Partitioning strategy: the three sets must contain disjoint sets of people,
# and no single person should dominate validation/test.  Hence:
#   * test  = photos of people appearing 1 or 3 times in the data set,
#   * valid = photos of people appearing exactly 2 times,
#   * train = everyone else (>= 4 appearances), shuffled because the source
#     data is ordered by name.

name_count = photo_data['name'].value_counts()
appearances = photo_data['name'].apply(lambda name: name_count[name])

test_index = appearances.isin([1, 3])
valid_index = appearances == 2
train_index = appearances > 3

train_set = photo_data.loc[train_index, ['photo_path', 'name', 'gender', 'age']].copy()
train_set = shuffle(train_set, random_state=7532).drop(columns=['name']).reset_index(drop=True)
print(f'Number of datapoints in the train set: {train_set.shape[0]}')

valid_set = photo_data.loc[valid_index, ['photo_path', 'gender', 'age']].copy()
valid_set = valid_set.reset_index(drop=True)
print(f'Number of datapoints in the validation set: {valid_set.shape[0]}')

test_set = photo_data.loc[test_index, ['photo_path', 'gender', 'age']].copy()
test_set = test_set.reset_index(drop=True)
print(f'Number of datapoints in the test set: {test_set.shape[0]}')

# Gender distribution per set.
print('Train set:')
print(train_set['gender'].value_counts(normalize=True).apply(lambda x: str(round(100 * x, 2)) + '%'))
print()
print('Validation set:')
print(valid_set['gender'].value_counts(normalize=True).apply(lambda x: str(round(100 * x, 2)) + '%'))
print()
print('Test set:')
print(test_set['gender'].value_counts(normalize=True).apply(lambda x: str(round(100 * x, 2)) + '%'))

# Age distribution per set.
bins = 30
_, axs = plt.subplots(1, 3, figsize=(15, 5), sharex=True)
for ax, (title, subset) in zip(axs, [('Train Set', train_set),
                                     ('Validation Set', valid_set),
                                     ('Test Set', test_set)]):
    ax.hist(subset['age'], bins=bins)
    ax.set_title(title)

# --- Train-Validation-Test Data Preparation --------------------------------

PHOTO_DIR = 'imdb_crop/'


def convert_to_3D(img):
    """Converts a 2D image to a 3D image (three identical float32 channels)."""
    return np.repeat(img[:, :, np.newaxis], 3, axis=2).astype(np.float32)


def homogenize_images(img_path_series, img_dir=PHOTO_DIR, output_dir='_all_photos/',
                      new_width=320, new_height=320, only_save_to_disk=True):
    """
    1) Resizes images to new_width x new_height
    2) Converts 2D images to 3D images
    3) Either saves the transformed images to disk (rewriting the entries of
       img_path_series to the new '_HMGD' names) or builds and returns an
       array of homogenized 3D images
    4) Returns the 2D image count
    """
    count_2D = 0
    if only_save_to_disk:
        pos = 0
    else:
        homogenized = []

    for img_name in img_path_series:
        img = plt.imread(img_dir + img_name)
        if img.shape[0] == new_width and img.shape[1] == new_height:
            # Already the right size; just rescale uint8 pixels to [0, 1].
            img = (img / 255.0).astype(np.float32)
        else:
            # resize() returns a float64 representation of the image.
            img = skimage.transform.resize(img, (new_width, new_height), order=3,
                                           mode='reflect', anti_aliasing=True).astype(np.float32)

        if img.ndim == 2:
            img = convert_to_3D(img)
            count_2D += 1

        if only_save_to_disk:
            new_img_name = img_name[3:-4] + '_HMGD' + img_name[-4:]
            plt.imsave(img_dir + output_dir + new_img_name, img)
            img_path_series[pos] = new_img_name
            pos += 1
        else:
            homogenized.append(img)

    if only_save_to_disk:
        return count_2D
    return np.array(homogenized), count_2D


# Homogenize each split, saving the transformed images to disk.
count_2D = homogenize_images(valid_set.loc[:, 'photo_path'])
print('Validation set:')
print(f'Number of 2D photos converted to 3D: {count_2D}\n')

count_2D = homogenize_images(test_set.loc[:, 'photo_path'])
print('Test set')
print(f'Number of 2D photos converted to 3D: {count_2D}\n')

count_2D = homogenize_images(train_set.loc[:, 'photo_path'])
print('Train set')
print(f'Number of 2D photos converted to 3D: {count_2D}\n')

# Save the adjusted metadata for the three splits.
train_set.to_csv('train_set_metadata_HMGD.csv', index=False)
valid_set.to_csv('valid_set_metadata_HMGD.csv', index=False)
test_set.to_csv('test_set_metadata_HMGD.csv', index=False)

# Prepare and save the test set as a numpy array.
test_image_array, count_2D = homogenize_images(test_set.loc[:, 'photo_path'],
                                               only_save_to_disk=False)
print('Test set')
print(f'Number of 2D photos converted to 3D: {count_2D}\n')
np.save('test_set_hmgd_arr.npy', test_image_array)
7,076
/Basics.ipynb
ac4bf817402108b886a6ca1560c2fc81a2536fad
[]
no_license
fantasyocean/Star-Wars-Series-Survey-Analysis-
https://github.com/fantasyocean/Star-Wars-Series-Survey-Analysis-
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
247,810
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd star_wars = pd.read_csv("star_wars.csv", encoding = "ISO-8859-1") star_wars.head(10) star_wars.columns star_wars = star_wars[star_wars["RespondentID"].notnull()] star_wars.head(10) # + #columns #Have you seen any of the 6 films in the Star Wars franchise? #Do you consider yourself to be a fan of the Star Wars film franchise? # + yes_no = {"Yes": True, "No": False} columns = ["Have you seen any of the 6 films in the Star Wars franchise?", "Do you consider yourself to be a fan of the Star Wars film franchise?"] for c in columns: star_wars[c] = star_wars[c].map(yes_no) # - star_wars["Have you seen any of the 6 films in the Star Wars franchise?"].value_counts() star_wars["Do you consider yourself to be a fan of the Star Wars film franchise?"].value_counts() print(star_wars.iloc[0,3:9]) print(star_wars.iloc[:,0]) # + import numpy as np rename_col = star_wars.columns[3:9] Convert_val = { "Star Wars: Episode I The Phantom Menace" : True, "Star Wars: Episode II Attack of the Clones" : True, "Star Wars: Episode III Revenge of the Sith" : True, "Star Wars: Episode IV A New Hope" : True, "Star Wars: Episode V The Empire Strikes Back" : True, "Star Wars: Episode VI Return of the Jedi" : True, np.NaN : False } New_name = { "Which of the following Star Wars films have you seen? Please select all that apply." 
: "seen_1", "Unnamed: 4" : "seen_2", "Unnamed: 5" : "seen_3", "Unnamed: 6" : "seen_4", "Unnamed: 7" : "seen_5", "Unnamed: 8" : "seen_6" } for c in star_wars.columns[3:9]: star_wars[c] = star_wars[c].map(Convert_val) star_wars = star_wars.rename(columns=New_name) print(star_wars.head()) # - star_wars[star_wars.columns[9:15]] = star_wars[star_wars.columns[9:15]].astype(float) # + New_rank = { "Please rank the Star Wars films in order of preference with 1 being your favorite film in the franchise and 6 being your least favorite film." : "ranking_1", "Unnamed: 10" : "ranking_2", "Unnamed: 11" : "ranking_3", "Unnamed: 12" : "ranking_4", "Unnamed: 13" : "ranking_5", "Unnamed: 14" : "ranking_6", } star_wars = star_wars.rename(columns=New_rank) # - print(star_wars.columns) # + # %matplotlib inline import matplotlib.pyplot as plt mean_ranking = ["ranking_1", "ranking_2", "ranking_3", "ranking_4", "ranking_5", "ranking_6"] star_wars[mean_ranking].mean() # - plt.bar(range(6), star_wars[mean_ranking].mean()) # looks like Episode III is most favorite moves among the survey takers, while Episode V is least favorite movies star_wars[star_wars.columns[3:9]].sum() plt.bar(range(6), star_wars[star_wars.columns[3:9]].sum()) # looks like the more recent movies have got more views, maybe because the oringinal ones set up good awareness and attract more audience for the later ones. 
# + males = star_wars[star_wars["Gender"] == "Male"] females = star_wars[star_wars["Gender"] == "Female"] plt.bar(range(6), males[males.columns[3:9]].sum()) plt.show() plt.bar(range(6), females[females.columns[3:9]].sum()) plt.show() plt.bar(range(6), males[males.columns[9:15]].mean()) plt.show() plt.bar(range(6), females[females.columns[9:15]].mean()) plt.show() # - # more males have watched Episode I,II, and III, while preference on movies follow the similar trends for both males and females star_wars["Education"].value_counts() star_wars_edu = star_wars.groupby(star_wars["Education"]).agg(sum) plt.bar(range(5), star_wars_edu["seen_1"]) plt.show() plt.bar(range(5), star_wars_edu["seen_2"]) plt.show() plt.bar(range(5), star_wars_edu["seen_3"]) plt.show() plt.bar(range(5), star_wars_edu["seen_4"]) plt.show() plt.bar(range(5), star_wars_edu["seen_5"]) plt.show() plt.bar(range(5), star_wars_edu["seen_6"]) plt.show() # it appears that people with college/associate degree views most Episode 1 star_wars["Location (Census Region)"].value_counts() star_wars_loc = star_wars.groupby(star_wars["Location (Census Region)"]).agg(np.mean) plt.bar(range(9), star_wars_loc["ranking_1"]) plt.show() plt.bar(range(9), star_wars_loc["ranking_2"]) plt.show() plt.bar(range(9), star_wars_loc["ranking_3"]) plt.show() plt.bar(range(9), star_wars_loc["ranking_4"]) plt.show() plt.bar(range(9), star_wars_loc["ranking_5"]) plt.show() plt.bar(range(9), star_wars_loc["ranking_6"]) plt.show() # perference for movies varies by location, e.g. 
West North viewers like the Episode VI most star_wars["Which character shot first?"].value_counts() # It appears Han is most favorite charater, https://en.wikipedia.org/wiki/Han_Solo star_wars.columns[15:29] # + import numpy as np Like_level = { "Very favorably" : "like", "Somewhat favorably" : "like", "Neither favorably nor unfavorably (neutral)" : "controversial", "somewhat unfavorably" : "dislike", "Unfamiliar (N/A)" : "controversial", "Very unfavorably" : "dislike", np.NaN : "controversial" } col_name = { "Please state whether you view the following characters favorably, unfavorably, or are unfamiliar with him/her." : "cha_1", "Unnamed: 16" : "cha_2", "Unnamed: 17" : "cha_3", "Unnamed: 18" : "cha_4", "Unnamed: 19" : "cha_5", "Unnamed: 20" : "cha_6", "Unnamed: 21" : "cha_7", "Unnamed: 22" : "cha_8", "Unnamed: 23" : "cha_9", "Unnamed: 24" : "cha_10", "Unnamed: 25" : "cha_11", "Unnamed: 26" : "cha_12", "Unnamed: 27" : "cha_13", "Unnamed: 28" : "cha_14", } # - for col in star_wars.columns[15:29]: star_wars[col] = star_wars[col].map(Like_level) star_wars = star_wars.rename(columns = col_name) star_wars["cha_1"].value_counts() star_wars[star_wars.columns[15:29]].head() # + like = {} dislike = {} controversial = {} for col in star_wars.columns[15:29]: like[col] = star_wars[star_wars[col] == "like"][col].count() dislike[col] = star_wars[star_wars[col] == "dislike"][col].count() controversial[col] = star_wars[star_wars[col] == "controversial"][col].count() print(like) print(dislike) print(controversial) # - plt.bar(range(len(like)), list(like.values())) plt.show() plt.bar(range(len(dislike)), list(dislike.values())) plt.show() plt.bar(range(len(controversial)), list(controversial.values())) plt.show() # cha_1,6,7,8,10 are favored by respondents, # cha_2,9,14 got most dislikes # cha_3,5,13 are more controversial random.choice(x_test.shape[0], 10) xhat = x_test[xhat_idx] yhat_classes = model.predict_classes(xhat) for i in range(10): print('True : ' + 
str(np.argmax(y_test[xhat_idx[i]])) + ', Predict : ' + str(yhat_classes[i])) # - # # 7. 최종 모델 훈련 # + x_total = np.vstack((x_train, x_test)) y_total = np.vstack((y_train, y_test)) print(x_total.shape) print(y_total.shape) # - early_stopping = EarlyStopping(monitor='loss', patience=10) reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.2, patience=5, min_lr=0.0001) # + EPOCHS = 60 INIT_LR = 1e-3 BS = 256 split_ratio = 0.2 print("[INFO] compiling model...") opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS) model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"]) # + # EPOCHS = 100 # INIT_LR = 1e-4 # BS = 256 # split_ratio = 0.2 # print("[INFO] compiling model...") # opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS) # model.compile(loss="categorical_crossentropy", optimizer=opt, # metrics=["accuracy"]) # - hist = model.fit(x_total, y_total, epochs=EPOCHS, batch_size=BS, #validation_split=split_ratio, verbose = 1 ,callbacks=[reduce_lr] ) loss_and_metrics = model.evaluate(x_test, y_test, batch_size=BS) print('## evaluation loss and_metrics ##') print(loss_and_metrics) # + xhat_idx = np.random.choice(x_test.shape[0], 10) xhat = x_test[xhat_idx] yhat_classes = model.predict_classes(xhat) for i in range(10): print('True : ' + str(np.argmax(y_test[xhat_idx[i]])) + ', Predict : ' + str(yhat_classes[i])) # - # # 8. 
모델 저장 # + #model.save('./model_data/video_model_3.h5') # - ass_, ImageName) for class_ in df_by_ImageName['class'].unique()] for img_path in img_paths: img_dir=os.path.split(img_path)[0] if not os.path.isdir(img_dir): os.makedirs(img_dir) #download and copy images to each class directory img_path0 = img_paths[0] if not os.path.isfile(img_path0): wget.download(img_url, img_path0) for img_path in img_paths[1:]: if not os.path.isfile(img_path): shutil.copyfile(img_path0, img_path) #collect garbage [gc.collect() for i in range(3)] def download_imgs(img_meta, imgs_dir, ImageNames_train, verbose = 2): print('n Images to download:',len(img_meta['image_name'].unique())) executor = joblib.Parallel(n_jobs=-1, verbose = 2, backend='multiprocessing' ) jobs = [joblib.delayed(download_img)(ImageName, img_url, imgs_dir, df_by_ImageName, ImageNames_train) for [ImageName, img_url], df_by_ImageName in img_meta.groupby(['image_name','image_url'])] executor(jobs) # - download_imgs(img_meta, imgs_dir, ImageNames_train, verbose = 5) # + def download_img(ImageName, imgs_dir, img_url): path_img = os.path.join(imgs_dir, ImageName) if not os.path.isfile(path_img): wget.download(img_url, path_img) def download_imgs(img_meta, imgs_dir): if not os.path.isdir(imgs_dir): os.makedirs(imgs_dir) #Instantiate parallel job print('n Images to download:',len(img_meta['image_name'].unique())) executor = joblib.Parallel(n_jobs=-1, verbose = 2, backend='multiprocessing' ) jobs = [joblib.delayed(download_img)(ImageName, imgs_dir, img_url) for [ImageName, img_url], _ in img_meta.groupby(['image_name','image_url'])] outputs = executor(jobs) gc.collect() # - for [class_, LabelName], img_meta_by_class in img_meta.groupby(['class', 'LabelName']): print(class_) img_meta_by_class_train = img_meta_by_class[img_meta_by_class['image_name'].isin(ImageNames_train)] img_meta_by_class_test = img_meta_by_class[img_meta_by_class['image_name'].isin(ImageNames_test)] imgs_dir_train = os.path.join(imgs_dir, 'train', class_) 
imgs_dir_test = os.path.join(imgs_dir, 'test', class_) Complete=False while not Complete: try: download_imgs(img_meta_by_class_train, imgs_dir_train) download_imgs(img_meta_by_class_test, imgs_dir_test) Complete=True except Exception as e: print(e)
10,885
/Gradient+Checking+v1.ipynb
6c2bb2de4e105287ad2ebd3b5579be97801614ca
[]
no_license
sanchezfdezjavier/Improving_Deep_Neural_Networks
https://github.com/sanchezfdezjavier/Improving_Deep_Neural_Networks
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
26,960
"""Gradient checking: verify backpropagation against a numerical gradient.

1-D case: J(theta) = theta * x, with dJ/dtheta = x.
N-D case: a 3-layer LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID network
(the assignment's fraud-detection model).

The assignment ships `backward_propagation_n` with two deliberate bugs
(dW2 scaled by a spurious 2, db1 scaled by 4/m instead of 1/m) and asks
you to locate them with the gradient check.  Both are fixed below, so
`gradient_check_n` now reports a difference well under 1e-7.

Notes:
- Gradient checking is slow (two forward passes per parameter), so it is
  run only occasionally to validate backprop, never every iteration.
- It does not work with dropout; check gradients with dropout disabled.
"""

import numpy as np

try:
    # Course-provided helpers; only required for the N-dimensional demo.
    from testCases import gradient_check_n_test_case
    from gc_utils import (sigmoid, relu, dictionary_to_vector,
                          vector_to_dictionary, gradients_to_vector)
except ImportError:
    # Minimal fallbacks so the module (and the 1-D check) works stand-alone.
    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    def relu(x):
        return np.maximum(0, x)


def forward_propagation(x, theta):
    """Return J(theta) = theta * x (the 1-D toy model of Figure 1).

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well
    """
    return np.dot(theta, x)


def backward_propagation(x, theta):
    """Return dJ/dtheta = x for J(theta) = theta * x."""
    return x


def gradient_check(x, theta, epsilon=1e-7):
    """Compare the analytic 1-D gradient with a centered finite difference.

    Returns the relative difference
        ||grad - gradapprox||_2 / (||grad||_2 + ||gradapprox||_2),
    which should be far below 1e-7 when backward_propagation is correct.
    """
    # Centered-difference approximation of dJ/dtheta.
    thetaplus = theta + epsilon
    thetaminus = theta - epsilon
    J_plus = forward_propagation(x, thetaplus)
    J_minus = forward_propagation(x, thetaminus)
    gradapprox = (J_plus - J_minus) / (2 * epsilon)

    grad = backward_propagation(x, theta)

    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator

    if difference < 1e-7:
        print("The gradient is correct!")
    else:
        print("The gradient is wrong!")
    return difference


def forward_propagation_n(X, Y, parameters):
    """Forward pass and cost of the 3-layer network (Figure 2).

    Arguments:
    X -- training set for m examples, shape (input size, m)
    Y -- labels for m examples, shape (1, m)
    parameters -- dict with W1 (5,4), b1 (5,1), W2 (3,5), b2 (3,1),
                  W3 (1,3), b3 (1,1)

    Returns:
    cost -- the logistic cost averaged over the m examples
    cache -- intermediate values needed by backward_propagation_n
    """
    m = X.shape[1]
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    # Cross-entropy cost.
    logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
    cost = 1. / m * np.sum(logprobs)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
    return cost, cache


def backward_propagation_n(X, Y, cache):
    """Backward pass of the 3-layer network (gradients of the cost).

    The two bugs planted by the assignment are fixed here: dW2 no longer
    carries a spurious factor of 2, and db1 uses 1/m instead of 4/m.

    Arguments:
    X -- input datapoints, shape (input size, m)
    Y -- true labels
    cache -- cache output from forward_propagation_n()

    Returns:
    gradients -- dict with the gradients of the cost with respect to each
                 parameter, activation and pre-activation variable.
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache

    dZ3 = A3 - Y
    dW3 = 1. / m * np.dot(dZ3, A2.T)
    db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)

    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))              # ReLU mask
    dW2 = 1. / m * np.dot(dZ2, A1.T)                      # fixed: removed "* 2"
    db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)

    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))              # ReLU mask
    dW1 = 1. / m * np.dot(dZ1, X.T)
    db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)     # fixed: was 4./m

    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
                 "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
    return gradients


def gradient_check_n(parameters, gradients, X, Y, epsilon=1e-7):
    """Numerically check the gradients produced by backward_propagation_n.

    Perturbs every parameter component by +/- epsilon, rebuilds the
    parameter dictionary, and compares the finite-difference gradient
    vector with the backprop gradient vector.  Requires the course
    helpers dictionary_to_vector / vector_to_dictionary /
    gradients_to_vector.

    Returns the relative difference (formula (3) of the assignment).
    """
    parameters_values, _ = dictionary_to_vector(parameters)
    grad = gradients_to_vector(gradients)
    num_parameters = parameters_values.shape[0]
    J_plus = np.zeros((num_parameters, 1))
    J_minus = np.zeros((num_parameters, 1))
    gradapprox = np.zeros((num_parameters, 1))

    for i in range(num_parameters):
        # J(theta + epsilon * e_i)
        thetaplus = np.copy(parameters_values)
        thetaplus[i][0] += epsilon
        J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus))

        # J(theta - epsilon * e_i)
        thetaminus = np.copy(parameters_values)
        thetaminus[i][0] -= epsilon
        J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus))

        gradapprox[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)

    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator

    if difference > 1e-7:
        print("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m")
    else:
        print("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m")
    return difference


def main():
    """Run the notebook's demo cells: the 1-D check, then the N-D check."""
    x, theta = 2, 4
    J = forward_propagation(x, theta)
    print("J = " + str(J))
    dtheta = backward_propagation(x, theta)
    print("dtheta = " + str(dtheta))
    difference = gradient_check(x, theta)
    print("difference = " + str(difference))

    try:
        X, Y, parameters = gradient_check_n_test_case()
    except NameError:
        print("testCases / gc_utils not available; skipping the N-dimensional demo.")
        return
    cost, cache = forward_propagation_n(X, Y, parameters)
    gradients = backward_propagation_n(X, Y, cache)
    gradient_check_n(parameters, gradients, X, Y)


if __name__ == "__main__":
    main()
19,841
/visualization.ipynb
80891a7b8c8c01a0b507206f08d8b0caf327d420
[]
no_license
ameyp-ml/guru-gesture-recognition
https://github.com/ameyp-ml/guru-gesture-recognition
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
78,343
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline from matplotlib import pyplot as plt import numpy as np distr = [] for i in range(20): distr.append([]) for i in range(1,471): with open("/media/amey/76D076A5D0766B6F/chalap/train/Sample{:04d}_labels.csv".format(i)) as f: for l in f: [label,_,_] = [int(n) for n in l.split(',')] distr[label-1].append(i) plt.scatter(distr[0], np.ones(len(distr[0])), marker='.', linewidths='1') for i in range(20): #plt.subplot(20, 1, i+1) plt.scatter(distr[i], (i+1) * np.ones(len(distr[i])), marker='.') accs = [0.140108,0.324714,0.542995,0.466747,0.361395,0.344438,0.401684,0.412026,0.401203,0.386049,0.389296,0.498016,0.494408,0.352856,0.548166,0.596993,0.487072,0.619363,0.59988,0.770535] plt.figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k') plt.plot(range(len(accs)), accs)
1,128
/TP2/submit_xgboost_ariel_6_with_features_11.ipynb
510efeba93756fd4d3f5df367024722966af92aa
[]
no_license
juankristal/7506-Datos
https://github.com/juankristal/7506-Datos
0
1
null
null
null
null
Jupyter Notebook
false
false
.py
109,601
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Challenge: Advanced Regression # # #### Source: # I choose a dataset from kaggle on Austin Texas's Animal Shelter, the largest no-kill animal shelter in the US, to predict whether an animal at intake will likely be adopted or have to be transfered elsewhere. # # https://www.kaggle.com/aaronschlegel/austin-animal-center-shelter-intakes-and-outcomes#aac_intakes_outcomes.csv # + #imports import pandas as pd import numpy as np import math #plotting from matplotlib import pyplot as pl import seaborn as sns # %matplotlib inline sns.set_style('white') #models from sklearn import linear_model from sklearn.linear_model import LogisticRegression import statsmodels.api as sm from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score # - #read in data df = pd.read_csv('https://www.dropbox.com/s/i613e1l09n5dhp7/aac_intakes_outcomes.csv?dl=1') #drop unnecessary columns df = df.drop(['animal_id_outcome','count','age_upon_outcome_age_group','outcome_subtype','age_upon_intake_age_group','animal_id_intake', 'color', 'found_location','breed','time_in_shelter','date_of_birth','dob_monthyear','outcome_monthyear','intake_monthyear' ], axis = 1) df.head(10) df.describe() df.info() #Limit the outcome types for adoption/transfer for prediction df_adopt = df.loc[((df['outcome_type'] == 'Transfer') | (df['outcome_type'] == 'Adoption'))] #create correlation matrix sns.heatmap(df_adopt.corr()) #drop some of the heavily corrleated variables df_adopt = df_adopt.drop(['age_upon_outcome', 'age_upon_outcome_(years)','outcome_number','outcome_year', 'age_upon_intake_(years)', 'outcome_datetime','intake_datetime', 'dob_year'], axis = 1) # ### Feature Engineering #create values for the categorical variables outcome_weekday_values = 
{'Sunday':0, 'Monday':1, 'Tuesday':2, 'Wednesday':3, 'Thursday':4,'Friday':5,'Saturday':6} df_adopt.replace({'outcome_weekday':outcome_weekday_values}, inplace=True) sex_upon_outcome_values = {'Unknown':0, 'Neutered Male':1, 'Spayed Female':2, 'Intact Female':3, 'Intact Male':4} df_adopt.replace({'sex_upon_outcome':sex_upon_outcome_values}, inplace = True) animal_type_values = {'Other':0,'Bird':1,'Cat':2, 'Dog':3} df_adopt.replace({'animal_type':animal_type_values}, inplace = True) intake_condition_values = {'Normal':0,'Sick':1,'Aged':2, 'Feral':3,'Nursing':4, 'Injured':5, 'Pregnant':6, 'Other':7} df_adopt.replace({'intake_condition': intake_condition_values}, inplace = True) intake_type_values ={'Owner Surrender':0, 'Public Assist': 1, 'Stray':2, 'Wildlife':3,'Euthanasia Request':4} df_adopt.replace({'intake_type': intake_type_values}, inplace = True) intake_weekday_values = {'Sunday':0, 'Monday':1, 'Tuesday':2, 'Wednesday':3, 'Thursday':4,'Friday':5,'Saturday':6} df_adopt.replace({'intake_weekday':intake_weekday_values}, inplace=True) sex_upon_intake_values = {'Unknown':0, 'Neutered Male':1, 'Spayed Female':2, 'Intact Female':3, 'Intact Male':4} df_adopt.replace({'sex_upon_intake':sex_upon_intake_values}, inplace = True) #run another correlation matrix with the newly created features sns.heatmap(df_adopt.corr()) #create the binary prediction outcomes outcome_type_values = {'Transfer':0, 'Adoption':1} df_adopt.replace({'outcome_type':outcome_type_values}, inplace = True) #clean data to fit model df_adopt.replace('NaN', np.nan) df_adopt['age_upon_intake'] = df_adopt.age_upon_intake.str.extract('(\d+)', expand=True).astype(int) df_adopt.fillna(0, inplace=True) for col in df_adopt.iloc[:, 1:]: df_adopt[col] = df_adopt[col].replace('[^0-9]+', '', regex=True) #Let's remove any outcome variables, we'll want to know what the likelyhood of that animal getting adopted would be over transfered at time of intake df_adopt = 
df_adopt.drop(['sex_upon_outcome','age_upon_outcome_(days)', 'outcome_month','outcome_weekday','outcome_hour'], axis = 1) # ## Vanilla Logistic Regression # + #create training/test sets X = df_adopt.drop('outcome_type', axis=1) Y = df_adopt['outcome_type'] X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size=0.3) # - # ##### Fitting a binary logistic model using statsmodels # + #declare predictors X_statsmod = X_train.copy() #the statsmodels formulation requires a column with constant value 1 that #will act as the intercept. X_statsmod['intercept'] = 1 #declare and fit the model. logit = sm.Logit(Y_train, X_statsmod) result = logit.fit() #lots of information about the model and its coefficients, but the #accuracy rate for predictions is missing. print(result.summary2()) # - # ##### Fitting a binary logistic model using sklearn # + #declare a logistic regression classifier lr = LogisticRegression() Y = Y_train X = X_train #fit the model. fit = lr.fit(X, Y) #display print('Coefficients') print(fit.coef_) print(fit.intercept_) pred_y_sklearn = lr.predict(X) #print confusion matrix print('\n Accuracy') print(pd.crosstab(pred_y_sklearn, Y)) #print accuracy score print('\n Percentage accuracy') print(lr.score(X_test, Y_test)) #perform cross validation print('\n Cross Validation Scores') print(cross_val_score(lr, X_test, Y_test, cv=10)) # - # ## Ridge Logistic Regression #initiate the ridge model through the pentalty l2 ridgemodel = LogisticRegression(penalty='l2') ridgefit = ridgemodel.fit(X_train, Y_train) ridgemodel.score(X_test, Y_test) # + #metrics pred_y_sklearn = ridgemodel.predict(X_test) #confusion matrix print('\n Accuracy') print(pd.crosstab(pred_y_sklearn, Y_test)) #accuracy score print('\n Percentage accuracy') print(ridgemodel.score(X_test, Y_test)) #cross validation print('\n Cross Validation Scores') print(cross_val_score(ridgemodel, X_test, Y_test, cv=10)) # - # ## Lasso Logistic Regression #initial the Lasso model through the pentalty l1 
lassomodel = LogisticRegression(penalty='l1') lassomodel.fit(X_train, Y_train) lassomodel.score(X_test, Y_test) # + #=metrics pred_y_sklearn = lassomodel.predict(X_test) #confusion matrix. print('\n Accuracy') print(pd.crosstab(pred_y_sklearn, Y_test)) #accuracy Score print('\n Percentage accuracy') print(lassomodel.score(X_test, Y_test)) #cross validation print('\n Cross Validation Scores') print(cross_val_score(lassomodel, X_test, Y_test, cv=10)) # - # ## Conclusion # In Summation: # # Logistic Regression Percentage Accuracy: 0.7235096453018046 # # Ridge Logistic Regression Percentage Accuracy: 0.723312812173307 # # Lasso Logistic Regression Percentage Accuracy: 0.7234289696828899 # # Our logistic regression model performed better by around .0002, so very minimally better than the ridge and logistic regression. We have low multicolinearity in this model, and most variables appear useful, thus, I would stick with the logistic regression for not only simplicity, but better performance. ima_Terreno_industrial_10_mas_cercanos' : np.float32, 'distancia_promedio_Villa_10_mas_cercanos' : np.float32, 'distancia_centro_ciudad' : np.float32, 'distancia_centro_provincia' : np.float32, 'distancia_centro_pais' : np.float32, 'distancia_Distrito_Federal' : np.float32, 'banos_preciopromedio_ciudad' : np.float32, 'habitaciones_preciopromedio_ciudad' : np.float32, 'garages_preciopromedio_ciudad' : np.float32, 'banos_preciopromedio_metroscubiertos' : np.float32, 'habitaciones_preciopromedio_metroscubiertos' : np.float32, 'garages_preciopromedio_metroscubiertos' : np.float32, 'precio_x_m2' : np.float32, 'tipodepropiedad_mean_precio' : np.float32, 'titulo_cantidad_palabras_importantes' : np.float32, 'descripcion_cantidad_palabras_importantes' : np.float32, 'direccion_cantidad_palabras_importantes' : np.float32, 'titulo_cantidad_caracteres_en_palabras_importantes' : np.float32, 'descripcion_cantidad_caracteres_en_palabras_importantes' : np.float32, 
'direccion_cantidad_caracteres_en_palabras_importantes' : np.float32, 'titulo_longitud_media_de_palabra' : np.float32, 'descripcion_longitud_media_de_palabra' : np.float32, 'direccion_longitud_media_de_palabra' : np.float32, 'titulo_cantidad_stopwords' : np.float32, 'descripcion_cantidad_stopwords' : np.float32, 'direccion_cantidad_stopwords' : np.float32, 'titulo_cantidad_signos_puntacion' : np.float32, 'descripcion_cantidad_signos_puntacion' : np.float32, 'direccion_cantidad_signos_puntacion' : np.float32, 'direccion_cantidad_palabras_en_mayuscula' : np.float32, 'direccion_cantidad_titulos' : np.float32, 'titulo_cantidad_palabras_top_k' : np.float32, 'descripcion_cantidad_palabras_top_k' : np.float32, 'direccion_cantidad_palabras_top_k' : np.float32, 'titulo_cantidad_palabras_bottom_k' : np.float32, 'descripcion_cantidad_palabras_bottom_k' : np.float32, 'direccion_cantidad_palabras_bottom_k' : np.float32, 'titulo_cantidad_prefijos_top_k' : np.float32, 'descripcion_cantidad_prefijos_top_k' : np.float32, 'direccion_cantidad_prefijos_top_k' : np.float32, 'titulo_cantidad_postfijos_top_k' : np.float32, 'descripcion_cantidad_postfijos_top_k' : np.float32, 'direccion_cantidad_postfijos_top_k' : np.float32, 'categoria_descripcion_0' : np.float32, 'categoria_descripcion_1' : np.float32, 'categoria_descripcion_2' : np.float32, 'categoria_descripcion_3' : np.float32, 'categoria_descripcion_4' : np.float32, 'categoria_descripcion_5' : np.float32, 'categoria_descripcion_6' : np.float32, 'categoria_descripcion_7' : np.float32, 'distancia_euclideana_al_origen' : np.float32, 'distancia_minima_comercial' : np.float32, 'producto_interno_maximo_ciudad_pais' : np.float32, 'ciudad_mean_antiguedad_sobre_provincia_mean_antiguedad' : np.float32, 'tipodepropiead_mean_utilidades_extra_sobre_ciudad_mean_utilidades_extra' : np.float32, 'antiguedad_sobre_tipodepropiedad_mean_antiguedad' : np.float32, 'direccion_cantidad_al_menos_una_mayuscula' : np.float32, 'direccion_cantidad_fijos_top_k' 
: np.float32, 'titulo_cantidad_fijos_top_k' : np.float32, 'titulo_palabras_top_k_sobre_total_palabras' : np.float32, 'ciudad_distancia_al_origen': np.float32, 'ciudad_mean_mean_todas' : np.float32, 'ciudad_mean_antiguedad_sobre_mean_metrocubiertos' : np.float32 } ) FEATURES = ['metrostotales', 'metroscubiertos', 'idzona', 'lat', 'antiguedad', 'metros_x_espacio', 'metros_x_habitaciones', 'metros_x_banos', 'metros_x_garages', 'descripcion_longitud_media_de_palabra', 'habitaciones_preciopromedio_ciudad', 'banos_preciopromedio_ciudad', 'distancia_minima_Local_en_centro_comercial', 'titulo_longitud_media_de_palabra', 'lng', 'distancia_minima_Local_Comercial', 'distancia_minima_Bodega_comercial', 'garages_preciopromedio_ciudad', 'distancia_minima_Terreno_comercial', 'distancia_minima_Oficina_comercial', 'habitaciones', 'producto_interno_centro_ciudad', 'dia', 'tipodepropiead_mean_utilidades_extra_sobre_ciudad_mean_utilidades_extra', 'descripcion_cantidad_stopwords', 'descripcion_cantidad_palabras_top_k', 'descripcion_cantidad_caracteres_en_palabras_importantes', 'descripcion_cantidad_signos_puntacion', 'titulo_cantidad_caracteres_en_palabras_importantes', 'descripcion_cantidad_palabras_importantes', 'direccion_longitud_media_de_palabra', 'antiguedad_sobre_tipodepropiedad_mean_antiguedad', 'mes', 'habitaciones_preciopromedio_metroscubiertos', 'titulo_palabras_top_k_sobre_total_palabras', 'metros_x_utilidades_extra', 'distancia_minima_comercial', 'precio_x_m2', 'año'] X_test.set_index('id', inplace = True) xgb_regressor = pickle.load(open("entrenamientos/xgb_training_submit_ariel_with_features_11.pickle.dat", "rb")) t0 = time.time() y_kaggle_pred = xgb_regressor.predict(X_test[FEATURES]) t1 = time.time() print('tiempo = {0:.2f} segundos'.format((t1-t0))) res = pd.DataFrame(y_kaggle_pred, index=X_test.index, columns=['target']) res.to_csv("submits/submit_xgboost_ariel_with_features_11.csv", header=True) def plot_importance(modelo): feat_imp_1 = 
pd.Series(modelo.get_booster().get_fscore()).sort_values(ascending=False) feat_imp_1.plot(kind='barh', title='Feature Importances', figsize=(15, 15)) plt.ylabel('Feature Importance Score') plt.show() return feat_imp_1 plot_importance(xgb_regressor) X_test[FEATURES].shape # + active="" # feat_imp_1 = pd.Series(xgb_regressor.get_booster().get_fscore()).sort_values(ascending=False) # plot = feat_imp_1.plot(kind='barh', fontsize = 40, figsize=(40, 50)) # plt.title('Feature Importance XGBRegressor Submit Final', fontsize = 50) # plt.xlabel('"Feature Importance Score"', fontsize = 50) # plt.tight_layout() # fig = plot.get_figure() # fig.savefig('submits/feature_importance_xgb_submit_final') # plt.show()
15,010
/scripts/frequency/growth_word_competition_prediction.ipynb
1d137f8173593b584f38f42bc120b5cfe67ffba5
[]
no_license
ianbstewart/nonstandard_word_dissemination
https://github.com/ianbstewart/nonstandard_word_dissemination
7
0
null
null
null
null
Jupyter Notebook
false
false
.py
1,833,426
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Growth word competition prediction # In which we predict the competition results of paired growth words, according to different social and semantic factors. from __future__ import division import pandas as pd import matplotlib % matplotlib inline import matplotlib.pyplot as plt import sys if('..' not in sys.path): sys.path.append('..') from data_processing.data_handler import get_default_vocab # ## Load data tf_2015_2016 = pd.read_csv('../../data/frequency/2015_2016_tf_norm.tsv', sep='\t', index_col=0) tf_2014_2015 = pd.read_csv('../../data/frequency/2014_2015_tf_norm.tsv', sep='\t', index_col=0) vocab = get_default_vocab() print(len(vocab)) tf_2014_2015 = tf_2014_2015.loc[vocab] tf_2014_2015.fillna(0, inplace=True) tf_2014_2015.shape # Figure out which words didn't exist in 2014-2015. tf_2014_2015_mean = tf_2014_2015.mean(axis=1) tf_2014_2015_high_freq = tf_2014_2015_mean[tf_2014_2015_mean > 0.] print(tf_2014_2015_high_freq.shape) growth_percentile = 90 tf_growth_2015_2016 = pd.read_csv('../../data/frequency/2015_2016_tf_norm_correls.tsv', sep='\t', index_col=0) growth_lower = pd.np.percentile(tf_growth_2015_2016['correl'], growth_percentile) growth_word_correls = tf_growth_2015_2016[tf_growth_2015_2016['correl'] >= growth_lower] print(growth_word_correls.head()) print('%d growth words'%(len(growth_word_correls))) # want growth words that were not present or minimally present in 2014-2015 growth_words = list(set(growth_word_correls.index) - set(tf_2014_2015_high_freq.index)) print('%d growth words that were not present in 2014-2015'%(len(growth_words))) # print(pd.np.random.choice(growth_words, size=100, replace=False)) print(growth_words) # This sample size is too small. 
What if we increase the lower limit to count as "high frequency" in 2014-2015? lower_percentile = 10 # numeric cutoff tf_upper_bound = pd.np.percentile(tf_2014_2015.mean(axis=1), lower_percentile) print(tf_upper_bound) tf_2014_2015_high_freq = tf_2014_2015_mean[tf_2014_2015_mean > tf_upper_bound] growth_words = list(set(growth_word_correls.index) - set(tf_2014_2015_high_freq.index)) print('%d growth words that were not present in 2014-2015'%(len(growth_words))) print(pd.np.random.choice(growth_words, size=100, replace=False)) # Mostly non-English but also misspellings and actual new words?? `dogwhistle` is new. # ## Set up prediction # Now we pair growth words into competitors based on their semantic similarity. After each pairing, we remove both words from the pool to prevent a word being chosen twice. # # For each growth word $g_{1}$ in $G$, $g_{2}=argmax_{g_{w} \in G, w \neq 1}(sim(g_{1}, g_{w}))$. # Once we have the pairs, we predict which word in the pair will have a higher frequency at $t_{11}$ based on the following statistics at $t_{0}$: # # - frequency $f$ # - user diffusion $D_{U}$ # - subreddit diffusion $D_{S}$ # - thread diffusion $D_{T}$ # - bigram residuals (i.e. context diversity) $C_{2}$ # - trigram residuals (i.e. context diversity) $C_{3}$ # - nearest neighbor distance $N$ # ## Compute similarity between growth words # Compute cosine similarity between all growth words based on their word embeddings at the initial timestep (`2015-06`). 
from gensim.models import Word2Vec from sklearn.metrics.pairwise import cosine_similarity # embed_file = '../../data/embeddings/2015-06_wang2vec_100_5_embeddings' embed_file = '../../data/embeddings/2015-06_word2vec_100_5_embeddings' embeddings = Word2Vec.load_word2vec_format(embed_file, binary=True) embed_vocab = list(set(growth_words) & set(embeddings.vocab.keys())) print('%d growth words in embeddings'%(len(embed_vocab))) growth_embeddings = pd.concat([pd.Series(embeddings[v]) for v in embed_vocab], axis=1).transpose() growth_sims = cosine_similarity(growth_embeddings) growth_sims = pd.DataFrame(growth_sims, index=embed_vocab, columns=embed_vocab) print(growth_sims.head()) # ## Pair growth words # Pair words according to semantic similarity. N = int(len(growth_words) / 2) vocab = list(growth_words) def get_pairs_no_replace(sims, vocab, size): g_sims = sims.ix[vocab, vocab] g_pairs = [] g_words = set() while(len(g_pairs) < size): g1 = pd.np.random.choice(vocab, 1, replace=False)[0] g2_list = g_sims.loc[g1].sort_values(inplace=False, ascending=False)[1:].index.tolist() for g in g2_list: if(g not in g_words): g2 = g break g_pair = (g1, g2) g_pairs.append(g_pair) g_words.add(g1) g_words.add(g2) return g_pairs all_pairs_no_replace = get_pairs_no_replace(growth_sims, vocab, N) pd.np.random.shuffle(all_pairs_no_replace) top_k = 20 print('got %d sample pairs %s'%(top_k, all_pairs_no_replace[:top_k])) # plot example growth word pairs top_k = 20 pd.np.random.shuffle(all_pairs_no_replace) sample_pairs = all_pairs_no_replace[:top_k] cols = 4 rows = int(len(sample_pairs) / cols) + 1 all_dates = sorted(tf_2015_2016.columns) x_positions = range(len(all_dates)) xticks, xlabels = zip(*zip(x_positions, all_dates)[::3]) plt.figure(figsize=(cols * size, rows * size)) for i, (w1, w2) in enumerate(sample_pairs): plt.subplot(rows, cols, i+1) plt.plot(x_positions, tf_2015_2016.loc[w1], 'r', label=w1) plt.plot(x_positions, tf_2015_2016.loc[w2], 'b', label=w2) plt.title('%s vs. 
%s'%(w1, w2)) plt.legend(loc='upper left') plt.xticks(xticks, xlabels) plt.tight_layout() plt.show() # There's a few cases of crossover but generally there's a clear winner after the first few weeks. # ## Gather predictor variables # Extract all the predictor statistics and make sure they all have the same vocabulary. tf_0 = tf_2015_2016.ix[:,0] user_diffusion = pd.read_csv('../../data/frequency/2015_2016_user_diffusion.tsv', sep='\t', index_col=0).ix[:,0] sub_diffusion = pd.read_csv('../../data/frequency/2015_2016_subreddit_diffusion.tsv', sep='\t', index_col=0).ix[:,0] thread_diffusion = pd.read_csv('../../data/frequency/2015_2016_thread_diffusion.tsv', sep='\t', index_col=0).ix[:,0] bigram_resids = pd.read_csv('../../data/frequency/2015_2016_2gram_resids.tsv', sep='\t', index_col=0).ix[:,0] trigram_resids = pd.read_csv('../../data/frequency/2015_2016_3gram_resids.tsv', sep='\t', index_col=0).ix[:,0] nearest_sim = pd.read_csv('../../data/embeddings/2015_2016_wang2vec_top_1_sim_niche_similarities.tsv', sep='\t', index_col=0).ix[:,0] all_stats = [tf_0, user_diffusion, sub_diffusion, thread_diffusion, bigram_resids, trigram_resids, nearest_sim] stat_names = ['f', 'DU', 'DS', 'DT', 'C2', 'C3', 'N'] vocab = set(get_default_vocab()) clean_stats = [] for n, s in zip(stat_names, all_stats): s = s.loc[vocab] s.fillna(0, inplace=True) s_smooth = 1e-1 * s[s > 0].min() s = s + s_smooth clean_stats.append(s) all_stats = clean_stats from sklearn.preprocessing import MinMaxScaler from sklearn.cross_validation import train_test_split def get_data(pairs, stats, stat_names): data = {} w1, w2 = zip(*pairs) w1 = list(w1) w2 = list(w2) # print(w1) # print(w2) for stat_name, stat in zip(stat_names, stats): # print('processing stat %s'%(stat_name)) w1_vals = stat.loc[w1].values w2_vals = stat.loc[w2].values if(stat_name == 'C2' or stat_name == 'C3'): data_s = w1_vals - w2_vals else: data_s = w1_vals / w2_vals data[stat_name] = data_s index = ['%s_%s'%(w1, w2) for w1, w2 in pairs] data 
= pd.DataFrame(data, index=index) # make sure that data is in right order data = data[stat_names] return data X_data = get_data(all_pairs_no_replace, all_stats, stat_names) pair_names = X_data.index.tolist() scaler = MinMaxScaler() X_data = scaler.fit_transform(X_data) X_data = pd.DataFrame(X_data, index=pair_names, columns=stat_names) w1, w2 = zip(*all_pairs) w1 = list(w1) w2 = list(w2) Y_data = ((tf_11.loc[w1].values / tf_11.loc[w2].values) > 1.).astype(int) X_train, X_test, Y_train, Y_test = train_test_split(X_data, Y_data) from sklearn.linear_model import LogisticRegression from sklearn.metrics import precision_recall_fscore_support lr = LogisticRegression() lr.fit(X_train, Y_train) Y_pred = lr.predict(X_test) precision, recall, fscore, support = precision_recall_fscore_support(Y_test, Y_pred) precision = precision[1] recall = recall[1] fscore = fscore[1] support = support[1] print('precision: %.3f'%(precision*100)) print('recall: %.3f'%(recall*100)) print('F-score: %.3f'%(fscore*100)) print('support: %.3f'%(support*100)) from sklearn.model_selection import cross_val_score lr = LogisticRegression() scores = cross_val_score(lr, X_data, Y_data, cv=5) print(scores) print(pd.np.mean(scores)) # Overall a pretty weak performance. What if we only provide some of the predictors? X_combos = [ ['f'], ['f', 'DS', 'DU', 'DT'], ['f', 'C2', 'C3', 'N'], ['f', 'DS', 'DU', 'DT', 'C2', 'C3', 'N'], ] for combo in X_combos: X_data_ = X_data[combo] scores = cross_val_score(lr, X_data_, Y_data, cv=10) print('combo %s got scores %s => %.3f'%('/'.join(combo), str(scores), pd.np.mean(scores))) # Seems like diffusion hurts the least. # Let's investigate the relative influence of the different factors on word success. from statsmodels.discrete.discrete_model import Logit logit = Logit(Y_data, X_data) results = logit.fit(method='bfgs', maxiter=1000) print(results.summary()) # Most of the predictors have little to no effect except for $D_{S}$ and (maybe) $N$. 
This is probably a product of the word-pairing process. # ## Pair growth words with replacement # This will cause some words to be over-represented but will also cut down on the number of falsely-connected growth pairs. vocab = list(growth_words) N = len(vocab) def get_pairs_replace(sims, vocab, size): g_sims = sims.ix[vocab, vocab] g_pairs = [] for g1 in vocab: g2_list = g_sims.loc[g1].sort_values(inplace=False, ascending=False)[1:].index.tolist() g2 = g2_list[0] g_pair = (g1, g2) g_pairs.append(g_pair) return g_pairs all_pairs_replace = get_pairs_replace(growth_sims, vocab, N) pd.np.random.shuffle(all_pairs_replace) top_k = 20 print('got %d sample pairs %s'%(top_k, all_pairs_replace[:top_k])) # make sure that we aren't oversampling words w1, w2 = zip(*all_pairs) print('%d words paired with %d unique matches'%(len(set(w1)), len(set(w2)))) # plot example growth word pairs top_k = 20 pd.np.random.shuffle(all_pairs_replace) sample_pairs = all_pairs_replace[:top_k] cols = 4 rows = int(len(sample_pairs) / cols) + 1 all_dates = sorted(tf_2015_2016.columns) x_positions = range(len(all_dates)) xticks, xlabels = zip(*zip(x_positions, all_dates)[::3]) plt.figure(figsize=(cols * size, rows * size)) for i, (w1, w2) in enumerate(sample_pairs): plt.subplot(rows, cols, i+1) plt.plot(x_positions, tf_2015_2016.loc[w1], 'r', label=w1) plt.plot(x_positions, tf_2015_2016.loc[w2], 'b', label=w2) plt.title('%s vs. %s'%(w1, w2)) plt.legend(loc='upper left') plt.xticks(xticks, xlabels) plt.tight_layout() plt.show() # There seems to be more crossover/co-evolution here. 
X_data = get_data(all_pairs_replace, all_stats, stat_names) pair_names = X_data.index.tolist() scaler = MinMaxScaler() X_data = scaler.fit_transform(X_data) X_data = pd.DataFrame(X_data, index=pair_names, columns=stat_names) w1, w2 = zip(*all_pairs) w1 = list(w1) w2 = list(w2) Y_data = ((tf_11.loc[w1].values / tf_11.loc[w2].values) > 1.).astype(int) X_combos = [ ['f'], ['f', 'DS', 'DU', 'DT'], ['f', 'C2', 'C3', 'N'], ['f', 'DS', 'DU', 'DT', 'C2', 'C3', 'N'], ] for combo in X_combos: lr = LogisticRegression() X_data_ = X_data[combo] scores = cross_val_score(lr, X_data_, Y_data, cv=10) print('combo %s got scores %s => %.3f'%('/'.join(combo), str(scores), pd.np.mean(scores))) logit = Logit(Y_data, X_data) results = logit.fit() print(results.summary()) # $D_{T}$ has negative correlation with word success, and $C_{3}$ has positive correlation. Maybe $C2$ has too much overlap (e.g. double-counting bigrams) to be useful. # ## Test different timesteps # Try training data drawn from $t=1$ to $t=10$. We predict that data closer to $t=11$ will be more accurate. 
tf = pd.read_csv('../../data/frequency/2015_2016_tf_norm.tsv', sep='\t', index_col=0) user_diffusion = pd.read_csv('../../data/frequency/2015_2016_user_diffusion.tsv', sep='\t', index_col=0) sub_diffusion = pd.read_csv('../../data/frequency/2015_2016_subreddit_diffusion.tsv', sep='\t', index_col=0) thread_diffusion = pd.read_csv('../../data/frequency/2015_2016_thread_diffusion.tsv', sep='\t', index_col=0) bigram_resids = pd.read_csv('../../data/frequency/2015_2016_2gram_resids.tsv', sep='\t', index_col=0) trigram_resids = pd.read_csv('../../data/frequency/2015_2016_3gram_resids.tsv', sep='\t', index_col=0) nearest_sims = pd.read_csv('../../data/embeddings/2015_2016_word2vec_top_10_sim_niche_similarities.tsv', sep='\t', index_col=0) all_stats_full = [tf, user_diffusion, sub_diffusion, thread_diffusion, bigram_resids, trigram_resids, nearest_sims] stat_names = ['f', 'DU', 'DS', 'DT', 'C2', 'C3', 'N'] timesteps = range(1,11) vocab = get_default_vocab() tf_11 = tf.ix[:, 11] Y_data = ((tf_11.loc[w1].values / tf_11.loc[w2].values) > 1.).astype(int) all_dates = sorted(tf.columns) cv = 10 all_clean_stats = {} # first collect clean data for t in timesteps: t_str = all_dates[t] all_stats_t = [s.ix[:, t] for s in all_stats_full] clean_stats = [] for s in all_stats_t: s = s.loc[vocab] s.fillna(0, inplace=True) s_smooth = 1e-1 * s[s > 0].min() s = s + s_smooth clean_stats.append(s) all_clean_stats[t_str] = clean_stats # then train classifier scaler = MinMaxScaler() for t in timesteps: t_str = all_dates[t] all_stats_t = all_clean_stats[t_str] X_data = get_data(all_pairs, all_stats_t, stat_names) pair_names = X_data.index.tolist() X_data = scaler.fit_transform(X_data) X_data = pd.DataFrame(X_data, index=pair_names, columns=stat_names) lr = LogisticRegression() scores = cross_val_score(lr, X_data, Y_data, cv=cv) # print('%s = %.3f +/- %.3f'%(scores, pd.np.mean(scores), pd.np.std(scores))) print('%s: %.3f +/- %.3f'%(t_str, pd.np.mean(scores), pd.np.std(scores))) # Unsurprisingly, 
using data closer to $t=11$ produces more reliable results. # same but with feature analysis/ablation scaler = MinMaxScaler() stat_combos = [ ['f'], ['f', 'DU', 'DS', 'DT'], # ['f', 'C2', 'C3'], ['f', 'C2', 'C3', 'N'], ['f', 'DU', 'DS', 'DT', 'C2', 'C3', 'N'] ] stat_indices = {'f' : 0, 'DU' : 1, 'DS' : 2, 'DT' : 3, 'C2' : 4, 'C3' : 5, 'N' : 6} # plot it out plt.figure(figsize=(5,5)) cmap = plt.get_cmap('Reds') markers = ['o', '*', '^', 's'] t_start = min(timesteps) t_end = max(timesteps) cv = 5 x_ticks_labels = zip(timesteps, [all_dates[t] for t in timesteps])[::3] xticks, xlabels = zip(*x_ticks_labels) ctr = 1 for stat_combo in stat_combos: # print('testing stat combo %s'%(stat_combo)) stat_combo_indices = [stat_indices[stat_name] for stat_name in stat_combo] mean_scores = [] score_errs = [] for t in timesteps: t_str = all_dates[t] all_stats_t = all_clean_stats[t_str] all_stats_t = [all_stats_t[i] for i in stat_combo_indices] X_data = get_data(all_pairs, all_stats_t, stat_combo) pair_names = X_data.index.tolist() X_data = scaler.fit_transform(X_data) X_data = pd.DataFrame(X_data, index=pair_names, columns=stat_combo) lr = LogisticRegression() scores = cross_val_score(lr, X_data, Y_data, cv=cv) mean_score = pd.np.mean(scores) mean_scores.append(mean_score) score_err = pd.np.std(scores) score_errs.append(score_err) color = cmap(ctr / len(stat_combos)) marker = markers[ctr % len(markers)] stat_combo_str = '$%s$'%(','.join(stat_combo)) plt.plot(timesteps, mean_scores, color=color, marker=marker, label=stat_combo_str) plt.errorbar(timesteps, mean_scores, yerr=score_errs) plt.xticks(xticks, xlabels) ctr += 1 plt.title('Growth word competition classification') plt.legend(loc='upper left') plt.show() # Frequency on its own doesn't do a lot; it's the combination of frequency and either (1) social or (2) semantic info that better predicts word success. # ## Testing frequency predictiveness # How well does the frequency at $t_{n-1}$ predict frequency delta at $t_{n}$? 
Similar to Altmann et al. (2011) plots; repeat plot across all statistics. tf_final = tf.ix[:,1:] tf_start = tf.ix[:,0:-1] tf_final.columns = tf_start.columns # ## Test different embeddings # Let's try different embedding methods to determine whether one produces more reliable growth word pairs. # tf_means = pd.read_csv('../../data/frequency/2015_2016_tf_norm.tsv', sep='\t', index_col=0).mean(axis=1) top_k = 100 top_growth_words = tf_0.loc[growth_words].sort_values(inplace=False, ascending=False)[:top_k] top_growth_words = top_growth_words.index.tolist() from collections import defaultdict import os embed_types = ['word2vec', 'wang2vec'] dims = [50, 100, 300] windows = [2, 5, 10] date_str = '2015-06' embed_names = ['%s_%s_%d_%d_embeddings'%(date_str, e, d, w) for e in embed_types for d in dims for w in windows] data_dir = '../../data/embeddings/' embed_files = [os.path.join(data_dir, e) for e in embed_names] growth_word_matches = defaultdict(list) top_k = 10 for embed_name, embed_file in zip(embed_names, embed_files): embeddings = Word2Vec.load_word2vec_format(embed_file, binary=True) embed_vocab = list(set(growth_words) & set(embeddings.vocab.keys())) growth_word_embeddings = pd.DataFrame(pd.np.array([embeddings[v] for v in embed_vocab]), index=embed_vocab) growth_word_sims = cosine_similarity(growth_word_embeddings) growth_word_sims = pd.DataFrame(growth_word_sims, index=embed_vocab, columns=embed_vocab) for w in top_growth_words: match = growth_word_sims.loc[w].sort_values(ascending=False, inplace=False).index[1] growth_word_matches[w].append(match) # neighbors, sims = zip(*neighbor_sim_pairs) # growth_word_sets[w].append(neighbors) pd.set_option('display.max_rows', 100) pd.set_option('display.max_columns', 100) # growth_word_matches = pd.DataFrame(growth_word_matches, index=embed_names).transpose() growth_word_matches # Even just considering the top growth words, there is not a lot of evidence for competition between growth words. 
It seems like they're competing more for the same topical attention rather than for grammatical or functional niche. # ## Visualize social vs. semantic distinction # Compare the dimensions of {$D_U, D_S, D_T$} x {$C2, C3, P$} for all growth words. import seaborn as sns # clean all stats first def clean_stats(all_stats, vocab): cleaned = [] for s in all_stats: s = s.loc[vocab] s.fillna(0, inplace=True) s_smooth = 1e-1 * s[s > 0].min() s = s + s_smooth cleaned.append(s) return cleaned social_stats = [user_diffusion.copy(), sub_diffusion.copy(), thread_diffusion.copy()] semantic_stats = [bigram_resids.copy(), trigram_resids.copy(), nearest_sim.copy()] social_stats = clean_stats(social_stats, vocab) semantic_stats = clean_stats(semantic_stats, vocab) social_stat_names = ['DU', 'DT', 'DS'] semantic_stat_names = ['C2', 'C3', 'N'] all_stat_names = social_stat_names + semantic_stat_names all_stats = [s.values.flatten() for s in social_stats] + [s.values.flatten() for s in semantic_stats] all_stats = pd.DataFrame({stat_name : s for stat_name, s in zip(all_stat_names, all_stats)}) plt.figure(figsize=(20,20)) sns.pairplot(all_stats) plt.show() # pairgrid = sns.PairGrid(all_stats) # pairgrid.map_diag(sns.kdeplot) # pairgrid.map_offdiag(sns.jointplot, cmap="Blues_d") # plt.show() # N = len(social_stats) * len(semantic_stats) # cols = 3 # rows = int(N / cols) + 1 # size = 4 # plt.figure(figsize=(cols * size, rows * size)) # ctr = 1 # for social_stat_name, social_stat in zip(social_stat_names, social_stats): # # print('%s %s'%(social_stat_name, str(social_stat.shape))) # social_stat = social_stat.values.flatten() # for semantic_stat_name, semantic_stat in zip(semantic_stat_names, semantic_stats): # # print('%s %s'%(semantic_stat_name, str(semantic_stat.shape))) # semantic_stat = semantic_stat.values.flatten() # # plt.subplot(rows, cols, ctr) # plt.figure(figsize=(5,5)) # # can't plot all points because overflow # # plt.plot(social_stat, semantic_stat) # # bivariate distribution # 
sns.jointplot(x=social_stat, y=semantic_stat, kind='hex', color='k') # plt.xlabel('$%s$'%(social_stat_name)) # plt.ylabel('$%s$'%(semantic_stat_name)) # ctr += 1 # plt.show() # ## Example of over-diffusion # Compare growth and decline of `fleek` with another growth word with matching frequency. We want to determine whether excessive social/context diffusion led to the decline of `fleek` as compared to another growth word. # tf_2014_2015 = pd.read_csv('../../data/frequency/2014_2015_tf_norm.tsv', sep='\t', index_col=0) # tf_2015_2016 = pd.read_csv('../../data/frequency/2015_2016_tf_norm.tsv', sep='\t', index_col=0) # tf_2014_2016 = pd.concat([tf_2014_2015, tf_2015_2016], axis=1) test_word = 'fleek' match = abs(tf_2015_2016.ix[growth_words, 0] - tf_2015_2016.ix[test_word, 0]).sort_values().index[1] x_positions = range(tf_2014_2016.shape[1]) xticks, xlabels = zip(*zip(x_positions, tf_2014_2016.columns)[::3]) plt.figure(figsize=(10, 5)) plt.plot(x_positions, tf_2014_2016.loc[test_word], 'r', label=test_word) plt.plot(x_positions, tf_2014_2016.loc[match], 'b--', label=match) plt.legend(loc='upper left') plt.xticks(xticks, xlabels) plt.show() # compare with e.g. user diffusion social_stats = [user_diffusion, sub_diffusion, thread_diffusion] social_stat_names = ['DU', 'DS', 'DT'] cols = len(social_stats) rows = 1 plt.figure(figsize=(cols*5, rows*5)) x_positions = range(tf_2015_2016.shape[1]) xticks, xlabels = zip(*zip(x_positions, sorted(tf_2015_2016.columns))[::3]) for i, (social_stat, stat_name) in enumerate(zip(social_stats, social_stat_names)): plt.subplot(rows, cols, i+1) plt.plot(x_positions, social_stat.loc[test_word], 'r', label=test_word) plt.plot(x_positions, social_stat.loc[match], 'b--', label=match) plt.legend(loc='upper left') plt.title(stat_name) plt.xticks(xticks, xlabels) plt.tight_layout() plt.show() # Even though `stixxay` got more popular over time, it still remained confined to specific audience while `fleek` remained over-diffuse. 
# ## Solo growth word prediction # What if we just predict the overall growth of individual growth word?
23,269
/notebooks/Plot_AtmNu_recoildistributions.ipynb
dadab66c3b7305a759c090d77f125c12c8c0d78e
[ "MIT" ]
permissive
cajohare/AtmNuFloor
https://github.com/cajohare/AtmNuFloor
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
188,109
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # pandas tutorial - 2 # - import pandas as pd df = pd.DataFrame([{'Name': 'Ram', 'Item Purchased': 'NBK', 'Cost': 22.50}, {'Name': 'Sham', 'Item Purchased': 'PEN', 'Cost': 2.50}, {'Name': 'Ravit', 'Item Purchased': 'TABLE', 'Cost': 5.00}], index=['Store 1', 'Store 1', 'Store 2']) df df['Date'] = ['December 1', 'January 1', 'Mid - May'] df df['Delivered'] = True df df['Feedback'] = ['Positive', None, 'Negative'] df # assigning values based on index labels adf = df.reset_index() adf['Date'] = pd.Series({0: 'December 15', 2: 'Mid - May'}) adf # merging data frames staff_df = pd.DataFrame([{'Name': 'Kelly', 'Role': 'Director of HR'}, {'Name': 'Sally', 'Role': 'Course liasion'}, {'Name': 'James', 'Role': 'Grader'}]) staff_df = staff_df.set_index('Name') student_df = pd.DataFrame([{'Name': 'James', 'School': 'Business'}, {'Name': 'Mike', 'School': 'Law'}, {'Name': 'Sally', 'School': 'Engineering'}]) student_df = student_df.set_index('Name') print(staff_df.head()) print print(student_df.head()) # merging on index using outer pd.merge(staff_df, student_df, how="outer", left_index = True, right_index = True) pd.merge(staff_df, student_df, how="left", left_index = True, right_index = True) pd.merge(staff_df, student_df, how="right", left_index = True, right_index = True) pd.merge(staff_df, student_df, how="inner", left_index = True, right_index = True) # merging on column values staff_df = staff_df.reset_index() student_df = student_df.reset_index() pd.merge(staff_df, student_df, how="outer", left_on = "Name", right_on = "Name") staff_df = pd.DataFrame([{'Name': 'Kelly', 'Role': 'Director of HR', 'Location': 'State Street'}, {'Name': 'Sally', 'Role': 'Course liasion', 'Location': 'Washington Avenue'}, {'Name': 'James', 'Role': 'Grader', 'Location': 
'Washington Avenue'}]) student_df = pd.DataFrame([{'Name': 'James', 'School': 'Business', 'Location': '1024 Billiard Avenue'}, {'Name': 'Mike', 'School': 'Law', 'Location': 'Fraternity House #22'}, {'Name': 'Sally', 'School': 'Engineering', 'Location': '512 Wilson Crescent'}]) pd.merge(staff_df, student_df, how='left', left_on='Name', right_on='Name') staff_df = pd.DataFrame([{'First Name': 'Kelly', 'Last Name': 'Desjardins', 'Role': 'Director of HR'}, {'First Name': 'Sally', 'Last Name': 'Brooks', 'Role': 'Course liasion'}, {'First Name': 'James', 'Last Name': 'Wilde', 'Role': 'Grader'}]) student_df = pd.DataFrame([{'First Name': 'James', 'Last Name': 'Hammond', 'School': 'Business'}, {'First Name': 'Mike', 'Last Name': 'Smith', 'School': 'Law'}, {'First Name': 'Sally', 'Last Name': 'Brooks', 'School': 'Engineering'}]) staff_df student_df pd.merge(staff_df, student_df, how='inner', left_on=['First Name','Last Name'], right_on=['First Name','Last Name']) # pandas idioms df = pd.read_csv('census.csv') df (df.where(df['SUMLEV'] == 50) .dropna() .set_index(['STNAME', 'CTYNAME'])) # + import numpy as np def min_max(row): data = row[['POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012', 'POPESTIMATE2013', 'POPESTIMATE2014', 'POPESTIMATE2015']] return pd.Series({'min' : np.min(data), 'max' : np.max(data)}) # - df.apply(min_max, axis = 1).head() def min_max(row): data = row[['POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012', 'POPESTIMATE2013', 'POPESTIMATE2014', 'POPESTIMATE2015']] row['max'] = np.max(data) row['min'] = np.min(data) return row df.apply(min_max, axis = 1).head() # same can be done using lambdas but only one column can be added at a time rows = ['POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012', 'POPESTIMATE2013', 'POPESTIMATE2014', 'POPESTIMATE2015'] df.apply(lambda x: np.max(x[rows]), axis = 1).head() # similarly for min df.apply(lambda x: np.min(x[rows]), axis = 1).head() # group by df = pd.read_csv('census.csv') df = df[df['SUMLEV']==50] 
df for group, frame in df.groupby('STNAME'): avg = np.average(frame['CENSUS2010POP']) print 'Countries in State ' + group + ' have an average population of ' + str(avg) # grouping based on a custom function # we need to have the column we want to group on as the index df = df.set_index("STNAME") df.head() # + def fun(item): if item[0] < 'N': return 0 elif item[0] < 'R': return 1 return 2 for group, frame in df.groupby(fun): print 'Number of frames in group ' + str(group) + ' are ' + str(len(frame)) # - # the above method was to split the data frame now we apply a funtion to the grouped values df = pd.read_csv('census.csv') df = df[df['SUMLEV']==50] df.groupby('STNAME').agg({'CENSUS2010POP' : np.average}).head() # difference between series and data frame group by print type(df.groupby(level = 0)['POPESTIMATE2010', 'POPESTIMATE2011']) print type(df.groupby(level = 0)['POPESTIMATE2010']) df.set_index('STNAME').groupby(level = 0)['CENSUS2010POP'].agg({'avg' : np.average, 'sum' : np.sum}) (df.set_index('STNAME').groupby(level = 0)['POPESTIMATE2010', 'POPESTIMATE2011'] .agg({'avg' : np.average, 'sum' : np.sum})) (df.set_index('STNAME').groupby(level = 0)['POPESTIMATE2010', 'POPESTIMATE2011'] .agg({'POPESTIMATE2010' : np.average, 'POPESTIMATE2011' : np.sum})) # + # scales # - df = pd.DataFrame(['A+', 'A', 'A-', 'B+', 'B', 'B-', 'C+', 'C', 'C-', 'D+', 'D'], index=['excellent', 'excellent', 'excellent', 'good', 'good', 'good', 'ok', 'ok', 'ok', 'poor', 'poor']) df.rename(columns={0: 'Grades'}, inplace=True) df df['Grades'].astype('category') grades = df['Grades'].astype('category', categories = ['D', 'D+', 'C-', 'C', 'C+', 'B-', 'B', 'B+', 'A-', 'A', 'A+'], ordered = True) grades grades > 'C' s = pd.Series([16, 18, 17, 19, 17]) pd.cut(s, 3) df = pd.read_csv('census.csv') df = df[df['SUMLEV']==50] df = df.set_index('STNAME').groupby(level=0)['CENSUS2010POP'].agg({'avg': np.average}) pd.cut(df['avg'], 10) pd.cut(s, 3, labels = ['small', 'medium', 'large']) # + # pivot tables 
# - df = pd.read_csv('cars.csv') df.head() df.pivot_table(values = '(kW)', index = 'YEAR', columns = 'Make', aggfunc = np.mean) df.pivot_table(values = '(kW)', index = 'YEAR', columns = 'Make', aggfunc = [np.mean, np.sum]) df.pivot_table(values = '(kW)', index = 'YEAR', columns = 'Make', aggfunc = [np.mean, np.sum], margins = True) # + # date functionality # - pd.Timestamp('9/1/2016 10:05AM') pd.Period('3/5/2016') pd.Period('1/2016') # date time index t1 = pd.Series(list('abc'), [pd.Timestamp('2016-09-01'), pd.Timestamp('2016-09-02'), pd.Timestamp('2016-09-03')]) t1 type(t1.index) # period index t2 = pd.Series(list('def'), [pd.Period('2016-09'), pd.Period('2016-10'), pd.Period('2016-11')]) t2 type(t2.index) # converting to date time d1 = ['2 June 2013', 'Aug 29, 2014', '2015-06-26', '7/12/16'] ts3 = pd.DataFrame(np.random.randint(10, 100, (4,2)), index=d1, columns=list('ab')) ts3 ts3.index = pd.to_datetime(ts3.index) ts3 pd.to_datetime('4.7.12', dayfirst=True) # time deltas pd.Timestamp('9/3/2016') - pd.Timestamp('9/1/2016') pd.Timestamp('9/9/2018') + pd.Timedelta('12D3H') dates = pd.date_range('28-09-2018', periods=9, freq='2W-SUN') dates df = pd.DataFrame({'count1' : 100 + np.random.randint(-5, 10, 9).cumsum(), 'count2' : 120 + np.random.randint(-5, 10, 9)}, index = dates) df df.index.weekday_name df.diff() df.resample('M').mean() df['2018'] df['2018-12':] df.asfreq('W', method='ffill') with open(filename, "r") as f: return json.load(f, strict=False) def dump_json_to_file(obj, filename, **kwargs): with open(filename, "w") as f: json.dump(obj, f, **kwargs) # - video_dataset = load_json_from_file('train_dataset/info.json') video_dataset # ### Загрузка видео ### # Загрузка видео осуществляется при помощи cv2.VideoCapture. Этот код изменять и дописывать не нужно. 
def read_video(video_path): cap = cv2.VideoCapture(video_path) frames = [] while(cap.isOpened()): ret, frame = cap.read() if ret==False: break yield frame cap.release() frames = read_video(os.path.join('train_dataset', 'video', '03.mp4')) # Что такое frames? Это итератор на кадры видео. Чтобы пройтись по всем кадрам последовательности, воспользуйтесь следующей конструкцией: # *Аккуратно, по одной переменной frames можно пройти только один раз!* for frame in tqdm(frames): pass for frame in tqdm(frames): # Второй раз уже не будет итерации pass # ## Пишем свой простой детектор смен сцен ## # На данном этапе предлагается написать простой Scene Change Detector (SCD) на основе выделения характеристик кадров, подсчёта разницы между кадрами на основе данных характеристик, а также подобрать наиболее оптимальный порог для этих признаков и совместить эти признаки. # Сменой сцен в данной задаче являются только обычные мгновенные смены сцен, без дополнительных эффектов. # # В качестве примера приведён простой детектор смен, который считает межкадровую разницу между кадрами. # # *Важное замечание. Здесь и далее результатом алгоритма детектора сцен являются **индексы кадров начал сцен**, при этом кадры **нумеруются с 0**. 
Нулевой кадр в качестве ответа указывать не нужно* # <img src="Hard_cut.jpg"> def baseline_scene_change_detector(frames, threshold=2000, with_vis=False): """ Baseline SCD Arguments: frames -- iterator on video frames threshold -- parameter of your algorithm (optional) with_vis -- saving neighboring frames at a scene change (optional) Returns: scene_changes -- list of scene changes (idx of frames) vis -- list of neighboring frames at a scene change (for visualization) metric_values -- list of metric values (for visualization) """ def pixel_metric(frame, prev_frame): # Базовое расстояние между кадрами - среднеквадратическая ошибка между ними return np.mean((frame.astype(np.int32) - prev_frame) ** 2) scene_changes = [] vis = [] metric_values = [] prev_frame = None for idx, frame in tqdm(enumerate(frames), leave=False): # frame - это кадр # idx - это номер кадра if prev_frame is not None: # Находим расстояние между соседними кадрами metric_value = pixel_metric(frame, prev_frame) if metric_value > threshold: scene_changes.append(idx) if with_vis: # Кадры в памяти занимают много места, поэтому сохраним лишь первые 100 срабатываний if len(vis) < 100: vis.append([prev_frame, frame]) metric_values.append(metric_value) else: metric_values.append(0) prev_frame = frame return scene_changes, vis, metric_values frames = read_video(os.path.join('train_dataset', 'video', '03.mp4')) cuts_base = load_json_from_file(os.path.join('train_dataset', 'gt', '03.json'))['cut'] scene_changes_base, vis_base, metric_values_base = baseline_scene_change_detector(frames, with_vis=True) # Посмотрим визуально, насколько сильно алгоритм ошибается, а также на значения метрики def visualize_metric_error(frame, prev_frame, value): fig = plt.figure(figsize=(16,4)) plt.suptitle('Значение метрики на текущем кадре: {:.4f}'.format(value), fontsize=24) ax = fig.add_subplot(1, 2, 1) ax.imshow(prev_frame[:,:,::-1]) ax.set_title("Предыдущий кадр", fontsize=18) ax.set_xticks([]) ax.set_yticks([]) ax = 
fig.add_subplot(1, 2, 2) ax.imshow(frame[:,:,::-1]) ax.set_title("Текущий кадр", fontsize=18) ax.set_xticks([]) ax.set_yticks([]) plt.subplots_adjust(top=0.80) idx = 1 visualize_metric_error(vis_base[idx][0], vis_base[idx][1], metric_values_base[scene_changes_base[idx]]) # смена сцен idx = 10 visualize_metric_error(vis_base[idx][0], vis_base[idx][1], metric_values_base[scene_changes_base[idx]]) # ошибается, это не смена сцен def visualize_metric_values(metric_values, threshold, cuts = None): sns.set() plt.figure(figsize=(16, 8)) plt.plot(metric_values, label='Значение метрики на кадрах') plt.xlabel('Номер кадра') plt.ylabel('Значение метрики') plt.hlines(y=threshold, xmin=0, xmax=len(metric_values), linewidth=2, color='r', label='Пороговое значение') if cuts is not None: for cut in cuts: plt.axvline(x=cut, color='k', linestyle=':', linewidth=0.5, label='Смена сцены') handles, labels = plt.gca().get_legend_handles_labels() by_label = dict(zip(labels, handles)) plt.legend(by_label.values(), by_label.keys()) plt.show() visualize_metric_values(metric_values_base, 2000, cuts_base) # **Как видим, очень плохо подобран порог, да и сам признак, похоже, сильно зашумлён. Попробуйте что-то своё!** # ## Ваше решение ## # * В качестве решения вы должны прикрепить функцию ниже. Все пороги должны быть указаны внутри функции. # Т.е. должен быть возможен вызов: # `scene_changes, vis, metric_values = scene_change_detector(frames)` # * Строку (# GRADED FUNCTION: [function name]) менять **нельзя**. Она будет использоваться при проверке вашего решения. # * Ячейка должна содержать только **одну** функцию. 
# + # GRADED FUNCTION: scene_change_detector def scene_change_detector(frames, with_vis=False, Tb = 0.5, Tm = 0.05, a0 = 0.01, a1 = 0.1, b0 = 1, b1 = 0.1, b2 = 0.108): """ Baseline SCD Arguments: frames -- iterator on video frames threshold -- parameter of your algorithm (optional) with_vis -- saving neighboring frames at a scene change (optional) Returns: scene_changes -- list of scene changes (idx of frames) vis -- list of neighboring frames at a scene change (for visualization) metric_values -- list of metric values (for visualization) """ numof_blocks = 10 k = 6 buf_next_frames = [[],[]] buf_prev_frames = [[],[]] import math def std_metric(block, prev_block): return math.fabs(np.std(block) - np.std(prev_block))/255 def mean_brightness_metric(block, prev_block): return math.fabs(np.mean(block) - np.mean(prev_block))/255 def lapl(frame): src = cv2.GaussianBlur(frame, (3, 3), 0) src_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) dst = cv2.Laplacian(src_gray, cv2.CV_16S, ksize=5) abs_dst = cv2.convertScaleAbs(dst) return abs_dst def delta_fun(cur_frame, prev_frame): delta_horiz = 0 delta_vert = 0 block_width = cur_frame.shape[1]//numof_blocks block_height = cur_frame.shape[0]//numof_blocks for i in range(numof_blocks): for j in range(numof_blocks): block_ij = cur_frame[i*block_height:(i+1)*block_height,j*block_width:(j+1)*block_width] prev_block_ij = prev_frame[i*block_height:(i+1)*block_height,j*block_width:(j+1)*block_width] if j < numof_blocks - 1: block_ij1 = cur_frame[i*block_height:(i+1)*block_height,(j+1)*block_width:(j+2)*block_width] prev_block_ij1 = prev_frame[i*block_height:(i+1)*block_height,(j+1)*block_width:(j+2)*block_width] delta_horiz += math.fabs((np.mean(block_ij) - np.mean(block_ij1)) - (np.mean(prev_block_ij) - np.mean(prev_block_ij1))) if i < numof_blocks - 1: block_i1j = cur_frame[(i+1)*block_height:(i+2)*block_height,j*block_width:(j+1)*block_width] prev_block_i1j = 
prev_frame[(i+1)*block_height:(i+2)*block_height,j*block_width:(j+1)*block_width] delta_vert += math.fabs((np.mean(block_ij) - np.mean(block_i1j)) - (np.mean(prev_block_ij) - np.mean(prev_block_i1j))) delta_horiz /= 510*numof_blocks*(numof_blocks-1) delta_vert /= 510*numof_blocks*(numof_blocks-1) return (delta_horiz+delta_vert)/2 def E(cur_frame): return np.sum(cur_frame)/(255*cur_frame.shape[0]*cur_frame.shape[1]) scene_changes = [] vis = [] metric_values = [[],[],[],[],[],[]] prev_frame = None last_pos = None for idx, frame in tqdm(enumerate(frames), leave=False): if idx < k: if prev_frame is None: buf_next_frames[0].append(frame) buf_next_frames[1].append(1) else: buf_next_frames[0].append(frame) buf_next_frames[1].append(delta_fun(frame, prev_frame)) else: if len(buf_prev_frames[0]) < 1: buf_prev_frames[0].append(buf_next_frames[0].pop(0)) buf_prev_frames[1].append(buf_next_frames[1].pop(0)) buf_next_frames[0].append(frame) buf_next_frames[1].append(delta_fun(frame, prev_frame)) metric_values[0].append(0) metric_values[1].append(0) metric_values[2].append(0) metric_values[3].append(0) metric_values[4].append(0) metric_values[5].append(0) else: cur_frame = buf_next_frames[0].pop(0) cur_delta = buf_next_frames[1].pop(0) buf_next_frames[0].append(frame) buf_next_frames[1].append(delta_fun(frame, prev_frame)) L = 0 D = 0 count = 1 for i in range(len(buf_prev_frames[0])): L += E(buf_prev_frames[0][i]) D += buf_prev_frames[1][i] count += 1 L += E(cur_frame) D += cur_delta for i in range(len(buf_next_frames[0])): L += E(buf_next_frames[0][i]) D += buf_next_frames[1][i] count += 1 L /= count D /= count g = 0 if L !=0 and D != 0: g = b0 + b1*math.log(L) + b2*math.log(D) Tb = max(0.2, min(0.8, g)) else: Tb = 1 Tm = max(0.03, min(0.07, a0+a1*Tb)) numof_sig_blocks = 0 numof_sig_blocks2 = 0 metric_values[1].append(Tb) metric_values[2].append(Tm) metric_values[3].append(g) metric_values[4].append(D) for i in range(numof_blocks): for j in range(numof_blocks): block_width = 
frame.shape[1]//numof_blocks block_height = frame.shape[0]//numof_blocks block = cur_frame[i*block_height:(i+1)*block_height,j*block_width:(j+1)*block_width] prev_block = buf_prev_frames[0][-1][i*block_height:(i+1)*block_height,j*block_width:(j+1)*block_width] lapl_block = lapl(block) lapl_prev_block = lapl(prev_block) metric_value1 = std_metric(block, prev_block) metric_value2 = mean_brightness_metric(block, prev_block) metric_value3 = std_metric(lapl_block,lapl_prev_block) metric_value4 = mean_brightness_metric(lapl_block,lapl_prev_block) if metric_value1 > Tm or metric_value2 > Tm: numof_sig_blocks += 1 if metric_value3 > Tm or metric_value4 > Tm: numof_sig_blocks2 += 1 if numof_sig_blocks/(numof_blocks**2) > Tb and numof_sig_blocks2/(numof_blocks**2) > Tb: scene_changes.append(idx-k) if with_vis: # Кадры в памяти занимают много места, поэтому сохраним лишь первые 100 срабатываний if len(vis) < 100: vis.append([buf_prev_frames[0][-1], cur_frame]) metric_values[0].append(numof_sig_blocks/(numof_blocks**2)) metric_values[5].append(numof_sig_blocks2/(numof_blocks**2)) buf_prev_frames[0].append(cur_frame) buf_prev_frames[1].append(cur_delta) if len(buf_prev_frames[0]) > k: buf_prev_frames[0].pop(0) buf_prev_frames[1].pop(0) prev_frame = frame last_pos = idx for i in range(len(buf_next_frames[0])): cur_frame = buf_next_frames[0].pop(0) cur_delta = buf_next_frames[1].pop(0) L = 0 D = 0 count = 1 for i in range(len(buf_prev_frames[0])): L += E(buf_prev_frames[0][i]) D += buf_prev_frames[1][i] count += 1 L += E(cur_frame) D += cur_delta for i in range(len(buf_next_frames[0])): L += E(buf_next_frames[0][i]) D += buf_next_frames[1][i] count += 1 L /= count D /= count g = 0 if L !=0 and D != 0: g = b0 + b1*math.log(L) + b2*math.log(D) Tb = max(0.2, min(0.8, g)) else: Tb = 1 Tm = max(0.03, min(0.07, a0+a1*Tb)) metric_values[1].append(Tb) metric_values[2].append(Tm) metric_values[3].append(L) metric_values[4].append(D) numof_sig_blocks = 0 numof_sig_blocks2 = 0 for i in 
range(numof_blocks): for j in range(numof_blocks): block_width = frame.shape[1]//numof_blocks block_height = frame.shape[0]//numof_blocks block = cur_frame[i*block_height:(i+1)*block_height,j*block_width:(j+1)*block_width] prev_block = buf_prev_frames[0][-1][i*block_height:(i+1)*block_height,j*block_width:(j+1)*block_width] lapl_block = lapl(block) lapl_prev_block = lapl(prev_block) metric_value3 = std_metric(lapl_block,lapl_prev_block) metric_value4 = mean_brightness_metric(lapl_block,lapl_prev_block) if metric_value1 > Tm or metric_value2 > Tm: numof_sig_blocks += 1 if metric_value3 > Tm or metric_value4 > Tm: numof_sig_blocks2 += 1 if numof_sig_blocks/(numof_blocks**2) > Tb and numof_sig_blocks2/(numof_blocks**2) > Tb: scene_changes.append(last_pos-k+i) if with_vis: # Кадры в памяти занимают много места, поэтому сохраним лишь первые 100 срабатываний if len(vis) < 100: vis.append([buf_prev_frames[0][-1], cur_frame]) metric_values[0].append(numof_sig_blocks/(numof_blocks**2)) metric_values[5].append(numof_sig_blocks2/(numof_blocks**2)) buf_prev_frames[0].append(cur_frame) buf_prev_frames[1].append(cur_delta) if len(buf_prev_frames[0]) > k: buf_prev_frames[0].pop(0) buf_prev_frames[1].pop(0) return scene_changes, vis, metric_values # - frames = read_video(os.path.join('train_dataset', 'video', '17.mp4')) cuts = load_json_from_file(os.path.join('train_dataset', 'gt', '17.json'))['cut'] scene_changes, vis, metric_values = scene_change_detector(frames, with_vis=True) # #### Обратите внимание на скорость работы алгоритма! #### # Если вычислять признаки без циклов по пикселям, а пользоваться методами из numpy, то скорость будет не медленнее 7-8 кадров в секунду. # Например, вы можете использовать функцию `np.histogram` или `cv2.calcHist` для подсчёта гистограмм, а `cv2.Sobel` для применения оператора Собеля к кадру. 
#Посмотрим на найденные смены сцен idx = 1 visualize_metric_error(vis[idx][0], vis[idx][1], metric_values[scene_changes[idx]]) #Посмотрим на значения метрики visualize_metric_values(metric_values, 2000, cuts) # ## Подсчёт метрики F1-Score## # Чтобы оценивать алгоритм и научиться сравнивать несколько алгоритмов, нужна метрика качества. В данной задаче для оценки качества алгоритма используется F1-Score. Преимущества использования этой метрики к текущей постановке задачи смены сцен были рассказаны на лекции, напишем только формулы: # $$precision = \frac{tp}{tp+fp}$$ # $$recall = \frac{tp}{tp+fn}$$ # $$F = 2 * \frac{precision * recall}{precision+recall}$$ # На всякий случай опишем как именно происходит подсчёт метрики для видео # # 1) Сначала из выборки удаляются все кадры, которые по разметке либо являются сложными переходами между сценами, либо помечены как сложные для анализа и разметки (например, титры/обилие компьютерной графики и т.п) # # # 2) Затем для оставшихся кадров уже подсчитывается F1_Score #Эти пять клеток кода править не нужно def calculate_matrix(true_scd, predicted_scd, scene_len, not_to_use_frames=set()): tp, fp, tn, fn = 0, 0, 0, 0 scene_len = scene_len for scd in predicted_scd: if scd in true_scd: tp += 1 elif scd not in not_to_use_frames: fp += 1 for scd in true_scd: if scd not in predicted_scd: fn += 1 tn = scene_len - len(not_to_use_frames) - tp - fp - fn return tp, fp, tn, fn def calculate_precision(tp, fp, tn, fn): return tp / max(1, (tp + fp)) def calculate_recall(tp, fp, tn, fn): return tp / max(1, (tp + fn)) def f1_score(true_scd, predicted_scd, scene_len, not_to_use_frames=set()): tp, fp, tn, fn = calculate_matrix(true_scd, predicted_scd, scene_len, not_to_use_frames) precision_score = calculate_precision(tp, fp, tn, fn) recall_score = calculate_recall(tp, fp, tn, fn) if precision_score + recall_score == 0: return 0 else: return 2 * precision_score * recall_score / (precision_score + recall_score) def f1_score_matrix(tp, fp, tn, fn): 
precision_score = calculate_precision(tp, fp, tn, fn) recall_score = calculate_recall(tp, fp, tn, fn) if precision_score + recall_score == 0: return 0 else: return 2 * precision_score * recall_score / (precision_score + recall_score) # ## Тестируем разработанный метод сразу на нескольких видео ## # Проверим, насколько хорошо работает разработанный метод. *Учтите, что итоговое тестирование будет производиться на аналогичном, но недоступном вам наборе видео, но все параметры алгоритмов должны быть указаны вами (иными словами - подобраны на тренировочном наборе).* def run_scene_change_detector_all_video(scene_change_detector, dataset_path): video_dataset = load_json_from_file(os.path.join(dataset_path, 'info.json')) param_log = { '_mean_f1_score': [] } for video_info in tqdm(video_dataset, leave=False): # Загружаем видео, его длину и смены сцен frames = read_video(os.path.join(dataset_path, video_info['source'])) video_len = video_info['len'] true_scene_changes = load_json_from_file(os.path.join(dataset_path, video_info['scene_change'])) # Составляем список сцен, которые не будут тестироваться not_use_frames = set() for type_scene_change in ['trash', 'fade', 'dissolve']: for bad_scene_range in true_scene_changes.get(type_scene_change, []): not_use_frames.update(list(range(bad_scene_range[0], bad_scene_range[1] + 1))) predicted_scene_changes, _, _ = scene_change_detector(frames) param_log['f1_score_{}'.format(video_info['source'])] = f1_score( true_scene_changes['cut'], predicted_scene_changes, video_len, not_use_frames ) print(f1_score(true_scene_changes['cut'], predicted_scene_changes, video_len, not_use_frames)) video_tp, video_fp, video_tn, video_fn = calculate_matrix( true_scene_changes['cut'], predicted_scene_changes, video_len, not_use_frames ) param_log['tp_{}'.format(video_info['source'])] = video_tp param_log['fp_{}'.format(video_info['source'])] = video_fp param_log['tn_{}'.format(video_info['source'])] = video_tn 
param_log['fn_{}'.format(video_info['source'])] = video_fn param_log['_mean_f1_score'].append(param_log['f1_score_{}'.format(video_info['source'])]) param_log['_mean_f1_score'] = np.mean(param_log['_mean_f1_score']) return param_log video_dataset = 'train_dataset' # Данная функция поможет вам посмотреть, на каких видео и на сколько ошибается ваш метод. Прогнать метод на отдельном видео и детально посмотреть кадры вы могли выше. # # Кроме того, с помощью этой функции вы можете подобрать оптимальные параметры для метода. #Протестируем базовый метод run_scene_change_detector_all_video(baseline_scene_change_detector, video_dataset) #Протестируем разработанный вами метод run_scene_change_detector_all_video(scene_change_detector, video_dataset) # Когда вы смотрите на результат, обращайте внимание на **_mean_f1_score** # Именно по этой метрике будет производится финальное оценивание. # ## Бонусное задание: распознавание смен сцен типа "наложения" # На практике кроме катов часто встречаются смены сцен, где происходит "наложение" одной сцены на другую: # <img src="Dissolve.jpg"> # ## Ваше решение ## # * В качестве решения вы должны прикрепить функцию ниже. Все пороги должны быть указаны внутри функции. # Т.е. должен быть возможен вызов: # `scene_changes, vis, metric_values = scene_change_detector_dissolve(frames)` # * Строку (# GRADED FUNCTION: [function name]) менять **нельзя**. Она будет использоваться при проверке вашего решения. # * Ячейка должна содержать только **одну** функцию. 
# + # GRADED FUNCTION: scene_change_detector_dissolve def scene_change_detector_dissolve(frames, threshold=None, with_vis=False): scene_changes = [] vis = [] metric_values = [] ### START CODE HERE ### # Ваши внешние переменные ### END CODE HERE ### for idx, frame in tqdm(enumerate(frames), leave=False): # frame - это кадр # idx - это номер кадра ### START CODE HERE ### # Основная часть вашего алгоритма ### END CODE HERE ### return scene_changes, vis, metric_values # - # В качестве метрики качества используется видоизменённый f1-score: # # Так как смена сцен не происходит за один кадр, попаданием считается попадание ответа смены сцен в отрезок, где происходит наложение. # **Обратите внимание**, что несколько раз указывать одну смену сцен не нужно. # # Попадание вне отрезков смен сцен путём наложения считается как false positive, не попадание в указанный отрезок - как false negative #Эти три клетки кода править не нужно def calculate_matrix_dissolve(true_scd, predicted_scd, scene_len): tp, fp, tn, fn = 0, 0, 0, 0 scene_len = scene_len checked_dissolve_segments = set() total_scene_dissolve_len = np.sum([dissolve_segment[1] - dissolve_segment[0] + 1 for dissolve_segment in true_scd]) for scd in predicted_scd: for dissolve_segment in true_scd: if scd in range(dissolve_segment[0], dissolve_segment[1] + 1): tp += 1 checked_dissolve_segments.add(tuple(dissolve_segment)) break else: fp += 1 fn = len(true_scd) - len(checked_dissolve_segments) tn = scene_len - total_scene_dissolve_len + len(true_scd) - tp - fp - fn return tp, fp, tn, fn def f1_score_dissolve(true_scd, predicted_scd, scene_len): tp, fp, tn, fn = calculate_matrix_dissolve(true_scd, predicted_scd, scene_len) precision_score = calculate_precision(tp, fp, tn, fn) recall_score = calculate_recall(tp, fp, tn, fn) if precision_score + recall_score == 0: return 0 else: return 2 * precision_score * recall_score / (precision_score + recall_score) def run_scene_change_detector_all_video_dissolve(scene_change_detector, 
dataset_path): video_dataset = load_json_from_file(os.path.join(dataset_path, 'info.json')) param_log = { '_mean_f1_score': [] } for video_info in tqdm(video_dataset, leave=False): frames = read_video(os.path.join(dataset_path, video_info['source'])) video_len = video_info['len'] true_scene_changes = load_json_from_file(os.path.join(dataset_path, video_info['scene_change'])) predicted_scene_changes, _, _ = scene_change_detector(frames) param_log['f1_score_{}'.format(video_info['source'])] = f1_score_dissolve( true_scene_changes.get('dissolve', []), predicted_scene_changes, video_len ) video_tp, video_fp, video_tn, video_fn = calculate_matrix_dissolve( true_scene_changes.get('dissolve', []), predicted_scene_changes, video_len ) param_log['tp_{}'.format(video_info['source'])] = video_tp param_log['fp_{}'.format(video_info['source'])] = video_fp param_log['tn_{}'.format(video_info['source'])] = video_tn param_log['fn_{}'.format(video_info['source'])] = video_fn param_log['_mean_f1_score'].append(param_log['f1_score_{}'.format(video_info['source'])]) param_log['_mean_f1_score'] = np.mean(param_log['_mean_f1_score']) return param_log video_dataset_path = 'train_dataset' #Протестируем разработанный вами метод run_scene_change_detector_all_video_dissolve(scene_change_detector_dissolve, video_dataset_path) # ## Немного об оценивании задания ## # Оценивание задания будет производиться по следующей схеме: # # Пусть на скрытой выборке по F-метрике вы получили X, лучшее решение получило Y. # # 1. Базовая часть оценивется как $$20 * \left(\frac{\max(0, X_{base} - 0.5)}{Y_{base} - 0.5}\right)^2 + Bonus_{base}$$ Бонусные баллы $Bonus$ можно получить за оригинальные идеи в задаче или в её реализации # 2. 
Дополнительное задание оценивается как $$5 * \frac{\max(0, X_{add} - 0.1)}{Y_{add} - 0.1} + Bonus_{add}$$Процесс получения бонусных баллов аналогичен получению бонусных баллов в базовой части # ## Ваши ощущения ## # *До дедлайна пару часов и вы никак не можете улучшить текущее решение? Или наоборот, вы всё сделали очень быстро? Опишите кратко ваши ощущения от задания - сколько времени вы потратили на задание, сколько вы потратили на изучение питона и установку необходимых библиотек, как быстро вы придумывали новые идеи и как они давали прирост по метрике и в целом насколько это задание вам понравилось и что хотели бы изменить/добавить.*
35,015
/notebooks/Fifo.ipynb
4234d6647b8fc80a6eefc0d7659bf11c986cb8dc
[]
no_license
leonardt/silica
https://github.com/leonardt/silica
14
1
null
2019-10-10T00:13:02
2019-10-07T20:55:54
Python
Jupyter Notebook
false
false
.py
45,094
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/sunxueliang96/WF-FrameWork/blob/master/transplant.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="D3WCsIaUTuNV" colab_type="code" outputId="e1092327-100e-40e7-e9ed-106eaf05f317" colab={"base_uri": "https://localhost:8080/", "height": 121} #Mount Google Drive as folder from google.colab import drive drive.mount('/content/drive', force_remount=True) # + id="rF9tyC-TTxmk" colab_type="code" outputId="8379bb07-f6ba-4572-8a7b-8de1e7e799a1" colab={"base_uri": "https://localhost:8080/", "height": 34} # cd /content/drive/'My Drive'/datasets/no_paded/open_world/walkiebatch # + id="thC1b8glT6p9" colab_type="code" outputId="0526defc-2d5c-46c3-b425-a13974d7d60b" colab={"base_uri": "https://localhost:8080/", "height": 118} # ls # + id="NOpTb4DkT9db" colab_type="code" outputId="64034d77-4572-4c6a-fd87-8b39b2175246" colab={"base_uri": "https://localhost:8080/", "height": 67} import numpy as np import pickle print('loading data...') with open('X_walkiebatch.pkl','rb') as handle: X = np.array(pickle.load(handle)) with open('y_walkiebatch.pkl','rb') as handle: y = np.array(pickle.load(handle)) print('the shape of X',X.shape) print('the shape of y',y.shape) # + id="pnVKARJIuhwD" colab_type="code" colab={} # + id="iBm0gBfZZ_UG" colab_type="code" colab={} import math import random def creat_data(X,y,PERCENT_TRANSPLANT,NB_TIMES): #Magnificate dataset by adding noising randomly X = list(X) y = list(y) PERCENT_TRANSPLANT = PERCENT_TRANSPLANT #PERCENT of Noise at each time NB_TIMES = NB_TIMES #TIMES of Magnification X_sum = X.copy() y_sum = y.copy() print("{} samples has beed add to X_train, {}% of noise for each 
sample".format(math.ceil(len(X)*NB_TIMES),PERCENT_NOISE)) for i in range(math.ceil(len(X)*NB_TIMES)): while(True): # target pos p (sensitive website), pos q (non-sensitive website) p = random.choice(range(len(X))) q = random.choice(range(len(X))) if(y[p]!=-1 & y[q]==-1): # open_world # if(y[p]!=-1): # close_world break else: #print('-1 detected') pass X_new = X[p].copy() target = X[q].copy() y_new = y[p] length_X = math.ceil(PERCENT_TRANSPLANT*len(X_new)/100) length_target = math.ceil(PERCENT_TRANSPLANT*len(target)/100) start_X = random.choice(range(len(X_new)-length_X)) start_target = random.choice(range(len(X_new)-length_target)) temp = target[start_target:start_target+length_target] X_new[start_X:start_X+length_X] = temp X_sum.append(X_new) y_sum.append(y_new) return np.array(X_sum),np.array(y_sum) #return two array of X, y ''' def locally_change(X,y,PERCENT_CHANGE,METHOD): X = list(X) y = list(y) PERCENT_NOISE = PERCENT_NOISE #PERCENT of Changing NB_TIMES = METHOD #Method of Changing X_sum = X.copy() y_sum = y.copy() for i in range(math.ceil(len(X)*NB_TIMES)): p = random.choice(range(len(X))) #print('###########################################',p) X_new = X[p].copy() y_new = y[p] for n in range(math.ceil(NB_TIMES*len(X_new)/100)):#dealing X_new noise = random.choice([1,-1]) #noise could only be 1 or -1 pos = random.choice(range(len(X_new))) #position belong to (0,len(sequence)) X_new.insert(pos,noise) #print('insert {} at position {} in sequence {} now len of sequence is {}'.format(noise,pos,p,len(X_new))) #for test X_sum.append(X_new) y_sum.append(y_new) return np.array(X_sum),np.array(y_sum) #return two array of X, y #return x,y def locally_delete(): pass def rnn_padding(): #a seq to seq model pass def bayes_padding(): pass def mult_scale_windowing(): pass def GAN_padding(): pass def data_mixing(): pass ''' # + id="RZpMmHj3Uefl" colab_type="code" outputId="dfdc7075-471f-481d-cced-9a37159386dc" colab={"base_uri": "https://localhost:8080/", "height": 146} from 
sklearn.model_selection import train_test_split from keras.utils import np_utils from keras.preprocessing import sequence from collections import Counter #CLASSES_KNN = 101######################################################################################################################################################################################################## #CLASSES_WAKIE = 100######################################################################################################################################################################################################## MAXLEN_KNN = 2000 MAXLEN_WAKIE =5000 maxlen = 5000 NB_CLASSES = 0 def choose(): global maxlen,NB_CLASSES print('if padding, the max of length of seq is {}'.format(maxlen)) NB_CLASSES = len(Counter(y).keys()) print('number of classes is {}'.format(NB_CLASSES)) choose()################################################################################################################################################################################################################ #print('spliting data...') #X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) #X_train,y_train = creat_data(X_train,y_train,PERCENT_NOISE=0.5,NB_TIMES=1) #print(len(X_train), 'train sequences') #print(len(y_train), 'test sequences') print('Average sequence length: {}'.format(np.mean(list(map(len, X)), dtype=int))) # + id="E_gm7tqd4yEV" colab_type="code" colab={} #FCN from keras import Sequential from keras.layers.core import Activation, Flatten, Dense, Dropout from keras.optimizers import Adamax def DNN(input_shape, classes): model = Sequential() model.add(Dense(128, input_shape=(input_shape,))) model.add(Activation('relu')) model.add(Dense(256)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dense(1024)) model.add(Activation('relu')) model.add(Dense(512)) model.add(Dropout(0.5)) model.add(Activation('relu')) 
model.add(Dense(classes)) model.add(Activation('softmax')) return model def run_DNN(X_train,y_train,X_test,y_test): print('Pad sequences to ',maxlen) x_train = X_train[:] x_test = X_test[:] x_train = sequence.pad_sequences(x_train, maxlen=maxlen,padding='post',truncating='post') x_test = sequence.pad_sequences(x_test, maxlen=maxlen,padding='post',truncating='post') y_train = np_utils.to_categorical(y_train, NB_CLASSES) y_test = np_utils.to_categorical(y_test, NB_CLASSES) print('the shape of x_train',x_train.shape) print('the shape of y_train',y_train.shape) print('the shape of x_test',x_test.shape) print('the shape of y_test',y_test.shape) model_DNN = DNN(maxlen,NB_CLASSES) #model_DNN.summary() model_DNN.compile(loss='categorical_crossentropy',optimizer=OPTIMIZER,metrics=['accuracy', precision, recall, fmeasure]) history = model_DNN.fit(x_train,y_train,batch_size=BATCH_SIZE,epochs=NB_EPOCH,validation_data=(x_test,y_test),verbose=0) score = model_DNN.evaluate(x_test,y_test,verbose=VERBOSE) #print(history.history) #print(score) return(history.history) # + id="M9hQ5LZK6sFU" colab_type="code" colab={} #CNN from keras import Input,Model,Sequential from keras.layers import Embedding,GlobalAveragePooling1D,Dense,Dropout from keras.layers import Conv1D, MaxPooling1D, BatchNormalization from keras.layers.core import Activation, Flatten, Dense, Dropout from keras.layers.advanced_activations import ELU from keras.initializers import glorot_uniform from keras.optimizers import Adamax def CNN(input_shape, classes): model = Sequential() #Block1 filter_num = ['None',32,64,128,256] kernel_size = ['None',8,8,8,8] conv_stride_size = ['None',1,1,1,1] pool_stride_size = ['None',4,4,4,4] pool_size = ['None',8,8,8,8] model.add(Conv1D(filters=filter_num[1], kernel_size=kernel_size[1], input_shape=(input_shape,1), strides=conv_stride_size[1], padding='same', name='block1_conv1')) model.add(BatchNormalization(axis=-1)) model.add(ELU(alpha=1.0, name='block1_adv_act1')) 
model.add(Conv1D(filters=filter_num[1], kernel_size=kernel_size[1], strides=conv_stride_size[1], padding='same', name='block1_conv2')) model.add(BatchNormalization(axis=-1)) model.add(ELU(alpha=1.0, name='block1_adv_act2')) model.add(MaxPooling1D(pool_size=pool_size[1], strides=pool_stride_size[1], padding='same', name='block1_pool')) model.add(Dropout(0.1, name='block1_dropout')) model.add(Conv1D(filters=filter_num[2], kernel_size=kernel_size[2], strides=conv_stride_size[2], padding='same', name='block2_conv1')) model.add(BatchNormalization()) model.add(Activation('relu', name='block2_act1')) model.add(Conv1D(filters=filter_num[2], kernel_size=kernel_size[2], strides=conv_stride_size[2], padding='same', name='block2_conv2')) model.add(BatchNormalization()) model.add(Activation('relu', name='block2_act2')) model.add(MaxPooling1D(pool_size=pool_size[2], strides=pool_stride_size[3], padding='same', name='block2_pool')) model.add(Dropout(0.1, name='block2_dropout')) model.add(Conv1D(filters=filter_num[3], kernel_size=kernel_size[3], strides=conv_stride_size[3], padding='same', name='block3_conv1')) model.add(BatchNormalization()) model.add(Activation('relu', name='block3_act1')) model.add(Conv1D(filters=filter_num[3], kernel_size=kernel_size[3], strides=conv_stride_size[3], padding='same', name='block3_conv2')) model.add(BatchNormalization()) model.add(Activation('relu', name='block3_act2')) model.add(MaxPooling1D(pool_size=pool_size[3], strides=pool_stride_size[3], padding='same', name='block3_pool')) model.add(Dropout(0.1, name='block3_dropout')) model.add(Conv1D(filters=filter_num[4], kernel_size=kernel_size[4], strides=conv_stride_size[4], padding='same', name='block4_conv1')) model.add(BatchNormalization()) model.add(Activation('relu', name='block4_act1')) model.add(Conv1D(filters=filter_num[4], kernel_size=kernel_size[4], strides=conv_stride_size[4], padding='same', name='block4_conv2')) model.add(BatchNormalization()) model.add(Activation('relu', 
name='block4_act2')) model.add(MaxPooling1D(pool_size=pool_size[4], strides=pool_stride_size[4], padding='same', name='block4_pool')) model.add(Dropout(0.1, name='block4_dropout')) model.add(Flatten(name='flatten')) model.add(Dense(512, kernel_initializer=glorot_uniform(seed=0), name='fc1')) model.add(BatchNormalization()) model.add(Activation('relu', name='fc1_act')) model.add(Dropout(0.7, name='fc1_dropout')) model.add(Dense(512, kernel_initializer=glorot_uniform(seed=0), name='fc2')) model.add(BatchNormalization()) model.add(Activation('relu', name='fc2_act')) model.add(Dropout(0.5, name='fc2_dropout')) model.add(Dense(classes, kernel_initializer=glorot_uniform(seed=0), name='fc3')) model.add(Activation('softmax', name="softmax")) return model def run_CNN(X_train,y_train,X_test,y_test): print('Pad sequences to ',maxlen) x_train = X_train[:] x_test = X_test[:] x_train = sequence.pad_sequences(x_train, maxlen=maxlen,padding='post',truncating='post') x_test = sequence.pad_sequences(x_test, maxlen=maxlen,padding='post',truncating='post') x_train = x_train[:,:,np.newaxis] x_test = x_test[:,:,np.newaxis] y_train = np_utils.to_categorical(y_train, NB_CLASSES) y_test = np_utils.to_categorical(y_test, NB_CLASSES) print('the shape of x_train',x_train.shape) print('the shape of y_train',y_train.shape) print('the shape of x_test',x_test.shape) print('the shape of y_test',y_test.shape) model_CNN = CNN(maxlen,NB_CLASSES) #model_CNN.summary() model_CNN.compile(loss='categorical_crossentropy',optimizer=OPTIMIZER,metrics=['accuracy', precision, recall, fmeasure]) history = model_CNN.fit(x_train,y_train,batch_size=BATCH_SIZE,epochs=NB_EPOCH,validation_data=(x_test,y_test),verbose=0) score = model_CNN.evaluate(x_test,y_test,verbose=VERBOSE) #print(score) return(history.history) # + id="DNxUC1-miWSA" colab_type="code" colab={} #LSTM from keras import Input,Model,Sequential from keras.layers import LSTM, Activation, Dense, Dropout, Input, Embedding, TimeDistributed from 
keras.layers.convolutional import Conv1D,MaxPooling1D from keras.layers.core import Activation, Flatten, Dense from keras.layers.advanced_activations import ELU from keras.initializers import glorot_uniform from keras.optimizers import Adamax def my_LSTM(input_shape, classes): model = Sequential() model.add(LSTM(256,activation='relu',return_sequences=True,input_shape=(input_shape,1))) model.add(Dropout(0.5)) model.add(LSTM(256,return_sequences=True)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(LSTM(512,return_sequences=False)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(100, activation='relu')) model.add(Dense(classes, activation='softmax')) return model def run_LSTM(X_train,y_train,X_test,y_test): print('Pad sequences to ',maxlen) x_train = X_train[:] x_test = X_test[:] x_train = sequence.pad_sequences(x_train, maxlen=maxlen,padding='post',truncating='post') x_test = sequence.pad_sequences(x_test, maxlen=maxlen,padding='post',truncating='post') x_train = x_train[:,:,np.newaxis] x_test = x_test[:,:,np.newaxis] y_train = np_utils.to_categorical(y_train, NB_CLASSES) y_test = np_utils.to_categorical(y_test, NB_CLASSES) print('the shape of x_train',x_train.shape) print('the shape of y_train',y_train.shape) print('the shape of x_test',x_test.shape) print('the shape of y_test',y_test.shape) model_LSTM = my_LSTM(maxlen,NB_CLASSES) #model_LSTM.summary() model_LSTM.compile(loss='categorical_crossentropy',optimizer=OPTIMIZER,metrics=['accuracy', precision, recall, fmeasure]) history = model_LSTM.fit(x_train,y_train,batch_size=BATCH_SIZE,epochs=NB_EPOCH,validation_data=(x_test,y_test),verbose=0) score = model_LSTM.evaluate(x_test,y_test,verbose=VERBOSE) return(history.history) # + id="1-J-rv6Ri0r2" colab_type="code" colab={} #CNN+LSTM from keras import Input,Model,Sequential from keras.layers import LSTM, Activation, Dense, Dropout, Input, Embedding, TimeDistributed,BatchNormalization from keras.layers.convolutional import 
Conv1D,MaxPooling1D from keras.layers.core import Activation, Flatten, Dense from keras.layers.advanced_activations import ELU from keras.initializers import glorot_uniform from keras.optimizers import Adamax def CNN_LSTM(n_features,NB_SPLIT,classes): model = Sequential() ''' model.add(TimeDistributed(Conv1D(filters=64, kernel_size=3, activation='relu'),input_shape=(None,n_features//NB_SPLIT,1))) model.add(TimeDistributed(Conv1D(filters=64, kernel_size=3, activation='relu'))) model.add(TimeDistributed(Dropout(0.5))) model.add(TimeDistributed(MaxPooling1D(pool_size=2))) model.add(TimeDistributed(Flatten())) model.add(LSTM(256)) model.add(Dropout(0.5)) model.add(Dense(256, activation='relu')) model.add(Dense(classes, activation='softmax')) ''' #Block1 filter_num = ['None',32,64,128,256] kernel_size = ['None',8,8,8,8] conv_stride_size = ['None',1,1,1,1] pool_stride_size = ['None',4,4,4,4] pool_size = ['None',8,8,8,8] model.add(TimeDistributed(Conv1D(filters=filter_num[1], kernel_size=kernel_size[1], strides=conv_stride_size[1], padding='same', name='block1_conv1'),input_shape=(None,n_features//NB_SPLIT,1))) model.add(TimeDistributed(BatchNormalization(axis=-1))) model.add(TimeDistributed(ELU(alpha=1.0, name='block1_adv_act1'))) model.add(TimeDistributed(Conv1D(filters=filter_num[1], kernel_size=kernel_size[1], strides=conv_stride_size[1], padding='same', name='block1_conv2'))) model.add(TimeDistributed(BatchNormalization(axis=-1))) model.add(TimeDistributed(ELU(alpha=1.0, name='block1_adv_act2'))) model.add(TimeDistributed(MaxPooling1D(pool_size=pool_size[1], strides=pool_stride_size[1], padding='same', name='block1_pool'))) model.add(TimeDistributed(Dropout(0.1, name='block1_dropout'))) model.add(TimeDistributed(Conv1D(filters=filter_num[2], kernel_size=kernel_size[2], strides=conv_stride_size[2], padding='same', name='block2_conv1'))) model.add(TimeDistributed(BatchNormalization())) model.add(TimeDistributed(Activation('relu', name='block2_act1'))) 
model.add(TimeDistributed(Conv1D(filters=filter_num[2], kernel_size=kernel_size[2], strides=conv_stride_size[2], padding='same', name='block2_conv2'))) model.add(TimeDistributed(BatchNormalization())) model.add(TimeDistributed(Activation('relu', name='block2_act2'))) model.add(TimeDistributed(MaxPooling1D(pool_size=pool_size[2], strides=pool_stride_size[3], padding='same', name='block2_pool'))) model.add(TimeDistributed(Dropout(0.1, name='block2_dropout'))) model.add(TimeDistributed(Conv1D(filters=filter_num[3], kernel_size=kernel_size[3], strides=conv_stride_size[3], padding='same', name='block3_conv1'))) model.add(TimeDistributed(BatchNormalization())) model.add(TimeDistributed(Activation('relu', name='block3_act1'))) model.add(TimeDistributed(Conv1D(filters=filter_num[3], kernel_size=kernel_size[3], strides=conv_stride_size[3], padding='same', name='block3_conv2'))) model.add(TimeDistributed(BatchNormalization())) model.add(TimeDistributed(Activation('relu', name='block3_act2'))) model.add(TimeDistributed(MaxPooling1D(pool_size=pool_size[3], strides=pool_stride_size[3], padding='same', name='block3_pool'))) model.add(TimeDistributed(Dropout(0.1, name='block3_dropout'))) model.add(TimeDistributed(Conv1D(filters=filter_num[4], kernel_size=kernel_size[4], strides=conv_stride_size[4], padding='same', name='block4_conv1'))) model.add(TimeDistributed(BatchNormalization())) model.add(TimeDistributed(Activation('relu', name='block4_act1'))) model.add(TimeDistributed(Conv1D(filters=filter_num[4], kernel_size=kernel_size[4], strides=conv_stride_size[4], padding='same', name='block4_conv2'))) model.add(TimeDistributed(BatchNormalization())) model.add(TimeDistributed(Activation('relu', name='block4_act2'))) model.add(TimeDistributed(MaxPooling1D(pool_size=pool_size[4], strides=pool_stride_size[4], padding='same', name='block4_pool'))) model.add(TimeDistributed(Dropout(0.1, name='block4_dropout'))) model.add(TimeDistributed(Flatten(name='flatten'))) 
model.add(LSTM(256,return_sequences=True)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(LSTM(512)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(256, activation='relu')) model.add(Dense(classes, activation='softmax')) return model def run_CNN_LSTM(X_train,y_train,X_test,y_test): print('Pad sequences to ',maxlen) NB_SPLIT = 10 x_train = X_train[:] x_test = X_test[:] x_train = sequence.pad_sequences(x_train, maxlen=maxlen,padding='post',truncating='post') x_test = sequence.pad_sequences(x_test, maxlen=maxlen,padding='post',truncating='post') x_train = x_train.reshape(x_train.shape[0],NB_SPLIT,maxlen//NB_SPLIT,1)# 把他拆分成NB_SPLIT个子序列,每个子序列分别交给CNN去处理, x_test = x_test.reshape(x_test.shape[0],NB_SPLIT,maxlen//NB_SPLIT,1) y_train = np_utils.to_categorical(y_train, NB_CLASSES) y_test = np_utils.to_categorical(y_test, NB_CLASSES) print('the shape of x_train',x_train.shape) print('the shape of y_train',y_train.shape) print('the shape of x_test',x_test.shape) print('the shape of y_test',y_test.shape) model_CNN_LSTM = CNN_LSTM(maxlen,NB_SPLIT,NB_CLASSES) #model_CNN_LSTM.build() #model_CNN_LSTM.summary() model_CNN_LSTM.compile(loss='categorical_crossentropy',optimizer=OPTIMIZER,metrics=['accuracy', precision, recall, fmeasure]) history = model_CNN_LSTM.fit(x_train,y_train,batch_size=BATCH_SIZE,epochs=NB_EPOCH,validation_data=(x_test,y_test),verbose=0) score = model_CNN_LSTM.evaluate(x_test,y_test,verbose=VERBOSE) return(history.history) # + id="AuVICfeHwymL" colab_type="code" colab={} import keras from keras import backend as K def precision(y_true, y_pred): # Calculates the precision true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + K.epsilon()) return precision def recall(y_true, y_pred): # Calculates the recall true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = 
K.sum(K.round(K.clip(y_true, 0, 1))) recall = true_positives / (possible_positives + K.epsilon()) return recall def fbeta_score(y_true, y_pred, beta=1): # Calculates the F score, the weighted harmonic mean of precision and recall. if beta < 0: raise ValueError('The lowest choosable beta is zero (only precision).') # If there are no true positives, fix the F score at 0 like sklearn. if K.sum(K.round(K.clip(y_true, 0, 1))) == 0: return 0 p = precision(y_true, y_pred) r = recall(y_true, y_pred) bb = beta ** 2 fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon()) return fbeta_score def fmeasure(y_true, y_pred): # Calculates the f-measure, the harmonic mean of precision and recall. return fbeta_score(y_true, y_pred, beta=1) # + id="oEMNvc6PntbC" colab_type="code" outputId="0635f3d2-c482-4676-c87c-5fc79e028a22" colab={"base_uri": "https://localhost:8080/", "height": 1000} #DNN ############# test for noise adding , function :: creat_data############ BATCH_SIZE = 64 NB_EPOCH = 50 VALIDATION_SPLIT = 0.3 VERBOSE = 1 OPTIMIZER = Adamax() historys_DNN_PERCENT_NOISE = [] historys_DNN_NB_TIMES = [] PERCENT_NOISE = [0.1,0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,10,20] #hundred percent % NB_TIMES = [0.1,0.5,1,1.5,2,3,4,5] #new part of sample print('base_line for PERCENT_NOISE#######################################################################################################################################') X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) historys_DNN_PERCENT_NOISE.append(run_DNN(X_train,y_train,X_test,y_test)) for i in PERCENT_NOISE: print('spliting data...') X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) X_train,y_train = creat_data(X_train,y_train,PERCENT_NOISE=i,NB_TIMES=0.2) historys_DNN_PERCENT_NOISE.append(run_DNN(X_train,y_train,X_test,y_test)) print('base_line for 
NB_CLASSES###########################################################################################################################################') X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) historys_DNN_NB_TIMES.append(run_DNN(X_train,y_train,X_test,y_test)) for i in NB_TIMES: print('spliting data...') X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) X_train,y_train = creat_data(X_train,y_train,PERCENT_NOISE=0.2,NB_TIMES=i) historys_DNN_NB_TIMES.append(run_DNN(X_train,y_train,X_test,y_test)) with open('NOISE_ADDED_historys_DNN_PERCENT_NOISE.pkl','wb') as handle: pickle.dump(historys_DNN_PERCENT_NOISE,handle) with open('NOISE_ADDED_historys_DNN_NB_TIMES.pkl','wb') as handle: pickle.dump(historys_DNN_NB_TIMES,handle) # + id="G6Py6svpp1YU" colab_type="code" outputId="5e044d4f-8014-49bc-8757-d179e0150211" colab={"base_uri": "https://localhost:8080/", "height": 490} #CNN ############# test for noise adding , function :: creat_data############ BATCH_SIZE = 64 NB_EPOCH = 50 VALIDATION_SPLIT = 0.3 VERBOSE = 1 OPTIMIZER = Adamax() historys_CNN_PERCENT_NOISE = [] historys_CNN_NB_TIMES = [] PERCENT_NOISE = [0.1,0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,10,20] #hundred percent % NB_TIMES = [0.1,0.5,1,1.5,2,3,4,5] #new part of sample print('base_line for PERCENT_NOISE#######################################################################################################################################') X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) historys_CNN_PERCENT_NOISE.append(run_CNN(X_train,y_train,X_test,y_test)) for i in PERCENT_NOISE: print('spliting data...') X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) X_train,y_train = creat_data(X_train,y_train,PERCENT_NOISE=i,NB_TIMES=0.2) historys_CNN_PERCENT_NOISE.append(run_CNN(X_train,y_train,X_test,y_test)) print('base_line for 
NB_CLASSES###########################################################################################################################################') X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) historys_CNN_NB_TIMES.append(run_CNN(X_train,y_train,X_test,y_test)) for i in NB_TIMES: print('spliting data...') X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) X_train,y_train = creat_data(X_train,y_train,PERCENT_NOISE=0.2,NB_TIMES=i) historys_CNN_NB_TIMES.append(run_CNN(X_train,y_train,X_test,y_test)) with open('NOISE_ADDED_historys_CNN_PERCENT_NOISE.pkl','wb') as handle: pickle.dump(historys_CNN_PERCENT_NOISE,handle) with open('NOISE_ADDED_historys_CNN_NB_TIMES.pkl','wb') as handle: pickle.dump(historys_CNN_NB_TIMES,handle) # + id="ah_JCMAUj_Xh" colab_type="code" colab={} #LSTM ############# test for noise adding , function :: creat_data############ BATCH_SIZE = 64 NB_EPOCH = 50 VALIDATION_SPLIT = 0.3 VERBOSE = 1 OPTIMIZER = Adamax() historys_LSTM_PERCENT_NOISE = [] historys_LSTM_NB_TIMES = [] PERCENT_NOISE = [0.1,0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,10,20] #hundred percent % NB_TIMES = [0.1,0.5,1,1.5,2,3,4,5] #new part of sample print('base_line for PERCENT_NOISE#######################################################################################################################################') X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) historys_LSTM_PERCENT_NOISE.append(run_LSTM(X_train,y_train,X_test,y_test)) for i in PERCENT_NOISE: print('spliting data...') X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) X_train,y_train = creat_data(X_train,y_train,PERCENT_NOISE=i,NB_TIMES=0.2) historys_LSTM_PERCENT_NOISE.append(run_LSTM(X_train,y_train,X_test,y_test)) print('base_line for NB_CLASSES###########################################################################################################################################') X_train,X_test,y_train,y_test = 
train_test_split(X,y,test_size=0.3) historys_LSTM_NB_TIMES.append(run_LSTM(X_train,y_train,X_test,y_test)) for i in NB_TIMES: print('spliting data...') X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) X_train,y_train = creat_data(X_train,y_train,PERCENT_NOISE=0.2,NB_TIMES=i) historys_LSTM_NB_TIMES.append(run_LSTM(X_train,y_train,X_test,y_test)) with open('NOISE_ADDED_historys_LSTM_PERCENT_NOISE.pkl','wb') as handle: pickle.dump(historys_LSTM_PERCENT_NOISE,handle) with open('NOISE_ADDED_historys_LSTM_NB_TIMES.pkl','wb') as handle: pickle.dump(historys_LSTM_NB_TIMES,handle) # + id="SFReqQ7kkdcK" colab_type="code" colab={} #CNN_LSTM ############# test for noise adding , function :: creat_data############ BATCH_SIZE = 64 NB_EPOCH = 50 VALIDATION_SPLIT = 0.3 VERBOSE = 1 OPTIMIZER = Adamax() historys_CNN_LSTM_PERCENT_NOISE = [] historys_CNN_LSTM_NB_TIMES = [] PERCENT_NOISE = [0.1,0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,10,20] #hundred percent % NB_TIMES = [0.1,0.5,1,1.5,2,3,4,5] #new part of sample print('base_line for PERCENT_NOISE#######################################################################################################################################') X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) historys_CNN_LSTM_PERCENT_NOISE.append(run_CNN_LSTM(X_train,y_train,X_test,y_test)) for i in PERCENT_NOISE: print('spliting data...') X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) X_train,y_train = creat_data(X_train,y_train,PERCENT_NOISE=i,NB_TIMES=0.2) historys_CNN_LSTM_PERCENT_NOISE.append(run_CNN_LSTM(X_train,y_train,X_test,y_test)) print('base_line for NB_CLASSES###########################################################################################################################################') X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) historys_CNN_LSTM_NB_TIMES.append(run_CNN_LSTM(X_train,y_train,X_test,y_test)) for i in NB_TIMES: print('spliting data...') 
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3) X_train,y_train = creat_data(X_train,y_train,PERCENT_NOISE=0.2,NB_TIMES=i) historys_CNN_LSTM_NB_TIMES.append(run_CNN_LSTM(X_train,y_train,X_test,y_test)) with open('NOISE_ADDED_historys_CNN_LSTM_PERCENT_NOISE.pkl','wb') as handle: pickle.dump(historys_CNN_LSTM_PERCENT_NOISE,handle) with open('NOISE_ADDED_historys_CNN_LSTM_NB_TIMES.pkl','wb') as handle: pickle.dump(historys_CNN_LSTM_NB_TIMES,handle) # + id="xmJjFI9gkmPw" colab_type="code" colab={} aklsdjfldsalkfjasdlkf 故意报错停止 # + id="gZkPsw1S2SZt" colab_type="code" colab={} ''' import matplotlib.pyplot as plt plt.figure(figsize=(30,10)) labels_NB_TIMES = NB_TIMES.copy() labels_NB_TIMES.insert(0,0) for history,label in zip(historys_DNN_NB_TIMES,labels_NB_TIMES): print(np.mean(history[-6:-1]),label) plt.title('Result Analysis') plt.plot(range(len(history)),history,label='P'+str(label)) [plt.annotate(label,(i,history[i])) for i in range(len(history))] plt.legend() # 显示图例 plt.xlabel('Epoch') plt.ylabel('acc') plt.show() ''' # + id="X09FNfzf-K0k" colab_type="code" colab={} ''' plt.figure(figsize=(30,10)) labels_PERCENT_NOISE = PERCENT_NOISE.copy() labels_PERCENT_NOISE.insert(0,0) for history,label in zip(historys_DNN_PERCENT_NOISE,labels_PERCENT_NOISE): print(np.mean(history[-6:-1]),label) plt.title('Result Analysis') plt.plot(range(len(history)),history,label='P'+str(label)) [plt.annotate(label,(i,history[i])) for i in range(len(history))] plt.legend() # 显示图例 plt.xlabel('Epoch') plt.ylabel('acc') plt.show() ''' # + id="fhoWRsiS95Fh" colab_type="code" colab={} ''' import matplotlib.pyplot as plt plt.figure(figsize=(30,10)) labels_NB_TIMES = NB_TIMES.copy() labels_NB_TIMES.insert(0,0) for history,label in zip(historys_CNN_NB_TIMES,labels_NB_TIMES): print(np.mean(history[-6:-1]),label) plt.title('Result Analysis') plt.plot(range(len(history)),history,label='P'+str(label)) [plt.annotate(label,(i,history[i])) for i in range(len(history))] plt.legend() # 显示图例 
plt.xlabel('Epoch') plt.ylabel('acc') plt.show() ''' # + id="ct7ddkVv2fsO" colab_type="code" colab={} ''' plt.figure(figsize=(30,10)) labels_PERCENT_NOISE = PERCENT_NOISE.copy() labels_PERCENT_NOISE.insert(0,0) for history,label in zip(historys_CNN_PERCENT_NOISE,labels_PERCENT_NOISE): print(np.mean(history[-6:-1]),label) plt.title('Result Analysis') plt.plot(range(len(history)),history,label='P'+str(label)) [plt.annotate(label,(i,history[i])) for i in range(len(history))] plt.legend() # 显示图例 plt.xlabel('Epoch') plt.ylabel('acc') plt.show() ''' # + id="ASOofuK6U_Ps" colab_type="code" colab={} # + id="U5fGfE7yYekt" colab_type="code" colab={} # + id="4Rv1aH55UQFK" colab_type="code" colab={} y_train # + id="TbDvHvuPSLTp" colab_type="code" colab={} # + id="NE9KpCI5Jdhl" colab_type="code" colab={} # + id="BY9Pt18bJtjD" colab_type="code" colab={}
33,508
/get_transit.ipynb
792805ded76c866696549b9ca0e09441e00d580c
[]
no_license
ryan-oldfield/astroProject
https://github.com/ryan-oldfield/astroProject
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
720,857
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- #import necessary modules import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline #Read the data from csv file downloaded from UCI Machine Learning repository i.e. https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/ df = pd.read_csv('auto-mpg.csv') #Get a hold of data df.head() #Hava an idea of how correlated the variables are to each other as we can choose for ourselves the regression target/independent variables sns.heatmap(df.corr(), cmap='coolwarm') # + #Since horsepower is not included in our correlation data #Let's convert all the values in this column into NaN and Numbers df['horsepower'] = pd.to_numeric(df['horsepower'], errors='coerce') # - #Removing the NaN rows df.dropna(axis=0, inplace=True) df.mean() # + #Now we have the mean of horespower column too # - df.describe() sns.heatmap(df.corr(), cmap='coolwarm') #Check for NULL/NaN values and remove them sns.heatmap(df.isnull(), yticklabels=False, cbar = False, cmap = 'coolwarm') sns.distplot(df['mpg']) #Initializing multivariate Regression X = df[['cylinders', 'displacement', 'horsepower', 'weight','acceleration', 'model year', 'origin']] y = df['mpg'] #train_test_split from sklearn.model_selection import train_test_split #Split the file into train [80%] and test [20%] data X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=101) #importing LinearRegression from sklearn.linear_model import LinearRegression lm = LinearRegression() #fitting into lm model lm.fit(X_train,y_train) print lm.intercept_ print lm.coef_ predictions = lm.predict(X_test) plt.scatter(y_test,predictions) #Residual Histogram sns.distplot(y_test-predictions, bins=40) from sklearn import metrics 
metrics.r2_score(y_test,predictions) # + par = np.polyfit(y_test, predictions, 1, full=True) slope=par[0][0] intercept=par[0][1] xl = [min(y_test), max(y_test)] yl = [slope*predictions + intercept for predictions in xl] # - plt.plot(xl,yl) #Initializing Univariate Regression # + X = df[['model year']] y = df['mpg'] #train_test_split from sklearn.model_selection import train_test_split # - #Split the file into train [80%] and test [20%] data X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=101) # + #importing LinearRegression from sklearn.linear_model import LinearRegression lm = LinearRegression() # + #fitting into lm model lm.fit(X_train,y_train) # + predictions = lm.predict(X_test) plt.scatter(y_test,predictions) # + from sklearn import metrics metrics.r2_score(y_test,predictions) # + par = np.polyfit(y_test, predictions, 1, full=True) slope=par[0][0] intercept=par[0][1] xl = [min(y_test), max(y_test)] yl = [slope*predictions + intercept for predictions in xl] # - plt.plot(xl, yl, '-r') # + #CALCULATING Gradient Descent # + df1 = pd.read_csv('auto-mpg.csv') df1.head() # + # Load the data model = pd.DataFrame(df1, columns = ['mpg', 'acceleration']) model # - model.mean() model['mpg'].dtype model['acceleration'].dtype #let's look at their correlation to have a fair idea sns.heatmap(model.corr(), cmap='coolwarm') # Initialize the hyper parameters with guessing initial y-intercept and slope learning_rate = 0.0001 guess_y_intercept = 0 guess_slope = 0 num_iterations = 1000 # + points = model points[:10] # - len(points) points=points.reset_index().values points[:10] points[1,1] totalError = 0 for i in range(0, len(points)): x = points[i, 2] y = points[i, 1] totalError += (y - (guess_y_intercept * x + guess_slope)) ** 2 return totalError / float(len(points)) print("Starting gradient descent at b = {0}, m = {1}, error = {2}".format(initial_b,initial_m,compute_error_for_line_given_points(initial_b, initial_m, points))) # + b = 
starting_b m = starting_m for i in range(num_iterations): b, m = step_gradient(b, m, points, learning_rate) return [b, m] # -
4,296
/08/nuclear_accidents/homework-8-argueso-nuclear-accidents.ipynb
63207aa0ac39a11d8b25f0565767209ce7ff39e3
[]
no_license
oargueso/foundations-homework
https://github.com/oargueso/foundations-homework
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
419,839
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="UID9RK1qDlVB" # ## Task 2: K- Means Clustering # ## Made By: Abhinav Garg # + colab={"base_uri": "https://localhost:8080/", "height": 195} colab_type="code" executionInfo={"elapsed": 835, "status": "ok", "timestamp": 1544240454142, "user": {"displayName": "A M Aditya", "photoUrl": "https://lh3.googleusercontent.com/-WI8p7JNWLic/AAAAAAAAAAI/AAAAAAAAAfs/vS8ElgH0p0c/s64/photo.jpg", "userId": "15341571102300750919"}, "user_tz": -480} id="kO_1kOEGDTws" outputId="24bb4b2a-2342-4c91-c702-b127a5391152" # Importing the libraries import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from sklearn import datasets # Load the iris dataset iris = pd.read_csv("Iris.csv") iris.drop(["Id"],axis=1,inplace=True) iris.head() # - # Getting the Statistical Information iris.describe() # Lets check for datatypes iris.info() # ### Using Pairplots for better understanding of the data points distribution sns.pairplot(data=iris,hue="Species",palette="Set1") plt.show() # From above visuals iris-setosa is easily separable from the other two. from sklearn.cluster import KMeans features = iris.loc[:,["SepalLengthCm","SepalWidthCm","PetalLengthCm","PetalWidthCm"]] # + [markdown] colab_type="text" id="q_pPmK9GIKMz" # #### Below snippet shows how we can find the optimum number of clusters for K Means and how can we determine the value of K? 
# + colab={"base_uri": "https://localhost:8080/", "height": 376} colab_type="code" executionInfo={"elapsed": 1649, "status": "ok", "timestamp": 1544110062756, "user": {"displayName": "A M Aditya", "photoUrl": "https://lh3.googleusercontent.com/-WI8p7JNWLic/AAAAAAAAAAI/AAAAAAAAAfs/vS8ElgH0p0c/s64/photo.jpg", "userId": "15341571102300750919"}, "user_tz": -480} id="WevSKogFEalU" outputId="d0161248-67f2-48a4-cf4b-224f8faae045" # Finding the optimum number of clusters for k-means classification x = iris.iloc[:, [0, 1, 2, 3]].values from sklearn.cluster import KMeans wcss = [] for i in range(1, 11): kmeans = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0) kmeans.fit(x) wcss.append(kmeans.inertia_) # Plotting the results onto a line graph, # `allowing us to observe 'The elbow' plt.plot(range(1, 11), wcss) plt.title('The elbow method') plt.xlabel('Number of clusters') plt.ylabel('WCSS') # Within cluster sum of squares plt.show() # + [markdown] colab_type="text" id="IUXmLTh4Ih6r" # We can clearly see why it is called 'The elbow method' from the above graph, the optimum clusters is where the elbow occurs. This is when the within cluster sum of squares (WCSS) doesn't decrease significantly with every iteration. # # From this we choose the number of clusters as **3**. 
# - # ### Verifying visually that with which cluster number, K-means will be optimum # + plt.figure(figsize=(24,4)) plt.suptitle("K Means Clustering",fontsize=20) plt.subplot(1,5,1) plt.title("K = 1",fontsize=16) plt.xlabel("PetalLengthCm") plt.ylabel("PetalWidthCm") plt.scatter(features.PetalLengthCm,features.PetalWidthCm) plt.subplot(1,5,2) plt.title("K = 2",fontsize=16) plt.xlabel("PetalLengthCm") kmeans = KMeans(n_clusters=2) features["labels"] = kmeans.fit_predict(features) plt.scatter(features.PetalLengthCm[features.labels == 0],features.PetalWidthCm[features.labels == 0]) plt.scatter(features.PetalLengthCm[features.labels == 1],features.PetalWidthCm[features.labels == 1]) # dropping labels we only want to use features. features.drop(["labels"],axis=1,inplace=True) plt.subplot(1,5,4) plt.title("K = 3",fontsize=16) plt.xlabel("PetalLengthCm") kmeans = KMeans(n_clusters=3) features["labels"] = kmeans.fit_predict(features) plt.scatter(features.PetalLengthCm[features.labels == 0],features.PetalWidthCm[features.labels == 0]) plt.scatter(features.PetalLengthCm[features.labels == 1],features.PetalWidthCm[features.labels == 1]) plt.scatter(features.PetalLengthCm[features.labels == 2],features.PetalWidthCm[features.labels == 2]) # dropping labels as we only want to use features. features.drop(["labels"],axis=1,inplace=True) plt.subplot(1,5,3) plt.title("K = 4",fontsize=16) plt.xlabel("PetalLengthCm") kmeans = KMeans(n_clusters=4) features["labels"] = kmeans.fit_predict(features) plt.scatter(features.PetalLengthCm[features.labels == 0],features.PetalWidthCm[features.labels == 0]) plt.scatter(features.PetalLengthCm[features.labels == 1],features.PetalWidthCm[features.labels == 1]) plt.scatter(features.PetalLengthCm[features.labels == 2],features.PetalWidthCm[features.labels == 2]) plt.scatter(features.PetalLengthCm[features.labels == 3],features.PetalWidthCm[features.labels == 3]) # dropping labels as we only want to use features. 
features.drop(["labels"],axis=1,inplace=True) plt.subplot(1,5,5) plt.title("Original Labels",fontsize=16) plt.xlabel("PetalLengthCm") plt.scatter(iris.PetalLengthCm[iris.Species == "Iris-setosa"],iris.PetalWidthCm[iris.Species == "Iris-setosa"]) plt.scatter(iris.PetalLengthCm[iris.Species == "Iris-versicolor"],iris.PetalWidthCm[iris.Species == "Iris-versicolor"]) plt.scatter(iris.PetalLengthCm[iris.Species == "Iris-virginica"],iris.PetalWidthCm[iris.Species == "Iris-virginica"]) plt.subplots_adjust(top=0.8) plt.show() # + colab={} colab_type="code" id="aJbyXuNGIXI9" # Applying kmeans to the dataset / Creating the kmeans classifier kmeans = KMeans(n_clusters = 3, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0) y_kmeans = kmeans.fit_predict(x) # + colab={"base_uri": "https://localhost:8080/", "height": 364} colab_type="code" executionInfo={"elapsed": 670, "status": "ok", "timestamp": 1544110172140, "user": {"displayName": "A M Aditya", "photoUrl": "https://lh3.googleusercontent.com/-WI8p7JNWLic/AAAAAAAAAAI/AAAAAAAAAfs/vS8ElgH0p0c/s64/photo.jpg", "userId": "15341571102300750919"}, "user_tz": -480} id="Q42-XPJjIyXv" outputId="12284613-40c5-41c6-93ba-6b66fae0aa5f" # Visualising the clusters - On the first two columns plt.scatter(x[y_kmeans == 0, 0], x[y_kmeans == 0, 1], s = 100, c = 'red', label = 'Iris-setosa') plt.scatter(x[y_kmeans == 1, 0], x[y_kmeans == 1, 1], s = 100, c = 'blue', label = 'Iris-versicolour') plt.scatter(x[y_kmeans == 2, 0], x[y_kmeans == 2, 1], s = 100, c = 'green', label = 'Iris-virginica') # Plotting the centroids of the clusters plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:,1], s = 100, c = 'yellow', label = 'Centroids') plt.legend()
6,811
/data/NASAhackathon19.ipynb
1c148492520e24b3c5ee7114ec2f6ba8bf2bf674
[]
no_license
EazyReal/NASAHackathon2019
https://github.com/EazyReal/NASAHackathon2019
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
19,470
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Linked List Reversal # # ## Problem # # Write a function to reverse a Linked List in place. The function will take in the head of the list as input and return the new head of the list. # # You are given the example Linked List Node class: class Node(object): def __init__(self,value): self.value = value self.nextnode = None # # Solution # # Fill out your solution below: def reverse(head): current = head previous = None next_node = None while current != None: next_node = current.nextnode current.nextnode = previous previous = current current = next_node return previous # # Test Your Solution # # **Note, this isn't a classic run cell for testing your solution, please read the statements below carefully** # # You should be able to easily test your own solution to make sure it works. Given the short list a,b,c,d with values 1,2,3,4. Check the effect of your reverse function and make sure the results match the logic here below: # + # Create a list of 4 nodes a = Node(1) b = Node(2) c = Node(3) d = Node(4) # Set up order a,b,c,d with values 1,2,3,4 a.nextnode = b b.nextnode = c c.nextnode = d # - # Now let's check the values of the nodes coming after a, b and c: print(a.nextnode.value) print(b.nextnode.value) print(c.nextnode.value) d.nextnode.value # So far so good. Note how there is no value proceeding the last node, this makes sense! Now let's reverse the linked list, we should see the opposite order of values! reverse(a) print(d.nextnode.value) print(c.nextnode.value) print(b.nextnode.value) print(a.nextnode.value) # This will give an error since it now points to None # Great, now we can see that each of the values points to its previous value (although now that the linked list is reversed we can see the ordering has also reversed) # # ## Good Job!
2,135
/part_I/02_Linear/025_Exercises.ipynb
313a00878e7a0088cef8efde719b682d780ac4d2
[]
no_license
MikolajMGT/machine-learning
https://github.com/MikolajMGT/machine-learning
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
15,524
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ParkingBirmingham # ## dataDetect.py # #### observe data set import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.dates as mdate import datetime import pandas.plotting._converter as pandacnv pandacnv.register() df = pd.read_csv('ParkingBirminghamDataSet.csv') timeStamp = pd.to_datetime(df['LastUpdated'].str.split().str[0]) print("Time range: ", timeStamp.min().date(), " - ", timeStamp.max().date()) carparks_code = df['SystemCodeNumber'].unique() print("Unique Car Park Code list:") carparks_code print("Total number of unique Car Park Code: " + str(len(carparks_code))) minDate = timeStamp.min().date() maxDate = timeStamp.max().date() timeTicks = pd.date_range(minDate, maxDate, freq = 'W-MON') timeTicks plt.figure('TimeStamp Distribution') plt.figure(figsize=(15,8)) plt.scatter(df['SystemCodeNumber'], timeStamp, marker='*') plt.xticks(rotation = 270) plt.ylim((minDate, maxDate)) plt.gca().yaxis.set_major_formatter(mdate.DateFormatter('%Y-%m-%d')) plt.yticks(pd.date_range(minDate,maxDate,freq = 'W-MON')) plt.title('TimeStamps Distribution') plt.show() # + # general missing records on Oct. 20, Oct. 21 and Dec.3, Dec 4. # large scale missing records especially for BHMBRTARC01 and NIA North. 
# - # get time information def getTimeInfo(time): # get month, day and year time = time.split('/') month = int(time[0]) day = int(time[1]) year = int(time[2][:4]) # get hour and minute clock = time[2][5:].split(':') hour = int(clock[0]) minute = int(clock[1]) # the first recording date is 10/4/2016, so we can get the date index day_idx = (datetime.datetime(2016, month, day) - datetime.datetime(2016, 10, 4)).days + 1 minutes = hour * 60 + minute - 450 daytime_idx = ( minutes ) // 60 * 2 + ( minutes % 60 ) // 20 total_idx = (day_idx - 1) * 18 + daytime_idx return np.array([year, month, day, hour, minute, day_idx, daytime_idx, total_idx]) times = df['LastUpdated'].values times_info = [getTimeInfo(time) for time in times] times_info = np.array(times_info) times_info # + ## create time_info.cvs # times_info_df = pd.DataFrame(times_info, columns = ['year', 'month', 'day', 'hour', 'minute', 'day_idx', 'daytime_idx', 'total_idx']) # times_info_csv = times_info_df.to_csv(r'C:\Users\Euterpe\Desktop\PyProject\ParkingBirmingham\times_info.csv', index = None, header = True) # - print( 'Total Records: ', len(df)) print( 'Total Days: ', (datetime.datetime(2016, 12, 19) - datetime.datetime(2016, 10, 4)).days + 1) # record number of records for each car park numberOfRecords = [] print('CarParkCode TotalRecords Capacity') for carpark_code in carparks_code: curCarPark = df[df['SystemCodeNumber'] == carpark_code] print(carpark_code, len(curCarPark), curCarPark['Capacity'].unique()) numberOfRecords.append(len(curCarPark)) plt.figure('NumberOfRecords') plt.figure(figsize=(15,5)) xs = range(len(numberOfRecords)) (markers, stemlines, baseline) = plt.stem(xs, numberOfRecords) plt.setp(stemlines, linestyle = "-", color = "olive", linewidth = 1) plt.xticks(xs, carparks_code, rotation = 270) plt.show() # + # ######################################################################################### # Summarize # Total number of unique car park is 30. 
# Each car park have 1200 records per day except two. # The records of "BHMBRTARC01", "NIA North" are obviously less than other car parks. # ######################################################################################### # - a print("shape of pca_reduced.shape = ", pca_data.shape) # + # attaching the label for each 2-d data point pca_data = np.vstack((pca_data.T, labels)).T # creating a new data fram which help us in ploting the result data pca_df = pd.DataFrame(data=pca_data, columns=("1st_principal", "2nd_principal", "label")) sn.FacetGrid(pca_df, hue="label", size=6).map(plt.scatter, '1st_principal', '2nd_principal').add_legend() plt.show() # - # # PCA for dimensionality redcution (not for visualization) # + # PCA for dimensionality redcution (non-visualization) pca.n_components = 784 pca_data = pca.fit_transform(sample_data) percentage_var_explained = pca.explained_variance_ / np.sum(pca.explained_variance_); cum_var_explained = np.cumsum(percentage_var_explained) # Plot the PCA spectrum plt.figure(1, figsize=(6, 4)) plt.clf() plt.plot(cum_var_explained, linewidth=2) plt.axis('tight') plt.grid() plt.xlabel('n_components') plt.ylabel('Cumulative_explained_variance') plt.show() # If we take 200-dimensions, approx. 90% of variance is expalined. 
# - # # t-SNE using Scikit-Learn # + # TSNE from sklearn.manifold import TSNE # Picking the top 1000 points as TSNE takes a lot of time for 15K points data_1000 = standardized_data[0:1000,:] labels_1000 = labels1[0:1000] model = TSNE(n_components=2, random_state=0) # configuring the parameteres # the number of components = 2 # default perplexity = 30 # default learning rate = 200 # default Maximum number of iterations for the optimization = 1000 tsne_data = model.fit_transform(data_1000) # creating a new data frame which help us in ploting the result data tsne_data = np.vstack((tsne_data.T, labels_1000)).T tsne_df = pd.DataFrame(data=tsne_data, columns=("Dim_1", "Dim_2", "label")) # Ploting the result of tsne sn.FacetGrid(tsne_df, hue="label", size=6).map(plt.scatter, 'Dim_1', 'Dim_2').add_legend() plt.show() # + model = TSNE(n_components=2, random_state=0, perplexity=50) tsne_data = model.fit_transform(data_1000) # creating a new data fram which help us in ploting the result data tsne_data = np.vstack((tsne_data.T, labels_1000)).T tsne_df = pd.DataFrame(data=tsne_data, columns=("Dim_1", "Dim_2", "label")) # Ploting the result of tsne sn.FacetGrid(tsne_df, hue="label", size=6).map(plt.scatter, 'Dim_1', 'Dim_2').add_legend() plt.title('With perplexity = 50') plt.show() # + model = TSNE(n_components=2, random_state=0, perplexity=50, n_iter=5000) tsne_data = model.fit_transform(data_1000) # creating a new data fram which help us in ploting the result data tsne_data = np.vstack((tsne_data.T, labels_1000)).T tsne_df = pd.DataFrame(data=tsne_data, columns=("Dim_1", "Dim_2", "label")) # Ploting the result of tsne sn.FacetGrid(tsne_df, hue="label", size=6).map(plt.scatter, 'Dim_1', 'Dim_2').add_legend() plt.title('With perplexity = 50, n_iter=5000') plt.show() # + model = TSNE(n_components=2, random_state=0, perplexity=2) tsne_data = model.fit_transform(data_1000) # creating a new data fram which help us in ploting the result data tsne_data = np.vstack((tsne_data.T, 
labels_1000)).T tsne_df = pd.DataFrame(data=tsne_data, columns=("Dim_1", "Dim_2", "label")) # Ploting the result of tsne sn.FacetGrid(tsne_df, hue="label", size=6).map(plt.scatter, 'Dim_1', 'Dim_2').add_legend() plt.title('With perplexity = 2') plt.show() # + #Excercise: Run the same analysis using 42K points with various #values of perplexity and iterations. # If you use all of the points, you can expect plots like this blog below: # http://colah.github.io/posts/2014-10-Visualizing-MNIST/ # - p_sentiment_score.sort_values(by = 'Sentiment_Polarity', ascending=False) app_sentiment_score_sorted.head() # + dc={"key": "4"} top_10_user_feedback = app_sentiment_score_sorted.head(10) top_10_user_feedback # + [markdown] dc={"key": "4"} # ` Note` # # This is the **Unguided** version of The Android App Market Analysis on DataCamp. # # **DONE ON:** 22.09.2021
7,749
/AICT_CAE-Lab/test3.ipynb
49a5c382da748eb7aa85c7045ee3e0dbb332df21
[]
no_license
Jdamdam/ML_study
https://github.com/Jdamdam/ML_study
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
2,272,262
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import tensorflow as tf import numpy as np import matplotlib.pyplot as plt # %matplotlib inline tf.set_random_seed(777) # for reproducibility f_data = np.loadtxt('crack_or_cicle_training4.csv', delimiter=',', dtype=np.float32) f_test = np.loadtxt('crack_or_cicle_test.csv', delimiter=',', dtype=np.float32) def MinMaxScaler(data): numerator = data - np.min(data, 0) denominator = np.max(data, 0) - np.min(data, 0) # noise term prevents the zero division return numerator / (denominator + 1e-7) # - f_data = MinMaxScaler(f_data) f_test = MinMaxScaler(f_test) print(f_data) print(f_test) # + x_data = f_data[:, 0:-1] y_data = f_data[:, [-1]] x_test = f_test[:, 0:-1] y_test = f_test[:, [-1]] print(x_data) print(y_data) print(x_test) print(y_test) # + X = tf.placeholder(tf.float32, shape=[None, 2]) Y = tf.placeholder(tf.float32, shape=[None, 1]) W1 = tf.Variable(tf.random_normal([2,4]), name='weight1') b1 = tf.Variable(tf.random_normal([4]), name='bias1') layer1 = tf.nn.relu(tf.matmul(X, W1) + b1) W2 = tf.Variable(tf.random_normal([4,4]), name='weight2') b2 = tf.Variable(tf.random_normal([4]), name='bias2') layer2 = tf.nn.relu(tf.matmul(layer1, W2) + b2) W3 = tf.Variable(tf.random_normal([4,1]), name='weight2') b3 = tf.Variable(tf.random_normal([1]), name='bias2') hypothesis = tf.matmul(layer2, W3) + b3 # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.001 training_step = 500 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. 
sess = tf.Session() # Initializes global variables in the graph. sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # - plt.plot(range(len(cost_history)),cost_history) plt.axis([0,training_step,0,np.max(cost_history)]) plt.show() # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.01 training_step = 500 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.1 training_step = 500 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.001 training_step = 5000 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.01 training_step = 5000 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.1 training_step = 5000 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.001 training_step = 10000 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.01 training_step = 10000 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.1 training_step = 10000 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # + X = tf.placeholder(tf.float32, shape=[None, 2]) Y = tf.placeholder(tf.float32, shape=[None, 1]) W1 = tf.Variable(tf.random_normal([2,4]), name='weight1') b1 = tf.Variable(tf.random_normal([4]), name='bias1') layer1 = tf.nn.sigmoid(tf.matmul(X, W1) + b1) W2 = tf.Variable(tf.random_normal([4,4]), name='weight2') b2 = tf.Variable(tf.random_normal([4]), name='bias2') layer2 = tf.nn.sigmoid(tf.matmul(layer1, W2) + b2) W3 = tf.Variable(tf.random_normal([4,1]), name='weight2') b3 = tf.Variable(tf.random_normal([1]), name='bias2') hypothesis = tf.matmul(layer2, W3) + b3 # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.01 training_step = 1000 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # - plt.plot(range(len(cost_history)),cost_history) plt.axis([0,training_step,0,np.max(cost_history)]) plt.show() # + X = tf.placeholder(tf.float32, shape=[None, 2]) Y = tf.placeholder(tf.float32, shape=[None, 1]) W1 = tf.Variable(tf.random_normal([2,4]), name='weight1') b1 = tf.Variable(tf.random_normal([4]), name='bias1') layer1 = tf.nn.relu(tf.matmul(X, W1) + b1) W2 = tf.Variable(tf.random_normal([4,4]), name='weight2') b2 = tf.Variable(tf.random_normal([4]), name='bias2') layer2 = tf.nn.relu(tf.matmul(layer1, W2) + b2) W3 = tf.Variable(tf.random_normal([4,1]), name='weight2') b3 = tf.Variable(tf.random_normal([1]), name='bias2') hypothesis = tf.matmul(layer2, W3) + b3 # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.01 training_step = 1000 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # - plt.plot(range(len(cost_history)),cost_history) plt.axis([0,training_step,0,np.max(cost_history)]) plt.show() # + X = tf.placeholder(tf.float32, shape=[None, 2]) Y = tf.placeholder(tf.float32, shape=[None, 1]) W1 = tf.Variable(tf.random_normal([2,2]), name='weight1') b1 = tf.Variable(tf.random_normal([2]), name='bias1') layer1 = tf.nn.relu(tf.matmul(X, W1) + b1) W2 = tf.Variable(tf.random_normal([2,2]), name='weight2') b2 = tf.Variable(tf.random_normal([2]), name='bias2') layer2 = tf.nn.relu(tf.matmul(layer1, W2) + b2) W3 = tf.Variable(tf.random_normal([2,1]), name='weight2') b3 = tf.Variable(tf.random_normal([1]), name='bias2') hypothesis = tf.matmul(layer2, W3) + b3 # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.01 training_step = 1000 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # + X = tf.placeholder(tf.float32, shape=[None, 2]) Y = tf.placeholder(tf.float32, shape=[None, 1]) W1 = tf.Variable(tf.random_normal([2,3]), name='weight1') b1 = tf.Variable(tf.random_normal([3]), name='bias1') layer1 = tf.nn.relu(tf.matmul(X, W1) + b1) W2 = tf.Variable(tf.random_normal([3,3]), name='weight2') b2 = tf.Variable(tf.random_normal([3]), name='bias2') layer2 = tf.nn.relu(tf.matmul(layer1, W2) + b2) W3 = tf.Variable(tf.random_normal([3,1]), name='weight2') b3 = tf.Variable(tf.random_normal([1]), name='bias2') hypothesis = tf.matmul(layer2, W3) + b3 # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.01 training_step = 1000 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # + X = tf.placeholder(tf.float32, shape=[None, 2]) Y = tf.placeholder(tf.float32, shape=[None, 1]) W1 = tf.Variable(tf.random_normal([2,4]), name='weight1') b1 = tf.Variable(tf.random_normal([4]), name='bias1') layer1 = tf.nn.relu(tf.matmul(X, W1) + b1) W2 = tf.Variable(tf.random_normal([4,4]), name='weight2') b2 = tf.Variable(tf.random_normal([4]), name='bias2') layer2 = tf.nn.relu(tf.matmul(layer1, W2) + b2) W3 = tf.Variable(tf.random_normal([4,1]), name='weight2') b3 = tf.Variable(tf.random_normal([1]), name='bias2') hypothesis = tf.matmul(layer2, W3) + b3 # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.01 training_step = 1000 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # + X = tf.placeholder(tf.float32, shape=[None, 2]) Y = tf.placeholder(tf.float32, shape=[None, 1]) W1 = tf.Variable(tf.random_normal([2,5]), name='weight1') b1 = tf.Variable(tf.random_normal([5]), name='bias1') layer1 = tf.nn.relu(tf.matmul(X, W1) + b1) W2 = tf.Variable(tf.random_normal([5,5]), name='weight2') b2 = tf.Variable(tf.random_normal([5]), name='bias2') layer2 = tf.nn.relu(tf.matmul(layer1, W2) + b2) W3 = tf.Variable(tf.random_normal([5,1]), name='weight2') b3 = tf.Variable(tf.random_normal([1]), name='bias2') hypothesis = tf.matmul(layer2, W3) + b3 # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.01 training_step = 1000 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy ) # + X = tf.placeholder(tf.float32, shape=[None, 2]) Y = tf.placeholder(tf.float32, shape=[None, 1]) W1 = tf.Variable(tf.random_normal([2,6]), name='weight1') b1 = tf.Variable(tf.random_normal([6]), name='bias1') layer1 = tf.nn.relu(tf.matmul(X, W1) + b1) W2 = tf.Variable(tf.random_normal([6,6]), name='weight2') b2 = tf.Variable(tf.random_normal([6]), name='bias2') layer2 = tf.nn.relu(tf.matmul(layer1, W2) + b2) W3 = tf.Variable(tf.random_normal([6,1]), name='weight2') b3 = tf.Variable(tf.random_normal([1]), name='bias2') hypothesis = tf.matmul(layer2, W3) + b3 # + # Simplified cost/loss function cost = tf.reduce_mean(tf.square(hypothesis - Y)) learning_rate = 0.01 training_step = 1000 cost_history = np.empty(shape=[1],dtype=float) # Minimize optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Launch the graph in a session. sess = tf.Session() # Initializes global variables in the graph. 
sess.run(tf.global_variables_initializer()) for epoch in range(training_step): cost_val, hy_val, _ = sess.run([cost, hypothesis,optimizer],feed_dict={X:x_data,Y:y_data}) cost_history = np.append(cost_history,sess.run(cost,feed_dict={X: x_data,Y: y_data})) print("Step: ", epoch, "Cost: ", cost_val) # print("Step: ", epoch, "Cost: ", cost_val, "\n", "prediction: ", "\n", hy_val) # if(epoch % 10 == 0): # val_acc = (sess.run(accuracy, feed_dict={X: x_test, Y: y_test})) # val_list.append({"epoch":epoch, "val_acc": val_acc}) # print("epoch: %s valiation accuracy: %s" %(epoch, val_acc)) print("Learning finished") print("------------------") test_val, test_hy = sess.run([cost,hypothesis], feed_dict={X: x_test, Y: y_test}) print("Test error", test_val, "\n" "Test Prediction" "\n", test_hy )
27,626
/P1.ipynb
b3d54aeff708eec50e0dba5297aee1493f5189ce
[]
no_license
babbling/basic_lane_finding
https://github.com/babbling/basic_lane_finding
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
427,395
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import scipy.spatial import numpy as np import re t = list() with open("sentences.txt", "r") as file: sentences = file.readlines() for sentence in sentences: t.append([word for word in re.split('[^a-z]', sentence.lower()) if word != '']) len(t) words = dict() i = 0 for sentence in t: for word in sentence: if word not in words: words[word] = i i += 1 len(words) matrix = np.zeros((len(t), len(words))) for i in range(0, len(t)): for word in t[i]: matrix[i][words[word]] += 1 matrix # + distances = list() for i in range(len(sentences)): distances.append((i, scipy.spatial.distance.cosine(matrix[0], matrix[i]))) sorted_list = sorted(distances, key=lambda tup: tup[1]) print sorted_list[1][0], sorted_list[2][0] # - def f (x): return np.sin(x / 5.0) * np.exp(x / 10.0) + 5 * np.exp(-x / 2.0) x = np.array([1, 4, 8, 10, 15]) n = 1 A1 = [ [x[0] ** i for i in range(0, n+1)], [x[4] ** i for i in range(0, n+1)] ] B1 = [f(x[0]), f(x[4])] scipy.linalg.solve(A1, B1) n = 2 A1 = [ [x[0] ** i for i in range(0, n+1)], [x[2] ** i for i in range(0, n+1)], [x[4] ** i for i in range(0, n+1)] ] B1 = [f(x[0]), f(x[2]), f(x[4])] scipy.linalg.solve(A1, B1) n = 3 A1 = [ [x[0] ** i for i in range(0, n+1)], [x[1] ** i for i in range(0, n+1)], [x[3] ** i for i in range(0, n+1)], [x[4] ** i for i in range(0, n+1)] ] B1 = [f(x[0]), f(x[1]), f(x[3]), f(x[4])] ans = scipy.linalg.solve(A1, B1) map(lambda x: x.round(2), ans) from matplotlib import pylab as plt plt.plot([x[0], x[1], x[3], x[4]], ans) plt.show() x1 = np.arange(1, 15, 0.1) y = f(x1) plt.plot(x1, y) plt.show() plt.plot([x[0], x[1], x[3], x[4]], ans, 'o', x1, y, '-') plt.show() variable (Χ2) defined by the following equation. 
# `Χ2 = Σ [ (Or,c - Er,c)2 / Er,c ]` # # where Or,c is the observed frequency count at level r of Variable A and level c of Variable B, and Er,c is the expected frequency count at level r of Variable A and level c of Variable B. # # # ### P-value # # The P-value is the probability of observing a sample statistic as extreme as the test statistic. # # ### Conclude # # * If observed chi-square < critical chi-square, then variables are not related. # * If observed chi-square > critical chi-square, then variables are not independent (and hence may be related). # # For DF=1 and a precision of 5% (α=0.05), the critical chi-square is 3.841. # ## Tools # ### Test definitions population = 1000 nb_exp = 1000 # !pip install joblib from joblib import Parallel, delayed def chi_evaluation_over_populations(nb_experiments, log_scale_populations, hashers, nb_jobs): chi_squre = {hasher_name: list() for hasher_name, hasher in hashers.items()} for hasher_name, hasher in hashers.items(): for log_population in log_scale_populations: print('Computing chi square for {hasher_name} hash function on 10^{log_population} users'.format( hasher_name=hasher_name, log_population=log_population)) chi_values = Parallel(n_jobs=nb_jobs)(delayed(compute_chi_square)(hasher, 10**log_population) for _ in range(nb_experiments)) # print(chi_values) chi_squre[hasher_name].append(sorted(chi_values)[nb_experiments // 2]) return chi_squre def compute_chi_square(hasher, population): return chi.statistic_test(DataHelper.generate(hasher, population)) # + nb_experiments = 20 log_scale_populations = [1, 2, 3, 4, 5, 6] hashers = { "built-in": BuiltInHasher(), "md5": Md5Hasher(), "sha256": Sha256Hasher() } nb_jobs = 8 # - chi_square = chi_evaluation_over_populations(nb_experiments, log_scale_populations, hashers, nb_jobs) chi_square # %matplotlib inline import matplotlib.pyplot as plt plt.style.use('seaborn-whitegrid') # + fig = plt.figure() ax = plt.axes() for hasher_name, chi_values in chi_square.items(): 
ax.plot(log_scale_populations, chi_values, label=hasher_name) plt.legend(); # - nny(img, low_threshold, high_threshold) def gaussian_blur(img, kernel_size): """Applies a Gaussian Noise kernel""" return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0) def region_of_interest(img, vertices): """ Applies an image mask. Only keeps the region of the image defined by the polygon formed from `vertices`. The rest of the image is set to black. """ #defining a blank mask to start with mask = np.zeros_like(img) #defining a 3 channel or 1 channel color to fill the mask with depending on the input image if len(img.shape) > 2: channel_count = img.shape[2] # i.e. 3 or 4 depending on your image ignore_mask_color = (255,) * channel_count else: ignore_mask_color = 255 #filling pixels inside the polygon defined by "vertices" with the fill color cv2.fillPoly(mask, vertices, ignore_mask_color) #returning the image only where mask pixels are nonzero masked_image = cv2.bitwise_and(img, mask) return masked_image def draw_lines(img, lines, color=[255, 0, 0], thickness=2): """ NOTE: this is the function you might want to use as a starting point once you want to average/extrapolate the line segments you detect to map out the full extent of the lane (going from the result shown in raw-lines-example.mp4 to that shown in P1_example.mp4). Think about things like separating line segments by their slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left line vs. the right line. Then, you can average the position of each of the lines and extrapolate to the top and bottom of the lane. This function draws `lines` with `color` and `thickness`. Lines are drawn on the image inplace (mutates the image). 
If you want to make the lines semi-transparent, think about combining this function with the weighted_img() function below """ left_lines=[] right_lines=[] for line in lines: for x1,y1,x2,y2 in line: slope=(y2-y1)/(x2-x1) if slope < 0: left_lines.append(line) elif slope > 0: right_lines.append(line) t_lx1=0 t_ly1=0 t_lx2=0 t_ly2=0 min_ly=img.shape[1] for ll in left_lines: for x1,y1,x2,y2 in ll: t_lx1=t_lx1+x1 t_ly1=t_ly1+y1 t_lx2=t_lx2+x2 t_ly2=t_ly2+y2 if y2<min_ly: min_ly=y2 t_rx1=0 t_ry1=0 t_rx2=0 t_ry2=0 min_ry=img.shape[1] for rl in right_lines: for x1,y1,x2,y2 in rl: t_rx1=t_rx1+x1 t_ry1=t_ry1+y1 t_rx2=t_rx2+x2 t_ry2=t_ry2+y2 if y1<min_ry: min_ry=y1 # find the minimum y line min_y = min(min_ly, min_ry) # average the lines into a single lines, one for each side left_lines_count=len(left_lines) right_lines_count=len(right_lines) if left_lines_count > 0: lx1=t_lx1/left_lines_count ly1=t_ly1/left_lines_count lx2=t_lx2/left_lines_count ly2=t_ly2/left_lines_count # recalculate the slope left_slope=(ly2-ly1)/(lx2-lx1) lb=ly1-(left_slope)*lx1 # left intercept # compute the absolute end points l_abs_x1=int(round((img.shape[1]-lb)/left_slope)) l_abs_y1=int(round(img.shape[1])) l_abs_x2=int(round((min_y-lb)/left_slope)) l_abs_y2=int(round(min_y)) cv2.line(img, (l_abs_x1, l_abs_y1), (l_abs_x2, l_abs_y2), color, thickness) if right_lines_count > 0: rx1=t_rx1/right_lines_count ry1=t_ry1/right_lines_count rx2=t_rx2/right_lines_count ry2=t_ry2/right_lines_count # recalculate the slope right_slope=(ry2-ry1)/(rx2-rx1) rb=ry1-(right_slope)*rx1 # right intercept r_abs_x1=int(round((min_y-rb)/right_slope)) r_abs_y1=int(round(min_y)) r_abs_x2=int(round((img.shape[1]-rb)/right_slope)) r_abs_y2=int(round(img.shape[1])) cv2.line(img, (r_abs_x1, r_abs_y1), (r_abs_x2, r_abs_y2), color, thickness) def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap): """ `img` should be the output of a Canny transform. Returns an image with hough lines drawn. 
""" lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap) line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8) draw_lines(line_img, lines) return line_img # Python 3 has support for cool math symbols. def weighted_img(img, initial_img, α=0.8, β=1., λ=0.): """ `img` is the output of the hough_lines(), An image with lines drawn on it. Should be a blank image (all black) with lines drawn on it. `initial_img` should be the image before any processing. The result image is computed as follows: initial_img * α + img * β + λ NOTE: initial_img and img must be the same shape! """ return cv2.addWeighted(initial_img, α, img, β, λ) # - # ## Test on Images # # Now you should build your pipeline to work on the images in the directory "test_images" # **You should make sure your pipeline works well on these images before you try the videos.** import os os.listdir("test_images/") # run your solution on all test_images and make copies into the test_images directory). # + # TODO: Build your pipeline that will draw lane lines on the test_images # then save them to the test_images directory. 
#for f in os.listdir("test_images/"): # image = mpimg.imread('test_images/'+f) # gray = grayscale(image) # #plt.imshow(gray) # plt.imshow(gray, cmap='gray') image = mpimg.imread('test_images/solidWhiteRight.jpg') gray = grayscale(image) plt.imshow(gray, cmap='gray') # Define a kernel size and apply Gaussian smoothing kernel_size = 5 blur_gray = gaussian_blur(gray,kernel_size) # Define our parameters for Canny and apply low_threshold = 50 high_threshold = 150 edges = canny(blur_gray, low_threshold, high_threshold) imshape = edges.shape #print(imshape) vertices = np.array([[(0,imshape[0]),(460, 312), (480, 312), (imshape[1],imshape[0])]], dtype=np.int32) masked_edges = region_of_interest(edges, vertices) # Define the Hough transform parameters # Make a blank the same size as our image to draw on rho = 2 # distance resolution in pixels of the Hough grid theta = np.pi/180 # angular resolution in radians of the Hough grid threshold = 15 #15 # minimum number of votes (intersections in Hough grid cell) min_line_length = 40 #40 #minimum number of pixels making up a line max_line_gap = 20 #20 # maximum gap in pixels between connectable line segments line_image = np.copy(image)*0 # creating a blank to draw lines on hough_image = hough_lines(masked_edges, rho, theta, threshold, min_line_length, max_line_gap) weighted_image = weighted_img(hough_image, image, 0.8, 1., 0.) plt.imshow(weighted_image) # - # ## Test on Videos # # You know what's cooler than drawing lanes over images? Drawing lanes over video! 
# # We can test our solution on two provided videos: # # `solidWhiteRight.mp4` # # `solidYellowLeft.mp4` # Import everything needed to edit/save/watch video clips from moviepy.editor import VideoFileClip from IPython.display import HTML def process_image(image): # NOTE: The output you return should be a color image (3 channel) for processing video below # TODO: put your pipeline here, # you should return the final output (image with lines are drawn on lanes) #image = mpimg.imread('test_images/solidYellowLeft.jpg') gray = grayscale(image) # Define a kernel size and apply Gaussian smoothing kernel_size = 5 blur_gray = gaussian_blur(gray,kernel_size) # Define our parameters for Canny and apply #low_threshold = 50 #high_threshold = 150 high_threshold, thresh_im = cv2.threshold(blur_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) low_threshold = 0.5*high_threshold edges = canny(blur_gray, low_threshold, high_threshold) imshape = edges.shape vertices = np.array([[(0,imshape[0]),(460, 312), (480, 312), (imshape[1],imshape[0])]], dtype=np.int32) masked_edges = region_of_interest(edges, vertices) # Define the Hough transform parameters # Make a blank the same size as our image to draw on rho = 2 # distance resolution in pixels of the Hough grid theta = np.pi/180 # angular resolution in radians of the Hough grid threshold = 60 #15 # minimum number of votes (intersections in Hough grid cell) min_line_length = 60 #40 #minimum number of pixels making up a line max_line_gap = 40 #20 # maximum gap in pixels between connectable line segments line_image = np.copy(image)*0 # creating a blank to draw lines on hough_image = hough_lines(masked_edges, rho, theta, threshold, min_line_length, max_line_gap) result = weighted_img(hough_image, image, 0.8, 1., 0.) return result # Let's try the one with the solid white lane on the right first ... 
white_output = 'white.mp4' clip1 = VideoFileClip("solidWhiteRight.mp4") white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!! # %time white_clip.write_videofile(white_output, audio=False) # Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice. HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(white_output)) # **At this point, if you were successful you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. Modify your draw_lines function accordingly and try re-running your pipeline.** # Now for the one with the solid yellow lane on the left. This one's more tricky! yellow_output = 'yellow.mp4' clip2 = VideoFileClip('solidYellowLeft.mp4') yellow_clip = clip2.fl_image(process_image) # %time yellow_clip.write_videofile(yellow_output, audio=False) HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(yellow_output)) # ## Reflections # # Congratulations on finding the lane lines! As the final step in this project, we would like you to share your thoughts on your lane finding pipeline... specifically, how could you imagine making your algorithm better / more robust? Where will your current algorithm be likely to fail? # # Please add your thoughts below, and if you're up for making your pipeline more robust, be sure to scroll down and check out the optional challenge video below! # # My Reflections: # - My annotations include a few lines that are not in line with the lane markers. These are momentary and occur when the road is curving. 
I could try to eliminate these by maintaining a history of slopes for both left and right lanes and only allow line predictions that are within a certain allowed slope tolerance given a pre-defined rate of change in slope. This will smoothen out the line predictions by eliminating predictions that are way off from the previous predictions. # - Near the apex where the lines approach each other, they sometimes cross each other. This could be eliminated by ensure the x-values of the two line predictions are never allowed to cross each other during the intercept calculations. # - Line predictions are just that. Lines. One could perhaps try to account for curves and changes in perspective by drawing out curved lines instead using maybe Bezier curves. # - Shadows on the road can interfere with the line detection so perhaps try adjusting the thresholds and also try different color schemes instead of just RGB. Perhaps CMYK. # # ## Submission # # If you're satisfied with your video outputs it's time to submit! Submit this ipython notebook for review. # # ## Optional Challenge # # Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project! challenge_output = 'extra.mp4' clip2 = VideoFileClip('challenge.mp4') challenge_clip = clip2.fl_image(process_image) # %time challenge_clip.write_videofile(challenge_output, audio=False) HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(challenge_output))
17,112
/thanksgiving-dinner/Thanksgiving Dinner Data.ipynb
ff9b2f5969caa757c79393370695e87a8e55c434
[]
no_license
henriquehfr/data-science-studies
https://github.com/henriquehfr/data-science-studies
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
33,850
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #scrapy 爬虫框架 #爬虫框架是实现爬虫功能的一个软件结构和功能组件集合 #爬虫框架是一个半成品,能够帮助用户实现专业网络爬虫 #7个部分 #spider,engine,item pipelines,engine,downloader,scheduler, #中间介模块:middleware #三个数据流 #Engine,控制所有模块之间的数据流,根据条件触发控制 #Downloader,根据请求下载网页 #schduler,管理/调度爬取请求 #downloader middleware,实施engine,scheduler和downloader之间用户可配置的控制,通过中间键的编写,修改,丢弃,新增请求或响应 #spider,解析downloader返回的响应(response),产生爬取项(scraped item),产生额外的爬取请求(request) #item pipelines,以流水线方式处理sipder产生的爬取项,由一组操作顺序组成,类似流水线,每个操作是一个item pipeline类型 #可能操作包括:清理\检验和查重爬取项中的HTML数据,将数据存储到数据库中. #spider Middleware,对请求和爬取项(request,scraped item)的再处理,修改\丢弃\新增请求和爬取项 # + # %matplotlib inline import matplotlib.pyplot as plt import matplotlib.image as img im1=img.imread('C:\\Users\\hzg0601\\Pictures\\scrapy2.png') plt.imshow(im1) plt.axis('off') plt.show() # - #requests vs. 
scrapy #两者都没有处理js,提交表单\应对验证码等功能(可扩展) #反扒网站需要慢速爬取 #scapy框架适用于需要累积,不间断获取数据 #定制程度很高的需求(不考虑规模),自搭框架,requests>scrapy # %matplotlib inline import matplotlib.pyplot as plt import matplotlib.image as img im3=img.imread('C:\\Users\\hzg0601\\Pictures\\scrapy3.png') plt.imshow(im3) plt.axis('off') plt.show() # + #scrapy是为持续运行设计的专业爬虫框架,提供操作的scrapy命令行 #scrapy -h #命令行格式, scrapy <command> [option][args] #命令行(不是图形界面)更容易自动化,适合脚本控制 #scrapy 命令 #startproject,创建一个新工程,scrapy startproject <name>[dir] #genspider,创建一个爬虫,scrapy genspider [options] #settings,获得爬虫信息,scrapy settings[options] #crawl,运行一个爬虫,scrapy crawl<spider> #list,列出工程中的所有爬虫,scrapy list #shell,启动URL调试命令行,scrapy shell[url] # %matplotlib inline import matplotlib.pyplot as plt import matplotlib.image as img im4=img.imread('C:\\Users\\hzg0601\\Pictures\\scrapy4.png') plt.imshow(im4) plt.axis('off') plt.show() # + #建立工程和spider模板 #scrapy startproject BaiduStocks#命令行 # #cd BaiduStocks #scrapy genspider [例子,example example.com] stocks baidu.com #scrapy crawl # + #优化,修改settings的并发控制 # %matplotlib inline import matplotlib.pyplot as plt import matplotlib.image as img im5=img.imread('C:\\Users\\hzg0601\\Pictures\\scrapy5.png') plt.imshow(im5) plt.axis('off') plt.show() # + # -*- coding: utf-8 -*- import scrapy import re ##建立工程和spider模板 #scrapy startproject BaiduStocks#命令行 # #cd BaiduStocks #scrapy genspider [例子,example example.com] stocks baidu.com ##编写spider文件 #配置stocks.py文件 #修改对返回页面的处理 #修改对新增url爬取请求的处理 ##配置pipeline文件 ##修改settings.py ##scrapy crawl stocks class StocksSpider(scrapy.Spider): name = 'stocks' start_urls = ['http://quote.eastmoney.com/stocklist.html']#初始url链接 def parse(self, response): for href in response.css('a::attrs(href)').extract():#提取a标签中的链接中href属性 try: stock=re.findall(r'[s][hz]\d{6}',href)[0]#正则化获得列表 url='https://gupiao.baidu.com/stock/'+stock+'.html' yield scrapy.Request(url,callback=self.parse_stock)#将url作为新的请求提交给scrapy, #yield 关键词将parse作为一个生成器,callback定义对应响应的处理函数 except: continue def parse_stock(self, response): 
infoDict = {} stockInfo = response.css('.stock-bets')#获取s属性为stock-bets的标签 name = stockInfo.css('.bets-name').extract()[0]#获取属性为bets-name的文本的第一个 keyList = stockInfo.css('dt').extract()#获取所有名称为dt的标签的文本 valueList = stockInfo.css('dd').extract() for i in range(len(keyList)): key = re.findall(r'>.*</dt>', keyList[i])[0][1:-5]#获取每一个条目名 try: val = re.findall(r'\d+\.?.*</dd>', valueList[i])[0][0:-5]#获取每一个条目的值 except: val = '--' infoDict[key]=val#更新条目与值 infoDict.update( {'股票名称': re.findall('\s.*\(',name)[0].split()[0] + \ re.findall('\>.*\<', name)[0][1:-1]})#更新股票名称 yield infoDict # + # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html class BaidustocksPipeline(object): def process_item(self, item, spider): return item class BaidustocksInfoPipeline(object): def open_spider(self,spider): self.f=open('BaiduStockInfo.txt','w') def close_spider(self,spider): self.f.close() def process_item(self,item,spider): try: line=str(dict(item))+'\n' self.f.write(line) except: pass return item # + BOT_NAME = 'BaiduStocks' SPIDER_MODULES = ['BaiduStocks.spiders'] NEWSPIDER_MODULE = 'BaiduStocks.spiders' ROBOTSTXT_OBEY = True ITEM_PIPELINES = { 'BaiduStocks.pipelines.BaidustocksInfoPipeline': 300, }
5,124
/.ipynb_checkpoints/Challenge_check-checkpoint.ipynb
da4e562f819df296a4e5f892c52adb48de63e064
[]
no_license
loc-nt/Mission-to-Mars
https://github.com/loc-nt/Mission-to-Mars
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
6,774
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Naive Bayes Model for Newsgroups Data # # For an explanation of the Naive Bayes model, see [our course notes](https://jennselby.github.io/MachineLearningCourseNotes/#naive-bayes). # # This notebook uses code from http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html. # # ## Instructions # 0. If you haven't already, follow [the setup instructions here](https://jennselby.github.io/MachineLearningCourseNotes/#setting-up-python3) to get all necessary software installed. # 0. Read through the code in the following sections: # * [Newgroups Data](#Newgroups-Data) # * [Model Training](#Model-Training) # * [Prediction](#Prediction) # 0. Complete at least one of the following exercises: # * [Exercise Option #1 - Standard Difficulty](#Exercise-Option-#1---Standard-Difficulty) # * [Exercise Option #2 - Advanced Difficulty](#Exercise-Option-#2---Advanced-Difficulty) # + from sklearn.datasets import fetch_20newsgroups # the 20 newgroups set is included in scikit-learn from sklearn.naive_bayes import MultinomialNB # we need this for our Naive Bayes model # These next two are about processing the data. We'll look into this more later in the semester. from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer # - # ## Newgroups Data # # Back in the day, [Usenet](https://en.wikipedia.org/wiki/Usenet_newsgroup) was a popular discussion system where people could discuss topics in relevant newsgroups (think Slack channel or subreddit). At some point, someone pulled together messages sent to 20 different newsgroups, to use as [a dataset for doing text processing](http://qwone.com/~jason/20Newsgroups/). 
# # We are going to pull out messages from just a few different groups to try out a Naive Bayes model. # # Examine the newsgroups dictionary, to make sure you understand the dataset. # # **Note**: If you get an error about SSL certificates, you can fix this with the following: # 1. In Finder, click on Applications in the list on the left panel # 1. Double click to go into the Python folder (it will be called something like Python 3.7) # 1. Double click on the Install Certificates command in that folder # # + # which newsgroups we want to download newsgroup_names = ['comp.graphics', 'rec.sport.hockey', 'sci.electronics', 'sci.space'] # get the newsgroup data (organized much like the iris data) newsgroups = fetch_20newsgroups(categories=newsgroup_names, shuffle=True, random_state=265) newsgroups.keys() # - # This next part does some processing of the data, because the scikit-learn Naive Bayes module is expecting numerical data rather than text data. We will talk more about what this code is doing later in the semester. For now, you can ignore it. # + # Convert the text into numbers that represent each word (bag of words method) word_vector = CountVectorizer() word_vector_counts = word_vector.fit_transform(newsgroups.data) # Account for the length of the documents: # get the frequency with which the word occurs instead of the raw number of times term_freq_transformer = TfidfTransformer() term_freq = term_freq_transformer.fit_transform(word_vector_counts) # - # ## Model Training # # Now we fit the Naive Bayes model to the subset of the 20 newsgroups data that we've pulled out. # Train the Naive Bayes model model = MultinomialNB().fit(term_freq, newsgroups.target) # ## Prediction # # Let's see how the model does on some (very short) documents that we made up to fit into the specific categories our model is trained on. 
# + # Predict some new fake documents fake_docs = [ 'That GPU has amazing performance with a lot of shaders', 'The player had a wicked slap shot', 'I spent all day yesterday soldering banks of capacitors', 'Today I have to solder a bank of capacitors', 'NASA has rovers on Mars'] fake_counts = word_vector.transform(fake_docs) fake_term_freq = term_freq_transformer.transform(fake_counts) predicted = model.predict(fake_term_freq) print('Predictions:') for doc, group in zip(fake_docs, predicted): print('\t{0} => {1}'.format(doc, newsgroups.target_names[group])) probabilities = model.predict_proba(fake_term_freq) print('Probabilities:') print(''.join(['{:17}'.format(name) for name in newsgroups.target_names])) for probs in probabilities: print(''.join(['{:<17.8}'.format(prob) for prob in probs])) # - # # Exercise Option #1 - Standard Difficulty # # Modify the fake documents and add some new documents of your own. # # What words in your documents have particularly large effects on the model probabilities? Note that we're not looking for documents that consist of a single word, but for words that, when included or excluded from a document, tend to change the model's output. # # # # Exercise Option #2 - Advanced Difficulty # # Write some code to count up how often the words you found in the exercise above appear in each category in the training dataset. Does this match up with your intuition?
5,281
/Python/NeuralNet_titanic.ipynb
e5ea9726923eee0cb8bd15695458edd658ab8516
[]
no_license
Dpnia/AI_Titanic
https://github.com/Dpnia/AI_Titanic
0
1
null
null
null
null
Jupyter Notebook
false
false
.py
16,470
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # + import pandas as pd import numpy as np from sklearn.neural_network import MLPClassifier train_data = pd.read_csv("train.csv") # Age 컬럼에 있는 비어있는 row에 Age컬럼의 중앙값을 채워준다. median_age_train = train_data["Age"].median() train_data["Age"] = train_data["Age"].fillna(median_age_train) # Embarked 컬럼에 있는 값들 중에 가장 많이 나온 밸류를 비어있는 row에 채워준다. mst_frq_embarked_train = train_data["Embarked"].value_counts().index[0] train_data["Embarked"] = train_data["Embarked"].fillna(mst_frq_embarked_train) # Embarked 컬럼에 있는 문자열을 int형으로 바꿔준다 Ports = list(enumerate(np.unique(train_data['Embarked']))) Ports_dict = { name : i for i, name in Ports } train_data["Embarked"] = train_data["Embarked"].map(lambda x: Ports_dict[x]).astype(int) # Sex 컬럼에 있는 값들을 Gender 컬럼을 새로 만들어 0,1로 바꿔준다 train_data["Gender"] = 0 train_data["Gender"][train_data["Sex"] == 'male'] = 1 train_data.head() # train_data.info() # train_data.describe() # + test_data = pd.read_csv("test.csv") # Age 컬럼에 있는 비어있는 row에 Age컬럼의 중앙값을 채워준다. median_age_test = test_data["Age"].median() test_data["Age"] = test_data["Age"].fillna(median_age_test) # Embarked 컬럼에 있는 값들 중에 가장 많이 나온 밸류를 비어있는 row에 채워준다. 
mst_frq_embarked_test = test_data["Embarked"].value_counts().index[0] test_data["Embarked"] = test_data["Embarked"].fillna(mst_frq_embarked_test) # Embarked 컬럼에 있는 문자열을 int형으로 바꿔준다 test_data["Embarked"] = test_data["Embarked"].map(lambda x: Ports_dict[x]).astype(int) # Sex 컬럼에 있는 값들을 Gender 컬럼을 새로 만들어 0,1로 바꿔준다 test_data["Gender"] = 0 test_data["Gender"][test_data["Sex"] == 'male'] = 1 # Fare 컬럼에 비어있는 로우에 중앙값으로 채워준다 test_data["Fare"] = test_data["Fare"].fillna(test_data["Fare"].median()) test_data["Survived"] = np.NaN test_data.head() test_data.info() test_data.describe() # + X_train = train_data[["Pclass", "Gender", "Age", "Fare", "Embarked", "SibSp", "Parch"]] y_train = train_data["Survived"] X_test = test_data[["Pclass", "Gender", "Age", "Fare", "Embarked", "SibSp", "Parch"]] y_test = test_data["Survived"] # + clf = MLPClassifier(solver='lbfgs', alpha=1e-5, ... hidden_layer_sizes=(5, 5), random_state=1) clf.fit(X_train, y_train) # print(clf.feature_importances_) print(clf.score(X_train, y_train)) # + clf.predict(X_test) test_data["Survived"] = clf.predict(X_test) # - test_data = pd.DataFrame(test_data, columns=["PassengerId", "Survived"]) test_data.to_csv("neural_network_5.csv", index=False)
2,676
/Computer Vision/Image translation and rotation 190930/참고자료/Affine Transforms In Python.ipynb
44a308fe778fb4a720038dffaa5987cf883a540a
[]
no_license
tenjumh/GraduateSchool
https://github.com/tenjumh/GraduateSchool
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
395,572
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- letterR_url = "https://www.google.com/url?sa=i&source=images&cd=&cad=rja&uact=8&ved=2ahUKEwim6rO0q9fgAhVKeKwKHThBAmAQjRx6BAgBEAU&url=https%3A%2F%2Fwww.typoland.cz%2Fen%2Fwooden-letters-black%2F313-wooden-letter-r-black.html&psig=AOvVaw3j1sPP23kUuim1n7lXLS-U&ust=1551199679589930" # ### Affine Image Transformations in Python with Numpy, Pillow and OpenCV # # In this article I will be describing what it means to apply an affine transformation to an image and how to do it in Python. First I will demonstrate the low level operations in Numpy to give a detailed geometric implementation. Then I will segue those into a more practical usage of the Python Pillow and OpenCV libraries. # # This article was written using a Jupyter notebook and the source can be found at my [GitHub repo](https://github.com/amcquistan/affine-image-transforms-python) so, please feel free to clone / fork it and experiment with the code. # # ### What is an Affine Transformation # # According to [Wikipedia](https://en.wikipedia.org/wiki/Affine_transformation) an affine transformation is a functional mapping between two geometric (affine) spaces which preserve points, straight and parallel lines as well as ratios between points. All that mathy abstract wording boils down is a loosely speaking linear transformation that results in, at least in the context of image processing, one or more manipulations like rotating, flipping, scaling or shearing by applying a transformation matrix. # # One good thing is that since this is essentially a 2D geometric operation we can visualize it. Let me start off by giving a table of affine transformations that describe each type of geometric manipulation. 
# <table> # <thead> # <tr> # <th>Transformation Type</th> # <th>Transformation Matrix</th> # <th>Pixel Mapping Equation</th> # </tr> # </thead> # <tbody> # <tr> # <td>Identity</td> # <td> # $$ # \begin{bmatrix} # 1 & 0 & 0 \\ # 0 & 1 & 0 \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # </td> # <td> # $$x^{'} = x$$ <br> $$y^{'} = y$$ # </td> # </tr> # <tr> # <td>Scaling</td> # <td> # $$ # \begin{bmatrix} # c_{x} & 0 & 0 \\ # 0 & c_{y} & 0 \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # </td> # <td> # $$x^{'} = c_{x} * x$$ <br> $$y^{'} = c_{y} * y$$ # </td> # </tr> # <tr> # <td>Rotation*</td> # <td> # $$ # \begin{bmatrix} # cos \Theta & sin \Theta & 0 \\ # -sin \Theta & cos \Theta & 0 \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # </td> # <td> # $$x^{'} = x * cos \Theta - y * sin \Theta$$ <br> $$y^{'} = x * cos \Theta + y * sin \Theta$$ # </td> # </tr> # <tr> # <td>Translation</td> # <td> # $$ # \begin{bmatrix} # 1 & 0 & t_{x} \\ # 0 & 1 & t_{y} \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # </td> # <td> # $$x^{'} = x + t_{x}$$ <br> $$y^{'} = y + t_{y}$$ # </td> # </tr> # <tr> # <td>Horizontal Shear</td> # <td> # $$ # \begin{bmatrix} # 1 & s_{h} & 0 \\ # 0 & 1 & 0 \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # </td> # <td> # $$x^{'} = x + s_{v} * y$$ <br> $$y^{'} = y$$ # </td> # </tr> # <tr> # <td>Vertical Shear</td> # <td> # $$ # \begin{bmatrix} # 1 & 0 & 0 \\ # s_{v} & 1 & 0 \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # </td> # <td> # $$x^{'} = x$$ <br> $$y^{'} = x * s_{h} + y$$ # </td> # </tr> # </tbody> # </table> # # \* affine transformation uses angle of rotation that is clockwise which is in contrast to the typical geometry unit circle of angles being measured in counter clockwise rotation with 0 starting from the positive X axis, therefore you will see that the negative of the angle is often applied. 
# # ' notation here is just referring to the transformed output coordinate of x or y not the calculus notation for a derivative # For means of simple demonstration I will apply a couple transformations to manipulate the x and y coordinates of the following points which have three dimensional components of x, y and ascii character index similar to the way an image pixel has 3 dimensional components of x, y, and frequency (or intensity). # # a = (0, 1, 0) # b = (1, 0, 1) # c = (0, -1, 2) # d = (-1, 0, 3) # # The transformations for this example will be Scaling by 2 in all directions and rotation of 90 degrees clockwise. First I will perform the transformations individually to show the direct effect each has on moving the points around then I will combine the transformations and apply them in one action. # # To begin I want to build a Numpy array (some may call this a matrix) with each row representing the point where the first column is the x, the second the y, and the third is the index of its letter in the ascii character set similar to the table shown below. Next I use [Matplotlib](https://matplotlib.org/) to plot the points (after applying the unchanging Identity transformation) to give a baseline visual of where we stand. 
# # <table> # <thead> # <tr><th>Point</th><th>x (row)</th><th>y (column)</th><th>ascii index</th></tr> # </thead> # <tbody> # <tr><td>a</td><td>0</td><td>1</td><td>0</td></tr> # <tr><td>b</td><td>1</td><td>0</td><td>1</td></tr> # <tr><td>c</td><td>0</td><td>-1</td><td>2</td></tr> # <tr><td>d</td><td>-1</td><td>0</td><td>3</td></tr> # </tbody> # </table> # + import matplotlib.pyplot as plt import numpy as np import string # points a, b and, c a, b, c, d = (0, 1, 0), (1, 0, 1), (0, -1, 2), (-1, 0, 3) # matrix with row vectors of points A = np.array([a, b, c, d]) # 3x3 Identity transformation matrix I = np.eye(3) # - color_lut = 'rgbc' fig = plt.figure() ax = plt.gca() xs = [] ys = [] for row in A: output_row = I @ row x, y, i = output_row xs.append(x) ys.append(y) i = int(i) # convert float to int for indexing c = color_lut[i] plt.scatter(x, y, color=c) plt.text(x + 0.15, y, f"{string.ascii_letters[i]}") xs.append(xs[0]) ys.append(ys[0]) plt.plot(xs, ys, color="gray", linestyle='dotted') ax.set_xticks(np.arange(-2.5, 3, 0.5)) ax.set_yticks(np.arange(-2.5, 3, 0.5)) plt.grid() plt.show() # The three points a, b, and c plotted on a grid after applying the Identity transformation to them via a simple vector matrix dot product leaving them unchanged. # # I will now move on to creating a scaling transformation matrix T_s (shown below) which scales the placement of the points in all directions. # # $$ # T_s = \begin{bmatrix} # 2 & 0 & 0 \\ # 0 & 2 & 0 \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # # Now I will move on to plotting the transformed points similar to what was done with the original points unaltered by the Identity transformation but, this time I will apply the scaling transformation matrix defined above. For a better visualization, I plot a dotted line connecting the points. 
# + # create the scaling transformation matrix T_s = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 1]]) fig = plt.figure() ax = plt.gca() xs_s = [] ys_s = [] for row in A: output_row = T_s @ row x, y, i = row x_s, y_s, i_s = output_row xs_s.append(x_s) ys_s.append(y_s) i, i_s = int(i), int(i_s) # convert float to int for indexing c, c_s = color_lut[i], color_lut[i_s] # these are the same but, its good to be explicit plt.scatter(x, y, color=c) plt.scatter(x_s, y_s, color=c_s) plt.text(x + 0.15, y, f"{string.ascii_letters[int(i)]}") plt.text(x_s + 0.15, y_s, f"{string.ascii_letters[int(i_s)]}'") xs_s.append(xs_s[0]) ys_s.append(ys_s[0]) plt.plot(xs, ys, color="gray", linestyle='dotted') plt.plot(xs_s, ys_s, color="gray", linestyle='dotted') ax.set_xticks(np.arange(-2.5, 3, 0.5)) ax.set_yticks(np.arange(-2.5, 3, 0.5)) plt.grid() plt.show() # - # From the plot above it should be very clear that the x and y dimensions were simply scaled up by a factor of two while the third dimension responsible for the ascii letter index was left unchanged. In fact, those familiar with matrix algebra will have noticed that for all of the affine transformations listed in the first table the value represented in the third dimension is always left un altered as indicated by the all zeros and lone one value in the third dimension index of the last column. # # Now let me describe how to interpret the rotation transformation. I will start by solving the two trigonometric functions for the desired angle of rotation of 90 degrees then I simply plug them into the rotation transformation matrix listed in the previous table. # # $$ # sin (90^{o}) = 1 # $$ # # $$ # cos (90^{o}) = 0 # $$ # # $$ # T_s = \begin{bmatrix} # 0 & 1 & 0 \\ # -1 & 0 & 0 \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # # Now all I need to do is apply the same logic to transform and plot the points like so. 
# + # create the rotation transformation matrix T_r = np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]]) fig = plt.figure() ax = plt.gca() for row in A: output_row = T_r @ row x_r, y_r, i_r = output_row i_r = int(i_r) # convert float to int for indexing c_r = color_lut[i_r] # these are the same but, its good to be explicit letter_r = string.ascii_letters[i_r] plt.scatter(x_r, y_r, color=c_r) plt.text(x_r + 0.15, y_r, f"{letter_r}'") plt.plot(xs, ys, color="gray", linestyle='dotted') ax.set_xticks(np.arange(-2.5, 3, 0.5)) ax.set_yticks(np.arange(-2.5, 3, 0.5)) plt.grid() plt.show() # - # Hopefully you can tell from the plot that all points were rotated 90 degrees around an axis of rotation at the origin. # # The neat thing about affine transformations being essentially linear transformations is that you can combine the transformations and apply them in one step. To demonstrate this I will apply the dot product (matrix multiplication) of my two transformation matrices like. # # $$ # T_{comb} = \begin{bmatrix} # 0 & 1 & 0 \\ # -1 & 0 & 0 \\ # 0 & 0 & 1 # \end{bmatrix} * # \begin{bmatrix} # 2 & 0 & 0 \\ # 0 & 2 & 0 \\ # 0 & 0 & 1 # \end{bmatrix} = # \begin{bmatrix} # 0 & 2 & 0 \\ # -2 & 0 & 0 \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # # # Now I can apply this combined transformation matrix to the points and replot them to show a combination of scaling by two and rotation by 90 degrees. 
# # # + # create combined tranformation matrix T = T_s @ T_r fig = plt.figure() ax = plt.gca() xs_comb = [] ys_comb = [] for row in A: output_row = T @ row x, y, i = row x_comb, y_comb, i_comb = output_row xs_comb.append(x_comb) ys_comb.append(y_comb) i, i_comb = int(i), int(i_comb) # convert float to int for indexing c, c_comb = color_lut[i], color_lut[i_comb] # these are the same but, its good to be explicit letter, letter_comb = string.ascii_letters[i], string.ascii_letters[i_comb] plt.scatter(x, y, color=c) plt.scatter(x_comb, y_comb, color=c_comb) plt.text(x + 0.15 , y, f"{letter}") plt.text(x_comb + 0.15, y_comb, f"{letter_comb}'") xs_comb.append(xs_comb[0]) ys_comb.append(ys_comb[0]) plt.plot(xs, ys, color="gray", linestyle='dotted') plt.plot(xs_comb, ys_comb, color="gray", linestyle='dotted') ax.set_xticks(np.arange(-2.5, 3, 0.5)) ax.set_yticks(np.arange(-2.5, 3, 0.5)) plt.grid() plt.show() # - # ### Working with an Image # # By I now hope that I've been able to build up some intuition about how affine transformations are used to simply move around points in 2D space so, with that out of the way I'd like to start working with some real image data to give a more concrete demonstration of how all this works. This also allows me to cover another important topic of affine transformations which deals with the third dimension. The third dimension of data in an image represents the actual pixel value or sometimes referred to as the intensity domain where as the physical 2D location of the pixels in the other two dimensions are referred to as the spatial domain. # # To begin I will read in and display an image using matplotlib which is simply a large capital letter R. # + import matplotlib.pyplot as plt import numpy as np import string img = plt.imread('letterR.jpg') img.shape # - # Using the `imread(...)` method I am able to read in the JPG image representing the capital letter R into a numpy ndarray. 
I then display the dimensions of the array which are 1000 rows by 1000 columns together making up 1,000,000 pixels locations in the spatial domain. The individual pixel data is then in the form of an array of 4 unsigned integers representing a red, green, blue and alpha channel (or sample) that together provide the intensity data of each pixel. plt.figure(figsize=(5, 5)) plt.imshow(img) # Next I would like to apply the previous scale and rotation to the spatial domain of the image data thus transforming the pixel locations similar to what I demonstrated earlier with the points data. However, I need to take a slightly different approach because the image data is organized in a different way than that of the rows of data points I worked with earlier. With the image data I need to map the indices for each pixel of the input data to the transformed output indices using the transformation matrix T defined earlier. # + #T_s = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) T_r = np.array([[1, 0, -100], [0, 1, -100], [0, 0, 1]]) #T = T_s @ T_r print(T_r) # 2x scaling requires a tranformation image array 2x the original image img_transformed = np.empty((1000, 1000, 4), dtype=np.uint8) for i, row in enumerate(img): for j, col in enumerate(row): pixel_data = img[i, j, :] input_coords = np.array([i, j, 1]) i_out, j_out, _ = np.dot(T_r, input_coords) img_transformed[i_out, j_out, :] = pixel_data plt.figure(figsize=(5, 5)) plt.imshow(img_transformed) # - # Plotting the image after applying the transformation clearly shows that the original image has been rotated 90 degrees clockwise and scaled up 2X. However, the result is now obviously diminished as you can easily see discontinuity in the pixel intensities. # # To understand the reason for this I will again utilize a simple grid plot for demonstration. Consider a plot of 4 squares in a 2x2 grid similar to the spatial domain of a 2x2 image. 
# + def plot_box(plt, x0, y0, txt, w=1, h=1): plt.scatter(x0, y0) plt.scatter(x0, y0 + h) plt.scatter(x0 + w, y0 + h) plt.scatter(x0 + w, y0) plt.plot([x0, x0, x0 + w, x0 + w, x0], [y0, y0 + h, y0 + h, y0, y0], color="gray", linestyle='dotted') plt.text(x0 + (.33 * w), y0 + (.5 * h), txt) # x0, y0, letter a = np.array((0, 1, 0)) b = np.array((1, 1, 1)) c = np.array((0, 0, 2)) d = np.array((1, 0, 3)) A = np.array([a, b, c, d]) fig = plt.figure() ax = plt.gca() for pt in A: x0, y0, i = I @ pt x0, y0, i = int(x0), int(y0), int(i) plot_box(plt, x0, y0, f"{string.ascii_letters[int(i)]} ({x0}, {y0})") ax.set_xticks(np.arange(-1, 5, 1)) ax.set_yticks(np.arange(-1, 5, 1)) plt.grid() plt.show() # - # Now watch what happens when I apply a 2X scaling transformation as depicted below. Recall that: # # $$ # T_s = \begin{bmatrix} # 2 & 0 & 0 \\ # 0 & 2 & 0 \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # # You will notice that such a spatial transformation results in ... well gaps to put it in simple terms which I've made obvious by plotting question marks along with the coordinates. The 2x2 grid is transformed into a 3x3 grid with the original squares being repositioned based of the linear transformation applied. This means that (0,0) * $T_s$ remains (0,0) because its properties as a 0 vector but, all others are scaled by two such as (1,1) * $T_s$ -> (2,2). # + fig = plt.figure() ax = plt.gca() for pt in A: xt, yt, i = T_s @ pt xt, yt, i = int(xt), int(yt), int(i) plot_box(plt, xt, yt, f"{string.ascii_letters[i]}' ({xt}, {yt})") delta_w, delta_h = 0.33, 0.5 plt.text(0 + delta_w, 1 + delta_h, "? (0, 1)") plt.text(1 + delta_w, 0 + delta_h, "? (1, 0)") plt.text(1 + delta_w, 1 + delta_h, "? (1, 1)") plt.text(1 + delta_w, 2 + delta_h, "? (1, 2)") plt.text(2 + delta_w, 1 + delta_h, "? (2, 1)") ax.set_xticks(np.arange(-1, 5, 1)) ax.set_yticks(np.arange(-1, 5, 1)) plt.grid() plt.show() # - # The question remains of what to do with those gaps that have been introduced? 
An intuitive thought would be to simply look to the original image for the answer. It just so happens that if we apply the inverse of the transformation to a coordinate in the output I will get the corresponding location of the original input.
#
# In matrix notation, this backwards mapping looks like this.
#
# $$
# (x, y, 1) = T_s^{-1} * (x', y', 1)
# $$
#
# where x', y' are the coordinates in the above transformed 3x3 grid, specifically a missing location such as (2, 1), $T_s^{-1}$ (actual values shown below) is the inverse of the 2x scaling matrix $T_s$ and x, y are the coordinates that are found in the original 2x2 grid.
#
# $$
# T_s^{-1} = \begin{bmatrix}
# 1/2 & 0 & 0 \\
# 0 & 1/2 & 0 \\
# 0 & 0 & 1
# \end{bmatrix}
# $$
#
# However, you will soon realize there is a bit of an issue that still needs to be sorted out due to the fact that each of the gap's coordinates map back to fractional values of the 2x2 coordinate system. In the case of image data you can't really have a fraction of a pixel. This will be clearer with an example of mapping the (2, 1) gap back to the original 2x2 space like so.
#
# $$
# T_s^{-1} * (2, 1, 1) = (1, 1/2, 1)
# $$
#
# In this case I will round the y = 1/2 down to 0 and say that that maps to (1, 0). In the general sense this method of selecting a value in the original 2x2 grid to put into the gaps of the transformed 3x3 grid is known as interpolation, and in this specific example I am using a simplified version of the nearest neighbor interpolation method.
#
# Ok, now back to the image data. It should be fairly clear what should be done now to fix those gaps in the scaled and rotated version of the letter R. I must develop an implementation of nearest neighbor interpolation based on the backwards mapping, using the inverse of the transformation matrix T, of the pixel coordinates in the transformed image to find either the exact match or nearest neighbor in the original image.
# +
T_inv = np.linalg.inv(T)


# nearest neighbors interpolation
def nearest_neighbors(i, j, M, T_inv):
    """Backwards-map output pixel (i, j) into the source image M and return
    the value of the closest source pixel (nearest neighbor interpolation).

    Parameters
    ----------
    i, j : int
        Row / column index of a pixel in the *transformed* (output) image.
    M : numpy.ndarray
        The original source image, indexed as (row, col, channels).
    T_inv : numpy.ndarray
        3x3 inverse of the forward affine transformation matrix; maps output
        coordinates back into M's coordinate space.

    Returns
    -------
    numpy.ndarray
        The channel values of the selected source pixel.
    """
    x_max, y_max = M.shape[0] - 1, M.shape[1] - 1
    # backwards mapping: where in the source does this output pixel come from?
    x, y, _ = T_inv @ np.array([i, j, 1])
    if np.floor(x) == x and np.floor(y) == y:
        # exact integer hit -- no rounding required
        x, y = int(x), int(y)
    else:
        # round each coordinate to whichever integer neighbor is closer
        if np.abs(np.floor(x) - x) < np.abs(np.ceil(x) - x):
            x = int(np.floor(x))
        else:
            x = int(np.ceil(x))
        if np.abs(np.floor(y) - y) < np.abs(np.ceil(y) - y):
            y = int(np.floor(y))
        else:
            y = int(np.ceil(y))
    # Clamp to the valid index range. The lower bound matters: an output pixel
    # that maps *outside* the source image yields a negative coordinate, which
    # numpy's negative indexing would silently wrap around to the opposite
    # edge of M instead of sampling the nearest border pixel.
    x = min(max(x, 0), x_max)
    y = min(max(y, 0), y_max)
    return M[x, y]


img_nn = np.empty((2000, 2000, 4), dtype=np.uint8)
for i, row in enumerate(img_transformed):
    for j, col in enumerate(row):
        img_nn[i, j, :] = nearest_neighbors(i, j, img, T_inv)

plt.figure(figsize=(5, 5))
plt.imshow(img_nn)
# -

# Not too shabby right?
#
# I should note that in most cases the nearest neighbor method will not be sufficient. There are two other more common interpolation methods known as bilinear and bicubic interpolation that generally provide much better results. I will speak more about these other interpolation algorithms when introducing the Pillow and OpenCV libraries in latter sections. The purpose of this section is just to build an intuitive understanding of how things work.

# ### Affine Transformations with Pillow
#
# In this section I will be briefly covering how to use the excellent Python image processing library Pillow to perform affine transformations.
#
# First off Pillow will need to be installed. I used pip to accomplish this like so.
#
# ```sh
# pip install pillow
# ```
#
# Now first step is to import the Image class from the PIL (PIL is the name of the python module associated with Pillow) module and read in my image.

from PIL import Image

# To read in the sample image file name "letterR.jpg" I call the class method `Image.open(...)` passing it the filename which returns an instance of the `Image` class which I then convert to a numpy array and display with matplotlib.
img = Image.open('letterR.jpg') plt.figure(figsize=(5, 5)) plt.imshow(np.asarray(img)) # The Pillow `Image` class has a handy method called `transform(...)` that allows you to perform fine grained affine transformations but, there are a few oddities that I must discuss first before I jump into a demonstration of it. The `transform(...)` method begins with two required parameters representing `size` as a tuple of height and width followed by the `method` of transformation to be applied which will be `Image.AFFINE` in this case. # # The remaining parameters are optional keyword arguments that control how the transformation is to be performed. In the case of this example I will be using the `data` parameter which takes the first two rows of an affine transformation matrix. # # For example, the 2x scaling transformation matrix I've been working with trimmed down to just the first two rows looks like this. # # $$ # T_s = \begin{bmatrix} # 2 & 0 & 0 \\ # 0 & 2 & 0 # \end{bmatrix} # $$ # # The last parameter that I will be using with the `transform(...)` method is `resample` which is used to indicate the type of pixel interpolation algorithm to apply out of the possible choices of `Image.NEAREST` (nearest neighbor), `Image.BILINEAR`, or `Image.BICUBIC`. This choice will often vary depending on the transformation being applied. However, bilinear and bicubic generally give better results than nearest neighbor but, as already demonstrated in this example nearest neighbor works quite well. # # There are a few peculiarities that served as real gotchas for me the first time I used the `Image.transform(...)` method particularly around the construction of the affine transformation matrix with the weirdly truncated off last row. Thus, I'd like to spend some time going over why things work the way they do because its a bit of a process. # # First thing that must happen is the image must be translated so that the origin (0, 0) is in the middle of the image. 
In the case of the 1000 x 1000 image of the letter R in this example that means a translation of -500 in the x and y. # # Below I show the generic translation transformation matrix $T_translate$ and the one I'll be using in the example $T_{neg500}$. # # $$ # T_{translate} = \begin{bmatrix} # 1 & 0 & t_x \\ # 0 & 1 & t_y \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # # $$ # T_{neg500} = \begin{bmatrix} # 1 & 0 & -500 \\ # 0 & 1 & -500 \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # # Then there are the 2X scaling $T_scale$ and 90 degree rotation $T_rotate$ matrices from before. However, the Pillow library actually decided to use standard geometric angles (ie, counter clockwise) rather than the clockwise rotations I described earlier so the signs on the of the sin functions flip. Below are the resultant individual transformation matrices. # # $$ # T_{rotate} = \begin{bmatrix} # 0 & -1 & 0 \\ # 1 & 0 & 0 \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # # $$ # T_{scale} = \begin{bmatrix} # 2 & 0 & 0 \\ # 0 & 2 & 0 \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # # Next another translation matrix needs to be applied which acts to reposition the spatial domain of the pixels essentially negating the first one that centered the origin. In this case I need a positive translation of 1000 in the x and y where 1000 comes from twice the original because it has been scaled up by two. # # $$ # T_{pos1000} = \begin{bmatrix} # 1 & 0 & 1000 \\ # 0 & 1 & 1000 \\ # 0 & 0 & 1 # \end{bmatrix} # $$ # # These constitute the individual transformation steps that are required so, all that remains is to multiply the matrices in order (ie, right to left) like so. # # $$ # T = T_{pos1000} * T_{rotate} * T_{scale} * T_{neg500} # $$ # # Ok, so there is actually one last oddity. The `Image.transform(...)` method actually requires the inverse of the transformation matrix be supplied to the `data` parameter as a flattened array (or tuple) excluding the last row. # # $$ # T_{inv} = T^{-1} # $$ # # In code this all works as follows. 
# + # recenter resultant image T_pos1000 = np.array([ [1, 0, 1000], [0, 1, 1000], [0, 0, 1]]) # rotate - opposite angle T_rotate = np.array([ [0, -1, 0], [1, 0, 0], [0, 0, 1]]) # scale T_scale = np.array([ [2, 0, 0], [0, 2, 0], [0, 0, 1]]) # center original to 0,0 T_neg500 = np.array([ [1, 0, -500], [0, 1, -500], [0, 0, 1]]) T = T_pos1000 @ T_rotate @ T_scale @ T_neg500 T_inv = np.linalg.inv(T) # - img_transformed = img.transform((2000, 2000), Image.AFFINE, data=T_inv.flatten()[:6], resample=Image.NEAREST) plt.imshow(np.asarray(img_transformed)) # ### Affine Transformations with OpenCV2 # # Continuing on I would like to briefly describe how to carry out these affine transformations with the popular image processing and computer vision library OpenCV. I use the word brief here because it is largely the same as what is required in the previous description of using Pillow. # # First things first you must install like so. # # ```sh # pip install opencv-python # ``` # # As I mentioned above there is significant overlap in methodology between the Pillow approach and using OpenCV. For example, you still create a transformation matrix that first centers then uncenters the array of pixels to and from the origin and, you only use the first two rows of the transformation matrix. The major difference is that with OpenCV you give it the standard matrix rather than the inverse. # # So, with that understanding laid out I will jump into the code starting with importing the opencv-python module which is named `cv2`. import cv2 # To read in the image is as simple as calling the `cv2.imread(...)` method passing the filename as an argument. This returns the image data in to form of a 3D numpy array similar to how matplotlib works but, the pixel data in the 3rd dimension is comprised of an array of channels in the order of blue, green, red instead of red, green, blue, alpha as was in the case of reading with matplotlib. 
# # Thus, in order to plot the numpy image data originating from the OpenCV library one must reverse the order of the pixel channels. Luckily, OpenCV provides a convince method `cvtColor(...)` that can be used to do this as shown below (although numpy purists are likely to know that `img[:,:,::-1]` will do the same). img = cv2.imread('letterR.jpg') plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) # A few last items to mention are that OpenCV requires the data in the transformation matrix to be of type 32 bit float rather than the default 64 bit float so, be sure to convert down to 32 bit with `numpy.float32(...)`. Also, the API to `cv2.warpAffine(...)` does not provide the ability to specify what type of pixel interpolation algorithm to apply I could not determine from the docs what is used. If you know or find out please post in the comments below. T_opencv = np.float32(T.flatten()[:6].reshape(2,3)) img_transformed = cv2.warpAffine(img, T_opencv, (2000, 2000)) plt.imshow(cv2.cvtColor(img_transformed, cv2.COLOR_BGR2RGB)) # ### Conclusion # # In this article I have covered what an affine transformation is and how it can be applied to image processing using Python. Pure numpy and matplotlib was used to give a low level intuitive description of how affine transformations work then I went on to demonstrate how the same can be done using two popular Python libraries Pillow and OpenCV. # # Thanks for reading and as always don't be shy about commenting or critiquing below.
29,916
/notebooks/analogy_network-gray.ipynb
7b538475427cf40946349f88841813aba12cd0cb
[]
no_license
kanikel/video_prediction
https://github.com/kanikel/video_prediction
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
6,954,858
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd url = 'https://mssi.blueskymss.com/Contractors.aspx  ' tables = pd.read_html(url) tables 2021-03-01T02:45:49.255230", "status": "completed"} # # CIFAR 10 CNN # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 5.368123, "end_time": "2021-03-01T02:45:54.634071", "exception": false, "start_time": "2021-03-01T02:45:49.265948", "status": "completed"} import sys from matplotlib import pyplot from keras.datasets import cifar10 from keras.utils import to_categorical from keras.models import Sequential from keras.layers import Conv2D from keras.layers import MaxPooling2D from keras.layers import Dense from keras.layers import Flatten from keras.layers import Dropout from keras.optimizers import SGD # + papermill={"duration": 0.012502, "end_time": "2021-03-01T02:45:54.652033", "exception": false, "start_time": "2021-03-01T02:45:54.639531", "status": "completed"} def load_dataset(): (trainX, trainY), (testX, testY) = cifar10.load_data() trainY = to_categorical(trainY) testY = to_categorical(testY) return trainX, trainY, testX, testY # + papermill={"duration": 0.011688, "end_time": "2021-03-01T02:45:54.668864", "exception": false, "start_time": "2021-03-01T02:45:54.657176", "status": "completed"} def prep_pixels(train, test): train_norm = train.astype('float32') test_norm = test.astype('float32') train_norm = train_norm / 255.0 test_norm = test_norm / 255.0 return train_norm, test_norm # + papermill={"duration": 0.013186, "end_time": "2021-03-01T02:45:54.687169", "exception": false, "start_time": "2021-03-01T02:45:54.673983", "status": "completed"} def summarize_diagnostics(history): # plot loss pyplot.subplot(211) pyplot.title('Cross Entropy Loss') 
pyplot.plot(history.history['loss'], color='blue', label='train') pyplot.plot(history.history['val_loss'], color='orange', label='test') # plot accuracy pyplot.subplot(212) pyplot.title('Classification Accuracy') pyplot.plot(history.history['accuracy'], color='blue', label='train') pyplot.plot(history.history['val_accuracy'], color='orange', label='test') # + papermill={"duration": 3.025272, "end_time": "2021-03-01T02:45:57.717663", "exception": false, "start_time": "2021-03-01T02:45:54.692391", "status": "completed"} model = Sequential() model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3))) model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(MaxPooling2D((2, 2))) model.add(Dropout(0.2)) model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(MaxPooling2D((2, 2))) model.add(Dropout(0.3)) model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same')) model.add(MaxPooling2D((2, 2))) model.add(Dropout(0.4)) model.add(Flatten()) model.add(Dense(128, activation='relu', kernel_initializer='he_uniform')) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) # compile model opt = SGD(lr=0.001, momentum=0.9) model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy']) # + papermill={"duration": 786.57568, "end_time": "2021-03-01T02:59:04.302105", "exception": false, "start_time": "2021-03-01T02:45:57.726425", "status": "completed"} # load dataset trainX, trainY, testX, testY = load_dataset() # prepare pixel data trainX, testX = prep_pixels(trainX, testX) # fit model history = model.fit(trainX, trainY, epochs=200, batch_size=64, 
validation_data=(testX, testY), verbose=0) # evaluate model _, acc = model.evaluate(testX, testY, verbose=0) print('Accuracy: %.3f' % (acc * 100.0)) # + papermill={"duration": 0.263896, "end_time": "2021-03-01T02:59:04.587887", "exception": false, "start_time": "2021-03-01T02:59:04.323991", "status": "completed"} # learning curves summarize_diagnostics(history) onv_5_1 = conv_(pool_4, 512, 'conv5_1') conv_5_2 = conv_(conv_5_1, 512, 'conv5_2') conv_5_3 = conv_(conv_5_2, 512, 'conv5_3') pool_5 = pool_(conv_5_3) flattened = tf.contrib.layers.flatten(pool_5) fc_6 = dropout(fc(flattened, 4096, 'fc6'), keep_prob) fc_7 = fc(fc_6, 4096, 'fc7', relu=False) return fc_7 def vgg_simple(input, keep_prob = 0.5): pool_ = lambda x: max_pool(x, 2, 2) conv_ = lambda x, output_depth, name: conv(x, 3, output_depth, 1, name=name) conv_1_1 = conv_(input, 16, 'conv1_1') pool_1 = pool_(conv_1_1) conv_2_1 = conv_(pool_1, 32, 'conv2_1') pool_2 = pool_(conv_2_1) conv_3_1 = conv_(pool_2, 64, 'conv3_1') pool_3 = pool_(conv_3_1) conv_4_1 = conv_(pool_3, 64, 'conv4_1') pool_4 = pool_(conv_4_1) conv_5_1 = conv_(pool_4, 64, 'conv5_1') pool_5 = pool_(conv_5_1) flattened = tf.contrib.layers.flatten(pool_5) fc_6 = dropout(fc(flattened, 4096, 'fc6'), keep_prob) fc_7 = fc(fc_6, 4096, 'fc7', relu=False) return fc_7 # + code_folding=[51] class Generator(object): def __init__(self): self.train_variables = [] self.has_defined_layers = False self.has_defined_C1 = False self.has_defined_vgg_ = False def init_network(self, discriminator): self.p_t_n = tf.placeholder(tf.float32, [None, 224, 224,L]) self.p_t = tf.placeholder(tf.float32, [None, 224, 224,L]) self.x_t = tf.placeholder(tf.float32, [None, 224, 224, 1]) self.x_t_n = tf.placeholder(tf.float32, [None, 224, 224, 1]) self.x_t_n_predicted = self.get_output_tensor(self.p_t_n, self.p_t, self.x_t) mean_l2 = lambda x, y: tf.reduce_mean(tf.squared_difference(x, y)) self.l2_loss = tf.check_numerics(mean_l2(self.x_t_n, self.x_t_n_predicted), "l2") self.feat_loss 
= tf.check_numerics(mean_l2(self.C1(self.x_t), self.C1(self.x_t_n_predicted)), "feat") # feat_loss = mean_l2(self.C1(self.x_t), self.C1(self.x_t_n_predicted))+mean_l2(self.vgg(self.x_t), self.vgg(self.x_t_n_predicted)) self.adv_loss = tf.check_numerics(-tf.reduce_mean(tf.log(discriminator.get_output_tensor(self.x_t_n_predicted, self.p_t_n))), "adv") self.loss = self.l2_loss + 0.1*self.feat_loss + 0.1*self.adv_loss self.opt = tf.train.AdamOptimizer(learning_rate=1e-5).minimize(self.loss, var_list=self.train_variables) # self.check_op = tf.add_check_numerics_ops() def get_output_tensor(self, p_t_n, p_t, x_t): with tf.variable_scope('generator', reuse=self.has_defined_layers): p_t_n_latent = self.f_pose(p_t_n) # latent = p_t_n_latent - self.f_pose(p_t, force_reuse=True) + self.f_img(x_t) latent = tf.concat((p_t_n_latent, self.f_pose(p_t, force_reuse=True), self.f_img(x_t)), axis = 1) output = self.f_dec(latent) if not self.has_defined_layers: self.train_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator') self.has_defined_layers = True return output def f_pose(self, input, force_reuse=False): ''' Applies f_pose function to the input tensor to get an output. Should be similar to VGG architecture ''' with tf.variable_scope('f_pose', reuse=(self.has_defined_layers or force_reuse)): return vgg_simple(input) def f_img(self, input): ''' Applies f_img function to the input tensor to get an output. Should be exactly VGG architecture ''' with tf.variable_scope('f_img', reuse=self.has_defined_layers): return vgg(input, process_input=True) def f_dec(self, input): ''' Applies f_dec function to the input tensor to get an output. 
''' with tf.variable_scope('f_dec', reuse=self.has_defined_layers): reshaped = tf.reshape(input, shape=[tf.shape(input)[0], 1, 1, 4096*3]) deconv_6_2 = deconv_layer(reshaped, 7, 128, 1, 'deconv6_2', padding='VALID') deconv_6_1 = deconv_layer(deconv_6_2, 3, 128, 2, 'deconv6_1') deconv_5_2 = deconv_layer(deconv_6_1, 3, 128, 1, 'deconv5_2') deconv_5_1 = deconv_layer(deconv_5_2, 3, 128, 2, 'deconv5_1') deconv_4_3 = deconv_layer(deconv_5_1, 3, 128, 1, 'deconv4_3') deconv_4_2 = deconv_layer(deconv_4_3, 3, 128, 1, 'deconv4_2') deconv_4_1 = deconv_layer(deconv_4_2, 3, 64, 2, 'deconv4_1') deconv_3_3 = deconv_layer(deconv_4_1, 3, 64, 1, 'deconv3_3') deconv_3_2 = deconv_layer(deconv_3_3, 3, 64, 1, 'deconv3_2') deconv_3_1 = deconv_layer(deconv_3_2, 3, 32, 2, 'deconv3_1') deconv_2_2 = deconv_layer(deconv_3_1, 3, 32, 1, 'deconv2_2') deconv_2_1 = deconv_layer(deconv_2_2, 3, 16, 2, 'deconv2_1') deconv_1_2 = deconv_layer(deconv_2_1, 3, 16, 1, 'deconv1_2') deconv_1_1 = deconv_layer(deconv_1_2, 3, 1, 1, 'deconv1_1') return deconv_1_1 def C1(self, input): input = tf.image.resize_images(input, [227, 227]) with tf.variable_scope('C1', reuse=self.has_defined_C1): conv1 = conv(input, 11, 96, 4, padding='VALID', name='conv1', trainable=False) pool1 = max_pool(conv1, 3, 2, padding='VALID', name='pool1') norm1 = lrn(pool1, 2, 2e-5, 0.75, name='norm1') conv2 = conv(norm1, 5, 256, 1, groups=2, name='conv2', trainable=False) pool2 = max_pool(conv2, 3, 2, padding='VALID', name='pool2') norm2 = lrn(pool2, 2, 2e-5, 0.75, name='norm2') conv3 = conv(norm2, 3, 384, 1, name='conv3', trainable=False) conv4 = conv(conv3, 3, 384, 1, groups=2, name='conv4', trainable=False) conv5 = conv(conv4, 3, 256, 1, groups=2, name='conv5', trainable=False) self.has_defined_C1 = True return conv5 # def vgg(self, input): # with tf.variable_scope('f_vgg', reuse=self.has_defined_vgg_): # self.has_defined_vgg_ = True # return vgg(input, process_input=True) def init_weights(self, sess, alexnet_file, vgg_file): weights_dict 
= np.load(alexnet_file, encoding='bytes').item() with tf.variable_scope('C1', reuse=True): for layer in ['conv2', 'conv3', 'conv4', 'conv5']: with tf.variable_scope(layer): W_value, b_value = weights_dict[layer] W = tf.get_variable('W', trainable=False) b = tf.get_variable('b', trainable=False) sess.run(W.assign(W_value)) sess.run(b.assign(b_value)) weights_dict = np.load(vgg_file, encoding='bytes').item() weights_dict = { key.decode('ascii') : value for key, value in weights_dict.items() } with tf.variable_scope('generator/f_img', reuse=True): for layer in [ 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1', 'conv5_2', 'conv5_3', 'fc6', 'fc7']: with tf.variable_scope(layer): W_value, b_value = weights_dict[layer] W = tf.get_variable('W') b = tf.get_variable('b') sess.run(W.assign(W_value)) sess.run(b.assign(b_value)) # with tf.variable_scope('f_vgg', reuse=True): # for layer in [ 'conv1_2', # 'conv2_1', 'conv2_2', # 'conv3_1', 'conv3_2', 'conv3_3', # 'conv4_1', 'conv4_2', 'conv4_3', # 'conv5_1', 'conv5_2', 'conv5_3', # 'fc6', 'fc7']: # with tf.variable_scope(layer): # W_value, b_value = weights_dict[layer] # W = tf.get_variable('W') # b = tf.get_variable('b') # sess.run(W.assign(W_value)) # sess.run(b.assign(b_value)) def fit_batch(self, sess, p_t, p_t_n, x_t, x_t_n): _, loss = sess.run((self.opt, self.loss), feed_dict={ self.p_t : p_t, self.p_t_n : p_t_n, self.x_t : x_t, self.x_t_n : x_t_n }) return loss def test_batch(self, sess, p_t, p_t_n, x_t, x_t_n): output = sess.run((self.x_t_n_predicted), feed_dict={ self.p_t : p_t, self.p_t_n : p_t_n, self.x_t : x_t, self.x_t_n : x_t_n }) return output #def test_gen(self, sess, p_t, p_t_n, x_t): class Discriminator(object): def __init__(self): self.train_variables = [] self.has_defined_layers = False def init_network(self, discriminator): self.p_t = tf.placeholder(tf.float32, [None, 224, 224,L]) self.p_t_n = tf.placeholder(tf.float32, [None, 224, 224,L]) self.x_t 
= tf.placeholder(tf.float32, [None, 224, 224, 1]) self.x_t_n = tf.placeholder(tf.float32, shape=[None, 224, 224, 1]) x_t_n_real = self.x_t_n x_t_n_pred = generator.get_output_tensor(self.p_t_n, self.p_t, self.x_t) real_prob = self.get_output_tensor(x_t_n_real, self.p_t_n) fake_prob = self.get_output_tensor(x_t_n_pred, self.p_t_n) real_mismatch_prob = self.get_output_tensor(self.x_t, self.p_t_n) self.loss = -tf.reduce_mean(tf.log(real_prob) + 0.5 * tf.log(1 - fake_prob) + 0.5 * tf.log(1 - real_mismatch_prob)) self.opt = tf.train.AdamOptimizer(learning_rate=1e-6).minimize(self.loss, var_list=self.train_variables) # self.check_op = tf.add_check_numerics_ops() def get_output_tensor(self, x, p): with tf.variable_scope('discriminator', reuse=self.has_defined_layers): with tf.variable_scope('f_img'): vgg_x = vgg(x) with tf.variable_scope('f_pose'): vgg_p = vgg_simple(p) concat = tf.concat([vgg_x, vgg_p], axis=1) fc8 = fc(concat, 1024, name='fc8') output = tf.nn.sigmoid(fc(fc8, 1, name='fc9', relu=False)) if not self.has_defined_layers: self.train_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator') self.has_defined_layers = True return output def init_weights(self, sess, alexnet_file, vgg_file): weights_dict = np.load(vgg_file, encoding='bytes').item() weights_dict = { key.decode('ascii') : value for key, value in weights_dict.items() } with tf.variable_scope('discriminator/f_img', reuse=True): for layer in [ 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1', 'conv5_2', 'conv5_3', 'fc6', 'fc7']: with tf.variable_scope(layer): W_value, b_value = weights_dict[layer] W = tf.get_variable('W') b = tf.get_variable('b') sess.run(W.assign(W_value)) sess.run(b.assign(b_value)) def fit_batch(self, sess, p_t, p_t_n, x_t, x_t_n): _, loss = sess.run((self.opt, self.loss), feed_dict={ self.p_t : p_t, self.p_t_n : p_t_n, self.x_t : x_t, self.x_t_n : x_t_n }) return loss # + # 
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='') # + code_folding=[] hide_input=false def restore(generator, discriminator, checkpoint): var_list = generator.train_variables + discriminator.train_variables # saver = tf.train.Saver(var_list={var.name.split(':')[0].replace(, 'alexnet') : var for var in var_list}) saver.restore(sess, checkpoint) # - L = 13 # + tf.reset_default_graph() sess = tf.Session() generator = Generator() discriminator = Discriminator() generator.init_network(discriminator) discriminator.init_network(generator) sess.run(tf.global_variables_initializer()) generator.init_weights(sess, 'models/alexnet.npy', 'models/vgg16.npy') discriminator.init_weights(sess, 'models/alexnet.npy', 'models/vgg16.npy') # + # Create dictionary of squat videos. # Key = video number, Value = list of frames (numpy array images) of video # mod 0-8 for training, mod 9 for test def process_videos(mod): videos = {} videos_test = {} i = 0 for video in os.listdir('squats/'): if i % 9 != mod: videos[video] = [] for frame in os.listdir('squats/' + str(video) + '/'): filename = 'squats/' + str(video) + '/' + str(frame) videos[video].append(imread(filename, flatten=True).reshape((224,224,1)).astype('uint8')) else: videos_test[video] = [] for frame in os.listdir('squats/' + str(video) + '/'): filename = 'squats/' + str(video) + '/' + str(frame) videos_test[video].append(imread(filename, flatten=True).reshape((224,224,1)).astype('uint8')) i = i + 1 # Create dictionary of heatmat labels for squat videos. 
# For L = 13 # Key = video number, Value = list of stack of joints (numpy array images (224x224x13)) if L == 13: labels = {} labels_test = {} for video in os.listdir('squats_labels_multiple/'): if video in videos: labels[video] = [] for frame in os.listdir('squats_labels_multiple/' + str(video) + '/'): frame_folder = 'squats_labels_multiple/' + str(video) + '/' + str(frame) + '/' temp_image_stack = np.zeros((224,224,13)).astype('uint8') for i in range(13): temp_image_stack[:,:,i] = imread(frame_folder + str(i)+ '.jpg').astype('uint8') labels[video].append(temp_image_stack) elif video in videos_test: labels_test[video] = [] for frame in os.listdir('squats_labels_multiple/' + str(video) + '/'): frame_folder = 'squats_labels_multiple/' + str(video) + '/' + str(frame) + '/' temp_image_stack = np.zeros((224,224,13)).astype('uint8') for i in range(13): temp_image_stack[:,:,i] = imread(frame_folder + str(i)+ '.jpg').astype('uint8') labels_test[video].append(temp_image_stack) # For L = 1 # Key = video number, Value = list of heatmaps for each frame (numpy array images (224x224x1)) elif L == 1: labels = {} labels_test = {} for video in os.listdir('squats_labels/'): if video in videos: labels[video] = [] for frame in os.listdir('squats_labels/' + str(video) + '/'): filename = 'squats_labels/' + str(video) + '/' + str(frame) labels[video].append(imread(filename).reshape((224,224,1))) elif video in videos_test: labels_test[video] = [] for frame in os.listdir('squats_labels/' + str(video) + '/'): filename = 'squats_labels/' + str(video) + '/' + str(frame) labels_test[video].append(imread(filename).reshape((224,224,1))) return videos, videos_test, labels, labels_test # + # f1,f2,h1,h2 = create_minibatch(5) # print(sess.run((generator.adv_loss, generator.l2_loss, generator.feat_loss), feed_dict={ generator.p_t : h1, generator.p_t_n : h2, generator.x_t : f1, generator.x_t_n :f2 }))# print(gen_loss) # # for key in labels.keys(): # # for frame in range(len(labels[key])): # if 
len(np.unique(labels[key][frame][:,:,0])) == 1: # print(np.unique(labels[key][frame][:,:,0])) # for key in videos.keys(): # for frame in range(len(videos[key])): # if len(np.unique(videos[key][frame][:,:,0])) == 1: # print(np.unique(videos[key][frame][:,:,0])) # + def create_minibatch(batch_size): frames1 = [] heatmaps1 = [] frames2 = [] heatmaps2 = [] for i in range(batch_size): rand_video = videos.keys()[random.randint(0,len(videos.keys())-1)] rand_int = random.randint(0,len(videos[rand_video])-1) frames1.append(videos[rand_video][rand_int]) heatmaps1.append(labels[rand_video][rand_int]) rand_int = random.randint(0,len(videos[rand_video])-1) frames2.append(videos[rand_video][rand_int]) heatmaps2.append(labels[rand_video][rand_int]) return frames1, frames2, heatmaps1, heatmaps2 def create_minibatch_test(batch_size): frames1 = [] heatmaps1 = [] frames2 = [] heatmaps2 = [] for i in range(batch_size): rand_video = videos_test.keys()[random.randint(0,len(videos_test.keys())-1)] rand_int = random.randint(0,len(videos_test[rand_video])-1) frames1.append(videos_test[rand_video][rand_int]) heatmaps1.append(labels_test[rand_video][rand_int]) rand_int = random.randint(0,len(videos_test[rand_video])-1) frames2.append(videos_test[rand_video][rand_int]) heatmaps2.append(labels_test[rand_video][rand_int]) return frames1, frames2, heatmaps1, heatmaps2 # - videos, videos_test, labels, labels_test = process_videos(1) # + # saver = tf.train.Saver() # saver.restore(sess, '/media/jeff/WD HDD/CS280/models/test') # saver = tf.train.Saver() # saver.restore(sess, '/media/jeff/WD HDD/CS280/models/gray_nodropout_10_1_1-49') # saver.restore(sess, '/media/jeffzhang/WD HDD/model/multi-labels-100-50-5/multi-labels-test2-227') # + epochs = 300 n_samples = 1000 batch_size = 4 display_step = 1 save_model = 50 # videos, labels = {},{} mean_gen_losses = [] mean_disc_losses = [] mod = 0 for epoch in range(epochs): total_iter = n_samples // batch_size total_gen_loss = 0 total_disc_loss = 0 for i in 
range(total_iter): f1,f2,h1,h2 = create_minibatch(batch_size) gen_loss = generator.fit_batch(sess,h1,h2,f1,f2) disc_loss = discriminator.fit_batch(sess,h1,h2,f1,f2) total_gen_loss += gen_loss total_disc_loss += disc_loss mean_gen_loss = total_gen_loss / total_iter mean_disc_loss = total_disc_loss / total_iter mean_gen_losses.append(mean_gen_loss) mean_disc_losses.append(mean_disc_loss) if (epoch + 1) % display_step == 0: print('epoch %s: gen_loss=%.4f, disc_loss=%.4f' % (epoch + 1, mean_gen_loss, mean_disc_loss)) if (epoch + 1) % save_model == 0: saver = tf.train.Saver() saver.save(sess, '/media/jeff/WD HDD/CS280/models/gray_13_10_1_1',global_step=epoch) # + epochs = 192 n_samples = 1000 batch_size = 4 display_step = 1 save_model = 50 # videos, labels = {},{} mean_gen_losses = [] mean_disc_losses = [] mod = 0 for epoch in range(epochs): total_iter = n_samples // batch_size total_gen_loss = 0 total_disc_loss = 0 for i in range(total_iter): f1,f2,h1,h2 = create_minibatch(batch_size) gen_loss = generator.fit_batch(sess,h1,h2,f1,f2) disc_loss = discriminator.fit_batch(sess,h1,h2,f1,f2) total_gen_loss += gen_loss total_disc_loss += disc_loss mean_gen_loss = total_gen_loss / total_iter mean_disc_loss = total_disc_loss / total_iter mean_gen_losses.append(mean_gen_loss) mean_disc_losses.append(mean_disc_loss) if (epoch + 1) % display_step == 0: print('epoch %s: gen_loss=%.4f, disc_loss=%.4f' % (epoch + 1, mean_gen_loss, mean_disc_loss)) if (epoch + 1) % save_model == 0: saver = tf.train.Saver() saver.save(sess, '/media/jeff/WD HDD/CS280/models/gray_13_10_1_1',global_step=108+epoch) # - for i in range(4): plt.imshow(f1[i][:,:,0]) plt.show() plt.imshow(f2[i][:,:,0]) plt.show() plt.imshow(h1[i][:,:,0]) plt.show() plt.imshow(h2[i][:,:,0]) plt.show() # + # saver = tf.train.Saver() # saver.restore(sess, '/media/jeffzhang/WD HDD/model/multi-labels-test2') # - total_test = 5 for i in range(total_test): f1,f2,h1,h2 = create_minibatch(3) gen_img = 
generator.test_batch(sess,h1,h2,f1,f2) for i in range(3): plt.figure(figsize=(15,4)) plt.subplot(131) plt.imshow(f1[i].reshape((224,224)), cmap = 'gray') plt.subplot(132) plt.imshow(f2[i].reshape((224,224)), cmap = 'gray') plt.subplot(133) plt.imshow(gen_img[i].reshape((224,224)), cmap = 'gray') plt.show() total_test = 5 for i in range(total_test): f1,f2,h1,h2 = create_minibatch_test(3) gen_img = generator.test_batch(sess,h1,h2,f1,f2) for i in range(3): plt.figure(figsize=(15,4)) plt.subplot(131) plt.imshow(f1[i].reshape((224,224)), cmap = 'gray') plt.subplot(132) plt.imshow(f2[i].reshape((224,224)), cmap = 'gray') plt.subplot(133) plt.imshow(gen_img[i].reshape((224,224)), cmap = 'gray') plt.show()
25,650
/JessUP/ML2/GraficasBayesiano/Graficas/.ipynb_checkpoints/Graficas-checkpoint.ipynb
c1cbf3a911366dbd0ac663a1a70db6d2c20b13f0
[]
no_license
fou-foo/DSenCDMX
https://github.com/fou-foo/DSenCDMX
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
4,832
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Project 04 Data Acquisition import twint import numpy as np import pandas as pd import nest_asyncio nest_asyncio.apply() import functools from sklearn.feature_extraction.text import CountVectorizer from sklearn.decomposition import NMF # + # Make better use of Jupyter Notebook cell width from IPython.core.display import display, HTML display(HTML("<style>.container { width:95% !important; }</style>")) pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) pd.set_option('display.max_colwidth', 400) pd.options.display.float_format = '{:,.10f}'.format # - # ## Pull in sample data from Twitter using Twint # ...showing sample extracts below # + #configuration config = twint.Config() config.Search = "@VAVetBenefits" config.Lang = "en" config.Limit = 1000 #config.Since = '2020–12–18' #config.Until = time_until config.Store_csv = True config.Output = "custom_out_vba_sample.csv" #running search twint.run.Search(config) # + #configuration config = twint.Config() config.Search = "@DeptVetAffairs" config.Lang = "en" config.Limit = 1000 #config.Since = '2020–12–18' #config.Until = time_until config.Store_csv = True config.Output = "custom_out_VA_sample.csv" #running search twint.run.Search(config) # + #configuration config = twint.Config() config.Search = "@VeteransHealth" config.Lang = "en" config.Limit = 1000 #config.Since = '2020–12–18' #config.Until = time_until config.Store_csv = True config.Output = "custom_out_VHA_sample2.csv" #running search vha_sample = twint.run.Search(config) # - # ## Create Combined Dataset vba_full = pd.read_csv('/Users/arcarter/Git_Repos/project_04/custom_out_vba.csv') vha_full = pd.read_csv('/Users/arcarter/Git_Repos/project_04/custom_out_VHA.csv') va_full = 
pd.read_csv('/Users/arcarter/Git_Repos/project_04/custom_out_VA.csv') va_full.shape # ## Create new columns for VBA vs VHA vs VA Tweets # (Allowing multiple to be selected for same tweet) vba_full['VBA Tweet'] = 'Yes' vba_full_slim = pd.DataFrame(vba_full[["tweet", "id","username","name","date","time","VBA Tweet"]]) vha_full['VHA Tweet'] = 'Yes' vha_full_slim = pd.DataFrame(vha_full[["tweet", "id","username","name","date","time","VHA Tweet"]]) va_full['VA Tweet'] = 'Yes' va_full_slim = pd.DataFrame(va_full[["tweet", "id","username","name","date","time","VA Tweet"]]) vba_cols = list(vba_full_slim.columns) vha_cols = list(vha_full_slim.columns) va_cols = list(va_full_slim.columns) no_tweet_vba = vba_cols[:-1] no_tweet_vha = vha_cols[:-1] no_tweet_va = va_cols[:-1] # ## Merge all versions # ...and clean up shared columns merged_all_slim = pd.merge(left=vba_full_slim, right=va_full_slim, how='outer', left_on=no_tweet_vba, right_on=no_tweet_va) merged_all_slim.sample(10); merged_all_slim.shape merged_all_slim[merged_all_slim.username.str.contains("revolutapp")].shape merged_all_slim = merged_all_slim[~merged_all_slim.username.str.contains("revolutapp")] merged_all_slim.shape merged_all_w_vha_slim = pd.merge(left=merged_all_slim, right=vha_full_slim, how='outer', left_on=no_tweet_vba, right_on=no_tweet_vha) merged_all_w_vha_slim.head(5) merged_all_w_vha_slim.shape merged_all_w_vha_slim = merged_all_w_vha_slim[~merged_all_w_vha_slim.tweet.str.contains("RevolutApp")] merged_all_w_vha_slim = merged_all_w_vha_slim[~merged_all_w_vha_slim.username.str.contains("revolutapp")] merged_all_w_vha_slim.shape merged_all_w_vha_slim.loc[merged_all_w_vha_slim['VBA Tweet'] == 'Yes', 'VBA Tweet'] = 1 merged_all_w_vha_slim.loc[merged_all_w_vha_slim['VA Tweet'] == 'Yes', 'VA Tweet'] = 1 merged_all_w_vha_slim.loc[merged_all_w_vha_slim['VHA Tweet'] == 'Yes', 'VHA Tweet'] = 1 merged_all_w_vha_slim['VBA Tweet'] = merged_all_w_vha_slim['VBA Tweet'].fillna(0) merged_all_w_vha_slim['VA Tweet'] = 
merged_all_w_vha_slim['VA Tweet'].fillna(0) merged_all_w_vha_slim['VHA Tweet'] = merged_all_w_vha_slim['VHA Tweet'].fillna(0) merged_all_w_vha_slim.head(5) merged_all_w_vha_slim[merged_all_w_vha_slim['username'].str.contains("247fubar")].head(100) # ## Send version to CSV # ...and Pickle merged_all_w_vha_slim.to_csv(index=False,path_or_buf='/Users/arcarter/Git_Repos/project_04/merged_all_w_vha_slim.csv') merged_all_w_vha_slim.shape import pickle pickle.dump(merged_all_w_vha_slim, open("merged_all_w_vha_slim.p", "wb" ) ) merged_all_w_vha_slim_pickle = pickle.load( open( "merged_all_w_vha_slim.p", "rb" ) ) merged_all_w_vha_slim.head() cience/dataDSI/nyc_flight_data.csv") #take a look df_flights # + [markdown] id="Ub1yQgpOiEk5" # Download the data with the destination [information](https://drive.google.com/file/d/1-ACJcTJkGlHG_lNqeNIjACh6Dt7NHxBZ/view?usp=sharing). # + id="34_vv2LeZFG_" #place the file in your google drive folder, and load it into colab using pd.read_cvs() df_dest = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/DataScience/dataDSI/destination_size.csv') #take a look df_dest # + [markdown] id="spO8A8QNpp-Y" # <img src='http://drive.google.com/uc?export=view&id=1WC4tXGCEF-1_2LQ74gIxJAZ-GLXCwBdK' width="100" align = 'left'> # + [markdown] id="0AwRFGZGZjd1" # Can you merge the two dataframes (df_flights and df_dest) so that you'll have all the destination airport sizes. # > This might be useful to compare arrival delays of individual flights to the size of the airport? # + id="4dGKh3HCZWVA" #join the two dataframes so that you'll have pd.merge(?) # + [markdown] id="8X0bL0Gpp-Ic" # What kind of join did you use? # + [markdown] id="lknGnsz9i0Ra" # By merging the two dataframes we now have the amount of delay along with the size of the destination airport. We could then look at the relationship between the two using a figure or a model. 
# + [markdown] id="6V4xVe65TyDm" # Further reading # # https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html # #
6,088
/.ipynb_checkpoints/MULTIVARIADO_DATASET-checkpoint.ipynb
d37f07ac1063e92d5d7066a34092912c23a9ab8e
[]
no_license
frluenga/R-code-CCA
https://github.com/frluenga/R-code-CCA
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
20,567
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tratamiento Base import pandas as pd import numpy as np df = pd.read_csv('GlobalLandTemperaturesByCountry.csv') df['year'] = pd.to_datetime(df['dt']) idx = df['year'] > pd.to_datetime('1970-01-01') df_temp = df[idx] df_t_avg = df_temp.groupby(['Country', pd.Grouper(key='year',freq='1Y')]).agg({'AverageTemperature':[np.mean,np.median]}) df_t = df_t_avg['AverageTemperature'][['median']].reset_index() df_t['date'] = df_t['year'].dt.year df_t.rename(columns={'median':'temperature'},inplace=True) df_agri = pd.read_csv('AGRI.K2_DS2.csv', header = 2) df_fore = pd.read_csv('LND.FRST.K2_DS2.csv', header = 2) df_elec = pd.read_csv('USE.ELEC.KH.PC_DS2.csv', header = 2) df_co2e = pd.read_csv('ATM.CO2E.KT_DS2.csv', header = 2) df_popu = pd.read_csv('POP.TOTL_DS2.csv', header = 2) # map(str,range(1971,2015)) ## lista de todos los años que se desea analizar en formato string cols= ['Country Name','Country Code'] + list(map(str,range(1971,2015))) # lista de columnas que deseo extraer de los dataframe def fun_format(df,col='agriculture'): return df.loc[:,cols].melt(id_vars=['Country Name','Country Code']).rename( columns ={'variable':'date', 'Country Name':'Country', 'Country Code':'name', 'value':col}) df_agri = fun_format(df_agri,col='agriculture') df_fore = fun_format(df_fore,col='forest') df_elec = fun_format(df_elec,col='electricprod') df_co2e = fun_format(df_co2e,col='co2') df_popu = fun_format(df_popu,col='population') df_popu['date'] = df_popu['date'].astype(float) df_fore['date'] = df_fore['date'].astype(float) df_agri['date'] = df_agri['date'].astype(float) df_elec['date'] = df_elec['date'].astype(float) df_co2e['date'] = df_co2e['date'].astype(float) # + df_merge = pd.merge(df_t[['Country','temperature','date']], df_popu, on = 
['Country','date'],how='inner') df_merge = pd.merge(df_merge, df_co2e, on = ['Country','name','date'], how = 'inner') df_merge = pd.merge(df_merge, df_fore, on = ['Country','name','date'], how = 'inner') df_merge = pd.merge(df_merge, df_elec, on = ['Country','name','date'], how = 'inner') df_merge = pd.merge(df_merge, df_agri, on = ['Country','name','date'], how = 'inner') # - df_merge df_climate = df_merge.dropna() dir_pandas = '~/Documents/Estadistica/Multivariado/Taller 3/{}'.format('CCA.csv') dir_pandas df_climate df_climate.to_csv(dir_pandas, index = False)
2,951
/week14/Day_80_Lecture_1_Assignment.ipynb
ca623a15f41d60fc0ce6700864fb7a012e6b7a9b
[]
no_license
ssmcnatt/thinkful-pair-work
https://github.com/ssmcnatt/thinkful-pair-work
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
7,143
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="s3b2_cqaKWkw" # ## Long Short-Term Memory # # In this assignment, we will learn about LSTM models. We will create an LSTM model for time series prediction. # + id="k0a6OKp4KWkz" import numpy as np import os import pandas as pd from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, LSTM, Dropout, Flatten from tensorflow.keras.utils import to_categorical # + id="mne4psN7CpYz" # + [markdown] id="UY5p0gThKWk1" # Below is a function for loading time series data collected by sensors. There are 9 different files, We have data about body acceleration, body gyro, and total acceleration for the x, y, and z axis # + id="kAhD_8wwKWk2" def load_func(path, file_ind=False): data_list = [] if file_ind: filenames = [path] else: files = os.listdir(path) filenames = [path + '/' + f for f in files] for f in filenames: dataframe = pd.read_csv(f, header=None, delim_whitespace=True) data_list.append(dataframe.values) if len(data_list) > 1: return np.dstack(data_list) else: return data_list[0] # + id="06u-u8e4KWk4" outputId="ed92dc7b-3ace-4253-9096-7b3efbf69f62" os.listdir('./train/Inertial Signals/') # + id="c5_0OBVeKWk7" X_train = load_func("./train/Inertial Signals") X_test = load_func("./test/Inertial Signals") y_train_cat = load_func('./train/y_train.txt', True) y_test_cat = load_func('./test/y_test.txt', True) # + [markdown] id="Rgw_29IQKWk8" # Print the dimensions of both the predictor variables and the target. # + id="BBiwN-zUKWk9" # Answer below: # + [markdown] id="2rL-Y9KFKWk-" # The target variable is categorical. One hot encode the target variable. 
# + id="2BzBC9QNKWk_" # Answer below: # + [markdown] id="HeKcEUwsKWlA" # Create a model containing an LSTM layer with unit size 100, and input shape that is the tuple containing the number of columns in X and the number of files in X. # # The next layer is a dropout layer. Choose 0.5 for the proportion. Then add a dense layer of unit size 100 and finally an output dense layer. # + id="ujymmzAFKWlB" # Answer below: # + [markdown] id="GX4stdzAKWlC" # Print the model summary to ensure you have the correct number of parameters. # + id="AbUNEvSJKWlE" # Answer below: # + [markdown] id="Ww7tK6UwKWlG" # Compile and fit the model. Select the appropriate activation, loss, and optimizer. # # Run the model for 10 epochs with a batch size of 80. # + id="qdMikFoMKWlG" # Answer below: # + [markdown] id="h-_IsmrcKWlI" # Print the confusion matrix for the test data. # + id="bDLwv62wKWlI" # Answer below: # + id="ceYZ8zW4KWlK" t['longitude'], 'o', label=given_type) #plot each subset of df based on the given type plt.legend() plt.show() # - world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres')) world cities = gpd.read_file(gpd.datasets.get_path('naturalearth_cities')) cities earth = world.plot(figsize = (14,8)) geometry = [Point(xy) for xy in zip(df['longitude'], df['latitude'])] geo_df = gpd.GeoDataFrame(df, crs={'init': 'epsg:4326'}, geometry=geometry) geo_df.plot() # + fig, ax = plt.subplots(figsize=(14,8)) world.plot(ax=ax) geo_df.plot(ax=ax, color='red') plt.show # - # us geodata: https://eric.clst.org/tech/usgeojson/ usmap = gpd.read_file('/Users/Me/Documents/Datascience/Rowe/EPluribus/confederates/geodata/usmap.shp') usa = gpd.read_file(gplt.datasets.get_path('contiguous_usa')) usa.plot()
3,759
/Копия_блокнота_ cnns_ipynb .ipynb
c6e6a4e65818dd24ae335f4a0040ecc5380aa394
[]
no_license
termit209/CV_labs
https://github.com/termit209/CV_labs
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
1,881,773
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="kJYwYTjOlppf" # For that lab we will need keras # + id="ojvo0a1Ml3je" colab={"base_uri": "https://localhost:8080/"} outputId="7d5ab447-3c90-4ce3-a8eb-c2ae64f80e8a" # !python -m pip install keras # + [markdown] id="cqEei2T7mQ0w" # Let's create the [VGG16](https://keras.io/api/applications/vgg/) default model with pretrained weights on [ImageNet](http://www.image-net.org/) dataset # + id="7vvuTETilppg" colab={"base_uri": "https://localhost:8080/"} outputId="ec8a96e8-fed3-4f9b-bc03-b2d34b805a8f" import keras model = keras.applications.vgg16.VGG16() # + [markdown] id="qL6AIkW9lppj" # ### Getting weight and layers data # + [markdown] id="MMHmrvN8lppk" # First, we will visualize this model # + id="sMydsBMDlppl" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="7c81ae84-999d-4d09-d908-082cacad33dc" from IPython.display import SVG from keras.utils.vis_utils import model_to_dot SVG(model_to_dot(model, show_shapes= True, show_layer_names=True, dpi=65).create(prog='dot', format='svg')) # + [markdown] id="uKDjHVlLlppn" # And also see the shapes of weights # + id="2mZ5lq5eGKKm" model.summary() # + colab={"base_uri": "https://localhost:8080/"} id="zuUEAP7D9dn7" outputId="93b9224d-22a0-4764-f9fe-1e9cca96ee8c" model.layers[1] # + id="U6AcYYRPlppo" colab={"base_uri": "https://localhost:8080/"} outputId="228a81eb-eca1-4abe-b09d-149e857719ca" weights = model.get_weights() for weight in weights: print(weight.shape) # + [markdown] id="PP5toQAxvb0L" # We can also access each layer separately # + id="EFBJ9tDalppq" colab={"base_uri": "https://localhost:8080/"} outputId="3326a6b7-2840-4f26-9dbf-b1e635c15fd9" layers = model.layers for layer in layers: print(layer) # + [markdown] id="HPlJvKholpps" # You might notice that the number of 
weights is greater than the number of layers. This is because we also have bias weights, in which we are not currently inerested in. # + [markdown] id="dsnfqTE0lpps" # ### Task 1 # Visualize weights from the input layer # + id="D_SJ2aduk2Tk" import numpy as np import cv2 import matplotlib.pyplot as plt # %matplotlib inline import tensorflow as tf # + id="pnw-Gk6BF_I_" w = weights[0] q = np.transpose(w, (3, 0, 1, 2, )) # + colab={"base_uri": "https://localhost:8080/", "height": 248} id="Sr5MOCeQmk9H" outputId="c5e540cc-c701-4676-eeeb-9cfd2d1be18a" img = cv2.cvtColor(q[0, :, :, :], cv2.COLOR_BGR2RGB) img1 = cv2.cvtColor(q[1, :, :, :], cv2.COLOR_BGR2RGB) img2 = cv2.cvtColor(q[2, :, :, :], cv2.COLOR_BGR2RGB) img3 = cv2.cvtColor(q[3, :, :, :], cv2.COLOR_BGR2RGB) img4 = cv2.cvtColor(q[4, :, :, :], cv2.COLOR_BGR2RGB) fig, axs = plt.subplots(1, 5, figsize=(10, 20)) axs[1].imshow(img1) axs[2].imshow(img2) axs[3].imshow(img3) axs[4].imshow(img4) axs[0].imshow(img) # + id="vK5fEdNlH9vI" colab={"base_uri": "https://localhost:8080/", "height": 383} outputId="903e32ee-32a3-44a6-9d2a-682689b14df6" fig, axs = plt.subplots(1, 5, figsize=(40, 80)) axs[0].imshow(q[0, :, :]) axs[1].imshow(q[1, :, :, :]) axs[2].imshow(q[2, :, :, :]) axs[3].imshow(q[3, :, :, :]) axs[4].imshow(q[4, :, :, :]) # + colab={"base_uri": "https://localhost:8080/", "height": 912} id="VT8cssUlQLsq" outputId="36e025ab-7a4f-4ed4-94fd-cdc2160ae32f" fig, axs = plt.subplots(3, 5, figsize=(10, 20)) axs[0, 0].imshow(q[:, :, 0, 0], cmap = 'gray') axs[0,1].imshow(q[:, :, 0, 1], cmap = 'gray') axs[0,2].imshow(q[:, :, 0, 2], cmap = 'gray') axs[0,3].imshow(q[:, :, 0, 3], cmap = 'gray') axs[0,4].imshow(q[:, :, 0, 4], cmap = 'gray') axs[1, 0].imshow(q[:, :, 1, 0], cmap = 'gray') axs[1,1].imshow(q[:, :, 1, 1], cmap = 'gray') axs[1,2].imshow(q[:, :, 1, 2], cmap = 'gray') axs[1,3].imshow(q[:, :, 1, 3], cmap = 'gray') axs[1,4].imshow(q[:, :, 1, 4], cmap = 'gray') axs[2,0].imshow(q[:, :, 2, 0], cmap = 'gray') axs[2,1].imshow(q[:, 
:, 2, 1], cmap = 'gray') axs[2,2].imshow(q[:, :, 2, 2], cmap = 'gray') axs[2,3].imshow(q[:, :, 2, 3], cmap = 'gray') axs[2,4].imshow(q[:, :, 2, 4], cmap = 'gray') plt.show() # + [markdown] id="DyygtE-6lppw" # ### Task 2a # # Feed an image to the model and get an output image from some layer # # You can see the output from each layer using this [guide](https://stackoverflow.com/a/41712013) # + id="wbxvMjkraMxx" colab={"base_uri": "https://localhost:8080/"} outputId="a521696a-7c66-4761-f090-cbbc876d8284" # ! wget https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png # ! ls -al | grep Lenna image_path = 'Lenna_(test_image).png' # + id="G2MXFojQW-SH" colab={"base_uri": "https://localhost:8080/", "height": 322} outputId="9b6887cf-f00e-456b-e542-ac74a88e9422" import cv2 import matplotlib.pyplot as plt img = cv2.imread(image_path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) fig, axs = plt.subplots(1, 2, figsize=(10, 20)) axs[0].imshow(img) axs[1].imshow(img) # + id="fwkKkEr5kwLh" inp = tf.keras.applications.vgg16.preprocess_input(img) # + id="gb1_RkUnxA7z" inp = np.reshape(inp, (1, 512, 512, 3)) # + id="LGSL_j9l0Wve" from keras import backend as K input = model.input # input placeholder outputs = [layer.output for layer in model.layers] # all layer outputs functors = [K.function([input], [out]) for out in outputs] # evaluation functions # + id="w9HXgurazL9P" q = functors[1](inp) # + id="9MAu14N6K6hJ" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="84ef6853-06be-4d1e-aa72-6b1f1bac183e" fig, axs = plt.subplots(1, 5, figsize=(40, 80)) axs[0].imshow(q[0][0, :, :, 0]) axs[1].imshow(q[0][0, :, :, 1]) axs[2].imshow(q[0][0, :, :, 2]) axs[3].imshow(q[0][0, :, :, 3]) axs[4].imshow(q[0][0, :, :, 4]) # + colab={"base_uri": "https://localhost:8080/", "height": 310} id="2gmcoFd0cIbo" outputId="6ac8090c-13a2-4c17-f642-6e539a8d819d" q = functors[8](inp) fig, axs = plt.subplots(1, 5, figsize=(40, 80)) axs[0].imshow(q[0][0, :, :, 0]) 
axs[1].imshow(q[0][0, :, :, 1]) axs[2].imshow(q[0][0, :, :, 2]) axs[3].imshow(q[0][0, :, :, 3]) axs[4].imshow(q[0][0, :, :, 4]) # + colab={"base_uri": "https://localhost:8080/", "height": 310} id="F6ENHq8ycV6u" outputId="b75cc0c8-5578-40d6-b590-098bb702e559" q = functors[10](inp) fig, axs = plt.subplots(1, 5, figsize=(40, 80)) axs[0].imshow(q[0][0, :, :, 0]) axs[1].imshow(q[0][0, :, :, 1]) axs[2].imshow(q[0][0, :, :, 2]) axs[3].imshow(q[0][0, :, :, 3]) axs[4].imshow(q[0][0, :, :, 4]) # + [markdown] id="hn-NsfKjwSvn" # ### Task 2b # # Using [this module](https://www.google.ru/url?sa=t&rct=j&q=&esrc=s&source=web&cd=&ved=2ahUKEwjDwsG9583sAhULx4sKHcMcCh4QFjAEegQIBhAC&url=https%3A%2F%2Fgithub.com%2Fraghakot%2Fkeras-vis&usg=AOvVaw3dxzpVlAlBY3nmBLIJVWAs) visualize the activation maximizing input images from the layer of your choice # + colab={"base_uri": "https://localhost:8080/"} id="bXgokm7yYXbc" outputId="52ec4785-aeb2-4c54-e488-a09ba3fe071d" # !pip install vis # + colab={"base_uri": "https://localhost:8080/"} id="GDim98XwSHto" outputId="4fbc7801-574b-4802-9588-532cace280cc" from keras.applications import VGG16 from vis.utils import utils from keras import activations # Build the VGG16 network with ImageNet weights model = VGG16(weights='imagenet', include_top=True) # Utility to search for layer index by name. # Alternatively we can specify this as -1 since it corresponds to the last layer. 
layer_idx = utils.find_layer_idx(model, 'predictions') # Swap softmax with linear model.layers[layer_idx].activation = activations.linear model = utils.apply_modifications(model) # + id="ForyH4fjcgvB" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="747f31a0-92d9-450b-e8f1-a9a62cb74aa6" # !pip install scipy==1.1.0 # !pip install tensorflow==1.10.0 # !pip install keras==2.2.2 # + id="8PQ0bVQQSIYW" colab={"base_uri": "https://localhost:8080/", "height": 521} outputId="41f4c621-c2cb-4d1e-d4d9-986de3c6f6ac" from vis.visualization import visualize_activation from matplotlib import pyplot as plt # %matplotlib inline plt.rcParams['figure.figsize'] = (18, 6) # 20 is the imagenet category for 'ouzel' img = visualize_activation(model, layer_idx, filter_indices=20) plt.imshow(img) # + [markdown] id="6ug7kEiglppw" # ### Task 3 # # Classify [cats and dogs](https://www.kaggle.com/c/dogs-vs-cats/data) images with [features from CNN](https://keras.io/applications/#extract-features-with-vgg16) using SVM / KNN / etc... 
# + id="c_NKXDsROEI5" colab={"base_uri": "https://localhost:8080/"} outputId="da39b724-6284-4aea-81b0-d1411b1a0426" from google.colab import drive drive.mount('/content/drive') # + id="ceyuJFOHOMgn" # !unzip /content/drive/MyDrive/data/cnd/train.zip # + colab={"base_uri": "https://localhost:8080/", "height": 305} id="bJXTyYcdIcvK" outputId="96793aad-98b0-41ce-f9c3-68561438eaa3" img = cv2.imread('/content/train/cat.0.jpg') img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = cv2.resize(img, (224, 224)) fig, axs = plt.subplots(1, 2, figsize=(10, 20)) axs[0].imshow(img) axs[1].imshow(img) inp = tf.keras.applications.vgg16.preprocess_input(img) inp = np.reshape(inp, (1, 224, 224, 3)) # + id="_XBdsNVc13BU" import tensorflow as tf from keras import Sequential from keras.layers import Dense, Reshape, Flatten, Input, Average, Concatenate, LSTM from keras.models import Model from keras.optimizers import Adam import keras.backend as K gener = Sequential(model.layers[:-1]) # + id="qTq1hQh4f8c-" import os file_names = os.listdir('/content/train') # + id="efFpbQh2ge5M" features = [] i = 0 for file_name in file_names: i += 1 img = cv2.imread('/content/train/' + file_name) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = cv2.resize(img, (224, 224)) inp = tf.keras.applications.vgg16.preprocess_input(img) inp = np.reshape(inp, (1, 224, 224, 3)) features.append(gener.predict(inp)) if i % 250 == 0: print(i) # + id="hSEkNfyjWZNu" labels = [] for file_name in file_names: i += 1 label = file_name.split('.')[0] if label == 'cat': labels.append(1.0) elif label == 'dog': labels.append(0.0) else: print(file_name) if i % 1000 == 0: print(i) # + id="xo9sjHFXawqV" from scipy.io import savemat, loadmat mdic = {'features':np.array(features, dtype=np.float32), 'labels':np.array(labels, dtype=np.float32)} mdic # + id="HhBATH48sMab" from scipy.io import savemat, loadmat # + id="pebsmitjhp41" mdic = loadmat('/content/drive/MyDrive/data/vgg16_DnC.mat') # + colab={"base_uri": "https://localhost:8080/"} 
id="gRrClLvsi6CZ" outputId="a0f27456-e8c6-4490-d92b-98a8a16284c4" mdic['features'].shape # + id="A28yFstYa_7Z" savemat("vgg16_DnC.mat", mdic, do_compression = True) # + id="9S5NubaZme-A" from sklearn.model_selection import train_test_split X = mdic['features'][:, 0, :] y = mdic['labels'][0] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # + colab={"base_uri": "https://localhost:8080/"} id="dNo-Mr79kWkn" outputId="b03eba92-6451-4737-9acd-b9f4f6909e3b" import numpy as np from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC clf = make_pipeline(StandardScaler(), SVC(gamma='auto')) clf.fit(X_train, y_train) # + colab={"base_uri": "https://localhost:8080/"} id="f7M9Xw9yl78z" outputId="f9918795-ed90-4d9c-8ad6-c8fe4c51d6ce" from sklearn.metrics import classification_report target_names = ['dog', 'cat 1'] print(classification_report(y_train, clf.predict(X_train), target_names=target_names)) print(classification_report(y_test, clf.predict(X_test), target_names=target_names)) # + [markdown] id="CyJ6dVHfnud9" # Knn # + colab={"base_uri": "https://localhost:8080/"} id="QVObNlkQnuHF" outputId="be0ea876-f12c-434a-a47e-d910ba6e7b3c" from sklearn.neighbors import KNeighborsClassifier neigh = KNeighborsClassifier(n_neighbors=3) neigh.fit(X_train, y_train) print(classification_report(y_train, neigh.predict(X_train), target_names=target_names)) print(classification_report(y_test, neigh.predict(X_test), target_names=target_names)) # + [markdown] id="b4DuGfhxlppx" # ### Building your own model # # Consider the NON-working example below (x_train not defined) # + id="nhTD9YIblppx" import keras from keras.datasets import cifar10 from keras.preprocessing.image import ImageDataGenerator from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Conv2D, MaxPooling2D import os model = Sequential() # initialize linear 
stacked model # create first conv layer and provide input shapes (other shapes will be calculated automatically) model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:])) # set activation function for the layer model.add(Activation('relu')) #repeat model.add(Conv2D(32, (3, 3))) model.add(Activation('relu')) # first maxpooling layer with 2x2 window model.add(MaxPooling2D(pool_size=(2, 2))) # set dropout to 25% of neurons number model.add(Dropout(0.25)) # flatten convert ndarray fron the prev layer to 1D vector model.add(Flatten()) # fully connected layer with 512 neurons model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(num_classes)) model.add(Activation('softmax')) # + id="6AxlqE70ljRI" # use early stopping to optimally terminate training through callbacks from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint es=EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5) # save best model automatically mc= ModelCheckpoint('yourdirectory/your_model.h5', monitor='val_loss', mode='min', verbose=1, save_best_only=True) cb_list=[es,mc] # + [markdown] id="6MNnhQUWlppz" # ### Task 4 # # Create your own simple architecture and train CNN on [MNIST](https://keras.io/datasets/#mnist-database-of-handwritten-digits) dataset. Note that MNIST is 1 color channel dataset. # Test it on some hard images from the web, [such as](https://www.wpclipart.com/education/animal_numbers/animal_number_2.png). 
# + id="KKsf3hnwxJVG" colab={"base_uri": "https://localhost:8080/"} outputId="b421838c-5a97-4c83-ac3d-c2e59e05ee47" import keras (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # + [markdown] id="KPgHPth5tSuL" # preprocess # + colab={"base_uri": "https://localhost:8080/"} id="lRI8ssnzkI6O" outputId="c6b7014e-0b4f-49ad-f3ed-bc18ad672063" image_height = 28 image_width = 28 num_channels = 1 # re-shape the images data x_train = np.reshape(x_train, (x_train.shape[0], image_height, image_width, num_channels)) x_test = np.reshape(x_test, (x_test.shape[0],image_height, image_width, num_channels)) # re-scale the image data to values between (0.0,1.0] x_train = x_train.astype('float32') / 255. x_test = x_test.astype('float32') / 255. from keras.utils import to_categorical num_classes = 10 y_train_cat = to_categorical(y_train,num_classes) y_test_cat = to_categorical(y_test,num_classes) y_train_cat.shape, y_test_cat.shape # + [markdown] id="GJ03guQXuE87" # model # + colab={"base_uri": "https://localhost:8080/"} id="qMUp0NJJvkse" outputId="928f7ea0-bed7-4e1f-a84f-bc458264aa79" from keras.layers import Dense, Conv2D, MaxPooling2D, BatchNormalization, GlobalAveragePooling2D from keras import models # starting point my_model= models.Sequential() # Add first convolutional block my_model.add(Conv2D(16, (3, 3), activation='relu', padding='same', input_shape=(image_height, image_width, num_channels))) my_model.add(MaxPooling2D((2, 2), padding='same')) # second block my_model.add(Conv2D(32, (3, 3), activation='relu', padding='same')) my_model.add(MaxPooling2D((2, 2), padding='same')) # third block my_model.add(Conv2D(64, (3, 3), activation='relu', padding='same')) my_model.add(MaxPooling2D((2, 2), padding='same')) # fourth block my_model.add(Conv2D(128, (3, 3), activation='relu', padding='same')) my_model.add(MaxPooling2D((2, 2), padding='same')) # global average pooling my_model.add(GlobalAveragePooling2D()) # fully connected layer my_model.add(Dense(64, 
activation='relu')) my_model.add(BatchNormalization()) # make predictions my_model.add(Dense(num_classes, activation='softmax')) # Show a summary of the model. Check the number of trainable parameters my_model.summary() # compile model my_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) # + id="Y0xBbulWu_wZ" colab={"base_uri": "https://localhost:8080/"} outputId="d9faf642-7b5c-4470-db84-229011b3c063" results = my_model.fit(x_train, y_train_cat, epochs=7, batch_size=128, validation_split=0.1) # + id="avsj_rptc_Ju" pred = my_model.predict(x_test) # + colab={"base_uri": "https://localhost:8080/"} id="SicMkiIGem5I" outputId="84437d42-b393-4c4e-87d7-5b3207c36498" class_pred = np.argmax(pred, axis=1) y_test # + colab={"base_uri": "https://localhost:8080/"} id="998onc3_e2p_" outputId="5164b49b-d6f7-4536-a363-79d5ef5d129a" from sklearn.metrics import classification_report print(classification_report(y_test, class_pred)) # + id="yfbf1QjS3XlG" colab={"base_uri": "https://localhost:8080/"} outputId="b26724ac-0e70-4243-d668-58164a174b63" import os path_folder = '/content/drive/MyDrive/data/hard' paths = os.listdir(path_folder) paths # + id="0s8hcD9W_4L1" import numpy as np import cv2 import matplotlib.pyplot as plt # %matplotlib inline # + id="U6hxXoT0Gjvk" list_img = [] for path in paths: image_path = path_folder + '/' + path img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE) img = cv2.resize(img, (28, 28)) list_img.append(img) list_img = np.array(list_img) # + colab={"base_uri": "https://localhost:8080/", "height": 172} id="Qn2ZNEt5G4p8" outputId="a54ec7e9-9e03-425b-be4a-ad1fde110404" fig, axs = plt.subplots(1, 9, figsize=(20, 40)) for i in range(len(list_img)): axs[i].imshow(list_img[i], cmap='gray') print(np.argmax(pred, axis=1)) # + id="J1QyNx3bH3Er" # re-shape the images data x_hard_test = np.reshape(list_img, (9, image_height, image_width, num_channels)) x_hard_test = x_hard_test.astype('float32') / 255. 
# + id="1Q11nwFCImdh" colab={"base_uri": "https://localhost:8080/"} outputId="cf7043e8-7c83-4cfe-d593-ce20d13c7816" pred = my_model.predict(x_hard_test) np.argmax(pred, axis=1) # + [markdown] id="7MFL2c4ulpp0" # ### Task 5 # # Augment MNIST data using [ImageDataGenerator](https://keras.io/preprocessing/image/) and color inversion + visualize (or save) augmented images. # Train your model on augmented data and compare the results on your images from the web. # + id="c7Uxmscz-Pok" import random # + colab={"base_uri": "https://localhost:8080/"} id="18ZNVuW5-aP-" outputId="3e88bd3a-41f5-4ffc-eb9c-434db8a87f59" x_train[1].shape # + id="B9BrxLyR-GsJ" def colour_inversion(img): if random.random() < 0.5: inverted_img = 255 - img else: inverted_img = img return inverted_img # + id="J7-H5bH4CoER" (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # + id="kJLy-dfyAm-8" colab={"base_uri": "https://localhost:8080/", "height": 319} outputId="71ce8240-0d65-4ad4-a237-979c20871517" fig, axs = plt.subplots(1, 2, figsize=(10, 20)) axs[0].imshow(x_train[1], cmap='gray') axs[1].imshow( colour_inversion(x_train[1]), cmap='gray') # + id="pLDK7U4916mh" x_train = np.reshape(x_train, (x_train.shape[0], image_height, image_width, num_channels)) datagen = ImageDataGenerator( rotation_range=10, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True, preprocessing_function=colour_inversion, validation_split=0.1, rescale= 1./255) # compute quantities required for featurewise normalization # (std, mean, and principal components if ZCA whitening is applied) datagen.fit(x_train) # fits the model on batches with real-time data augmentation: # + colab={"base_uri": "https://localhost:8080/"} id="MntAs7_0LfVT" outputId="26bca4c6-02aa-40ea-d145-1a9bf48a5430" from keras.utils import to_categorical num_classes = 10 y_train_cat = to_categorical(y_train,num_classes) y_test_cat = to_categorical(y_test,num_classes) y_train_cat.shape, y_test_cat.shape # + colab={"base_uri": 
"https://localhost:8080/"} id="yJk8CuTRG-5m" outputId="50c56b18-c611-4d75-c325-771ae3a24595" data_gen = datagen.flow(x_train, y_train_cat, batch_size=100) results = my_model.fit(data_gen, steps_per_epoch=600, epochs=5) # + colab={"base_uri": "https://localhost:8080/", "height": 172} id="Fdnh7ZGcVuV3" outputId="69678c24-1df9-41ee-9449-c4e29c672386" fig, axs = plt.subplots(1, 9, figsize=(20, 40)) for i in range(len(list_img)): axs[i].imshow(list_img[i], cmap='gray') print(np.argmax(pred, axis=1)) # + id="kH3Rfu9sV47T" x_hard_test = np.reshape(list_img, (9, image_height, image_width, num_channels)) x_hard_test = x_hard_test.astype('float32') / 255. # + colab={"base_uri": "https://localhost:8080/"} id="z3Lh6gWOV8t5" outputId="6ccebba4-90fc-47a1-80dc-02f5a9966f32" pred = my_model.predict(x_hard_test) np.argmax(pred, axis=1) pred[4] # + [markdown] id="RKbiG8bplpp0" # ### Task 6 # # Try any of the data augmentation frameworks and show the results # + [markdown] id="yqhyDlXMlpp0" # ### Task 7 # # [Use](https://www.learnopencv.com/keras-tutorial-fine-tuning-using-pre-trained-models/) pretrained model to train on CIFAR-10 dataset. For that, you can try to use your CNN architecture. # + id="8gL1CkJMsBBo" features = [] i = 0 for file_name in file_names: i += 1 img = cv2.resize(img, (224, 224)) inp = tf.keras.applications.vgg16.preprocess_input(img) inp = np.reshape(inp, (1, 224, 224, 3)) # + colab={"base_uri": "https://localhost:8080/"} id="FQocQNmysRE2" outputId="aa7c7a4f-752d-4199-b431-3f91dbe8c742" (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() # + id="QaSreJGrpmLV" pretr_model = keras.applications.vgg16.VGG16(input_shape=(32, 32)) # + [markdown] id="BcdNkNCalpp1" # ### Task 8 # # [Implement](https://medium.com/mlreview/making-ai-art-with-style-transfer-using-keras-8bb5fa44b216) the style transfer technique # + id="M2ggrAnjlpp1" # + id="_fDgDzTtlpp3" # + id="Sh2Rfjtzlpp5"
22,364
/.ipynb_checkpoints/fx-rates-checkpoint.ipynb
a7cb88b7a4efa262427b1f7207926c6bbfb1dc44
[ "Apache-2.0" ]
permissive
kamlesh-p/myNotebooks
https://github.com/kamlesh-p/myNotebooks
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
84,835
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Notebook for FX rate analysis # *** # ### Data downloaded from kaggle.com # + # %matplotlib inline from matplotlib import pyplot as plt from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() import pandas as pd df = pd.read_csv('fx-rates-per-dollar-20002019/Foreign_Exchange_Rates.csv', index_col=0, usecols=[0, 1, 10], dtype={'INDIA - INDIAN RUPEE/US$':'float'}, parse_dates=[1], na_values='ND') # - df.head() df.tail() # + plt.ylabel('INR') plt.xlabel('INR') plt.title('INR / 1 US$'); plt.rcParams['figure.figsize'] = [22,9] plt.hlines(y=df['INDIA - INDIAN RUPEE/US$'].min(), xmin=df['Time Serie'][0], xmax=df['Time Serie'][df[['INDIA - INDIAN RUPEE/US$']].idxmin()], color='g', linestyle='-', label='Min') plt.vlines(x=df['Time Serie'][df[['INDIA - INDIAN RUPEE/US$']].idxmin()], ymin=df['INDIA - INDIAN RUPEE/US$'].min(), ymax=df['INDIA - INDIAN RUPEE/US$'].min(), color='g', linestyle='-', label='Min') plt.hlines(y=df['INDIA - INDIAN RUPEE/US$'].max(),xmin=df['Time Serie'][0], xmax=df['Time Serie'][df[['INDIA - INDIAN RUPEE/US$']].idxmin()], color='r', linestyle='-', label="Max") plt.axvline(x=df['Time Serie'][df[['INDIA - INDIAN RUPEE/US$']].idxmax()], color='r', linestyle='-', label='Min') plt.plot(df['Time Serie'], df['INDIA - INDIAN RUPEE/US$'], color='tab:blue') # + plt.ylabel('INR') plt.xlabel('INR') plt.title('INR / 1 US$'); plt.rcParams['figure.figsize'] = [22,9] plt.hlines(y=df['INDIA - INDIAN RUPEE/US$'].min(),xmin=df['Time Serie'][0], xmax=df['Time Serie'][10], color='g', linestyle='-', label='Min') plt.vlines(x=df['Time Serie'][df[['INDIA - INDIAN RUPEE/US$']].idxmin()],ymin=0, ymax=55, color='g', linestyle='-', label='Min') # - _values('p') IVGPartyresults.head(10) # + 
PEPartyresults=Partyresults[Partyresults['XCategory'].isin(['PoliticalEngagement'])].sort_values('p') PEPartyresults.head(10) # + SocialPartyresults=Partyresults[Partyresults['XCategory'].isin(['SocialViews'])].sort_values('p') SocialPartyresults.head(10) # + TSMPartyresults=Partyresults[Partyresults['XCategory'].isin(['TraditionalSocialMedia'])].sort_values('p') TSMPartyresults.head(10) # - # # Attended Rally ARresults=results[results['Y'].isin(['pyAttendedRally_Groups'])].sort_values('p') # + DemoARresults=ARresults[ARresults['XCategory'].isin(['Demographics'])].sort_values('p') DemoARresults.head(10) # + IVGARresults=ARresults[ARresults['XCategory'].isin(['InternetUseVideoGames'])].sort_values('p') IVGARresults.head(10) # + PEARresults=ARresults[ARresults['XCategory'].isin(['PoliticalEngagement'])].sort_values('p') PEARresults.head(10) # + SocialARresults=ARresults[ARresults['XCategory'].isin(['SocialViews'])].sort_values('p') SocialARresults.head(10) # + TSMARresults=ARresults[ARresults['XCategory'].isin(['TraditionalSocialMedia'])].sort_values('p') TSMARresults.head(10) # - # # Voted Votedresults=results[results['Y'].isin(['pyVoted_Groups'])].sort_values('p') # + DemoVotedresults=Votedresults[Votedresults['XCategory'].isin(['Demographics'])].sort_values('p') DemoVotedresults.head(10) # + IVGVotedresults=Votedresults[Votedresults['XCategory'].isin(['InternetUseVideoGames'])].sort_values('p') IVGVotedresults.head(10) # + PEVotedresults=Votedresults[Votedresults['XCategory'].isin(['PoliticalEngagement'])].sort_values('p') PEVotedresults.head(10) # + SocialVotedresults=Votedresults[Votedresults['XCategory'].isin(['SocialViews'])].sort_values('p') SocialVotedresults.head(10) # + TSMVotedresults=Votedresults[Votedresults['XCategory'].isin(['TraditionalSocialMedia'])].sort_values('p') TSMVotedresults.head(10) # - # # Conclusions # The following variables or categories appear multiple times across our target variables. 
They will be variables that may be of interest later during our predictive modeling. # Internet Access x 4<br> # Gender x 3<br> # Ethnicity x 3<br> # Education Groups x 3<br> # Lang Spoken Home x 2<br> # Age x 2<br> # <br> # Internet Use x 4<br> # Video Game Playing Moral Ethical Issues x 3<br> # Video Game Playing Learn Society Problems x 2<br> # Group Political Issue Internet Role x 2<br> # Group Politician Internet Role x 2<br> # Group Election Campaign Internet Role x 2<br> # Group Local Community Internet Role x 2<br> # Group Social Issue Internet Role x 2<br> # <br> # Discuss Politics Friends x 3<br> # Voted x 2<br> # Self Understand Politics x 2<br> # Political Interest x 2<br> # PY Commented News x 2<br> # Self Internet Find Political Info x 2<br> # <br> # Protest Illegal x 2<br> # Protest Effective x 2<br> # Protest Not Respresentative x 2<br> # Govt Help Vulnerable x 2<br> # Politicians Country Best Interest x 2<br> # Govt Business Regulation x 2<br> # News Unbiased x 2<br> # <br> # Twitter use x 3<br> # USA Political News Source Comedy TV x 2<br> # National News Interest x 2<br> # Facebook Post Political Opinions x 2<br> # Facebook Encourage Vote x 2<br> # Facebook Post Political Links x 2<br> #
5,239
/Day13/範例程式13.ipynb
52990d7931c85d5cd95d28ef0cc5f50bdd0a0b3b
[]
no_license
hochinchang/python60days
https://github.com/hochinchang/python60days
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
26,338
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="v-_imnFDowIG" # 範例目標:<br> # 1. 實做統計函式 # 2. 實做自定義的行或列函式應用 # # + [markdown] id="mz600zXQo9XO" # 範例重點:<br> # 1. 統計函數使用方式與 Numpy 類似,不同之處為 Pandas 的資料型態是 DataFrame # 2. 使用自定義函數時lambda x 與數學中的 f(x) 是相同的意思 # + id="8CegrNrgo920" import pandas as pd # + colab={"base_uri": "https://localhost:8080/", "height": 390} executionInfo={"elapsed": 587, "status": "ok", "timestamp": 1606620323926, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="octRixAoPXTb" outputId="be801187-8fb7-4a1f-bc1f-8fc78daba893" score_df = pd.DataFrame([[1,50,80,70], [2,60,45,50], [3,98,43,55], [4,70,69,89], [5,56,79,60], [6,60,68,55], [7,45,70,77], [8,55,77,76], [9,25,57,60], [10,88,40,43]],columns=['student_id','math_score','english_score','chinese_score']) #score_df = score_df.set_index('student_id') score_df # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1086, "status": "ok", "timestamp": 1606623121703, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="dwhT33ewQoa4" outputId="0054e545-98f8-4ba8-9330-6db557239e01" #指定欄位算平均 score_df.math_score.mean() # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 990, "status": "ok", "timestamp": 1606623122160, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="x5MfxaN3QpVB" outputId="5706e2b8-cbad-4bb7-8c0b-eca5a9a4e9ee" #全欄位算平均 #score_df.mean() score_df[score_df['student_id']==6] # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1016, "status": "ok", "timestamp": 1606623122662, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": 
"07529243043474362942"}, "user_tz": -480} id="BSv4h3qrMxzy" outputId="64b3b2e8-5d89-4581-9f77-553c1ab2a37c" #學生平均分數 score_df.mean(axis=1) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1077, "status": "ok", "timestamp": 1606623602154, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="dKGUiKUHEIXp" outputId="4a9647eb-e660-48b3-9041-5f2b9f6e8ce7" #學生3科總分數 score_df.sum(axis=1) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 625, "status": "ok", "timestamp": 1606623602155, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="vG1vLFAlEPSl" outputId="0eb4d7bb-9640-4f25-d779-b930ce116cfb" #本次各科考試人數 score_df.count() # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1011, "status": "ok", "timestamp": 1606624026971, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="7e1wGtLDR2WC" outputId="c4d5b2c0-e4fc-439a-878e-546374725d12" #各科中位數分佈 score_df.median() # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 866, "status": "ok", "timestamp": 1606624031239, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="Qxq9Boy_QSKh" outputId="178895f8-4dc4-418b-d9ab-a3b785e81f66" #各科百分位數分佈(75%) score_df.quantile(0.75) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 985, "status": "ok", "timestamp": 1606624172106, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="LgMp9z6JTpNu" outputId="91b9a753-ce3f-4ab1-96c4-c9c24dbf0b59" #各科最大值 score_df.max() # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 720, "status": "ok", "timestamp": 1606624172107, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", 
"userId": "07529243043474362942"}, "user_tz": -480} id="czrZdVKjDzRL" outputId="ab66d0a1-d5ff-41f4-b547-57a2cf313078" #各科最小值 score_df.min() # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 904, "status": "ok", "timestamp": 1606620770334, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="bqcQIgp8Txav" outputId="31243d10-feb7-4c20-8835-2640ba954375" #各科百分位數分佈(75%) score_df.quantile(0.75) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 930, "status": "ok", "timestamp": 1606624298899, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="ikhaYpUIRQI1" outputId="2dcdfde4-0291-4de1-e6a1-90cfb57d0bcc" #各科標準差 score_df.std() # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1002, "status": "ok", "timestamp": 1606624315347, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="sOErEx_SRVjx" outputId="0b60c215-69c2-414c-afb0-eeb5179f0f11" #各科變異數 score_df.var() # + colab={"base_uri": "https://localhost:8080/", "height": 142} executionInfo={"elapsed": 1016, "status": "ok", "timestamp": 1606624511293, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="XUJBu_1IXWPJ" outputId="7dfc6f1d-a40f-46c6-d530-bc77f44a36ba" #各科之間的相關係數 score_df.corr() # + colab={"base_uri": "https://localhost:8080/", "height": 390} executionInfo={"elapsed": 906, "status": "ok", "timestamp": 1606625189864, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="A2t4BIN7AqL7" outputId="63bf8cd7-9ec1-42a4-98bf-29214340eefb" #各科開根號乘以十 score_df.apply(lambda x : x**(0.5)*10) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1018, "status": "ok", "timestamp": 1606625529504, "user": 
{"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="1VV-XvRqGBnR" outputId="470669c5-6374-405d-fba2-f09384e20593" #各科加總apply score_df.apply(sum,axis=1) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 852, "status": "ok", "timestamp": 1606625544159, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}, "user_tz": -480} id="QROXM4I-GTXS" outputId="871d3993-2626-405e-aca7-19b3b47dc0ad" #各科加總 score_df.sum(axis=1) # + id="fP5n2v7NWEQL"
6,739
/credit_risk_resampling.ipynb
966bebaa9e0ce23936aeec4618f08cb2f4a315ed
[]
no_license
LaurenBrad/Classification_Homework11
https://github.com/LaurenBrad/Classification_Homework11
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
45,206
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # #### Running the code, you can input whatever you want to search as "keyword". However, keep in mind that this code will not stop running until you tell it to stop. It will keep getting the tweets as they come forever, until you tell it to stop, and, because you have a certain amount of tweets that your allowed to get per 15 minutes, this could cause you to be blocked from twitter API. # #### When you want to stop the code from running, click on "kernel" in the menu above, and then click "Restart". Then you can open the file twitterResults.txt and see how the output looks like. # + import time from tweepy import API from tweepy import Stream from tweepy import OAuthHandler from tweepy.streaming import StreamListener import json ckey = 'xxAwURacjqyRQvRsB4WhZTO08' csecret = 'TEqLj1E8UkqRRQ0l1wWoiL4ICxH3DijxOXgxdvWvxf31d366YU' atoken = '1157025043299258368-H1nKJgnYuicK8rpBNqzk3GxmwASmET' asecret = '8kijJ1dLcCYkq9Y3CpbGM0At9P3Rd8x2IyUe067LxqVDk' class listener(StreamListener): def on_data(self, data): print('Got the Some Tweets for you. Press stop when you have enough.\n') try: outFile = open('Files_Directory/Twitter/twitterResults3.txt','a') outFile.write(data) #outFile.write('\n') outFile.close return True except BaseException as e: print('failed ondata, ', str(e)) time.sleep(5) def on_error(self, status): print("failed with " + str(status)) auth = OAuthHandler(ckey, csecret) auth.set_access_token(atoken , asecret) keyword=str(input('Enter the keyword:')) twitterStream = Stream(auth, listener() ) twitterStream.filter(track=keyword) # - # #### I entered UCSD in the following code and stop it. 
outFile = open('Files_Directory/Twitter/twitterResults3.txt','r') tweets=outFile.readlines() tweet_clean=[] for i in range(0,50): tmp=json.loads(tweets[i]) tweet_clean.append(tmp['text']) tweet_clean outFile.close() # + yourKey = '8d1bd9c482ce48da87bd318d72ed6547' import requests import time azure_sentiments = [] review_number = 1 time_counter = time.time() for review in tweet_clean: if review_number % 100 == 0: print("waiting") print(abs(75 - (time.time() - time_counter))) time.sleep(abs(75 - (time.time() - time_counter))) # prevent being blocked by Twitter time_counter = time.time() print(review_number) review_number+=1 text_analytics_base_url = "https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/" sentiment_api_url = text_analytics_base_url + "sentiment" documents = {'documents' : [ {'id': '1', 'language': 'en', 'text': review}]} headers = {"Ocp-Apim-Subscription-Key": yourKey} response = requests.post(sentiment_api_url, headers=headers, json=documents) sentiments = response.json() print(sentiments) azure_sentiments.append(sentiments['documents'][0]['score']) print(sum(azure_sentiments)/50) # - print(sum(azure_sentiments)/50) # #### >0.5 means postive, so most peoople sent tweets about UCSD hold a positive attitude towards it. 
unter # YOUR CODE HERE Counter(y_resampled) # Train the Logistic Regression model using the resampled data # YOUR CODE HERE from sklearn.linear_model import LogisticRegression model = LogisticRegression(solver='lbfgs', random_state=1) model.fit(X_resampled, y_resampled) # Calculated the balanced accuracy score # YOUR CODE HERE from sklearn.metrics import balanced_accuracy_score y_pred = model.predict(X_test_scaled) balanced_accuracy_score(y_test, y_pred) # + # Display the confusion matrix # YOUR CODE HERE from sklearn.metrics import confusion_matrix confusion_matrix(y_test, y_pred) # + # Print the imbalanced classification report # YOUR CODE HERE from imblearn.metrics import classification_report_imbalanced print(classification_report_imbalanced(y_test, y_pred)) # - # ### SMOTE Oversampling # + # Resample the training data with SMOTE # YOUR CODE HERE from imblearn.over_sampling import SMOTE X_resampled, y_resampled = SMOTE(random_state=1, sampling_strategy=1.0).fit_resample(X_train_scaled, y_train) from collections import Counter # View the count of target classes with Counter Counter(y_resampled) # + # Train the Logistic Regression model using the resampled data model = LogisticRegression(solver='lbfgs', random_state=1) model.fit(X_resampled, y_resampled) # + # Calculated the balanced accuracy score # YOUR CODE HERE y_pred = model.predict(X_test_scaled) balanced_accuracy_score(y_test, y_pred) # + # Display the confusion matrix # YOUR CODE HERE confusion_matrix(y_test, y_pred) # - # Print the imbalanced classification report # YOUR CODE HERE print(classification_report_imbalanced(y_test, y_pred)) # # Undersampling # # In this section, you will test an undersampling algorithm to determine which algorithm results in the best performance compared to the oversampling algorithms above. You will undersample the data using the Cluster Centroids algorithm and complete the folliowing steps: # # 1. 
View the count of the target classes using `Counter` from the collections library. # 3. Use the resampled data to train a logistic regression model. # 3. Calculate the balanced accuracy score from sklearn.metrics. # 4. Display the confusion matrix from sklearn.metrics. # 5. Generate a classication report using the `imbalanced_classification_report` from imbalanced-learn. # # Note: Use a random state of 1 for each sampling algorithm to ensure consistency between tests # + # Resample the data using the ClusterCentroids resampler # YOUR CODE HERE from imblearn.under_sampling import ClusterCentroids cc = ClusterCentroids(random_state=1) X_resampled, y_resampled = cc.fit_resample(X_train, y_train) # View the count of target classes with Counter Counter(y_resampled) # + # Train the Logistic Regression model using the resampled data # YOUR CODE HERE model = LogisticRegression(solver='lbfgs', random_state=1) model.fit(X_resampled, y_resampled) # + # Calculate the balanced accuracy score # YOUR CODE HERE y_pred = model.predict(X_test_scaled) balanced_accuracy_score(y_test, y_pred) # + # Display the confusion matrix # YOUR CODE HERE confusion_matrix(y_test, y_pred) # + # Print the imbalanced classification report # YOUR CODE HERE print(classification_report_imbalanced(y_test, y_pred)) # - # # Combination (Over and Under) Sampling # # In this section, you will test a combination over- and under-sampling algorithm to determine if the algorithm results in the best performance compared to the other sampling algorithms above. You will resample the data using the SMOTEENN algorithm and complete the folliowing steps: # # 1. View the count of the target classes using `Counter` from the collections library. # 3. Use the resampled data to train a logistic regression model. # 3. Calculate the balanced accuracy score from sklearn.metrics. # 4. Display the confusion matrix from sklearn.metrics. # 5. 
Generate a classication report using the `imbalanced_classification_report` from imbalanced-learn. # # Note: Use a random state of 1 for each sampling algorithm to ensure consistency between tests # Resample the training data with SMOTEENN from imblearn.combine import SMOTEENN smote_enn = SMOTEENN(random_state=1) X_sampled, y_resampled = smote_enn.fit_resample(X_train_scaled, y_train) # View the count of target classes with Counter Counter(y_resampled) # Train the Logistic Regression model using the resampled data # YOUR CODE HERE from sklearn.linear_model import LogisticRegression model = LogisticRegression(solver='lbfgs', random_state=1) model.fit(X_resampled, y_resampled) # Calculate the balanced accuracy score # YOUR CODE HERE y_pred = model.predict(X_test_scaled) balances_accuracy_score(y_test, y_pred) # + # Display the confusion matrix # YOUR CODE HERE confusion_matrix(y_test, y_pred) # + # Print the imbalanced classification report # YOUR CODE HERE print(classification_report_imbalanced(y_test, y_pred)) # - # # Final Questions # # 1. Which model had the best balanced accuracy score? # # Either SMOTE or Naive Random oversampling # # 2. Which model had the best recall score? # # SMOTE oversampling # # 3. Which model had the best geometric mean score? # # #
8,624
/user-info/.ipynb_checkpoints/user-info-checkpoint.ipynb
bc58d2c2a8f46f58a649bc85925a3d612d8c015b
[]
no_license
Q2MSites/user-profiles
https://github.com/Q2MSites/user-profiles
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
12,466
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Approximating Contours and Convex Hull # # ***cv2.approxPolyDP(contour, Approximation Accuracy, Closed)*** # - **contour** – is the individual contour we wish to approximate # - **Approximation Accuracy** – Important parameter is determining the accuracy of the approximation. Small values give precise- approximations, large values give more generic approximation. A good rule of thumb is less than 5% of the contour perimeter # - **Closed** – a Boolean value that states whether the approximate contour should be open or closed # # + import numpy as np import cv2 # Load image and keep a copy image = cv2.imread('images/house.jpg') orig_image = image.copy() cv2.imshow('Original Image', orig_image) cv2.waitKey(0) # Grayscale and binarize gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV) # Find contours contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE) # Iterate through each contour and compute the bounding rectangle for c in contours: x,y,w,h = cv2.boundingRect(c) cv2.rectangle(orig_image,(x,y),(x+w,y+h),(0,0,255),2) cv2.imshow('Bounding Rectangle', orig_image) cv2.waitKey(0) # Iterate through each contour and compute the approx contour for c in contours: # Calculate accuracy as a percent of the contour perimeter accuracy = 0.03 * cv2.arcLength(c, True) approx = cv2.approxPolyDP(c, accuracy, True) cv2.drawContours(image, [approx], 0, (0, 255, 0), 2) cv2.imshow('Approx Poly DP', image) cv2.waitKey(0) cv2.destroyAllWindows() # - # ## Convex Hull # # # + import numpy as np import cv2 image = cv2.imread('images/hand.jpg') gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) cv2.imshow('Original Image', image) cv2.waitKey(0) # Threshold the image ret, 
thresh = cv2.threshold(gray, 176, 255, 0) # Find contours contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE) # Sort Contors by area and then remove the largest frame contour n = len(contours) - 1 contours = sorted(contours, key=cv2.contourArea, reverse=False)[:n] # Iterate through contours and draw the convex hull for c in contours: hull = cv2.convexHull(c) cv2.drawContours(image, [hull], 0, (0, 255, 0), 2) cv2.imshow('Convex Hull', image) cv2.waitKey(0) cv2.destroyAllWindows() # - yield row[0],{"zip":row[1],"phone":row[2],"fname":row[3].lower(),"lname":row[4].lower(),"address":row[5].lower()} #u.dob,u.zip,u.phone,u.first_name,u.last_name,u.gender,u.address def fps_filter(line): #dob = re.match('\d\d\d\d-\d\d-\d\d',line[1]['dob']) zipcode = re.match('\d\d\d\d\d',line[1]['zip']) phone = (re.match('\d\d\d-\d\d\d-\d\d\d\d',line[1]['phone']) or re.match('\d\d\d\d\d\d\d\d\d\d',line[1]['phone'])) #gender = line[1]['gender'] in ['f','m','u'] return zipcode or phone or line[1]['fname'] != '' or line[1]['lname']!= '' or line[1]['address']!='' def fps_info(line): tmp = dict({}) if re.match('\d\d\d\d\d',line[1]['zip']): tmp.update({'zip':line[1]['zip']}) if (re.match('\d\d\d-\d\d\d-\d\d\d\d',line[1]['phone']) or re.match('\d\d\d\d\d\d\d\d\d\d',line[1]['phone'])): tmp.update({'phone':line[1]['phone']}) if line[1]['fname'] != '': tmp.update({'fname':line[1]['fname']}) if line[1]['lname']!= '': tmp.update({'lname':line[1]['lname']}) if line[1]['address'] != '': tmp.update({'address':line[1]['address']}) return (line[0],tmp) def prime_all_load(key): for line in key.get_contents_as_string().splitlines(): line = str(line) row = line.split("|") yield row[0],{"dob":row[1],"zip":row[2],"phone":row[3],"gender":row[4].lower()} #u.dob,u.zip,u.phone,u.first_name,u.last_name,u.gender,u.address def prime_all_filter(line): dob = re.match('\d\d\d\d-\d\d-\d\d',line[1]['dob']) zipcode = re.match('\d\d\d\d\d',line[1]['zip']) phone = 
(re.match('\d\d\d-\d\d\d-\d\d\d\d',line[1]['phone']) or re.match('\d\d\d\d\d\d\d\d\d\d',line[1]['phone'])) gender = line[1]['gender'] in ['f','m','u'] return dob or zipcode or phone or gender def prime_all_info(line): tmp = dict({}) if re.match('\d\d\d\d-\d\d-\d\d',line[1]['dob']): tmp.update({'dob':line[1]['dob']}) if re.match('\d\d\d\d\d',line[1]['zip']): tmp.update({'zip':line[1]['zip']}) if (re.match('\d\d\d-\d\d\d-\d\d\d\d',line[1]['phone']) or re.match('\d\d\d\d\d\d\d\d\d\d',line[1]['phone'])): tmp.update({'phone':line[1]['phone']}) if line[1]['gender'] in ['f','m']: tmp.update({'gender':line[1]['gender']}) return (line[0],tmp) def advertiser_load(key): for line in key.get_contents_as_string().splitlines(): line = str(line) row = line.split("|") yield row[0],{"zip":row[2],"fname":row[4].lower(),"lname":row[5].lower(),"address":row[7].lower()} #u.dob,u.zip,u.phone,u.first_name,u.last_name,u.gender,u.address def advertiser_filter(line): #dob = re.match('\d\d\d\d-\d\d-\d\d',line[1]['dob']) zipcode = re.match('\d\d\d\d\d',line[1]['zip']) #phone = (re.match('\d\d\d-\d\d\d-\d\d\d\d',line[1]['phone']) or re.match('\d\d\d\d\d\d\d\d\d\d',line[1]['phone'])) #gender = line[1]['gender'] in ['f','m','u'] return zipcode or line[1]['fname'] != '' or line[1]['lname']!= '' or line[1]['address']!='' def advertiser_info(line): tmp = dict({}) if re.match('\d\d\d\d\d',line[1]['zip']): tmp.update({'zip':line[1]['zip']}) if line[1]['fname'] != '': tmp.update({'fname':line[1]['fname']}) if line[1]['lname']!= '': tmp.update({'lname':line[1]['lname']}) if line[1]['address'] != '': tmp.update({'address':line[1]['address']}) return (line[0],tmp) def cdd_load(key): for line in key.get_contents_as_string().splitlines(): line = str(line) row = line.split("|") yield row[0],{"dob":row[1],"zip":row[2],"phone":row[3],"fname":row[4].lower(),"lname":row[5].lower(),"gender":row[6].lower(),"address":row[7].lower()} #u.dob,u.zip,u.phone,u.first_name,u.last_name,u.gender,u.address def 
cdd_filter(line): dob = re.match('\d\d\d\d-\d\d-\d\d',line[1]['dob']) zipcode = re.match('\d\d\d\d\d',line[1]['zip']) phone = (re.match('\d\d\d-\d\d\d-\d\d\d\d',line[1]['phone']) or re.match('\d\d\d\d\d\d\d\d\d\d',line[1]['phone'])) gender = line[1]['gender'] in ['f','m','u'] return dob or zipcode or phone or gender or line[1]['fname'] != '' or line[1]['lname']!= '' or line[1]['address']!='' def cdd_info(line): tmp = dict({}) if re.match('\d\d\d\d-\d\d-\d\d',line[1]['dob']): tmp.update({'dob':line[1]['dob']}) if re.match('\d\d\d\d\d',line[1]['zip']): tmp.update({'zip':line[1]['zip']}) if (re.match('\d\d\d-\d\d\d-\d\d\d\d',line[1]['phone']) or re.match('\d\d\d\d\d\d\d\d\d\d',line[1]['phone'])): tmp.update({'phone':line[1]['phone']}) if line[1]['gender'] in ['f','m']: tmp.update({'gender':line[1]['gender']}) if line[1]['fname'] != '': tmp.update({'fname':line[1]['fname']}) if line[1]['lname']!= '': tmp.update({'lname':line[1]['lname']}) if line[1]['address'] != '': tmp.update({'address':line[1]['address']}) return (line[0],tmp) def merge(line): if line[1][1] is None: return (line[0],line[1][0]) else: for k,v in line[1][1].items(): line[1][0][k].append(v) return (line[0],line[1][0]) # In[ ]: #boto talk to s3 conn = S3Connection(AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY) bucket = conn.get_bucket(bucket) complete_users = bucket.list(prefix='user-info/complete-users/%s/'%hex_number) users = bucket.list(prefix='user-info/users/%s/'%hex_number) fps = bucket.list(prefix='user-info/fps/%s/'%hex_number) prime_all= bucket.list(prefix='user-info/prime-all/%s/'%hex_number) advertiser = bucket.list(prefix='user-info/advertiser/%s/'%hex_number) cdd = bucket.list(prefix='user-info/cdd-existing/%s/'%hex_number) #start parallelize conf = SparkConf().setAppName("user-info-%s"%hex_number) sc = SparkContext(conf=conf) sc._conf.getAll() pcomplete_users = sc.parallelize(complete_users) pusers = sc.parallelize(users) pfps = sc.parallelize(fps) pprime_all = sc.parallelize(prime_all) 
padvertiser = sc.parallelize(advertiser) pcdd = sc.parallelize(cdd) # In[ ]: #spark! #step 1 filter and transfer ready_users = pusers.flatMap(users_load).filter(users_filter).map(users_info).distinct() ready_fps = pfps.flatMap(fps_load).filter(fps_filter).map(fps_info).distinct() ready_prime_all = pprime_all.flatMap(prime_all_load).filter(prime_all_filter).map(prime_all_info).distinct() ready_advertiser = padvertiser.flatMap(advertiser_load).filter(advertiser_filter).map(advertiser_info).distinct() ready_cdd = pcdd.flatMap(cdd_load).filter(cdd_filter).map(cdd_info).distinct() output = pcomplete_users.leftOuterJoin(ready_advertiser).map(merge).leftOuterJoin(ready_prime_all).map(merge).leftOuterJoin(ready_fps).map(merge).leftOuterJoin(ready_users).map(merge).leftOuterJoin(ready_cdd).map(merge) print(output.first()) # -
9,479
/pemrograman_web_lanjut/.ipynb_checkpoints/pemrograman_web_lanjut-checkpoint.ipynb
31b7c595ffcaa8148a46f844021bccb8795d76c6
[]
no_license
fiqih24/kuliah
https://github.com/fiqih24/kuliah
0
0
null
2016-12-23T03:10:59
2016-11-15T04:08:47
null
Jupyter Notebook
false
false
.py
18,843
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: PHP # language: php # name: jupyter-php # --- # # Array # # Array is a variable with a lot of values, so instead of doing this: $a_1 = 1; $a_2 = 3; $a_3 = 5; echo $a_1; echo $a_2; echo $a_3; # you better doing this: $b = array(1, 3, 5); echo $b[0]; echo $b[1]; echo $b[2]; # or even better: # + $b = array(1, 3, 5); for($i=0; $i<count($b); $i++){ echo $b[$i]; } # + $fruits = array("banana", "orange", "grape"); // define an array with 3 elements $fruits[] = "strawberry"; // add new element to fruits array echo $fruits[0]; // show the first element // use for to show all elements for( $i=0; $i<count( $fruits); $i++){ $fruit = $fruits[$i]; echo $fruit; } // or foreach to show all elements foreach($fruits as $fruit){ echo $fruit; } # - # # Associative array # # Associative array is array with special keys. It's like dictionary, struct, or record in other programming language. 
# # ```php # $student = array( # 'name' => 'Dono', # 'age' => 19 # ); # echo $student['name']; # echo $student['age']; # ``` # # Suppose we want to make "gunting-batu-kertas" game with a bit expansion just like http://bigbangtheory.wikia.com/wiki/Rock_Paper_Scissors_Lizard_Spock # # You can do this (take a look at how I eliminate unnecessary nested if): # + $player_1 = array('spock', 'gunting', 'batu', 'kertas'); $player_2 = array('kadal', 'batu', 'batu', 'batu'); $menang = array( 'gunting' => array('kertas', 'kadal'), 'batu' => array('gunting', 'kadal'), 'kertas' => array('batu', 'spock'), 'kadal' => array('spock', 'kertas'), 'spock' => array('gunting', 'batu'), ); for($i=0; $i<count($player_1); $i++){ $p1 = $player_1[$i]; $p2 = $player_2[$i]; if($p1 == $p2){ echo 'draw'; }else if(in_array($p2, $menang[$p1])){ echo 'player 1'; }else{ echo 'player 2'; } } # - # # Class and Inheritance # # Notice that `Mahasiswa` has everything `Orang` has. Plus, in `Mahasiswa` you can use `Orang`'s `__construct` and `sapa` to make your code shorter # + class Orang{ public $nama; public $alamat; public function __construct($nama, $alamat){ $this->nama = $nama; $this->alamat = $alamat; } public function sapa(){ echo $this->nama.'<br />'; echo $this->alamat.'<br />'; } } class Mahasiswa extends Orang{ public $nrp; public function __construct($nrp, $nama, $alamat){ $this->nrp = $nrp; parent::__construct($nama, $alamat); } public function sapa(){ echo $this->nrp.'<br />'; parent::sapa(); } } $tono = new Mahasiswa('151112345','Tono Martono', 'Malang'); $tono->nama = 'Tono Martono Jr.'; $tono->sapa(); # - # # Insert and Select From Database (The Old Way) $link = mysqli_connect('localhost', 'root', 'toor', 'belajar_db'); $sql = "INSERT INTO mhs(nrp, nama) VALUES('12111234', 'Budi')"; mysqli_query($link, $sql); $sql = "SELECT nrp, nama FROM mhs"; $result = mysqli_query($link, $sql); while($row = mysqli_fetch_array($result)){ echo $row['nrp']; echo ' '; echo $row['nama']; echo '<br />'; } # 
# Using Class to Make Things Easier # + // Location: belajar-database-class.php class Table{ private $link; public $table_name; public $fields; public function __construct(){ $this->link = mysqli_connect('localhost', 'root', 'toor', 'belajar_db'); } public function insert($data){ foreach($data as $key=>$val){ $fields[] = $key; $values[] = "'" . addslashes($val) . "'"; } $sql = "INSERT INTO $this->table_name("; $sql .= implode(', ', $fields); $sql .= ') VALUES ('; $sql .= implode(', ', $values); $sql .= ')'; mysqli_query($this->link, $sql); } public function show(){ $sql = "SELECT " . implode(', ', $this->fields) . " FROM " . $this->table_name; $result = mysqli_query($this->link, $sql); while($row = mysqli_fetch_array($result)){ foreach($this->fields as $field){ echo $row[$field].' '; } echo '<br />'; } } } // Location: belajar-db-class-implemented.php // Uncommment a line below //include('belajar-db-class.php'); class Student extends Table{ public $table_name = 'mhs'; public $fields = array('nrp', 'nama'); } $mhs = new Student(); $mhs->insert(array('nrp'=>'131124566', 'nama'=>'Toni')); $mhs->show(); # - # # Final Project # # * A team consists of three or less students # * Creating a program with any web framework that utilize: # # - Authentication/Authorization # - CRUD # - AJAX # # * What you should do: # # - Edit the spreadsheet here: https://docs.google.com/spreadsheets/d/1XPKYdax5cWEQ1I4BBfFOFdPx9d9D8biEDAKKvpwvv7E/edit?usp=sharing # - Create proposal, and send it to frendi@stiki.ac.id. 
The proposal should contains: # - Brief description of your project (What it is about, why it is important, who will be the users of your program, etc) # - Previous research related to your project # - What each of you will do in the project # - Fill the schedule (the link will be published here) # - Attend the test # - Present your program # - Answer questions (different questions for different member of the team) # - Modify your program if necessary # - Creating report, and send it to frendi@stiki.ac.id. Here is the template https://github.com/goFrendiAsgard/kuliah/blob/master/template%20laporan%202015.doc?raw=true
5,845
/BES f2f/Untitled2.ipynb
86b4d6e7e7eccd67e2f258ad210836a90bf7611a
[]
no_license
MariosRichards/BES_analysis_code
https://github.com/MariosRichards/BES_analysis_code
6
1
null
2023-07-06T21:08:50
2023-04-10T07:29:51
Jupyter Notebook
Jupyter Notebook
false
false
.py
365,680
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + ## change in how age profile affects voting with generations! # + ## try to replicate the John Burn Murdoch breakdown # (which I'm sure I've already done elsewhere, but not labelled the notbook clearly!) # - # + # # %%time ## Just an easy way to load utility functions dataset_name = "W19_only" df_list = [] # %matplotlib inline # %run f2f_header.py {dataset_name} {df_list} global BES_label_list, BES_df_list # # %run f2f_utility.py BES_df_list = pickle.load( open( "E:\\BES_analysis_data\\Face_to_face\\BES_df_list.pickle", "rb" ) ) # f2f_harmonised = pd.read_pickle("E:\\BES_analysis_data\\Face_to_face\\f2f_harmonised.zip", compression='zip') BES_label_list = pickle.load( open( "E:\\BES_analysis_data\\Face_to_face\\BES_label_list.pickle", "rb" ) ) demo_var_dict = pickle.load( open( "E:\\BES_analysis_data\\Face_to_face\\demo_var_dict.pickle", "rb" ) ) var_type_dict_nonans = pickle.load( open( "E:\\BES_analysis_data\\Face_to_face\\var_type_dict_nonans.pickle", "rb" ) ) f2f_harmonised = pd.read_pickle("E:\\BES_analysis_data\\Face_to_face\\f2f_harmonised_temp.zip", compression='zip') generic_cols = f2f_harmonised.columns list_of_scale_harm_vars = ["Age","year_past_min_sch_leave_age","in_school_past_min_age"] ## BUGS BES_label_list["1959"]["v1236"] = 'LOCAL ELECTION VOTE 1963(2nd)' BES_label_list["1964"]["v1236"] = 'LOCAL ELECTION VOTE 1963(2nd)' BES_label_list["1966"]["v1236"] = 'LOCAL ELECTION VOTE 1963(2nd)' BES_label_list["1970"]["v1236"] = 'LOCAL ELECTION VOTE 1963(2nd)' BES_label_list["2015"]['edlevel'] = 'Education level (summary)' # + # chart with age on X, voting behaviour on y, hue by generation # we're trying to isolate diff by generation # (hypothesis - it starts with Boomers skewing Conservative, then Gen-Z leans away) # - 
search(f2f_harmonised,"Auth|generation|age|birth") f2f_harmonised[["age","generation","birth_year","AuthRight"]] # + f2f_harmonised["birth_decade"] = (round(f2f_harmonised["birth_year"]/10)*10).astype(int) f2f_harmonised["birth_decade"].value_counts().sort_index() # + f2f_harmonised["birth_decade"] = f2f_harmonised["birth_decade"].replace(1860,1880).replace(1870,1880) # 1880 effectively equals 1880- # - sns.lmplot(data=f2f_harmonised, x='age', y='AuthRight', hue='birth_decade', logistic=True, y_jitter=0.05) # + auth_norms = f2f_harmonised.groupby('dataset')["AuthRight"].mean() f2f_harmonised["AuthRight_norm"] = f2f_harmonised[["AuthRight","dataset"]].apply(lambda x: x["AuthRight"]-auth_norms.loc[x["dataset"]],axis=1) # - sns.lmplot(data=f2f_harmonised, x='age', y='AuthRight_norm', hue='birth_decade', logistic=True, y_jitter=0.05) sns.lmplot(data=f2f_harmonised, x='age', y='AuthRight_norm', hue='birth_decade', y_jitter=0.05)
3,012
/LSTM_Flow_Volvo.ipynb
351633bfa91628980617fd9d39db0217ce7eb306
[]
no_license
JocelynWang2021/Master_thesis
https://github.com/JocelynWang2021/Master_thesis
2
1
null
null
null
null
Jupyter Notebook
false
false
.py
109,133
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from keras.utils import np_utils from keras.models import Sequential from keras.layers import Dense, LSTM, Dropout from keras.callbacks import EarlyStopping from sklearn.preprocessing import StandardScaler from pandas import read_csv import pandas as pd import numpy as np import glob import matplotlib.pyplot as plt import collections # %matplotlib inline # + # Set up a 'look back' dataset for sequence to label prediction with Keras. # The LSTM network expects the input data (X) to be provided with a specific # array structure in the form of: [samples, time steps, features]. def create_dataset(X, Y, **options): """Convert an array of X, Y values into a dataset matrix for and LSTM""" look_back = options.pop('look_back', None) dataX, dataY = [], [] for i in range(len(X) - look_back): a = X[i:(i+look_back)] dataX.append(a) dataY.append(Y[i + look_back - 1]) return np.array(dataX), np.array(dataY) # Predictions will be based on look_back minutes of data: look_back = 50 # + Test_path = glob.glob('../Users/XiaonfengWang/Desktop/Test_Flow/*.csv') X_TestData = np.empty((1, 50, 35)) Y_TestData = np.empty((1,)) for f in range(300): print(Test_path[f].split('/')[-1]) Flow_Test = pd.read_csv(Test_path[f], usecols=['Lane','Velocity','PosLgt1','PosLgt2','PosLgt3','PosLat1','PosLat2','VelLgt1','VelLgt2','VelLgt3','VelLat1','VelLat2','VelLat3','AccLgt1','AccLgt2','AccLgt3','AccLat1','AccLat2','AccLat3','Angle1','Angle2','Angle3','Type1','Type2','Type3','Id1','Id2','Id3','Lane1','Lane2','Lane3','PositionInLane1','PositionInLane2','PositionInLane3','Lead','Flow']) Flow_Test.fillna(10000000, inplace=True) Y_train_Test = np.array(Flow_Test['Flow'].values) X_train_Test = 
np.array(Flow_Test[['Lane','Velocity','PosLgt1','PosLgt2','PosLgt3','PosLat1','PosLat2','VelLgt1','VelLgt2','VelLgt3','VelLat1','VelLat2','VelLat3','AccLgt1','AccLgt2','AccLgt3','AccLat1','AccLat2','AccLat3','Angle1','Angle2','Angle3','Type1','Type2','Type3','Id1','Id2','Id3','Lane1','Lane2','Lane3','PositionInLane1','PositionInLane2','PositionInLane3','Lead']]) dimof_output = 1 dimof_input = X_train_Test.shape[1] scaler = StandardScaler() X_train_Test = scaler.fit_transform(X_train_Test) XTest, YTest = create_dataset(X_train_Test, Y_train_Test, look_back=look_back) X_TestData = np.append(X_TestData, XTest1, axis=0) Y_TestData = np.append(Y_TestData, YTest1, axis=0) # + Train_path = glob.glob('../Users/XiaonfengWang/Desktop/Train_Flow/*.csv') X_all = np.empty((1, 50, 35)) Y_all = np.empty((1,)) for j in range(50): Flow = pd.read_csv(Train_path[j], usecols=['Lane','Velocity','PosLgt1','PosLgt2','PosLgt3','PosLat1','PosLat2','VelLgt1','VelLgt2','VelLgt3','VelLat1','VelLat2','VelLat3','AccLgt1','AccLgt2','AccLgt3','AccLat1','AccLat2','AccLat3','Angle1','Angle2','Angle3','Type1','Type2','Type3','Id1','Id2','Id3','Lane1','Lane2','Lane3','PositionInLane1','PositionInLane2','PositionInLane3','Lead','Flow']) Flow.fillna(10000000, inplace=True) Y_train = np.array(Flow['Flow'].values) X_train = np.array(Flow[['Lane','Velocity','PosLgt1','PosLgt2','PosLgt3','PosLat1','PosLat2','VelLgt1','VelLgt2','VelLgt3','VelLat1','VelLat2','VelLat3','AccLgt1','AccLgt2','AccLgt3','AccLat1','AccLat2','AccLat3','Angle1','Angle2','Angle3','Type1','Type2','Type3','Id1','Id2','Id3','Lane1','Lane2','Lane3','PositionInLane1','PositionInLane2','PositionInLane3','Lead']]) dimof_output = 1 dimof_input = X_train.shape[1] scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X, Y = create_dataset(X_train, Y_train, look_back=look_back) X_all = np.append(X_all, X, axis=0) Y_all = np.append(Y_all, Y, axis=0) # + # These sizes need to be divisible by 32 and remove the first randomly created 
matrix. Train_size = len(X_all)//64*64 + 1 Test_size = len(X_TestData)//64*64 + 1 Xtest = X_TestData[1:Test_size] Ytest = Y_TestData[1:Test_size] Xtrain = X_all[1:Train_size] Ytrain = Y_all[1:Train_size] # - # Create the LSTM network. batch_size = 32 dropout = 0.5 num_epoch = 100 earlyStopping = EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto') weights = {0:1, 1:1} # + model_Flow = Sequential() model_Flow.add(LSTM(output_dim=10, batch_input_shape=[batch_size, look_back, dimof_input])) model_Flow.add(Dropout(dropout)) model_Flow.add(Dense(dimof_output, init='uniform', activation='sigmoid')) model_Flow.compile(loss='binary_crossentropy', optimizer='adam',metrics=['accuracy']) history = model_Flow.fit( Xtrain, Ytrain, class_weight=weights, validation_data=(Xtest, Ytest), callbacks=[earlyStopping], shuffle=True, nb_epoch=num_epoch, batch_size=batch_size, verbose=1) print(history.history) Y_predict = model_CutIn.predict_classes(Xtest, verbose=True) # Add the prediction in a list a6 = Y_predict.tolist() # item[0] of the prediction is the predicted label a26 = [item[0] for item in a6] # the true label b6 = Ytest.tolist() # + nums_predict = [index for index, element in enumerate(a26) if element == 1] p = sum((list(t) for t in zip(nums_predict, nums_predict[1:]) if t[0]+1 != t[1]), []) p.insert(0,nums_predict[0]) p.append(nums_predict[-1]) nums_Y = [index for index, element in enumerate(b6) if element == 1] y = sum((list(t) for t in zip(nums_Y, nums_Y[1:]) if t[0]+1 != t[1]), []) y.insert(0,nums_Y[0]) y.append(nums_Y[-1]) big=0 small=0 same=0 for i in range(len(y)-1): if i % 2 == 0: for j in range(len(p)): if j % 2 == 0: if (y[i] == p[j]) and (y[i+1] == p[j+1]): same=same+1 j=j+2 elif p[j]<=y[i] and p[j+1]>=y[i+1]: big=big+1 j=j+2 elif p[j]>=y[i] and p[j+1]<=y[i+1]: small=small+1 j=j+2 else: j=j+2 i=i+2 Predict = len(p)/2 Y = len(y)/2 def precision(n): fp = Predict-n prec = n/(n+fp) if prec >=1: b=1 else: b=prec return b def recall(n): fn = Y-n rec = 
n/(n+fn) if rec >=1: a=1 else: a=rec return a def conf(f): conf95 = [] for i in range(len(y)): if i%2 == 0: c951=int(y[i]-f*(y[i+1]-y[i])) conf95.append(c951) conf95.append(y[i+1]) c9522=y[i]+f*(y[i+1]-y[i]) if float(c9522).is_integer()==True: conf95.append(c9522) conf95.append(y[i+1]) else: con952 = int(c9522)+1 conf95.append(con952) conf95.append(y[i+1]) conf95.append(y[i]) c9533=int(y[i+1]-f*(y[i+1]-y[i])) if float(c9533).is_integer()==True: conf95.append(c9533) else: c953 = int(c9533)+1 conf95.append(c953) conf95.append(y[i]) c9544=y[i+1]+f*(y[i+1]-y[i]) if float(c9544).is_integer()==True: conf95.append(c9544) else: con954 = int(c9544)+1 conf95.append(con954) i=i+2 return conf95 def result(rangelist): con95=0 for i in range(len(p)): if i%2 == 0: for j in range(len(rangelist)-7): if j%8==0: if (rangelist[j]<=p[i] and p[i+1]<=rangelist[j+1]) or (rangelist[j+2]<=p[i] and p[i+1]<=rangelist[j+3]) or (rangelist[j+4]<=p[i] and p[i+1]<=rangelist[j+5]) or (rangelist[j+6]<=p[i] and p[i+1]<=rangelist[j+7]): con95=con95+1 #break j=j+8 else: j=j+8 i=i+2 return con95 con95 = result(conf(0.05)) con90 = result(conf(0.1)) con85 = result(conf(0.15)) con80 = result(conf(0.2)) # - print("Predict:", Predict, "-", "True:", Y) print("big:",big, "-", "precision:", precision(big), "-", "recall:", recall(big)) print("small:",small, "-", "precision:", precision(small), "-", "recall:", recall(small)) print("-") print("same:",same, "-", "precision:", precision(same), "-", "recall:", recall(same)) print("con95:",con95, "-", "precision:", precision(con95), "-", "recall:", recall(con95)) print("con90:",con90, "-", "precision:", precision(con90), "-", "recall:", recall(con90)) print("con85:",con85, "-", "precision:", precision(con85), "-", "recall:", recall(con85)) print("con80:",con80, "-", "precision:", precision(con80), "-", "recall:", recall(con80)) # # more 50 # + X_MoreTrainData = np.empty((1, 50, 35)) Y_MoreTrainData = np.empty((1,)) for j in range(450,500): Flow_more = 
pd.read_csv(Train_path[j], usecols=['Lane','Velocity','PosLgt1','PosLgt2','PosLgt3','PosLat1','PosLat2','VelLgt1','VelLgt2','VelLgt3','VelLat1','VelLat2','VelLat3','AccLgt1','AccLgt2','AccLgt3','AccLat1','AccLat2','AccLat3','Angle1','Angle2','Angle3','Type1','Type2','Type3','Id1','Id2','Id3','Lane1','Lane2','Lane3','PositionInLane1','PositionInLane2','PositionInLane3','Lead','Flow']) if Flow_more.shape[0] <= 50: pass else: Flow_more.fillna(10000000, inplace=True) Y_train_more = np.array(Flow_more['Flow'].values) X_train_more = np.array(Flow_more[['Lane','Velocity','PosLgt1','PosLgt2','PosLgt3','PosLat1','PosLat2','VelLgt1','VelLgt2','VelLgt3','VelLat1','VelLat2','VelLat3','AccLgt1','AccLgt2','AccLgt3','AccLat1','AccLat2','AccLat3','Angle1','Angle2','Angle3','Type1','Type2','Type3','Id1','Id2','Id3','Lane1','Lane2','Lane3','PositionInLane1','PositionInLane2','PositionInLane3','Lead']]) scaler = StandardScaler() X_train_more = scaler.fit_transform(X_train_more) X_more, Y_more = create_dataset(X_train_more, Y_train_more, look_back=look_back) X_MoreTrainData = np.append(X_MoreTrainData, X_more, axis=0) Y_MoreTrainData = np.append(Y_MoreTrainData, Y_more, axis=0) # + # Added_size need to be divisible by 32 and remove the first randomly created matrix. 
Added_size = len(X_MoreTrainData)//64*64+1 X_MoreTrainData = X_MoreTrainData[1:Added_size] Y_MoreTrainData = Y_MoreTrainData[1:Added_size] Xtrain = np.append(Xtrain, X_MoreTrainData, axis=0) Ytrain = np.append(Ytrain, Y_MoreTrainData, axis=0) # + model_Flow = Sequential() model_Flow.add(LSTM(output_dim=10, batch_input_shape=[batch_size, look_back, dimof_input])) model_Flow.add(Dropout(dropout)) model_Flow.add(Dense(dimof_output, init='uniform', activation='sigmoid')) model_Flow.compile(loss='binary_crossentropy', optimizer='adam',metrics=['accuracy']) history = model_Flow.fit( Xtrain, Ytrain, class_weight=weights, validation_data=(Xtest, Ytest), callbacks=[earlyStopping], shuffle=True, nb_epoch=num_epoch, batch_size=batch_size, verbose=1) print(history.history) Y_predict = model_CutIn.predict_classes(Xtest, verbose=True) # Add the prediction in a list a6 = Y_predict.tolist() # item[0] of the prediction is the predicted label a26 = [item[0] for item in a6] # the true label b6 = Ytest.tolist() # + nums_predict = [index for index, element in enumerate(a26) if element == 1] p = sum((list(t) for t in zip(nums_predict, nums_predict[1:]) if t[0]+1 != t[1]), []) p.insert(0,nums_predict[0]) p.append(nums_predict[-1]) nums_Y = [index for index, element in enumerate(b6) if element == 1] y = sum((list(t) for t in zip(nums_Y, nums_Y[1:]) if t[0]+1 != t[1]), []) y.insert(0,nums_Y[0]) y.append(nums_Y[-1]) big=0 small=0 same=0 for i in range(len(y)-1): if i % 2 == 0: for j in range(len(p)): if j % 2 == 0: if (y[i] == p[j]) and (y[i+1] == p[j+1]): same=same+1 j=j+2 elif p[j]<=y[i] and p[j+1]>=y[i+1]: big=big+1 j=j+2 elif p[j]>=y[i] and p[j+1]<=y[i+1]: small=small+1 j=j+2 else: j=j+2 i=i+2 Predict = len(p)/2 Y = len(y)/2 def precision(n): fp = Predict-n prec = n/(n+fp) if prec >=1: b=1 else: b=prec return b def recall(n): fn = Y-n rec = n/(n+fn) if rec >=1: a=1 else: a=rec return a def conf(f): conf95 = [] for i in range(len(y)): if i%2 == 0: c951=int(y[i]-f*(y[i+1]-y[i])) 
conf95.append(c951) conf95.append(y[i+1]) c9522=y[i]+f*(y[i+1]-y[i]) if float(c9522).is_integer()==True: conf95.append(c9522) conf95.append(y[i+1]) else: con952 = int(c9522)+1 conf95.append(con952) conf95.append(y[i+1]) conf95.append(y[i]) c9533=int(y[i+1]-f*(y[i+1]-y[i])) if float(c9533).is_integer()==True: conf95.append(c9533) else: c953 = int(c9533)+1 conf95.append(c953) conf95.append(y[i]) c9544=y[i+1]+f*(y[i+1]-y[i]) if float(c9544).is_integer()==True: conf95.append(c9544) else: con954 = int(c9544)+1 conf95.append(con954) i=i+2 return conf95 def result(rangelist): con95=0 for i in range(len(p)): if i%2 == 0: for j in range(len(rangelist)-7): if j%8==0: if (rangelist[j]<=p[i] and p[i+1]<=rangelist[j+1]) or (rangelist[j+2]<=p[i] and p[i+1]<=rangelist[j+3]) or (rangelist[j+4]<=p[i] and p[i+1]<=rangelist[j+5]) or (rangelist[j+6]<=p[i] and p[i+1]<=rangelist[j+7]): con95=con95+1 #break j=j+8 else: j=j+8 i=i+2 return con95 con95 = result(conf(0.05)) con90 = result(conf(0.1)) con85 = result(conf(0.15)) con80 = result(conf(0.2)) # - print("Predict:", Predict, "-", "True:", Y) print("big:",big, "-", "precision:", precision(big), "-", "recall:", recall(big)) print("small:",small, "-", "precision:", precision(small), "-", "recall:", recall(small)) print("-") print("same:",same, "-", "precision:", precision(same), "-", "recall:", recall(same)) print("con95:",con95, "-", "precision:", precision(con95), "-", "recall:", recall(con95)) print("con90:",con90, "-", "precision:", precision(con90), "-", "recall:", recall(con90)) print("con85:",con85, "-", "precision:", precision(con85), "-", "recall:", recall(con85)) print("con80:",con80, "-", "precision:", precision(con80), "-", "recall:", recall(con80)) # + same_P = [0.6124497991967871,0.8780487804878049 ,0.9405204460966543 ,0.9333333333333333 ,0.9444444444444444 , 0.9481481481481482,0.9215328467153284 , 0.9369202226345084, 0.948051948051948, 0.948051948051948] con95_P = [ 0.9196787148594378,0.9774859287054409,0.9888475836431226 
,0.987037037037037 , 0.9851851851851852,0.987037037037037 ,0.9835766423357665 ,0.987012987012987, 0.987012987012987, 0.987012987012987] con90_P = [0.929718875502008, 0.9793621013133208, 0.9907063197026023,0.9888888888888889 , 0.987037037037037,0.9888888888888889,0.9854014598540146 , 0.9888682745825603,0.9888682745825603,0.9888682745825603 ] con85_P = [ 0.9337349397590361,0.9812382739212008,0.9925650557620818 ,0.9907407407407407 ,0.9888888888888889 ,0.9907407407407407, 0.9872262773722628, 0.9907235621521335,0.9907235621521335,0.9907235621521335] con80_P = [0.9397590361445783, 0.9831144465290806,0.9944237918215614 ,0.9925925925925926 , 0.9907407407407407, 0.9925925925925926, 0.9890510948905109,0.9925788497217068,0.9925788497217068,0.9925788497217068] x = [50,100,150,200,250,300,350,400,450,500] f, ax = plt.subplots(figsize=(10,3)) plt.plot(x, same_P) plt.plot(x, con95_P) plt.plot(x, con90_P) plt.plot(x, con85_P) plt.plot(x, con80_P) plt.legend(['Same', 'Overlap:95%', 'Overlap:90%', 'Overlap:85%', 'Overlap:80%'], loc='upper left', bbox_to_anchor=(1, 0.5)) plt.title('Precision for Flow') plt.show() # + same_R = [0.5765595463137996,0.8846880907372401 , 0.9565217391304348, 0.9527410207939508, 0.972181551976574, 0.9640831758034026, 0.9678638941398866,0.9546313799621928 , 0.9546313799621928, 0.9659735349716446] con95_R = [0.8657844990548205,0.9848771266540642,1, 1, 1, 1,1,1, 1,1 ] con90_R = [0.8752362948960303,0.9867674858223062,1, 1, 1, 1, 1,1, 1,1] con85_R = [0.8790170132325141,0.9886578449905482,1, 1, 1, 1,1,1, 1,1] con80_R = [0.8846880907372401,0.9905482041587902,1, 1, 1, 1, 1, 1,1,1] f, ax = plt.subplots(figsize=(10,3)) plt.plot(x, same_R) plt.plot(x, con95_R) plt.plot(x, con90_R) plt.plot(x, con85_R) plt.plot(x, con80_R) plt.legend(['Same', 'Overlap:95%', 'Overlap:90%', 'Overlap:85%', 'Overlap:80%'], loc='upper left', bbox_to_anchor=(1, 0.5)) plt.title('Recall for Flow') plt.show() # + same_R = [0.9678638941398866,0.9546313799621928, 
0.9659735349716446,0.9589735349716446, 0.9579735349716446] con95_R = [1,1,1,1,1] con90_R = [1,1,1,1,1] con85_R = [1,1,1,1,1] con80_R = [1,1,1,1,1] x = [100, 200, 300, 400, 500] f, ax = plt.subplots(figsize=(10,3)) plt.plot(x, same_R) plt.plot(x, con95_R) plt.plot(x, con90_R) plt.plot(x, con85_R) plt.plot(x, con80_R) plt.legend(['Same', 'Overlap:95%', 'Overlap:90%', 'Overlap:85%', 'Overlap:80%'], loc='upper left', bbox_to_anchor=(1, 0.5)) plt.xlabel('The amount of Free Flow') plt.ylabel('F1 score') plt.show() # -
18,145
/src/.ipynb_checkpoints/gtzan_basic-checkpoint.ipynb
c0c4f1b48e9e50175f04553c7aaf0543a8a229dc
[]
no_license
jaehwlee/music-genre-classification
https://github.com/jaehwlee/music-genre-classification
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
92,211
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="q6QdFCMV1Sbo" import numpy as np import matplotlib.pyplot as plt # + id="6qNNPiAH1sRa" def estiamte_b0_b1(x, y): n = np.size(x) m_x, m_y = np.mean(x), np.mean(y) sumatoria_xy = np.sum((x - m_x) * (y - m_y)) sumatoria_xx = np.sum(x * (x - m_x)) # Coeficiente de regresión b_1 = sumatoria_xy / sumatoria_xx b_0 = m_y - b_1 * m_x return (b_0, b_1) # + id="QcakGNE13Iqr" # Graficando def plot_regresion(x,y,b): plt.scatter(x, y, color="b", marker="o", s=30) y_pred = b[0] + b[1]*x plt.plot(x,y_pred, color="r") plt.xlabel('x-Ind') plt.ylabel('y-Dep') plt.show() # + id="LiuOmpZk44dW" outputId="63b49967-b1a2-488f-f9ad-6872dafc8a73" colab={"base_uri": "https://localhost:8080/", "height": 296} def main(): # datos (dataset) x = np.array([1,2,3,4,5]) y = np.array([2,3,5,6,5]) b = estiamte_b0_b1(x, y) print("Los valores son: b_0={}, b1={}".format(b[0] , b[1])) plot_regresion(x, y, b) if __name__=="__main__": main() # + id="cC119-bx53MW" , levels=[-.5, 0, .5]) plt.scatter(X[:, 0], X[:, 1], c=y, cmap='bwr', edgecolor='k', s=100) plt.show() # Demonstrate that support vectors will sometimes cross the margin X, y = make_blobs(n_samples=100, centers=2, random_state=0, cluster_std=.95) plt.scatter(X[:, 0], X[:, 1], c=y, s=100, cmap="bwr") plt.show() # Split data into training and testing from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) # Fit to the training data and validate with the test data model = SVC(kernel='linear') model.fit(X_train, y_train) predictions = model.predict(X_test) predictions # + # Plot the decision boundaries x_min = X[:, 0].min() x_max = X[:, 0].max() y_min = X[:, 1].min() y_max = X[:, 1].max() XX, YY = np.mgrid[x_min:x_max, y_min:y_max] Z = model.decision_function(np.c_[XX.ravel(), 
YY.ravel()]) # Put the result into a color plot Z = Z.reshape(XX.shape) # plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired) plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'], levels=[-.5, 0, .5]) plt.scatter(X[:, 0], X[:, 1], c=y, cmap='bwr', edgecolor='k', s=100) plt.show() # - # Calculate classification report from sklearn.metrics import classification_report print(classification_report(y_test, predictions, target_names=["blue", "red"])) Conv2D(n_filters, (3, 3), strides=(1, 1), padding='same')(x) x = Activation('relu')(x) x = MaxPooling2D(pool_size=pool_size, strides=pool_size)(x) x = Dropout(0.25)(x) return x # Model Definition def create_model(input_shape, num_genres): inpt = Input(shape=input_shape) x = conv_block(inpt, 16) x = conv_block(x, 32) x = conv_block(x, 64) x = conv_block(x, 128) x = conv_block(x, 256) # Global Pooling and MLP x = Flatten()(x) x = Dropout(0.5)(x) x = Dense(512, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.02))(x) x = Dropout(0.25)(x) predictions = Dense(num_genres, activation='softmax', kernel_regularizer=tf.keras.regularizers.l2(0.02))(x) model = Model(inputs=inpt, outputs=predictions) return model # + # confusion matrix def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. 
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') # majority vote def majority_vote(scores): values, counts = np.unique(scores,return_counts=True) ind = np.argmax(counts) return values[ind] # + # load data print('============================================================================') print('Loading dataset...') print('============================================================================') X_train = np.load('/home/jaehwlee/Genre_classification/mel_data/X_train.npy') X_valid = np.load('/home/jaehwlee/Genre_classification/mel_data/X_valid.npy') X_test = np.load('/home/jaehwlee/Genre_classification/mel_data/X_test.npy') y_train = np.load('/home/jaehwlee/Genre_classification/mel_data/y_train.npy') y_valid = np.load('/home/jaehwlee/Genre_classification/mel_data/y_valid.npy') y_test = np.load('/home/jaehwlee/Genre_classification/mel_data/y_test.npy') song_samples = 660000 genres = {'metal': 0, 'disco': 1, 'classical': 2, 'hiphop': 3, 'jazz': 4, 'country': 5, 'pop': 6, 'blues': 7, 'reggae': 8, 'rock': 9} print(X_train.shape) print(X_valid.shape) print(X_test.shape) print(y_train.shape) print(y_valid.shape) print(y_test.shape) print('============================================================================') print('complete!') print('============================================================================') # + # callback function reduceLROnPlat = ReduceLROnPlateau( monitor='val_loss', factor=0.95, patience=3, verbose=1, mode='min', min_delta=0.0001, cooldown=2, min_lr=1e-5 ) mc = ModelCheckpoint('0707_mel.h5', monitor='val_acc', mode='max', verbose=1, save_best_only=True, save_weights_only=True) rl = ReduceLROnPlateau(monitor='val_loss', factor=0.95, patience=3, verbose=1, mode='min', min_delta=0.0001, cooldown=2, min_lr=1e-5) callback_list = 
[mc,rl] # + # data generating batch_size = BATCH_SIZE train_generator = GTZANGenerator(X_train, y_train) steps_per_epoch = np.ceil(len(X_train)/batch_size) validation_generator = GTZANGenerator(X_valid, y_valid) val_steps = np.ceil(len(X_test)/batch_size) # - # compile model and summary model = create_model(X_train[0].shape, NUM_OF_GENRES) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc']) #model.summary() # + # fit model hist = model.fit_generator( train_generator, steps_per_epoch=steps_per_epoch, validation_data=validation_generator, validation_steps=val_steps, epochs=150, verbose=1, callbacks=[mc,rl]) model.save('0707_mel.h5') score = model.evaluate(X_test, y_test, verbose=0) print("val_loss = {:.3f} and val_acc = {:.3f}".format(score[0], score[1])) # + # plot confuison matrix preds = np.argmax(model.predict(X_test), axis = 1) y_orig = np.argmax(y_test, axis = 1) cm = confusion_matrix(preds, y_orig) keys = OrderedDict(sorted(genres.items(), key=lambda t: t[1])).keys() plt.figure(figsize=(10,10)) plot_confusion_matrix(cm, keys, normalize=True) # + # majority voting preds = model.predict(X_test, batch_size=BATCH_SIZE, verbose=0) scores_songs = np.split(np.argmax(preds, axis=1), 300) scores_songs = [majority_vote(scores) for scores in scores_songs] label = np.split(np.argmax(y_test, axis=1), 300) label = [majority_vote(l) for l in label] from sklearn.metrics import accuracy_score print("majority voting system (acc) = {:.3f}".format(accuracy_score(label, scores_songs)))
8,049
/code/chap04mine.ipynb
39d536e91d918e883b756cea940f88aa223767e0
[ "MIT" ]
permissive
DaveFreem/ModSimPy
https://github.com/DaveFreem/ModSimPy
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
321,935
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Modeling and Simulation in Python # # Chapter 4: Predict # # Copyright 2017 Allen Downey # # License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0) # # + # If you want the figures to appear in the notebook, # and you want to interact with them, use # # %matplotlib notebook # If you want the figures to appear in the notebook, # and you don't want to interact with them, use # # %matplotlib inline # If you want the figures to appear in separate windows, use # # %matplotlib qt5 # To switch from one to another, you have to select Kernel->Restart # %matplotlib inline from modsim import * # - # ### Functions from the previous chapter def plot_estimates(table): """Plot world population estimates. table: DataFrame with columns `un` and `census` """ un = table.un / 1e9 census = table.census / 1e9 plot(census, ':', color='darkblue', label='US Census') plot(un, '--', color='green', label='UN DESA') decorate(xlabel='Year', ylabel='World population (billion)') def plot_results(system): """Plot the estimates and the model. system: System object with `results` """ newfig() plot_estimates(table2) plot(system.results, '--', color='gray', label='model') decorate(xlabel='Year', ylabel='World population (billion)') def run_simulation(system, update_func): """Run a model. Adds TimeSeries to `system` as `results`. 
system: System object update_func: function that computes the population next year """ results = Series([]) results[system.t0] = system.p0 for t in linrange(system.t0, system.t_end): results[t+1] = update_func(results[t], t, system) system.results = results # ### Reading the data # + # The data directory contains a downloaded copy of # https://en.wikipedia.org/wiki/World_population_estimates from pandas import read_html filename = 'data/World_population_estimates.html' tables = read_html(filename, header=0, index_col=0, decimal='M') # - table2 = tables[2] table2.columns = ['census', 'prb', 'un', 'maddison', 'hyde', 'tanton', 'biraben', 'mj', 'thomlinson', 'durand', 'clark'] newfig() plot_estimates(table2) # ### Running the quadratic model # Here's the update function for the quadratic growth model with parameters `alpha` and `beta`. def update_func2(pop, t, system): """Update population based on a quadratic model. pop: current population in billions t: what year it is system: system object with model parameters """ net_growth = system.alpha * pop + system.beta * pop**2 return pop + net_growth # Select the estimates generated by the U.S. Census, and convert to billions. census = table2.census / 1e9 # Extract the starting time and population. t0 = census.index[0] p0 = census[t0] t_end = census.index[-1] # Initialize the system object. # + system = System(t0=t0, t_end=t_end, p0=p0, alpha=0.025, beta=-0.0018) system # - # Run the model and plot results. 
run_simulation(system, update_func2) plot_results(system) decorate(title='Quadratic model') # ### Generating projections # To generate projections, all we have to do is change `t_end` system.t_end = 2250 run_simulation(system, update_func2) plot_results(system) decorate(title='World population projection') savefig('chap04-fig01.pdf') # The population in the model converges on the equilibrium population, `-alpha/beta` system.results[system.t_end] -system.alpha / system.beta # **Exercise:** What happens if we start with an initial population above the carrying capacity, like 20 billion? The the model with initial populations between 1 and 20 billion, and plot the results on the same axes. # + # Solution goes here def plot_results_fix(system): plot_estimates(table2) plot(system.results, '--', color='gray', label='model') decorate(xlabel='Year', ylabel='World population (billion)') def try1(p00=10): system = System(t0=t0, t_end=t_end, p0=p00, alpha=0.025, beta=-0.0018) system.t_end = 2250 run_simulation(system, update_func2) plot_results_fix(system) # - try1(500) # ### Comparing projections # We can compare the projection from our model with projections produced by people who know what they are doing. table3 = tables[3] table3.head() # `NaN` is a special value that represents missing data, in this case because some agencies did not publish projections for some years. table3.columns = ['census', 'prb', 'un'] # This function plots projections from the UN DESA and U.S. Census. It uses `dropna` to remove the `NaN` values from each series before plotting it. def plot_projections(table): """Plot world population projections. table: DataFrame with columns 'un' and 'census' """ census = table.census / 1e9 un = table.un / 1e9 plot(census.dropna(), ':', color='darkblue', label='US Census') plot(un.dropna(), '--', color='green', label='UN DESA') # Run the model until 2100, which is as far as the other projections go. 
system.p0 = census[t0] system.t_end = 2100 run_simulation(system, update_func2) # Plot the results. plot_results(system) plot_projections(table3) decorate(title='World population projections') savefig('chap04-fig02.pdf') # People who know what they are doing expect the growth rate to decline more sharply than our model projects. # **Exercise:** Suppose there are two banks across the street from each other, The First Geometric Bank (FGB) and Exponential Savings and Loan (ESL). They offer the same interest rate on checking accounts, 3%, but at FGB, they compute and pay interest at the end of each year, and at ESL they compound interest continuously. # # If you deposit $p_0$ dollars at FGB at the beginning of Year 0, the balanace of your account at the end of Year $n$ is # # $ x_n = p_0 (1 + \alpha)^n $ # # where $\alpha = 0.03$. At ESL, your balance at any time $t$ would be # # $ x(t) = p_0 \exp(\alpha t) $ # # If you deposit \$1000 at each back at the beginning of Year 0, how much would you have in each account after 10 years? # # Is there an interest rate FGB could pay so that your balance at the end of each year would be the same at both banks? What is it? # # Hint: `modsim` provides a function called `exp`, which is a wrapper for the NumPy function `exp`. 
# + # Solution goes here FGBsys = System(cash_in=1000, rate_a=.03, year_start=0, year_end=100) ESLsys = System(cash_in=1000, rate_a=.03, year_start=0, year_end=100) # + def time_run_FGB(system): years_passed = system.year_end - system.year_start cash_out = TimeSeries() cash_out[system.year_start] = 0 for year in range(int(system.year_start), int(system.year_end)): cash_out[year] = system.cash_in * (1+system.rate_a)**year system.cash_out = cash_out return system.cash_out time_run_FGB(FGBsys) # + def time_run_ESL(system): years_passed = system.year_end - system.year_start cash_out = TimeSeries() cash_out[system.year_start] = 0 for year in range(int(system.year_start), int(system.year_end)): cash_out[year] = system.cash_in * exp(system.rate_a * year) system.cash_out = cash_out return system.cash_out time_run_ESL(ESLsys) # - # Solution goes here def plot_banks(): newfig() plot(ESLsys.cash_out, ':', color='blue', label='ESL') plot(FGBsys.cash_out, ':', color='green', label='FGB') decorate(xlabel='Year', ylabel='Bank Money') plot_banks() # + # Solution goes here def do_the_do(system1, system2): time_run_ESL(system1) time_run_FGB(system2) plot_banks() print('ESL return : ', ESLsys.cash_out, 'FGB return : ', FGBsys.cash_out) do_the_do(ESLsys, FGBsys) # + FGBsys = System(cash_in=1000, rate_a=.030454534, year_start=0, year_end=100) do_the_do(ESLsys, FGBsys) # - # Solution goes here ''''results = TimeSeries() results[system.t0] = system.p0 for t in linrange(system.t0, system.t_end): results[t+1] = -results[t] * death_rate + results[t] + num_births system.results = results # **Exercise:** Suppose a new bank opens called the Polynomial Credit Union (PCU). 
In order to compete with First Geometric Bank and Exponential Savings and Loan, PCU offers a parabolic savings account where the balance is a polynomial function of time: # # $ x(t) = p_0 + \beta_1 t + \beta_2 t^2 $ # # As a special deal, they offer an account with $\beta_1 = 30$ and $\beta_2 = 0.5$, with those parameters guaranteed for life. # # Suppose you deposit \$1000 at all three banks at the beginning of Year 0. How much would you have in each account at the end of Year 10? How about Year 20? And Year 100? # + # Solution goes here PCUsys = System(cash_in=1000, rate_b1=30, rate_b2=.5, year_start=0, year_end=100) def time_run_PCU(system): years_passed = system.year_end - system.year_start cash_out = TimeSeries() cash_out[system.year_start] = 0 for year in range(int(system.year_start), int(system.year_end)): cash_out[year] = system.cash_in + (system.rate_b1 * year) + (system.rate_b2 * year**2) system.cash_out = cash_out return system.cash_out time_run_PCU(PCUsys) # + # Solution goes here def new_plot_banks(): newfig() plot(ESLsys.cash_out, ':', color='blue', label='ESL') plot(FGBsys.cash_out, ':', color='green', label='FGB') plot(PCUsys.cash_out, ':', color="red", label='PCU') decorate(xlabel='Year', ylabel='Bank Money') new_plot_banks() # + # Solution goes here all_systems = System(cash_in=1000, rate_a=.03, rate_a_FGB=.030454534, rate_b1=30, rate_b2=.5, year_start=0, year_end=100) def new_do_the_do(system): time_run_ESL(system) time_run_FGB(system) time_run_PCU(system) new_plot_banks() print('ESL return : ', ESLsys.cash_out, 'FGB return : ', FGBsys.cash_out, 'PCU return : ', PCUsys.cash_out) new_do_the_do(all_systems) # + # Solution goes here # + # Solution goes here # -
10,546
/chatgpt/deeplearning_ai_chatgpt_building_system_course/notebooks/L5_student.ipynb
8fdc84884fd59b6d235345ccf96b5a24013a9d7f
[]
no_license
piegu/language-models
https://github.com/piegu/language-models
175
67
null
2022-09-17T19:16:15
2022-09-15T18:36:18
Jupyter Notebook
Jupyter Notebook
false
false
.py
29,130
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Distribution of the data related by the following paper: # # ### Investigating the effect of grain structure on compressive response of open-cell metal foam using high-fidelity crystal-plasticity modeling # # __Authors:__ _Dongfang Zhao, Kristoffer E. Matheson, Brian R. Phung, Steve Petruzza, Michael W. Czabaj, Ashley D. Spear_ # # __Published in:__ Materials Science and Engineering, Volume 812, 2021, 140847, ISSN 0921-5093 # # https://doi.org/10.1016/j.msea.2021.140847 # # https://www.sciencedirect.com/science/article/pii/S0921509321001167 # + def InstallPrerequisites(): # Pre-requirements: install all the python libraries needed to run this exemple. # You may need to restart the kernel if the process catches a missing library to be installed. import os,sys # specific prerequisite (this may need to be completed) !"{sys.executable}" -m pip install --upgrade OpenVisus !"{sys.executable}" -m OpenVisus configure # general prerequisites !"{sys.executable}" -m pip install PIL !"{sys.executable}" -m pip install urllib !"{sys.executable}" -m pip install matplotlib !"{sys.executable}" -m pip install mpl_interactions !"{sys.executable}" -m pip install ipywidgets !"{sys.executable}" -m pip install ipympl !"{sys.executable}" -m pip install ipycanvas !"{sys.executable}" -m jupyter nbextension enable --py widgetsnbextension print("WARNING: you probably need to restart Jupyter") InstallPrerequisites() # - import os,sys #sys.path.append(r"C:\projects\OpenVisus\build\RelWithDebInfo") from OpenVisus import * from ipywidgets import * print(os.getcwd() ) # + from PIL import Image from urllib.request import urlopen import matplotlib.pyplot as plt from ipywidgets import HBox, Label from ipywidgets import IntProgress from IPython.display import display num_blocks = 
0 num_blocks_cached = 0 caching_progress = IntProgress(min=0, max=0, layout=Layout(width='70%')) # caching_progress_display = HBox([Label("Caching progress"+ # "%.2f%%" % (100 * num_blocks_cached/num_blocks)+ # " ("+ # format(num_blocks_cached, ',d')+ # "/"+ # format(num_blocks, ',d')+ # ")"), # caching_progress]) caching_progress_display = HBox([Label("Caching progress "), caching_progress]) def Assert(cond): if not cond: raise Exception("Assert failed") def LoadImage(filename): if filename.startswith('http'): filename=urlopen(filename) return numpy.asarray(Image.open(filename)) def ShowImage(data,width=10): ratio=float(data.shape[1])/data.shape[0] fig = plt.figure(figsize = (width,width*ratio)) ax = fig.add_subplot(1,1,1) ax.imshow(data, origin='lower') plt.show() # function to read data from a remote dataset # optional parameters: timestep, field (variable in the dataset), logic_box (bounding box of the query), resolution # Note: the resolution value could sometime fetch a dataset with the wrong aspect ratio, # this because in the IDX format we double the size at each resolution on only one of the axis at a time # function to plot the image data with matplotlib # optional parameters: colormap, existing plot to reuse (for more interactivity) def ShowData(data, cmap=None, plot=None): #print(data) if(plot==None or cmap!=None): width = 6 ratio=float(data.shape[1])/data.shape[0] fig = plt.figure(figsize = (width,width*ratio)) plot = plt.imshow(data, origin='lower', cmap=cmap) plt.show() return plot else: plot.set_data(data) plt.show() return plot # function to create a local dataset from a remote one (typically S3 object storage) def EnableCaching(local_filename, remote_url): print("local_filename",local_filename) print("remote_url",remote_url) access_config=""" <access type='multiplex'> <access type='disk' chmod='rw' url='file://{}' /> <access type="CloudStorageAccess" url='{}' chmod="r" /> </access> """.format( local_filename.replace("&","&amp;"), 
remote_url.replace("&","&amp;")) print("access_config",access_config) access= db.createAccessForBlockQuery(StringTree.fromString(access_config)) # at this point the cache is enabled with the new local idx file Assert(os.path.isfile(local_filename)) return access # utility to copy a dataset block-by block (you can run it in the background) def CacheAllDataset(db,access, limit, background): global num_blocks,num_blocks_cached, caching_progress, caching_progress_display if background: thread = threading.Thread(target=CacheAllDataset, args=(db,access,limit,False)) thread.start() return thread print("CacheAllDataset","...") def ReturnValue(): access.endRead() print("CacheAllDataset done") return True access.beginRead() cont=0 for field in db.getFields(): for blockid in range(db.getTotalNumberOfBlocks()): for time in db.getTimesteps().asVector(): # print("Copying block","time",time,"field",field,"blockid",blockid,"...") buffer = db.readBlock(blockid, field=field, time=time, access=access) cont+=1 num_blocks_cached = num_blocks_cached+1 caching_progress.value = num_blocks_cached caching_progress_display.children[0].value = ("Caching progress "+ "%.2f%%" % (100 * num_blocks_cached/num_blocks)+ " ("+ format(num_blocks_cached, ',d')+ "/"+ format(num_blocks, ',d')+ ")") if limit>0 and cont>=limit: return ReturnValue() return ReturnValue() print("Utilities defined") # + # create data access method from a dataset stored on the cloud # important: you should know the layour and compression in advance url="https://s3.us-west-1.wasabisys.com/visus-server-foam/visus.idx?compression={}&layout={}".format("zip","hzorder") db=LoadDataset(url) print("Loaded dataset",url) num_blocks = len(db.getFields()) * db.getTotalNumberOfBlocks() * len(db.getTimesteps().asVector()) caching_progress.max =num_blocks local_filename=os.path.abspath("./visus-cache/foam/visus.idx").replace("\\","/") print("local_filename",local_filename) # - # Now you can run a background process that slowly copy blocks 
from remote location # if you want to copy the dataset in background if True: # this create also the local_filename, important to call this before LoadDataset local_access=EnableCaching(local_filename, url) local_db=LoadDataset(local_filename) thread=CacheAllDataset(local_db,local_access,0,True) # + X_size,Y_size,Z_size = db.getLogicBox()[1] slice_value = int(Z_size/2) print("Database size",X_size,Y_size,Z_size ) print("slice_value",slice_value) print("fields:",db.getFields()) # - # create a plot for our data access=EnableCaching(local_filename, url) first_query = db.read(x=[slice_value,slice_value+1],y=[0,Y_size],z=[0,Z_size], num_refinements=1 , quality=-3, access = access).reshape([Y_size//2,Z_size//2]) print("First query done") # if running this code in a Jupter notbeook or JupyterLab # NOTE I need this cell to be one line only with only this op # %matplotlib notebook # + # create a plot for our data myplot = ShowData(first_query) slice_dir = 0 resolution = 2 #myplot = None width = 6 #ratio=float(data.shape[2])/data.shape[1]) #fig = plt.figure(figsize = (width,width*ratio)) layout_width = '60%' time =widgets.IntSlider(value=0,min=0,max=3,step=1,description="time (0-3)", layout=Layout(width=layout_width)) style = {'description_width': 'initial'} direction_names = [('X', 0), ('Y', 1), ('Z', 2)] direction = widgets.Dropdown(options=direction_names, value=0,description='Slice orthogonal to axis:',style=style, layout=Layout(width=layout_width)) xslice=widgets.IntSlider(value=500,min=0,max=X_size-1,step=1,description="slice (0-"+str(X_size-1)+")", layout=Layout(width=layout_width)) res_widget =widgets.IntSlider(value=-2,min=-5,max=0,step=1,description="Resolution (coarse=-5,full=0)",style=style, layout=Layout(width=layout_width)) # function to plot the image data with matplotlib # optional parameters: colormap, existing plot to reuse (for more interactivity) def ShowSlice(time=0,direction = slice_dir,slice_position=700,new_resolution = resolution,db=db, cmap=None, 
plot=myplot): global myplot, first_query, slice_dir, xslice, resolution, direction_names plot = myplot if slice_dir != direction : xslice.max = db.getLogicBox()[1][direction]-1 xslice.value = 512 xslice.description = "slice (0-"+str(xslice.max)+")" slice_dir = direction resolution = new_resolution data_quality = resolution *2 New_quality = new_resolution*3 size_denominator = int(2**(new_resolution*-1)) actual_slice_position = slice_position //size_denominator actual_slice_position = actual_slice_position *size_denominator res_names = ["(coarsest)","(coarser)","(coarse)","(medium)","(fine)","(full)"] print("Time=",time, ", Direction=", direction_names[direction][0], ", Slice=",actual_slice_position, ", Resolution=",new_resolution, res_names[5+new_resolution]) try: if slice_dir == 0: data = db.read(time=time, x=[actual_slice_position,actual_slice_position+1], y=[0,Y_size], z=[0,Z_size], num_refinements=1 , quality=New_quality, access = access).reshape([Y_size//size_denominator,Z_size//size_denominator]) elif slice_dir == 1: data = db.read(time=time, x=[0,Y_size], y=[actual_slice_position,actual_slice_position+1], z=[0,Z_size], num_refinements=1 , quality=New_quality, access = access).reshape([Y_size//size_denominator,Z_size//size_denominator]) else : data = db.read(time=time, x=[0,Y_size], y=[0,Z_size], z=[actual_slice_position,actual_slice_position+1], num_refinements=1 , quality=New_quality, access = access).reshape([Y_size//size_denominator,Z_size//size_denominator]) except: data = first_query first_query = data if(plot==None or cmap!=None): print("===================== PROBLEM!!!! 
=======================================") return plot else: plot.set_data(data) plt.show() # reuse the plot with an interact for varying time and resolution values interact(lambda time, direction,xslice, res_widget: ShowSlice(time, direction,xslice, res_widget), time = time, direction = direction, xslice=xslice, res_widget = res_widget) display(caching_progress_display) "Interactive slicing of dataset retrieved from the cloud and cached locally in: "+local_filename # - "rating": 4.6, "features": ["Immersive VR experience", "Built-in headphones", "Adjustable headband", "Compatible with GameSphere X"], "description": "Step into the world of virtual reality with this comfortable VR headset.", "price": 299.99 }, "AudioPhonic Noise-Canceling Headphones": { "name": "AudioPhonic Noise-Canceling Headphones", "category": "Audio Equipment", "brand": "AudioPhonic", "model_number": "AP-NC100", "warranty": "1 year", "rating": 4.6, "features": ["Active noise-canceling", "Bluetooth", "20-hour battery life", "Comfortable fit"], "description": "Experience immersive sound with these noise-canceling headphones.", "price": 199.99 }, "WaveSound Bluetooth Speaker": { "name": "WaveSound Bluetooth Speaker", "category": "Audio Equipment", "brand": "WaveSound", "model_number": "WS-BS50", "warranty": "1 year", "rating": 4.5, "features": ["Portable", "10-hour battery life", "Water-resistant", "Built-in microphone"], "description": "A compact and versatile Bluetooth speaker for music on the go.", "price": 49.99 }, "AudioPhonic True Wireless Earbuds": { "name": "AudioPhonic True Wireless Earbuds", "category": "Audio Equipment", "brand": "AudioPhonic", "model_number": "AP-TW20", "warranty": "1 year", "rating": 4.4, "features": ["True wireless", "Bluetooth 5.0", "Touch controls", "18-hour battery life"], "description": "Enjoy music without wires with these comfortable true wireless earbuds.", "price": 79.99 }, "WaveSound Soundbar": { "name": "WaveSound Soundbar", "category": "Audio Equipment", 
"brand": "WaveSound", "model_number": "WS-SB40", "warranty": "1 year", "rating": 4.3, "features": ["2.0 channel", "80W output", "Bluetooth", "Wall-mountable"], "description": "Upgrade your TV's audio with this slim and powerful soundbar.", "price": 99.99 }, "AudioPhonic Turntable": { "name": "AudioPhonic Turntable", "category": "Audio Equipment", "brand": "AudioPhonic", "model_number": "AP-TT10", "warranty": "1 year", "rating": 4.2, "features": ["3-speed", "Built-in speakers", "Bluetooth", "USB recording"], "description": "Rediscover your vinyl collection with this modern turntable.", "price": 149.99 }, "FotoSnap DSLR Camera": { "name": "FotoSnap DSLR Camera", "category": "Cameras and Camcorders", "brand": "FotoSnap", "model_number": "FS-DSLR200", "warranty": "1 year", "rating": 4.7, "features": ["24.2MP sensor", "1080p video", "3-inch LCD", "Interchangeable lenses"], "description": "Capture stunning photos and videos with this versatile DSLR camera.", "price": 599.99 }, "ActionCam 4K": { "name": "ActionCam 4K", "category": "Cameras and Camcorders", "brand": "ActionCam", "model_number": "AC-4K", "warranty": "1 year", "rating": 4.4, "features": ["4K video", "Waterproof", "Image stabilization", "Wi-Fi"], "description": "Record your adventures with this rugged and compact 4K action camera.", "price": 299.99 }, "FotoSnap Mirrorless Camera": { "name": "FotoSnap Mirrorless Camera", "category": "Cameras and Camcorders", "brand": "FotoSnap", "model_number": "FS-ML100", "warranty": "1 year", "rating": 4.6, "features": ["20.1MP sensor", "4K video", "3-inch touchscreen", "Interchangeable lenses"], "description": "A compact and lightweight mirrorless camera with advanced features.", "price": 799.99 }, "ZoomMaster Camcorder": { "name": "ZoomMaster Camcorder", "category": "Cameras and Camcorders", "brand": "ZoomMaster", "model_number": "ZM-CM50", "warranty": "1 year", "rating": 4.3, "features": ["1080p video", "30x optical zoom", "3-inch LCD", "Image stabilization"], 
"description": "Capture life's moments with this easy-to-use camcorder.", "price": 249.99 }, "FotoSnap Instant Camera": { "name": "FotoSnap Instant Camera", "category": "Cameras and Camcorders", "brand": "FotoSnap", "model_number": "FS-IC10", "warranty": "1 year", "rating": 4.1, "features": ["Instant prints", "Built-in flash", "Selfie mirror", "Battery-powered"], "description": "Create instant memories with this fun and portable instant camera.", "price": 69.99 } } # + def get_product_by_name(name): return products.get(name, None) def get_products_by_category(category): return [product for product in products.values() if product["category"] == category] # - print(get_product_by_name("TechPro Ultrabook")) print(get_products_by_category("Computers and Laptops")) print(user_message_1) print(category_and_product_response_1) # ### Read Python string into Python list of dictionaries # + import json def read_string_to_list(input_string): if input_string is None: return None try: input_string = input_string.replace("'", "\"") # Replace single quotes with double quotes for valid JSON data = json.loads(input_string) return data except json.JSONDecodeError: print("Error: Invalid JSON string") return None # - category_and_product_list = read_string_to_list(category_and_product_response_1) print(category_and_product_list) # #### Retrieve detailed product information for the relevant products and categories def generate_output_string(data_list): output_string = "" if data_list is None: return output_string for data in data_list: try: if "products" in data: products_list = data["products"] for product_name in products_list: product = get_product_by_name(product_name) if product: output_string += json.dumps(product, indent=4) + "\n" else: print(f"Error: Product '{product_name}' not found") elif "category" in data: category_name = data["category"] category_products = get_products_by_category(category_name) for product in category_products: output_string += json.dumps(product, 
indent=4) + "\n" else: print("Error: Invalid object format") except Exception as e: print(f"Error: {e}") return output_string product_information_for_user_message_1 = generate_output_string(category_and_product_list) print(product_information_for_user_message_1) # ### Generate answer to user query based on detailed product information system_message = f""" You are a customer service assistant for a \ large electronic store. \ Respond in a friendly and helpful tone, \ with very concise answers. \ Make sure to ask the user relevant follow up questions. """ user_message_1 = f""" tell me about the smartx pro phone and \ the fotosnap camera, the dslr one. \ Also tell me about your tvs""" messages = [ {'role':'system', 'content': system_message}, {'role':'user', 'content': user_message_1}, {'role':'assistant', 'content': f"""Relevant product information:\n\ {product_information_for_user_message_1}"""}, ] final_response = get_completion_from_messages(messages) print(final_response)
20,048
/YouTube EDA and PreProcessing.ipynb
6ec8caa8eccddefff260d3b1a2f97dd972a3ef22
[]
no_license
waruni1017/Capstone-Two
https://github.com/waruni1017/Capstone-Two
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
961,781
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Trending YouTube Video Statistics # In this notebook, United States trending YouTube video Statistics are studied. The data contains 40949 YouTube video details and 16 features. One video will appear more than once in this dataset, since it is possible that a video stays in the trending list for many days. # # 1. Import packages import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import datetime import json import seaborn from wordcloud import WordCloud, STOPWORDS from statistics import variance # # 2. Load the data YouTube = pd.read_csv("./YOUTUBE DATA/USvideos.csv") # # 3. Explore the data YouTube.head(1).T YouTube.shape YouTube.info() # We can see that trending_date and publish_time are datetime objects. We will have to fix this at some point. Also, there are 40949 data but in the description column 40379. Looks like we have some data missing. We will have to work on that as well. #Summary of numerical variables YouTube.describe() # It can be observed that videos have been in the trending list even though it has zero likes. Number of comments being zero is possible since some videos have disabled commenting. #Summary of categorical variables YouTube.describe(include=['O']).T # There 40949 videos, but only 6351 of them are unique. This makes sense because a video could be trending for more than one day. Most appeared channel is ESPN while the most appeared video title is 'WE MADE OUR MOM CRY...HER DREAM CAME TRUE'. # # There are 205 unique dates in this dataset. # # 4. 
Clean the data YouTube.columns # ### 4.1 Remove irrelevant features del_col =['thumbnail_link', 'description'] df = YouTube.drop(del_col, axis=1) df.shape # ### 4.2 Check for duplicates #check for duplicates df.duplicated().value_counts() # We have 48 duplicated rows. Best way to handle this is to get rid of those rows. since it is just 48 out of 40949, it would not effect much. df[df.duplicated()] df.drop_duplicates(inplace=True) YouTube["trending_date"].apply(lambda x: '20' + x[:2]).value_counts(normalize = True) # 76.6% of YouTube videos are from 2018 and only 23.4% of them are from 2017. # ### 4.3 Formatting columns : trending_date, publish_time, category_ID # Previously, we observed that trending_date and publish_time were object type. Here they will be converted into datetime objects. And afterwards new columns called 'published_day', 'published_month', 'published_year', and 'published_time' are being introduced for further analysis. #Note that trending dates are in format year.date.month df['trending_date'] = pd.to_datetime(df['trending_date'], format='%y.%d.%m') df['publish_time'] = pd.to_datetime(df['publish_time'], format='%Y-%m-%dT%H:%M:%S.%fZ') df['published_day'] = df['publish_time'].dt.day df['published_month'] =df['publish_time'].dt.month df['published_year'] =df['publish_time'].dt.year df['published_time'] = df['publish_time'].dt.time df1 = df.drop(columns='publish_time') df1.columns # We can incorporate US_category_id.json file to our dataset and obtain different categories in our dataset instead of catogory_Ids. 
df1['category_id'] = df1['category_id'].apply(str) df1_categories = pd.read_json('./YOUTUBE DATA/US_category_id.json') id_to_category = {} for category in df1_categories['items']: id_to_category[category['id']]=category['snippet']['title'] df1['category']=df1['category_id'].map(id_to_category) df2=df1.drop(columns='category_id') df2["title_length"]= df2["title"].str.len() df2.head(1).T #groupby video_id and pick the one with the latest trending date df2 = df2.loc[df2.groupby('video_id').trending_date.idxmax()] df2.columns # Now we have 6351 observations with 17 features. # # 5. Explore Data # views, likes, dislikes, and comment_count are numerical variables in this dataset. We can explore them with the category type and channel_title to get some insights on data. vldc = df2[['channel_title', 'category', 'views', 'likes', 'dislikes', 'comment_count', 'title_length']] vldc.head() # ## 5.1 Which category has the highest number of videos? plt.figure(figsize=(15,10)) sns.countplot(y='category', data=vldc, order=vldc['category'].value_counts().index) plt.show() # ## 5.2 What is the most appeared channel title? vldc['channel_title'].value_counts() # ESPN channel is the one that had appeared most in this trending list while 'The tonight show Starring Jimmy Fallon' is the second most and 'Netflix' is the third most channel title. 
y = vldc.groupby(['category']).sum() y.sort_values(by='comment_count', ascending=False) def groupdata(col1, col2, color, num): y = vldc.groupby([col1]).sum() y = y.sort_values(by=col2, ascending=False) values = list(y[col2]) categories = y.index plt.figure(figsize=(14,14)) plt.subplot(4,1,num) plt.barh(categories, values, color = color) plt.ylabel(str(col2)) plt.xticks(rotation=45) plt.show() groupdata('category', 'views', 'red', 1) groupdata('category', 'likes', 'blue', 2) groupdata('category', 'dislikes', 'green', 3) groupdata('category', 'comment_count', 'purple', 4) # Music category has the highest number of likes, dislikes, views, and comment_count, while shows has the least of them all. Music and Entertainment seem to be always dominating the first and second places when it comes to the number of likes, dislikes, comment count, and views in the trending list. def top5(col1, col2): y = df2.groupby([col1]).sum() y = y.sort_values(by=col2, ascending=False).head() return y # Table below shows the video titles of the videos which had highest number of views in the trending YouTube video dataset. top5('title', 'views') # Table below shows the video titles of the videos which had highest number of likes in the trending YouTube video dataset. top5('title', 'likes') # Table below shows the video titles of the videos which had highest number of dislikes in the trending YouTube video dataset. top5('title', 'dislikes') corr_matrix = df2[['views', 'likes', 'dislikes', 'comment_count', 'title_length']].corr() corr_matrix # Number of views and the number of likes seem to be positily correlated. Also, the number of comments and views seem to considerably correlated. plt.figure(figsize=(10,8)) ax = sns.heatmap(corr_matrix, annot=True) # Likes and views have a strong correlation. comment_count and dislikes also have decent correlation with correlation coefficient of 0.7. 
plt.figure(figsize = (15,8)) ax1 = sns.boxplot(x = df2['category'], y = np.log(df2['views']), data = df2) ax1.set_title('views across catgegories') plt.xticks(rotation = 45) plt.show() plt.figure(figsize = (15,8)) ax2 = sns.boxplot(x = df2['category'], y = np.log(df2['likes']), data = df2) ax2.set_title('likes across catgegories') plt.xticks(rotation = 45) plt.show() # + plt.figure(figsize=(20,20)) stopwords = set(STOPWORDS) wordcloud = WordCloud(background_color = 'darkgreen', stopwords=stopwords, max_words=100, max_font_size =120, random_state=42).generate(str(df2['title'])) plt.imshow(wordcloud) plt.title('WORD CLOUD for Title') plt.axis('off') plt.show() # - # Above plotshows the most appeared words in the title of trending videos. x1 = np.log(df2['views']) y1 = np.log(df2['likes']) y2 = np.log(df2['dislikes']) y3 = np.log(df2['comment_count']) sns.regplot(x=x1, y=y1, fit_reg=False) sns.regplot(x=x1, y=y2, fit_reg=False) sns.regplot(x=x1, y=y3, fit_reg=False) sns.regplot(x=y1, y=y2, fit_reg=False) sns.regplot(x=y1, y=y3, fit_reg=False) sns.regplot(x=y2, y=y3, fit_reg=False) print(df2['comments_disabled'].value_counts()) sns.countplot(x='comments_disabled', data=df2) sns.barplot(x='comments_disabled', y='likes', data=df2) df2.groupby('published_year')['likes', 'dislikes','views','comment_count'].mean() sns.barplot(x=df2['published_month'], y=df2['views']) # # 6. 
Data Preprocessing # ## 6.1 Boolean features df2.columns df3 = df2[['views', 'likes', 'dislikes', 'comment_count', 'comments_disabled', 'ratings_disabled', 'video_error_or_removed', 'published_day', 'published_month', 'published_year']] df2.info() df2['comments_disabled'] = df2['comments_disabled'].astype(int) df2['ratings_disabled'] = df2['ratings_disabled'].astype(int) df2['video_error_or_removed'] = df2['video_error_or_removed'].astype(int) df2.info() # ## 6.3 Train/Test Split # + # spliting training and testing data from sklearn.model_selection import train_test_split X = df3.drop('views', axis=1, inplace=False) y = df3['views'] X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=27) # + from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error import statsmodels.api as sm from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.preprocessing import MinMaxScaler # fit scaler on training data std = StandardScaler().fit(X_train) # transform training data X_train_std = std.transform(X_train) # transform testing dataabs X_test_std = std.transform(X_test) # - l = LinearRegression().fit(X_train_std, y_train) y_train_pred = l.predict(X_train_std) y_test_pred = l.predict(X_test_std) MAE = mean_absolute_error(y_train, y_train_pred), mean_absolute_error(y_test, y_test_pred) MAE r2 = r2_score(y_train, y_train_pred), r2_score(y_test, y_test_pred) r2 # # 6.3.1 Applying TfidfVectorizer to the 'title' column from sklearn.feature_extraction.text import TfidfVectorizer tv = TfidfVectorizer(max_features=100, stop_words='english') tv.fit(df1['title']) tv_transformed = tv.transform(df1['title']) tv_df = pd.DataFrame(tv_transformed.toarray(), columns = tv.get_feature_names()).add_prefix('TFIDF_') tv_df = tv_df.loc[:,round(tv_df.var(),2) > 0.00] df3 = pd.concat([df2, tv_df], axis=1, sort=False) df3.shape
10,135
/.ipynb_checkpoints/selenium-checkpoint.ipynb
cb7d92370a586c14b2b5efa8d31af929dc5ec8b2
[]
no_license
Ryulth/data_crawlering
https://github.com/Ryulth/data_crawlering
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
28,664
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _uuid="31aff975d245f351c6a47a411251c09cf0df142a" colab_type="text" id="RPIBkis9u_2Q" # # DETECTING EARLY ALZHEIMER'S USING MRI DATA AND MACHINE LEARNING # ---- # ## TABLE OF CONTENT # 1. Problem Statement # 2. Data # 1. Dataset Description # 2. Column Descriptors # 3. Related Work # 4. Exploratory Data Analysis # 5. Data Precrocessing # 1. Removing rows with missing values # 2. Imputation # 3. Splitting Train/Validation/Test Sets # 4. Cross-validation # 6. Model # 1. Performance Measure # 2. Logistic Regression # 3. Support Vector Machine # 4. Decision Tree # 5. Random Forest Classifier # 6. AdaBoost # 7. Conclusion # 1. Results # 2. Unique Approach # 3. Implementation # 4. Limitation # 5. Further Research # 8. Acknowledgements # # ## TEAM MEMBERS # 1. Hyunseok Choi # 2. Kyuri Song # 3. Saurin Parikh # # + [markdown] _uuid="fedeb7d2f5ade1370c0422693aedff04d4d886f8" colab_type="text" id="Rqz5sHwoScR7" # # 1. PROBLEM STATEMENT # --- # ## ALZHEIMER'S DISEASE # # * [Alzheimer's disease (AD)](https://en.wikipedia.org/wiki/Alzheimer%27s_disease) is a neurodegenerative disorder of uncertain cause and pathogenesis that primarily affects older adults and is the most common cause of dementia. # * The earliest clinical manifestation of AD is selective memory impairment and while treatments are available to ameliorate some symptoms, there is no cure currently available. # * Brain Imaging via magnetic resonance imaging (MRI), is used for evaluation of patients with suspected AD. # * MRI findings include both, local and generalized shrinkage of brain tissue. 
Below is a pictorial representation of tissue shrinkage: ![braintissue](./Alzheimer's_disease_brain_comparison.jpg) # * Some studies have suggested that MRI features may predict rate of decline of AD and may guide therapy in the future. # * However in order to reach that stage clinicians and researchers will have to make use of machine learning techniques that can accurately predict progress of a patient from mild cognitive impairment to dementia. # * We propose to develop a sound model that can help clinicians do that and predict early alzheimer's. # + [markdown] _uuid="6fe31a2ec5929f28eff868ae06f907daeb26fb5c" colab_type="text" id="FG0zBhPkScR7" # # 2. DATA # # ## 2.A DATASET DESCRIPTION # * We will be using the [longitudinal MRI data] # * The dataset consists of a longitudinal MRI data of 150 subjects aged 60 to 96. # * Each subject was scanned at least once. # * Everyone is right-handed. # * 72 of the subjects were grouped as 'Nondemented' throughout the study. # * 64 of the subjects were grouped as 'Demented' at the time of their initial visits and remained so throughout the study. # * 14 subjects were grouped as 'Nondemented' at the time of their initial visit and were subsequently characterized as 'Demented' at a later visit. These fall under the 'Converted' category. 
# # ## 2.B COLUMN DESCRIPTORS # # |COL |FULL-FORMS | # |-----|------------------------------------| # |EDUC |Years of education | # |SES |Socioeconomic Status | # |MMSE |[Mini Mental State Examination](http://www.dementiatoday.com/wp-content/uploads/2012/06/MiniMentalStateExamination.pdf) | # |CDR |[Clinical Dementia Rating](http://knightadrc.wustl.edu/cdr/PDFs/CDR_Table.pdf) | # |eTIV |[Estimated Total Intracranial Volume](https://link.springer.com/article/10.1007/s12021-015-9266-5) | # |nWBV |[Normalize Whole Brain Volume](https://www.ncbi.nlm.nih.gov/pubmed/11547042) | # |ASF |[Atlas Scaling Factor](http://www.sciencedirect.com/science/article/pii/S1053811904003271) | # # + [markdown] _uuid="984645524fc70c97e6546508e850c27e6bbc96cf" colab_type="text" id="NbmbuaAZmMsj" # # 3. RELATED WORK # --- # # The original publication has only done some preliminary exploration of the MRI data as majority of their work was focused towards data gathering. However, in the recent past there have been multiple efforts that have been made to detect early-alzheimers using MRI data. Some of the work that was found in the literature was as follows: # # 1) **Machine learning framework for early MRI-based Alzheimer's conversion prediction in MCI subjects.** [3] # # In this paper the authors were interested in identifying mild cognitive impairment(MCI) as a transitional stage between age-related coginitive decline and Alzheimer's. The group proposes a novel MRI-based biomaker that they developed using machine learning techniques. They used data available from the Alzheimer's Disease Neuroimaging Initiative [ADNI](http://adni.loni.usc.edu/) Database. The paper claims that their aggregate biomarker achieved a 10-fold cross-validation area under the curve (AUC) score of 0.9020 in discriminating between progressive MCI (pMCI) and stable MCI (sMCI). # # Noteworthy Techniques: # 1. 
Semi-supervised learning on data available from AD patients and normal controls, without using MCI patients, to help with the sMCI/pMCI classification. Performed feature selection using regularized logistic regression. # 2. They removed aging effects from MRI data before classifier training to prevent possible confounding between changes due to AD and those due to normal aging. # 3. Finally constructed an aggregate biomarker by first learning a separate MRI biomarker and then combining age and cognitive measures about MCI subjects by applying a random foresst classifier. # # # 2) **Detection of subjects and brain regions related to Alzheimer's disease using 3D MRI scans based on eigenbrain and machine learning.** [4] # # The authors of this paper have proposed a novel computer-aided diagnosis (CAD) system for MRI images of brains based on eigenbrains [(eg.)](https://www.frontiersin.org/files/Articles/138015/fncom-09-00066-HTML/image_m/fncom-09-00066-t010.jpg) and machine learning. In their approach they use key slices from the 3D volumetric data generated from the MRI and then generate eigenbrain images based on [EEG](https://en.wikipedia.org/wiki/Electroencephalography) data. They then used kernel support-vector-machines with different kernels that were trained by particle swarm optimization. The accuracy of their polynomial kernel (92.36 $\pm$ 0.94) was better than their linear (91.47 $\pm$ 1.02) anf radial basis function (86.71 $\pm$ 1.93) kernels. # # # 3) **Support vector machine-based classification of Alzheimer’s disease from whole-brain anatomical MRI.** [5] # # In this paper the authors propose a new method to discriminate patients with AD from elderly controls based on support vector machine (SVM) classification of whole-brain anatomical MRI. The authors used three-dimensional T1-weighted MRI images from 16 patients with AD and 22 elderly controls and parcellated them into regions of interests (ROIs). 
They then used a SVM algorithm to classify subjects based upon the gray matter characteristics of these ROIs. Based on their results the classifier obtained 94.5% mean correctness. # # The possible downfalls of their technique might be the fact that they haven't taken age related changes in the gray matter into account and they were working with a small data set. # # # We have described 3 papers over here that we found the most interesting, however there are a few more that have explored the same question. Regardless, it is worthwhile to mention that the above papers were exploring raw MRI data and we, on the other hand, are dealing with 3 to 4 biomarkers that are generated from MRI images. # + [markdown] _uuid="566105ab7a54c9420a4f649dea3fa1f52da1d79d" colab_type="text" id="XZY2wtKYtH3h" # # 4. EXPLORATORY DATA ANALYSIS (EDA) # --- # # In this section, we have focused on exploring the relationship between each feature of MRI tests and dementia of the patient. The reason we conducted this Exploratory Data Analysis process is to state the relationship of data explicitly through a graph so that we could assume the correlations before data extraction or data analysis. It might help us to understand the nature of the data and to select the appropriate analysis method for the model later. # # The minimum, maximum, and average values of each feature for graph implementation are as follows. 
# # ||Min|Max|Mean| # |--- # |Educ|6|23|14.6| # |SES|1|5|2.34 # |MMSE|17|30|27.2| # |CDR|0|1|0.29| # |eTIV|1123|1989|1490| # |nWBV|0.66|0.837|0.73| # |ASF|0.883|1.563|1.2| # + _uuid="6a720755860b07d7eb93e458306ec1b6fa079ed5" import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline sns.set() df = pd.read_csv('../input/oasis_longitudinal.csv') df.head() # + _uuid="08991ab4c6c3aef89ad366662aad97a846aa9d8f" df = df.loc[df['Visit']==1] # use first visit data only because of the analysis we're doing df = df.reset_index(drop=True) # reset index after filtering first visit data df['M/F'] = df['M/F'].replace(['F','M'], [0,1]) # M/F column df['Group'] = df['Group'].replace(['Converted'], ['Demented']) # Target variable df['Group'] = df['Group'].replace(['Demented', 'Nondemented'], [1,0]) # Target variable df = df.drop(['MRI ID', 'Visit', 'Hand'], axis=1) # Drop unnecessary columns # + _uuid="9175b97a0b02ff8ace2e3bc8a6327488b3232783" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 919, "output_extras": [{"item_id": 1}]} colab_type="code" executionInfo={"elapsed": 11761, "status": "error", "timestamp": 1512599691671, "user": {"displayName": "Saurin Parikh", "photoUrl": "//lh3.googleusercontent.com/-6RG-0wrBKjU/AAAAAAAAAAI/AAAAAAAABpU/h5Zwf5zd3tk/s50-c-k-no/photo.jpg", "userId": "104703813675171986785"}, "user_tz": 300} id="-4-ZVrHJslSF" outputId="5f5cc170-02d6-41d4-f5f2-3b575833b56b" # bar drawing function def bar_chart(feature): Demented = df[df['Group']==1][feature].value_counts() Nondemented = df[df['Group']==0][feature].value_counts() df_bar = pd.DataFrame([Demented,Nondemented]) df_bar.index = ['Demented','Nondemented'] df_bar.plot(kind='bar',stacked=True, figsize=(8,5)) # + _uuid="5f8599d4f70b78047011217e3bbcaa26311fddc6" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}, {}]} colab_type="code" id="k-92eJ7pslSL" 
outputId="d9940867-44f6-4d3a-f6fb-ddef5630877e" # Gender and Group ( Femal=0, Male=1) bar_chart('M/F') plt.xlabel('Group') plt.ylabel('Number of patients') plt.legend() plt.title('Gender and Demented rate') # + [markdown] _uuid="46c3ccb6893e52a6636e9a764539a92084f90407" colab_type="text" id="9m16iCGyslSO" # The above graph indicates that men are more likely with dementia than women. # + _uuid="e1b8fbe86bbf469971aed915c2b8e9caa9cdbe53" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}, {}]} colab_type="code" id="QfZMuTl7slSP" outputId="b3a6a8a7-5cd6-48eb-e631-20b8d30c3bad" #MMSE : Mini Mental State Examination # Nondemented = 0, Demented =1 # Nondemented has higher test result ranging from 25 to 30. #Min 17 ,MAX 30 facet= sns.FacetGrid(df,hue="Group", aspect=3) facet.map(sns.kdeplot,'MMSE',shade= True) facet.set(xlim=(0, df['MMSE'].max())) facet.add_legend() plt.xlim(15.30) # + [markdown] _uuid="8d01f7c6b66442f0b1eeb814464cec8654a24945" colab_type="text" id="WumyA7d6slSR" # The chart shows Nondemented group got much more higher MMSE scores than Demented group. 
# + _uuid="4dc483b489417595af5d23cf7a44de13c8ba371d" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}, {}, {}, {}]} colab_type="code" id="JPuWkiWGslSS" outputId="4088e3c2-8493-41d2-c61a-c386e784fd37" #bar_chart('ASF') = Atlas Scaling Factor facet= sns.FacetGrid(df,hue="Group", aspect=3) facet.map(sns.kdeplot,'ASF',shade= True) facet.set(xlim=(0, df['ASF'].max())) facet.add_legend() plt.xlim(0.5, 2) #eTIV = Estimated Total Intracranial Volume facet= sns.FacetGrid(df,hue="Group", aspect=3) facet.map(sns.kdeplot,'eTIV',shade= True) facet.set(xlim=(0, df['eTIV'].max())) facet.add_legend() plt.xlim(900, 2100) #'nWBV' = Normalized Whole Brain Volume # Nondemented = 0, Demented =1 facet= sns.FacetGrid(df,hue="Group", aspect=3) facet.map(sns.kdeplot,'nWBV',shade= True) facet.set(xlim=(0, df['nWBV'].max())) facet.add_legend() plt.xlim(0.6,0.9) # + [markdown] _uuid="96f2d269e35927b57c5068b2b3dda2d145ae91f4" colab_type="text" id="1LC-PdJislSV" # The chart indicates that Nondemented group has higher brain volume ratio than Demented group. This is assumed to be because the diseases affect the brain to be shrinking its tissue. # + _uuid="87a846e38f21e28b3c7ad6bae536b7cd2c3e2466" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}, {}]} colab_type="code" id="w6rN7jjSslSW" outputId="fbb23e63-a926-477c-c6ff-fc87933c1ea6" #AGE. Nondemented =0, Demented =0 facet= sns.FacetGrid(df,hue="Group", aspect=3) facet.map(sns.kdeplot,'Age',shade= True) facet.set(xlim=(0, df['Age'].max())) facet.add_legend() plt.xlim(50,100) # + [markdown] _uuid="e491fe3ded880c040a17cde07279b0474b00ff7a" colab_type="text" id="VlQHpQRWslSY" # There is a higher concentration of 70-80 years old in the Demented patient group than those in the nondemented patients. # We guess patients who suffered from that kind of disease has lower survival rate so that there are a few of 90 years old. 
# + _uuid="8177a32eff0fece26f2220ccc046843347afc41e" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}, {}]} colab_type="code" id="w6rN7jjSslSW" outputId="fbb23e63-a926-477c-c6ff-fc87933c1ea6" #'EDUC' = Years of Education # Nondemented = 0, Demented =1 facet= sns.FacetGrid(df,hue="Group", aspect=3) facet.map(sns.kdeplot,'EDUC',shade= True) facet.set(xlim=(df['EDUC'].min(), df['EDUC'].max())) facet.add_legend() plt.ylim(0, 0.16) # + [markdown] _uuid="26e6c99d943c7e371e9fd7c3b962449996e743fd" colab_type="text" id="HGKJOeWYslSZ" # ## Intermediate Result Summary # 1. Men are more likely with demented, an Alzheimer's Disease, than Women. # 2. Demented patients were less educated in terms of years of education. # 3. Nondemented group has higher brain volume than Demented group. # 4. Higher concentration of 70-80 years old in Demented group than those in the nondemented patients. # + [markdown] _uuid="80a9f0ebb50abd31cfa14ed7d93204d00f313543" colab_type="text" id="dR7e2FEuScR8" # # 5. Data Preprocessing # --- # We identified 8 rows with missing values in SES column. We deal with this issue with 2 approaches. One is just to drop the rows with missing values. The other is to replace the missing values with the corresponing values, also known as 'Imputation'. Since we have only 150 data, I assume imputation would help the performance of our model. 
# + _uuid="c485e9bdba313a2897fa58aa61406f07c2206c27" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}]} colab_type="code" id="crn5DxTUScSI" outputId="10613858-0403-4c42-aea7-7bc3635ef60e" # Check missing values by each column pd.isnull(df).sum() # The column, SES has 8 missing values # + [markdown] _uuid="10bb2143731149217bd59b8ea7fb2f12706dd355" colab_type="text" id="v96VUyPYScSL" # ## 5.A Removing rows with missing values # + _uuid="71f15a1cda750ae72a718d2ec0aa7939ac2b7855" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}]} colab_type="code" id="NCuXVrJtScSM" outputId="29e022db-bb16-44f6-bfc7-3cd3fbb6f930" # Dropped the 8 rows with missing values in the column, SES df_dropna = df.dropna(axis=0, how='any') pd.isnull(df_dropna).sum() # + _uuid="a08f0ff032efa883e45e9b5bd2b6d386dd89f9b3" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}]} colab_type="code" id="ThXMkCk0ScSQ" outputId="24c17bc4-f411-470d-e483-cd6098eff679" df_dropna['Group'].value_counts() # + [markdown] _uuid="9a81499adca6b212e798a6b9a123c5806d57e087" colab_type="text" id="A0ipe9qsScSU" # ## 5.B Imputation # # Scikit-learn provides package for imputation [6], but we do it manually. Since the *SES* is a discrete variable, we use median for the imputation. 
# + _uuid="1c2354281b56aaac81b5dd09cee531be85a72b1f" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}]} colab_type="code" id="62IgZtwnScSV" outputId="a5e8f86e-66d6-4147-83f7-f26b12787245" # Draw scatter plot between EDUC and SES x = df['EDUC'] y = df['SES'] ses_not_null_index = y[~y.isnull()].index x = x[ses_not_null_index] y = y[ses_not_null_index] # Draw trend line in red z = np.polyfit(x, y, 1) p = np.poly1d(z) plt.plot(x, y, 'go', x, p(x), "r--") plt.xlabel('Education Level(EDUC)') plt.ylabel('Social Economic Status(SES)') plt.show() # + _uuid="d2fcc229a25ba9ad1097de001cd53505ad0ba478" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}]} colab_type="code" id="cKaMRSvgScSY" outputId="f05f5530-b14c-4bb8-abbb-037e2a0838e3" df.groupby(['EDUC'])['SES'].median() # + _uuid="b41fdf3fc1f569f482e8a506f0737775e84b338a" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="dj_edAcdScSb" df["SES"].fillna(df.groupby("EDUC")["SES"].transform("median"), inplace=True) # + _uuid="63c36b7bc32f5cc65c8bf908113ba919a17fc5b8" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}]} colab_type="code" id="Y4SgUM58ScSd" outputId="9325d3dc-7f01-4be2-a932-a39c68d202b7" # I confirm there're no more missing values and all the 150 data were used. 
pd.isnull(df['SES']).value_counts() # + [markdown] _uuid="61220f51b25e5efcc5ceaa0d8f133967514b33a5" # ## 5.C Splitting Train/Validation/Test Sets # + _uuid="b039c111537091add5e04716b4d33501855155e8" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="kJcRjpOIScSj" from sklearn.model_selection import train_test_split from sklearn import preprocessing from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import cross_val_score # + _uuid="9c0cd7fedabdd26e4dbd2e5ed19a79c9097511c8" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="eI6EXWT7ScSm" # Dataset with imputation Y = df['Group'].values # Target for the model X = df[['M/F', 'Age', 'EDUC', 'SES', 'MMSE', 'eTIV', 'nWBV', 'ASF']] # Features we use # splitting into three sets X_trainval, X_test, Y_trainval, Y_test = train_test_split( X, Y, random_state=0) # Feature scaling scaler = MinMaxScaler().fit(X_trainval) X_trainval_scaled = scaler.transform(X_trainval) X_test_scaled = scaler.transform(X_test) # + _uuid="aa47c091672863c3ad5c785a65cf227977cb9c37" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="oQAwHijeScSq" # Dataset after dropping missing value rows Y = df_dropna['Group'].values # Target for the model X = df_dropna[['M/F', 'Age', 'EDUC', 'SES', 'MMSE', 'eTIV', 'nWBV', 'ASF']] # Features we use # splitting into three sets X_trainval_dna, X_test_dna, Y_trainval_dna, Y_test_dna = train_test_split( X, Y, random_state=0) # Feature scaling scaler = MinMaxScaler().fit(X_trainval_dna) X_trainval_scaled_dna = scaler.transform(X_trainval_dna) X_test_scaled_dna = scaler.transform(X_test_dna) # + [markdown] _uuid="b2356d2f3b9b746909c52e98c88c9dd9e20f77bf" colab_type="text" id="Hh0VrtWIScSt" # ## 5.D Cross-validation # We conduct 5-fold cross-validation to figure out the best parameters for each model, Logistic Regression, SVM, Decision Tree, Random Forests, and AdaBoost. 
Since our performance metric is accuracy, we find the best tuning parameters by *accuracy*. In the end, we compare the accuracy, recall and AUC for each model. # + [markdown] _uuid="27d4266a859c95510a8355df6c98c741a4805224" colab_type="text" id="vgYxX0OkScSj" # # 6. MODEL # --- # + [markdown] _uuid="3bef5c29ca278b2aee2e6d0786e78149ab1bd83d" colab_type="text" id="ZmzXtTaFScSs" # ## 6.A Performance Measures # # We use area under the receiver operating characteristic curve (AUC) as our main performance measure. We believe that in case of medical diagnostics for non-life threatening terminal diseases like most neurodegenerative diseases it is important to have a high true positive rate so that all patients with alzheimer's are identified as early as possible. But we also want to make sure that the false positive rate is as low as possible since we do not want to misdiagnose a healthy adult as demented and begin medical therapy. Hence AUC seemed like a ideal choice for a performance measure. # # We will also be looking at accuracy and recall for each model. # # In the figure below, you can think relevant elements as actually demented subjects. # Precision and Recall [12] # ![Precision and Recall](https://upload.wikimedia.org/wikipedia/commons/2/26/Precisionrecall.svg) # + [markdown] _uuid="52d9efbf3184f96ff3a0767e08d3d1e6f8ea1a73" colab_type="text" id="L0zOT1CAScSu" # ## 6.B Logistic Regression # The parameter C, inverse of regularization strength. 
# # Tuning range: [0.001, 0.1, 1, 10, 100] # + _uuid="f8dcc6798d6f0aa777fff85c880a47ec78ad9e5c" from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, roc_curve, auc # + _uuid="252a6294c3c8754c0e19660e5ad7c0af189fd8e5" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="nvBhRVT_ScSu" acc = [] # list to store all performance metric # + _uuid="cb9f7249fa6793ec6e339aeb9936c26fbdfe552d" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}]} colab_type="code" id="xsWC0JpIScSw" outputId="f0c4357e-951e-4078-9356-f01f7326c056" # Dataset with imputation best_score=0 kfolds=5 # set the number of folds for c in [0.001, 0.1, 1, 10, 100]: logRegModel = LogisticRegression(C=c) # perform cross-validation scores = cross_val_score(logRegModel, X_trainval, Y_trainval, cv=kfolds, scoring='accuracy') # Get recall for each parameter setting # compute mean cross-validation accuracy score = np.mean(scores) # Find the best parameters and score if score > best_score: best_score = score best_parameters = c # rebuild a model on the combined training and validation set SelectedLogRegModel = LogisticRegression(C=best_parameters).fit(X_trainval_scaled, Y_trainval) test_score = SelectedLogRegModel.score(X_test_scaled, Y_test) PredictedOutput = SelectedLogRegModel.predict(X_test_scaled) test_recall = recall_score(Y_test, PredictedOutput, pos_label=1) fpr, tpr, thresholds = roc_curve(Y_test, PredictedOutput, pos_label=1) test_auc = auc(fpr, tpr) print("Best accuracy on validation set is:", best_score) print("Best parameter for regularization (C) is: ", best_parameters) print("Test accuracy with best C parameter is", test_score) print("Test recall with the best C parameter is", test_recall) 
print("Test AUC with the best C parameter is", test_auc) m = 'Logistic Regression (w/ imputation)' acc.append([m, test_score, test_recall, test_auc, fpr, tpr, thresholds]) # + _uuid="200d475f03c493ecc5de448537841771d0817eb9" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{}]} colab_type="code" id="Zb-VqkXUScSz" outputId="2f1858c0-335a-4720-dc18-bef6e3c10a9e" # Dataset after dropping missing value rows best_score=0 kfolds=5 # set the number of folds for c in [0.001, 0.1, 1, 10, 100]: logRegModel = LogisticRegression(C=c) # perform cross-validation scores = cross_val_score(logRegModel, X_trainval_scaled_dna, Y_trainval_dna, cv=kfolds, scoring='accuracy') # compute mean cross-validation accuracy score = np.mean(scores) # Find the best parameters and score if score > best_score: best_score = score best_parameters = c # rebuild a model on the combined training and validation set SelectedLogRegModel = LogisticRegression(C=best_parameters).fit(X_trainval_scaled_dna, Y_trainval_dna) test_score = SelectedLogRegModel.score(X_test_scaled_dna, Y_test_dna) PredictedOutput = SelectedLogRegModel.predict(X_test_scaled) test_recall = recall_score(Y_test, PredictedOutput, pos_label=1) fpr, tpr, thresholds = roc_curve(Y_test, PredictedOutput, pos_label=1) test_auc = auc(fpr, tpr) print("Best accuracy on validation set is:", best_score) print("Best parameter for regularization (C) is: ", best_parameters) print("Test accuracy with best C parameter is", test_score) print("Test recall with the best C parameter is", test_recall) print("Test AUC with the best C parameter is", test_auc) m = 'Logistic Regression (w/ dropna)' acc.append([m, test_score, test_recall, test_recall, fpr, tpr, thresholds]) # + [markdown] _uuid="9b30f505fa1febaa9b0577ef2943906046f77413" # In overall, dataset with imputation outperforms the one without imputation. For the later models, we use dataset without imputation. 
# + [markdown] id="Gj3b-ssXScS2"
# ## 6.C SVM
# C: Penalty parameter C of the error term. [0.001, 0.01, 0.1, 1, 10, 100, 1000]
#
# gamma: kernel coefficient. [0.001, 0.01, 0.1, 1, 10, 100, 1000]
#
# kernel: kernel type. ['rbf', 'linear', 'poly', 'sigmoid']

# + id="Xp5EM__NScS2"
# Exhaustive grid search over C, gamma and kernel, scored by k-fold
# cross-validation accuracy on the training+validation split.
best_score = 0
for c_parameter in [0.001, 0.01, 0.1, 1, 10, 100, 1000]:
    for gamma_parameter in [0.001, 0.01, 0.1, 1, 10, 100, 1000]:
        for k_parameter in ['rbf', 'linear', 'poly', 'sigmoid']:
            svmModel = SVC(kernel=k_parameter, C=c_parameter, gamma=gamma_parameter)
            # The training set is split internally into training and
            # cross-validation folds.
            scores = cross_val_score(svmModel, X_trainval_scaled, Y_trainval,
                                     cv=kfolds, scoring='accuracy')
            score = np.mean(scores)
            # Keep the best-scoring parameter combination seen so far.
            if score > best_score:
                best_score = score
                best_parameter_c = c_parameter
                best_parameter_gamma = gamma_parameter
                best_parameter_k = k_parameter

# Refit on the combined training+validation data with the best parameters
# and evaluate once on the held-out test set.
SelectedSVMmodel = SVC(C=best_parameter_c, gamma=best_parameter_gamma,
                       kernel=best_parameter_k).fit(X_trainval_scaled, Y_trainval)
test_score = SelectedSVMmodel.score(X_test_scaled, Y_test)
PredictedOutput = SelectedSVMmodel.predict(X_test_scaled)
test_recall = recall_score(Y_test, PredictedOutput, pos_label=1)
fpr, tpr, thresholds = roc_curve(Y_test, PredictedOutput, pos_label=1)
test_auc = auc(fpr, tpr)
print("Best accuracy on cross validation set is:", best_score)
print("Best parameter for c is: ", best_parameter_c)
print("Best parameter for gamma is: ", best_parameter_gamma)
print("Best parameter for kernel is: ", best_parameter_k)
print("Test accuracy with the best parameters is", test_score)
print("Test recall with the best parameters is", test_recall)
# BUG FIX: this line previously printed the AUC value with a
# "Test recall" label.
print("Test AUC with the best parameters is", test_auc)

m = 'SVM'
acc.append([m, test_score, test_recall, test_auc, fpr, tpr, thresholds])

# + [markdown] id="mYGAer5hScS5"
# ## 6.D Decision Tree
# Maximum depth. [1, 2, ..., 8]
#
# 8 is the number of features

# + id="jGI1Smg7ScS6"
# Grid search over the maximum tree depth only.
best_score = 0
for md in range(1, 9):  # iterate over candidate maximum depths
    treeModel = DecisionTreeClassifier(random_state=0, max_depth=md, criterion='gini')
    scores = cross_val_score(treeModel, X_trainval_scaled, Y_trainval,
                             cv=kfolds, scoring='accuracy')
    score = np.mean(scores)
    if score > best_score:
        best_score = score
        best_parameter = md

# Rebuild a model on the combined training and validation set.
# CONSISTENCY FIX: the refit previously dropped random_state=0 and
# criterion='gini' that were used during cross-validation, so it was neither
# the same estimator configuration nor reproducible.
SelectedDTModel = DecisionTreeClassifier(max_depth=best_parameter,
                                         criterion='gini',
                                         random_state=0).fit(X_trainval_scaled, Y_trainval)
test_score = SelectedDTModel.score(X_test_scaled, Y_test)
PredictedOutput = SelectedDTModel.predict(X_test_scaled)
test_recall = recall_score(Y_test, PredictedOutput, pos_label=1)
fpr, tpr, thresholds = roc_curve(Y_test, PredictedOutput, pos_label=1)
test_auc = auc(fpr, tpr)
print("Best accuracy on validation set is:", best_score)
print("Best parameter for the maximum depth is: ", best_parameter)
print("Test accuracy with best parameter is ", test_score)
print("Test recall with best parameters is ", test_recall)
print("Test AUC with the best parameter is ", test_auc)

m = 'Decision Tree'
acc.append([m, test_score, test_recall, test_auc, fpr, tpr, thresholds])

# + id="3k1LzTAOScS9"
print("Feature importance: ")
np.array([X.columns.values.tolist(), list(SelectedDTModel.feature_importances_)]).T

# + id="jAXEhs2gScS_"
# Render the selected tree with graphviz for inspection.
from sklearn.tree import export_graphviz
import graphviz
dot_data = export_graphviz(SelectedDTModel,
                           feature_names=X_trainval.columns.values.tolist(),
                           out_file=None)
graph = graphviz.Source(dot_data)
graph

# + [markdown] id="ZkIF7600ScTD"
# ## 6.E Random Forest Classifier
# n_estimators(M): the number of trees in the forest
#
# max_features(d): the number of features to consider when looking for the best split
#
# max_depth(m): the maximum depth of the tree.
# + id="zi0Ssns3ScTE"
# Grid search over number of trees (M), max features per split (d) and
# maximum tree depth (m), scored by k-fold cross-validation accuracy.
best_score = 0
for M in range(2, 15, 2):      # number of trees combined
    for d in range(1, 9):      # maximum number of features considered at each split
        for m in range(1, 9):  # maximum depth of the tree
            # n_jobs=4 parallelises the forest fitting.
            forestModel = RandomForestClassifier(n_estimators=M, max_features=d,
                                                 n_jobs=4, max_depth=m,
                                                 random_state=0)
            scores = cross_val_score(forestModel, X_trainval_scaled, Y_trainval,
                                     cv=kfolds, scoring='accuracy')
            score = np.mean(scores)
            if score > best_score:
                best_score = score
                best_M = M
                best_d = d
                best_m = m

# Rebuild a model on the combined training and validation set.
# BUG FIX: the refit previously used the loop variables M, d, m (i.e. the
# LAST grid combination tried), not the best parameters found above.
SelectedRFModel = RandomForestClassifier(n_estimators=best_M, max_features=best_d,
                                         max_depth=best_m,
                                         random_state=0).fit(X_trainval_scaled, Y_trainval)
PredictedOutput = SelectedRFModel.predict(X_test_scaled)
test_score = SelectedRFModel.score(X_test_scaled, Y_test)
test_recall = recall_score(Y_test, PredictedOutput, pos_label=1)
fpr, tpr, thresholds = roc_curve(Y_test, PredictedOutput, pos_label=1)
test_auc = auc(fpr, tpr)
print("Best accuracy on validation set is:", best_score)
print("Best parameters of M, d, m are: ", best_M, best_d, best_m)
print("Test accuracy with the best parameters is", test_score)
print("Test recall with the best parameters is:", test_recall)
print("Test AUC with the best parameters is:", test_auc)

m = 'Random Forest'
acc.append([m, test_score, test_recall, test_auc, fpr, tpr, thresholds])

# + id="Mcx6LmzcScTJ"
print("Feature importance: ")
np.array([X.columns.values.tolist(), list(SelectedRFModel.feature_importances_)]).T

# + [markdown]
# ## 6.F AdaBoost

# +
# Grid search over the number of weak learners (M) and the learning rate.
best_score = 0
for M in range(2, 15, 2):  # number of weak learners combined
    for lr in [0.0001, 0.001, 0.01, 0.1, 1]:
        boostModel = AdaBoostClassifier(n_estimators=M, learning_rate=lr,
                                        random_state=0)
        scores = cross_val_score(boostModel, X_trainval_scaled, Y_trainval,
                                 cv=kfolds, scoring='accuracy')
        score = np.mean(scores)
        if score > best_score:
            best_score = score
            best_M = M
            best_lr = lr

# Rebuild a model on the combined training and validation set.
# BUG FIX: the refit previously used the loop variables M, lr (the last
# combination tried) instead of the best parameters, and the test accuracy
# was computed from SelectedRFModel (the random forest) instead of the
# AdaBoost model, so the reported AdaBoost accuracy was wrong.
SelectedBoostModel = AdaBoostClassifier(n_estimators=best_M, learning_rate=best_lr,
                                        random_state=0).fit(X_trainval_scaled, Y_trainval)
PredictedOutput = SelectedBoostModel.predict(X_test_scaled)
test_score = SelectedBoostModel.score(X_test_scaled, Y_test)
test_recall = recall_score(Y_test, PredictedOutput, pos_label=1)
fpr, tpr, thresholds = roc_curve(Y_test, PredictedOutput, pos_label=1)
test_auc = auc(fpr, tpr)
print("Best accuracy on validation set is:", best_score)
print("Best parameter of M is: ", best_M)
print("best parameter of LR is: ", best_lr)
print("Test accuracy with the best parameter is", test_score)
print("Test recall with the best parameters is:", test_recall)
print("Test AUC with the best parameters is:", test_auc)

m = 'AdaBoost'
acc.append([m, test_score, test_recall, test_auc, fpr, tpr, thresholds])

# +
print("Feature importance: ")
np.array([X.columns.values.tolist(), list(SelectedBoostModel.feature_importances_)]).T

# + [markdown] id="w1vFrwcwScTO"
# # 7.
CONCLUSION # # ## 7.A RESULTS # + _uuid="567d27a76de89fd5c9feb715bfc0e58400872f7e" # Performance Metric for each model result = pd.DataFrame(acc, columns=['Model', 'Accuracy', 'Recall', 'AUC', 'FPR', 'TPR', 'TH']) result[['Model', 'Accuracy', 'Recall', 'AUC']] # + [markdown] _uuid="97515e1017176438f0b63b12ef68038a396a0778" # #### Below is a comparision of our results with those from the papers that were listed previously: # # |Sr.No.|Paper|Data|Model|Results| # |--- # |1.|E. Moradi et al. [3]|Ye et al. [7]|Random Forrest Classifier|AUC = 71.0%|ACC = 55.3%| # |||Filipovych et al. [8]|Random Forrest Classifier|AUC = 61.0%|ACC = N/A| # |||Zhang et al. [9]|Random Forrest Classifier|AUC = 94.6%|ACC = N/A| # |||Batmanghelich et al. [10]|Random Forrest Classifier|AUC = 61.5%|ACC = N/A| # |2.|Zhang et al. [4]|Ardekani et al. [11]|Support Vector Machine ||| # ||||*polynomial kernel*|AUC = N/A|ACC = 92.4%| # ||||*linear kernel*|AUC = N/A|ACC = 91.5%| # ||||*radial basis function*|AUC = N/A|ACC = 86.7%| # |3.|Hyun, Kyuri, Saurin|Marcus et al. [1]| Logistic Regression (w/ imputation)|AUC = 79.2%|ACC = 78.9%| # ||||Logistic Regression (w/ dropna)|AUC = 70.0%|ACC = 75.0%| # ||||Support Vector Machine|AUC = 82.2%|ACC = 81.6%| # ||||Decision Tree Classifier|AUC = 82.5%|ACC = 81.6%| # ||||Random Forest Classifier|AUC = 84.4%|ACC = 84.2%| # ||||AdaBoost|AUC = 82.5%|ACC = 84.2%| # # It can be noticed that our results are comparable and in certain cases better than those from the previous work. Our Random Forest Classifier was one of the best performing model. # + [markdown] _uuid="d8e735911083847bdff95aac9cf9d76c0cbd730e" colab_type="text" id="30ETMyJAszMY" # ## 7.B UNIQUE APPROACH # The uniqueness of our approach is the fact that we would be including metrices like MMSE and Education also in our model to train it to differentiate between normal healthy adults and those with Alzheimer's. 
MMSE is one of the gold standards for determining dementia and hence we think it is an important feature to include. # # The same fact also make our approach flexible enough to be applied to other neurodegenerative diseases which are diagnosed using a combination of MRI features and cognitive tests. # # ## 7.C IMPLEMENTATION # The teams' primary intention was to explore how machine learning can make a difference in the clinical environment. For that we have developed a web program using our algorithm which can be used anyone regardless of their programming experience. By using a [CGI module](https://docs.python.org/2/library/cgi.html) we want to make everyone take advantage of your effort! # # # #### Common Gateway Interface (CGI) # * CGI can be used make a webserver execute your model. # * The input arguments for your web program should be the same as the parameters you used to train your model. # * The idea is that a Clinician should be able to input MRI results, biographic data and other parameters for a patient. Your model should assist them in identifying dementia. # # # #### Here is the [screenshot](https://pitt.box.com/s/a0wvujqqbbtt97qri1pqo06mw28rwjlb) of our web application # # # ## 7.D LIMITATIONS # There are limitations in implementing a complex model because of the quantity of the dataset. Even though the nature of each feature is evident, the ranges of each group's test value are not classified well. In other words, we should have identified more clearly the differences in the variables which might have played a role in the result.The predicted value using the random forest model is higher than the other models. It implies there is a potential for higher prediction rate if we pay more attention to develop the data cleaning and analysis process. Moreover, the perfect recall score 1.0 of SVM 1.0. Indicates that the quality and accuracy of the classification might decrease dramatically when we use different dataset. 
# # # ## 7.E FURTHER RESEARCH # The main takeaway for us is that several key factors are associated with dementia, and we should continue to examine them and refine the process in different ways. For further study, it is necessary for us to improve our understanding through a more sophisticated EDA process with a larger sample size. For instance, we would try not only age itself but also group it into generations, or grade the volume of brain tissue or exam scores. If the results from this process are reflected in the data cleaning process and positively affect the decision making of the model, the accuracy of the prediction model can be further improved.
39,696
/BaseModel (1).ipynb
cbeb2a3916768f625411802d0a279d132d536ecb
[]
no_license
nehachn/IDS-based-on-network-flow
https://github.com/nehachn/IDS-based-on-network-flow
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
86,772
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.15.2
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="H8hl7E1P5Nub"
# Read & manipulate data.
import pandas as pd
import numpy as np
import tensorflow as tf
from collections import Counter

# Visualisations.
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='whitegrid', context='notebook')
# %matplotlib notebook

# Misc.
import time

# + id="aar9u3yCR8uU"
df = pd.read_csv("/content/drive/MyDrive/cicids2017 dataset/all_data.csv")

# + [markdown] id="Gs8JI3JVqvYj"
# # Train/test split

# + id="CsnHAp655JlX"
# Split flows on the binary 'anomaly' flag; the 'Label' column duplicates
# that information, so drop it from both partitions.
attack = df[df.anomaly == 1].drop('Label', axis=1)
normal = df[df.anomaly == 0].drop('Label', axis=1)

print(f"""Shape of the datasets:
    clean (rows, cols) = {normal.shape}
    fraud (rows, cols) = {attack.shape}""")

# + id="KGPttVjnq3Tp"
# Shuffle the benign flows; the detectors are trained on benign traffic only
# (one-class / anomaly-detection setting).
normal = normal.sample(frac=1).reset_index(drop=True)
TRAINING_SAMPLE = normal.shape[0] - attack.shape[0]
print(f"""training sample size: {TRAINING_SAMPLE} and normal case size: {normal.shape[0]}
left out normals: {normal.shape[0] - TRAINING_SAMPLE}""")

# Training set: exclusively normal samples.
X_train_all = normal.iloc[:TRAINING_SAMPLE]

# Testing set: the remaining normal samples plus all the attack samples.
X_test_normal = normal.iloc[TRAINING_SAMPLE:]
X_test_attack = attack
# BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# build the shuffled test set with pd.concat instead.
X_test = pd.concat([X_test_normal, X_test_attack]).sample(frac=1)
print(f"""Our testing set is composed as follows:

{X_test.anomaly.value_counts()}""")
print(f"""Class ratio in testing set is composed as follows:

{X_test.anomaly.value_counts(normalize=True) * 100}""")

# + id="rOfcO6YurcPH"
print(f"""Shape of the datasets:
    training (rows, cols) = {X_train_all.shape}
    test normal (rows, cols) = {X_test_normal.shape}
    test attack (rows, cols) = {X_test_attack.shape}""")

# + id="ACIoNFLOtDER"
# Work on a 1% random sample of each split (fixed seed) with the label
# column removed.
train_features_set = X_train_all[X_train_all.columns.difference(['anomaly'])].sample(frac=.01, random_state=123)
test_features_benign = X_test_normal[X_test_normal.columns.difference(['anomaly'])].sample(frac=.01, random_state=123)
test_features_malic = X_test_attack[X_test_attack.columns.difference(['anomaly'])].sample(frac=.01, random_state=123)

# + id="87UOLkCAFCIB"
print(f"""Shape of the datasets:
    training (rows, cols) = {train_features_set.shape}
    test normal (rows, cols) = {test_features_benign.shape}
    test attack (rows, cols) = {test_features_malic.shape}""")

# +
from sklearn.metrics import classification_report, roc_auc_score, roc_curve


def report_results(y_pred_benign, y_pred_malicious):
    """Print a classification report and plot the ROC curve.

    One-class convention: +1 = benign (inlier), -1 = attack (outlier).
    Factored into a helper because the OC-SVM and Isolation-Forest result
    cells previously duplicated this code verbatim.
    """
    y_test_all = np.append(np.ones(len(y_pred_benign)),
                           np.ones(len(y_pred_malicious)) * -1)
    y_pred_all = np.append(y_pred_benign, y_pred_malicious)
    print(classification_report(y_test_all, y_pred_all))
    fpr, tpr, thresholds = roc_curve(y_test_all, y_pred_all)
    auc_score = roc_auc_score(y_test_all, y_pred_all)
    fig, ax1 = plt.subplots(1, 1, figsize=(8, 8))
    ax1.plot(fpr, tpr, 'b.-', label='ROC Curve (%2.2f)' % auc_score)
    ax1.plot(fpr, fpr, 'k-', label='Random Guessing')
    ax1.legend()


# + [markdown] id="SdelGE9ouF05"
# # One Class SVM

# + id="l2011kwkuOZv"
from sklearn import svm

# + id="HfO-VRTQuE_t"
start = time.time()
# Fit the one-class model on benign traffic only.
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma='scale')
clf.fit(train_features_set)
print(time.time() - start)

# + id="vPAfz_56MppZ"
dataset_name = 'OCSVMcicids2017'

# + [markdown] id="kexCg_6QyJBr"
# # Save the model

# + id="uuLYuUVuxwm9"
# BUG FIX: sklearn.externals.joblib was deprecated in scikit-learn 0.21 and
# removed in 0.23; import the standalone joblib package instead.
import joblib

# + id="SDraK5vqxwvA"
joblib.dump(clf, "/content/drive/MyDrive/cicids2017 dataset/%s.pkl" % dataset_name)

# + [markdown] id="ntAsF_rTMcS3"
# # Loading saved model

# + id="lPxkNsZtManO"
clf = joblib.load("/content/drive/MyDrive/cicids2017 dataset/%s.pkl" % dataset_name)

# + [markdown] id="EMyZv7TkLybf"
# # Testing

# + id="O82Ke8ETubUS"
y_pred_train = clf.predict(train_features_set)
y_pred_test = clf.predict(test_features_benign)
y_pred_outliers = clf.predict(test_features_malic)
# Error counts: benign flows flagged as outliers (-1) and attacks accepted
# as inliers (+1).
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size

# + [markdown] id="GgAeuw9J21PW"
# # Results and ROC curve

# + id="KoAANHlH204C"
report_results(y_pred_test, y_pred_outliers)

# + [markdown] id="aYIOsDSvxAe3"
# # Isolation Forest

# + id="RsBh9S5gxCwY"
from sklearn.ensemble import IsolationForest

# + id="gWOJqCY0h29y"
start = time.time()
rng = np.random.RandomState(42)
# NOTE(review): the 'behaviour' parameter was deprecated in scikit-learn
# 0.22 and removed in 0.24 -- drop this argument when upgrading.
clf = IsolationForest(behaviour='new', max_samples=1000,
                      random_state=rng, contamination=0.01)
clf.fit(train_features_set)
print(time.time() - start)

# + id="QlSLFVX8h6kw"
y_pred_train = clf.predict(train_features_set)
y_pred_test = clf.predict(test_features_benign)
y_pred_outliers = clf.predict(test_features_malic)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size

# + [markdown] id="qfqH4GsciEaU"
# # Results and ROC curve

# + id="OJAN837JiEaV"
report_results(y_pred_test, y_pred_outliers)
/Python_Jupyter_Study_Note.ipynb
bd9020b220c880adf5c51e02bb7de776bd740670
[]
no_license
yhf187/Python_Tools
https://github.com/yhf187/Python_Tools
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
8,290
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- from __future__ import division import matplotlib.pyplot as plt import numpy as np import pandas as pd from collections import OrderedDict from lecroyutil.database import DatabaseAccess import re # import ipdb import scipy.signal as sig # %matplotlib notebook from yarmouth_calsoft_analysis import * from lecroyutil.bokeh_figure import BokehFigure, show_tabbed, setup_output_file pd.options.display.max_rows=1000 pd.options.display.max_columns=50 from lecroyanalysis import side_legend % cd Summary/ # # General df and pt generating and plotting template # + filenames = [#"LECROY,Janus,Yarmouth-Open-1,0.8.6_2019-05-13 17,54,09.177000_reshaped.txt", #"LECROY,Janus,SCOPE-SERIAL,0.8.6_2019-05-15 15,14,53.757000.txt", #"LECROY,Janus,SCOPE-SERIAL,0.8.6_10.30.7.61_2019-05-15 18,20,26.165000.txt", "LECROY,Janus,SCOPE-SERIAL,0.8.6_10.30.7.63_2019-05-15 18,44,51.477000.txt" ] # %matplotlib notebook def feedIntoOnedf(filenames): dfs = [] for i, f in enumerate(filenames): # des = f.split(',')[2:4] dftemp = pd.read_csv(f, sep='\t', index_col=False) #具体问题具体分析 dftemp['run'] = i dfs.append(dftemp) df = pd.concat(dfs) #This is to convert discrete channels columns to a single long column in order to set as pivot_table easier #I have converted the nasty form to normal, no need the following conversion anymore # df=pd.melt(df, id_vars=['vDiv', 'tDiv', 'Coupling', 'run'], value_vars=['C1','C2','C3','C4','C5','C6','C7','C8']) # df.columns = ['vDiv', 'tDiv', 'Coupling', 'Channel', 'value', 'run'] return df df = feedIntoOnedf(filenames) # df1 = pd.read_csv("LECROY,Janus,Yarmouth-Open-1,0.8.6_2019-05-14 11,12,39.647000.txt", sep='\t', index_col=False) # df1['run'] = 1 # df = pd.concat((df,df1)) #This is for Coupling filtering pt = pd.pivot_table(df, index='vDiv', values = 
'value', columns = ['run', 'Coupling', 'Channel', 'tDiv']) # values = pt.loc[:, pt[1].columns.get_level_values(1).isin(["C1"])] values = pt plt.figure(figsize=(8,6)) for c in values.columns: # if c[0] != 'DC50': continue freqs = values[c].dropna().index value = values[c].dropna() plt.plot(freqs, value, '.-') plt.title("SNR_with_sdev") plt.xlabel("vDiv(mV)") plt.ylabel("SNR dB") plt.xscale('log') plt.grid() # plt.legend(prop=fontP) plt.legend() side_legend() # - # # Some tricks to play with pivot table # + #pt.loc[:, pt.columns.get_level_values(1).isin([?]) & pt.columns.get_level_values(2).isin([?])...] # 能实现诸如excel一样地筛选出不同大类当中挑选特定几种小类 # 在之前的例子中, pivot table的大类顺序为:['run', 'Coupling', 'Channel', 'tDiv'], 若想绕开'run' 直接筛选‘Coupling'的entries, # 就可以用 pt.columns.get_level_values(1).isin(['DC50', 'DC1M']) # 筛选出大类之间交集只需在各条件之间加 & 即可,见第一行 values = pt.loc[:, pt.columns.get_level_values(2).isin(["C1"]) & pt.columns.get_level_values(1).isin(['DC50'])] #To simplify the process, I made a function: def ptFilter(pt, filts): ''' pt: the pivot table to be filterd out filts: the filter that contains filtering conditions. FORMAT: dict(list). exp: {'CH': ['1'], 'Coupling':['DC50']} return: the filtered out pivottable. 
''' vals = pt columns = pt.columns.names for k in filts: idx = columns.index(k) vals = vals.loc[:,vals.columns.get_level_values(idx).isin(filts[k])] return vals # - # # Some Good-to-know Useful functions # pd.melt: 将能归为一类的columns从横向压缩成纵向一例column, 便于后续pivot_table分类 fname = "LECROY,Janus,Yarmouth-Open-1,0.8.6_2019-05-13 17,54,09.177000.txt" df = pd.read_csv(fname, sep='\t', index_col=False) print(df.columns) df=pd.melt(df, id_vars=['vDiv', 'tDiv', 'Coupling'], value_vars=['C1','C2','C3','C4','C5','C6','C7','C8']) df.columns = ['vDiv', 'tDiv', 'Coupling', 'Channel', 'value'] print(df.columns) # df # # Play with the nasty legend #和legend的struggle还在继续,这里仅陈述已经发现的一些方法 #precisely control the legend location: #paste the follwing line to section 1, replace the original plt.legend(), you will see how this "loc" and "bbox_to_anchor" control the legend location plt.legend(loc='lower left', bbox_to_anchor=(-0.1, 0)) f.iat[index, 0], img0, self.direc_df.iat[index, 1], img1, dummy) def __len__(self): return len(self.direc_df) def get_label_file(self): print('label:', self.label_path) return self.label_path def data_loader(root, phase='train', batch_size=64,repeat_num=10): if phase == 'train': shuffle = True else: shuffle = False trfs = transforms.Compose([ transforms.Resize((112,112)), # transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), ]) dataset = CustomDataset(root, phase, repeat_num, trfs) dataloader = data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle) return dataloader, dataset.get_label_file() device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # + # for reproducibility np.random.seed(777) random.seed(777) torch.manual_seed(777) torch.cuda.manual_seed_all(777) DATASET_PATH = os.path.join('../data/03_face_verification_angle/') print(os.path.isdir(DATASET_PATH)) batch = 64 # - # get data loader train_dataloader, _ = data_loader(root=DATASET_PATH, phase='train', 
batch_size=batch) for iter_, data in enumerate(train_dataloader, 0): iter1_, img0, img0_id, iter2_, img1, img1_id, label = data print(img0_id, img1_id) img0, img1, label = img0.to(device), img1.to(device), label.to(device) #optimizer.zero_grad() #output1 = model(img0) #thetas1 = head(output1, img0_id ) #output2 = model(img1) #thetas2 = head(output2, img1_id) # Get the number of states S = len(locations) # Initialise the transition matrix T = np.zeros((S,S,4)) for action in range(4): for effect in range(4): # Randomize the outcome of taking an action outcome = (action+effect+1) % 4 if outcome == 0: outcome = 3 else: outcome -= 1 # Fill the transition matrix prob = self.action_randomizing_array[effect] for prior_state in range(S): post_state = neighbours[prior_state, outcome] post_state = int(post_state) T[post_state,prior_state,action] = T[post_state,prior_state,action]+prob # Build the reward matrix R = self.default_reward*np.ones((S,S,4)) for i, sr in enumerate(self.special_rewards): post_state = self.loc_to_state(self.absorbing_locs[i],locations) R[post_state,:,:]= sr return S, T,R,absorbing,locations def get_topology(self): height = self.shape[0] width = self.shape[1] index = 1 locs = [] neighbour_locs = [] for i in range(height): for j in range(width): # Get the locaiton of each state loc = (i,j) #And append it to the valid state locations if it is a valid state (ie not absorbing) if(self.is_location(loc)): locs.append(loc) # Get an array with the neighbours of each state, in terms of locations local_neighbours = [self.get_neighbour(loc,direction) for direction in ['nr','ea','so', 'we']] neighbour_locs.append(local_neighbours) # translate neighbour lists from locations to states num_states = len(locs) state_neighbours = np.zeros((num_states,4)) for state in range(num_states): for direction in range(4): # Find neighbour location nloc = neighbour_locs[state][direction] # Turn location into a state number nstate = self.loc_to_state(nloc,locs) # Insert into 
neighbour matrix state_neighbours[state,direction] = nstate; # Translate absorbing locations into absorbing state indices absorbing = np.zeros((1,num_states)) for a in self.absorbing_locs: absorbing_state = self.loc_to_state(a,locs) absorbing[0,absorbing_state] =1 return locs, state_neighbours, absorbing def loc_to_state(self,loc,locs): #takes list of locations and gives index corresponding to input loc return locs.index(tuple(loc)) def is_location(self, loc): # It is a valid location if it is in grid and not obstacle if(loc[0]<0 or loc[1]<0 or loc[0]>self.shape[0]-1 or loc[1]>self.shape[1]-1): return False elif(loc in self.obstacle_locs): return False else: return True def get_neighbour(self,loc,direction): #Find the valid neighbours (ie that are in the grif and not obstacle) i = loc[0] j = loc[1] nr = (i-1,j) ea = (i,j+1) so = (i+1,j) we = (i,j-1) # If the neighbour is a valid location, accept it, otherwise, stay put if(direction == 'nr' and self.is_location(nr)): return nr elif(direction == 'ea' and self.is_location(ea)): return ea elif(direction == 'so' and self.is_location(so)): return so elif(direction == 'we' and self.is_location(we)): return we else: #default is to return to the same location return loc ########################################### # + grid = GridWorld() ### Question 1 : Change the policy here: Policy= np.zeros((grid.state_size, grid.action_size)) print("The Policy is : {}".format(Policy)) val = 0 #Change here! print("The value of that policy is : {}".format(val)) # - # Using draw_deterministic_policy to illustrate some arbitracy policy. Policy2 = np.zeros(22).astype(int) Policy2[2] = 3 Policy2[6] = 2 Policy2[18] = 1 grid.draw_deterministic_policy(Policy2)
10,757
/source/notebooks/learned_cn_exploration.ipynb
eb1b1d1fd0cc0f4c4f17ca79edf478a32636a45a
[]
no_license
jasonjewik/dpp-experiments
https://github.com/jasonjewik/dpp-experiments
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
1,388
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.15.2
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import json
import pandas as pd
import numpy as np

# # Name file directory path
file_dir = 'C:/Users/TRAN/Desktop/Movies-ETL'

# # Open wiki movies and check through data
# Load the raw Wikipedia movie records from JSON.
with open(f'{file_dir}/wikipedia.movies.json', mode='r') as file:
    wiki_movies_raw = json.load(file)
len(wiki_movies_raw)

# First 5 records
wiki_movies_raw[:5]

# Last 5 records
wiki_movies_raw[-5:]

# Some records in the middle
wiki_movies_raw[3600:3605]

# # Download from kaggle and load to DataFrame
kaggle_metadata = pd.read_csv(f'{file_dir}/movies_metadata.csv', low_memory=False)
ratings = pd.read_csv(f'{file_dir}/ratings.csv')

# Sample
kaggle_metadata.sample(5)

ratings.sample(5)

# # Create DataFrame for wiki_movies
wiki_movies_df = pd.DataFrame(wiki_movies_raw)
wiki_movies_df.head()

wiki_movies_df.columns.tolist()

# # Filter shows out of list
# Keep only entries that look like films: they must name a director and an
# IMDb link, and must not carry an episode count (which indicates a TV show).
wiki_movies = [movie for movie in wiki_movies_raw
               if ('Director' in movie or 'Directed by' in movie)
               and 'imdb_link' in movie
               and 'No. of episodes' not in movie]
len(wiki_movies)

# # To look at other titles - alternative titles
wiki_movies_df[wiki_movies_df['Arabic'].notnull()]

wiki_movies_df[wiki_movies_df['Arabic'].notnull()]['url']

# # Sort columns by ABC
sorted(wiki_movies_df.columns.tolist())

# # Step 1: Make an empty dict to hold all of the alternative titles.
# # Step 2: Loop through a list of all alternative title keys.
# ## Step 2a: Check if the current key exists in the movie object.
# ## Step 2b: If so, remove the key-value pair and add to the alternative titles dictionary.
# # Step 3: After looping through every key, add the alternative titles dict to the movie object.
def clean_movie(movie):
    """Return a cleaned copy of one raw Wikipedia movie record.

    Alternate-language titles are collected under a single 'alt_titles'
    key, and inconsistently named columns are renamed to a canonical set.
    The input record itself is not modified.
    """
    movie = dict(movie)  # work on a non-destructive copy

    # Gather every alternate-title variant into one nested dict.
    alt_title_keys = ['Also known as', 'Arabic', 'Cantonese', 'Chinese', 'French',
                      'Hangul', 'Hebrew', 'Hepburn', 'Japanese', 'Literally',
                      'Mandarin', 'McCune–Reischauer', 'Original title', 'Polish',
                      'Revised Romanization', 'Romanized', 'Russian',
                      'Simplified', 'Traditional', 'Yiddish']
    alt_titles = {}
    for key in alt_title_keys:
        if key in movie:
            alt_titles[key] = movie.pop(key)
    if alt_titles:
        movie['alt_titles'] = alt_titles

    def change_column_name(old_name, new_name):
        # Rename a key in place when it exists.
        if old_name in movie:
            movie[new_name] = movie.pop(old_name)

    # Table of column renames. Order matters: 'Released' -> 'Release Date'
    # must run before 'Release Date' -> 'Release date'.
    column_renames = [
        ('Adaptation by', 'Writer(s)'),
        ('Country of origin', 'Country'),
        ('Directed by', 'Director'),
        ('Distributed by', 'Distributor'),
        ('Edited by', 'Editor(s)'),
        ('Length', 'Running time'),
        ('Original release', 'Release date'),
        ('Music by', 'Composer(s)'),
        ('Produced by', 'Producer(s)'),
        ('Producer', 'Producer(s)'),
        ('Productioncompanies ', 'Production company(s)'),
        ('Productioncompany ', 'Production company(s)'),
        ('Released', 'Release Date'),
        ('Release Date', 'Release date'),
        ('Screen story by', 'Writer(s)'),
        ('Screenplay by', 'Writer(s)'),
        ('Story by', 'Writer(s)'),
        ('Theme music composer', 'Composer(s)'),
        ('Written by', 'Writer(s)'),
    ]
    for old_name, new_name in column_renames:
        change_column_name(old_name, new_name)

    return movie


clean_movies = [clean_movie(movie) for movie in wiki_movies]
wiki_movies_df = pd.DataFrame(clean_movies)
sorted(wiki_movies_df.columns.tolist())

# # Extract imdb_id from link
wiki_movies_df['imdb_id'] = wiki_movies_df['imdb_link'].str.extract(r'(tt\d{7})')
print(len(wiki_movies_df))
wiki_movies_df.drop_duplicates(subset='imdb_id', inplace=True)
print(len(wiki_movies_df))
wiki_movies_df.head()

# # Remove Mostly Null
# Columns
# ## Count null values
# PERFORMANCE FIX: the per-column null-count scan over the whole DataFrame
# was previously recomputed three times (once per cell below); compute it
# once and reuse it.
null_counts = wiki_movies_df.isnull().sum()
[[column, null_counts[column]] for column in wiki_movies_df.columns]

# # Make a list of columns that have less than 90% null values
threshold = len(wiki_movies_df) * 0.9
[column for column in wiki_movies_df.columns if null_counts[column] < threshold]

wiki_columns_to_keep = [column for column in wiki_movies_df.columns
                        if null_counts[column] < threshold]
wiki_movies_df = wiki_movies_df[wiki_columns_to_keep]
wiki_movies_df.head()

# # Check data types
wiki_movies_df.dtypes

# # Convert data types
box_office = wiki_movies_df['Box office'].dropna()
/Bike_Share_Analysis.ipynb
a8437665129b76303755327973a325219db1342a
[]
no_license
Ziyad-Alotaibi/Bikeshare
https://github.com/Ziyad-Alotaibi/Bikeshare
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
107,312
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 2016 US Bike Share Activity Snapshot # # ## Table of Contents # - [Introduction](#intro) # - [Posing Questions](#pose_questions) # - [Data Collection and Wrangling](#wrangling) # - [Condensing the Trip Data](#condensing) # - [Exploratory Data Analysis](#eda) # - [Statistics](#statistics) # - [Visualizations](#visualizations) # - [Performing Your Own Analysis](#eda_continued) # - [Conclusions](#conclusions) # # <a id='intro'></a> # ## Introduction # # > **Tip**: Quoted sections like this will provide helpful instructions on how to navigate and use a Jupyter notebook. # # Over the past decade, bicycle-sharing systems have been growing in number and popularity in cities across the world. Bicycle-sharing systems allow users to rent bicycles for short trips, typically 30 minutes or less. Thanks to the rise in information technologies, it is easy for a user of the system to access a dock within the system to unlock or return bicycles. These technologies also provide a wealth of data that can be used to explore how these bike-sharing systems are used. # # In this project, you will perform an exploratory analysis on data provided by [Motivate](https://www.motivateco.com/), a bike-share system provider for many major cities in the United States. You will compare the system usage between three large cities: New York City, Chicago, and Washington, DC. You will also see if there are any differences within each system for those users that are registered, regular users and those users that are short-term, casual users. # # <a id='pose_questions'></a> # ## Posing Questions # # Before looking at the bike sharing data, you should start by asking questions you might want to understand about the bike share data. 
Consider, for example, if you were working for Motivate. What kinds of information would you want to know about in order to make smarter business decisions? If you were a user of the bike-share service, what factors might influence how you would want to use the service? # # **Question 1**: Write at least two questions related to bike sharing that you think could be answered by data. # # **Answer**: 1.What is the highest season for the used bikes? # 2What is the proportion of user trips for every type of groups? # # > **Tip**: If you double click on this cell, you will see the text change so that all of the formatting is removed. This allows you to edit this block of text. This block of text is written using [Markdown](http://daringfireball.net/projects/markdown/syntax), which is a way to format text using headers, links, italics, and many other options using a plain-text syntax. You will also use Markdown later in the Nanodegree program. Use **Shift** + **Enter** or **Shift** + **Return** to run the cell and show its rendered form. # <a id='wrangling'></a> # ## Data Collection and Wrangling # # Now it's time to collect and explore our data. In this project, we will focus on the record of individual trips taken in 2016 from our selected cities: New York City, Chicago, and Washington, DC. Each of these cities has a page where we can freely download the trip data.: # # - New York City (Citi Bike): [Link](https://www.citibikenyc.com/system-data) # - Chicago (Divvy): [Link](https://www.divvybikes.com/system-data) # - Washington, DC (Capital Bikeshare): [Link](https://www.capitalbikeshare.com/system-data) # # If you visit these pages, you will notice that each city has a different way of delivering its data. Chicago updates with new data twice a year, Washington DC is quarterly, and New York City is monthly. **However, you do not need to download the data yourself.** The data has already been collected for you in the `/data/` folder of the project files. 
While the original data for 2016 is spread among multiple files for each city, the files in the `/data/` folder collect all of the trip data for the year into one file per city. Some data wrangling of inconsistencies in timestamp format within each city has already been performed for you. In addition, a random 2% sample of the original data is taken to make the exploration more manageable. # # **Question 2**: However, there is still a lot of data for us to investigate, so it's a good idea to start off by looking at one entry from each of the cities we're going to analyze. Run the first code cell below to load some packages and functions that you'll be using in your analysis. Then, complete the second code cell to print out the first trip recorded from each of the cities (the second line of each data file). # # > **Tip**: You can run a code cell like you formatted Markdown cells above by clicking on the cell and using the keyboard shortcut **Shift** + **Enter** or **Shift** + **Return**. Alternatively, a code cell can be executed using the **Play** button in the toolbar after selecting it. While the cell is running, you will see an asterisk in the message to the left of the cell, i.e. `In [*]:`. The asterisk will change into a number to show that execution has completed, e.g. `In [1]`. If there is output, it will show up as `Out [1]:`, with an appropriate number to match the "In" number. ## import all necessary packages and functions. import csv # read and write csv files from datetime import datetime # operations to parse dates from pprint import pprint # use to print data structures like dictionaries in # a nicer way than the base print function. # + def print_first_point(filename): """ This function prints and returns the first data point (second row) from a csv file that includes a header row. 
""" # print city name for reference city = filename.split('-')[0].split('/')[-1] print('\nCity: {}'.format(city)) with open(filename, 'r') as f_in: ## TODO: Use the csv library to set up a DictReader object. ## ## see https://docs.python.org/3/library/csv.html ## trip_reader = csv.DictReader(f_in) ## TODO: Use a function on the DictReader object to read the ## ## first trip from the data file and store it in a variable. ## ## see https://docs.python.org/3/library/csv.html#reader-objects ## first_trip = next(trip_reader) ## TODO: Use the pprint library to print the first trip. ## ## see https://docs.python.org/3/library/pprint.html ## pprint (first_trip) # output city name and first trip for later testing return (city, first_trip) # list of files for each city data_files = ['./data/NYC-CitiBike-2016.csv', './data/Chicago-Divvy-2016.csv', './data/Washington-CapitalBikeshare-2016.csv',] # print the first trip from each file, store in dictionary example_trips = {} for data_file in data_files: city, first_trip = print_first_point(data_file) example_trips[city] = first_trip # - # If everything has been filled out correctly, you should see below the printout of each city name (which has been parsed from the data file name) that the first trip has been parsed in the form of a dictionary. When you set up a `DictReader` object, the first row of the data file is normally interpreted as column names. Every other row in the data file will use those column names as keys, as a dictionary is generated for each row. # # This will be useful since we can refer to quantities by an easily-understandable label instead of just a numeric index. For example, if we have a trip stored in the variable `row`, then we would rather get the trip duration from `row['duration']` instead of `row[0]`. # # <a id='condensing'></a> # ### Condensing the Trip Data # # It should also be observable from the above printout that each city provides different information. 
Even where the information is the same, the column names and formats are sometimes different. To make things as simple as possible when we get to the actual exploration, we should trim and clean the data. Cleaning the data makes sure that the data formats across the cities are consistent, while trimming focuses only on the parts of the data we are most interested in to make the exploration easier to work with. # # You will generate new data files with five values of interest for each trip: trip duration, starting month, starting hour, day of the week, and user type. Each of these may require additional wrangling depending on the city: # # - **Duration**: This has been given to us in seconds (New York, Chicago) or milliseconds (Washington). A more natural unit of analysis will be if all the trip durations are given in terms of minutes. # - **Month**, **Hour**, **Day of Week**: Ridership volume is likely to change based on the season, time of day, and whether it is a weekday or weekend. Use the start time of the trip to obtain these values. The New York City data includes the seconds in their timestamps, while Washington and Chicago do not. The [`datetime`](https://docs.python.org/3/library/datetime.html) package will be very useful here to make the needed conversions. # - **User Type**: It is possible that users who are subscribed to a bike-share system will have different patterns of use compared to users who only have temporary passes. Washington divides its users into two types: 'Registered' for users with annual, monthly, and other longer-term subscriptions, and 'Casual', for users with 24-hour, 3-day, and other short-term passes. The New York and Chicago data uses 'Subscriber' and 'Customer' for these groups, respectively. For consistency, you will convert the Washington labels to match the other two. # # # **Question 3a**: Complete the helper functions in the code cells below to address each of the cleaning tasks described above. 
# + def duration_in_mins(datum, city): """ Takes as input a dictionary containing info about a single trip (datum) and its origin city (city) and returns the trip duration in units of minutes. Remember that Washington is in terms of milliseconds while Chicago and NYC are in terms of seconds. HINT: The csv module reads in all of the data as strings, including numeric values. You will need a function to convert the strings into an appropriate numeric type when making your transformations. see https://docs.python.org/3/library/functions.html """ duration =0 if (city=='Washington'): duration = int(datum['Duration (ms)'])/60000 else: duration = int(datum['tripduration'])/60 return duration # Some tests to check that your code works. There should be no output if all of # the assertions pass. The `example_trips` dictionary was obtained from when # you printed the first trip from each of the original data files. tests = {'NYC': 13.9833, 'Chicago': 15.4333, 'Washington': 7.1231} for city in tests: assert abs(duration_in_mins(example_trips[city], city) - tests[city]) < .001 # + def time_of_trip(datum, city): """ Takes as input a dictionary containing info about a single trip (datum) and its origin city (city) and returns the month, hour, and day of the week in which the trip was made. Remember that NYC includes seconds, while Washington and Chicago do not. HINT: You should use the datetime module to parse the original date strings into a format that is useful for extracting the desired information. 
see https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior """ # YOUR CODE HERE triptime='' if (city=='NYC'): triptime = datetime.strptime(datum['starttime'],'%m/%d/%Y %H:%M:%S') month = int(triptime.strftime('%m')) hour = int(triptime.strftime('%H')) day_of_week = triptime.strftime('%A') if (city=='Chicago'): triptime = datetime.strptime(datum['starttime'],'%m/%d/%Y %H:%M') month = int(triptime.strftime('%m')) hour = int(triptime.strftime('%H')) day_of_week = triptime.strftime('%A') if (city=='Washington'): triptime = datetime.strptime(datum['Start date'],'%m/%d/%Y %H:%M') month = int(triptime.strftime('%m')) hour = int(triptime.strftime('%H')) day_of_week = triptime.strftime('%A') return (month, hour, day_of_week) # Some tests to check that your code works. There should be no output if all of # the assertions pass. The `example_trips` dictionary was obtained from when # you printed the first trip from each of the original data files. tests = {'NYC': (1, 0, 'Friday'), 'Chicago': (3, 23, 'Thursday'), 'Washington': (3, 22, 'Thursday')} for city in tests: assert time_of_trip(example_trips[city], city) == tests[city] # + def type_of_user(datum, city): """ Takes as input a dictionary containing info about a single trip (datum) and its origin city (city) and returns the type of system user that made the trip. Remember that Washington has different category names compared to Chicago and NYC. """ # YOUR CODE HERE user_type='' if (city=='NYC'): user_type = datum['usertype'] if (city=='Chicago'): user_type = datum['usertype'] if (city=='Washington'): if (datum['Member Type']=='Registered'): user_type='Subscriber' if (datum['Member Type']=='Casual'): user_type='Customer' return user_type # Some tests to check that your code works. There should be no output if all of # the assertions pass. The `example_trips` dictionary was obtained from when # you printed the first trip from each of the original data files. 
tests = {'NYC': 'Customer', 'Chicago': 'Subscriber', 'Washington': 'Subscriber'} for city in tests: assert type_of_user(example_trips[city], city) == tests[city] # - # **Question 3b**: Now, use the helper functions you wrote above to create a condensed data file for each city consisting only of the data fields indicated above. In the `/examples/` folder, you will see an example datafile from the [Bay Area Bike Share](http://www.bayareabikeshare.com/open-data) before and after conversion. Make sure that your output is formatted to be consistent with the example file. # + def condense_data(in_file, out_file, city): """ This function takes full data from the specified input file and writes the condensed data to a specified output file. The city argument determines how the input file will be parsed. HINT: See the cell below to see how the arguments are structured! """ with open(out_file, 'w') as f_out, open(in_file, 'r') as f_in: # set up csv DictWriter object - writer requires column names for the # first row as the "fieldnames" argument out_colnames = ['duration', 'month', 'hour', 'day_of_week', 'user_type'] trip_writer = csv.DictWriter(f_out, fieldnames = out_colnames) trip_writer.writeheader() ## TODO: set up csv DictReader object ## trip_reader = csv.DictReader(f_in) # collect data from and process each row for row in trip_reader: # set up a dictionary to hold the values for the cleaned and trimmed # data point new_point = {} ## TODO: use the helper functions to get the cleaned data from ## ## the original data dictionaries. ## ## Note that the keys for the new_point dictionary should match ## ## the column names set in the DictWriter object above. 
## duration_mins= duration_in_mins(row, city) t_month= time_of_trip(row, city)[0] t_hour= time_of_trip(row, city)[1] t_day= time_of_trip(row, city)[2] t_user= type_of_user(row, city) new_point = {'duration':duration_mins, 'month':t_month, 'hour':t_hour, 'day_of_week':t_day, 'user_type':t_user} ## TODO: write the processed information to the output file. ## ## see https://docs.python.org/3/library/csv.html#writer-objects ## trip_writer.writerow(new_point) # + # Run this cell to check your work city_info = {'Washington': {'in_file': './data/Washington-CapitalBikeshare-2016.csv', 'out_file': './data/Washington-2016-Summary.csv'}, 'Chicago': {'in_file': './data/Chicago-Divvy-2016.csv', 'out_file': './data/Chicago-2016-Summary.csv'}, 'NYC': {'in_file': './data/NYC-CitiBike-2016.csv', 'out_file': './data/NYC-2016-Summary.csv'}} for city, filenames in city_info.items(): condense_data(filenames['in_file'], filenames['out_file'], city) print_first_point(filenames['out_file']) # - # > **Tip**: If you save a jupyter Notebook, the output from running code blocks will also be saved. However, the state of your workspace will be reset once a new session is started. Make sure that you run all of the necessary code blocks from your previous session to reestablish variables and functions before picking up where you last left off. # # <a id='eda'></a> # ## Exploratory Data Analysis # # Now that you have the data collected and wrangled, you're ready to start exploring the data. In this section you will write some code to compute descriptive statistics from the data. You will also be introduced to the `matplotlib` library to create some basic histograms of the data. # # <a id='statistics'></a> # ### Statistics # # First, let's compute some basic counts. The first cell below contains a function that uses the csv module to iterate through a provided data file, returning the number of trips made by subscribers and customers. 
The second cell runs this function on the example Bay Area data in the `/examples/` folder. Modify the cells to answer the question below. # # **Question 4a**: Which city has the highest number of trips? Which city has the highest proportion of trips made by subscribers? Which city has the highest proportion of trips made by short-term customers? # # **Answer**: NYC has 276798 trips so it is the highest number of trips, # NYC has 88.83590199351151% so it is the highest proportion of trips made by subscribers, # Chicago has 23.774798630269924% so it is the highest proportion of trips made by short-term customers. def number_of_trips(filename): """ This function reads in a file with trip data and reports the number of trips made by subscribers, customers, and total overall. """ with open(filename, 'r') as f_in: # set up csv reader object reader = csv.DictReader(f_in) # initialize count variables n_subscribers = 0 n_customers = 0 proportion_sub =0 proportion_cust =0 # tally up ride types for row in reader: if row['user_type'] == 'Subscriber': n_subscribers += 1 else: n_customers += 1 # compute total number of rides n_total = n_subscribers + n_customers proportion_sub= float(100.0*(n_subscribers/n_total)) proportion_cust= float(100.0*(n_customers/n_total)) # return tallies as a tuple return(proportion_sub, proportion_cust, n_total) # + ## Modify this and the previous cell to answer Question 4a. Remember to run ## ## the function on the cleaned data files you created from Question 3. 
## data_file1 = './data/Chicago-2016-Summary.csv' print('Chicago(subscriber proportion,customer proportion,trips total)',number_of_trips(data_file1)) data_file2 = './data/NYC-2016-Summary.csv' print('NYC(subscriber proportion,customer proportion,trips total)',number_of_trips(data_file2)) data_file3 = './data/Washington-2016-Summary.csv' print('Washington(subscriber proportion,customer proportion,trips total)',number_of_trips(data_file3)) # - # > **Tip**: In order to add additional cells to a notebook, you can use the "Insert Cell Above" and "Insert Cell Below" options from the menu bar above. There is also an icon in the toolbar for adding new cells, with additional icons for moving the cells up and down the document. By default, new cells are of the code type; you can also specify the cell type (e.g. Code or Markdown) of selected cells from the Cell menu or the dropdown in the toolbar. # # Now, you will write your own code to continue investigating properties of the data. # # **Question 4b**: Bike-share systems are designed for riders to take short trips. Most of the time, users are allowed to take trips of 30 minutes or less with no additional charges, with overage charges made for trips of longer than that duration. What is the average trip length for each city? What proportion of rides made in each city are longer than 30 minutes? # # **Answer**: average trip length for Chicago is 16.563629368787335, # average trip length for NYC is 15.81259299802294, # average trip length for Washington is 18.93287355913721, # proportion of rides made in Chicago(longer than 30 mins) is 8.332062497400562 # proportion of rides made in NYC (longer than 30 mins) is 7.3024371563378345 # proportion of rides made in Washington (longer than 30 mins) is 10.83888671109369 # # + ## Use this and additional cells to answer Question 4b. ## ## ## ## HINT: The csv module reads in all of the data as strings, including ## ## numeric values. 
You will need a function to convert the strings ## ## into an appropriate numeric type before you aggregate data. ## ## TIP: For the Bay Area example, the average trip length is 14 minutes ## ## and 3.5% of trips are longer than 30 minutes. ## def long_of_trip (file_name): with open(file_name, 'r') as f_in: reader = csv.DictReader(f_in) duration_total=0 trips_total=0 longer_30m_trips=0 avg_of_length=0 propution_of_longer=0 for row in reader: trips_total+=1 duration_total+=float(row['duration']) if (float(row['duration'])>30): longer_30m_trips+=1 avg_of_length = float( duration_total/trips_total ) propution_of_longer = float(100.0*longer_30m_trips/trips_total) return (avg_of_length,propution_of_longer) data_file1 = './data/Chicago-2016-Summary.csv' print('Chicago(average trip length,proportion of rides )',long_of_trip(data_file1)) data_file2 = './data/NYC-2016-Summary.csv' print('NYC(average trip length,proportion of rides )',long_of_trip(data_file2)) data_file3 = './data/Washington-2016-Summary.csv' print('Washington(average trip length,proportion of rides )',long_of_trip(data_file3)) # - # **Question 4c**: Dig deeper into the question of trip duration based on ridership. Choose one city. Within that city, which type of user takes longer rides on average: Subscribers or Customers? # # **Answer**: Chicago: # average customer duration is:30.979781133982506 , # average subscriber duration is: 12.067201690250076 # + ## Use this and additional cells to answer Question 4c. If you have ## ## not done so yet, consider revising some of your previous code to ## ## make use of functions for reusability. ## ## ## ## TIP: For the Bay Area example data, you should find the average ## ## Subscriber trip duration to be 9.5 minutes and the average Customer ## ## trip duration to be 54.6 minutes. Do the other cities have this ## ## level of difference? 
## def avg_of_type (file_name): with open(file_name, 'r') as f_in: reader = csv.DictReader(f_in) custm_toatl=0 sub_total=0 duration_custm_total=0 duration_sub_total=0 avg_custm=0 avg_sub=0 for row in reader: if (row['user_type']=='Customer'): custm_toatl+=1 duration_custm_total+=float(row['duration']) else: sub_total+=1 duration_sub_total+=float(row['duration']) avg_custm = float( duration_custm_total/custm_toatl ) avg_sub = float(duration_sub_total/sub_total) return (avg_custm,avg_sub) data_file1 = './data/Chicago-2016-Summary.csv' print('Chicago(average customer duration,average subscriber duration )',avg_of_type(data_file1)) # - # <a id='visualizations'></a> # ### Visualizations # # The last set of values that you computed should have pulled up an interesting result. While the mean trip time for Subscribers is well under 30 minutes, the mean trip time for Customers is actually _above_ 30 minutes! It will be interesting for us to look at how the trip times are distributed. In order to do this, a new library will be introduced here, `matplotlib`. Run the cell below to load the library and to generate an example plot. # + # load library import matplotlib.pyplot as plt # this is a 'magic word' that allows for plots to be displayed # inline with the notebook. 
If you want to know more, see: # http://ipython.readthedocs.io/en/stable/interactive/magics.html # %matplotlib inline # example histogram, data taken from bay area sample data = [ 7.65, 8.92, 7.42, 5.50, 16.17, 4.20, 8.98, 9.62, 11.48, 14.33, 19.02, 21.53, 3.90, 7.97, 2.62, 2.67, 3.08, 14.40, 12.90, 7.83, 25.12, 8.30, 4.93, 12.43, 10.60, 6.17, 10.88, 4.78, 15.15, 3.53, 9.43, 13.32, 11.72, 9.85, 5.22, 15.10, 3.95, 3.17, 8.78, 1.88, 4.55, 12.68, 12.38, 9.78, 7.63, 6.45, 17.38, 11.90, 11.52, 8.63,] plt.hist(data) plt.title('Distribution of Trip Durations') plt.xlabel('Duration (m)') plt.show() # - # In the above cell, we collected fifty trip times in a list, and passed this list as the first argument to the `.hist()` function. This function performs the computations and creates plotting objects for generating a histogram, but the plot is actually not rendered until the `.show()` function is executed. The `.title()` and `.xlabel()` functions provide some labeling for plot context. # # You will now use these functions to create a histogram of the trip times for the city you selected in question 4c. Don't separate the Subscribers and Customers for now: just collect all of the trip times and plot them. # + ## Use this and additional cells to collect all of the trip times as a list ## ## and then use pyplot functions to generate a histogram of trip times. ## import matplotlib.pyplot as plt # this is a 'magic word' that allows for plots to be displayed # inline with the notebook. 
If you want to know more, see: # http://ipython.readthedocs.io/en/stable/interactive/magics.html # %matplotlib inline # example histogram, data taken from bay area sample data = [] file_name = './data/Chicago-2016-Summary.csv' with open(file_name, 'r') as f_in: reader = csv.DictReader(f_in) for row in reader: data.append(float(row['duration'])) plt.hist(data) plt.title('Trip Duration Plot') plt.xlabel('Duration (m)') plt.show() # - # If you followed the use of the `.hist()` and `.show()` functions exactly like in the example, you're probably looking at a plot that's completely unexpected. The plot consists of one extremely tall bar on the left, maybe a very short second bar, and a whole lot of empty space in the center and right. Take a look at the duration values on the x-axis. This suggests that there are some highly infrequent outliers in the data. Instead of reprocessing the data, you will use additional parameters with the `.hist()` function to limit the range of data that is plotted. Documentation for the function can be found [[here]](https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.hist.html#matplotlib.pyplot.hist). # # **Question 5**: Use the parameters of the `.hist()` function to plot the distribution of trip times for the Subscribers in your selected city. Do the same thing for only the Customers. Add limits to the plots so that only trips of duration less than 75 minutes are plotted. As a bonus, set the plots up so that bars are in five-minute wide intervals. For each group, where is the peak of each distribution? How would you describe the shape of each distribution? # # **Answer**: The peak of customer type is 20 ,and the peak of subscriber type is between 5 and 10, # the histogram of customer duration is converged between 0 and 30 mins while the histogram of customer duration is not converged after 30 mins. # + ## Use this and additional cells to answer Question 5. 
## import matplotlib.pyplot as plt # %matplotlib inline data_cusomer = [] data_subscriber = [] file_name = './data/Chicago-2016-Summary.csv' with open(file_name, 'r') as f_in: reader = csv.DictReader(f_in) for row in reader: if(row['user_type']=='Customer'): data_cusomer.append(float(row['duration'])) else: data_subscriber.append(float(row['duration'])) plt.hist(data_cusomer,range(0,75)) plt.title('Trip Duration Plot for Customer Type') plt.xlabel('Duration (m)') plt.show() plt.hist(data_subscriber,range(0,75)) plt.title('Trip Duration Plot for Subscriber Type') plt.xlabel('Duration (m)') plt.show() # - # <a id='eda_continued'></a> # ## Performing Your Own Analysis # # So far, you've performed an initial exploration into the data available. You have compared the relative volume of trips made between three U.S. cities and the ratio of trips made by Subscribers and Customers. For one of these cities, you have investigated differences between Subscribers and Customers in terms of how long a typical trip lasts. Now it is your turn to continue the exploration in a direction that you choose. Here are a few suggestions for questions to explore: # # - How does ridership differ by month or season? Which month / season has the highest ridership? Does the ratio of Subscriber trips to Customer trips change depending on the month or season? # - Is the pattern of ridership different on the weekends versus weekdays? On what days are Subscribers most likely to use the system? What about Customers? Does the average duration of rides change depending on the day of the week? # - During what time of day is the system used the most? Is there a difference in usage patterns for Subscribers and Customers? # # If any of the questions you posed in your answer to question 1 align with the bullet points above, this is a good opportunity to investigate one of them. As part of your investigation, you will need to create a visualization. 
If you want to create something other than a histogram, then you might want to consult the [Pyplot documentation](https://matplotlib.org/devdocs/api/pyplot_summary.html). In particular, if you are plotting values across a categorical variable (e.g. city, user type), a bar chart will be useful. The [documentation page for `.bar()`](https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.bar.html#matplotlib.pyplot.bar) includes links at the bottom of the page with examples for you to build off of for your own use. # # **Question 6**: Continue the investigation by exploring another question that could be answered by the data available. Document the question you want to explore below. Your investigation should involve at least two variables and should compare at least two groups. You should also use at least one visualization as part of your explorations. # # **Answer**: What is the changes of total rides depending on the the month in Chicago? # Answer : The peak of customer type is the 6th mont ,and the peak of subscriber type is the 7th month. # + ## Use this and additional cells to continue to explore the dataset. ## ## Once you have performed your exploration, document your findings ## ## in the Markdown cell above. ## import matplotlib.pyplot as plt # %matplotlib inline data_cusomer = [] data_subscriber = [] file_name = './data/Chicago-2016-Summary.csv' with open(file_name, 'r') as f_in: reader = csv.DictReader(f_in) for row in reader: if(row['user_type']=='Customer'): data_cusomer.append(float(row['month'])) else: data_subscriber.append(float(row['month'])) plt.hist(data_cusomer) plt.title('Total rides Plot for Customer Type') plt.xlabel('Months') plt.show() plt.hist(data_subscriber) plt.title('Total rides Plot for Subscriber Type') plt.xlabel('Months') plt.show() # - # <a id='conclusions'></a> # ## Conclusions # # Congratulations on completing the project! 
This is only a sampling of the data analysis process: from generating questions, wrangling the data, and to exploring the data. Normally, at this point in the data analysis process, you might want to draw conclusions about the data by performing a statistical test or fitting the data to a model for making predictions. There are also a lot of potential analyses that could be performed on the data which are not possible with only the data provided. For example, detailed location data has not been investigated. Where are the most commonly used docks? What are the most common routes? As another example, weather has potential to have a large impact on daily ridership. How much is ridership impacted when there is rain or snow? Are subscribers or customers affected more by changes in weather? # # **Question 7**: Putting the bike share data aside, think of a topic or field of interest where you would like to be able to apply the techniques of data science. What would you like to be able to learn from your chosen subject? # # **Answer**: I can use data analysis to measure the people usage of technology like example , # many institutions and their clients despite the existence of technology, but there is resistance to change, data analytics will help to understand the user behaviour. # # > **Tip**: If we want to share the results of our analysis with others, we aren't limited to giving them a copy of the jupyter Notebook (.ipynb) file. We can also export the Notebook output in a form that can be opened even for those without Python installed. From the **File** menu in the upper left, go to the **Download as** submenu. You can then choose a different format that can be viewed more generally, such as HTML (.html) or # PDF (.pdf). You may need additional packages or software to perform these exports. # # > If you are working on this project via the Project Notebook page in the classroom, you can also submit this project directly from the workspace. 
**Before you do that**, you should save an HTML copy of the completed project to the workspace by running the code cell below. If it worked correctly, the output code should be a 0, and if you click on the jupyter icon in the upper left, you should see your .html document in the workspace directory. Alternatively, you can download the .html copy of your report following the steps in the previous paragraph, then _upload_ the report to the directory (by clicking the jupyter icon). # # > Either way, once you've gotten the .html report in your workspace, you can complete your submission by clicking on the "Submit Project" button to the lower-right hand side of the workspace. from subprocess import call call(['python', '-m', 'nbconvert', 'Bike_Share_Analysis.ipynb'])
35,908
/Alura_dia_1.ipynb
916c8a9b83f5a24acee82936b198363ac2b7ae91
[]
no_license
soaresmiltinho/heloo-world
https://github.com/soaresmiltinho/heloo-world
0
0
null
2020-01-14T18:21:44
2020-01-14T15:36:49
null
Jupyter Notebook
false
false
.py
281,046
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/soaresmilton/heloo-world/blob/master/Alura_dia_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="wfY_Vvb5lgg0" outputId="e47c5395-3681-4bad-b6d1-5e86caa2966e" colab={"base_uri": "https://localhost:8080/", "height": 244} import pandas as pd fonte = "https://github.com/alura-cursos/imersao-dados-2-2020/blob/master/MICRODADOS_ENEM_2019_SAMPLE_43278.csv?raw=true" dados = pd.read_csv(fonte) dados.head() # + id="dTzIflLDyu4U" outputId="19d838f8-9e6d-4956-9427-0b2c720fab1c" colab={"base_uri": "https://localhost:8080/", "height": 34} dados.shape # + id="v2bjuoDDzRm7" outputId="11b91336-3a32-4057-f899-12ab22a827bc" colab={"base_uri": "https://localhost:8080/", "height": 218} dados['CO_MUNICIPIO_RESIDENCIA'] # + id="-GgwHKzYzbdp" outputId="e5305530-9419-4610-f2a3-9b0ce92a61c1" colab={"base_uri": "https://localhost:8080/", "height": 218} dados["SG_UF_RESIDENCIA"] # + id="77WveOQFzgdK" outputId="18590a50-f649-4e15-8a66-e5a29dcc73a7" colab={"base_uri": "https://localhost:8080/", "height": 672} dados.columns.values # + id="NCh3xPLG0Suc" outputId="bb724bca-157b-4dc0-e765-72f13ad2241b" colab={"base_uri": "https://localhost:8080/", "height": 402} dados[["SG_UF_RESIDENCIA", "Q025"]] # + id="IO7Agcu8YPRt" outputId="ce29108e-3d9e-478b-c8be-9dfcd8935d74" colab={"base_uri": "https://localhost:8080/", "height": 218} dados["SG_UF_RESIDENCIA"] # + id="H_M1psbXYczV" outputId="ade2feb8-7b97-417b-9cba-142042b56cfc" colab={"base_uri": "https://localhost:8080/", "height": 67} dados["SG_UF_RESIDENCIA"].unique() # + id="CLtiVgdQYiLL" outputId="d91e1a1e-458d-4934-fcb4-347deeb9cb47" colab={"base_uri": 
"https://localhost:8080/", "height": 34} len(dados["SG_UF_RESIDENCIA"].unique()) # + id="_2dVdMHQYnuO" outputId="95f2bf2d-6147-41ca-e1bf-c8f4470584c9" colab={"base_uri": "https://localhost:8080/", "height": 487} dados["SG_UF_RESIDENCIA"].value_counts() # + id="wl4f8IZRYzu4" outputId="453034c6-1aef-4182-c905-3dc7ecc25450" colab={"base_uri": "https://localhost:8080/", "height": 244} dados.head() # + id="Ss-0kqu4Y_Lx" outputId="240d97c5-6bab-46c5-a9ad-8ceaba81b9e8" colab={"base_uri": "https://localhost:8080/", "height": 218} dados["NU_IDADE"] # + id="aSwRjQLYZM18" outputId="aa4a24ab-4b5d-4fc2-9335-8ca180c98b7e" colab={"base_uri": "https://localhost:8080/", "height": 218} dados["NU_IDADE"].value_counts() # + id="h-z_zYUlZSqj" outputId="26dc4908-8b8f-4c26-999a-da226b8fe336" colab={"base_uri": "https://localhost:8080/", "height": 218} dados["NU_IDADE"].value_counts().sort_index() # + id="wWGJ9rG1cWEP" outputId="7391bee7-93b5-48f1-e877-b90ed7d693dc" colab={"base_uri": "https://localhost:8080/", "height": 218} dados["NU_IDADE"].value_counts()/len(dados["NU_IDADE"]) # + id="kHP0d-2KdId_" outputId="7c9c208b-41e1-4f5d-a22a-126dada9d20a" colab={"base_uri": "https://localhost:8080/", "height": 34} sum(dados["NU_IDADE"].value_counts()/len(dados["NU_IDADE"])) # + [markdown] id="9S6ijtVidh_9" # ### DESAFIO 01: PROPORÇÃO DE IDADES # + id="k7V9UtzudlAB" proporcaoIdades = dados["NU_IDADE"].value_counts().sort_index()/len(dados["NU_IDADE"]) * 100 # + id="gbVqKmPkdxZi" outputId="0ae0695b-5944-49cd-faa8-fd5d1a75f80e" colab={"base_uri": "https://localhost:8080/"} proporcaoIdades # + id="mdbdPRN8d0p-" outputId="ca469c28-27cc-4ef4-b170-3f7c5dfb065c" colab={"base_uri": "https://localhost:8080/"} # Redefinindo a variável proporcaoIdades em um DataFrame resultadoDesafio01 = pd.DataFrame(proporcaoIdades).reset_index(inplace = False) resultadoDesafio01.columns = ['Idade', 'Percentual (%)'] resultadoDesafio01 # + [markdown] id="EhLismAkeZBG" # ## Desafio 02: Descobrir qual estados são os 
inscritos com 13 anos # + id="xLlB4yOHeeV8" outputId="3d9397f5-2da1-4501-f05d-e5643979bbd1" colab={"base_uri": "https://localhost:8080/"} dados.query("NU_IDADE") # + id="ZFM3n_-HeyqF" outputId="7c165f11-f2e1-4f6e-bd3a-ed1cd63f4288" colab={"base_uri": "https://localhost:8080/"} dados.query("NU_IDADE == 13") # + id="b9tHJ4WIe95m" outputId="2e45761b-dd0c-48a0-a030-6b994273f145" colab={"base_uri": "https://localhost:8080/"} dados.query("NU_IDADE == 13") ["SG_UF_RESIDENCIA"].value_counts() # + id="9C-G7XQlfXn0" outputId="bceebd63-81d9-45c8-c266-be8fc48aa77b" colab={"base_uri": "https://localhost:8080/"} # Definindo os dados acima em um Data Frame participantesCom13Anos = dados.query("NU_IDADE == 13") ["SG_UF_RESIDENCIA"].value_counts() resultadoDesafio02 = pd.DataFrame(participantesCom13Anos).reset_index(inplace = False) resultadoDesafio02.columns = ['Estado', 'Quantidade'] resultadoDesafio02 # + id="zB-y6LMMgCm2" outputId="4f573730-3383-491d-da9f-704fc85fe612" colab={"base_uri": "https://localhost:8080/"} dados["NU_IDADE"].hist() # + id="X9Ylovt0hPy6" outputId="40def38d-9d88-4e7e-941e-f0ab8c6f2b97" colab={"base_uri": "https://localhost:8080/"} dados["NU_IDADE"].hist(bins = 50, figsize = (10, 8)) # + [markdown] id="bwzgvJZGky6W" # ## Desafio 03: Colocar Título no gráfico acima # + id="qq4eaj3_k1wm" import matplotlib.pyplot as plt # + id="ghScXusMk3zF" outputId="a5ee28b5-29aa-4fe4-b359-677fa95cb095" colab={"base_uri": "https://localhost:8080/"} #Criando o gráfico novamente plt.figure(figsize= (10, 8)) plt.hist(dados["NU_IDADE"], bins= 50, color="#29B6D1") plt.title("Número de inscritos por idade - ENEM 2019", fontdict={'fontsize': 16}) plt.xlabel("Idade", fontdict={'fontsize': 14}) plt.ylabel("Número de pessoas", fontdict={'fontsize': 14}) # + id="H03KbZmKlZtj" outputId="bac76ea0-325d-477c-9a7f-4c51b0936d30" colab={"base_uri": "https://localhost:8080/"} dados['IN_TREINEIRO'] # + id="unrLwLEQmznX" outputId="cbdfa5df-18a4-4963-b8ba-4771821a27fe" colab={"base_uri": 
"https://localhost:8080/"} dados.query("IN_TREINEIRO == 1")['IN_TREINEIRO'] # + id="bnh-x5RhnOlO" outputId="b718e07f-9d77-4f42-e126-e98885aab1e1" colab={"base_uri": "https://localhost:8080/"} dados.query("IN_TREINEIRO == 1")['IN_TREINEIRO'].value_counts() # + id="vSZmYTSdnpBD" outputId="d7be3215-aad9-4cbc-ba61-5dcaba046784" colab={"base_uri": "https://localhost:8080/"} # Relacionando idade com quem é treineiro dados.query("IN_TREINEIRO == 1")['NU_IDADE'] # + id="KQi2bX8soGXi" outputId="32975951-c51d-4065-e67a-f69fb3a7b692" colab={"base_uri": "https://localhost:8080/"} dados.query("IN_TREINEIRO == 1")['NU_IDADE'].value_counts() # + id="_D6cHiyOoSp0" outputId="c7e87d4b-e159-4de5-abb7-0ff56065066d" colab={"base_uri": "https://localhost:8080/"} #Ordenando os dados em ordem crescente da idade dados.query("IN_TREINEIRO == 1")['NU_IDADE'].value_counts().sort_index() # + [markdown] id="6M9RYlwdpTR8" # ## Desafio 04: Plotar um histograma das idades dos treineiros e não treineiros # + id="OPFKX0zWpeJd" outputId="d3b2a11a-84a7-4ca3-8ba2-00058eb4851c" colab={"base_uri": "https://localhost:8080/", "height": 621} treineiros = dados.query("IN_TREINEIRO == 1")['NU_IDADE'] naoTreineiros = dados.query("IN_TREINEIRO == 0")['NU_IDADE'] plt.figure(figsize= (10,10)) plt.subplot(211, title="Treineiros x Idade") treineiros.plot(kind= 'hist', bins= 25) plt.xlabel('Idade') plt.ylabel('Quantidade') plt.subplot(212, title="Não treineiros x Idade") naoTreineiros.plot(kind= 'hist', bins= 25) plt.xlabel('Idade') plt.ylabel('Quantidade') plt.show() # + id="u4-5mbb-vsP4" outputId="52bd107a-5765-4b8b-f7e6-a8e1c8356879" colab={"base_uri": "https://localhost:8080/", "height": 391} dados['NU_NOTA_REDACAO'].hist(bins= 50, figsize=(10, 6)) # + id="HH7HXfB2yPgG" outputId="cda9beb6-13ed-4d43-a709-fae3ca29c317" colab={"base_uri": "https://localhost:8080/", "height": 391} dados['NU_NOTA_LC'].hist(bins= 50, figsize=(10, 6)) # + id="8ko3kQzYye6t" outputId="8d1ce980-a97d-4d9d-e761-aa5a5a0ae9a4" 
colab={"base_uri": "https://localhost:8080/", "height": 34} dados['NU_NOTA_REDACAO'].mean() # + id="o6615a6Dy0Go" outputId="43e81a28-d88b-4251-b565-c6673b5d1cc6" colab={"base_uri": "https://localhost:8080/", "height": 34} dados['NU_NOTA_REDACAO'].std() # + id="dURyoUI3y5tR" outputId="79c02117-8bee-4b13-a095-c713e5d40362" colab={"base_uri": "https://localhost:8080/", "height": 284} provas = ['NU_NOTA_REDACAO', 'NU_NOTA_CN', 'NU_NOTA_MT', 'NU_NOTA_LC', 'NU_NOTA_CH'] dados[provas].describe() # + id="gLkU6_5AzToz" outputId="949f4468-f95c-4774-c3a6-4277c350d80d" colab={"base_uri": "https://localhost:8080/", "height": 34} dados['NU_NOTA_LC'].quantile(0.9) # + id="rqPjQAT0e17z" outputId="a7a488db-699f-46f1-c688-de4c27cd41bb" colab={"base_uri": "https://localhost:8080/", "height": 392} dados['NU_NOTA_LC'].plot.box(grid= True, figsize=(8, 6)) # + id="ttz2_qAgfSJW" outputId="c5b3020b-6903-432e-f233-aa86eccd754a" colab={"base_uri": "https://localhost:8080/", "height": 501} dados[provas].plot.box(grid= True, figsize= (10, 8)) # + id="6QWp8tsVgocD" outputId="94e277c5-f31d-4af9-a5d3-f735af20c8f4" colab={"base_uri": "https://localhost:8080/", "height": 67} dados.query("NU_NOTA_REDACAO == 1000")['NU_IDADE'].value_counts # + [markdown] id="OP2y11fmhwbu" # ## DESAFIO 05: COMPARAR AS DISTRIBUIÇÕES DAS PROVAS EM INGLÊS E ESPANHOL # + id="vYTWiWOXi42v" outputId="0590c2e1-56fb-4d8e-b317-e1b66437f91e" colab={"base_uri": "https://localhost:8080/", "height": 389} import seaborn as sns notaIngles = dados.query('TP_LINGUA == 1') ['NU_NOTA_LC'].values notaEspanhol = dados.query('TP_LINGUA == 0') ['NU_NOTA_LC'].values fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 6), sharex=True) sns.boxplot(data=notaIngles, ax = ax1, color="#ee82ee") sns.boxplot(data=notaEspanhol, ax = ax2, color="#6a5acd") ax1.set_ylabel('Distribuição das notas de linguagens e códigos') ax1.set_xlabel('Inglês') ax2.set_xlabel('Espanhol') plt.show() # + id="EBC-G4pElrrh"
10,010
/detection/.ipynb_checkpoints/data_making2-checkpoint.ipynb
d46def2508749f2302a50811bb488d5eb218bcaa
[]
no_license
SystemCorps/hrlab-sf
https://github.com/SystemCorps/hrlab-sf
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
6,924
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext memory_profiler # %load_ext line_profiler from matmult_edit import matrix_creator # %lprun -f matrix_creator matrix_creator(250,251) # %mprun -f matrix_creator matrix_creator(250,251) from matmult_edit import multiplication_of_matrix X = matrix_creator(250,250) Y = matrix_creator(250,251) # %lprun -f multiplication_of_matrix multiplication_of_matrix(X,Y) # %mprun -f multiplication_of_matrix multiplication_of_matrix(X,Y) h.join(x[0], '*.png'))] print(len(img_dir), img_dir[0]) random.shuffle(img_dir) print(img_dir[0]) train, test = train_test_split(img_dir, test_size=0.2) print(len(train), train[0]) print(len(test), test[0]) # + def create_tf(in_dir): # TODO(user): Populate the following variables from your example. height = 512 # Image height width = 512 # Image width filename = str.encode(in_dir.split('/')[-1]) # Filename of the image. 
Empty if image is not from file image_format = b'png' # b'jpeg' or b'png' str_fname = in_dir.split('/')[-1] xy_str = str_fname.replace('.png','') if 'Untorn' in xy_str: x1 = 0 y1 = 0 x2 = 511 y2 = 511 classes_text = [b'untorn'] # List of string class name of bounding box (1 per box) classes = [2] # List of integer class id of bounding box (1 per box) else: xy_par = xy_str.split('_') x1 = int(xy_par[2]) y1 = int(xy_par[3]) x2 = int(xy_par[4]) y2 = int(xy_par[5]) classes_text = [b'torn'] # List of string class name of bounding box (1 per box) classes = [1] # List of integer class id of bounding box (1 per box) file = open(in_dir, 'rb') encoded_image_data = file.read() xmins = [x1/width] # List of normalized left x coordinates in bounding box (1 per box) xmaxs = [x2/width] # List of normalized right x coordinates in bounding box # (1 per box) ymins = [y1/height] # List of normalized top y coordinates in bounding box (1 per box) ymaxs = [y2/height] # List of normalized bottom y coordinates in bounding box # (1 per box) tf_out = tf.train.Example(features=tf.train.Features(feature={ 'image/height': dataset_util.int64_feature(height), 'image/width': dataset_util.int64_feature(width), 'image/filename': dataset_util.bytes_feature(filename), 'image/source_id': dataset_util.bytes_feature(filename), 'image/encoded': dataset_util.bytes_feature(encoded_image_data), 'image/format': dataset_util.bytes_feature(image_format), 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins), 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), 'image/object/class/label': dataset_util.int64_list_feature(classes), })) return tf_out # + save_dir='/hdd3/dhj_container/DB/SSD_data_TFR/' if not tf.gfile.Exists(save_dir): tf.gfile.MakeDirs(save_dir) writer = 
tf.python_io.TFRecordWriter(save_dir+'train_data2.tfrecord') # TODO(user): Write code to read in your dataset to examples variable for i in range(len(train)): tf_example = create_tf(train[i]) writer.write(tf_example.SerializeToString()) writer.close() writer = tf.python_io.TFRecordWriter(save_dir+'test_data2.tfrecord') for i in range(len(test)): tf_example = create_tf(test[i]) writer.write(tf_example.SerializeToString()) writer.close() # -
3,941
/week3/5.3_cross_entropy_logistic_regression_v2.ipynb
4c77c68e4fa872201c1be9c3e0dda2424b4c067e
[]
no_license
gachet/Pytorch-beginner-tutorials
https://github.com/gachet/Pytorch-beginner-tutorials
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
328,282
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 範例說明: # ConvNetJS CIFAR-10 demo # # 預期目標: # # 了解網路各層的變化 # # 超參數設定對於預測結果的影響 # # 作業:(所有作業需要再ConvNetJS 的網頁上完成) # # https://cs.stanford.edu/people/karpathy/convnetjs/demo/cifar10.html # # (1)嘗試新增一組卷積+池化層 (CONV + Pool) # # (2)嘗試超參數 # # # 超參數 (hyper parameter) # # ![Hyperparameter_HW.png](attachment:Hyperparameter_HW.png) # + active="" # # # 定義網路起始 # layer_defs = []; # # # 建立DATA 輸入層, 維度: 32x32x3 # layer_defs.push({type:'input', out_sx:32, out_sy:32, out_depth:3}); # # # 建立卷積層1,該層將使用16個內核執行卷積,每個內核大小為5x5。 # #移動步數為1,輸入將在所有邊上填充2個像素以使輸出Vol具有相同的大小 , 激活函數為 ReLU # # layer_defs.push({type:'conv', sx:5, filters:16, stride:1, pad:2, activation:'relu’}); # # # 建立池化層1,每個池化內核大小為2x2,移動步數為2 # # layer_defs.push({type:'pool', sx:2, stride:2}); # # 建立卷積層2,該層將使用20個內核執行卷積,每個內核大小為5x5。 # # #移動步數為1,輸入將在所有邊上填充2個像素以使輸出Vol具有相同的大小 , 激活函數為 ReLU # # layer_defs.push({type:'conv', sx:5, filters:20, stride:1, pad:2, activation:'relu’}); # # # 建立池化層2,每個池化內核大小為2x2,移動步數為2 # # layer_defs.push({type:'pool', sx:2, stride:2}); # # # 建立卷積層3,該層將使用20個內核執行卷積,每個內核大小為5x5。 # # #移動步數為1,輸入將在所有邊上填充2個像素以使輸出Vol具有相同的大小 , 激活函數為 ReLU # # layer_defs.push({type:'conv', sx:5, filters:20, stride:1, pad:2, activation:'relu’}); # # # 建立池化層3,每個池化內核大小為2x2,移動步數為2 # # layer_defs.push({type:'pool', sx:2, stride:2}); # # ''' # # 建立卷積層4,該層將使用20個內核執行卷積,每個內核大小為5x5。 # # #移動步數為1,輸入將在所有邊上填充2個像素以使輸出Vol具有相同的大小 , 激活函數為 ReLU # # # 建立池化層4,每個池化內核大小為2x2,移動步數為2 # # ''' # #輸出Vol的大小為1x1x10 # # layer_defs.push({type:'softmax', num_classes:10}); # # #指定NET 為一個輸出網路 # # net = new convnetjs.Net(); # # #執行並建立網路 # # net.makeLayers(layer_defs); # # #執行網路訓練, 優化器採用adadelta, batch_size=4, l2_decay (l2,每次更新時學習率下降)=0.0001 # # trainer = new convnetjs.SGDTrainer(net, {method:'adadelta', batch_size:4, l2_decay:0.0001}); # # # # # - 
plt.title('Loss Surface Contour') plt.xlabel('w') plt.ylabel('b') plt.contour(self.w, self.b, self.Z) plt.show() # Setter def set_para_loss(self, model, loss): self.n = self.n + 1 self.W.append(list(model.parameters())[0].item()) self.B.append(list(model.parameters())[1].item()) self.LOSS.append(loss) # Plot diagram def final_plot(self): ax = plt.axes(projection='3d') ax.plot_wireframe(self.w, self.b, self.Z) ax.scatter(self.W, self.B, self.LOSS, c='r', marker='x', s=200, alpha=1) plt.figure() plt.contour(self.w, self.b, self.Z) plt.scatter(self.W, self.B, c='r', marker='x') plt.xlabel('w') plt.ylabel('b') plt.show() # Plot diagram def plot_ps(self): plt.subplot(121) plt.ylim plt.plot(self.x, self.y, 'ro', label="training points") plt.plot(self.x, self.W[-1] * self.x + self.B[-1], label="estimated line") plt.plot(self.x, 1 / (1 + np.exp(-1 * (self.W[-1] * self.x + self.B[-1]))), label='sigmoid') plt.xlabel('x') plt.ylabel('y') plt.ylim((-0.1, 2)) plt.title('Data Space Iteration: ' + str(self.n)) plt.show() plt.subplot(122) plt.contour(self.w, self.b, self.Z) plt.scatter(self.W, self.B, c='r', marker='x') plt.title('Loss Surface Contour Iteration' + str(self.n)) plt.xlabel('w') plt.ylabel('b') # Plot the diagram def PlotStuff(X, Y, model, epoch, leg=True): plt.plot(X.numpy(), model(X).detach().numpy(), label=('epoch ' + str(epoch))) plt.plot(X.numpy(), Y.numpy(), 'r') if leg == True: plt.legend() else: pass # - # Set the random seed: # + # Set random seed torch.manual_seed(0) # - # <!--Empty Space for separating topics--> # <h2 id="Makeup_Data">Get Some Data</h2> # + # Create the data class class Data(Dataset): # Constructor def __init__(self): self.x = torch.arange(-1, 1, 0.1).view(-1, 1) self.y = torch.zeros(self.x.shape[0], 1) self.y[self.x[:, 0] > 0.2] = 1 self.len = self.x.shape[0] # Getter def __getitem__(self, index): return self.x[index], self.y[index] # Get length def __len__(self): return self.len # - # Make <code>Data</code> object # + # Create Data 
object data_set = Data() # - # <!--Empty Space for separating topics--> # <h2 id="Model_Cost">Create the Model and Total Loss Function (Cost)</h2> # Create a custom module for logistic regression: # + # Create logistic_regression class class logistic_regression(nn.Module): # Constructor def __init__(self, n_inputs): super(logistic_regression, self).__init__() self.linear = nn.Linear(n_inputs, 1) # Prediction def forward(self, x): yhat = torch.sigmoid(self.linear(x)) return yhat # - # Create a logistic regression object or model: # + # Create the logistic_regression result model = logistic_regression(1) # - # Replace the random initialized variable values. Theses random initialized variable values did convergence for the RMS Loss but will converge for the Cross-Entropy Loss. # + # Set the weight and bias model.state_dict() ['linear.weight'].data[0] = torch.tensor([[-5]]) model.state_dict() ['linear.bias'].data[0] = torch.tensor([[-10]]) print("The parameters: ", model.state_dict()) # - # Create a <code> plot_error_surfaces</code> object to visualize the data space and the parameter space during training: # + # Create the plot_error_surfaces object get_surface = plot_error_surfaces(15, 13, data_set[:][0], data_set[:][1], 30) # - # Define the cost or criterion function: # + # Create dataloader, criterion function and optimizer def criterion(yhat,y): out = -1 * torch.mean(y * torch.log(yhat) + (1 - y) * torch.log(1 - yhat)) return out # Build in criterion # criterion = nn.BCELoss() trainloader = DataLoader(dataset = data_set, batch_size = 3) learning_rate = 2 optimizer = torch.optim.SGD(model.parameters(), lr = learning_rate) # - # <!--Empty Space for separating topics--> # <h2 id="BGD">Train the Model via Batch Gradient Descent</h2> # Train the model # + # Train the Model def train_model(epochs): for epoch in range(epochs): for x, y in trainloader: yhat = model(x) loss = criterion(yhat, y) optimizer.zero_grad() loss.backward() optimizer.step() 
get_surface.set_para_loss(model, loss.tolist()) if epoch % 20 == 0: get_surface.plot_ps() train_model(100) # - # Get the actual class of each sample and calculate the accuracy on the test data: # + # Make the Prediction yhat = model(data_set.x) label = yhat > 0.5 print("The accuracy: ", torch.mean((label == data_set.y.type(torch.ByteTensor)).type(torch.float))) # - # The accuracy is perfect. # # <!--Empty Space for separating topics--> # <a href="http://cocl.us/pytorch_link_bottom"> # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DL0110EN/notebook_images%20/notebook_bottom%20.png" width="750" alt="PyTorch Bottom" /> # </a> # <h2>About the Authors:</h2> # # <a href="https://www.linkedin.com/in/joseph-s-50398b136/">Joseph Santarcangelo</a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD. # Other contributors: <a href="https://www.linkedin.com/in/michelleccarey/">Michelle Carey</a>, <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a">Mavis Zhou</a> # <hr> # Copyright &copy; 2018 <a href="cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu">cognitiveclass.ai</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.
8,264
/code/NER_CRF_Test_3_移除671.ipynb
b9494fd465e8132dcccafc6f728d1fac2c9ab255
[]
no_license
alexislintw/TCM-Symptom-NER
https://github.com/alexislintw/TCM-Symptom-NER
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
75,123
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### 中醫醫案症狀詞抽取 # - model: CRF # - 主症與兼症合併 # - 詞位標注: OSBIE # - 671個詞沒有完全對應(皆標為O) # - postag使用nltk # - 以句點為分隔拆句子 import json import nltk def getTagListWithString(str): list1 = [] str = str.replace(',',' ') str = str.replace('、',' ') str = str.replace(',',' ') str_list = str.split(" ") for s in str_list: s = s.strip() if len(s) > 0: list1.append(s) #list2 = list(filter(None, second_list)) return list1 def getLabelWithWordIndex(i,ne_index_list): for (ne_idx,ne_len) in ne_index_list: if i == ne_idx: return "B-S" elif i > ne_idx and i < (ne_idx + ne_len - 1): return "I-S" elif i == (ne_idx + ne_len - 1): return "E-S" return "O" # + filename = "case_summary.csv" with open(filename) as f: lines = f.readlines() docs = [] c = 0 t = 0 for idx,line in enumerate(lines): cols = line.strip().split(",") case_content = cols[2].replace(" ","") sentences = case_content.split("。") summary_str = cols[3].strip() if idx == 0 or len(summary_str) == 0 : continue #處理每個doc的summary(主症和兼症) summary_str = summary_str.strip('"') summary_str = summary_str.replace('""','"') summary_str = summary_str.replace('#',',') #print(summary_str) summary_json = json.loads(summary_str) keys = summary_json.keys() if "main" in keys: main = summary_json["main"] main_list = getTagListWithString(main) #print(main_list) if "second" in keys: second = summary_json["second"] second_list = getTagListWithString(second) #print(second_list) if len(second_list) > 0: main_list.extend(second_list) t += len(main_list) ne_index_list_0 = [] for ne in main_list: ne_idx = case_content.find(ne) if ne_idx != -1 : ne_index_list_0.append((ne_idx,len(ne))) if len(main_list) != len(ne_index_list_0): continue for sent in sentences: ne_index_list = [] for ne in main_list: ne_idx = sent.find(ne) if ne_idx != -1 : 
ne_index_list.append((ne_idx,len(ne))) texts = [] for i,w in enumerate(sent): label = getLabelWithWordIndex(i,ne_index_list) texts.append((w,label)) docs.append(texts) print(len(docs)) # + ### 計算平均句長 total_wc = 0 for doc in docs: total_wc += len(doc) ave = total_wc / 48901 print(ave) # - for tk,lab in docs[1]: print(tk,lab) # + data = [] for i, doc in enumerate(docs): tokens = [t for t, label in doc] tagged = nltk.pos_tag(tokens) data.append([(w, pos, label) for (w, label), (word, pos) in zip(doc, tagged)]) # - print(len(data)) def word2features(doc, i): word = doc[i][0] postag = doc[i][1] # Common features for all words features = [ 'bias', 'word.lower=' + word.lower(), 'word[-3:]=' + word[-3:], 'word[-2:]=' + word[-2:], 'word.isupper=%s' % word.isupper(), 'word.istitle=%s' % word.istitle(), 'word.isdigit=%s' % word.isdigit(), 'postag=' + postag ] # Features for words that are not # at the beginning of a document if i > 0: word1 = doc[i-1][0] postag1 = doc[i-1][1] features.extend([ '-1:word.lower=' + word1.lower(), '-1:word.istitle=%s' % word1.istitle(), '-1:word.isupper=%s' % word1.isupper(), '-1:word.isdigit=%s' % word1.isdigit(), '-1:postag=' + postag1 ]) else: # Indicate that it is the 'beginning of a document' features.append('BOS') # Features for words that are not # at the end of a document if i < len(doc)-1: word1 = doc[i+1][0] postag1 = doc[i+1][1] features.extend([ '+1:word.lower=' + word1.lower(), '+1:word.istitle=%s' % word1.istitle(), '+1:word.isupper=%s' % word1.isupper(), '+1:word.isdigit=%s' % word1.isdigit(), '+1:postag=' + postag1 ]) else: # Indicate that it is the 'end of a document' features.append('EOS') return features # + from sklearn.model_selection import train_test_split # A function for extracting features in documents def extract_features(doc): return [word2features(doc, i) for i in range(len(doc))] # A function fo generating the list of labels for each document def get_labels(doc): return [label for (token, postag, label) in doc] X = 
[extract_features(doc) for doc in data] y = [get_labels(doc) for doc in data] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) # - print(len(X_train)) print(len(X_test)) # + import pycrfsuite trainer = pycrfsuite.Trainer(verbose=True) # Submit training data to the trainer for xseq, yseq in zip(X_train, y_train): trainer.append(xseq, yseq) # Set the parameters of the model trainer.set_params({ # coefficient for L1 penalty 'c1': 0.1, # coefficient for L2 penalty 'c2': 0.01, # maximum number of iterations 'max_iterations': 200, # whether to include transitions that # are possible, but not observed 'feature.possible_transitions': True }) # Provide a file name as a parameter to the train function, such that # the model will be saved to the file when training is finished trainer.train('crf.model') # + tagger = pycrfsuite.Tagger() tagger.open('crf.model') y_pred = [tagger.tag(xseq) for xseq in X_test] # Let's take a look at a random sample in the testing set i = 12 for x, y in zip(y_pred[i], [x[1].split("=")[1] for x in X_test[i]]): print("%s (%s)" % (y, x)) # + import numpy as np from sklearn.metrics import classification_report # Create a mapping of labels to indices labels = {"O": 0,"B-S": 1,"I-S": 2,"E-S": 3} # Convert the sequences of tags into a 1-dimensional array predictions = np.array([labels[tag] for row in y_pred for tag in row]) truths = np.array([labels[tag] for row in y_test for tag in row]) # Print out the classification report print(classification_report( truths, predictions, target_names=["O", "B-SYM","I-SYM","E-SYM"])) # -
6,612
/jax/experimental/jax2tf/JAX2TF_getting_started.ipynb
b6e0b5f7cdc04dfe3cd4b674c85f9aee6f054309
[ "Apache-2.0" ]
permissive
levskaya/jax
https://github.com/levskaya/jax
1
0
Apache-2.0
2023-03-06T06:00:39
2021-01-13T20:56:47
Python
Jupyter Notebook
false
false
.py
815
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="mB5eSZXZIO9W" # JAX-TensorFlow interoperation with JAX2TF # =========================================== # # Link: go/jax2tf-colab # # The JAX2TF colab has been deprecated, and the example code has # been moved to [jax2tf/examples](https://github.com/google/jax/tree/master/jax/experimental/jax2tf/examples). #
573
/notebooks/data_exploration.ipynb
92ed3c51fcb2666616cbd534413d73b91a788591
[]
no_license
Kuromaru1/mmf_hack
https://github.com/Kuromaru1/mmf_hack
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
73,566
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: venv # language: python # name: venv # --- # + import pandas as pd import matplotlib import numpy as np import matplotlib.pyplot as plt from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot from plotly import graph_objs as go from scipy.stats import pearsonr from statsmodels.tsa.arima_model import ARIMA from fbprophet import Prophet from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split # %matplotlib inline init_notebook_mode(connected = True) # - df = pd.read_csv('./data/retrieved_data.csv', index_col='Date') df.index = pd.to_datetime(df.index) for col in df.columns: df[col].interpolate(method='time', inplace=True) df.head() # + def plotly_line(series, title = ''): trace = go.Scatter( x = series.index, y = series, mode = 'lines', name = series.name ) layout = dict(title = title) fig = dict(data = [trace], layout = layout) iplot(fig, show_link=False) def plotly_series(series_arr, title=''): traces = [] for series in series_arr: trace = go.Scatter( x = series.index, y = series, mode = 'lines', name = series.name ) traces.append(trace) layout = dict(title = title) fig = dict(data = traces, layout = layout) iplot(fig, show_link=False) def plotly_df(df, title=''): traces = [] for col in df.columns: trace = go.Scatter( x = df.index, y = df[col], mode = 'lines', name = col ) traces.append(trace) layout = dict(title = title) fig = dict(data = traces, layout = layout) iplot(fig, show_link=False) # - def correlation_matrix(df): from matplotlib import pyplot as plt from matplotlib import cm as cm fig = plt.figure(figsize=(10, 8)) ax1 = fig.add_subplot(111) cmap = cm.get_cmap('jet', 30) cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap) ax1.grid(True) plt.title('Feature Correlation') labels=[df.index.name] + list(df.columns) 
ax1.set_xticklabels(labels,fontsize=12, rotation=30) ax1.set_yticklabels(labels,fontsize=12) # Add colorbar, make sure to specify tick locations to match desired ticklabels fig.colorbar(cax) #plt.show() plt.savefig('corr_plot.png') # + plotly_df(df[df.columns[:2]], 'Average Price') plotly_df(df[df.columns[2:4]], 'PP Import/Export') plotly_line(df[df.columns[4]], 'USD/CNY') plotly_df(df[df.columns[5:]], 'Futures') plotly_line(df[df.columns[1]], 'Oil Average Price') plotly_line(df[df.columns[6]], 'Oil Future') # - def calc_df_pvalue(df): df1 = df.copy() df2 = df.copy() coeffmat = np.zeros((df1.shape[1], df2.shape[1])) pvalmat = np.zeros((df1.shape[1], df2.shape[1])) for i in range(df1.shape[1]): for j in range(df2.shape[1]): corrtest = pearsonr(df1[df1.columns[i]], df2[df2.columns[j]]) pvalmat[i,j] = corrtest[1] return pd.DataFrame(pvalmat, columns=df2.columns, index=df1.columns) pval_df = calc_df_pvalue(df) pval_df.to_excel('./data/pvalue_data.xlsx') -- # + [markdown] slideshow={"slide_type": "slide"} # ## Bayes' theorem # # Some of you might recognize the above formula as Bayes' theorem. Typically Bayes' theorem is written: # # ### $$P\left(\;A\;|\;B\;\right) = \frac{P\left(\;B\;|\;A\;\right)P\left(\;A\;\right)}{P(\;B\;)}$$ # # Where: # # $A$ and $B$ are anything that take probabilities (which is essentially everything). $P(B|A)$ and $P(A|B)$ are the probabilities of $B$ conditional on $A$ and vice versa. # # # + [markdown] slideshow={"slide_type": "slide"} # This is just another way of writing: # # ### $$P\left(\;A\;\right)P\left(\;B\;|\;A\;\right) = P\left(\;B\;\right)P\left(\;A\;|\;B\;\right)$$ # # Which is derived from the fact that: # # ### $$P\left(\;A\;\cap\;B\right) = P\left(\;A\;\right)P\left(\;B\;|\;A\;\right) = P\left(\;B\;\right)P\left(\;A\;|\;B\;\right)$$ # # Where $P\left(\;A\;\cap\;B\right)$ is the probability of $A$ *and* $B$. 
# # --- # + [markdown] slideshow={"slide_type": "slide"} # ### Denominator of Bayes' theorem: the "total probability" # # ![](./assets/images/output_27_0.png) # --- # + [markdown] slideshow={"slide_type": "slide"} # In the picture, each $A_1,..., A_5$ includes a piece of the center oval. In this example the oval represents $B$. # # Basic probability defines the following relation: $$P(A|B) = \frac{ A \cap B }{B}$$ # # Intuitively, the relation indicates that $P(A|B)$ is a ratio of the part of A that is common with B, *over the entirety of $B$*. # # Therefore, **the total probability can be thought of as the exhaustive sum of all probabilities on sets that share elements with B**. This equals simply the probability of B in our set of events. # # So what is the purpose of the total probability with respect to the rest of Bayes formula? **In essence, it "normalizes" the numerator into a quantity between 0 and 1,** ensuring the left side of the formula is a probability. # # --- # + [markdown] slideshow={"slide_type": "slide"} # ### Solving probability using Bayes' theorem is easy when you know $P(B)$ # # Let's say we have two coins. Coin **FAIR** and coin **RIGGED** # # coin FAIR has a 50% chance of flipping heads. # coin RIGGED has 99% chance of flipping heads. # # Your friend chooses one of the two coins at random. He flips the coin and gets heads. # # What is the probability that the coin flipped was **FAIR**? # # > Check: what are the point probabilities for the prior, likelihood, and marginal probability of the data? # + slideshow={"slide_type": "slide"} import numpy as np # Our hypothesis is our belief that the coin flipped was fair before we saw the outcome. # 0.5 since he chose at random. hypothesis_fair = 0.5 # probability that we would get heads given our hypothesis was true, that the coin is the fair one: prob_flip_given_fair = 0.5 # total probability of getting heads: prob_heads = (149./200.) 
# solve for the probability our hypothesis is true given the flip: hypothesis_true = (prob_flip_given_fair * hypothesis_fair) / prob_heads print hypothesis_true # + [markdown] slideshow={"slide_type": "slide"} # --- # # ## Bayes' theorem in the context of statistical modeling # # We can also interpret the equations above in the context of statistical modeling, which we've been doing for weeks now: # # ### $$P\left(\;model\;|\;data\;\right) = \frac{P\left(\;data\;|\;model\;\right)}{P(\;data\;)} P\left(\;model\;\right)$$ # # Or in plain english: # # **What is the probability of our model being true, given the data we have? This depends on the likelihood of the observed data given our model and the data itself, as well as our prior belief that this model is true.** # # --- # # + [markdown] slideshow={"slide_type": "slide"} # ### Computational solutions with Bayes' theorem # # Consider two shoppers' baskets in an e-commerce store # # Basket 1 has 30 cans of seltzer and 10 cans of V8. Basket 2 has 20 cans of each # # You picked one basket at random and select a can, which was seltzer. What's the probability it came from basket 1? # # > Check: solve this manually with point estimates # + [markdown] slideshow={"slide_type": "slide"} # This is a very simple case, but we can start to employ **prior distributions**, giving us **posterior distributions**, instead of point probabilities. # + slideshow={"slide_type": "fragment"} hypo_dist = {'Basket1': .5, 'Basket2': .5} # Priors likelihood_dist = {'Basket1': .75, 'Basket2': .5} # Likelihood marginal_prob = 5/8.0 # Our normalizing constant, the marginal probability print (hypo_dist['Basket1'] * likelihood_dist['Basket1']) / marginal_prob print (hypo_dist['Basket2'] * likelihood_dist['Basket2']) / marginal_prob # + [markdown] slideshow={"slide_type": "slide"} # ### Independent practice: the train problem # # "A railroad numbers its locomotives in order 1...N. You see a locomotive with the number 60. 
Estimate how many locomotives the railroad has." What's the prior? What's the likelihood? # + [markdown] slideshow={"slide_type": "fragment"} # The prior is what we knew (or will assume) about N before our observation of data. # # The likelihood is the probability of seeing the data for any given value of N. # + [markdown] slideshow={"slide_type": "fragment"} # How can you write a likelihood function for this problem? # - # References and sources modeled off of: # # http://ipython-books.github.io/featured-07/ # # http://stats.stackexchange.com/questions/31867/bayesian-vs-frequentist-interpretations-of-probability # # http://jakevdp.github.io/blog/2014/03/11/frequentism-and-bayesianism-a-practical-intro/ # # https://simple.wikipedia.org/wiki/Bayes%27_theorem # # https://en.wikipedia.org/wiki/Central_limit_theorem # # http://www.cogsci.ucsd.edu/classes/SP07/COGS14/NOTES/binomial_ztest.pdf # # https://en.wikipedia.org/wiki/Prior_probability#Uninformative_priors # # https://arbital.com/p/bayes_rule/?l=1zq # # https://betterexplained.com/articles/an-intuitive-and-short-explanation-of-bayes-theorem/ # # http://www.yudkowsky.net/rational/bayes/ # # http://people.stern.nyu.edu/wgreene/MathStat/Notes-2-BayesianStatistics.pdf # # http://stats.stackexchange.com/questions/58564/help-me-understand-bayesian-prior-and-posterior-distributions # # http://pages.uoregon.edu/cfulton/posts/bernoulli_trials_bayesian.html # # http://chrisstrelioff.ws/sandbox/2014/12/11/inferring_probabilities_with_a_beta_prior_a_third_example_of_bayesian_calculations.html # # https://www.chrisstucchio.com/blog/2013/magic_of_conjugate_priors.html # # http://stats.stackexchange.com/questions/58564/help-me-understand-bayesian-prior-and-posterior-distributions # # ---
10,058
/docs/notebooks/tess.ipynb
e2bec6a3a8b3247226c3712fb4c2504e4a4d07ed
[ "MIT" ]
permissive
dfm/exoplanet-docs
https://github.com/dfm/exoplanet-docs
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
23,902
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # %run notebook_setup # # Fitting TESS data # In this tutorial, we will reproduce the fits to the transiting planet in the Pi Mensae system discovered by [Huang et al. (2018)](https://arxiv.org/abs/1809.05967). # The data processing and model are similar to the :ref:`together` tutorial, but with a few extra bits like aperture selection and de-trending. # # To start, we need to download the target pixel file: # + import numpy as np import lightkurve as lk import matplotlib.pyplot as plt tpf_file = lk.search_targetpixelfile("TIC 261136679", sector=1).download() with tpf_file.hdu as hdu: tpf = hdu[1].data tpf_hdr = hdu[1].header texp = tpf_hdr["FRAMETIM"] * tpf_hdr["NUM_FRM"] texp /= 60.0 * 60.0 * 24.0 time = tpf["TIME"] flux = tpf["FLUX"] m = np.any(np.isfinite(flux), axis=(1, 2)) & (tpf["QUALITY"] == 0) ref_time = 0.5 * (np.min(time[m]) + np.max(time[m])) time = np.ascontiguousarray(time[m] - ref_time, dtype=np.float64) flux = np.ascontiguousarray(flux[m], dtype=np.float64) mean_img = np.median(flux, axis=0) plt.imshow(mean_img.T, cmap="gray_r") plt.title("TESS image of Pi Men") plt.xticks([]) plt.yticks([]); # - # ## Aperture selection # # Next, we'll select an aperture using a hacky method that tries to minimizes the windowed scatter in the lightcurve (something like the CDPP). 
# + from scipy.signal import savgol_filter # Sort the pixels by median brightness order = np.argsort(mean_img.flatten())[::-1] # A function to estimate the windowed scatter in a lightcurve def estimate_scatter_with_mask(mask): f = np.sum(flux[:, mask], axis=-1) smooth = savgol_filter(f, 1001, polyorder=5) return 1e6 * np.sqrt(np.median((f / smooth - 1) ** 2)) # Loop over pixels ordered by brightness and add them one-by-one # to the aperture masks, scatters = [], [] for i in range(10, 100): msk = np.zeros_like(mean_img, dtype=bool) msk[np.unravel_index(order[:i], mean_img.shape)] = True scatter = estimate_scatter_with_mask(msk) masks.append(msk) scatters.append(scatter) # Choose the aperture that minimizes the scatter pix_mask = masks[np.argmin(scatters)] # Plot the selected aperture plt.imshow(mean_img.T, cmap="gray_r") plt.imshow(pix_mask.T, cmap="Reds", alpha=0.3) plt.title("selected aperture") plt.xticks([]) plt.yticks([]); # - # This aperture produces the following light curve: plt.figure(figsize=(10, 5)) sap_flux = np.sum(flux[:, pix_mask], axis=-1) sap_flux = (sap_flux / np.median(sap_flux) - 1) * 1e3 plt.plot(time, sap_flux, "k") plt.xlabel("time [days]") plt.ylabel("relative flux [ppt]") plt.title("raw light curve") plt.xlim(time.min(), time.max()); # ## De-trending # # This doesn't look terrible, but we're still going to want to de-trend it a little bit. # We'll use "pixel-level deconvolution" (PLD) to de-trend following the method used by [Everest](https://github.com/rodluger/everest). # Specifically, we'll use first order PLD plus the top few PCA components of the second order PLD basis. 
# + # Build the first order PLD basis X_pld = np.reshape(flux[:, pix_mask], (len(flux), -1)) X_pld = X_pld / np.sum(flux[:, pix_mask], axis=-1)[:, None] # Build the second order PLD basis and run PCA to reduce the number of dimensions X2_pld = np.reshape(X_pld[:, None, :] * X_pld[:, :, None], (len(flux), -1)) U, _, _ = np.linalg.svd(X2_pld, full_matrices=False) X2_pld = U[:, : X_pld.shape[1]] # Construct the design matrix and fit for the PLD model X_pld = np.concatenate((np.ones((len(flux), 1)), X_pld, X2_pld), axis=-1) XTX = np.dot(X_pld.T, X_pld) w_pld = np.linalg.solve(XTX, np.dot(X_pld.T, sap_flux)) pld_flux = np.dot(X_pld, w_pld) # Plot the de-trended light curve plt.figure(figsize=(10, 5)) plt.plot(time, sap_flux - pld_flux, "k") plt.xlabel("time [days]") plt.ylabel("de-trended flux [ppt]") plt.title("initial de-trended light curve") plt.xlim(time.min(), time.max()); # - # That looks better. # # ## Transit search # # Now, let's use [the box least squares periodogram from AstroPy](http://docs.astropy.org/en/latest/timeseries/bls.html) # (Note: you'll need AstroPy v3.1 or more recent to use this feature) to estimate the period, phase, and depth of the transit. 
# + from astropy.timeseries import BoxLeastSquares period_grid = np.exp(np.linspace(np.log(1), np.log(15), 50000)) bls = BoxLeastSquares(time, sap_flux - pld_flux) bls_power = bls.power(period_grid, 0.1, oversample=20) # Save the highest peak as the planet candidate index = np.argmax(bls_power.power) bls_period = bls_power.period[index] bls_t0 = bls_power.transit_time[index] bls_depth = bls_power.depth[index] transit_mask = bls.transit_mask(time, bls_period, 0.2, bls_t0) fig, axes = plt.subplots(2, 1, figsize=(10, 10)) # Plot the periodogram ax = axes[0] ax.axvline(np.log10(bls_period), color="C1", lw=5, alpha=0.8) ax.plot(np.log10(bls_power.period), bls_power.power, "k") ax.annotate( "period = {0:.4f} d".format(bls_period), (0, 1), xycoords="axes fraction", xytext=(5, -5), textcoords="offset points", va="top", ha="left", fontsize=12, ) ax.set_ylabel("bls power") ax.set_yticks([]) ax.set_xlim(np.log10(period_grid.min()), np.log10(period_grid.max())) ax.set_xlabel("log10(period)") # Plot the folded transit ax = axes[1] x_fold = (time - bls_t0 + 0.5 * bls_period) % bls_period - 0.5 * bls_period m = np.abs(x_fold) < 0.4 ax.plot(x_fold[m], sap_flux[m] - pld_flux[m], ".k") # Overplot the phase binned light curve bins = np.linspace(-0.41, 0.41, 32) denom, _ = np.histogram(x_fold, bins) num, _ = np.histogram(x_fold, bins, weights=sap_flux - pld_flux) denom[num == 0] = 1.0 ax.plot(0.5 * (bins[1:] + bins[:-1]), num / denom, color="C1") ax.set_xlim(-0.3, 0.3) ax.set_ylabel("de-trended flux [ppt]") ax.set_xlabel("time since transit"); # - # Now that we know where the transits are, it's generally good practice to de-trend the data one more time with the transits masked so that the de-trending doesn't overfit the transits. # Let's do that. 
# + m = ~transit_mask XTX = np.dot(X_pld[m].T, X_pld[m]) w_pld = np.linalg.solve(XTX, np.dot(X_pld[m].T, sap_flux[m])) pld_flux = np.dot(X_pld, w_pld) x = np.ascontiguousarray(time, dtype=np.float64) y = np.ascontiguousarray(sap_flux - pld_flux, dtype=np.float64) plt.figure(figsize=(10, 5)) plt.plot(time, y, "k") plt.xlabel("time [days]") plt.ylabel("de-trended flux [ppt]") plt.title("final de-trended light curve") plt.xlim(time.min(), time.max()); # - # To confirm that we didn't overfit the transit, we can look at the folded light curve for the PLD model near trasit. # This shouldn't have any residual transit signal, and that looks correct here: # + plt.figure(figsize=(10, 5)) x_fold = (x - bls_t0 + 0.5 * bls_period) % bls_period - 0.5 * bls_period m = np.abs(x_fold) < 0.3 plt.plot(x_fold[m], pld_flux[m], ".k", ms=4) bins = np.linspace(-0.5, 0.5, 60) denom, _ = np.histogram(x_fold, bins) num, _ = np.histogram(x_fold, bins, weights=pld_flux) denom[num == 0] = 1.0 plt.plot(0.5 * (bins[1:] + bins[:-1]), num / denom, color="C1", lw=2) plt.xlim(-0.2, 0.2) plt.xlabel("time since transit") plt.ylabel("PLD model flux"); # - # ## The transit model in PyMC3 # # The transit model, initialization, and sampling are all nearly the same as the one in :ref:`together`. 
# + import exoplanet as xo import pymc3 as pm import theano.tensor as tt def build_model(mask=None, start=None): if mask is None: mask = np.ones(len(x), dtype=bool) with pm.Model() as model: # Parameters for the stellar properties mean = pm.Normal("mean", mu=0.0, sd=10.0) u_star = xo.distributions.QuadLimbDark("u_star") # Stellar parameters from Huang et al (2018) M_star_huang = 1.094, 0.039 R_star_huang = 1.10, 0.023 BoundedNormal = pm.Bound(pm.Normal, lower=0, upper=3) m_star = BoundedNormal("m_star", mu=M_star_huang[0], sd=M_star_huang[1]) r_star = BoundedNormal("r_star", mu=R_star_huang[0], sd=R_star_huang[1]) # Orbital parameters for the planets logP = pm.Normal("logP", mu=np.log(bls_period), sd=1) t0 = pm.Normal("t0", mu=bls_t0, sd=1) logr = pm.Normal( "logr", sd=1.0, mu=0.5 * np.log(1e-3 * np.array(bls_depth)) + np.log(R_star_huang[0]), ) r_pl = pm.Deterministic("r_pl", tt.exp(logr)) ror = pm.Deterministic("ror", r_pl / r_star) b = xo.distributions.ImpactParameter("b", ror=ror) ecs = xo.UnitDisk("ecs", testval=np.array([0.01, 0.0])) ecc = pm.Deterministic("ecc", tt.sum(ecs ** 2)) omega = pm.Deterministic("omega", tt.arctan2(ecs[1], ecs[0])) xo.eccentricity.kipping13("ecc_prior", observed=ecc) # Transit jitter & GP parameters logs2 = pm.Normal("logs2", mu=np.log(np.var(y[mask])), sd=10) logw0 = pm.Normal("logw0", mu=0, sd=10) logSw4 = pm.Normal("logSw4", mu=np.log(np.var(y[mask])), sd=10) # Tracking planet parameters period = pm.Deterministic("period", tt.exp(logP)) # Orbit model orbit = xo.orbits.KeplerianOrbit( r_star=r_star, m_star=m_star, period=period, t0=t0, b=b, ecc=ecc, omega=omega, ) def mean_model(t): # Compute the model light curve using starry light_curves = pm.Deterministic( "light_curves", xo.LimbDarkLightCurve(u_star).get_light_curve( orbit=orbit, r=r_pl, t=t, texp=texp ) * 1e3, ) return tt.sum(light_curves, axis=-1) + mean # GP model for the light curve kernel = xo.gp.terms.SHOTerm(log_Sw4=logSw4, log_w0=logw0, Q=1 / np.sqrt(2)) gp = xo.gp.GP( 
kernel, x[mask], tt.exp(logs2) + tt.zeros(mask.sum()), mean=mean_model ) gp.marginal("gp", observed=y[mask]) pm.Deterministic("gp_pred", gp.predict()) # Fit for the maximum a posteriori parameters, I've found that I can get # a better solution by trying different combinations of parameters in turn if start is None: start = model.test_point map_soln = xo.optimize(start=start, vars=[logs2, logSw4, logw0]) map_soln = xo.optimize(start=map_soln, vars=[logr]) map_soln = xo.optimize(start=map_soln, vars=[b]) map_soln = xo.optimize(start=map_soln, vars=[logP, t0]) map_soln = xo.optimize(start=map_soln, vars=[u_star]) map_soln = xo.optimize(start=map_soln, vars=[logr]) map_soln = xo.optimize(start=map_soln, vars=[b]) map_soln = xo.optimize(start=map_soln, vars=[ecc, omega]) map_soln = xo.optimize(start=map_soln, vars=[mean]) map_soln = xo.optimize(start=map_soln, vars=[logs2, logSw4, logw0]) map_soln = xo.optimize(start=map_soln) return model, map_soln model0, map_soln0 = build_model() # - # Here's how we plot the initial light curve model: # + def plot_light_curve(soln, mask=None): if mask is None: mask = np.ones(len(x), dtype=bool) fig, axes = plt.subplots(3, 1, figsize=(10, 7), sharex=True) ax = axes[0] ax.plot(x[mask], y[mask], "k", label="data") gp_mod = soln["gp_pred"] + soln["mean"] ax.plot(x[mask], gp_mod, color="C2", label="gp model") ax.legend(fontsize=10) ax.set_ylabel("relative flux [ppt]") ax = axes[1] ax.plot(x[mask], y[mask] - gp_mod, "k", label="de-trended data") for i, l in enumerate("b"): mod = soln["light_curves"][:, i] ax.plot(x[mask], mod, label="planet {0}".format(l)) ax.legend(fontsize=10, loc=3) ax.set_ylabel("de-trended flux [ppt]") ax = axes[2] mod = gp_mod + np.sum(soln["light_curves"], axis=-1) ax.plot(x[mask], y[mask] - mod, "k") ax.axhline(0, color="#aaaaaa", lw=1) ax.set_ylabel("residuals [ppt]") ax.set_xlim(x[mask].min(), x[mask].max()) ax.set_xlabel("time [days]") return fig plot_light_curve(map_soln0); # - # As in the :ref:`together` 
tutorial, we can do some sigma clipping to remove significant outliers. # + mod = ( map_soln0["gp_pred"] + map_soln0["mean"] + np.sum(map_soln0["light_curves"], axis=-1) ) resid = y - mod rms = np.sqrt(np.median(resid ** 2)) mask = np.abs(resid) < 5 * rms plt.figure(figsize=(10, 5)) plt.plot(x, resid, "k", label="data") plt.plot(x[~mask], resid[~mask], "xr", label="outliers") plt.axhline(0, color="#aaaaaa", lw=1) plt.ylabel("residuals [ppt]") plt.xlabel("time [days]") plt.legend(fontsize=12, loc=3) plt.xlim(x.min(), x.max()); # - # And then we re-build the model using the data without outliers. model, map_soln = build_model(mask, map_soln0) plot_light_curve(map_soln, mask); # Now that we have the model, we can sample: np.random.seed(261136679) with model: trace = xo.sample( tune=3500, draws=3000, start=map_soln, chains=4, target_accept=0.95 ) pm.summary( trace, var_names=[ "logw0", "logSw4", "logs2", "omega", "ecc", "r_pl", "b", "t0", "logP", "r_star", "m_star", "u_star", "mean", ], ) # ## Results # # After sampling, we can make the usual plots. 
# First, let's look at the folded light curve plot: # + # Compute the GP prediction gp_mod = np.median(trace["gp_pred"] + trace["mean"][:, None], axis=0) # Get the posterior median orbital parameters p = np.median(trace["period"]) t0 = np.median(trace["t0"]) # Plot the folded data x_fold = (x[mask] - t0 + 0.5 * p) % p - 0.5 * p plt.plot(x_fold, y[mask] - gp_mod, ".k", label="data", zorder=-1000) # Overplot the phase binned light curve bins = np.linspace(-0.41, 0.41, 50) denom, _ = np.histogram(x_fold, bins) num, _ = np.histogram(x_fold, bins, weights=y[mask]) denom[num == 0] = 1.0 plt.plot(0.5 * (bins[1:] + bins[:-1]), num / denom, "o", color="C2", label="binned") # Plot the folded model inds = np.argsort(x_fold) inds = inds[np.abs(x_fold)[inds] < 0.3] pred = trace["light_curves"][:, inds, 0] pred = np.percentile(pred, [16, 50, 84], axis=0) plt.plot(x_fold[inds], pred[1], color="C1", label="model") art = plt.fill_between( x_fold[inds], pred[0], pred[2], color="C1", alpha=0.5, zorder=1000 ) art.set_edgecolor("none") # Annotate the plot with the planet's period txt = "period = {0:.5f} +/- {1:.5f} d".format( np.mean(trace["period"]), np.std(trace["period"]) ) plt.annotate( txt, (0, 0), xycoords="axes fraction", xytext=(5, 5), textcoords="offset points", ha="left", va="bottom", fontsize=12, ) plt.legend(fontsize=10, loc=4) plt.xlim(-0.5 * p, 0.5 * p) plt.xlabel("time since transit [days]") plt.ylabel("de-trended flux") plt.xlim(-0.15, 0.15); # - # And a corner plot of some of the key parameters: # + import corner import astropy.units as u varnames = ["period", "b", "ecc", "r_pl"] samples = pm.trace_to_dataframe(trace, varnames=varnames) # Convert the radius to Earth radii samples["r_pl"] = (np.array(samples["r_pl"]) * u.R_sun).to(u.R_earth).value corner.corner( samples[["period", "r_pl", "b", "ecc"]], labels=["period [days]", "radius [Earth radii]", "impact param", "eccentricity"], ); # - # These all seem consistent with the previously published values and an earlier 
inconsistency between this radius measurement and the literature has been resolved by fixing a bug in *exoplanet*. # ## Citations # # As described in the :ref:`citation` tutorial, we can use :func:`exoplanet.citations.get_citations_for_model` to construct an acknowledgement and BibTeX listing that includes the relevant citations for this model. with model: txt, bib = xo.citations.get_citations_for_model() print(txt) print("\n".join(bib.splitlines()[:10]) + "\n...") GE LINKAGE plt.figure(figsize=(15,5)) mergings = linkage(pca_df2, method = "average", metric='euclidean') dendrogram(mergings) plt.show() # cutting the tree of complete linkage at a point clusterCut = pd.Series(cut_tree(mergings, n_clusters = 4).reshape(-1,)) hc_clustering = pd.concat([pca_df2, clusterCut], axis=1) hc_clustering.columns = ["PC1","PC2","PC3","Cluster_ID"] hc_clustering.head() hc_cluster = pd.concat([new_data['country'],hc_clustering], axis=1, join='outer', join_axes=None, ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False, sort=None, copy=True) hc_cluster.head() hc_cluster['Cluster_ID'].value_counts().plot(kind = 'bar') hc_clustered = hc_cluster[['country','Cluster_ID']].merge(new_data, on = 'country') hc_clustered.head() #means of variables child_mort_clustered = pd.DataFrame(hc_clustered.groupby(["Cluster_ID"]).child_mort.mean()) exports_clustered = pd.DataFrame(hc_clustered.groupby(["Cluster_ID"]).exports.mean()) health_clustered = pd.DataFrame(hc_clustered.groupby(["Cluster_ID"]).health.mean()) imports_clustered = pd.DataFrame(hc_clustered.groupby(["Cluster_ID"]).imports.mean()) income_clustered = pd.DataFrame(hc_clustered.groupby(["Cluster_ID"]).income.mean()) inflation_clustered = pd.DataFrame(hc_clustered.groupby(["Cluster_ID"]).inflation.mean()) life_expec_clustered = pd.DataFrame(hc_clustered.groupby(["Cluster_ID"]).life_expec.mean()) total_fer_clustered = pd.DataFrame(hc_clustered.groupby(["Cluster_ID"]).total_fer.mean()) gdpp_clustered = 
pd.DataFrame(hc_clustered.groupby(["Cluster_ID"]).gdpp.mean()) df_hc = pd.concat([pd.Series(list(range(0,4))), child_mort_clustered,exports_clustered, health_clustered, imports_clustered, income_clustered, inflation_clustered, life_expec_clustered,total_fer_clustered,gdpp_clustered], axis=1) df_hc.columns = ["ClusterID", "child_mort_mean", "exports_mean", "health_mean", "imports_mean", "income_mean", "inflation_mean", "life_expec_mean", "total_fer_mean", "gdpp_mean"] df_hc # + # plots over clusters vs variables of the dataframe fig, axs = plt.subplots(3,3,figsize = (7,7)) sns.barplot(x=df.ClusterID, y=df_hc.child_mort_mean, ax = axs[0,0]) sns.barplot(x=df.ClusterID, y=df_hc.exports_mean, ax = axs[0,1]) sns.barplot(x=df.ClusterID, y=df_hc.health_mean, ax = axs[0,2]) sns.barplot(x=df.ClusterID, y=df_hc.imports_mean, ax = axs[1,0]) sns.barplot(x=df.ClusterID, y=df_hc.income_mean, ax = axs[1,1]) sns.barplot(x=df.ClusterID, y=df_hc.life_expec_mean, ax = axs[1,2]) sns.barplot(x=df.ClusterID, y=df_hc.inflation_mean, ax = axs[2,0]) sns.barplot(x=df.ClusterID, y=df_hc.total_fer_mean, ax = axs[2,1]) sns.barplot(x=df.ClusterID, y=df_hc.gdpp_mean, ax = axs[2,2]) plt.tight_layout() # - hc_clustered[hc_clustered.Cluster_ID == 0].country.values # <font size = 6 color = Red> Final Report: # <font size = 5 color = Radium> following are the countries which obtained by K-Means clustering and need support from NGO : </font> # 'Afghanistan', 'Angola', 'Benin', 'Botswana', 'Burkina Faso', # 'Burundi', 'Cameroon', 'Central African Republic', 'Chad', # 'Comoros', 'Congo, Dem. 
Rep.', 'Congo, Rep.', "Cote d'Ivoire" # # 'Equatorial Guinea', 'Eritrea', 'Gabon', 'Gambia', 'Ghana', # 'Guinea', 'Guinea-Bissau', 'Haiti', 'Iraq', 'Kenya', 'Kiribati', # 'Lao', 'Lesotho', 'Liberia', 'Madagascar', 'Malawi', 'Mali' # # 'Mauritania', 'Mozambique', 'Namibia', 'Niger', 'Nigeria', # 'Pakistan', 'Rwanda', 'Senegal', 'Sierra Leone', 'Solomon Islands', # 'South Africa', 'Sudan', 'Tanzania', 'Timor-Leste', 'Togo', # 'Uganda', 'Yemen', 'Zambia'
19,800
/notebooks/gdonaire/data-cleaning-challenge-parsing-dates.ipynb
32c6040097adb01a037c808fa7846f8529d16535
[]
no_license
Sayem-Mohammad-Imtiaz/kaggle-notebooks
https://github.com/Sayem-Mohammad-Imtiaz/kaggle-notebooks
5
6
null
null
null
null
Jupyter Notebook
false
false
.py
18,561
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _cell_guid="b91a74ba-85f4-486e-b5f9-d0898f0626bf" _uuid="6ac53f18b4f4ec0fc44348cedb5d1c319fa127c0" # ### All days of the challange: # # * [Day 1: Handling missing values](https://www.kaggle.com/rtatman/data-cleaning-challenge-handling-missing-values) # * [Day 2: Scaling and normalization](https://www.kaggle.com/rtatman/data-cleaning-challenge-scale-and-normalize-data) # * [Day 3: Parsing dates](https://www.kaggle.com/rtatman/data-cleaning-challenge-parsing-dates/) # * [Day 4: Character encodings](https://www.kaggle.com/rtatman/data-cleaning-challenge-character-encodings/) # * [Day 5: Inconsistent Data Entry](https://www.kaggle.com/rtatman/data-cleaning-challenge-inconsistent-data-entry/) # ___ # Welcome to day 3 of the 5-Day Data Challenge! Today, we're going to work with dates. To get started, click the blue "Fork Notebook" button in the upper, right hand corner. This will create a private copy of this notebook that you can edit and play with. Once you're finished with the exercises, you can choose to make your notebook public to share with others. :) # # > **Your turn!** As we work through this notebook, you'll see some notebook cells (a block of either code or text) that has "Your Turn!" written in it. These are exercises for you to do to help cement your understanding of the concepts we're talking about. Once you've written the code to answer a specific question, you can run the code by clicking inside the cell (box with code in it) with the code you want to run and then hit CTRL + ENTER (CMD + ENTER on a Mac). You can also click in a cell and then click on the right "play" arrow to the left of the code. 
If you want to run all the code in your notebook, you can use the double, "fast forward" arrows at the bottom of the notebook editor. # # Here's what we're going to do today: # # * [Get our environment set up](#Get-our-environment-set-up) # * [Check the data type of our date column](#Check-the-data-type-of-our-date-column) # * [Convert our date columns to datetime](#Convert-our-date-columns-to-datetime) # * [Select just the day of the month from our column](#Select-just-the-day-of-the-month-from-our-column) # * [Plot the day of the month to check the date parsing](#Plot-the-day-of-the-month-to-the-date-parsing) # # Let's get started! # + [markdown] _cell_guid="5cd5061f-ae30-4837-a53b-690ffd5c5830" _uuid="9d82bf13584b8e682962fbb96131f2447d741679" # # Get our environment set up # ________ # # The first thing we'll need to do is load in the libraries and datasets we'll be using. For today, we'll be working with two datasets: one containing information on earthquakes that occured between 1965 and 2016, and another that contains information on landslides that occured between 2007 and 2016. # # > **Important!** Make sure you run this cell yourself or the rest of your code won't work! # + _cell_guid="135a7804-b5f5-40aa-8657-4a15774e3666" _uuid="835cbe0834b935fb0fd40c75b9c39454836f4d5f" # modules we'll use import pandas as pd import numpy as np import seaborn as sns import datetime # read in our data earthquakes = pd.read_csv("../input/earthquake-database/database.csv") landslides = pd.read_csv("../input/landslide-events/catalog.csv") volcanos = pd.read_csv("../input/volcanic-eruptions/database.csv") # set seed for reproducibility np.random.seed(0) # + [markdown] _cell_guid="604ac3a4-b1d9-4264-b312-4bbeecdeec00" _uuid="03ce3b4afe87d98f777172c2c7be066a66a0b237" # Now we're ready to look at some dates! (If you like, you can take this opportunity to take a look at some of the data.) 
# + [markdown] _cell_guid="9b87a77d-e5e5-4581-9cd3-0e7339fe1516" _uuid="742028572a307a42ce40db0102171bc219b05282" # # Check the data type of our date column # ___ # # For this part of the challenge, I'll be working with the `date` column from the `landslides` dataframe. The very first thing I'm going to do is take a peek at the first few rows to make sure it actually looks like it contains dates. # + _cell_guid="e6b7eb39-c3e3-40a1-b0a5-91cfcd2d42da" _uuid="93a08de7a6a621e4b07968c07c1cc612936c6027" # print the first few rows of the date column print(landslides['date'].head()) # + [markdown] _cell_guid="dbdacb7c-10d4-4b0a-8f6b-6d4a940ca446" _uuid="d88dbc08ab145fd20f86073b027c53f40fd306bc" # Yep, those are dates! But just because I, a human, can tell that these are dates doesn't mean that Python knows that they're dates. Notice that the at the bottom of the output of `head()`, you can see that it says that the data type of this column is "object". # # > Pandas uses the "object" dtype for storing various types of data types, but most often when you see a column with the dtype "object" it will have strings in it. # # If you check the pandas dtype documentation [here](http://pandas.pydata.org/pandas-docs/stable/basics.html#dtypes), you'll notice that there's also a specific `datetime64` dtypes. Because the dtype of our column is `object` rather than `datetime64`, we can tell that Python doesn't know that this column contains dates. 
# # We can also look at just the dtype of your column without printing the first few rows if we like: # + _cell_guid="56a047f4-cbf7-4914-951c-a04310ee7432" _uuid="e2ab2ac80aaac7b165b3af64edb75d29f2612482" # check the data type of our date column landslides['date'].dtype # + [markdown] _cell_guid="99a207db-3db0-4343-9805-58753f51f6e8" _uuid="06e6483764014a04e7a1f34525e2f12aee5fdab8" # You may have to check the [numpy documentation](https://docs.scipy.org/doc/numpy-1.12.0/reference/generated/numpy.dtype.kind.html#numpy.dtype.kind) to match the letter code to the dtype of the object. "O" is the code for "object", so we can see that these two methods give us the same information. # + _cell_guid="8987e921-0c37-4c0f-ba68-e4e26d8d1a1b" _uuid="a2a983470b318469993b75b450bab28c12b59ae6" # Your turn! Check the data type of the Date column in the earthquakes dataframe # (note the capital 'D' in date!) print(earthquakes.columns) print(earthquakes['Date'].head()) print('Earthquake Date column type {0}'.format(earthquakes['Date'].dtype)) # + [markdown] _cell_guid="fb3b552b-411b-4fc0-b1e6-a3a8156fd459" _uuid="0939ce269aef7001e35cc8f2a5f1eed1f6160940" # # Convert our date columns to datetime # ___ # # Now that we know that our date column isn't being recognized as a date, it's time to convert it so that it *is* recognized as a date. This is called "parsing dates" because we're taking in a string and identifying its component parts. # # We can pandas what the format of our dates are with a guide called as ["strftime directive", which you can find more information on at this link](http://strftime.org/). The basic idea is that you need to point out which parts of the date are where and what punctuation is between them. There are [lots of possible parts of a date](http://strftime.org/), but the most common are `%d` for day, `%m` for month, `%y` for a two-digit year and `%Y` for a four digit year. 
# # Some examples: # # * 1/17/07 has the format "%m/%d/%y" # * 17-1-2007 has the format "%d-%m-%Y" # # Looking back up at the head of the `date` column in the landslides dataset, we can see that it's in the format "month/day/two-digit year", so we can use the same syntax as the first example to parse in our dates: # + _cell_guid="f955aa17-ede7-4457-a913-ba1c44f8846d" _uuid="a471aae50241b245caa0c60fbb19821372682b76" # create a new column, date_parsed, with the parsed dates landslides['date_parsed'] = pd.to_datetime(landslides['date'], format = "%m/%d/%y") # + [markdown] _cell_guid="09c1c55c-3883-4f5e-8ea9-e914b09416b6" _uuid="50feaed5f874d8c09f983ad3172febdc54f4f0bb" # Now when I check the first few rows of the new column, I can see that the dtype is `datetime64`. I can also see that my dates have been slightly rearranged so that they fit the default order datetime objects (year-month-day). # + _cell_guid="5a6c6244-b724-4a70-b356-6e3fb1e61270" _uuid="2bff07787e5aa5ad2b6484c5bcee18b5b2f283bc" # print the first few rows landslides['date_parsed'].head() # + [markdown] _cell_guid="7bd8f8b6-8a60-4a12-b94b-4100188845da" _uuid="fc95b22f0f4d7a6bc0cb1a7cc55abfb204cc81f9" # Now that our dates are parsed correctly, we can interact with them in useful ways. # # ___ # * **What if I run into an error with multiple date formats?** While we're specifying the date format here, sometimes you'll run into an error when there are multiple date formats in a single column. If that happens, you have have pandas try to infer what the right date format should be. You can do that like so: # # `landslides['date_parsed'] = pd.to_datetime(landslides['Date'], infer_datetime_format=True)` # # * **Why don't you always use `infer_datetime_format = True?`** There are two big reasons not to always have pandas guess the time format. The first is that pandas won't always been able to figure out the correct date format, especially if someone has gotten creative with data entry. 
The second is that it's much slower than specifying the exact format of the dates. # ____ # + _cell_guid="beba42ab-fb0e-4285-83cb-984a51bdb8ed" _uuid="c029d8021e0d6cd5de3c9e62014a498c7dd5d582" # Your turn! Create a new column, date_parsed, in the earthquakes # dataset that has correctly parsed dates in it. (Don't forget to # double-check that the dtype is correct!) earthquakes['date_parsed'] = pd.to_datetime(earthquakes['Date'], infer_datetime_format = True) earthquakes['date_parsed'].head # + [markdown] _cell_guid="9f310829-85bd-44c8-b1c5-d582407b5931" _uuid="3d6f5bef5deb1c1d4d83bbcaeb9ba23612978f35" # # Select just the day of the month from our column # ___ # # "Ok, Rachael," you may be saying at this point, "This messing around with data types is fine, I guess, but what's the *point*?" To answer your question, let's try to get information on the day of the month that a landslide occured on from the original "date" column, which has an "object" dtype: # + _cell_guid="ff451a5e-4447-40e2-ad76-367136a1fcff" _uuid="3c3be07dbf7394103a1db120e6ecbdffaf08d37f" # try to get the day of the month from the date column day_of_month_landslides = landslides['date'].dt.day # + [markdown] _cell_guid="c78aada6-c4d9-4464-894e-bdd4fabb4b13" _uuid="5847844cdd3aede3ff62bc5115f1d69c91b4af9d" # We got an error! The important part to look at here is the part at the very end that says `AttributeError: Can only use .dt accessor with datetimelike values`. We're getting this error because the dt.day() function doesn't know how to deal with a column with the dtype "object". Even though our dataframe has dates in it, because they haven't been parsed we can't interact with them in a useful way. 
# # Luckily, we have a column that we parsed earlier , and that lets us get the day of the month out no problem: # + _cell_guid="27b6422d-3a62-47ca-bb87-6e6292bed7cf" _uuid="e0be15da345949c990b5789e2a94f8f4e09e4cf5" # get the day of the month from the date_parsed column day_of_month_landslides = landslides['date_parsed'].dt.day day_of_month_landslides.head # + _cell_guid="aa3c05ea-f6d1-453f-86dc-c2fd9f8b3fd6" _uuid="ffe9bfc0acef502b995aa61ee1c5d2e4a59a5e4e" # Your turn! get the day of the month from the date_parsed column day_of_month_earthquakes = earthquakes['date_parsed'].dt.day day_of_month_earthquakes.head # + [markdown] _cell_guid="fe33df7d-c85d-4b61-b572-5682e6eea81b" _uuid="a2cec7b480ef13c070d40ca0e0763d2d30a86a9c" # # Plot the day of the month to check the date parsing # ___ # # One of the biggest dangers in parsing dates is mixing up the months and days. The to_datetime() function does have very helpful error messages, but it doesn't hurt to double-check that the days of the month we've extracted make sense. # # To do this, let's plot a histogram of the days of the month. We expect it to have values between 1 and 31 and, since there's no reason to suppose the landslides are more common on some days of the month than others, a relatively even distribution. (With a dip on 31 because not all months have 31 days.) Let's see if that's the case: # + _cell_guid="49feb18f-c077-474e-9353-a24ae850acf6" _uuid="d3d5a143d3d49e10187e420abfe9cfe18c7bac56" # remove na's day_of_month_landslides = day_of_month_landslides.dropna() # plot the day of the month sns.distplot(day_of_month_landslides, kde=False, bins=31) # + [markdown] _cell_guid="c8c706a4-2697-4520-b0dd-86fc6fb80326" _uuid="90016c3e93eb8499d9efe2ece32fb5b70dcbd2ae" # Yep, it looks like we did parse our dates correctly & this graph makes good sense to me. Why don't you take a turn checking the dates you parsed earlier? 
# + _cell_guid="7b5a7571-2ee3-4aad-94e9-ba47b06e6a29" _uuid="f2b79871c730f32f5ef1889912b7a8623eccf98f" # Your turn! Plot the days of the month from your # earthquake dataset and make sure they make sense. day_of_month_earthquakes.dropna() sns.distplot(day_of_month_earthquakes, kde=False, bins=31) # + [markdown] _cell_guid="b4f37fce-4d08-409e-bbbd-6a26c3bbc6ee" _uuid="52b0af56e3c77db96056e9acd785f8f435f7caf5" # And that's it for today! If you have any questions, be sure to post them in the comments below or [on the forums](https://www.kaggle.com/questions-and-answers). # # Remember that your notebook is private by default, and in order to share it with other people or ask for help with it, you'll need to make it public. First, you'll need to save a version of your notebook that shows your current work by hitting the "Commit & Run" button. (Your work is saved automatically, but versioning your work lets you go back and look at what it was like at the point you saved it. It also lets you share a nice compiled notebook instead of just the raw code.) Then, once your notebook is finished running, you can go to the Settings tab in the panel to the left (you may have to expand it by hitting the [<] button next to the "Commit & Run" button) and setting the "Visibility" dropdown to "Public". # # # More practice! # ___ # # If you're interested in graphing time series, [check out this Learn tutorial](https://www.kaggle.com/residentmario/time-series-plotting-optional). # # You can also look into passing columns that you know have dates in them the `parse_dates` argument in `read_csv`. (The documention [is here](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html).) Do note that this method can be very slow, but depending on your needs it may sometimes be handy to use. # # For an extra challenge, you can try try parsing the column `Last Known Eruption` from the `volcanos` dataframe. 
This column contains a mixture of text ("Unknown") and years both before the common era (BCE, also known as BC) and in the common era (CE, also known as AD). # + _cell_guid="dd64bc7c-c361-44d3-9f02-f7f8a2cb8430" _uuid="0027b29db32dc34294f713c345747a37d89cfd26" volcanos['Last Known Eruption'].sample(5) # + _uuid="48f5179daed5453921d9517bc0178be875638a75" last_known_eruption = volcanos['Last Known Eruption'] print(last_known_eruption.shape) # + _uuid="eec407eaae15cc100db522bc485780023351dcfd" last_known_eruption_dropped = last_known_eruption.replace('Unknown', np.nan).dropna() # + _uuid="457c96c5e32e13d2d1f069ab13064d5c27ac350b" print('Shape {0}'.format(last_known_eruption_dropped.shape)) print('Data {0}'.format(last_known_eruption_dropped.head)) # + _uuid="e00bdd10e43ce0131080d1c50381507d90cd893c" def computeDate(s): #looks for values containing BCE if "BCE" in s: #removes BCE string #defines them as integers return -int(s.strip(' BCE')) if " CE" in s: return int(s.strip(' CE')) # + _uuid="a934d72846fa3ed6a0d2a519c0c5cf7323c23cd5" last_known_eruption_cleanDate = last_known_eruption_dropped.apply(computeDate) #plot the list sns.distplot(last_known_eruption_cleanDate) # + _uuid="c0b157076ff416686b78aa98410cd7c2f42dd9ad"
16,038
/workshops/datashader/bird-download.ipynb
1347482e5cf3681dcc4c5679bf44270dd366c1d4
[ "Apache-2.0" ]
permissive
NLeSC/ldm-sig
https://github.com/NLeSC/ldm-sig
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
1,463
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # cd C:\Users\gat\Documents\otis\data_gathering # %reset # %time %run -i ./gathering.py gather itude, altitude, device_info_serial, date_time FROM bird_tracking' response = requests.get(url) with open('bird_tracking.csv', 'wb') as f: f.write(response.content) # Open bird-holo.ipynb next to visualize downloaded data
595
/digit recognition.ipynb
39016926972d3006a0a01c37f185226514759f52
[]
no_license
ravi288/Digit-Recognition-
https://github.com/ravi288/Digit-Recognition-
1
0
null
null
null
null
Jupyter Notebook
false
false
.py
20,512
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Digit Recoginition # # + import warnings warnings.filterwarnings("ignore") #Standard scientific python imports import matplotlib.pyplot as plt #Import datasets,classifiers and performance metrics from sklearn import datasets,svm #svm= it divided 1797 images into thier values # - # # Import dataset from sl # + #The digits dataset digits=datasets.load_digits() print("Digits:",digits.keys()) print("Digits.target---:",digits.target) images_and_labels=list(zip(digits.images,digits.target)) print("len(images_and_labels):",len(images_and_labels)) for index,[image,label] in enumerate(images_and_labels[0:5]): print("index:",index,"image:\n",image,"label:",label) plt.subplot(2,5,index+1) #Position numbering starts from 1 plt.axis('on') plt.imshow(image,cmap=plt.cm.gray_r,interpolation='nearest') plt.title("Training:%i"%label) #plt.show() # + #To apply a classifier on this data, we need to flatten the image to turn the data in a (sample,feature)matrix n_samples=len(digits.images) print("n_samples:",n_samples) imagedata=digits.images.reshape((n_samples,-1)) print("After reshaped:len(imagedata[0]:",len(imagedata[0])) # + #Create a classifier:a support vector classifier classifier=svm.SVC(gamma=0.001) #We learn the digits on the first half of the digits classifier.fit(imagedata[:n_samples//2],digits.target[:n_samples//2]) #Now predict the value of the digit on the second half: expectedY=digits.target[n_samples//2:] predictedY=classifier.predict(imagedata[n_samples//2:]) images_and_predictions=list(zip(digits.images[n_samples//2:],predictedY)) for index,[image,prediction] in enumerate(images_and_predictions[:5]): plt.subplot(2,5,index+6) plt.axis("on") plt.imshow(image,cmap=plt.cm.gray_r,interpolation='nearest') plt.title("Prediction:%i"%prediction) 
print("Original values:",digits.target[n_samples//2:(n_samples//2)+5]) plt.show() # + #Install Pillow Library from scipy.misc import imread,imresize,bytescale img=imread("E:/pycharm projects/PycharmProjects/AIProject/Project/five.jpg") img=imresize(img,(8,8)) classifier=svm.SVC(gamma=0.001) classifier.fit(imagedata[:],digits.target[:]) img=img.astype(digits.images.dtype) img=bytescale(img,high=16.0,low=0) print("img.shape:",img.shape) print("\n",img) x_testdata=[] for c in img: for r in c : x_testdata.append(sum(r)/3.0) print("x_testdata:\n",x_testdata) print("len(x_testdata):",len(x_testdata)) x_testdata=[x_testdata] print("len(x_testdata):",len(x_testdata)) print("Machine Output=",classifier.predict(x_testdata)) plt.show() # -
2,877
/examples/.ipynb_checkpoints/wine-checkpoint.ipynb
6701f8a2f6316b70913560a7827b50b979ffb8ab
[]
no_license
kashevg/coursera
https://github.com/kashevg/coursera
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
1,136,322
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Оценка качества вин # + from sklearn.cross_validation import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error from sklearn.ensemble import RandomForestRegressor import numpy as np import pandas as pd import statsmodels.stats.api as sm # %pylab inline # - # P. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis. **Modeling wine preferences by data mining from physicochemical properties.** *Decision Support Systems*, 47(4):547-553, 2009: имеются оценки качества 6497 португальских вин Vinho Verde, выставленные дегустаторами при слепом тестировании в баллах по шкале от 0 до 10. # # Прочитаем данные: wine = pd.read_csv('wine_data.csv', sep='\t', header=0) wine = wine.sample(frac=1) # Вот так выглядит распределение экспертных оценок вин в выборке: plt.figure(figsize(8,6)) stat = wine.groupby('quality')['quality'].agg(lambda x : float(len(x))/wine.shape[0]) stat.plot(kind='bar', fontsize=14, width=0.9, color="red") plt.xticks(rotation=0) plt.ylabel('Proportion', fontsize=14) plt.xlabel('Quality', fontsize=14) # Давайте научимся оценивать этот признак, чтобы мы могли заранее предположить, какую оценку получит какое-то новое вино, которого в выборке нет. 
# Чтобы смоделировать такую ситуацию, отделим 25% выборки для контроля качества предсказания: X_train, X_test, y_train, y_test = train_test_split(wine.ix[:, wine.columns != 'quality'], wine['quality'], test_size=0.25, stratify=wine[['Type', 'quality']]) X_train['Type'] = X_train['Type'].apply(lambda x : -1 if x == 'red' else 1) X_test['Type'] = X_test['Type'].apply(lambda x : -1 if x == 'red' else 1) # Если у нас нет больше никакой информации о винах, то наше лучшее предположение об оценке — среднее имеющихся в обучающей выборке: np.mean(y_train) # Если мы будем предсказывать этой величиной оценку всех вин, на обучающей выборке мы получим среднеквадратичную ошибку sqrt(mean_squared_error([np.mean(y_train)]*len(y_train), y_train)) # а на тестовой sqrt(mean_squared_error([np.mean(y_train)]*len(y_test), y_test)) # На тестовой выборке ошибка больше, поскольку среднее мы оценивали по обучающей. Это естественный эффект. # # Тип вина # Какая-то ещё информация у нас есть, например, о типе вина: wine.groupby('Type')['Type'].count() # Распределения оценок по типам довольно сильно отличаются: # + plt.figure(figsize(16,6)) plt.subplot(121) stat_red = wine[wine['Type'] == 'red'].groupby('quality')['quality'].agg(lambda x: float(len(x))/wine[wine['Type'] == 'red'].shape[0]) stat_red.plot(kind='bar', color='r', width=0.9) plt.xticks(rotation=0) plt.ylabel('Proportion', fontsize=14) plt.xlabel('Quality', fontsize=14) plt.subplot(122) stat_white = wine[wine['Type'] == 'white'].groupby('quality')['quality'].agg(lambda x: float(len(x))/wine[wine['Type'] == 'white'].shape[0]) stat_white.plot(color='w', kind='bar', width=0.9) plt.xticks(rotation=0) plt.ylabel('Proportion', fontsize=14) plt.xlabel('Quality', fontsize=14) # - # Различие между средними статистически значимо: # + tmeans = sm.CompareMeans(sm.DescrStatsW(wine[wine['Type'] == 'white']['quality']), sm.DescrStatsW(wine[wine['Type'] == 'red']['quality'])) tmeans.ttest_ind(alternative='two-sided', usevar='pooled', value=0)[1] # - 
# 95% доверительный интервал для разности средних оценок: tmeans.tconfint_diff(alpha=0.05, alternative='two-sided', usevar='pooled') # Чтобы уточнить наше предсказание, можно оценку каждого вина предсказывать средним по оценкам вин такого же типа в выборке: regressor = LinearRegression() regressor.fit(X_train['Type'].reshape(-1,1), y_train) y_train_predictions = regressor.predict(X_train['Type'].reshape(-1,1)) y_test_predictions = regressor.predict(X_test['Type'].reshape(-1,1)) # Ошибки предсказания немного уменьшились: sqrt(mean_squared_error(y_train_predictions, y_train)) sqrt(mean_squared_error(y_test_predictions, y_test)) # Вот так выглядят истинные оценки вин и их предсказания средними по типам на тестовой выборке: pyplot.figure(figsize(8,8)) pyplot.scatter(y_test, y_test_predictions, color="red", alpha=0.1) pyplot.xlim(2.5,9.5) pyplot.ylim(2.5,9.5) plot(range(11), color='black') grid() plt.xlabel('Quality', fontsize=14) plt.ylabel('Estimated quality', fontsize=14) # # Другие признаки # На самом деле у нас есть ещё 11 признаков, описывающих химический состав вин: wine.head() # + def jitter(arr): return arr + np.random.uniform(low=-0.35, high=0.35, size=len(arr)) pyplot.figure(figsize(16, 36)) for i in range (1, 12): pyplot.subplot(6, 2, i) pyplot.scatter(jitter(wine['quality']), wine.ix[:, i], color=wine["Type"], edgecolors="black") pyplot.xlabel('Quality', fontsize=14) pyplot.ylabel(str(wine.columns[i]), fontsize=14) # - # Попробуем их учесть при построении прогноза оценок. # ## Линейная регрессия # Построим для начала линейную регрессионную модель. 
lm = LinearRegression() lm.fit(X_train, y_train) # Ошибки предсказания существенно уменьшились: sqrt(mean_squared_error(lm.predict(X_train), y_train)) sqrt(mean_squared_error(lm.predict(X_test), y_test)) # Истинные оценки вин и их предсказания линейной моделью: # + plt.figure(figsize(16,7)) plt.subplot(121) pyplot.scatter(y_train, lm.predict(X_train), color="red", alpha=0.1) pyplot.xlim(2.5,9.5) pyplot.ylim(2.5,9.5) plot(range(11), color='black') grid() pyplot.title('Train set', fontsize=20) pyplot.xlabel('Quality', fontsize=14) pyplot.ylabel('Estimated quality', fontsize=14) plt.subplot(122) pyplot.scatter(y_test, lm.predict(X_test), color="red", alpha=0.1) pyplot.xlim(2.5,9.5) pyplot.ylim(2.5,9.5) plot(range(11), color='black') grid() pyplot.title('Test set', fontsize=20) pyplot.xlabel('Quality', fontsize=14) pyplot.ylabel('Estimated quality', fontsize=14) # - # Посчитаем коэффициент детерминации — долю объяснённой моделью дисперсии отклика: lm.score(X_test, y_test) # ## Случайный лес # Построим на обучающей выборке случайный лес: rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=3) rf.fit(X_train, y_train) # Качество выросло ещё сильнее, хотя модель и переобучилась: sqrt(mean_squared_error(rf.predict(X_train), y_train)) sqrt(mean_squared_error(rf.predict(X_test), y_test)) # Истинные оценки вин и их предсказания случайным лесом: # + plt.figure(figsize(16,7)) plt.subplot(121) pyplot.scatter(y_train, rf.predict(X_train), color="red", alpha=0.1) pyplot.xlim(2.5,9.5) pyplot.ylim(2.5,9.5) plot(range(11), color='black') grid() pyplot.title('Train set', fontsize=20) pyplot.xlabel('Quality', fontsize=14) pyplot.ylabel('Estimated quality', fontsize=14) plt.subplot(122) pyplot.scatter(y_test, rf.predict(X_test), color="red", alpha=0.1) pyplot.xlim(2.5,9.5) pyplot.ylim(2.5,9.5) plot(range(11), color='black') grid() pyplot.title('Test set', fontsize=20) pyplot.xlabel('Quality', fontsize=14) pyplot.ylabel('Estimated quality', fontsize=14) # - # Коэффициент 
детерминации для случайного леса: rf.score(X_test, y_test) # Сравним ошибки линейной регрессии и случайного леса на тестовой выборке: plt.figure(figsize(8,6)) plt.hist(abs(y_test - lm.predict(X_test)) - abs(y_test - rf.predict(X_test)), bins=15, normed=True) plt.xlabel('Difference of absolute errors') # Различия между средними абсолютными ошибками значимы: # + tmeans = sm.CompareMeans(sm.DescrStatsW(abs(y_test - lm.predict(X_test))), sm.DescrStatsW(abs(y_test - rf.predict(X_test)))) tmeans.ttest_ind(alternative='two-sided', usevar='pooled', value=0)[1] # - # 95% доверительный интервал для средней разности абсолютных ошибок: tmeans.tconfint_diff(alpha=0.05, alternative='two-sided', usevar='pooled') # То есть, используя вместо линейной регрессии наш случайный лес, мы предсказываем экспертную оценку в среднем на 0.26-0.30 баллов точнее. # # Давайте посмотрим, какие признаки обладают наибольшей предсказательной способностью: importances = pd.DataFrame(zip(X_train.columns, rf.feature_importances_)) importances.columns = ['feature name', 'importance'] importances.sort_values(by='importance', ascending=False) # Cильнее всего на экспертную оценку качества вина влияет содержание алкоголя.
8,478
/FinalBosc.ipynb
3b089c0e1e52c90638a5f81c4708562b53f1bffb
[]
no_license
nicecalo/BOSCintermedioPythohNCCL
https://github.com/nicecalo/BOSCintermedioPythohNCCL
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
3,676,695
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="pY8ihUomCW9X" outputId="df6b4b30-7472-4941-a384-6692013b7714" colab={"base_uri": "https://localhost:8080/", "height": 17} import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np # Graficos dinamicos : plotly import plotly.graph_objs as go import plotly.offline as py import plotly plotly.offline.init_notebook_mode() # sklearn : Regresion from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error from sklearn import datasets from sklearn.naive_bayes import GaussianNB from sklearn.svm import LinearSVC from sklearn.linear_model import LogisticRegression from sklearn.metrics import (brier_score_loss, precision_score, recall_score, f1_score) from sklearn.calibration import CalibratedClassifierCV, calibration_curve from sklearn.model_selection import train_test_split # Datos en la linea del tiempo import datetime import csv # + id="Fed8k2cODeIK" outputId="dbb0c2ba-0378-4990-c3a7-e773d5fd5159" colab={"base_uri": "https://localhost:8080/", "height": 493} # cargar los datos en memoria dataset = pd.read_csv("https://raw.githubusercontent.com/nicecalo/BOSCintermedioPythohNCCL/main/BOSC.csv") # veamos informacion del dataset dataset.info() print("\n\n") dataset.head() # + id="vBuZrDWuEV7S" # agreguemos (Creamos) una nueva columna # Diferencia entre el precio de apertura (Open) y el precio de cierre (Close) dataset["Variation"] = dataset["Close"].sub(dataset.Open) # + id="A6uBJhGaEwBM" outputId="60c9b8bc-9e72-4001-f5e1-0744c73dca8c" colab={"base_uri": "https://localhost:8080/", "height": 204} dataset.head() # + id="KYZeLgQhE4A-" outputId="125c65d8-eaeb-440f-db70-cdf5d2a65ad8" colab={"base_uri": "https://localhost:8080/", "height": 297} 
dataset.describe() # + id="JUj-3LqpE6HI" outputId="d50368e1-56b6-4066-f70d-1ab8856fcb16" colab={"base_uri": "https://localhost:8080/"} dataset.isnull().sum(axis=0) # + [markdown] id="S1oXLgzcFKFV" # Visualizacion de la data # + id="-xgGhrXoFJU0" outputId="7f13a70a-5d18-4ef6-9938-14cb655442a1" colab={"base_uri": "https://localhost:8080/", "height": 988} plt.rcParams['figure.figsize']=[17.,17.] dataset.drop([],1).hist() plt.show() # + id="1EwgiAL2FfH2" outputId="10070cdd-1cae-472c-faff-2136708be2bf" colab={"base_uri": "https://localhost:8080/", "height": 542} import plotly.io as pio pio.renderers.default = 'colab' x1 = dataset.Date y1 = dataset.Close data = [go.Scatter(x = x1 , y = y1)] layout = go.Layout( xaxis = dict( range = ["01-01-2010", '11-04-2017'], title = 'Año' ), yaxis = dict( range = [min(x1), max(y1)], title = "Preciode Cierre de la accion" ) ) fig = go.Figure(data= data, layout = layout) py.iplot(fig) # + id="Q_bYbsDBFaq1" outputId="15a5d480-82aa-46e7-b24a-31e62c2d6760" colab={"base_uri": "https://localhost:8080/", "height": 357} # veamos como evoluciona la columna Variation a lo largo del tiempo import matplotlib.dates as mdates import datetime as dt x = dataset["Date"] y = dataset["Variation"] plt.figure(figsize= (50,9)) plt.plot_date(x,y, color = 'g', fmt = "g-") plt.title("Diferencia entre Close y Open") plt.show() # + id="0yv1_hcKG9nT" outputId="566ebdbb-9a69-422c-ee2a-f27057efe643" colab={"base_uri": "https://localhost:8080/", "height": 537} plt.figure(figsize=(16,9)) plt.hist(dataset.Variation, bins= 100) plt.show() # + id="5S3vrHadHYUh" # Correlaciones train = dataset # + id="hs3VSg2_HcII" outputId="dd11af44-a0eb-4aa7-d7ed-a6655c2b661c" colab={"base_uri": "https://localhost:8080/", "height": 986} x = train.Open[:150] y = train.Close[:150] plt.scatter(x,y, color = 'b') plt.xlabel("Precio de Apertura") plt.ylabel("Precio de Cierre") plt.axis([min(x) , max(x) , min(y), max(y)]) plt.show() # + id="nnhYKNFjH6j1" 
outputId="d0c8aa7d-e4e8-41f3-b130-1c30802cff91" colab={"base_uri": "https://localhost:8080/", "height": 986} x = train.High y = train.Close plt.scatter(x,y, color = 'b') plt.xlabel("Precio mas alto") plt.ylabel("Precio de Cierre") plt.axis([min(x) , max(x) , min(y), max(y)]) plt.show() # + id="UqHG2kV4H9ON" outputId="b368b13a-2c06-4bf2-f325-cdbb8032c6b2" colab={"base_uri": "https://localhost:8080/", "height": 986} x = train.Low[:150] y = train.Close[:150] plt.scatter(x,y, color = 'b') plt.xlabel("Precio mas bajo") plt.ylabel("Precio de Cierre") plt.axis([min(x) , max(x) , min(y), max(y)]) plt.show() # + id="MczLdQgLICJ3" outputId="b46a7bfc-0143-45bc-a029-5fae9062ffa0" colab={"base_uri": "https://localhost:8080/", "height": 986} x = train.Volume[:150] # numeros enteros y = train.Close[:150] plt.scatter(x,y, color = 'r') plt.xlabel("Volumen") plt.ylabel("Precio de Cierre") plt.axis([min(x) , max(x) , min(y), max(y)]) plt.show() # + id="dlqcDiYEIVkf" outputId="cb6e6b09-ef86-4340-8f73-f40528eda417" colab={"base_uri": "https://localhost:8080/", "height": 541} # Calculemos la correlacion corr = train[["Open" , "High", "Low", "Close"]].corr() corr type(corr) # visualizacion de la matriz de correlacion # dir(sns) # help(sns.heatmap) # plt.figure(figsize = (16,9)) # sns.heatmap(corr, vmin = -1 , vmax = 1) # plt.show() plt.figure(figsize = (16,9)) v_min = np.min(np.array(corr)) v_max = np.max(np.array(corr)) sns.heatmap(corr, vmin = v_min , vmax = v_max) plt.show() # + [markdown] id="ilaonZs4IcmC" # # # # > Modelo # # # + id="p6wj4FfuIaJH" # deseo "predecir" la variable Close (variable dependiente - target - output : y) # en funcion del resto de variables (Variables independientes - features) # Definimos un dataframe para las variables independientes features = ["Open", 'High', 'Low', "Volume"] train = train[features] # definimos y : variable dependiente y = dataset["Close"] # + id="NYXToQ2zIp__" # partimos nuestros datos en entrenamiento (train) y prueba (test) # 
help(train_test_split) # repartimos el conjunto de datos en # 20% : para pruebas # 80% : para entrenamiento X_train, X_test, y_train, y_test = train_test_split(train, y ,test_size = 0.2, random_state = 666) # + id="m2Gb6fGaIy4Q" # Instancio la clase LinearRegression lr_model = LinearRegression() lr_model.fit(X_train,y_train) lr_model # type(lr_model) dir(lr_model) # + id="e_fyckGMI-8A" outputId="ef8cafbd-510b-491a-b28c-24cdd0562b73" colab={"base_uri": "https://localhost:8080/"} # veamos los coeficientes del modelo lr_model.coef_ # + [markdown] id="AajVLFDdJUQA" # y_CLOSE =-7.91130056e-01*[Open] +9.06397557e-01 [High] + 8.54827381e-01*[Low] +-2.57377989e-07*[Volumen] + Interceto + ϵ # + id="wdoUqNFvKj3i" outputId="acbf59d1-05a9-407b-ace2-72cd6628f0fd" colab={"base_uri": "https://localhost:8080/"} # Empecemos con las predicciones lr_model.predict(X_test)[:20] # + id="jDEf0zmjRXvM" outputId="67d9acce-bddd-4837-d32f-7b83acffa06d" colab={"base_uri": "https://localhost:8080/"} y_test[:20] # + [markdown] id="CgAnavmDLLU3" # Validemos nuestro modelo # + id="GIxOqvkALJMA" outputId="a2e080be-6aed-41d1-b2e8-29ae8ee441bd" colab={"base_uri": "https://localhost:8080/"} RMSE = mean_squared_error(y_test, lr_model.predict(X_test))**0.5 RMSE
7,359
/examples/parallax-corrected-latlon.ipynb
c289a63549650f3ac3f575815e4a6c6b553e80f0
[ "BSD-3-Clause" ]
permissive
fluxtransport/glmtools
https://github.com/fluxtransport/glmtools
1
1
BSD-3-Clause
2020-05-22T21:02:06
2020-05-11T18:29:41
null
Jupyter Notebook
false
false
.py
594,238
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Motivation: parallax correction of GLM gridded imagery to ground-relative position. # # GLM imagery are provided in fixed grid coordinates. When the fixed grid viewing angle `(x,y)` is interscted with the earth to retrieve a `(lat,lon)` pair, there is usually parallax because the cloud surface from which light. The parallax arises because the cloud is at some (unkonwn) height above the ground. When overlaid to other sateliite imagry, this is no problem, and is even desirable, as the fixed grid viewing angle includes no further assumptions - it is the native coordinate of any geostationary satellite sensor. # # While fixed grid coordinates are exact from the point of view of the satellite, it is uncertain where any observed cloud is located with respect to ground. For many applications (e.g., numerical weather prediction; analysis of lightning alongside radar observations) the ground-relative position of that cloud (in latitude and longitude) is needed. The purpose of this notebook is to show how to convert GLM imagery in fixed grid coordiantes to latitude and longitude, including an assumption of the height of the cloud surface from which light was emited. # # For illustration this notebook uses the same altitude correction assumed in the GLM L2 data, where each flash is reported as a latitude and longitude that attempts to correct for parallax by assuming a height from which light was emitted. This height is defined by a "lightning ellipsoid" that is slightly larger (14 km and 6 km at the equator and poles, respectively) than the elllipsoid that best approximates mean sea level. 
One could adapt this technique to use any "best" height: simply assume a different ellipsoid shape - for instance, a constant height offset determined based on that day's observations. # # **Our strategy is as follows:** # 1. Get the fixed grid position of each pixel. # 2. Define a new earth ellipsoid whose surface is the lightning ellipsoid. # 3. For each pixel, intersect the fixed grid angle with the lightning ellipsoid. # 4. Find the cartesian position of the intersection point with respect to the center of the earth. This is an absolute 3D position that can be mapped to any other coordinates of interest. # 5. Find the latitude, longitude, and altitude of the intersection point with respect to the MSL ellipsoid. # # Figure 5 of [Bruning et al. (2019, JGR)](https://doi.org/10.1029/2019JD030874) is a helpful reference for visualizing the strategy. import xarray as xr import numpy as np import pyproj as proj4 # print(proj4.pj_ellps['GRS80']) from glmtools.io.lightning_ellipse import lightning_ellipse_rev from lmatools.coordinateSystems import CoordinateSystem from lmatools.grid.fixed import get_GOESR_coordsys # ### Load some data # Use the imagery dataset that's included in the GLM repository: 2 July 2018, 0433-0434 UTC. Also load the GLM L2 LCFA point data files. 
glm = xr.open_dataset('/Users/ebruning/code/glmtools/glmtools/test/data/conus/2018/Jul/02/OR_GLM-L2-GLMC-M3_G16_s20181830433000_e20181830434000_c20191931535490.nc') nadir = glm.nominal_satellite_subpoint_lon.data print(nadir) # print(glm) l2files = ['/Users/ebruning/code/glmtools/glmtools/test/data/OR_GLM-L2-LCFA_G16_s20181830433000_e20181830433200_c20181830433231.nc', '/Users/ebruning/code/glmtools/glmtools/test/data/OR_GLM-L2-LCFA_G16_s20181830433200_e20181830433400_c20181830433424.nc', '/Users/ebruning/code/glmtools/glmtools/test/data/OR_GLM-L2-LCFA_G16_s20181830433400_e20181830434000_c20181830434029.nc'] l2s = [xr.open_dataset(ds) for ds in l2files] # The operational ellipse used for parllax was revised in October 2018, and had an equatorial height of 16 km above MSL before that time. # # To confirm that we are doing the same thing to the GLM imagery that is done to the L2 data, we will apply that earlir lightning ellipsoid here. If you want to use a different altitude, replace `ltg_ellps_re, ltg_ellps_rp` in the cell below with your own values. # + this_ellps=0 # equatorial and polar radii ltg_ellps_re, ltg_ellps_rp = lightning_ellipse_rev[this_ellps] # - # ### 1. Get the fixed grid position of each pixel. # # Get the fixed coordinates of the GLM imagery, and create a 2D mesh. The `(lat, lon)` center position of each pixel varies nonlinearly with repsect to the fixed grid, so we can't just use a 1D `(lat, lon)` array here. Later, one could interpolate to a regular `(lat, lon)` grid. # + x_1d = glm.x y_1d = glm.y x,y = np.meshgrid(x_1d, y_1d) # Two 2D arrays of fixed grid coordinates z=np.zeros_like(x) # - # #### Test our understanding of how fixed grid relates to the non-lightning ellipsoid. # # These `(lat, lon)` positions include full parallax. 
# +
def finite_max(x):
    # Max over only the finite elements; off-disk GLM grid pixels are NaN,
    # so a plain np.max would return NaN.
    good = np.isfinite(x)
    return np.max(x[good])

nadir = glm.nominal_satellite_subpoint_lon.data
print(nadir)
geofixCS, grs80lla = get_GOESR_coordsys(nadir)

# Round-trip fixed-grid angles -> ECEF -> (lon, lat, alt) on the standard earth.
lon,lat,alt=grs80lla.fromECEF(*geofixCS.toECEF(x,y,z))
lon.shape = x.shape
lat.shape = y.shape
print(np.nanmin(lon), finite_max(lon), np.nanmin(lat), finite_max(lat))

# Let's see if adding height to the lon lat coords gives different fixed grid
x_with_alt, y_with_alt, z_with_alt = geofixCS.fromECEF(*grs80lla.toECEF(lon,lat,alt+10.0e3))

# x and y don't change, but the z coordinate does, which is odd! What does that mean?
print(finite_max(np.abs(x_with_alt - x)))
print(finite_max(np.abs(y_with_alt - y)))
print(finite_max(np.abs(z_with_alt - z)))

# Ah, multiplying the z-delta by the satellite's height above earth gives 10 km.
print(np.abs(z_with_alt - z).max()*35786023.0)
# -

# ### 2. Define a new earth ellipsoid whose surface is the lightning ellipsoid.
#
# Adapt the built-in glmtools functions to include a parallax correction. The cell below
# is mostly a copy of the functions used in the cell above, but adjusted to allow for the
# use of a non-standard earth ellipsoid.

# +
def semiaxes_to_invflattening(semimajor, semiminor):
    """ Calculate the inverse flattening rf = a/(a-b) from the semi-major (a)
        and semi-minor (b) axes of an ellipse.

        NOTE(review): divides by zero for a perfect sphere (semimajor == semiminor).
    """
    rf = semimajor/(semimajor-semiminor)
    return rf

class GeostationaryFixedGridSystemAltEllipse(CoordinateSystem):
    """Geostationary fixed-grid (scan-angle) coordinates referenced to an
    arbitrary earth ellipsoid, e.g. the GLM lightning ellipsoid."""

    def __init__(self, subsat_lon=0.0, subsat_lat=0.0, sweep_axis='y',
                 sat_ecef_height=35785831.0,
                 semimajor_axis=None, semiminor_axis=None,
                 datum='WGS84'):
        """ Satellite height is with respect to an arbitrary ellipsoid whose
            shape is given by semimajor_axis (equatorial) and semiminor_axis (polar).

            Fixed grid coordinates are in radians.
        """
        rf = semiaxes_to_invflattening(semimajor_axis, semiminor_axis)
        print("Defining alt ellipse for Geostationary with rf=", rf)
        self.ECEFxyz = proj4.Proj(proj='geocent', a=semimajor_axis, rf=rf)
        self.fixedgrid = proj4.Proj(proj='geos', lon_0=subsat_lon,
            lat_0=subsat_lat, h=sat_ecef_height, x_0=0.0, y_0=0.0,
            units='m', sweep=sweep_axis,
            a=semimajor_axis, rf=rf)
        self.h=sat_ecef_height

    def toECEF(self, x, y, z):
        # Fixed-grid angles (radians) are scaled by the satellite height to get
        # the meter units the 'geos' projection expects.
        X, Y, Z = x*self.h, y*self.h, z*self.h
        return proj4.transform(self.fixedgrid, self.ECEFxyz, X, Y, Z)

    def fromECEF(self, x, y, z):
        # Inverse of toECEF: meters back to scan-angle radians.
        X, Y, Z = proj4.transform(self.ECEFxyz, self.fixedgrid, x, y, z)
        return X/self.h, Y/self.h, Z/self.h

class GeographicSystemAltEllps(CoordinateSystem):
    """
    Coordinate system defined on the surface of the earth using latitude,
    longitude, and altitude, referenced by default to the WGS84 ellipse.

    Alternately, specify the ellipse shape using an ellipse known
    to pyproj, or specify r_equator and r_pole directly (the branch below
    builds the ellipsoid from the two radii when either is given).
    """
    def __init__(self, ellipse='WGS84', datum='WGS84',
                 r_equator=None, r_pole=None):
        # NOTE(review): bitwise `|` is used here instead of logical `or`;
        # equivalent for these None checks, but `or` would be the usual idiom.
        if (r_equator is not None) | (r_pole is not None):
            rf = semiaxes_to_invflattening(r_equator, r_pole)
            print("Defining alt ellipse for Geographic with rf", rf)
            self.ERSlla = proj4.Proj(proj='latlong', #datum=datum,
                          a=r_equator, rf=rf)
            self.ERSxyz = proj4.Proj(proj='geocent', #datum=datum,
                          a=r_equator, rf=rf)
        else:
            # lat lon alt in some earth reference system
            self.ERSlla = proj4.Proj(proj='latlong', ellps=ellipse, datum=datum)
            self.ERSxyz = proj4.Proj(proj='geocent', ellps=ellipse, datum=datum)

    def toECEF(self, lon, lat, alt):
        projectedData = np.array(proj4.transform(self.ERSlla, self.ERSxyz, lon, lat, alt ))
        # Scalar inputs give a 1-D result; array inputs give a (3, N) result.
        if len(projectedData.shape) == 1:
            return projectedData[0], projectedData[1], projectedData[2]
        else:
            return projectedData[0,:], projectedData[1,:], projectedData[2,:]

    def fromECEF(self, x, y, z):
        projectedData = np.array(proj4.transform(self.ERSxyz, self.ERSlla, x, y, z ))
        if len(projectedData.shape) == 1:
            return projectedData[0], projectedData[1], projectedData[2]
        else:
            return projectedData[0,:], projectedData[1,:], projectedData[2,:]

def get_GOESR_coordsys_alt_ellps(sat_lon_nadir=-75.0):
    # Build the fixed-grid and geographic coordinate systems referenced to the
    # lightning ellipsoid. ltg_ellps_re / ltg_ellps_rp are the lightning-ellipsoid
    # radii defined earlier in the notebook (outside this cell).
    goes_sweep = 'x' # Meteosat is 'y'
    datum = 'WGS84'
    sat_ecef_height=35786023.0
    geofixcs = GeostationaryFixedGridSystemAltEllipse(subsat_lon=sat_lon_nadir,
                   semimajor_axis=ltg_ellps_re, semiminor_axis=ltg_ellps_rp,
                   datum=datum, sweep_axis=goes_sweep,
                   sat_ecef_height=sat_ecef_height)
    grs80lla = GeographicSystemAltEllps(r_equator=ltg_ellps_re, r_pole=ltg_ellps_rp,
                   datum='WGS84')
    return geofixcs, grs80lla
# -

# As we did for the MSL ellipsoid, check that we can round-trip the coordinates with
# minimal error using the lightning ellipsoid.

# +
geofix_ltg, lla_ltg = get_GOESR_coordsys_alt_ellps(nadir)
lon_ltg0,lat_ltg0,alt_ltg0=lla_ltg.fromECEF(*geofix_ltg.toECEF(x,y,z))
lon_ltg0.shape = x.shape
lat_ltg0.shape = y.shape

x_ltg, y_ltg, z_ltg = geofix_ltg.fromECEF(*lla_ltg.toECEF(lon_ltg0,lat_ltg0,alt_ltg0))

# x, y, and z don't change in round trip, which is good.
print(finite_max(np.abs(x_ltg - x)))
print(finite_max(np.abs(y_ltg - y)))
print(finite_max(np.abs(z_ltg - z)))
print(np.abs(z_ltg - z).max()*35786023.0)
print(finite_max(z_ltg))

# lon, lat, and alt from the two earths should be different for the same fixed grid angle.
print('should be nonzero except altitude')
print(finite_max(np.abs(lon_ltg0 - lon)))
print(finite_max(np.abs(lat_ltg0 - lat)))
print(finite_max(np.abs(alt_ltg0 - alt)))
# -

# ### 3. For each pixel, intersect the fixed grid angle with the lightning ellipsoid.
#
# Above, we found the latitude and longitude of each pixel on the lightning ellipsoid.
# However, what we actually want is the `(lat, lon)` of the position on the regular earth
# defined by the ECEF coords, which will have non-zero altitude.
#
# ### 4. Find the cartesian position of the intersection point with respect to the center of the earth.
This is an absolute 3D position that can be mapped to any other coordinates of interest. # # Below `X, Y, Z = geofix_ltg.toECEF(x,y,z)` gives the 3D cartesian step below. The `*` implicitly passes X, Y, Z to `fromECEF`. # # ### 5. Find the latitude, longitude, and altitude of the intersection point with respect to the MSL ellipsoid. # # Note that we use the GRS80 MSL ellipsoid instead of the lightning ellipsoid to find latitude and longitude. # + lon_ltg,lat_ltg,alt_ltg=grs80lla.fromECEF(*geofix_ltg.toECEF(x,y,z)) lon_ltg.shape = x.shape lat_ltg.shape = y.shape # lon, lat, and alt from the two earths should be different for the same fixed grid angle. print('should be nonzero except altitude') print(finite_max(np.abs(lon_ltg - lon))) print(finite_max(np.abs(lat_ltg - lat))) print(finite_max(np.abs(alt_ltg - alt))) # - # Confirm that the projections we've defined are using the ellipsoid we expect. print(geofix_ltg.ECEFxyz) print(geofix_ltg.fixedgrid) print(lla_ltg.ERSxyz) print(lla_ltg.ERSlla) # ### Plot the difference in latitude and longitude between the GRS80 MSL ellipsoid and the lightning ellipsoid. # + # %matplotlib inline # # %matplotlib widget import matplotlib.pyplot as plt fig, axs = plt.subplots(2,2, figsize=(9,5), dpi=144) im = axs[0,0].imshow(lon_ltg - lon, vmin=0, vmax=.2) axs[0,0].set_title('delta lon') plt.colorbar(im, ax=axs[0,0]) im = axs[0,1].imshow(lat_ltg - lat, vmin=-0.1, vmax=0) axs[0,1].set_title('delta lat') plt.colorbar(im, ax=axs[0,1]) im = axs[1,1].imshow(lat_ltg) axs[1,1].set_title('lat') plt.colorbar(im, ax=axs[1,1]) im = axs[1,0].imshow(lon_ltg) axs[1,0].set_title('lon') plt.colorbar(im, ax=axs[1,0]) # - # ### Compare our calculations to the original, lightning-ellipsoid corrected GLM L2 locations # # They should be the same. L2 data are plotted in squares. 
Define a centers_to_edges function to convert the center position of each pixel to the corner points of each pixel, as is necessary for accurately plotting those data with `pcolormesh`. # def centers_to_edges_2d(x): """ Create a (N+1, M+1) array of edge locations from a (N, M) array of grid center locations. In the interior, the edge positions set to the midpoints of the values in x. For the outermost edges, half the closest dx is assumed to apply. This matters for polar meshes, where one edge of the grid becomes a point at the polar coordinate origin; dx/2 is a half-hearted way of trying to prevent negative ranges. Useful when plotting with pcolor, which requires X, Y of shape (N+1) and grid center values of shape (N). Otherwise, pcolor silently discards the last row and column of grid center values. Parameters ---------- x : array, shape (N,M) Locations of the centers Returns ------- xedge : array, shape (N+1,M+1) """ xedge = np.zeros((x.shape[0]+1,x.shape[1]+1)) # interior is a simple average of four adjacent centers xedge[1:-1,1:-1] = (x[:-1,:-1] + x[:-1,1:] + x[1:,:-1] + x[1:,1:])/4.0 # /\ # /\/\ # / /\ \ # /\/ \/\ # / /\ /\ \ # /\/ \/ \/\ # / /\ /\ /\ \ # /\/ \/ \/ \/\ #4 \/\ /\ /\ /\/ 4 # 3 \ \/ \/ \/ / 3 # \/\ /\ /\/ # 2 \ \/ \/ / 2 # \/\ /\/ # 1 \ \/ / 1 # \/\/ # 0 \/ 0 = center ID of 0th dimension # # calculate the deltas along each edge, excluding corners xedge[1:-1,0] = xedge[1:-1, 1] - (xedge[1:-1, 2] - xedge[1:-1, 1])/2.0 xedge[1:-1,-1]= xedge[1:-1,-2] - (xedge[1:-1,-3] - xedge[1:-1,-2])/2.0 xedge[0,1:-1] = xedge[1,1:-1] - (xedge[2,1:-1] - xedge[1,1:-1])/2.0 xedge[-1,1:-1]= xedge[-2,1:-1] - (xedge[-3,1:-1] - xedge[-2,1:-1])/2.0 # now do the corners xedge[0,0] = xedge[1, 1] - (xedge[2, 2] - xedge[1, 1])/2.0 xedge[0,-1] = xedge[1,-2] - (xedge[2,-3] - xedge[1,-2])/2.0 xedge[-1,0] = xedge[-2,1] - (xedge[-3,2] - xedge[-2,1])/2.0 xedge[-1,-1]= xedge[-2,-2]- (xedge[-3,-3]- xedge[-2,-2])/2.0 return xedge lon_ltg_edge = centers_to_edges_2d(lon_ltg) 
lat_ltg_edge = centers_to_edges_2d(lat_ltg) # + x_sub = slice(500, None) y_sub = slice(None, None) fig, ax = plt.subplots(1,1,figsize=(9,5), dpi=144) im = ax.pcolormesh(lon_ltg_edge[y_sub, x_sub], lat_ltg_edge[y_sub, x_sub], np.log10(glm.flash_extent_density[y_sub, x_sub]), vmin=0, vmax=1.0) plt.colorbar(im, ax=ax) if True: for l2 in l2s: ax.plot(l2.event_lon, l2.event_lat, linestyle='', marker='s', markersize=3, linewidth=0.5, markeredgecolor='r', markerfacecolor='none') # ax.axis((-120, -65, 10, 55)) ax.axis((-103, -98, 30, 35)) # - # ### Illustrate the effect of no parallax correction # # For this example, it's an offset of about 10-20 km. lon_edge = centers_to_edges_2d(lon) lat_edge = centers_to_edges_2d(lat) # + x_sub = slice(500, None) y_sub = slice(None, None) fig, ax = plt.subplots(1,1,figsize=(9,5), dpi=144) im = ax.pcolormesh(lon_edge[y_sub, x_sub], lat_edge[y_sub, x_sub], np.log10(glm.flash_extent_density[y_sub, x_sub]), vmin=0, vmax=1.0) plt.colorbar(im, ax=ax) if True: for l2 in l2s: ax.plot(l2.event_lon, l2.event_lat, linestyle='', marker='s', markersize=3, linewidth=0.5, markeredgecolor='r', markerfacecolor='none') # ax.axis((-120, -65, 30, 35)) ax.axis((-103, -98, 30, 35)) # - # ### Also do a radar overlay to see if the lightning ellipsoid height does a good job # # Use KLBB NEXRAD L2 data converted to NetCDF format using PyART. 
klbb = xr.open_dataset('/Users/ebruning/Downloads/KLBB20180702_043208_V06.nc') # + sweep_id=0 this_sweep = slice(klbb.sweep_start_ray_index[sweep_id].data, klbb.sweep_end_ray_index[sweep_id].data) klbb_swp = klbb[{'time':this_sweep, 'sweep':sweep_id}] from lmatools.coordinateSystems import RadarCoordinateSystem rcs = RadarCoordinateSystem(klbb_swp.latitude.data, klbb_swp.longitude.data, klbb_swp.altitude.data) interior_points = slice(1,-1) r_edge = (klbb_swp.range[1:]+klbb_swp.range[:-1])/2.0 # range dim az_edge = (klbb_swp.azimuth[1:]+klbb_swp.azimuth[:-1])/2.0 # time dim dbz = klbb_swp.reflectivity[interior_points, interior_points] r_mesh, az_mesh = np.meshgrid(r_edge, az_edge) el_mesh = np.median(klbb_swp.elevation)*np.ones_like(r_mesh) radar_lon, radar_lat, radar_alt = grs80lla.fromECEF(*rcs.toECEF(r_mesh, az_mesh, el_mesh)) radar_lon.shape=r_mesh.shape radar_lat.shape=r_mesh.shape # + x_sub = slice(500, None) y_sub = slice(None, None) fig, ax = plt.subplots(1,1,figsize=(9,5), dpi=144) im = ax.pcolormesh(lon_ltg_edge[y_sub, x_sub], lat_ltg_edge[y_sub, x_sub], np.log10(glm.flash_extent_density[y_sub, x_sub]), vmin=0, vmax=1.0, alpha=0.5, zorder=10) plt.colorbar(im, ax=ax) radar_im=ax.pcolormesh(radar_lon, radar_lat, dbz, zorder=0, vmin=0, vmax=60, cmap='gist_ncar', alpha=0.5) plt.colorbar(radar_im, ax=ax) if False: for l2 in l2s: ax.plot(l2.event_lon, l2.event_lat, linestyle='', marker='s', markersize=3, linewidth=0.5, markeredgecolor='r', markerfacecolor='none') # ax.axis((-120, -65, 10, 55)) ax.axis((-103, -98, 30, 35)) # - # ### Rewriting to disk # # Add the 2D arrays back to the original dataset and save to disk. This doesn't have the CF standard name metadata or unit information, but that isn't too hard to add if you need it. 
# + # glm['longitude'] = xr.DataArray(lon_ltg, dims=('y', 'x')) # glm['latitude'] = xr.DataArray(lat_ltg, dims=('y', 'x')) # glm.to_netcdf('glm_aggregate.nc') # - # ## Interpolating to a target grid # # One might wish to interpolat to a regular latitude, longitude grid, which is a pretty standard operation given the `(lat,lon)` coordinate data provided above. # # All the usual caveats (artifacts, aliasing, etc.) with interpolation apply, though as oversampled as the GLM data are, it should be possible to do a pretty good job. (GLM pixels are nominally 10 km over CONUS, but are mapped to a 2 km fixed grid). The result below does indeed look pretty good, visually. dlat, dlon = 0.03, 0.03 reg_lon,reg_lat = np.meshgrid(np.arange(-103, -99, dlon), np.arange(30, 35, dlat)) reg_lat.shape #y, x dimensions # + from scipy.interpolate import griddata interp_loc = np.vstack((reg_lat.flatten(), reg_lon.flatten())).T # GLM variables are filled with nan everywhere there is no lightning, # so set those locations corresponding to valid earth locations to zero. # Also flip the north-south coordinate to match the fact that the GLM data # coordinate begins from the upper left instead of lower right corner. good = np.isfinite(lon_ltg[::-1, :]) data_loc = np.vstack((lat_ltg[::-1, :][good], lon_ltg[::-1, :][good])).T interp_data = glm.flash_extent_density.data[::-1, :][good] print(finite_max(interp_data)) interp_data[~np.isfinite(interp_data)] = 0 print(finite_max(interp_data)) # - interp_field = griddata(data_loc, interp_data, interp_loc, method='linear') interp_field.shape = reg_lon.shape fig,ax = plt.subplots(1,1,figsize=(7,7), dpi=144) plt.imshow(interp_field, extent=(reg_lon.min(), reg_lon.max(), reg_lat.min(), reg_lat.max()), origin='lower') # ax.axis((-700,-300,650,1000)) # ### Interpolating to a target grid - from an NWP model projection # # We can go one further, and interpolate directly from fixed grid to a model target grid. 
#
# **_This section is under development_** and the LCC coordinates derived here don't
# quite match the model. However, the interpolation looks pretty nice, so it's probably
# some small bug.
#
# The coordinate information for the [2.5 km HRRR run by NCEP is](https://thredds.ucar.edu/thredds/catalog/grib/NCEP/HRRR/CONUS_2p5km_ANA/latest.html)
# is in the Lambert Conformal projection on yet another earth ellipsoid - this time, spherical.
#
# We can use the siphon module to download a single variable and its coordinate
# information. Just use xr.open_dataset if you have the NetCDF data locally.

# +
from siphon.catalog import TDSCatalog
from datetime import datetime

vtime = datetime.strptime('2020020219','%Y%m%d%H')
model_url = "https://thredds.ucar.edu/thredds/catalog/grib/NCEP/HRRR/CONUS_2p5km_ANA/latest.html?dataset=grib/NCEP/HRRR/CONUS_2p5km_ANA/HRRR_CONUS_2p5km_ana_20200202_1900.grib2"
model = TDSCatalog(model_url)
ds = model.datasets[0]
ncss = ds.subset()
query = ncss.query()
query.accept('netcdf4')
query.time(vtime) # Set to the analysis hour only
query.add_lonlat()
query.variables('Categorical_freezing_rain_surface')
# 'x', 'y', 'LambertConformal_Projection', etc. come along for free.
data = ncss.get_data(query)
# -

hrrr = xr.open_dataset(xr.backends.NetCDF4DataStore(data))

print(hrrr.LambertConformal_Projection)
print('-----')
print(hrrr.x)
print('-----')
print(hrrr.y)

# +
# Reconstruct the HRRR's Lambert Conformal projection (spherical earth) from the
# grid-mapping attributes in the file.
hrrrproj={
    'lat_0':hrrr.LambertConformal_Projection.latitude_of_projection_origin,
    'lon_0':hrrr.LambertConformal_Projection.longitude_of_central_meridian,
    'lat_1':hrrr.LambertConformal_Projection.standard_parallel,
    'R':hrrr.LambertConformal_Projection.earth_radius,
}
from lmatools.coordinateSystems import MapProjection
lcc = MapProjection(projection='lcc', ctrLat=hrrrproj['lat_0'],ctrLon=hrrrproj['lon_0'],
                    **hrrrproj)
# -

# GLM fixed-grid pixel centers (on the lightning ellipsoid) mapped into the HRRR's
# LCC map coordinates.
lccx,lccy,lccz = lcc.fromECEF(*geofix_ltg.toECEF(x,y,z))
lccx.shape=x.shape
lccy.shape=y.shape

lccx.shape, glm.flash_extent_density.shape

# The result of the above calculation is the exact center location of each image pixel
# in fixed grid coordinates. As shown in the plot below, this results in an exact
# mapping of the GLM CCD pixel shapes to the LCC grid. This can be used to drive an
# interpolation to the LCC model grid.

lccx_edge = centers_to_edges_2d(lccx)
lccy_edge = centers_to_edges_2d(lccy)

fig,ax = plt.subplots(1,1,figsize=(9,7), dpi=144)
ax.pcolormesh(lccx_edge[y_sub,x_sub]/1000.0, lccy_edge[y_sub,x_sub]/1000.0,
              glm.flash_extent_density[y_sub,x_sub])
ax.axis((-700,-300,650,1000))

# +
from scipy.interpolate import griddata

hrrrx, hrrry = np.meshgrid(hrrr.x, hrrr.y)
interp_loc = np.vstack((hrrrx.flatten(), hrrry.flatten())).T

# GLM variables are filled with nan everywhere there is no lightning,
# so set those locations corresponding to valid earth locations to zero.
# Also flip the north-south coordinate to match the fact that the GLM data
# coordinate begins from the upper left instead of lower right corner.
#
# BUGFIX(review): the flip must be applied consistently. Previously the mask
# `good` was computed from the flipped lccx but then applied to the *unflipped*
# lccx and flash_extent_density (and the flipped lccy), silently pairing each
# x coordinate and data value with the wrong y coordinate. All three arrays are
# now flipped before masking, matching the lat/lon interpolation cell above.
good = np.isfinite(lccx[::-1,:])
data_loc = np.vstack((lccx[::-1,:][good]/1e3, lccy[::-1,:][good]/1e3)).T
interp_data = glm.flash_extent_density.data[::-1,:][good]
print(interp_data.max())
interp_data[~np.isfinite(interp_data)] = 0
interp_field = griddata(data_loc, interp_data, interp_loc, method='linear')
# -

interp_field.shape=hrrrx.shape

# +
# Sanity checks: value range of the data, extents of source and target point
# clouds, and mask/shape bookkeeping.
print(interp_data.max())
print(data_loc[:,0].min(), data_loc[:,0].max(), data_loc[:,1].min(), data_loc[:,1].max())
print(interp_loc[:,0].min(), interp_loc[:,0].max(), interp_loc[:,1].min(), interp_loc[:,1].max())
print(good.sum(), (~good).sum(), np.isfinite(interp_data).sum())
print(hrrr.x.shape)
print(hrrrx.shape)
print(finite_max(interp_field))
print(interp_loc.shape)
# -

fig,ax = plt.subplots(1,1,figsize=(9,7), dpi=144)
plt.imshow(interp_field, extent=(hrrrx.min(), hrrrx.max(), hrrry.min(), hrrry.max()))
# ax.axis((-700,-300,650,1000))
24,759
/63_preprocessing.ipynb
5aef895de0ab750e2aa34c45f7a5f5b3dcfd1337
[]
no_license
bjpcjp/scikit-learn-0.24
https://github.com/bjpcjp/scikit-learn-0.24
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
863,784
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python [conda env:working] * # language: python # name: conda-env-working-py # --- # ### [Preprocessing](https://scikit-learn.org/stable/modules/preprocessing.html) # # ### [Standard Scaling](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html#sklearn.preprocessing.StandardScaler) # # - Many Scikit-learn estimators behave badly if features are normally distributed (Gaussian with zero mean and unit variance) data. # # - We often ignore the shape of the distribution and simply center the data by removing the mean value of each feature, then scale it by dividing non-constant features by their standard deviation. # # - `StandardScaler` is a quick and easy way to standardize an array-like dataset. # from sklearn import preprocessing # import numpy as np # X_train = np.array([[ 1., -1., 2.], # [ 2., 0., 0.], # [ 0., 1., -1.]]) # # scaler = preprocessing.StandardScaler().fit(X_train) # print(scaler,"\n",scaler.mean_,"\n",scaler.scale_) # # X_scaled = scaler.transform(X_train) # print(X_scaled) # - scaled data has zero mean & unit variance. print(X_scaled.mean(axis=0)) print(X_scaled.std(axis=0)) # - This class implements the Transformer API to compute the mean and standard deviation on a training set, then re-apply the same transformation on the testing set. This class is hence suitable for use in the early steps of a Pipeline. 
# + from sklearn.datasets import make_classification from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler X, y = make_classification(random_state=42) X_train, X_test, y_train, y_test = train_test_split( X, y, random_state=42) pipe = make_pipeline(StandardScaler(), LogisticRegression()).fit(X_train, y_train) pipe.score(X_test, y_test) # - # ### [Min-Max Scaling](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html#sklearn.preprocessing.MinMaxScaler) and [Max Abs Scaling](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MaxAbsScaler.html#sklearn.preprocessing.MaxAbsScaler) # # - Another method is to scale between a min & max value (often 0-1), or so that a max absolute value of each feature is scaled to unit size. # # - This provides robustness to very small standard deviations of features and preserving zero entries in sparse data. # + X_train = np.array([[ 1., -1., 2.], [ 2., 0., 0.], [ 0., 1., -1.]]) min_max_scaler = preprocessing.MinMaxScaler() X_train_minmax = min_max_scaler.fit_transform(X_train) X_train_minmax # - # - The transformer instance can then be applied to new test data unseen during the fit: the same scaling and shifting operations will be applied to be consistent with the transformation performed on the training data. X_test = np.array([[-3., -1., 4.]]) X_test_minmax = min_max_scaler.transform(X_test) X_test_minmax # - Viewing the scaler attributes: print(min_max_scaler.scale_) print(min_max_scaler.min_) # - [MaxAbsScaler](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MaxAbsScaler.html#sklearn.preprocessing.MaxAbsScaler) scales such that training data lies within [-1,+1] by dividing through the largest max value in each feature. It is meant for data already centered at zero, or sparse data. 
# X_train = np.array([[ 1., -1., 2.], # [ 2., 0., 0.], # [ 0., 1., -1.]]) # # max_abs_scaler = preprocessing.MaxAbsScaler() # X_train_maxabs = max_abs_scaler.fit_transform(X_train) # print(X_train_maxabs) # # X_test = np.array([[ -3., -1., 4.]]) # X_test_maxabs = max_abs_scaler.transform(X_test) # print(X_test_maxabs) # # print(max_abs_scaler.scale_) # ### Scaling sparse data # # - Centering sparse data destroys the sparseness - it is rarely is a sensible thing to do. However, it can make help if features are on different scales. # # - `MaxAbsScaler` was designed for scaling sparse data. `StandardScaler` can also accept `scipy.sparse` matrix inputs (as long as `with_mean=False` is used). Otherwise a `ValueError` will be raised - silently centering would break the sparsity and can crash execution by allocating too much memory unintentionally. `RobustScaler` cannot be fitted to sparse inputs, but you can use the `transform` method on sparse inputs. # # - Scalers accept both CSR and CSC format (see `scipy.sparse.csr_matrix` and `scipy.sparse.csc_matrix`). Any other sparse input will be converted to the CSR format. Choose the CSR or CSC representation upstream to avoid unnecessary memory copies. # # - If the centered data is expected to be small, explicitly converting the input to an array using `toarray` of sparse matrices is another option. # ### Scaling with outliers with [Robust Scaler](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html#sklearn.preprocessing.RobustScaler) # # - If your data contains many outliers, mean/variance scaling probably will not work very well - use `RobustScaler` as a drop-in replacement. It uses more robust estimates for the center and range of your data. 
from sklearn.preprocessing import RobustScaler X = [[ 1., -2., 2.], [ -2., 1., 3.], [ 4., 1., -2.]] transformer = RobustScaler().fit(X) transformer.transform(X) # ### Scaling kernel matrices with [KernelCenterer](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.KernelCenterer.html#sklearn.preprocessing.KernelCenterer) # # - If you have a matrix of a kernel $K$ that computes a dot product in a feature space defined by function $\phi$, `KernelCenterer` can transform the matrix so that it contains inner products in the feature space defined by $\phi$ followed by removal of the mean in that space. # + from sklearn.preprocessing import KernelCenterer from sklearn.metrics.pairwise import pairwise_kernels X = [[ 1., -2., 2.], [ -2., 1., 3.], [ 4., 1., -2.]] K = pairwise_kernels(X, metric='linear'); print(K) transformer = KernelCenterer().fit(K) transformer transformer.transform(K) # - # ### [Quantile Transforms](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.QuantileTransformer.html#sklearn.preprocessing.QuantileTransformer) # # - Puts all features into the same distribution based on $G^{-1}(F(X))$ where $F$ is a *cumulative distribution function*, $G$ the desired output distribution, and $G^{-1}$ is the quantile function of the output function. # # - It computes a rank transformation, which smooths out unusual distributions & is more robust to outliers. # # - It does, however, distort correlations & distances within/across features. 
# # ### Quantile Mapping to a Uniform [0..1] Distribution # + from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split as TTS from sklearn.preprocessing import QuantileTransformer as QT X, y = load_iris(return_X_y=True) X_train, X_test, y_train, y_test = TTS(X, y, random_state=0) qt = QT(random_state=0) X_train_trans = qt.fit_transform(X_train) X_test_trans = qt.transform(X_test) np.percentile(X_train[:, 0], [0, 25, 50, 75, 100]) # - # ### [Power Mapping](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PowerTransformer.html#sklearn.preprocessing.PowerTransformer) to a Gaussian Distribution # # - Power transforms are a family of parametric, monotonic transformations that map data from any distribution to an approximated Gaussian to stabilize variance and minimize skewness. # # - Two transforms are available: # # - *Yeo-Johnson*: $\begin{split}x_i^{(\lambda)} = # \begin{cases} # [(x_i + 1)^\lambda - 1] / \lambda & \text{if } \lambda \neq 0, x_i \geq 0, \\[8pt] # \ln{(x_i + 1)} & \text{if } \lambda = 0, x_i \geq 0 \\[8pt] # -[(-x_i + 1)^{2 - \lambda} - 1] / (2 - \lambda) & \text{if } \lambda \neq 2, x_i < 0, \\[8pt] # - \ln (- x_i + 1) & \text{if } \lambda = 2, x_i < 0 # \end{cases}\end{split}$ # # - *Box-Cox*: $\begin{split}x_i^{(\lambda)} = # \begin{cases} # \dfrac{x_i^\lambda - 1}{\lambda} & \text{if } \lambda \neq 0, \\[8pt] # \ln{(x_i)} & \text{if } \lambda = 0, # \end{cases}\end{split}$ # # - Box-Cox can only be applied to positive data. # # - Both transforms are controlled via $\lambda$, which is found via *max likelihood estimation*. # ### [Example: Map data to Normal Distributions (Box-Cox, Yeo-Johnson)](https://scikit-learn.org/stable/auto_examples/preprocessing/plot_map_data_to_normal.html) # # - Power transforms are useful when [homoscedasticity](https://en.wikipedia.org/wiki/Homoscedasticity) & normality are needed. 
# # - Below: Cox-Box & Yeo-Johnson transforms applied to *lognormal*, *chi-squared*, *weibull*, *gaussian*, *uniform* and *bimodal* distributions. # # - Success depends on the dataset. Highlights importance of before/after visualization. # # - Quantile Transformer forces any arbitrary distribution into a Gaussian *if enough samples are available*. It is prone to *overfitting* on small datasets - consider using a power transform instead. # + import numpy as np import matplotlib.pyplot as plt from sklearn.preprocessing import PowerTransformer as PT from sklearn.preprocessing import QuantileTransformer as QT from sklearn.model_selection import train_test_split as TTS # + # n_quantiles is set to the training set size rather than the default value # to avoid a warning being raised by this example N_SAMPLES = 1000 FONT_SIZE = 9 BINS = 30 rng = np.random.RandomState(304) bc = PT(method='box-cox') yj = PT(method='yeo-johnson') qt = QT(n_quantiles=500, output_distribution='normal', random_state=rng) size = (N_SAMPLES, 1) # distributions X_lognormal = rng.lognormal( size=size) X_chisq = rng.chisquare(df=3, size=size) X_weibull = rng.weibull( a=50, size=size) X_gaussian = rng.normal(loc=100, size=size) X_uniform = rng.uniform(low=0, high=1, size=size) X_a, X_b = rng.normal(loc=100, size=size), rng.normal(loc=105, size=size) X_bimodal = np.concatenate([X_a, X_b], axis=0) # + distributions = [ ('Lognormal', X_lognormal), ('Chi-squared', X_chisq), ('Weibull', X_weibull), ('Gaussian', X_gaussian), ('Uniform', X_uniform), ('Bimodal', X_bimodal) ] colors = ['#D81B60', '#0188FF', '#FFC107', '#B7A2FF', '#000000', '#2EC5AC'] #fig, axes = plt.subplots(nrows=8, ncols=3, figsize=plt.figaspect(2)) fig, axes = plt.subplots(nrows=8, ncols=3, figsize=(10,20)) axes = axes.flatten() axes_idxs = [(0, 3, 6, 9), (1, 4, 7, 10), (2, 5, 8, 11), (12, 15, 18, 21), (13, 16, 19, 22), (14, 17, 20, 23)] axes_list = [(axes[i], axes[j], axes[k], axes[l]) for (i, j, k, l) in axes_idxs] for distribution, 
color, axes in zip(distributions, colors, axes_list): name, X = distribution X_train, X_test = TTS(X, test_size=.5) # perform power transforms and quantile transform X_trans_bc = bc.fit(X_train).transform(X_test) X_trans_yj = yj.fit(X_train).transform(X_test) X_trans_qt = qt.fit(X_train).transform(X_test) lmbda_bc = round(bc.lambdas_[0], 2) lmbda_yj = round(yj.lambdas_[0], 2) ax_original, ax_bc, ax_yj, ax_qt = axes ax_original.hist(X_train, color=color, bins=BINS) ax_original.set_title(name, fontsize=FONT_SIZE) ax_original.tick_params(axis='both', which='major', labelsize=FONT_SIZE) for ax, X_trans, meth_name, lmbda in zip( (ax_bc, ax_yj, ax_qt), (X_trans_bc, X_trans_yj, X_trans_qt), ('Box-Cox', 'Yeo-Johnson', 'Quantile transform'), (lmbda_bc, lmbda_yj, None)): ax.hist(X_trans, color=color, bins=BINS) title = 'After {}'.format(meth_name) if lmbda is not None: title += r'\n$\lambda$ = {}'.format(lmbda) ax.set_title(title, fontsize=FONT_SIZE) ax.tick_params(axis='both', which='major', labelsize=FONT_SIZE) ax.set_xlim([-3.5, 3.5]) plt.tight_layout() # - # - [Quantile Transformer](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.QuantileTransformer.html#sklearn.preprocessing.QuantileTransformer) can also map data to a normal distribution with `output_distribution='normal'`. # + qt = QT(output_distribution='normal', random_state=0) X_trans = qt.fit_transform(X) print(qt.quantiles_) # - # ### [Normalization](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Normalizer.html#sklearn.preprocessing.Normalizer) # # - Defined as the process of scaling individual samples to have *unit norm*. This is useful when you use a quadratic method (ex: dot-product) to measure the similarity of any sample pair. # # - `normalize` transforms an array using `l1`, 'l2` or 'max` norms. # # - `Normalizer` does the same using the `Transformer` API - it is therefore useful in pipelines. # # - Both accept dense & sparse matrix inputs. 
# + from sklearn.preprocessing import normalize as Norm, Normalizer as Normlzr X = [[ 1., -1., 2.], [ 2., 0., 0.], [ 0., 1., -1.]] X_normalized = Norm(X, norm='l2') print(X_normalized,"\n") normalizer = Normlzr().fit(X) # fit does nothing normalizer.transform(X) print(normalizer.transform([[-1.0, 1.0, 0.0]])) # - # ### [Categories to Integers](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html#sklearn.preprocessing.OrdinalEncoder) # # - Use `OrdinalEncoder` to transform category names to 0-*n_categories-1*. # + enc = preprocessing.OrdinalEncoder() X = [['male', 'from US', 'uses Safari'], ['female', 'from Europe', 'uses Firefox']] enc.fit(X) enc.transform([['female', 'from US', 'uses Safari']]) # - # ### [Categories to one-of-K ("One Hot")](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html#sklearn.preprocessing.OneHotEncoder) # # - use `OneHotEncoder` to transform category names into `n_category` binary features with one equal to 1, the rest equal to 0. # + enc = preprocessing.OneHotEncoder() X = [['male', 'from US', 'uses Safari'], ['female', 'from Europe', 'uses Firefox']] enc.fit(X) enc.transform([['female', 'from US', 'uses Safari'], ['male', 'from Europe', 'uses Safari']]).toarray() # - # - Feature values are inferred automatically from the dataset. # - They can also be specified using `categories`. 
enc.categories_ # + genders = ['female', 'male'] locations = ['from Africa', 'from Asia', 'from Europe', 'from US'] browsers = ['uses Chrome', 'uses Firefox', 'uses IE', 'uses Safari'] enc = preprocessing.OneHotEncoder(categories=[genders, locations, browsers]) # Note: missing categorical values for the 2nd and 3rd feature X = [['male', 'from US', 'uses Safari'], ['female', 'from Europe', 'uses Firefox']] enc.fit(X).transform([['female', 'from Asia', 'uses Chrome']]).toarray() # - # - If the training data has missing category features, it's better to specify `handle_unknown='ignore'` instead of manually setting categories. # # - In this approach, unknown categories will be coded with all zeroes. # + enc = preprocessing.OneHotEncoder(handle_unknown='ignore') X = [['male', 'from US', 'uses Safari'], ['female', 'from Europe', 'uses Firefox']] enc.fit(X).transform([['female', 'from Asia', 'uses Chrome']]).toarray() # - # - `drop` allows encoding columns into `n_categories-1` columns by specifying a category for each feature to be dropped. # # - This helps avoid input matrix co-linearity. This is useful for example when using *Linear Regression* - co-linearity can cause a covariance matrix to be non-invertible. # + X = [['male', 'from US', 'uses Safari'], ['female', 'from Europe', 'uses Firefox']] drop_enc = preprocessing.OneHotEncoder(drop='first').fit(X) print(drop_enc.categories_) print(drop_enc.transform(X).toarray()) # - # - Use `drop='if_binary'` if you want to drop columns for features with 2 categories. 
# + X = [['male', 'US', 'Safari'], ['female', 'Europe', 'Firefox'], ['female', 'Asia', 'Chrome']] drop_enc = preprocessing.OneHotEncoder(drop='if_binary').fit(X) print(drop_enc.categories_) print(drop_enc.transform(X).toarray()) # - # - `OneHotEncoder` supports missing values by considering them as an additional category: # + X = [['male', 'Safari'], ['female', None], [np.nan, 'Firefox']] enc = preprocessing.OneHotEncoder(handle_unknown='error').fit(X) print(enc.categories_) print(enc.transform(X).toarray()) # - # - If a feature contains both np.nan and None, they will treated separately. # + X = [['Safari'], [None], [np.nan], ['Firefox']] enc = preprocessing.OneHotEncoder(handle_unknown='error').fit(X) print(enc.categories_) print(enc.transform(X).toarray()) # - # ### Quantization, aka Binning # # [KBinsDiscretizer](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.KBinsDiscretizer.html#sklearn.preprocessing.KBinsDiscretizer) partitions features into $k$ bins. # # - The output is, by default, one-hot encoded into a sparse matrix. This is controlled with `encode`. # # - The bin edges are computed during `fit` and define the intervals (along with the number of bins.) # + X = np.array([[ -3., 5., 15 ], [ 0., 6., 14 ], [ 6., 3., 11 ]]) est = preprocessing.KBinsDiscretizer(n_bins=[3, 2, 2], encode='ordinal').fit(X) est.transform(X) # - # - Discretization is similar to constructing histograms for continuous data. # - *histograms focus on counting features* in particular bins. # - *discretization assigns feature values to these bins*. # # # - `KBinsDiscretizer` uses `strategy` to select a binning strategy. # # - ‘uniform’: uses constant-width bins. # - ‘quantile’: uses the quantiles values to have equally populated bins. # - ‘kmeans’: bins based on independent k-means clustering for each feature. # # # - You can specify custom bins with a callable (in this case, `pandas.cut`) to `FunctionTransformer`. 
# + import pandas as pd import numpy as np bins = [0, 1, 13, 20, 60, np.inf] labels = ['infant', 'kid', 'teen', 'adult', 'senior citizen'] transformer = preprocessing.FunctionTransformer( pd.cut, kw_args={'bins': bins, 'labels': labels, 'retbins': False}) X = np.array([0.2, 2, 15, 25, 97]) transformer.fit_transform(X) # - # ### [Example: Binning Continuous Features with KBinsDiscretizer](https://scikit-learn.org/stable/auto_examples/preprocessing/plot_discretization.html#sphx-glr-auto-examples-preprocessing-plot-discretization-py) # # - Compare predictions of linear regression (LR) and decision tree (DT), with and without discretization of real-valued features. # # - LRs are easy to build & interpret, but can only model linear relationships. DTs can build a more complex models. # # - Binning is one way to make LRs more powerful on continuous data. If the bins are not reasonably wide, there is an increased risk of overfitting - so the discretizer should be tuned under with validation. # # - After binning, LR & DT make exactly the same prediction. As features are constant within each bin, any model must predict the same value for all points within a bin. # # - After binning, LR become much more flexible; DT gets much less flexible. Binning features usually have no benefit for DTs - these models can learn to split up the data anywhere. 
# + import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression from sklearn.preprocessing import KBinsDiscretizer from sklearn.tree import DecisionTreeRegressor # + rnd = np.random.RandomState(42) X = rnd.uniform(-3, 3, size=100) y = np.sin(X) + rnd.normal(size=len(X)) / 3 X = X.reshape(-1, 1) enc = KBinsDiscretizer(n_bins=10, encode='onehot') X_binned = enc.fit_transform(X) # + # predict with original dataset line = np.linspace(-3, 3, 1000, endpoint=False).reshape(-1, 1) reg = LinearRegression().fit(X, y) fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True, figsize=(10, 4)) ax1.plot(line, reg.predict(line), linewidth=2, color='green', label="linear regression") reg = DecisionTreeRegressor(min_samples_split=3, random_state=0).fit(X, y) ax1.plot(line, reg.predict(line), linewidth=2, color='red', label="decision tree") ax1.plot(X[:, 0], y, 'o', c='k') ax1.legend(loc="best") ax1.set_ylabel("Regression output") ax1.set_xlabel("Input feature") ax1.set_title("Result before discretization") # predict with transformed dataset line_binned = enc.transform(line) reg = LinearRegression().fit(X_binned, y) ax2.plot(line, reg.predict(line_binned), linewidth=2, color='green', linestyle='-', label='linear regression') reg = DecisionTreeRegressor(min_samples_split=3, random_state=0).fit(X_binned, y) ax2.plot(line, reg.predict(line_binned), linewidth=2, color='red', linestyle=':', label='decision tree') ax2.plot(X[:, 0], y, 'o', c='k') ax2.vlines(enc.bin_edges_[0], *plt.gca().get_ylim(), linewidth=1, alpha=.2) ax2.legend(loc="best") ax2.set_xlabel("Input feature") ax2.set_title("Result after discretization") plt.tight_layout() # - # ### [Example: Feature discretization](https://scikit-learn.org/stable/auto_examples/preprocessing/plot_discretization_classification.html#sphx-glr-auto-examples-preprocessing-plot-discretization-classification-py) # # - Feature discretization decomposes each feature into a set of equally distributed (width) bins. 
The values are one-hot encoded and given to a linear classifier. The preprocessing enables modeling non-linear behavior even though the classifier is linear. # # - The first two rows represent linearly non-separable datasets (moons and concentric circles); the third is approximately linearly separable. # # - Feature discretization increases the linear classifier performance on the non-separable datasets, but decreases performance on the third. Two non-linear classifiers are also shown for comparison. # # - This is not a great example - the intuition conveyed does not carry over to real datasets. # # - High-D data can more easily be separated linearly. # - Feature discretization and one-hot encoding increases the number of features, which easily lead to overfitting when the number of samples is small. # # - Plots: training points = solid colors; testing points = semi-transparent. The lower right shows the classification accuracy on the test set. import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.datasets import make_moons, make_circles, make_classification from sklearn.linear_model import LogisticRegression from sklearn.model_selection import GridSearchCV from sklearn.pipeline import make_pipeline from sklearn.preprocessing import KBinsDiscretizer from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import GradientBoostingClassifier from sklearn.utils._testing import ignore_warnings from sklearn.exceptions import ConvergenceWarning # + h = .02 # step size in the mesh def get_name(estimator): name = estimator.__class__.__name__ if name == 'Pipeline': name = [get_name(est[1]) for est in estimator.steps] name = ' + '.join(name) return name classifiers = [ (LogisticRegression(random_state=0), { 'C': np.logspace(-2, 7, 10) }), (LinearSVC(random_state=0), { 'C': np.logspace(-2, 7, 10) }), (make_pipeline( 
KBinsDiscretizer(encode='onehot'), LogisticRegression(random_state=0)), { 'kbinsdiscretizer__n_bins': np.arange(2, 10), 'logisticregression__C': np.logspace(-2, 7, 10), }), (make_pipeline( KBinsDiscretizer(encode='onehot'), LinearSVC(random_state=0)), { 'kbinsdiscretizer__n_bins': np.arange(2, 10), 'linearsvc__C': np.logspace(-2, 7, 10), }), (GradientBoostingClassifier(n_estimators=50, random_state=0), { 'learning_rate': np.logspace(-4, 0, 10) }), (SVC(random_state=0), { 'C': np.logspace(-2, 7, 10) }), ] names = [get_name(e) for e, g in classifiers] n_samples = 100 datasets = [ make_moons(n_samples=n_samples, noise=0.2, random_state=0), make_circles(n_samples=n_samples, noise=0.2, factor=0.5, random_state=1), make_classification(n_samples=n_samples, n_features=2, n_redundant=0, n_informative=2, random_state=2, n_clusters_per_class=1) ] # + fig, axes = plt.subplots(nrows=len(datasets), ncols=len(classifiers) + 1, figsize=(21, 9)) cm = plt.cm.PiYG cm_bright = ListedColormap(['#b30065', '#178000']) # iterate over datasets for ds_cnt, (X, y) in enumerate(datasets): print('\ndataset %d\n---------' % ds_cnt) # preprocess dataset, split into training and test part X = StandardScaler().fit_transform(X) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=.5, random_state=42) # create the grid for background colors x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 xx, yy = np.meshgrid( np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # plot the dataset first ax = axes[ds_cnt, 0] if ds_cnt == 0: ax.set_title("Input data") # plot the training points ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors='k') # and testing points ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6, edgecolors='k') ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) # iterate over classifiers for est_idx, (name, 
(estimator, param_grid)) in \ enumerate(zip(names, classifiers)): ax = axes[ds_cnt, est_idx + 1] clf = GridSearchCV(estimator=estimator, param_grid=param_grid) with ignore_warnings(category=ConvergenceWarning): clf.fit(X_train, y_train) score = clf.score(X_test, y_test) print('%s: %.2f' % (name, score)) # plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max]*[y_min, y_max]. if hasattr(clf, "decision_function"): Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) else: Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1] # put the result into a color plot Z = Z.reshape(xx.shape) ax.contourf(xx, yy, Z, cmap=cm, alpha=.8) # plot the training points ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors='k') # and testing points ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, edgecolors='k', alpha=0.6) ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) if ds_cnt == 0: ax.set_title(name.replace(' + ', '\n')) ax.text(0.95, 0.06, ('%.2f' % score).lstrip('0'), size=15, bbox=dict(boxstyle='round', alpha=0.8, facecolor='white'), transform=ax.transAxes, horizontalalignment='right') plt.tight_layout() # Add suptitles above the figure plt.subplots_adjust(top=0.90) suptitles = [ 'Linear classifiers', 'Feature discretization and linear classifiers', 'Non-linear classifiers', ] for i, suptitle in zip([1, 3, 5], suptitles): ax = axes[0, i] ax.text(1.05, 1.25, suptitle, transform=ax.transAxes, horizontalalignment='center', size='x-large') # - # ### [KBinsDiscretizer strategy comparisons](https://scikit-learn.org/stable/auto_examples/preprocessing/plot_discretization_strategies.html#sphx-glr-auto-examples-preprocessing-plot-discretization-strategies-py) # # - 'uniform' vs 'quantile' vs 'kmeans' # - The plots show regions where the encoding is constant. 
# + import numpy as np import matplotlib.pyplot as plt from sklearn.preprocessing import KBinsDiscretizer from sklearn.datasets import make_blobs # + strategies = ['uniform', 'quantile', 'kmeans'] n_samples = 200 centers_0 = np.array([[0, 0], [0, 5], [2, 4], [8, 8]]) centers_1 = np.array([[0, 0], [3, 1]]) # construct the datasets random_state = 42 X_list = [ np.random.RandomState(random_state).uniform(-3, 3, size=(n_samples, 2)), make_blobs(n_samples=[n_samples // 10, n_samples * 4 // 10, n_samples // 10, n_samples * 4 // 10], cluster_std=0.5, centers=centers_0, random_state=random_state)[0], make_blobs(n_samples=[n_samples // 5, n_samples * 4 // 5], cluster_std=0.5, centers=centers_1, random_state=random_state)[0], ] # + figure = plt.figure(figsize=(14, 9)) i = 1 for ds_cnt, X in enumerate(X_list): ax = plt.subplot(len(X_list), len(strategies) + 1, i) ax.scatter(X[:, 0], X[:, 1], edgecolors='k') if ds_cnt == 0: ax.set_title("Input data", size=14) xx, yy = np.meshgrid( np.linspace(X[:, 0].min(), X[:, 0].max(), 300), np.linspace(X[:, 1].min(), X[:, 1].max(), 300)) grid = np.c_[xx.ravel(), yy.ravel()] ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) i += 1 # transform the dataset with KBinsDiscretizer for strategy in strategies: enc = KBinsDiscretizer(n_bins=4, encode='ordinal', strategy=strategy) enc.fit(X) grid_encoded = enc.transform(grid) ax = plt.subplot(len(X_list), len(strategies) + 1, i) # horizontal stripes horizontal = grid_encoded[:, 0].reshape(xx.shape) ax.contourf(xx, yy, horizontal, alpha=.5) # vertical stripes vertical = grid_encoded[:, 1].reshape(xx.shape) ax.contourf(xx, yy, vertical, alpha=.5) ax.scatter(X[:, 0], X[:, 1], edgecolors='k') ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) if ds_cnt == 0: ax.set_title("strategy='%s'" % (strategy, ), size=14) i += 1 plt.tight_layout() # - # ### [Feature Binarization]() # # - Feature binarization 
*thresholds numerical features to get boolean values*. This is useful for downstream probabilistic estimators if input data can be described with a *multi-variate Bernoulli distribution*. For instance, this is the case for the BernoulliRBM. # # - Text processing tasks use binary feature values (probably to simplify the probabilistic reasoning) even if normalized counts (a.k.a. term frequencies) or TF-IDF valued features often perform slightly better in practice. # # - `Binarizer` is meant to be used in the early stages of a Pipeline. The fit method does nothing - each sample is treated independently of others. # + X = [[ 1., -1., 2.], [ 2., 0., 0.], [ 0., 1., -1.]] binarizer = preprocessing.Binarizer().fit(X) # fit does nothing print(binarizer) print(binarizer.transform(X)) # binarizer with adjusted threshold binarizer = preprocessing.Binarizer(threshold=1.1) print(binarizer.transform(X)) # - # ### [Generating polynomial features](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html#sklearn.preprocessing.PolynomialFeatures) # # - Example 1: transform X from $(X_1,X_2)$ to $(1,X_1,X_2,X^2_1,X_1X_2,X^2_2)$ # - Example 2: (only feature interaction terms are required, via `interaction_only=True`): X features transformed from $(X_1, X_2, X_3)$ to $(1, X_1, X_2, X_3, X_1X_2, X_1X_3, X_2X_3, X_1X_2X_3)$. # # - Polynomial features are used implicitly in [kernel methods](https://en.wikipedia.org/wiki/Kernel_method) when using polynomial [kernel functions](https://scikit-learn.org/stable/modules/svm.html#svm-kernels). # + import numpy as np from sklearn.preprocessing import PolynomialFeatures X = np.arange(6).reshape(3, 2); print(X,"\n") poly = PolynomialFeatures(2).fit_transform(X) print(poly) print() # in some cases, only feature interaction terms are needed. 
X = np.arange(9).reshape(3, 3); print(X,"\n") poly = PolynomialFeatures(degree=3, interaction_only=True).fit_transform(X) print(poly) # - # ### [Custom Transformers](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.FunctionTransformer.html#sklearn.preprocessing.FunctionTransformer) # # - Use `FunctionTransformer` to convert an existing Python function into a transformer. # # - ensure `func` & `inverse_func` are indeed inverses by setting `check_inverse=True` and calling `fit` before `transform`. A warning is raised, and can be turned into an error with `filterwarnings`. # + import warnings import numpy as np from sklearn.preprocessing import FunctionTransformer as FT warnings.filterwarnings("error", message=".*check_inverse*.", category=UserWarning, append=False) transformer = FT(np.log1p, validate=True) X = np.array([[0, 1], [2, 3]]) transformer.transform(X)
33,915
/Classification/week1/scikit-learn-implementation.ipynb
25c3c72f5170b20819622682274598109fd619f1
[]
no_license
YikSanChan/Coursera-UW-Machine-Learning
https://github.com/YikSanChan/Coursera-UW-Machine-Learning
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
27,232
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import pandas as pd import numpy as np import math import string import json # ## Get Data products = pd.read_csv('amazon_baby.csv') products.head() # ## Get Sentiment from Rating products = products.loc[products['rating'] != 3] products['sentiment'] = products['rating'].apply(lambda x: +1 if x > 3 else -1) products[:5] # ## Remove punctuation # # Use methods in `string` library. # # - **string.translate(s, table[, deletechars])** # # Delete all characters from s that are in deletechars (if present), and then translate the characters using table, which must be a 256-character string giving the translation for each character value, indexed by its ordinal. If table is None, then only the character deletion step is performed. # # # - **string.punctuation** # # String of ASCII characters which are considered punctuation characters in the C locale. # Number of null reviews: pd.isnull(products['review']).sum() products['review'] = products['review'].fillna('') # After FILLNA: pd.isnull(products['review']).sum() # + def remove_punctuation(text): return text.translate(None, string.punctuation) products['review_clean'] = products['review'].apply(remove_punctuation) products[['review', 'review_clean']].ix[[26, 45]] # - # ## Get Splited Data # # Split data (randomly) into training and test sets. As I don't want to reload other files, I try to use graphlab temporarily to help me split the data, and then go back to pandas Dataframe. Unluckily, I find that SFrame cannot be transformed easily back into Dataframe (of course, I can first store the splited SFrame as csv then read it using pandas...), so I use the list of indices for the training and test sets. 
# + js_train_indices = open('module-2-assignment-train-idx.json').read() train_indices = json.loads(js_train_indices) js_test_indices = open('module-2-assignment-test-idx.json').read() test_indices = json.loads(js_test_indices) train_data = products.iloc[train_indices] test_data = products.iloc[test_indices] # - # Actually, we can see that there are some rows even without a name... But it doesn't affect what we want to do. Just keep it in mind. What we must make sure is that there is no `NaN` in `review_clean` column. pd.isnull(products.name).sum() pd.isnull(products.review_clean).sum() # ## Build the word count vector for each review # + from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer(token_pattern=r'\b\w+\b') # Use this token pattern to keep single-letter words # First, learn vocabulary from the training data and assign columns to words # Then convert the training data into a sparse matrix train_matrix = vectorizer.fit_transform(train_data['review_clean']) # Second, convert the test data into a sparse matrix, using the same word-column mapping test_matrix = vectorizer.transform(test_data['review_clean']) # - print train_matrix[:10] #省略0的表达 # ## Train a sentiment classifier with logistic regression from sklearn import linear_model sentiment_model = linear_model.LogisticRegression().fit(train_matrix, train_data['sentiment']) sentiment_model (sentiment_model.coef_ >= 0).sum() # ## Making predictions with logistic regression sample_test_data = test_data[10:13] sample_test_data.iloc[0]['review_clean'] sample_test_matrix = vectorizer.transform(sample_test_data['review_clean']) sample_scores = sentiment_model.decision_function(sample_test_matrix) sample_scores # ## Prediciting Sentiment sample_labels = [1 if score > 0 else - 1 for score in sample_scores] sample_labels # ## Probability Predictions sample_probabilities = 1 / (1 + np.exp(-sample_scores)) sample_probabilities # ## Find the most positive (and negative) review 
test_scores = sentiment_model.decision_function(test_matrix) test_scores test_probabilities = 1 / (1 + np.exp(-test_scores)) test_probabilities most_positive_indices = test_probabilities.argsort()[-20:] most_positive_data = test_data.iloc[most_positive_indices] most_positive_data['review'] most_negative_indices = test_probabilities.argsort()[:20] most_negative_data = test_data.iloc[most_negative_indices] most_negative_data['review'] # ## Compute accuracy of the classifier test_predictions = sentiment_model.predict(test_matrix) test_predictions match = test_predictions == test_data['sentiment'] sum(match) accuracy = sum(match) / float(len(test_data)) accuracy # ## Learn another classifier with fewer words significant_words = ['love', 'great', 'easy', 'old', 'little', 'perfect', 'loves', 'well', 'able', 'car', 'broke', 'less', 'even', 'waste', 'disappointed', 'work', 'product', 'money', 'would', 'return'] # + # Former implementation # vectorizer = CountVectorizer(token_pattern=r'\b\w+\b') # train_matrix = vectorizer.fit_transform(train_data['review_clean']) # test_matrix = vectorizer.transform(test_data['review_clean']) vectorizer_word_subset = CountVectorizer(vocabulary=significant_words) # limit to 20 words train_matrix_word_subset = vectorizer_word_subset.fit_transform(train_data['review_clean']) test_matrix_word_subset = vectorizer_word_subset.transform(test_data['review_clean']) # - simple_model = linear_model.LogisticRegression().fit(train_matrix_word_subset, train_data['sentiment']) (simple_model.coef_ >= 0).sum() f_Y:y_train}) print((epoch,res)) res_ypred = y_pred.eval(feed_dict={tf_X:X_test,tf_Y:y_test}).flatten() # 只能预测一批样本,不能预测一个样本 print(res_ypred) from sklearn.metrics import accuracy_score print(accuracy_score(y_test, res_ypred.reshape(-1,1))) # - y_test res_ypred.reshape(-1,1).shape
5,878
/RecordSearch/A9106-summary.ipynb
bd13ac41ca9fdddeb07bee1b531f7e53a117a55a
[]
no_license
GLAM-Workbench/ozglam-workbench-naa-asio
https://github.com/GLAM-Workbench/ozglam-workbench-naa-asio
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
10,170,995
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- series = 'A9106' import os import pandas as pd import series_details import plotly.offline as py py.init_notebook_mode() df = pd.read_csv(os.path.join('data', '{}.csv'.format(series.replace('/', '-'))), parse_dates=['start_date', 'end_date']) series_details.display_summary(series, df) # ## Content preview # + # Change the number_of_rows value to see more number_of_rows = 5 # Display dataframe df[:number_of_rows].style.set_properties(['title'], **{'text-align': 'left'}).set_table_styles([dict(selector="th", props=[("text-align", "center")]), dict(selector='.row_heading, .blank', props=[('display', 'none')])]) # - # ## Plot content dates fig = series_details.plot_dates(df) py.iplot(fig, filename='series-dates-bar') # ## View word frequencies # Combine all of the file titles into a single string title_text = a = df['title'].str.lower().str.cat(sep=' ') series_details.display_word_counts(title_text)
1,212
/울산/ulsan_전처리.ipynb
64efe7035a9fe59248d39feaa3396dfa61cebaf4
[]
no_license
jongheemoon/solar-energy-prediction
https://github.com/jongheemoon/solar-energy-prediction
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
131,539
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="7IrUxkygZwPM" executionInfo={"status": "ok", "timestamp": 1619136497890, "user_tz": -540, "elapsed": 20600, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="c7785b80-b1ab-406b-a2d0-0242f661d04c" from google.colab import drive drive.mount('/content/drive') # + id="sA6omG2ua-Og" executionInfo={"status": "ok", "timestamp": 1619136500421, "user_tz": -540, "elapsed": 615, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} import pandas as pd import numpy as np import matplotlib.pyplot as plt # + id="pKlYer3ea-dY" executionInfo={"status": "ok", "timestamp": 1619136501826, "user_tz": -540, "elapsed": 768, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} #한글 나오게 import matplotlib from matplotlib import font_manager, rc import platform if platform.system()=="Windows": font_name=font_manager.FontProperties(fname="c:/Windows/Fonts/malgun.ttf").get_name() rc('font', family=font_name) matplotlib.rcParams['axes.unicode_minus']=False import warnings warnings.filterwarnings("ignore") # + id="WKYp0OfuE3OO" executionInfo={"status": "ok", "timestamp": 1619136504886, "user_tz": -540, "elapsed": 726, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} # #미세먼지 데이터 전처리 # def mise_make_csv(region): # from datetime import timedelta # months=range(1,35) # dfs=[] # for month in months: # path=f"/content/drive/MyDrive/동서발전 태양광 발전량 예측 AI 경진대회/data/{region}/df{month}.xls" # mise=pd.read_excel(path) # mise['날짜']=mise['날짜'].apply(lambda x: ' '.join([x.split()[0],str(int(x.split()[1])-1).zfill(2)])) # mise['날짜']=pd.to_datetime(mise['날짜']) # 
mise['날짜']=mise['날짜'].apply(lambda x: x+timedelta(hours=1)) # dfs.append(mise) # mise=pd.concat(dfs, ignore_index=True) # mise.to_csv(f'{region}_mise.csv') # return mise # mise_make_csv('dangjin') # mise_make_csv('ulsan') # + id="9_SeSnSCZ939" executionInfo={"status": "ok", "timestamp": 1619136511090, "user_tz": -540, "elapsed": 6046, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} path='/content/drive/MyDrive/동서발전 태양광 발전량 예측 AI 경진대회/data/' obs=pd.read_csv(path+'ulsan_obs_data.csv') site_info=pd.read_csv(path+'site_info.csv') fcst=pd.read_csv(path+'ulsan_fcst_data.csv') energy=pd.read_csv(path+'energy.csv') solar=pd.read_csv(path+'pohang_ilsa.csv') mise=pd.read_csv(path+'ulsan_mise.csv') rain=pd.read_csv(path+'ulsan_rain.csv') dangjin_mise=pd.read_csv(path+'dangjin_mise.csv') # + [markdown] id="prA3gOp0tRA4" # # 1. 데이터 확인 # + [markdown] id="O7-_TZ4OzyTS" # - obs=울산지역 발전소 인근 기상 관측 자료 # - 1시간 단위 관측 자료 # # - fcst=울산지역 발전소 동네 예보 # - 2시, 5시, 8시, 11시, 14시, 17시, 20시, 23시 -> 8번 예보 # - 다음날 24시까지 예보(최소 46시간 후, 최대 67시간 후) # - energy # - solar # + id="HmDboVWUbY6r" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619080104646, "user_tz": -540, "elapsed": 6646, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="4e346698-83d7-45dd-d2da-53c4be51f678" obs.info() obs.isnull().sum() # + id="ptWknmFmsDfp" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619136538238, "user_tz": -540, "elapsed": 632, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="5178e622-d9ce-443f-e593-a2f90f76a7fc" obs['전운량(10분위)'].value_counts() # + id="w7i6PyLNb69Y" colab={"base_uri": "https://localhost:8080/", "height": 404} executionInfo={"status": "ok", "timestamp": 1619080104651, "user_tz": -540, "elapsed": 6622, "user": {"displayName": "\ud669\uc2b9\uc5f0", 
"photoUrl": "", "userId": "15654273739150936211"}} outputId="048b7e9b-61e7-4cc7-9f05-5cfe63e26ad7" fcst # + colab={"base_uri": "https://localhost:8080/"} id="pMDljrtTqXCM" executionInfo={"status": "ok", "timestamp": 1619080104653, "user_tz": -540, "elapsed": 6613, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="86cd82b1-5d07-4fbf-f0b6-927ecca3d536" fcst.info() fcst.isnull().sum() # + colab={"base_uri": "https://localhost:8080/", "height": 166} id="ClB0FqeSa-W1" executionInfo={"status": "ok", "timestamp": 1619080104654, "user_tz": -540, "elapsed": 6601, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="34a9fd61-8483-4396-fbb5-a0483c00ecf2" site_info # + id="JsmNczY-sLKu" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619080104656, "user_tz": -540, "elapsed": 6592, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="c1861c64-83ef-4ae2-9b01-2bbcfc87107a" energy.info() # + id="kFIlXLcExZjO" # + id="0RzdmhwBsjOI" #energy.index=energy['time'] # + id="xwrvOm9LNn9q" #energy=energy[['dangjin_floating','dangjin_warehouse','dangjin','ulsan']] # + id="3WqRIZHkxtzk" #energy['2018-3-1':'2018-3-10'].plot(figsize=(30,5)) # + id="pLCPdnrzMB5T" #energy['2018-3-1'].plot(figsize=(30,5)) # + id="5GvoTwZjMCAq" colab={"base_uri": "https://localhost:8080/", "height": 196} executionInfo={"status": "ok", "timestamp": 1619080105125, "user_tz": -540, "elapsed": 7018, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="84ad55af-cab6-4643-ab6b-a9668e5a5ef4" mise.head() # + id="vWlPvfPiV9io" # + colab={"base_uri": "https://localhost:8080/"} id="OZ48ABTIXgUB" executionInfo={"status": "ok", "timestamp": 1619080105127, "user_tz": -540, "elapsed": 7008, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": 
"15654273739150936211"}} outputId="122b3d7e-4e70-4c72-d7f0-a6dbb7f3e261" mise.info() # + colab={"base_uri": "https://localhost:8080/", "height": 673} id="-2JYEBy3VQJ-" executionInfo={"status": "ok", "timestamp": 1619080106627, "user_tz": -540, "elapsed": 8494, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="d2714473-3cf0-4fbf-d9f0-281ddde5d71c" import missingno as msno msno.matrix(mise,figsize=(5,10)) plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="OpPRbwfGXsO2" executionInfo={"status": "ok", "timestamp": 1619080106628, "user_tz": -540, "elapsed": 8486, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="c1477902-4604-4f70-a38b-b4b14464e123" mise.isnull().sum() # + [markdown] id="N3Q2JG9JbxUy" # ### 미세먼지 시각화 # + colab={"base_uri": "https://localhost:8080/", "height": 203} id="uBALQ1Rlb1zT" executionInfo={"status": "error", "timestamp": 1619080107120, "user_tz": -540, "elapsed": 8961, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="36d62be1-5de2-4816-9c1d-c5f533f751db" mise_df=ulsan[['PM10', 'PM2.5']] mise_df=mise_df.reset_index(drop=True) mise_df # + id="GyGjeSusbTpi" plt.figure(figsize=(20,5)) plt.plot(np.arange(500), mise_linear['PM10'][:500],'--') plt.plot(np.arange(500), mise_df['PM10'][:500], '.-') plt.title('interpolated mise') # + id="UvGRoqTS5MK0" plt.figure(figsize=(20,5)) plt.plot(np.arange(500), mise_knn['PM10'][:500],'--') plt.plot(np.arange(500), mise_df['PM10'][:500], '.-') plt.title('knn mise') # + id="bxs9VdENdDUW" #1차보간 mise_linear=mise_df.interpolate().fillna(method='bfill').reset_index(drop=True) mise_linear # + id="9GI4HOvOdDcS" #2차 보간 mise_poly=mise_df.interpolate(method='polynomial', order=2).fillna(method='bfill').reset_index(drop=True) mise_poly # + id="jEqPuq2edDp7" #knn 보간 mise_knn=df[['PM10', 'PM2.5']] mise_knn # + id="yfxNvr21Rwd5" rain.info() # + 
id="n3MJBHoZpQ7D" #pd.qcut(rain.rain[rain['rain']>0],5) # [(-0.0009999999999799, 0.0254] < (0.0254, 0.608] < (0.608, 5.627] <(5.627, 174.136]] #bins=[-0.001,0.0091,0.209,1.538,8.572,174.136] # + id="wPcYAE5hPfxv" #rain=rain.rename(columns={'날짜':'time','강수량(mm)':'rain'}) #rain=rain[['time','rain']] #rain=rain.fillna(0) #rain['time']=pd.to_datetime(rain['time']) #rain=rain.set_index('time').resample('H').mean().interpolate(method="polynomial", order=2) #labels=[0,1,2,3,4] #rain['rain']=pd.qcut(rain.rain[rain['rain']>0],5, labels=labels) # + id="Sf5PDsQURkox" #solar[:'2018-3-10'].plot(figsize=(25,5)) # + id="Bk6M6-3JxzHC" #답 df = pd.DataFrame() df['ds'] = energy['time'] df['y'] = energy['ulsan'] df.head() # + id="wkvWN-ZBxzSb" # + id="SOleK0v1xzbr" # + [markdown] id="u3Oawq_Ext6B" # # 2. 데이터 전처리 # ## 2. 1. 일기예보 1시간단위로 만들기 # + id="69I62Pfn-z79" # def fcst_hourly(time): # time=int(time) # fcst['Forecast_time'] = pd.to_datetime(fcst['Forecast time']) #예보 시점 datetime으로 바꾸기 # #예보 시점 정의 # fcst_time=fcst[fcst['Forecast_time'].dt.hour==time] # #예보 시점의 다음날 예보 # fcst_time=fcst_time[(fcst_time['forecast']>=24-time)&(fcst_time['forecast']<48-time)] # #Forecast_time에 forecast 더해서 예보 시간 구하기 # def to_date(x): # return pd.DateOffset(hours=x) # fcst_time['Forecast_time']=fcst_time['Forecast_time']+fcst_time['forecast'].map(to_date) # fcst_time=fcst_time[['Forecast_time', 'Temperature', 'Humidity', 'WindSpeed', 'WindDirection', 'Cloud']] # #한시간 간격 데이터프레임 만들어서 합치기 # hourly=pd.DataFrame() # hourly['Forecast_time']=pd.date_range(start='2018-03-02 00:00:00', end='2021-03-01 23:00:00', freq='H') # fcst_time=pd.merge(hourly, fcst_time, how='outer') #default는 inner # return fcst_time # + [markdown] id="f2xn1e2huhCF" # ## 결측치 # + id="gkEWpAEQuZO8" #obs[obs['temp'].isnull()] #21820, 21836, 21837, 21840 #obs[obs['windSpeed'].isnull()] #11242 #obs[obs['humidity'].isnull()] #15395 #obs['cloud'] #채워야 할 결측치 # + id="O6LHJEeObRZ8" #ulsan.loc[11242-2:11242+2] #선형보간, ffill, ... 
# + id="z2VDTI_44ztW" #결측값 제거 #temp=obs[~obs['temp'].isnull()] #temp=temp[~obs['windSpeed'].isnull()] #temp=temp[~obs['humidity'].isnull()] #temp=temp[~obs['cloud'].isnull()] #temp # + [markdown] id="HhqnIJzkVQD_" # #원핫인코딩딩딩 # Tree 기반 모델에서는 더미화가 오히려 성능에 악영향을 주는경우가 종종 발생한다. 트리모델에서는 범주별로 이진화를 하게 되는데 만약 더미화를 하게되면 범주별로 변수처럼 인식하게 되므로 True, False 단 두가지로 분류되게 된다. 즉, 이 경우 더미화를 하게됨으로써 덜 정교하게 예측하게 된다 # + id="mk8ntRlR00wS" executionInfo={"status": "ok", "timestamp": 1619137535289, "user_tz": -540, "elapsed": 1866, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} #1월 미세먼지, 초미세먼지 전처리 def mise_pm(file_name, pm): path='/content/drive/MyDrive/동서발전 태양광 발전량 예측 AI 경진대회/data/' df=pd.read_csv(path+f'{file_name}', sep='\t', header=None) df_values=df.values #type(df_values) datalist=[] datalist.append(df_values) #리스트로 변경 df_array=np.array(datalist) df2=df_array.reshape(1,744) df3=pd.DataFrame(df2.T) df3['time']=pd.date_range(start='2021-01-01 01:00:00', end='2021-02-01 00:00:00', freq='H') df3=df3.rename(columns={0:f'{pm}'}) return df3 ulsan_mise_pm25=mise_pm('ulsan_pm25.txt', 'PM2.5') ulsan_mise_pm10=mise_pm('ulsan_pm10.txt', 'PM10') dangjin_mise_pm25=mise_pm('dangjin_pm25.txt','PM2.5') dangjin_mise_pm10=mise_pm('dangjin_pm10.txt','PM10') #PM2.5, PM10 합치고 2월1일 0시 삭제, time을 인덱스로 def mise_only(x,y): mise_add=pd.merge(x,y,on='time') mise_add=mise_add.iloc[:-1,:] mise_add=mise_add.set_index(mise_add['time']) return mise_add ulsan_mise_jan=mise_only(ulsan_mise_pm25, ulsan_mise_pm10) dangjin_mise_jan=mise_only(dangjin_mise_pm25, dangjin_mise_pm10) # + id="P2q1RFbFZPB1" executionInfo={"status": "ok", "timestamp": 1619137536626, "user_tz": -540, "elapsed": 1635, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} path='/content/drive/MyDrive/동서발전 태양광 발전량 예측 AI 경진대회/data/' obs=pd.read_csv(path+'ulsan_obs_data.csv') site_info=pd.read_csv(path+'site_info.csv') fcst=pd.read_csv(path+'ulsan_fcst_data.csv') 
energy=pd.read_csv(path+'energy.csv') solar=pd.read_csv(path+'pohang_ilsa.csv') mise=pd.read_csv(path+'ulsan_mise.csv') rain=pd.read_csv(path+'ulsan_rain.csv') def preprocess(obs,solar,energy, rain, mise): #관측자료 전처리 obs_columns={'지점':'facility_code', '지점명':'facility_name', '일시':'time', '기온(°C)':'temp', '풍속(m/s)':'windSpeed', '풍향(16방위)':'windDirection', '습도(%)':'humidity', '전운량(10분위)':'cloud' } obs.rename(columns=obs_columns, inplace=True) obs['time']=pd.to_datetime(obs['time']) obs=obs.iloc[:,2:] #일사량 전처리 solar.rename(columns={'일시':'time', '일조(hr)':'sun_hr', '일조 QC플래그':'sun_qc', '일사(MJ/m2)':'solar' }, inplace=True) solar=solar[['time','sun_hr','solar']] solar['time']=pd.to_datetime(solar['time']) solar=solar.fillna(0) data=pd.merge(obs, solar, how='inner', on='time') #해당 열들 더미화 다르게 해보기 data['year']=data['time'].dt.year data['month']=data['time'].dt.month data['day']=data['time'].dt.day data['hour']=data['time'].dt.hour data['season']=data['time'].dt.month %12 //3 +1 #겨울:1 봄:2 여름:3 가을:4 #에너지 전처리 #발전량 시간 1시 -> 0시로 바꾸기 def convert_time(x): date, hr = x.split(' ') h,m,s = hr.split(':') h = str(int(h)-1) hr = ':'.join([h,m,s]) return ' '.join([date, hr]) energy['time']=energy['time'].apply(convert_time) energy['time']=pd.to_datetime(energy['time']) ulsan=energy[['ulsan']] ulsan=pd.concat([data,ulsan], axis=1) #기온 4개, 풍속, 풍향, 습도 1개씩 결측값 2차 polynomial 보간 ulsan['temp'].interpolate(method='polynomial', order=2,inplace=True) ulsan['windSpeed'].interpolate(method='polynomial', order=2,inplace=True) ulsan['windDirection'].interpolate(method='polynomial', order=2,inplace=True) ulsan['humidity'].interpolate(method='polynomial', order=2,inplace=True) #강수량 시간 늘리고 보간 후 추가 rain=rain.rename(columns={'날짜':'time','강수량(mm)':'rain'}) rain=rain[['time','rain']] rain=rain.fillna(0) rain['time']=pd.to_datetime(rain['time']) rain=rain.set_index('time').resample('H').mean().interpolate() #labels=[0,1,2,3,4] #rain['rain']=pd.qcut(rain.rain[rain['rain']>0],5, labels=labels) 
ulsan=pd.merge(ulsan,rain, how='outer', on='time') ulsan['rain']=ulsan['rain'].fillna(0) #미세먼지 mise.rename(columns={'날짜':'time', '아황산가스':'SO2', '일산화탄소':'CO', '오존':'O3', '이산화질소':'NO2'}, inplace=True) mise=mise[['time','SO2','CO','O3','NO2','PM10','PM2.5']] mise['time']=pd.to_datetime(mise['time']) ulsan=pd.merge(ulsan,mise, how='outer', on='time') #1월 미세먼지 합치기 #data는 시계열 형식의 time열을 가지고 있어야함. def add_jan(data,add): data=data.set_index(data['time']) data.fillna(add, inplace=True) #'-' nan으로 바꾸기 data['PM10']=data['PM10'].apply(lambda x: np.nan if x=='-' else x) data['PM2.5']=data['PM2.5'].apply(lambda x: np.nan if x=='-' else x) return data ulsan=add_jan(ulsan,ulsan_mise_jan) #실수형으로 바꾸기 ulsan=ulsan.iloc[:,1:].astype(np.float32, errors = 'raise') return ulsan ulsan=preprocess(obs, solar, energy, rain, mise) # + id="qlQBac3hGLaY" ulsan.isnull().sum() # + id="tDSGEw0a6xGh" ulsan.dtypes # + id="wQ9t38b4JNRp" ulsan.pivot_table(['cloud','solar','temp'],columns='month') # + id="ZFEmzAriYDr0" df[['temp','windSpeed','windDirection','humidity','PM2.5','PM10','sun_hr','solar','ulsan']][:500].plot(figsize=(30,50), subplots=True) plt.title('tendency') # + id="L0cuLiBeNWPp" # + [markdown] id="X0_QickzeK2s" # KNN으로 미세먼지 채우기 # + id="D_JOfjEueK7j" # + id="di-R3WgKywJD" #표준화 정규화 from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(df[['temp','windSpeed','windDirection','humidity','sun_hr','solar','rain', 'SO2','CO','O3','NO2','PM10','PM2.5']]) scaled = scaler.transform(df[['temp','windSpeed','windDirection','humidity','sun_hr','solar','rain', 'SO2','CO','O3','NO2','PM10','PM2.5']]) #결과 ndarray임 scaled_std = pd.DataFrame(scaled, columns=['temp','windSpeed','windDirection','humidity','sun_hr','solar','rain', 'SO2','CO','O3','NO2','PM10','PM2.5']) from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() scaler.fit(df[['temp','windSpeed','windDirection','humidity','sun_hr','solar','rain', 'SO2','CO','O3','NO2','PM10','PM2.5']]) scaled = 
scaler.transform(df[['temp','windSpeed','windDirection','humidity','sun_hr','solar','rain', 'SO2','CO','O3','NO2','PM10','PM2.5']]) # 역시나 transform() 결과가 ndarray라 DataFrame으로 바꿔준다. scaled_nrm = pd.DataFrame(scaled, columns=['temp','windSpeed','windDirection','humidity','sun_hr','solar','rain', 'SO2','CO','O3','NO2','PM10','PM2.5']) # + id="C1aTXna44wTN" scaled_std # + id="Cnh4671tr01z" # + id="y0XclpJQeLAa" # pip install impyute # from impyute.imputation.cs import fast_knn # + colab={"base_uri": "https://localhost:8080/", "height": 404} id="9T4eOm105pc7" executionInfo={"status": "ok", "timestamp": 1619080146576, "user_tz": -540, "elapsed": 7582, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="a20b0e3a-371c-4211-b002-63258c486728" from sklearn.impute import KNNImputer imputer = KNNImputer(n_neighbors=15) filled = imputer.fit_transform(ulsan[['temp','windSpeed','windDirection','humidity','sun_hr','solar','rain', 'SO2','CO','O3','NO2','PM10','PM2.5','ulsan']]) df = pd.DataFrame(filled, columns=['temp','windSpeed','windDirection','humidity','sun_hr','solar','rain', 'SO2','CO','O3','NO2','PM10','PM2.5','ulsan']) df # + id="-_bPzdXjWS1B" plt.figure(figsize=(10,5)) plt.plot(ulsan.PM10) plt.plot(df.PM10) # + id="-k4l28PTUrC0" df[['windSpeed']].boxplot() # + id="s0NxtQEACVmY" colab={"base_uri": "https://localhost:8080/", "height": 636} executionInfo={"status": "error", "timestamp": 1619080214536, "user_tz": -540, "elapsed": 1033, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="d561d386-e8b4-41a5-d118-efa514f486bd" k_list = range(1,50) accuracies = [] for k in k_list: imputer = KNNImputer(n_neighbors=k) filled = imputer.fit_transform(df) accuracies.append() plt.plot(k_list, accuracies) plt.xlabel("k") plt.ylabel("Validation Accuracy") plt.title("Classifier Accuracy") plt.show() # + id="0HJx44nOJuMr" colab={"base_uri": "https://localhost:8080/"} 
executionInfo={"status": "ok", "timestamp": 1619080150561, "user_tz": -540, "elapsed": 1404, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="3ec84e19-aa43-4d82-8083-c52cbfbe25e1" df2=ulsan['cloud'].reset_index(drop=True) df2 # + id="c6a-3MrC6Bbu" df3=pd.concat([df, df2], axis=1) # + colab={"base_uri": "https://localhost:8080/", "height": 404} id="ReDDjEjtKVhM" executionInfo={"status": "ok", "timestamp": 1619080154947, "user_tz": -540, "elapsed": 924, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="c20d1ba7-fcf0-45f9-882d-b5d367763564" df4=df3[~df3['cloud'].isnull()] df4 # + id="O08Ywvkz5wUZ" from sklearn.model_selection import train_test_split x = df4[['temp','windSpeed','rain','solar','sun_hr','PM2.5','PM10']] y = df4['cloud'] x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, test_size=0.3, random_state=10) # + colab={"base_uri": "https://localhost:8080/"} id="u4qN2VyFovEm" executionInfo={"status": "ok", "timestamp": 1619080181722, "user_tz": -540, "elapsed": 1145, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="712b16ee-1eaf-4f8b-fc46-830f7cc9d790" from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import accuracy_score classifier = KNeighborsClassifier(n_neighbors = 3) classifier.fit(x_train, y_train) pred=classifier.predict(x_test) accuracy_score(y_test, pred) # + id="vVnjYJw3LopW" executionInfo={"status": "ok", "timestamp": 1619137724835, "user_tz": -540, "elapsed": 1200, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} path='/content/drive/MyDrive/동서발전 태양광 발전량 예측 AI 경진대회/data/' obs=pd.read_csv(path+'ulsan_obs_data.csv') site_info=pd.read_csv(path+'site_info.csv') fcst=pd.read_csv(path+'ulsan_fcst_data.csv') energy=pd.read_csv(path+'energy.csv') solar=pd.read_csv(path+'pohang_ilsa.csv') 
mise=pd.read_csv(path+'ulsan_mise.csv') rain=pd.read_csv(path+'ulsan_rain.csv') def preprocess(obs,solar,energy, rain, mise): #관측자료 전처리 obs_columns={'지점':'facility_code', '지점명':'facility_name', '일시':'time', '기온(°C)':'temp', '풍속(m/s)':'windSpeed', '풍향(16방위)':'windDirection', '습도(%)':'humidity', '전운량(10분위)':'cloud' } obs.rename(columns=obs_columns, inplace=True) obs['time']=pd.to_datetime(obs['time']) obs=obs.iloc[:,2:] #일사량 전처리 solar.rename(columns={'일시':'time', '일조(hr)':'sun_hr', '일조 QC플래그':'sun_qc', '일사(MJ/m2)':'solar' }, inplace=True) solar=solar[['time','sun_hr','solar']] solar['time']=pd.to_datetime(solar['time']) solar=solar.fillna(0) data=pd.merge(obs, solar, how='inner', on='time') #해당 열들 더미화 다르게 해보기 data['year']=data['time'].dt.year data['month']=data['time'].dt.month data['day']=data['time'].dt.day data['hour']=data['time'].dt.hour data['season']=data['time'].dt.month %12 //3 +1 #겨울:1 봄:2 여름:3 가을:4 #에너지 전처리 #발전량 시간 1시 -> 0시로 바꾸기 def convert_time(x): date, hr = x.split(' ') h,m,s = hr.split(':') h = str(int(h)-1) hr = ':'.join([h,m,s]) return ' '.join([date, hr]) energy['time']=energy['time'].apply(convert_time) energy['time']=pd.to_datetime(energy['time']) energy=energy[['ulsan']] ulsan=pd.concat([data,energy], axis=1) #기온 4개, 풍속, 풍향, 습도 1개씩 결측값 2차 polynomial 보간 ulsan['temp'].interpolate(method='polynomial', order=2,inplace=True) ulsan['windSpeed'].interpolate(method='polynomial', order=2,inplace=True) ulsan['windDirection'].interpolate(method='polynomial', order=2,inplace=True) ulsan['humidity'].interpolate(method='polynomial', order=2,inplace=True) #강수량 시간 늘리고 보간 후 추가 rain=rain.rename(columns={'날짜':'time','강수량(mm)':'rain'}) rain=rain[['time','rain']] rain=rain.fillna(0) rain['time']=pd.to_datetime(rain['time']) rain=rain.set_index('time').resample('H').mean().interpolate() #labels=[0,1,2,3,4] #rain['rain']=pd.qcut(rain.rain[rain['rain']>0],5, labels=labels) ulsan=pd.merge(ulsan,rain, how='outer', on='time') ulsan['rain']=ulsan['rain'].fillna(0) #미세먼지 
mise.rename(columns={'날짜':'time', '아황산가스':'SO2', '일산화탄소':'CO', '오존':'O3', '이산화질소':'NO2'}, inplace=True) mise=mise[['time','SO2','CO','O3','NO2','PM10','PM2.5']] mise['time']=pd.to_datetime(mise['time']) ulsan=pd.merge(ulsan,mise, how='outer', on='time') #1월 미세먼지 합치기 #data는 시계열 형식의 time열을 가지고 있어야함. def add_jan(data,add): data=data.set_index(data['time']) data.fillna(add, inplace=True) #'-' nan으로 바꾸기 data['PM10']=data['PM10'].apply(lambda x: np.nan if x=='-' else x) data['PM2.5']=data['PM2.5'].apply(lambda x: np.nan if x=='-' else x) return data ulsan=add_jan(ulsan,ulsan_mise_jan) #실수형으로 바꾸기 ulsan=ulsan.iloc[:,1:].astype(np.float32, errors = 'raise') return ulsan ulsan=preprocess(obs, solar, energy, rain, mise) # + id="iqLtESyIOrS4" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619137726203, "user_tz": -540, "elapsed": 539, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="627e62c1-1f4f-4e3f-f533-86b2dbddada1" ulsan.isnull().sum() # + id="k7kbeBa1qAVu" colab={"base_uri": "https://localhost:8080/", "height": 238} executionInfo={"status": "error", "timestamp": 1619137706578, "user_tz": -540, "elapsed": 765, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="2e72c71f-78b0-4ebd-be02-77dfd57513ee" #k 1부타 100 넣고 시각화 k_list = range(1,101) accuracies = [] for k in k_list: classifier = KNeighborsClassifier(n_neighbors = k) classifier.fit(x_train, y_train) accuracies.append(accuracy_score(y_test, pred)) plt.plot(k_list, accuracies) plt.xlabel("k") plt.ylabel("Validation Accuracy") plt.title("Classifier Accuracy") plt.show() # + id="RuuG-b0Jhl8Q" #미세먼지 1월 없음 #ulsan_setback=ulsan.set_index(ulsan.time)[:'2020'].reset_index(drop=True) # + id="t7S9559fG1jF" #ulsan_mise=pd.merge(ulsan_setback, mise, on='time') # + id="hM2njJFHVZ1K" # # 카테고리 데이터를 one-hot-encoding # def dummy_data(data, columns): # for column in columns: # data = 
pd.concat([data, pd.get_dummies(data[column], prefix = column)], axis=1) # data = data.drop(column, axis=1) # return data # dummy_columns = ['season'] # train_dummy = dummy_data(ulsan, dummy_columns) # #test_dummy = dummy_data(test, dummy_columns) # print('원핫인코딩 전 shape') # print(ulsan.shape) # #print(test.shape) # print('get_dummies로 원핫인코딩 후 shape') # print(train_dummy.shape) # #print(test_dummy.shape) ## train_dummy # + id="6vxr4-Somurf" #train_dummy.index=train_dummy['time'] #train_dummy['2018-':'2018-7-25'].head(20) # + id="TVIxbakwSbJ8" # #사이킷런 원핫인코딩 # from sklearn.preprocessing import OneHotEncoder # OneHotEncoder(categorical_features=['#']) #인덱스번호 # .fit_transform(X).toarray() # + id="fvGT-t4TSbcC" executionInfo={"status": "ok", "timestamp": 1619137636951, "user_tz": -540, "elapsed": 632, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} #결측치 제외 #train_dummy=train_dummy[~train_dummy['cloud'].isnull()] #더미화 할경우 ulsan_cloud=ulsan[~ulsan['cloud'].isnull()] # + id="cTvWkR49RLf2" #미세먼지 포함 데이터(2021년 없음) 결측치 제외 ulsan_cloud_mise=ulsan_mise[~ulsan_mise['cloud'].isnull()] # + id="YQWYrmocE0Wk" executionInfo={"status": "ok", "timestamp": 1619139208283, "user_tz": -540, "elapsed": 665, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} #데이터 7:3으로 나누기 from sklearn.model_selection import train_test_split x = ulsan_cloud[['humidity','sun_hr','solar','month','day','hour','temp','windSpeed','windDirection','rain']] #'month','day','hour' y = ulsan_cloud[['cloud']] x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, test_size=0.3) # + id="_sn878fyIXzh" from sklearn.model_selection import train_test_split x = df[['sun_hr','solar']] y = df[['ulsan']] x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, test_size=0.3, random_state=10) # + id="bWnUVHsyIX7H" # + [markdown] id="GW1fjWWBAs6U" # ### 2. 2. 
랜덤포레스트 # # # + id="EQGFjtlu62RY" #from sklearn.linear_model import LinearRegression #from sklearn.metrics import accuracy_score # # #model=LinearRegression() # #model.fit(x_train, y_train) #y_label=model.predict(x_test) #y_label # + id="aXtB9Q1rFpFC" #하이퍼 파라미터 튜닝 from sklearn.model_selection import GridSearchCV params = { 'n_estimators' : [10, 50,100], 'max_depth' : [8, 10, 12, 14, 16], #'max_terminal_nodes':[8, 12, 16], 'min_samples_leaf' : [6, 8, 10,12], 'min_samples_split' : [4, 8, 16] } # RandomForestClassifier 객체 생성 후 GridSearchCV 수행 rf_clf = RandomForestClassifier(random_state = 10, n_jobs = -1) grid_cv = GridSearchCV(rf_clf, param_grid = params, cv = 3, n_jobs = -1) grid_cv.fit(x_train, y_train) print('최적 하이퍼 파라미터: ', grid_cv.best_params_) print('최고 예측 정확도: {:.4f}'.format(grid_cv.best_score_)) # + id="CKn-Eqyclol5" # # we pass the preprocessing pipeline as a step to the full pipeline # full_pipeline_steps = [ # ('preprocessing_pipeline', preprocessing_pipeline), # ('model', RandomForestClassifier(random_state=seed)) # ] # # create the full pipeline object # full_pipeline = Pipeline(steps=full_pipeline_steps) # # Create the grid search parameter grid and scoring funcitons # param_grid = { # "model": [RandomForestClassifier(random_state=seed)], # "model__max_depth": np.linspace(1, 32, 32), # "model__n_estimators": np.arange(100, 1000, 100), # "model__criterion": ["gini","entropy"], # "model__max_leaf_nodes": [16, 64, 128, 256], # "model__oob_score": [True], # } # scoring = { # 'AUC': 'roc_auc', # 'Accuracy': make_scorer(accuracy_score) # } # # create the Kfold object # num_folds = 3 # kfold = StratifiedKFold(n_splits=num_folds, random_state=seed) # # create the grid search object with the full pipeline as estimator # n_iter=3 # grid = RandomizedSearchCV( # estimator=full_pipeline, # param_distributions=param_grid, # cv=kfold, # scoring=scoring, # n_jobs=1, # n_iter=n_iter, # refit="AUC" # ) # # fit grid search # best_rf = grid.fit(X_train,y_train) # 
print(f'Best score: {best_rf.best_score_}') # print(f'Best model: {best_rf.best_params_}') # + id="ApwJkYVnFx1b" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619137772120, "user_tz": -540, "elapsed": 2874, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="3d38f827-b0b6-4999-9e02-239182f603ce" #결과로 나온 최적 하이퍼 파라미터로 다시 모델을 학습하여 테스트 세트 데이터에서 예측 성능을 측정 from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score rf_mdl = RandomForestClassifier(n_estimators = 100, max_depth = 14, min_samples_leaf = 4, min_samples_split = 8, random_state = 10, n_jobs = -1) rf_mdl.fit(x_train, y_train) pred = rf_mdl.predict(x_test) print('예측 정확도: {:.4f}'.format(accuracy_score(y_test,pred))) # + id="YimPU6f_XOUK" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619137782675, "user_tz": -540, "elapsed": 1272, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="f5a8b870-4f33-4f07-b993-e6a75eb5894a" print(rf_mdl.score(x_train,y_train)) print(rf_mdl.score(x_test, y_test)) # + id="h02PQgzA7eFG" colab={"base_uri": "https://localhost:8080/", "height": 314} executionInfo={"status": "ok", "timestamp": 1619137793842, "user_tz": -540, "elapsed": 1329, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="d0ff1a4b-df98-4266-9397-1a857a8dfba9" #from sklearn.metrics import classification_report #classification_report(pred, y_test) #특성중요도 print(f"특성중요도: {rf_mdl.feature_importances_}") def plot_feature_importances(model): n_features = x_train.shape[1] plt.barh(range(n_features), model.feature_importances_, align='center') plt.yticks(np.arange(n_features), x_train.columns) plt.xlabel("attr importances") plt.ylim(-1, n_features) plt.show() plot_feature_importances(rf_mdl) # + [markdown] id="ECEbb4l_AllC" # 시각화 # + 
id="iuHyjWdd6wtZ" real=y_test.reset_index(drop=True) # + id="VpIibxdL_VWg" plt.figure(figsize=(20,5)) plt.plot(real[:100]) plt.plot(pred[:100]) plt.legend(['actual','pred']) # + id="O_Uffv66WFk-" colab={"base_uri": "https://localhost:8080/", "height": 434} executionInfo={"status": "ok", "timestamp": 1619137935632, "user_tz": -540, "elapsed": 673, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="28765625-30ec-46f8-b2ad-047539c34ab2" #결측치 데이터 넣고 cloud 결측치 예측 ulsan_no_cloud=ulsan[ulsan['cloud'].isnull()] x_test=ulsan_no_cloud[['humidity','sun_hr','solar','month','day','hour','temp','windSpeed','windDirection','rain']] y_test=ulsan_no_cloud['cloud'] pred=rf_mdl.predict(x_test) ulsan_no_cloud['cloud']=pred ulsan_no_cloud ulsan_final=pd.concat([ulsan_cloud,ulsan_no_cloud]).sort_index() ulsan_final=ulsan_final.iloc[:,:14] ulsan_final # + colab={"base_uri": "https://localhost:8080/"} id="OXKFdx0eJLZc" executionInfo={"status": "ok", "timestamp": 1619139779532, "user_tz": -540, "elapsed": 677, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="9aa77102-8344-4569-8771-d74b57277ce9" ulsan_final['cloud'].value_counts() ulsan_final.isnull().sum() # + id="AH9WisAiYLrs" from sklearn.model_selection import train_test_split x=scaled_std y=df[['ulsan']] x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, test_size=0.3, random_state=10) # + id="WIyGyAY5KnHi" pred # + id="yWGQ2P2Ap28A" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1619139220270, "user_tz": -540, "elapsed": 5311, "user": {"displayName": "\ud669\uc2b9\uc5f0", "photoUrl": "", "userId": "15654273739150936211"}} outputId="f65fa212-bbb8-4dfe-a579-b8952460608e" from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_squared_error rf_mdl = RandomForestRegressor(n_estimators = 100, max_depth = 14, min_samples_leaf = 4, 
min_samples_split = 8, random_state = 10, n_jobs = -1) rf_mdl.fit(x_train, y_train) pred = rf_mdl.predict(x_test) print('R-squared: {:.4f}'.format(rf_mdl.score(x_train,y_train))) print('RMSE: {:.4f}'.format(mean_squared_error(y_test, pred)*(1/2.0))) # + id="Y0oILHXRE3kJ" #발전량 예측 # + id="xwlegmZIE3sT" rf_mdl # + id="9Hq4HUmNE30O" from sklearn.linear_model import LinearRegression from sklearn.metrics import accuracy_score model=LinearRegression() model.fit(x_train, y_train) y_label=model.predict(x_test) y_label #안된다 # + id="xFW2v0ykFWew" #시계열 데이터 추가하는 법... # + id="WItLlpHEFBDm" real=y_test.reset_index(drop=True) real # + id="ZOpxiDWcFBIJ" plt.figure(figsize=(20,5)) plt.plot(real[:100]) plt.plot(y_label[:100]) plt.legend(['actual','pred']) # + id="ZWUYry3MFPEb" # + id="lQXI_iJrAtLO" # fcst_11=fcst_hourly(11).interpolate(method='polynomial', order=2) # fcst_11.head(15) # + id="Iopl6k9vKsS9" # plt.figure(figsize=(20,5)) # days = 700 # plt.plot(fcst_11.loc[:24*days, 'Forecast_time'], fcst_11.loc[:24*days, 'Temperature'], '.-') # plt.plot(fcst_hourly(11).loc[:24*days, 'Forecast_time'], fcst_hourly(11).loc[:24*days, 'Temperature'], 'o') # plt.plot(obs.loc[25:24*(days+1),'time'], obs.loc[25:24*(days+1), 'temp'], '-g') # plt.legend(['interpolated','predicted','actual']) # + [markdown] id="uuby6u6Z6p7Y" # # + id="KwI-IK_L7Zfl" #확인해보고 싶은 것 # 기상예보가 달라지는 것의 영향 # + id="fTn5YA45JXBq" # 1. 예측값과 실제값 비교 # 2. 
관측 자료로 학습하기 # + id="OzI90OQa4AWn" # + id="hQPSVFmNJlLE" # + id="nS_d7X-E6LUi" # + id="x5gwzTdl43bS" # + id="N9P_gd221jDs" # + id="MCgNayRT5BaL" # + id="UU5efq5J6RP4" # + id="gLdjWLBjAXXf" # from sklearn.linear_model import LinearRegression # model_fcst=LinearRegression() # model_fcst.fit(x_train, y_train) # y_label=model_fcst.predict(x_test) # y_label # y_test.Temperature.values # + id="1aRwQT5N4sfF" # plt.figure(figsize=(15,10)) # plt.plot(y_label[:50]) # plt.plot(y_test.Temperature.values[:50]) # model_fcst= sm.Logit.from_formula("Temperature ~ Forecast_time+Humidity+WindSpeed+WindDirection+Cloud", fcst_11) # result_fcst = model_fcst.fit() # print(result_fcst.summary())
36,022
/Julia_multithreading_gotchas/multithreading_tutorial.ipynb
7d54bfae421a73bb16387db48f1b78bae6ab688e
[]
no_license
biona001/teaching
https://github.com/biona001/teaching
0
0
null
null
null
null
Jupyter Notebook
false
false
.jl
452,426
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Julia 1.5.0 # language: julia # name: julia-1.5 # --- # # Julia Multithreading Gotchas # # This notebook compiles a list of example code to illustrate Julia's [multithreading](https://docs.julialang.org/en/v1/manual/multi-threading/) feature. We focus on "performance gotchas" which are not well-documented (as of 2020). Note multithreading parallelism is different than Julia's [distributed parallelism](https://docs.julialang.org/en/v1/stdlib/Distributed/) in that the former always assume all data are present on a single computer. using Random using BenchmarkTools using LinearAlgebra using Plots using ThreadPools # ## How to access multithreading feature # # Before starting Julia (REPL or notebook), type # # `export JULIA_NUM_THREADS=8` # # then verify you indeed have multiple active threads: Threads.nthreads() # ## Gotcha 1: Race Conditions - Wrong Answers! # # + A race condiion is when 2 or more threads modify the same variable simultaneously, so you get the wrong answer. # + Julia makes no effort to check if your code is thread-safe # + There is no good way to debug one!! # + function unsafe_sum(x) s = 0.0 Threads.@threads for i in eachindex(x) s += x[i] end return s end Random.seed!(2020) x = rand(100000) @show unsafe_sum(x) @show unsafe_sum(x); # - # Answers don't match! There is a race condition! To debug race conditions, I suggest using [Threads.SpinLock()](https://docs.julialang.org/en/v1/base/multi-threading/#Base.Threads.SpinLock). # + function debug_unsafe_sum(x::AbstractVector) s = 0.0 m = Threads.SpinLock() Threads.@threads for i in eachindex(x) lock(m) s += x[i] # surround possible race condition with locks unlock(m) end return s end @show debug_unsafe_sum(x) @show debug_unsafe_sum(x); # - # **Conclusion:** `s += x[i]` is causing the race condition! 
This is obvious: if 2 threads are *simultaneously* updating `s`, only one thread's result will be recorded. Since we have 8 threads, only ~1/8 of all operations are updated and the rest is lost. That's why our `unsafe_sum` returned a value that's about $1/8$ of the correct answer. # # To fix this, force each thread to modify a different value. # + function safe_sum(x::AbstractVector) threads = Threads.nthreads() s = zeros(threads) Threads.@threads for i in eachindex(x) s[Threads.threadid()] += x[i] # each thread adds to a different location end return sum(s) # return sum of each thread end @show safe_sum(x) @show safe_sum(x); # - # Finally we resolved the race condition. Let's check timings: @btime safe_sum($x) @btime debug_unsafe_sum($x) @btime sum($x); # We are faster than using locks (which is single threaded and requires extra bookkeeping), but slower than Julia's built-in `sum` command. That is because Julia's built-in `sum` uses many [performance annotations](https://docs.julialang.org/en/v1/manual/performance-tips/#man-performance-annotations) like `@inbounds` and `@simd`. The latter stands for single-instruction multiple data, which is more suitable parallel mechanism for calculating sum of vectors. # # ## Gotcha 2: False sharing - Not-actually-parallel parallel code # # + Threads shouldn’t be modifying things that are “very close” (i.e. 
8 bytes) in memory # + Julia’s LLVM may or may not save you (as you can verify with `@code_native`) # # ![title](false-sharing-illustration.png) # # [Image credit](https://haryachyy.wordpress.com/2018/06/19/learning-dpdk-avoid-false-sharing/) # + # see post https://discourse.julialang.org/t/editing-an-array-with-multiple-threads/25364/9 function f(spacing) n = 1000000 x = rand(n) s = zeros(Threads.nthreads()*spacing) # sum of x c = zeros(Threads.nthreads()*spacing) # count additions in each thread Threads.@threads for i = 1:n s[Threads.threadid()*spacing] += x[i] c[Threads.threadid()*spacing] += 1 end return sum(s) end @btime f(1); @btime f(8); # - # **Conclusion:** The `safe_sum` function in Gotcha 1 *is vulnerable to false sharing*, but Julia's LLVM saved us there. In more complicated examples, you need to add spacings. # ## Gotcha 3: Oversubscription - parallelism on top of parallelism # # This can happen when # # + When code performs parallel operations which themselves call other parallel operations (e.g. BLAS + multithreading). # + You start Julia with more threads than the number of physical cores on your CPU # # **Note:** Not all oversubscriptions are bad. For instance, intensive I/O operations can benefit with more threads than CPU cores because loading/writing data to/from disk is so slow that the CPU is basically idle most of the time. In my experience, oversubscription is *bad* for tasks where everything is loaded in memory, and good otherwise. 
# + # Performs C[i] = A[i] * B[i] where A[i], B[i], C[i] are matrices function test_multiply!(C, A, B) Threads.@threads for i in eachindex(C) id = Threads.threadid() mul!(C[id], A[id], B[id]) end end # simulate data Random.seed!(2020) A = [rand(100, 100) for _ in 1:100] B = [rand(100, 100) for _ in 1:100] C = [rand(100, 100) for _ in 1:100]; # 8 BLAS threads (default) BLAS.set_num_threads(8) @btime test_multiply!($C, $A, $B) # 1 BLAS thread BLAS.set_num_threads(1) @btime test_multiply!($C, $A, $B); # - # **Conclusion:** If you forgot to change BLAS threads (8 by default), your for loop is automatically **4 times** slower! # ### Test oversubscription on Intel i9 CPUs # # From what I heard, Intel's 9th generation hardware supports 2 threads/core seamlessly. So what happens if we use 16 threads on an i9 CPU with only 8 cores? versioninfo() # 16 Julia threads, 1 BLAS thread BLAS.set_num_threads(1) @btime test_multiply!($C, $A, $B) seconds=30; versioninfo() # 8 Julia threads, 1 BLAS thread BLAS.set_num_threads(1) @btime test_multiply!($C, $A, $B) seconds=30; # **Conclusion:** For matrix multiplication, using 2x number of threads than physical cores *does not* hurt, but it is also not faster. # ## Gotcha 4: Non-uniform tasks + static scheduler = bottleneck is singlethreaded # # `@threads for` currently employ a [static](https://docs.julialang.org/en/v1/base/multi-threading/#Base.Threads.@threads) scheduler (but I think more options is coming in near future). This means each thread 1 gets the first $x$ jobs, thread 2 gets the next $x$ jobs...etc. If different $i$ takes different times, then eventually your code will become single threaded. For now, I recommend [ThreadPools.jl](https://github.com/tro3/ThreadPools.jl) for getting a dynamically scheduled for loop. 
# static scheduler pool = logtforeach(x -> sleep(0.01*x), 1:240) plot(pool, title="Some thread takes much longer than others") # dynamic scheduler pool = logqforeach(x -> sleep(0.01*x), 1:240) plot(pool, title="Each thread works as it becomes available") # ## Gotcha 5: Memory Allocation Really Does Matter # # Chris Rackauckas explained in his [parallel computing course](https://github.com/mitmath/18337) that micro-allocations (i.e. optimize for memory and allocations) only matter for $\mathcal{O}(n)$ problems. **In Julia this is only true for serial code!!** # # The example code below is from https://mitmath.github.io/18337/lecture2/optimizing # # + # element-wise scaling O(n) function alloc_timer(n) A = rand(n,n) B = rand(n,n) C = rand(n,n) t1 = @belapsed $A .* $B t2 = @belapsed ($C .= $A .* $B) t1,t2 end BLAS.set_num_threads(1) # remember gotcha 3!!!!!! ns = 2 .^ (2:11) res = [alloc_timer(n) for n in ns] alloc = [x[1] for x in res] noalloc = [x[2] for x in res] plot(ns,alloc,label="=",xscale=:log10,yscale=:log10,legend=:bottomright, title="Micro-optimizations matter for BLAS1 (single-thread)", xlabel="matrix size", ylabel="time") plot!(ns,noalloc,label=".=") # + # matrix multiply O(n³) function alloc_timer(n) A = rand(n,n) B = rand(n,n) C = rand(n,n) t1 = @belapsed $A*$B t2 = @belapsed mul!($C,$A,$B) t1,t2 end BLAS.set_num_threads(1) # remember gotcha 3!!!!!! ns = 2 .^ (2:8) res = [alloc_timer(n) for n in ns] alloc = [x[1] for x in res] noalloc = [x[2] for x in res] plot(ns,alloc,label="*",xscale=:log10,yscale=:log10,legend=:bottomright, title="Micro-optimizations only matter for small matmuls (single-thread)", xlabel="matrix size", ylabel="time") plot!(ns,noalloc,label="mul!") # - # **Conclusion:** For single threaded programs, memory optimization matters only for BLAS 1 operations. 
# + # a bunch of matrix multiply in multithreaded function alloc_timer(n) A = [rand(n, n) for _ in 1:8] B = [rand(n, n) for _ in 1:8] C = [rand(n, n) for _ in 1:8] t1 = @belapsed begin Threads.@threads for i in eachindex($C) $C[i] = $A[i]* $B[i] end end t2 = @belapsed begin Threads.@threads for i in eachindex($C) mul!($C[i], $A[i], $B[i]) end end t1,t2 end BLAS.set_num_threads(1) # remember gotcha 3!!!!!! ns = 2 .^ (2:7) res = [alloc_timer(n) for n in ns] alloc = [x[1] for x in res] noalloc = [x[2] for x in res] plot(ns,alloc,label="*",xscale=:log10,yscale=:log10,legend=:bottomright, title="Micro-optimizations is important even for matmuls in multithreading", xlabel="matrix size", ylabel="time") plot!(ns,noalloc,label="mul!") # - # **Conclusion:** ANY extra allocation memory are completely detrimental to performance. Even a few KB which takes no time at all for single-threaded programs can completely kill performance once you use `@threads`. This is because garbage collection in Julia is (still) single-threaded, so you really don't want to trigger it. #
9,954
/Machine Learning with Python/Logistic Regression/Classification-Logistic-Regression-churn.ipynb
b63189dd54edfa20361949ee4341bf1aa0d61e6f
[]
no_license
builien2010/IBM-Data-Science-Professional-Certificate
https://github.com/builien2010/IBM-Data-Science-Professional-Certificate
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
36,939
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python # language: python # name: conda-env-python-py # --- # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <a href="https://www.bigdatauniversity.com"><img src="https://ibm.box.com/shared/static/cw2c7r3o20w9zn8gkecaeyjhgw3xdgbj.png" width=400 align="center"></a> # # <h1 align="center"><font size="5"> Logistic Regression with Python</font></h1> # - # In this notebook, you will learn Logistic Regression, and then, you'll create a model for a telecommunication company, to predict when its customers will leave for a competitor, so that they can take some action to retain the customers. # <h1>Table of contents</h1> # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <ol> # <li><a href="#about_dataset">About the dataset</a></li> # <li><a href="#preprocessing">Data pre-processing and selection</a></li> # <li><a href="#modeling">Modeling (Logistic Regression with Scikit-learn)</a></li> # <li><a href="#evaluation">Evaluation</a></li> # <li><a href="#practice">Practice</a></li> # </ol> # </div> # <br> # <hr> # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <a id="ref1"></a> # ## What is the difference between Linear and Logistic Regression? # # While Linear Regression is suited for estimating continuous values (e.g. estimating house price), it is not the best tool for predicting the class of an observed data point. In order to estimate the class of a data point, we need some sort of guidance on what would be the <b>most probable class</b> for that data point. For this, we use <b>Logistic Regression</b>. 
# # <div class="alert alert-success alertsuccess" style="margin-top: 20px"> # <font size = 3><strong>Recall linear regression:</strong></font> # <br> # <br> # As you know, <b>Linear regression</b> finds a function that relates a continuous dependent variable, <b>y</b>, to some predictors (independent variables $x_1$, $x_2$, etc.). For example, Simple linear regression assumes a function of the form: # <br><br> # $$ # y = \theta_0 + \theta_1 x_1 + \theta_2 x_2 + \cdots # $$ # <br> # and finds the values of parameters $\theta_0, \theta_1, \theta_2$, etc, where the term $\theta_0$ is the "intercept". It can be generally shown as: # <br><br> # $$ # ℎ_\theta(𝑥) = \theta^TX # $$ # <p></p> # # </div> # # Logistic Regression is a variation of Linear Regression, useful when the observed dependent variable, <b>y</b>, is categorical. It produces a formula that predicts the probability of the class label as a function of the independent variables. # # Logistic regression fits a special s-shaped curve by taking the linear regression and transforming the numeric estimate into a probability with the following function, which is called sigmoid function 𝜎: # # $$ # ℎ_\theta(𝑥) = \sigma({\theta^TX}) = \frac {e^{(\theta_0 + \theta_1 x_1 + \theta_2 x_2 +...)}}{1 + e^{(\theta_0 + \theta_1 x_1 + \theta_2 x_2 +\cdots)}} # $$ # Or: # $$ # ProbabilityOfaClass_1 = P(Y=1|X) = \sigma({\theta^TX}) = \frac{e^{\theta^TX}}{1+e^{\theta^TX}} # $$ # # In this equation, ${\theta^TX}$ is the regression result (the sum of the variables weighted by the coefficients), `exp` is the exponential function and $\sigma(\theta^TX)$ is the sigmoid or [logistic function](http://en.wikipedia.org/wiki/Logistic_function), also called logistic curve. It is a common "S" shape (sigmoid curve). 
# # So, briefly, Logistic Regression passes the input through the logistic/sigmoid but then treats the result as a probability: # # <img # src="https://ibm.box.com/shared/static/kgv9alcghmjcv97op4d6onkyxevk23b1.png" width="400" align="center"> # # # The objective of __Logistic Regression__ algorithm, is to find the best parameters θ, for $ℎ_\theta(𝑥)$ = $\sigma({\theta^TX})$, in such a way that the model best predicts the class of each case. # - # ### Customer churn with Logistic Regression # A telecommunications company is concerned about the number of customers leaving their land-line business for cable competitors. They need to understand who is leaving. Imagine that you are an analyst at this company and you have to find out who is leaving and why. # + [markdown] button=false new_sheet=false run_control={"read_only": false} # Lets first import required libraries: # + button=false new_sheet=false run_control={"read_only": false} import pandas as pd import pylab as pl import numpy as np import scipy.optimize as opt from sklearn import preprocessing # %matplotlib inline import matplotlib.pyplot as plt # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <h2 id="about_dataset">About the dataset</h2> # We will use a telecommunications dataset for predicting customer churn. This is a historical customer dataset where each row represents one customer. The data is relatively easy to understand, and you may uncover insights you can use immediately. Typically it is less expensive to keep customers than acquire new ones, so the focus of this analysis is to predict the customers who will stay with the company. # # # This data set provides information to help you predict what behavior will help you to retain customers. You can analyze all relevant customer data and develop focused customer retention programs. 
# # # # The dataset includes information about: # # - Customers who left within the last month – the column is called Churn # - Services that each customer has signed up for – phone, multiple lines, internet, online security, online backup, device protection, tech support, and streaming TV and movies # - Customer account information – how long they had been a customer, contract, payment method, paperless billing, monthly charges, and total charges # - Demographic info about customers – gender, age range, and if they have partners and dependents # # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Load the Telco Churn data # Telco Churn is a hypothetical data file that concerns a telecommunications company's efforts to reduce turnover in its customer base. Each case corresponds to a separate customer and it records various demographic and service usage information. Before you can work with the data, you must use the URL to get the ChurnData.csv. # # To download the data, we will use `!wget` to download it from IBM Object Storage. # + button=false new_sheet=false run_control={"read_only": false} #Click here and press Shift+Enter # !wget -O ChurnData.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/ChurnData.csv # - # __Did you know?__ When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC) # + [markdown] button=false new_sheet=false run_control={"read_only": false} # ### Load Data From CSV File # + button=false new_sheet=false run_control={"read_only": false} churn_df = pd.read_csv("ChurnData.csv") churn_df.head() # - # <h2 id="preprocessing">Data pre-processing and selection</h2> # Lets select some features for the modeling. 
Also we change the target data type to be integer, as it is a requirement by the skitlearn algorithm: churn_df = churn_df[['tenure', 'age', 'address', 'income', 'ed', 'employ', 'equip', 'callcard', 'wireless','churn']] churn_df['churn'] = churn_df['churn'].astype('int') churn_df.head() # + [markdown] button=true new_sheet=true run_control={"read_only": false} # ## Practice # How many rows and columns are in this dataset in total? What are the name of columns? # + button=false new_sheet=false run_control={"read_only": false} # write your code here churn_df.shape # - # Lets define X, and y for our dataset: X = np.asarray(churn_df[['tenure', 'age', 'address', 'income', 'ed', 'employ', 'equip']]) X[0:5] y = np.asarray(churn_df['churn']) y [0:5] # Also, we normalize the dataset: (0, 1) from sklearn import preprocessing X = preprocessing.StandardScaler().fit(X).transform(X) X[0:5] # ## Train/Test dataset # Okay, we split our dataset into train and test set: from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4) print ('Train set:', X_train.shape, y_train.shape) print ('Test set:', X_test.shape, y_test.shape) # <h2 id="modeling">Modeling (Logistic Regression with Scikit-learn)</h2> # Lets build our model using __LogisticRegression__ from Scikit-learn package. This function implements logistic regression and can use different numerical optimizers to find parameters, including ‘newton-cg’, ‘lbfgs’, ‘liblinear’, ‘sag’, ‘saga’ solvers. You can find extensive information about the pros and cons of these optimizers if you search it in internet. # # The version of Logistic Regression in Scikit-learn, support regularization. Regularization is a technique used to solve the overfitting problem in machine learning models. # __C__ parameter indicates __inverse of regularization strength__ which must be a positive float. Smaller values specify stronger regularization. 
# Now lets fit our model with train set: from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix LR = LogisticRegression(C=0.01, solver='liblinear').fit(X_train,y_train) # Now we can predict using our test set: yhat = LR.predict(X_test) yhat # __predict_proba__ returns estimates for all classes, ordered by the label of classes. So, the first column is the probability of class 1, P(Y=1|X), and second column is probability of class 0, P(Y=0|X): yhat_prob = LR.predict_proba(X_test) yhat_prob # <h2 id="evaluation">Evaluation</h2> # ### jaccard index # Lets try jaccard index for accuracy evaluation. we can define jaccard as the size of the intersection divided by the size of the union of two label sets. If the entire set of predicted labels for a sample strictly match with the true set of labels, then the subset accuracy is 1.0; otherwise it is 0.0. # # from sklearn.metrics import jaccard_similarity_score jaccard_similarity_score(y_test, yhat) # ### confusion matrix # Another way of looking at accuracy of classifier is to look at __confusion matrix__. from sklearn.metrics import classification_report, confusion_matrix import itertools def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. 
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') print(confusion_matrix(y_test, yhat, labels=[1,0])) # + # Compute confusion matrix cnf_matrix = confusion_matrix(y_test, yhat, labels=[1,0]) np.set_printoptions(precision=2) # Plot non-normalized confusion matrix plt.figure() plot_confusion_matrix(cnf_matrix, classes=['churn=1','churn=0'],normalize= False, title='Confusion matrix') # - # Look at first row. The first row is for customers whose actual churn value in test set is 1. # As you can calculate, out of 40 customers, the churn value of 15 of them is 1. # And out of these 15, the classifier correctly predicted 6 of them as 1, and 9 of them as 0. # # It means, for 6 customers, the actual churn value were 1 in test set, and classifier also correctly predicted those as 1. However, while the actual label of 9 customers were 1, the classifier predicted those as 0, which is not very good. We can consider it as error of the model for first row. # # What about the customers with churn value 0? Lets look at the second row. # It looks like there were 25 customers whom their churn value were 0. # # # The classifier correctly predicted 24 of them as 0, and one of them wrongly as 1. So, it has done a good job in predicting the customers with churn value 0. A good thing about confusion matrix is that shows the model’s ability to correctly predict or separate the classes. In specific case of binary classifier, such as this example, we can interpret these numbers as the count of true positives, false positives, true negatives, and false negatives. 
print (classification_report(y_test, yhat)) # Based on the count of each section, we can calculate precision and recall of each label: # # # - __Precision__ is a measure of the accuracy provided that a class label has been predicted. It is defined by: precision = TP / (TP + FP) # # - __Recall__ is true positive rate. It is defined as: Recall =  TP / (TP + FN) # # # So, we can calculate precision and recall of each class. # # __F1 score:__ # Now we are in the position to calculate the F1 scores for each label based on the precision and recall of that label. # # The F1 score is the harmonic average of the precision and recall, where an F1 score reaches its best value at 1 (perfect precision and recall) and worst at 0. It is a good way to show that a classifer has a good value for both recall and precision. # # # And finally, we can tell the average accuracy for this classifier is the average of the F1-score for both labels, which is 0.72 in our case. # ### log loss # Now, lets try __log loss__ for evaluation. In logistic regression, the output can be the probability of customer churn is yes (or equals to 1). This probability is a value between 0 and 1. # Log loss( Logarithmic loss) measures the performance of a classifier where the predicted output is a probability value between 0 and 1. # from sklearn.metrics import log_loss log_loss(y_test, yhat_prob) # <h2 id="practice">Practice</h2> # Try to build Logistic Regression model again for the same dataset, but this time, use different __solver__ and __regularization__ values? What is new __logLoss__ value? # + # write your code here # - # Double-click __here__ for the solution. 
# # <!-- Your answer is below: # # LR2 = LogisticRegression(C=0.01, solver='sag').fit(X_train,y_train) # yhat_prob2 = LR2.predict_proba(X_test) # print ("LogLoss: : %.2f" % log_loss(y_test, yhat_prob2)) # # --> # + [markdown] button=false new_sheet=false run_control={"read_only": false} # <h2>Want to learn more?</h2> # # IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="http://cocl.us/ML0101EN-SPSSModeler">SPSS Modeler</a> # # Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://cocl.us/ML0101EN_DSX">Watson Studio</a> # # <h3>Thanks for completing this lesson!</h3> # # <h4>Author: <a href="https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a></h4> # <p><a href="https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a>, PhD is a Data Scientist in IBM with a track record of developing enterprise level applications that substantially increases clients’ ability to turn data into actionable knowledge. He is a researcher in data mining field and expert in developing advanced analytic methods like machine learning and statistical modelling on large datasets.</p> # # <hr> # # <p>Copyright &copy; 2018 <a href="https://cocl.us/DX0108EN_CC">Cognitive Class</a>. 
This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.</p>
17,040
/Untitled.ipynb
dc9618873304b16f7d21897bd484504c5e628638
[]
no_license
jeakwon/abfreader
https://github.com/jeakwon/abfreader
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
71,125
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Get Data from Oanda # ### Example how we can contact Oanda API # API - class to handle APIRequests objects to access API endpoints. # # Examples # -------- # # :: # # # get a list of trades # from oandapyV20 import API # import oandapyV20.endpoints.trades as trades # # api = API(access_token="xxx") # accountID = "101-305-3091856-001" # # r = trades.TradesList(accountID) # # show the endpoint as it is constructed for this call # print("REQUEST:{}".format(r)) # rv = api.request(r) # print("RESPONSE:\n{}".format(json.dumps(rv, indent=2))) # # # Output:: # # REQUEST:v3/accounts/101-305-3091856-001/trades # RESPONSE: # "trades": [ # { # "financing": "0.0000", # "openTime": "2016-07-21T15:47:05.170212014Z", # "price": "10133.9", # "unrealizedPL": "8.0000", # "realizedPL": "0.0000", # "instrument": "DE30_EUR", # "state": "OPEN", # "initialUnits": "-10", # "currentUnits": "-10", # "id": "1032" # }, # } import pandas as pd from oandapyV20 import API import oandapyV20.endpoints.instruments as instruments from time import sleep import numpy as np import matplotlib.pyplot as plt # ## This function will contact Oanda API for 4 hours time fram from 2010 until 2019 def req_data(token, instruments_list, start_year, end_year): errors = 0 errors_list = [] for instrument in instruments_list: instrument_error = 0 price_list = [] months = [{'one':'01-01', 'two':'03-31'}, {'one':'04-01', 'two':'07-31'}, {'one':'08-01', 'two':'12-31'}] for price in range(start_year, end_year+1,1): for month in months: while True: try: time = [] _open = [] close = [] high = [] low = [] volume = [] _from = f"{price}-{month.get('one')}T00:00:00Z" end = f"{price}-{month.get('two')}T23:00:00Z" params= {"from":_from, "to":end, "granularity":"H4"} client = API(access_token=token, 
environment='live') r = instruments.InstrumentsCandles(instrument=instrument, params=params) cli = client.request(r).get('candles') for element in cli: price_list.append(element) for j in price_list: time.append(j.get('time')) _open.append(j.get('mid').get('o')) close.append(j.get('mid').get('c')) high.append(j.get('mid').get('h')) low.append(j.get('mid').get('l')) volume.append(j.get('volume')) except: instrument_error += 1 errors +=1 print('Server error occurred for fetching this data',list(zip([instrument],[_from],[end]))) errors_list.append(list(zip([instrument],[_from],[end]))) continue break print(f'request for {instrument} from {start_year} to {end_year}: complet with {instrument_error} errors!\n') data_zip = list(zip(time, close, _open, high, low, volume)) data = pd.DataFrame(data_zip, columns=[f'time_{instrument}', f'close_{instrument}', f'open_{instrument}', f'high_{instrument}', f'low_{instrument}',f'Volume_{instrument}']) data.to_csv(f'Dataset/{instrument}.csv', index=False) print(errors_list) print(f"Done! with {errors} server request errors") # ## Lets make a request # For this step we need to have an acitve account with Onada in order to get the Token otherwise we can not complet the request # ## Note you have to have an accunt with Onada in order to get the Token # # This function also allows us to request more then one insterment so I passed a list of insterments, and the reason why I do that, becasue simply in the financial world all these insterment do effect each other so for example if you ask any trader when EUR_CAD go up how EUR_USD do get effected, his answer will be many other traders may open a postion in EUR_USD. # # We may run into a server issue, but I was able to address this issue using while loop. However it good to track your request and see when the error occurred. 
inst_list = ['EUR_USD'] # + token=TOKEN req_data(token=token, start_year=2010, end_year=2019,instruments_list=inst_list) # - data = pd.read_csv('Dataset/EUR_USD.csv') data.head() # ### If we check the Volume we can see there is a lot of incorrect data entery a long with the rest of close, open, high, and low. So I am going to remove these values. for i in inst_list: clean_data = pd.read_csv(f'Dataset/{i}.csv') index = clean_data[clean_data[f'high_{i}'] == clean_data[f'low_{i}']].index clean_data = clean_data.drop(index=index, axis=0) clean_data.to_csv(f'Dataset/{i}.csv', index=False) print(i, 'clean data complet!') # ## If we check now data = pd.read_csv('Dataset/EUR_USD.csv') data.head() # # Technical Indicators # What Is a Technical Indicator? # Technical indicators are heuristic or mathematical calculations based on the price, volume, or open interest of a security or contract used by traders who follow technical analysis. # # By analyzing historical data, technical analysts use indicators to predict future price movements. Examples of common technical indicators include the Relative Strength Index, Money Flow Index, Stochastics, MACD and Bollinger Bands # ### How Technical Indicators Work # Technical analysis is a trading discipline employed to evaluate investments and identify trading opportunities by analyzing statistical trends gathered from trading activity, such as price movement and volume. Unlike fundamental analysts, who attempt to evaluate a security's intrinsic value based on financial or economic data, technical analysts focus on patterns of price movements, trading signals and various other analytical charting tools to evaluate a security's strength or weakness. # # Technical analysis can be used on any security with historical trading data. This includes stocks, futures, commodities, fixed-income, currencies, and other securities. 
# In this tutorial, we'll usually analyze stocks in our examples, but keep in mind that these concepts can be applied to any type of security. In fact, technical analysis is far more prevalent in commodities and forex markets, where traders focus on short-term price movements.

# ![spychart03072018-5bfd68b446e0fb00263a5b30.png](attachment:spychart03072018-5bfd68b446e0fb00263a5b30.png)

# ## Let's create technical indicators using our dataset

instrument = 'EUR_USD'
data = pd.read_csv('dataset/EUR_USD.csv', index_col=['time_EUR_USD'])

# ### For this task I am using pandas_ta, which provides over 80 different technical indicators

# +
import pandas_ta as ta

# if we look at what they provide using dir we can see they have the most
# common technical indicators in the industry
print(dir(ta))
# -

# ## This function performs technical analysis on our dataset

def ta_data(close, high, low, _open, volume, name, data):
    """Append ~60 pandas_ta technical-indicator columns to *data* and save to CSV.

    Parameters
    ----------
    close, high, low, _open, volume : pd.Series
        OHLCV columns of the instrument.
    name : str
        Output CSV path.
    data : pd.DataFrame
        Frame the indicator columns are added to (mutated in place,
        then written to *name* with the index included).
    """
    # --- trend / moving averages ---
    data[["Histogram", "MACD", "Signal"]] = ta.macd(close)        # MACD
    data['SMA_10'] = ta.sma(close)                                # SMA 10
    data['SMA_25'] = ta.sma(close, length=25)                     # SMA 25
    data['SMA_50'] = ta.sma(close, length=50)                     # SMA 50
    data['SMA_100'] = ta.sma(close, length=100)                   # SMA 100
    data['SMA_150'] = ta.sma(close, length=150)                   # SMA 150
    data['SMA_200'] = ta.sma(close, length=200)                   # SMA 200
    data['RSI'] = ta.rsi(close)                                   # RSI
    data[['ACCE_H', "ACCE_L", "ACCE_C"]] = ta.accbands(high, low, close, length=20)  # Acceleration Bands
    data['AD'] = ta.ad(high, low, close, volume)                  # AD
    data['ADOSC'] = ta.adosc(high, low, close, volume)            # Chaikin Oscillator
    data[['AMAT_LR_2', 'AMAT_SR_2']] = ta.amat(close)             # Archer Moving Averages Trends (AMAT)
    data['AO'] = ta.ao(high, low)                                 # Awesome Oscillator (AO)
    # NOTE: the 'OBV' column produced here is overwritten by ta.obv() further
    # down; kept to preserve the original column layout of the CSV.
    data[['OBV', 'OBV_min_2', 'OBV_max_2', 'OBV_EMA_2', 'OBV_EMA_4',
          'AOBV_LR_2', 'AOBV_SR_2']] = ta.aobv(close, volume)     # Archer On Balance Volume (AOBV)
    data['APO'] = ta.apo(close)                                   # Absolute Price Oscillator (APO)
    data[['AROOND_14', 'AROONU_14']] = ta.aroon(close, length=14) # AROON
    data[['BBL_20', 'BBM_20', 'BBU_20']] = ta.bbands(close)       # Bollinger Bands (BBANDS)
    data['BOP'] = ta.bop(_open, high, low, close)                 # Balance of Power (BOP)
    data['CCI'] = ta.cci(high, low, close)                        # Commodity Channel Index (CCI)
    data['CG'] = ta.cg(close)                                     # Center of Gravity (CG)
    data['CMF_20'] = ta.cmf(high, low, close, volume)             # Chaikin Money Flow (CMF)
    data['CMO_9'] = ta.cmo(close, length=9)                       # Chande Momentum Oscillator (CMO)
    data['COPPOCK'] = ta.coppock(close)                           # Coppock Curve (COPC)
    data['DEMA_9'] = ta.dema(close, length=9)                     # Double EMA (DEMA)
    data[['DCL_10_20', 'DCM_10_20', 'DCU_10_20']] = ta.donchian(close)  # Donchian Channels (DC)
    data['DPO_21'] = ta.dpo(close, length=21, centered=False)     # Detrended Price Oscillator (DPO)
    data['EFI_13'] = ta.efi(close, volume, length=13)             # Elder's Force Index (EFI)
    data['EMA_12'] = ta.ema(close, length=12)                     # EMA 12
    data['EMA_26'] = ta.ema(close, length=26)                     # EMA 26
    data['EMA_50'] = ta.ema(close, length=50)                     # EMA 50
    data['EMA_200'] = ta.ema(close, length=200)                   # EMA 200
    data['EOM_14'] = ta.eom(high, low, close, volume)             # Ease of Movement (EOM)
    data['FWMA_14'] = ta.fwma(close, length=14)                   # Fibonacci's Weighted Moving Average (FWMA)
    data['HMA_9'] = ta.hma(close, length=9)                       # Hull Moving Average (HMA)
    data[['KCL_20', 'KCB_20', 'KCU_20']] = ta.kc(high, low, close, scalar=1)  # Keltner Channels (KC)
    data[['KST_10_15_20_30_10_10_10_15', 'KSTS_9']] = ta.kst(close)  # Know Sure Thing (KST)
    data['Kurtosis'] = ta.kurtosis(close, length=21)              # Kurtosis
    # --- linear regression family ---
    data['linreg'] = ta.linreg(close, length=100)                 # Linear Regression Moving Average
    data['linreg_cor'] = ta.linreg(close, length=100, r=True)     # correlation
    data['linreg_int'] = ta.linreg(close, length=100, intercept=True)  # intercept
    data['linreg_deg'] = ta.linreg(close, length=100, degrees=True)    # degrees
    data['linreg_ang'] = ta.linreg(close, length=100, angle=True)      # angle
    data['linreg_slope'] = ta.linreg(close, length=100, slope=True)    # slope
    data['linreg_tsf'] = ta.linreg(close, length=100, tsf=True)        # forecast value
    # --- momentum / volume / volatility ---
    data['log_return'] = ta.log_return(close)                     # log return
    data['MI'] = ta.massi(high, low, slow=9)                      # Mass Index
    data['median'] = ta.median(close, length=20)                  # rolling median
    data['MOM'] = ta.mom(close, length=10)                        # Momentum (MOM)
    data['OBV'] = ta.obv(close, volume)                           # On Balance Volume (OBV)
    data['NVI'] = ta.nvi(close, volume)                           # Negative Volume Index (NVI)
    data['PVI'] = ta.pvi(close, volume)                           # Positive Volume Index (PVI)
    data['PVT'] = ta.pvt(close, volume)                           # Price Volume Trend (PVT)
    data['Qstick'] = ta.qstick(_open, close, length=14)           # Q Stick
    data['ROC'] = ta.roc(close, length=9)                         # Rate of Change (ROC)
    data[['RVI_10_4', 'RVIS_10_4']] = ta.rvi(_open, high, low, close, length=10)  # Relative Vigor Index (RVI)
    data['skew'] = ta.skew(close, length=21)                      # skewness
    data['stdev'] = ta.stdev(close, length=20)                    # standard deviation
    data[['STOCHF_3', 'STOCH_14', 'STOCH_3']] = ta.stoch(high, low, close, slow_k=14, fast_k=3)  # Stochastic
    data['T3'] = ta.t3(close, length=8)                           # Tim Tillson's Moving Average (T3)
    data['TEMA'] = ta.tema(close, length=9)                       # Triple EMA (TEMA)
    data['TRIX'] = ta.trix(close)                                 # Trix (TRIX)
    data['TSI'] = ta.tsi(close)                                   # True Strength Index (TSI)
    data['UO'] = ta.uo(high, low, close)                          # Ultimate Oscillator (UO)
    data['VWAP'] = ta.vwap(high, low, close, volume)              # Volume Weighted Average Price (VWAP)
    data['VWMA'] = ta.vwma(close, volume, length=20)              # Volume Weighted Moving Average (VWMA)
    data['WILLR'] = ta.willr(high, low, close, length=14)         # William's Percent R (WILLR)
    # (removed a no-op `pd.concat([data], axis=1)` that just copied the frame)
    data.to_csv(name)

# ## Let's run our function and plot our data with technical indicators

# +
high = data[f'high_{instrument}']
low = data[f'low_{instrument}']
close = data[f'close_{instrument}']
volume = data[f'Volume_{instrument}']
_open = data[f'open_{instrument}']
# time = data[f'time_{instrument}']
name = f'dataset/ta_{instrument}.csv'
ta_data(close=close, high=high, low=low, _open=_open, volume=volume, name=name, data=data)
# -

# ## After running our function let's see the result

# +
data = pd.read_csv('dataset/ta_EUR_USD.csv', parse_dates=['time_EUR_USD'])
data.head()
# -

# # Data exploration
# I have plotted every feature, because in the financial world each of these indicators matters for making a decision on when to buy (go long) or when to sell (go short).

n = 100                                   # number of candles to show
time = data['time_EUR_USD'].tail(n)       # time axis for the plots
plt.style.use('seaborn')

# ## Simple Moving Averages 10, 25, 50, 100, 150, 200
# Simple Moving Average (SMA)
#
# The Simple Moving Average is the classic moving average that is the equally
# weighted average over n periods.
#
# Sources: https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/simple-moving-average-sma/

plt.figure(figsize=(20, 12))
plt.plot_date(time, data['SMA_10'].tail(n), linestyle="solid", marker=None, label="SMA = 10")
plt.plot_date(time, data['SMA_25'].tail(n), linestyle="solid", marker=None, label="SMA = 25")
plt.plot_date(time, data['SMA_50'].tail(n), linestyle="solid", marker=None, label="SMA = 50")
plt.plot_date(time, data['SMA_100'].tail(n), linestyle="solid", marker=None, label="SMA = 100")
plt.plot_date(time, data['SMA_150'].tail(n), linestyle="solid", marker=None, label="SMA = 150")
plt.plot_date(time, data['SMA_200'].tail(n), linestyle="solid", marker=None, label="SMA = 200")
plt.plot_date(time, data[f'close_{instrument}'].tail(n), linestyle="--", marker=None, label="Price")
plt.xlabel('Time in Hour', fontsize=30)
plt.ylabel('Price', fontsize=30)
plt.title(f"Last {n} Hours", fontsize=40)
plt.legend(fontsize=25)
plt.tight_layout()
plt.show()

# # MACD
# Moving Average Convergence Divergence (MACD)
#
# The MACD is a popular indicator that is used to identify a security's trend.
# While APO and MACD are the same calculation, MACD also returns two more series
# called Signal and Histogram. The Signal is an EMA of MACD and the Histogram is
# the difference of MACD and Signal.
#
# Sources: https://www.tradingview.com/wiki/MACD_(Moving_Average_Convergence/Divergence)


def _plot_indicator(columns, figsize=(20, 5), with_price=False):
    """Plot the last `n` candles of the given indicator columns.

    Uses the module-level `time`, `data`, `n` and `instrument` set up in
    the exploration cell above. When *with_price* is True the close price
    is overlaid as a dashed line for reference.
    """
    plt.figure(figsize=figsize)
    for col in columns:
        plt.plot_date(time, data[col].tail(n), linestyle="solid", marker=None)
    if with_price:
        plt.plot_date(time, data[f'close_{instrument}'].tail(n),
                      linestyle="--", marker=None, label="Price")
    plt.tight_layout()
    plt.show()


_plot_indicator(["Histogram", "MACD", "Signal"])

# # RSI
# The Relative Strength Index is a popular momentum oscillator used to measure the velocity as well as the magnitude of directional price movements.
# Sources: https://www.tradingview.com/wiki/Relative_Strength_Index_(RSI)

_plot_indicator(["RSI"])

# # Acceleration Bands
# Acceleration Bands measure volatility around price, similar in spirit to other band indicators.
# Sources: https://www.tradingview.com/wiki/Average_True_Range_(ATR)

_plot_indicator(["ACCE_H", "ACCE_L", "ACCE_C"], figsize=(20, 12), with_price=True)

# # Accumulation/Distribution (AD)
# Accumulation/Distribution utilizes the relative position of the close to its High-Low range with volume, then cumulates it.
# Sources: https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/accumulationdistribution-ad/

_plot_indicator(["AD"])

# # Accumulation/Distribution Oscillator (Chaikin Oscillator)
# The Accumulation/Distribution Oscillator utilizes Accumulation/Distribution and treats it similarly to MACD or APO.
# Sources: https://www.investopedia.com/articles/active-trading/031914/understanding-chaikin-oscillator.asp

_plot_indicator(["ADOSC"])

# # Awesome Oscillator (AO)
# The Awesome Oscillator measures a security's momentum. AO is generally used to affirm trends or to anticipate possible reversals.
# Sources: https://www.tradingview.com/wiki/Awesome_Oscillator_(AO)

_plot_indicator(["AO"])

# # Archer On Balance Volume (AOBV)

_plot_indicator(["OBV", "OBV_min_2", "OBV_max_2", "OBV_EMA_2", "OBV_EMA_4"])

# # Absolute Price Oscillator (APO)
# The Absolute Price Oscillator measures momentum as the difference of two EMAs of different periods. Note: APO and MACD lines are equivalent.
# Sources: https://www.investopedia.com/terms/p/ppo.asp

_plot_indicator(["APO"])

# # Aroon
# Aroon attempts to identify if a security is trending and how strongly.
# Sources: https://www.tradingview.com/wiki/Aroon

_plot_indicator(["AROOND_14", "AROONU_14"])

# # Bollinger Bands
# A popular volatility indicator.
# Sources: https://www.tradingview.com/wiki/Bollinger_Bands_(BB)

_plot_indicator(["BBL_20", "BBM_20", "BBU_20"], figsize=(20, 10), with_price=True)

# # Balance of Power (BOP)
# Balance of Power measures the market strength of buyers against sellers.
# Sources: http://www.worden.com/TeleChartHelp/Content/Indicators/Balance_of_Power.htm

_plot_indicator(["BOP"])

# # Commodity Channel Index (CCI)
# A momentum oscillator used primarily to identify overbought and oversold levels relative to a mean.
# Sources: https://www.tradingview.com/wiki/Commodity_Channel_Index_(CCI)

_plot_indicator(["CCI"])

# # Center of Gravity (CG)
# The Center of Gravity Indicator by John Ehlers attempts to identify turning points while exhibiting zero lag and smoothing.
# Sources: http://www.mesasoftware.com/papers/TheCGOscillator.pdf

_plot_indicator(["CG"])

# # Chaikin Money Flow (CMF)
# Chaikin Money Flow measures the amount of money flow volume over a specific period in conjunction with Accumulation/Distribution.
# Sources: https://www.tradingview.com/wiki/Chaikin_Money_Flow_(CMF)

_plot_indicator(["CMF_20"])

# # Chande Momentum Oscillator (CMO)
# Attempts to capture the momentum of an asset, with overbought at 50 and oversold at -50.
# Sources: https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/chande-momentum-oscillator-cmo/

_plot_indicator(["CMO_9"])

# # Coppock Curve (COPC)
# The Coppock Curve (originally the "Trendex Model") is a momentum indicator designed for a monthly time scale, though a daily calculation can be made by converting the periods accordingly.
# Sources: https://en.wikipedia.org/wiki/Coppock_curve

_plot_indicator(["COPPOCK"])

# # Double Exponential Moving Average (DEMA)
# The Double Exponential Moving Average attempts to produce a smoother, less lagging average than a plain EMA by applying the EMA calculation twice.
# Sources: https://www.investopedia.com/terms/d/double-exponential-moving-average.asp

_plot_indicator(["DEMA_9"], figsize=(20, 10), with_price=True)

# # Donchian Channels (DC)
# Donchian Channels measure volatility, similar to Bollinger Bands and Keltner Channels.
# Sources: https://www.tradingview.com/wiki/Donchian_Channels_(DC)

_plot_indicator(["DCL_10_20", "DCM_10_20", "DCU_10_20"], figsize=(20, 10), with_price=True)

# # Elder's Force Index (EFI)
# Elder's Force Index measures the power behind a price movement using price and volume, as well as potential reversals and price corrections.
# Sources: https://www.tradingview.com/wiki/Elder%27s_Force_Index_(EFI)

_plot_indicator(["EFI_13"])

# # Exponential Moving Average (EMA)
# The Exponential Moving Average is a more responsive moving average than the SMA; the weights are determined by alpha, which is proportional to its length.
# Sources: https://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:moving_averages

_plot_indicator(["EMA_12", "EMA_26", "EMA_50", "EMA_200"], figsize=(20, 10), with_price=True)

# # Ease of Movement (EOM)
# Ease of Movement is a volume-based oscillator designed to measure the relationship between price and volume, fluctuating across a zero line.
# Sources: https://www.tradingview.com/wiki/Ease_of_Movement_(EOM)

_plot_indicator(["EOM_14"])

# # Fibonacci's Weighted Moving Average (FWMA)
# A weighted moving average whose weights are based on the Fibonacci sequence.
# Sources: pandas_ta documentation (fwma)

_plot_indicator(["FWMA_14"], figsize=(20, 10), with_price=True)

# # Hull Moving Average (HMA)
# The Hull Moving Average attempts to reduce or remove lag in moving averages.
# Sources: https://alanhull.com/hull-moving-average

_plot_indicator(["HMA_9"], figsize=(20, 10), with_price=True)

# # Kaufman's Adaptive Moving Average (KAMA)
# _plot_indicator(["KAMA_21"], figsize=(20, 10), with_price=True)

# # Keltner Channels (KC)
# A popular volatility indicator similar to Bollinger Bands and Donchian Channels.
# Sources: https://www.tradingview.com/wiki/Keltner_Channels_(KC)

_plot_indicator(["KCL_20", "KCB_20", "KCU_20"], figsize=(20, 10), with_price=True)

# # Know Sure Thing (KST)
# The 'Know Sure Thing' is a momentum-based oscillator based on ROC.
# Sources: https://www.tradingview.com/wiki/Know_Sure_Thing_(KST)

_plot_indicator(["KST_10_15_20_30_10_10_10_15", "KSTS_9"])

# # Kurtosis

_plot_indicator(["Kurtosis"])

# # Linear Regression Moving Average (linreg)

_plot_indicator(["linreg", "linreg_tsf"], figsize=(20, 10), with_price=True)

# # Log Return
# Calculates the logarithmic return of a Series.
# Sources: https://stackoverflow.com/questions/31287552/logarithmic-returns-in-pandas-dataframe

_plot_indicator(["log_return"])

# # Mass Index (MASSI)
# The Mass Index is a non-directional volatility indicator that utilizes the High-Low range to identify trend reversals based on range expansions.
# Sources: https://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:mass_index

_plot_indicator(["MI"])

# # Median
# Rolling median over 'n' periods — a sibling of the Simple Moving Average.
# Sources: https://www.incrediblecharts.com/indicators/median_price.php

_plot_indicator(["median"], figsize=(20, 10), with_price=True)

# # Momentum (MOM)
# Momentum measures a security's speed (or strength) of movement — simply the change in price.
# Sources: http://www.onlinetradingconcepts.com/TechnicalAnalysis/Momentum.html

_plot_indicator(["MOM"])

# # On Balance Volume (OBV)
# On Balance Volume is a cumulative indicator to measure buying and selling pressure.
# Sources: https://www.tradingview.com/wiki/On_Balance_Volume_(OBV)

_plot_indicator(["OBV"])

# # Negative Volume Index (NVI)
# A cumulative indicator that uses volume change in an attempt to identify where smart money is active.
# Sources: https://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:negative_volume_inde

_plot_indicator(["NVI"])

# # Positive Volume Index (PVI)
# A cumulative indicator that uses volume change to identify where smart money is active; used in conjunction with NVI.
# Sources: https://www.investopedia.com/terms/p/pvi.asp

_plot_indicator(["PVI"])

# # Price Volume Trend (PVT)
# The Price-Volume Trend utilizes the Rate of Change with volume and its cumulative values to determine money flow.
# Sources: https://www.tradingview.com/wiki/Price_Volume_Trend_(PVT)

_plot_indicator(["PVT"])

# # Q Stick
# The Q Stick indicator, developed by Tushar Chande, attempts to quantify and identify trends in candlestick charts.
# Sources: https://library.tradingtechnologies.com/trade/chrt-ti-qstick.html

_plot_indicator(["Qstick"])

# # Rate of Change (ROC)
# A pure momentum oscillator measuring the percent change in price versus the price 'n' periods ago.
# Sources: https://www.tradingview.com/wiki/Rate_of_Change_(ROC)

_plot_indicator(["ROC"])

# # Relative Vigor Index (RVI)
# The Relative Vigor Index measures the strength of a trend by comparing a security's closing price to its trading range.
# Sources: https://www.investopedia.com/terms/r/relative_vigor_index.asp

_plot_indicator(["RVI_10_4", "RVIS_10_4"])

# # Skewness

_plot_indicator(["skew"])

# # Standard Deviation

_plot_indicator(["stdev"])

# # Stochastic (STOCH)
# A range-bound momentum indicator displaying the location of the close relative to the high-low range over a period.
# Sources: https://www.tradingview.com/wiki/Stochastic_(STOCH)

_plot_indicator(["STOCHF_3", "STOCH_14", "STOCH_3"])

# # Tim Tillson's T3 Moving Average (T3)
# Considered a smoother and more responsive moving average relative to other moving averages.
# Sources: http://www.binarytribune.com/forex-trading-indicators/t3-moving-average-indicator/

_plot_indicator(["T3"], figsize=(20, 10), with_price=True)

# # Triple Exponential Moving Average (TEMA)
# A less laggy Exponential Moving Average.
# Sources: https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/triple-exponential-moving-average-tema/

_plot_indicator(["TEMA"], figsize=(20, 10), with_price=True)

# # Trix (TRIX)
# TRIX is a momentum oscillator used to identify divergences.
# Sources: https://www.tradingview.com/wiki/TRIX

_plot_indicator(["TRIX"])

# # True Strength Index (TSI)
# A momentum indicator used to identify short-term swings in the direction of the trend, and overbought/oversold conditions.
# Sources: https://www.investopedia.com/terms/t/tsi.asp

_plot_indicator(["TSI"])

# # Ultimate Oscillator (UO)
# A momentum indicator over three different periods that attempts to correct false divergence trading signals.
# Sources: https://www.tradingview.com/wiki/Ultimate_Oscillator_(UO)

_plot_indicator(["UO"])

# # Volume Weighted Average Price (VWAP)
# Measures the average typical price by volume; typically used with intraday charts to identify general direction.
# Sources: https://www.tradingview.com/wiki/Volume_Weighted_Average_Price_(VWAP)

_plot_indicator(["VWAP"])

# # Volume Weighted Moving Average (VWMA)
# Sources: https://www.motivewave.com/studies/volume_weighted_moving_average.htm

_plot_indicator(["VWMA"], figsize=(20, 10), with_price=True)

# # William's Percent R (WILLR)
# William's Percent R is a momentum oscillator similar to the RSI that attempts to identify overbought and oversold conditions.
# # Sources: https://www.tradingview.com/wiki/Williams_%25R_(%25R) plt.figure(figsize=(20,5)) plt.plot_date(time, data["WILLR"].tail(n), linestyle="solid", marker=None) plt.tight_layout() plt.show() # ## Now we have a dataset that we can work with import tensorflow as tf from sklearn.preprocessing import MinMaxScaler from tensorflow import keras from tensorflow.keras.callbacks import EarlyStopping data = pd.read_csv('dataset/ta_EUR_USD.csv', index_col=['time_EUR_USD']) data # ## Lets check for missing values for i in data.columns: print({i:{'Missing Values':data.isna().sum()[i]}}) data.dropna(inplace=True) # ## We can clean the data by droping the missing values using pandas for i in data.columns: print({i:{'Missing Values':data.isna().sum()[i]}}) # ## Let's standardize the data. # It is important to scale features before training a neural network. Scaling the data is common way of doing this scaling. # + scale_target = MinMaxScaler() target = data['close_EUR_USD'] target = scale_target.fit_transform(target.values.reshape(-1, 1)) scale_data = MinMaxScaler(feature_range=(0, 1)) data = scale_data.fit_transform(data.values) data # - # # Multi-Step model # In a multi-step prediction model, given a past history, the model needs to learn to predict a range of future values. # For the multi-step model, the training data consists of recordings over the past five days sampled every four hour. However, here, the model needs to learn to predict the EUR_USD Rats for the next 8 hours. Since an obversation is taken every 4 hours, the output is 2 predictions. 
training_data = 10000 def multivariate_data(dataset, target, start_index, end_index, history_size, target_size, step, single_step=False): data = [] labels = [] start_index = start_index + history_size if end_index is None: end_index = len(dataset) - target_size for i in range(start_index, end_index): indices = range(i-history_size, i, step) data.append(dataset[indices]) labels.append(target[i:i+target_size]) return np.array(data), np.array(labels) # + past_history = 120 future_target = 2 STEP = 1 x_train, y_train = multivariate_data(data, data[:, 1], 0, training_data, past_history, future_target, STEP) x_test, y_test = multivariate_data(data, data[:, 1], training_data, None, past_history, future_target, STEP) # - # # Recurrent neural network # A Recurrent Neural Network (RNN) is a type of neural network well-suited to time series data. RNNs process a time series step-by-step, maintaining an internal state summarizing the information they've seen so far. # In this project I will use a specialized RNN layer called Long Short Term Memory (LSTM) # ## Let's now use tf.data to shuffle, batch, and cache the dataset. 
# + BATCH_SIZE = 256 BUFFER_SIZE = 10000 train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_data = train_data.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat() val_data = tf.data.Dataset.from_tensor_slices((x_test, y_test)) val_data = val_data.batch(BATCH_SIZE).repeat() # - # This an example of how the data looks like # ![time_series.png](attachment:time_series.png) # # Lets Build the Model # + multi_step_model = tf.keras.models.Sequential() multi_step_model.add(tf.keras.layers.LSTM(32, return_sequences=True,input_shape=x_train.shape[-2:])) multi_step_model.add(tf.keras.layers.LSTM(16,activation='relu')) multi_step_model.add(tf.keras.layers.Dense(2)) multi_step_model.compile(optimizer=tf.keras.optimizers.RMSprop(clipvalue=1.0) ,loss='mae', metrics=['accuracy']) earlystop_callback = EarlyStopping( monitor='val_loss', min_delta=0.001, patience=3) # - # ### Note I am using EarlyStopping EPOCHS = 10 EVALUATION = 2000 multi_step_history = multi_step_model.fit(train_data, epochs=EPOCHS, validation_data=val_data, steps_per_epoch=EVALUATION, validation_steps=50, callbacks=[earlystop_callback]) multi_step_model.summary() # ## Plot the loss and val_loss plt.figure(figsize=(20,10)) plt.plot(multi_step_history.history['loss'][-8:], label='train') plt.plot(multi_step_history.history['val_loss'][-8:], label='validation') plt.legend(fontsize=40) plt.grid() plt.tight_layout() plt.show() # ## Plotting a sample data-point. 
def create_time_steps(length): time_steps = [] for i in range(-length, 0, 1): time_steps.append(i) return time_steps def multi_step_plot(history, true_future, prediction): plt.figure(figsize=(12, 6)) num_in = create_time_steps(len(history)) num_out = len(true_future) plt.plot(num_in, np.array(history[:, 1]), label='History') plt.plot(np.arange(num_out)/STEP, np.array(true_future), 'bo',label='True Future') if prediction.any(): plt.plot(np.arange(num_out)/STEP, np.array(prediction), 'ro',label='Predicted Future') plt.grid() plt.tight_layout() plt.legend(loc='upper left') plt.show() for x, y in train_data.take(4): multi_step_plot(x[0], y[0], multi_step_model.predict(x)[0]) # # Features Selection from feature_selector import FeatureSelector # + train = pd.read_csv('dataset/ta_EUR_USD.csv', index_col=['time_EUR_USD']) train_labels = train['close_EUR_USD'] train = train.drop(columns = ['close_EUR_USD']) train.head() # - fs = FeatureSelector(data = train, labels = train_labels) # # 1. Collinear (highly correlated) Features # This method finds pairs of collinear features based on the Pearson correlation coefficient. For each pair above the specified threshold (in terms of absolute value), it identifies one of the variables to be removed. We need to pass in a `correlation_threshold`. # # This method is based on code found at https://chrisalbon.com/machine_learning/feature_selection/drop_highly_correlated_features/ # # For each pair, the feature that will be removed is the one that comes last in terms of the column ordering in the dataframe. (This method does not one-hot encode the data beforehand unless `one_hot=True`. Therefore correlations are only calculated between numeric columns) fs.identify_collinear(correlation_threshold=0.975) correlated_features = fs.ops['collinear'] correlated_features[:5] # We can view a heatmap of the correlations above the threhold. The features which will be dropped are on the x-axis. 
fs.plot_collinear() # To plot all of the correlations in the data, we can pass in `plot_all = True` to the `plot_collinear` function. fs.plot_collinear(plot_all=True) fs.identify_collinear(correlation_threshold=0.98) fs.plot_collinear() # To view the details of the corelations above the threshold, we access the `record_collinear` attribute which is a dataframe. The `drop_feature` will be removed and for each feature that will be removed, there may be several correlations it has with the `corr_feature` that are above the `correlation_threshold`. fs.record_collinear.head() # # 2. Zero Importance Features # This method relies on a machine learning model to identify features to remove. It therefore requires a supervised learning problem with labels. The method works by finding feature importances using a gradient boosting machine implemented in the [LightGBM library](http://lightgbm.readthedocs.io/en/latest/Quick-Start.html). # # To reduce variance in the calculated feature importances, the model is trained a default 10 times. The model is also by default trained with early stopping using a validation set (15% of the training data) to identify the optimal number of estimators to train. The following parameters can be passed to the `identify_zero_importance` method: # # * `task`: either `classification` or `regression`. The metric and labels must match with the task # * `eval_metric`: the metric used for early stopping (for example `auc` for classification or `l2` for regression). To see a list of available metrics, refer to the [LightGBM docs](http://testlightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters) # * `n_iterations`: number of training runs. The feature importances are averaged over the training runs (default = 10) # * `early_stopping`: whether to use early stopping when training the model (default = True). 
[Early stopping](https://en.wikipedia.org/wiki/Early_stopping) stops training estimators (decision trees) when the performance on a validation set no longer decreases for a specified number of estimators (100 by default in this implementation). Early stopping is a form of regularization used to prevent overfitting to training data # # The data is first one-hot encoded for use in the model. This means that some of the zero importance features may be created from one-hot encoding. To view the one-hot encoded columns, we can access the `one_hot_features` of the `FeatureSelector`. # # __Note of caution__: in contrast to the other methods, the feature imporances from a model are non-deterministic (have a little randomness). The results of running this method can change each time it is run. fs.identify_zero_importance(task = 'regression', eval_metric = 'auc', n_iterations = 10, early_stopping = False) # Running the gradient boosting model requires one hot encoding the features. These features are saved in the `one_hot_features` attribute of the `FeatureSelector`. The original features are saved in the `base_features`. one_hot_features = fs.one_hot_features base_features = fs.base_features print('There are %d original features' % len(base_features)) print('There are %d one-hot features' % len(one_hot_features)) # The `data` attribute of the `FeatureSelector` holds the original dataframe. After one-hot encoding, the `data_all` attribute holds the original data plus the one-hot encoded features. # ### Plot Feature Importances # # # The feature importance plot using `plot_feature_importances` will show us the `plot_n` most important features (on a normalized scale where the features sum to 1). It also shows us the cumulative feature importance versus the number of features. # # When we plot the feature importances, we can pass in a `threshold` which identifies the number of features required to reach a specified cumulative feature importance. 
For example, `threshold = 0.99` will tell us the number of features needed to account for 99% of the total importance. fs.plot_feature_importances(threshold = 0.99, plot_n = 20) # All of the feature importances are accessible in the `feature_importances` attribute of the `FeatureSelector` fs.feature_importances.head(10) # # 3. Low Importance Features # This method builds off the feature importances from the gradient boosting machine (`identify_zero_importance` must be run first) by finding the lowest importance features not needed to reach a specified cumulative total feature importance. For example, if we pass in 0.99, this will find the lowest important features that are not needed to reach 99% of the total feature importance. # # When using this method, we must have already run `identify_zero_importance` and need to pass in a `cumulative_importance` that accounts for that fraction of total feature importance. # # __Note of caution__: this method builds on the gradient boosting model features importances and again is non-deterministic. I advise running these two methods several times with varying parameters and testing each resulting set of features rather than picking one number and sticking to it. fs.identify_low_importance(cumulative_importance = 0.98) # The low importance features to remove are those that do not contribute to the specified cumulative importance. These are also available in the `ops` dictionary. low_importance_features = fs.ops['low_importance'] low_importance_features[:] # # Removing Features # # Once we have identified the features to remove, we have a number of ways to drop the features. We can access any of the feature lists in the `removal_ops` dictionary and remove the columns manually. 
We also can use the `remove` method, passing in the methods that identified the features we want to remove, or simply reload the data and drop the columns using pandas methods data = pd.read_csv('dataset/ta_EUR_USD.csv', index_col=['time_EUR_USD']) data = data.drop(low_importance_features[:], axis=1) data # ## Now we can run our model again but this time without these features # Repeat the same steps from building the model data.dropna(inplace=True) # + scale_target = MinMaxScaler() target = data['close_EUR_USD'] target = scale_target.fit_transform(target.values.reshape(-1, 1)) scale_data = MinMaxScaler(feature_range=(0, 1)) data = scale_data.fit_transform(data.values) data # + past_history = 120 future_target = 2 STEP = 1 x_train, y_train = multivariate_data(data, data[:, 1], 0, training_data, past_history, future_target, STEP) x_test, y_test = multivariate_data(data, data[:, 1], training_data, None, past_history, future_target, STEP) # + BATCH_SIZE = 256 BUFFER_SIZE = 10000 train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_data = train_data.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat() val_data = tf.data.Dataset.from_tensor_slices((x_test, y_test)) val_data = val_data.batch(BATCH_SIZE).repeat() # + multi_step_model = tf.keras.models.Sequential() multi_step_model.add(tf.keras.layers.LSTM(32, return_sequences=True,input_shape=x_train.shape[-2:])) multi_step_model.add(tf.keras.layers.LSTM(16,activation='relu')) multi_step_model.add(tf.keras.layers.Dense(2)) multi_step_model.compile(optimizer=tf.keras.optimizers.RMSprop(clipvalue=1.0) ,loss='mae', metrics=['accuracy']) earlystop_callback = EarlyStopping( monitor='val_loss', min_delta=0.001, patience=3) # - EPOCHS = 10 EVALUATION = 2000 multi_step_history = multi_step_model.fit(train_data, epochs=EPOCHS, validation_data=val_data, steps_per_epoch=EVALUATION, validation_steps=50, callbacks=[earlystop_callback]) multi_step_model.summary() plt.figure(figsize=(20,10)) 
plt.plot(multi_step_history.history['loss'][-9:], label='train') plt.plot(multi_step_history.history['val_loss'][-9:], label='validation') plt.legend(fontsize=40) plt.grid() plt.tight_layout() plt.show() for x, y in train_data.take(3): multi_step_plot(x[0], y[0], multi_step_model.predict(x)[0])
55,277
/homework/Day_005_HW.ipynb
0223bbd3b0ffbdf65b82507270e6b3d89d457500
[]
no_license
tsungyihuang/2nd-ML100Days
https://github.com/tsungyihuang/2nd-ML100Days
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
20,248
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Import 需要的套件 import os import numpy as np import pandas as pd # 設定 data_path dir_data = './data/Part01' # - f_app_train = os.path.join(dir_data, 'application_train.csv') app_train = pd.read_csv(f_app_train) import matplotlib.pyplot as plt # %matplotlib inline # ## 練習時間 # 觀察有興趣的欄位的資料分佈,並嘗試找出有趣的訊息 # #### Eg # - 計算任意欄位的平均數及標準差 # - 畫出任意欄位的[直方圖](https://zh.wikipedia.org/zh-tw/%E7%9B%B4%E6%96%B9%E5%9B%BE) # # ### Hints: # - [Descriptive Statistics For pandas Dataframe](https://chrisalbon.com/python/data_wrangling/pandas_dataframe_descriptive_stats/) # - [pandas 中的繪圖函數](https://amaozhao.gitbooks.io/pandas-notebook/content/pandas%E4%B8%AD%E7%9A%84%E7%BB%98%E5%9B%BE%E5%87%BD%E6%95%B0.html) # app_train.columns app_train.head() app_train["AMT_INCOME_TOTAL"].mean() app_train["AMT_INCOME_TOTAL"].std() app_train["AMT_CREDIT"].hist(bins=50)
1,122
/machine-learning-ex1/mul.ipynb
ecec09773985f4e7f23c4759aeb936ddd85b9817
[ "MIT" ]
permissive
TigerDreamer/coursera-ml-py
https://github.com/TigerDreamer/coursera-ml-py
0
0
null
2019-03-14T09:05:46
2019-03-14T01:29:36
null
Jupyter Notebook
false
false
.py
33,160
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + slideshow={"slide_type": "slide"} import pandas as pd import finterstellar as fs # + slideshow={"slide_type": "slide"} coin = fs.CoinPrice() # - cd = 'BTC' coin_df = coin.bithumb_historical_price(coin_cd=cd, freq='M') coin_df.head() # + slideshow={"slide_type": "slide"} trd = fs.SingleAsset() # fs 라이브러리의 SingleAsset 모듈을 불러와 trd에 로딩 base_date = '2019-04-21 10:00:00' # 기준일자 설정 # - df = pd.DataFrame() # 빈 데이터프레임을 생성하고 df[cd] = coin_df['close'].copy() # 시세를 복사해 넣음 df['volume'] = coin_df['volume'].copy() # 시세를 복사해 넣음 df = df.dropna() # na를 없애고 df.head(3) # + slideshow={"slide_type": "slide"} n = 60 # 평균주가계산 기준일수 sigma = 2 # 편차구간 지정 (시그마의 배수로) # + slideshow={"slide_type": "slide"} # 볼린저밴드 계산 bb = trd.bollinger_band(df, cd, n, sigma) # bollinger_band(데이터프레임, 분석대상코드, 평균기준일, 편차구간) - 볼린저밴드 분석값 계산 bb.tail() # + slideshow={"slide_type": "slide"} # 샘플링 sample = bb.loc[base_date:] # 계산 결과를 기준일자 이후만 잘라내 sample 데이터프레임에 저장 sample.head() # + slideshow={"slide_type": "slide"} book = trd.create_trade_book(sample, cd) # create_trade_book(데이터프레임, [종목코드]) - 트레이딩북 생성 book.head() # + slideshow={"slide_type": "slide"} # 트레이딩 전략 설정 thd = 'ub' # 종목 매도 기준 (편차구간상단:ub, 편차구간중심:center) buy = 'out' # 종목 매수 기준 (편차구간 진입 시:in, 편차구간 벗어날 시:out) book = trd.tradings(sample, book, thd, cd, buy, short=False) # tradings(데이터프레임, 트레이딩북, 매도기준, 종목코드, 매입기준) - 매매전략을 이용한 백테스팅 수행 # - # 포지션 계산 book = trd.position(book, cd) # position(트레이딩북, 종목코드) - 트레이딩 전략 수행에 수반되는 포지션 판단 ''' z : zero l : long s : short zz : zero to zero zl : zero to long lz : long to zero ''' book.head() # + slideshow={"slide_type": "slide"} # 수익률 계산 fund_rtn = trd.returns(book, cd, display=True) # returns(트레이딩북, 종목코드) - 전략의 수익률 계산 # + slideshow={"slide_type": "slide"} # 벤치마크 수익률 bm_rtn = trd.benchmark_return(book, cd) # 
benchmark_return(트레이딩북, 종목코드) - 벤치마크 수익률 계산 # - # 초과 수익률 exs_rtn = trd.excess_return(fund_rtn, bm_rtn) # excess_return(전략수익률, 벤치마크수익률) - 초과수익률 계산 # + [markdown] slideshow={"slide_type": "slide"} # 그래프로 표현하기 # - v = fs.VisualizeIntraday() # fs 라이브러리의 Visualize() 모듈을 불러와 v로 지정 sample.index[0] v.BB_trend_view(sample, sigma, cd, (15,5)) # v 모듈의 bb_trend_view(데이터프레임, 편차구간, 종목코드, (사이즈)) 함수 - 볼린저밴드 그래프 그리기 v.position_view(book, cd) # v 모듈의 position_view(트레이딩북, 종목코드, (사이즈)) 함수 - 보유내역 그래프 그리기 # + [markdown] slideshow={"slide_type": "slide"} # 최근 전략 # + slideshow={"slide_type": "-"} #last_date = sample.index[-1].date().strftime('%Y-%m-%d') last_point = sample.index[-1] # 현재 투자전략 추출을 위해 데이터 상 최종일 추출 last_point # - trd.trading_strategy(sample, thd, cd, last_point) # trading_strategy(데이터프레임, 매도기준, 종목코드, 최종일) - 트레이딩 전략 판단 trd.position_strategy(book, cd, last_point) # position_strategy(트레이딩북, 종목코드 최종일) - 포지션 구축 전략 판단 # + [markdown] slideshow={"slide_type": "slide"} # 백테스팅 결과인 트레이딩북을 한번 구경해볼까요? # + slideshow={"slide_type": "-"} book[(book['p '+cd]=='zl')|(book['p '+cd]=='lz')] # + active="" #
3,219
/Analysis & Modelling.ipynb
ce3d5a0ae2614b0517ccc8aea39ac8585a47ca92
[]
no_license
phuonghuynh11/Challenge_Projects
https://github.com/phuonghuynh11/Challenge_Projects
0
0
null
null
null
null
Jupyter Notebook
false
false
.py
1,552,814
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Ultimate Technologies Time series Analysis and Forecasting # # ### 1. Sourcing and Loading import pandas as pd import numpy as np import matplotlib.pyplot as plt data = pd.read_json('logins.json') data.head() # ### 2. Data Wrangling data.info() # There are no nulls, the `login_time` is already formatted correctly. Check the range of the logins. print(f'Login datetime range:\n{data.login_time.min()} : {data.login_time.max()}') # The logins time series covers 3 months and two weeks. Time for aggregation. ### set index as datetime, resample to 15 minute intervals with ### count of logins for each window data = data.set_index('login_time',drop=False) d15 = data.resample('15T').count() d15.head() # ### 3. EDA ### Plot of 15 min aggregated logins through time d15.plot(kind='line',y='login_time',figsize=(12,4)) plt.ylabel('Login Count') plt.xlabel('DateTime') plt.title('Logins aggregated with 15 minute window') plt.show() # The above plot shows the login time series aggregated using a 15 min window. The series is noisy with many sharp spikes and it is difficult to discern any trends. # + ### Aggregating every hour to see if any data issues show up ax = data.resample('1D').count().plot(kind='line',y='login_time',label='Daily',figsize=(8,4)) ax2 = data.resample('1W').count().plot(kind='line',y='login_time',label='Weekly',ax=ax,secondary_y=True) ax.set_ylabel('Daily log ins') ax.set_xlabel('Datetime') ax2.set_ylabel('Weekly log ins') plt.title('Logins Aggregated') handles,labels = [],[] for axx in [ax,ax2]: for h,l in zip(*axx.get_legend_handles_labels()): handles.append(h) labels.append(l) #plt.legend(handles,labels) plt.show() # - # The above plot shows daily and weekly aggregated log ins. 
The daily and weekly curves are useful for seeing trends in the data. The log ins start to increase in February and then peak in mid-March. By the start of April the daily log ins are on the decline. # ___ # Now a few plots looking at the log ins per day of the week and at different times of day. d15['day'] = d15.index.day_of_year d15['dow'] = d15.index.day_of_week d15['month'] = d15.index.month d15['week'] = d15.index.week d15['hour'] = d15.index.hour d15['time'] = d15.index.hour + d15.index.minute/60 + d15.index.second/360 # + ### Distribution by day of week import seaborn as sns sns.boxplot(data=d15,y='login_time',x='dow') plt.xticks(ticks = [0,1,2,3,4,5,6],labels=['Mon','Tues','Wed','Thur','Fri','Sat','Sun']) plt.xlabel('Day of Week') plt.ylabel('Logins for each 15 minute window') plt.title('Boxplot of logins by day of the week') plt.show() # - # The boxplots above show that there tend to be more log ins on Friday, Saturday, and Sunday than there are the other 4 days of the week, with a peak on Saturdays. # + sns.boxplot(data=d15,y='login_time',x='hour') plt.xlabel('Hour of the day') plt.ylabel('Logins for each 15 minute window') plt.title('Boxplot of logins by hour of the day') plt.show() # - # This is not what I expected to see. There tend to be more visitors at 1 am than at any other time of day? ~~It's possible that these log ins are from a different location in a different time zone, in which case I would like to convert them to local time. I do not have information on this so I will proceed assuming the times are correct.~~~ According to the documentaion this time series includes logins from Gotham, which is mostly active at night. Proceeding. 
# ### plotting the log ins over each week sns.lineplot(data=d15,x='dow',y='login_time',hue='week') plt.xticks(ticks = [0,1,2,3,4,5,6],labels=['Mon','Tues','Wed','Thur','Fri','Sat','Sun']) plt.xlabel('Day of Week') plt.ylabel('Log ins') plt.show() ### reducing the number of lines on the plot for ease of interpretation sns.lineplot(data=d15[d15.week>10],x='dow',y='login_time',hue='week',style='week') plt.xticks(ticks = [0,1,2,3,4,5,6],labels=['Mon','Tues','Wed','Thur','Fri','Sat','Sun']) plt.xlabel('Day of Week') plt.ylabel('Log ins') plt.show() # The above line plots show that in week 12 there were anomalously high log in counts on Tuesday, Wednesday and Thursday. This is visible as an interuption to the usual weekly periodicity in the daily aggregated curve in cell 6. # # The above also shows that week 14 had higher than normal log in values Thursday through Saturday. # # Now looking for outliers # + ### quick look for outliers by comparing to the mean and standard deviation ### for that time window across all days in the data d15['timewin_mean'] = d15.groupby('time')['login_time'].transform('mean') d15['timewin_std'] = d15.groupby('time')['login_time'].transform('std') d15['stds_from_mean'] = abs((d15.login_time - d15.timewin_mean)/d15.timewin_std) #d15.head() d15[d15.stds_from_mean>5] # - # There are 11 cases of the windowed log in count more than 5 standard deviations from the mean. Most of these occured in week 14 and there is a group early in the morning of April 4th. There must have been something anomalous happening that day. These values probably created the peak of the week 14 curve on Saturday in the plot above. # # The other anomalous values, don't appear to be in any group and they are only slightly more that 5 std's from the mean, so I will leave all of these log in counts for now. # ### Part 1 Summary # The log in time series covers 3 months and two weeks of activity, from Jan. 1st 1970 to April 13th, 1970. 
Long term trends show the number of logins begins increasing around February 1st and peaks in mid-March before decreasing. # # There tend to be more Logins on Fridays, Saturdays and Sundays than on Monday through Thursdays. # # Daily there are tend to be more log ins in the overnight hours between 10 pm and 4 am and much fewer logins in the hours of 6-10 am. There is another uptick in the log ins during lunch time from 11 am - 1 pm. # # Looking at the log in trend for each week there are 2 anomalies identified. During week 12 there is a greater number of log ins on Tuesday, Wednesday and Thursday relative to the normal amount. In Week 14 there are increased log ins on Thursday, Friday, and Saturday. # # There are 11 examples of anomalous values, where the log in count is more than 5 standard deviations from the mean for that 15 min window across all days in the data. Most of these occur in the early morning hours of April 3rd. Maybe April 3rd is a holiday in Gotham? # ### Part 2 - Experiment and Metric Design # #### Problem: # The neighboring cities of Gotham and Metropolis have complementary circadian rhythms: on # weekdays, Ultimate Gotham is most active at night, and Ultimate Metropolis is most active # during the day. On weekends, there is reasonable activity in both cities. # However, a toll bridge, with a two way # toll, between the two cities causes driver partners to tend # to be exclusive to each city. The Ultimate managers of city operations for the two cities have # proposed an experiment to encourage driver partners to be available in both cities, by # reimbursing all toll costs. # 1. What would you choose as the key measure of success of this experiment in # encouraging driver partners to serve both cities, and why would you choose this metric? # 1. __The metric I choose to test is the average wait time between ride request and and ride pick up. 
A significant decrease in this value would mean there are more drivers avaliable in the areas of the highest demand. Ultimate's reimbursing of toll costs should enable drivers to gravitate to these areas of elevated demand. From the rider's perspective a decrease in wait time is obviously preferred and it means less unproductive time for drivers. From Ultimate's perspective it means happy customers and drivers helping to increase Ultimate's market share. A secondary metric to look at is the total number of completed rides in a day. If the wait time is too long customers will find an alternative, resulting in fewer Ultimate rides and less revenue for Ultimate.__ # 1. Describe a practical experiment you would design to compare the effectiveness of the proposed change in relation to the key measure of success. # 1. __H(0): Reimbursing drivers toll payments will result in no change to the average wait time between ride request and ride pickup__ # 1. __H(A): Reimbursing drivers toll payments will result in a decrease of the average wait time between ride request and ride pickup__ # 1. __To conduct this experiment I would randomly split the days of the time frame into control and reimbursement days ensuring that there are equal number of weekdays and weekend days in the both groups. To determine how long the experiment should be I really need to know specific numbers, but I'll say one month for now. So during this month drivers will have tolls reimbursed for half the days and for the other half they will not. A relatively long time frame is needed, because we do not want the results to be skewed by one-off events. Randomization is needed to reduce bias and any other confounding effects.__ # 1. __To verify the significance of the experiment I will conduct a one sided permutation test. First calculating the observed mean wait times for all control days and all reimbursement days in the experiment and taking the difference. 
Then permutating the control group and reimbursement group several thousand times, calculating the resulting difference in wait time means for each. If the observerd difference from the experiment is greater than 95% of permutation calculations than I can reject the null hypothesis.__ # 1. __If I can reject the null hypothesis I can say with 95% certainty that the reimbursing drivers tolls resulted in a decrease of the average wait time. As far as recommendations go it depends on what the goals of the operations team are. If the goal is to increase revenue recommendations will depend on experiments effect on revenue and the cost of reimbursing tolls for drivers. If the goal is to expand market share, we will need to look at ride metrics compared to historical data and our competition during the experiment to determine if it was a success. After that discussion I will be able to recommend a path forward.__ # 1. __If the test fails to reject the null hypothesis I can conclude that reimbursing drivers tolls had no significant effect on wait times. In that case, we can start to look at other experiments to increase revenue or market share.__ # # # ### Part 3 - Predictive Modelling # Ultimate is interested in predicting rider retention. To help explore this question, we have provided a sample dataset of a cohort of users who signed up for an Ultimate account in January 2014. The data was pulled several months later; we consider a user retained if they were “active” (i.e. took a trip) in the preceding 30 days. # # We would like you to use this data set to help understand what factors are the best predictors for retention, and offer suggestions to operationalize those insights to help Ultimate. # # The data is in the attached file ultimate_data_challenge.json. See below for a detailed description of the dataset. Please include any code you wrote for the analysis and delete the dataset when you have finished with the challenge. 
data = pd.read_json('ultimate_data_challenge.json') data.head() data.info() # #### Cleaning data['signup_date'] = pd.to_datetime(data.signup_date,format="%Y-%m-%d") data['last_trip_date'] = pd.to_datetime(data.last_trip_date,format="%Y-%m-%d") max_last_date = data.last_trip_date.max() max_last_date # Ok assuming the data was pulled on July 1st 2014 then I can determine retention by seeing if a user had a ride in the 30 days prior to that. # + data['last_trip_days_from_max'] = max_last_date - data.last_trip_date data['last_trip_days_from_max'] = data.last_trip_days_from_max.dt.days data['retention'] = 1 * (data.last_trip_days_from_max<30) data.drop('last_trip_days_from_max',axis=1,inplace=True) print('Fraction of users retained') data.retention.value_counts()/data.shape[0] # - # Of the users that signed up in January 2014, by July 2014 only 36.6% of those users were retained. Now that I have the target I need to check the formatting and go about filling in null values. data.info() ### turn 'ultimate_black_user', into 0's and 1's data['ultimate_black_user'] = 1*data.ultimate_black_user ### fill nulls in `avg_rating_of_driver' with median of that users city. ### ditto for 'avg_rating_by_driver' for c in ['avg_rating_of_driver','avg_rating_by_driver']: data[c] = data[c].fillna(data.groupby('city')[c].transform('median')) data.info() data.phone.value_counts()/data.shape[0] print(data.phone.isna().sum()) print(f'{100*data.phone.isna().sum()/data.shape[0]:.2f}% of all records') # There are 396 cases of the phone OS being null. I could give them another category or I could drop them completely. Let's check the retention status of these users. data.loc[data.phone.isna(),'retention'].value_counts() / data.loc[data.phone.isna(),'retention'].shape[0] # The retention rate for these users is very similar to the overall numbers. Since these users represent less than 1% of the data I will drop them, and turn the `phone` column into binary (1 for iPhone). 
data = data[data.phone.isna()!=True] data['phone'] = 1 * (data.phone=='iPhone') data = data.rename(columns={'phone':'iPhone'}) # #### EDA data.info() data.retention.value_counts()/data.shape[0] # In the cleaned up data 36.6% of useres were retained. Now I will profile the columns below in the following order. # 1. Categorical `city` # 1. Boolean columns `iPhone` and `ultimate_black_user` # 1. Datetime columns `signup_date` and `last_trip_date` # 1. Numeric columns the rest # + by_city = data.groupby(['city','retention'])[['iPhone']].count().reset_index(level=1).pivot(columns='retention',values='iPhone') by_city.plot(kind='barh',stacked=True) plt.legend(loc='right',labels=['Lost','Retained']) plt.title('Number of users') plt.bar_label plt.show() from scipy.stats import chi2_contingency chi2, pval, dof, exp = chi2_contingency( pd.crosstab(data.city,data.retention) ) print(f'city chi2: {chi2:.3f}, pval: {pval:.3f}') # - # King's Landing has a lower number of users, but a much higher rate of retention. Astapor has the lowest rate of retention. This looks useful for prediction and I will one hot encode it now. data = pd.get_dummies(data,columns=['city']) # Done. Now boolean's. # + data.groupby(['iPhone','retention'])[['surge_pct']].count().reset_index(level=1).pivot(columns='retention',values='surge_pct').plot(kind='bar',stacked=True) plt.xticks([0,1],labels=['Android','iPhone'],rotation=0) plt.ylabel('User Count') plt.legend(labels=['Lost','Retained']) plt.show() chi2, pval, dof, exp = chi2_contingency( pd.crosstab(data.iPhone,data.retention) ) print(f'city chi2: {chi2:.3f}, pval: {pval:.3f}') # - # The users on iPhones are much more likely to be retained. 
# + data.groupby(['ultimate_black_user','retention'])[['surge_pct']].count().reset_index(level=1).pivot(columns='retention',values='surge_pct').plot(kind='bar',stacked=True) plt.xticks([0,1],labels=['Standard User','Black User'],rotation=0) plt.ylabel('User Count') plt.legend(labels=['Lost','Retained']) plt.show() chi2, pval, dof, exp = chi2_contingency( pd.crosstab(data.ultimate_black_user,data.retention) ) print(f'city chi2: {chi2:.3f}, pval: {pval:.3f}') # - # There are fewer of them, but the users who use Ultimate Black in the first 30 days since joining are more likely to be retained. These 2 boolean columns look to be useful. # ___ # Date columns. `signup_date` indicates what day of January 2014 that the user signed up on. `last_trip_date` is indicates when last trip the user took and is used in calculation of the target. data['signup_day'] = data.signup_date.dt.day sns.histplot(data=data, x='signup_day', hue='retention',stat='density',common_norm=False) plt.title("Probability Density of 'signup_day' in January grouped by retention") plt.show() chi2, pval, dof, exp = chi2_contingency(pd.crosstab(data.signup_day, data.retention)) print(f'signup_day chi2: {chi2:.3f}, pvalue: {pval:.3f}') # Above are the normalized histograms for sign up day for retained and not retained users. From the visual there doesn't appear to be much difference, but the chi2 test says there is an association. # ___ # The target `retained` is calculated from the other datetime column. Therefore it would be cheating to use this column for prediction of the target. I will drop it before modelling. 
# ___ # Now for the numeric columns sns.pairplot(data,vars=['trips_in_first_30_days','avg_rating_of_driver','avg_rating_by_driver','avg_surge','surge_pct','avg_dist','weekday_pct']) for col in ['trips_in_first_30_days','avg_rating_of_driver','avg_rating_by_driver','avg_surge','surge_pct','avg_dist','weekday_pct']: sns.kdeplot(data=data,x=col,hue='retention',common_norm=False) plt.show() # Positively, a lot of these plots show differences between the distributions, but most of the distributions are highly skewed. # ___ # Some observations # 1. Retained customers tend to take more rides in the first 30 days. # 1. Retained customers tend to rate drivers lower and are rated lower by drivers. # 1. Retained customers tend to have a higher number of trips that have surge pricing # 1. Lost customers tend to have 0% weekday rides or 100% weekday rides. If weekend pct is >30 and <100 there is a greater chance that customer will be retained. # # A linear model will not be able to properly utilize `weekday_pct` given the nature of it's distribution, so I will apply a tree based model. The tree based model can create multiple boundaries for each feature. # Checking for correlation among numeric features sns.heatmap(data[['trips_in_first_30_days','avg_rating_of_driver','avg_rating_by_driver','avg_surge','surge_pct','avg_dist','weekday_pct']].corr()) from scipy.stats import pearsonr print('Pearson r: {:.3f}'.format(pearsonr(data.avg_surge,data.surge_pct)[0])) # `avg_surge` and `surge_pct` are pretty highly correlated with a Pearson correlation values of 0.79. I will leave it for now, but will check for collinearity later. # Now prepare for modelling. # 1. Calculate information value and reduce features accordingly # 1. Calculate multicollinearity and drop colinear features # 1. Split into train and test # 1. Apply default model # 1. Optimize # 1. 
Evaluate rand_state = 33 data.info() # + data.drop(['signup_date','last_trip_date'], axis=1, inplace=True) y = data.pop('retention') # + active="" # from sklearn.preprocessing import PowerTransformer # pt = PowerTransformer(method='yeo-johnson',standardize=True) # X = pt.fit_transform(data) # feats = data.columns # X = pd.DataFrame(X,columns=feats) # - # Function for calculating weight of evidence and information value for each feature # + ### Function to calculate the Information Value (IV) of each feature ### max_bin = 20 force_bin = 3 import pandas.core.algorithms as algos import scipy.stats.stats as stats import re # define a binning function ### this function is for binning and calculating the Weight of Evidence ### for the target and the Information Value for the feature ### and the In def mono_bin(Y, X, n = max_bin): ### create new dataframe of series feature and series y df1 = pd.DataFrame({"X": X, "Y": Y}) ### check to see if any nulls in feature and seperate nulls out justmiss = df1[['X','Y']][df1.X.isnull()] notmiss = df1[['X','Y']][df1.X.notnull()] #print("justmiss", justmiss) #print("notmiss", notmiss) ### while loop until Spearman correlation coefficient is between [-1,1] ### Finding minumum bin size that results in 'abs(r)' of >1??? 
r = 0 while np.abs(r) < 1: ### Try creation of new dataframe with max or less bin size 'n' ### create dataframe with feature, target and binned feature ### create 'd2' group by object on 'Bucket' ### calculate Spearman correlation 'r' and p-value from mean of feature and target ### If exception reduce bin number by one and try again ### Effectively finding the max bin number that can be used ### for calculating WOE try: d1 = pd.DataFrame({"X": notmiss.X, "Y": notmiss.Y,\ "Bucket": pd.qcut(notmiss.X, n)}) d2 = d1.groupby('Bucket', as_index=True) r, p = stats.spearmanr(d2.mean().X, d2.mean().Y) #print("I am here 1",r, n,len(d2)) n = n - 1 except Exception as e: n = n - 1 #print("I am here e",n) ### If length of d2 is 1 (ie 1 bucket for all of feature) do this if len(d2) == 1: #print("I am second step ",r, n) ### force 'n' to 3 and calculate quantiles of feature from (0,0.5,1) ### to be used as bins, if not 3 unique because of heavily skewed data ### manually create bin n = force_bin bins = algos.quantile(notmiss.X, np.linspace(0, 1, n)) if len(np.unique(bins)) == 2: bins = np.insert(bins, 0, 1) bins[1] = bins[1]-(bins[1]/2) ### Create new dataframe bucketed by manual bins d1 = pd.DataFrame({"X": notmiss.X, "Y": notmiss.Y, "Bucket": pd.cut(notmiss.X, np.unique(bins),include_lowest=True)}) d2 = d1.groupby('Bucket', as_index=True) ### Create new dataframe from aggregating the binned dataframe d3 = pd.DataFrame({},index=[]) d3["MIN_VALUE"] = d2.min().X d3["MAX_VALUE"] = d2.max().X d3["COUNT"] = d2.count().Y d3["EVENT"] = d2.sum().Y d3["NONEVENT"] = d2.count().Y - d2.sum().Y d3=d3.reset_index(drop=True) if len(justmiss.index) > 0: d4 = pd.DataFrame({'MIN_VALUE':np.nan},index=[0]) d4["MAX_VALUE"] = np.nan #print(justmiss.count().Y) d4["COUNT"] = justmiss.count().Y d4["EVENT"] = justmiss.sum().Y d4["NONEVENT"] = justmiss.count().Y - justmiss.sum().Y d3 = d3.append(d4,ignore_index=True) ### add more features to d3 describing the 'events' of the target d3["EVENT_RATE"] = 
d3.EVENT/d3.COUNT d3["NON_EVENT_RATE"] = d3.NONEVENT/d3.COUNT d3["DIST_EVENT"] = d3.EVENT/d3.sum().EVENT d3["DIST_NON_EVENT"] = d3.NONEVENT/d3.sum().NONEVENT print(np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT)) d3["WOE"] = np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT) d3["IV"] = (d3.DIST_EVENT-d3.DIST_NON_EVENT)*np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT) d3["VAR_NAME"] = "VAR" d3 = d3[['VAR_NAME','MIN_VALUE', 'MAX_VALUE', 'COUNT', 'EVENT', 'EVENT_RATE', 'NONEVENT', 'NON_EVENT_RATE', 'DIST_EVENT','DIST_NON_EVENT','WOE', 'IV']] d3 = d3.replace([np.inf, -np.inf], 0) d3.IV = d3.IV.sum() return(d3) def char_bin(Y, X): df1 = pd.DataFrame({"X": X, "Y": Y}) justmiss = df1[['X','Y']][df1.X.isnull()] notmiss = df1[['X','Y']][df1.X.notnull()] df2 = notmiss.groupby('X',as_index=True) d3 = pd.DataFrame({},index=[]) d3["COUNT"] = df2.count().Y d3["MIN_VALUE"] = df2.sum().Y.index d3["MAX_VALUE"] = d3["MIN_VALUE"] d3["EVENT"] = df2.sum().Y d3["NONEVENT"] = df2.count().Y - df2.sum().Y if len(justmiss.index) > 0: d4 = pd.DataFrame({'MIN_VALUE':np.nan},index=[0]) d4["MAX_VALUE"] = np.nan d4["COUNT"] = justmiss.count().Y d4["EVENT"] = justmiss.sum().Y d4["NONEVENT"] = justmiss.count().Y - justmiss.sum().Y d3 = d3.append(d4,ignore_index=True) d3["EVENT_RATE"] = d3.EVENT/d3.COUNT d3["NON_EVENT_RATE"] = d3.NONEVENT/d3.COUNT d3["DIST_EVENT"] = d3.EVENT/d3.sum().EVENT d3["DIST_NON_EVENT"] = d3.NONEVENT/d3.sum().NONEVENT d3["WOE"] = np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT) d3["IV"] = (d3.DIST_EVENT-d3.DIST_NON_EVENT)*np.log(d3.DIST_EVENT/d3.DIST_NON_EVENT) d3["VAR_NAME"] = "VAR" d3 = d3[['VAR_NAME','MIN_VALUE', 'MAX_VALUE', 'COUNT', 'EVENT', 'EVENT_RATE', 'NONEVENT', 'NON_EVENT_RATE', 'DIST_EVENT','DIST_NON_EVENT','WOE', 'IV']] d3 = d3.replace([np.inf, -np.inf], 0) d3.IV = d3.IV.sum() #print("hi",d3.IV ) d3 = d3.reset_index(drop=True) return(d3) def data_vars(df1, target): import traceback ### Extract raw traceback from error in one of the two sub functions ### assign traceback elemnts to variables stack = 
traceback.extract_stack() filename, lineno, function_name, code = stack[-2] ### vars_name = re.compile(r'\((.*?)\).*$').search(code).groups()[0] final = (re.findall(r"[\w']+", vars_name))[-1] ### get column names from df1 x = df1.dtypes.index count = -1 ### Loop through columns for i in x: print(i) if i.upper() not in (final.upper()): ### test if numeric and not a one-hot encoding if np.issubdtype(df1[i], np.number) and len(pd.Series.unique(df1[i])) > 2: #print("Number and unique value greater than 2") ### pass target and feature to 'mono_bin' conv = mono_bin(target, df1[i]) ### assign feature name to 'conv' conv["VAR_NAME"] = i count = count + 1 else: #print("I am here 2") ### pass target and feature to 'char_bin' conv = char_bin(target, df1[i]) conv["VAR_NAME"] = i count = count + 1 ### First time run through the loop where count==0, ### create new df from current if count == 0: iv_df = conv ### on subsequent loops append rows to bottom of 'iv_df' ### of next feature and scoring calcs else: iv_df = iv_df.append(conv,ignore_index=True) ### aggregate 'iv_df' taking the maximum ?correlation? score ### for each feature and creating a new summary df with columns ### 'VAR_NAME' & 'IV' iv = pd.DataFrame({'IV':iv_df.groupby('VAR_NAME').IV.max()}) iv = iv.reset_index() ### return detailed df with all computed features ### and summary of only max values and feature names return(iv_df,iv) # - iv_df, iv = data_vars(data,y) # + iv = iv.sort_values('IV',ascending=False) print(iv.head()) feats = list(iv.loc[(iv.IV>.01) & (iv.IV<.8),'VAR_NAME']) X2 = data[feats] # - # Function for calculating collinearity among the features. 
# + from statsmodels.stats.outliers_influence import variance_inflation_factor def iterate_vif(df, vif_threshold=5, max_vif=6): count = 0 while max_vif > vif_threshold: count += 1 print("Iteration # "+str(count)) ### Create data frame with features column and ### variance inflation factor column vif = pd.DataFrame() vif["VIFactor"] = [variance_inflation_factor(df.values, i) for i in range(df.shape[1])] vif["features"] = df.columns if vif['VIFactor'].max() > vif_threshold: print('Removing %s with VIF of %f' % (vif[vif['VIFactor'] == vif['VIFactor'].max()]['features'].values[0], vif['VIFactor'].max())) df = df.drop(vif[vif['VIFactor'] == vif['VIFactor'].max()]['features'].values[0], axis=1) max_vif = vif['VIFactor'].max() #if count==45: #print('early stop for plotting intermediate step') #return df, vif.sort_values('VIFactor') else: print('Complete') return df, vif.sort_values('VIFactor') # - final_df,vif = iterate_vif(X2) final_df.info() # After that I am left with 7 features for predicting retention. X = final_df.to_numpy() feats = final_df.columns from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test = train_test_split(X,y,\ test_size=.2,\ random_state=rand_state,\ stratify=y) print(f'{sum(y_train)/len(y_train):.3f}%') print(f'{sum(y_test)/len(y_test):.3f}%') print(X_train.shape) print(X_test.shape) print(y_train.shape) print(y_test.shape) from sklearn.ensemble import RandomForestClassifier rf_clf = RandomForestClassifier(random_state = rand_state) rf_clf.fit(X_train,y_train) from sklearn.metrics import plot_confusion_matrix, plot_roc_curve fig,ax = plt.subplots(figsize=(5,5)) plot_roc_curve(rf_clf,X_train,y_train,name='Train',ax=ax) plot_roc_curve(rf_clf,X_test,y_test,name='Test',ax=ax) plt.title('Receiver operating curve') plt.show() # Not Bad, but the default Random Forest model's ROC curve looks a little overfit to the training data (which they tend to do). 
Now I will use cross validation and grid search to avoid overfitting and optimize the hyperparameters. RandomForestClassifier().get_params() # + from sklearn.model_selection import GridSearchCV params = {'n_estimators':[30,50,80,100,120],\ 'criterion':['gini','entropy'],\ 'max_depth':[2,3,5,7]} rf_gs = GridSearchCV(RandomForestClassifier(random_state=rand_state),\ param_grid=params, scoring='recall') rf_gs.fit(X_train,y_train) print('Best params:',rf_gs.best_params_) print() print(f'Best Score: {rf_gs.best_score_:.4f}') # - # An AUC of 0.84 is pretty good and indicates that the model has predictive power. I chose recall as the scoring metric because we are focusing on the positive classes (retained) and what factors are most important for predicting retention. # + from sklearn.metrics import plot_precision_recall_curve, plot_roc_curve fig,ax = plt.subplots(1,2,figsize=(10,5),tight_layout=True) plot_roc_curve(rf_gs,X_train,y_train,name='Train',ax=ax[0]) plot_roc_curve(rf_gs,X_test,y_test,name='Test',ax=ax[0]) ax[0].set_title('Receiver operating curve') plot_precision_recall_curve(rf_gs,X_train,y_train,name='Train',ax=ax[1]) plot_precision_recall_curve(rf_gs,X_test,y_test,name='Test',ax=ax[1]) ax[1].set_title('Precision-Recall curve') plt.show() # - # That looks much better. Using cross validation, and optimizing the hyper parameters results in a slightly higher AUC for the test set and no signs of overfitting. Limiting the depth of the trees in the forest probably accounts for a lot of that. # # # ___ # Now a quick look at the confusion matrix. 
# + from sklearn.metrics import plot_confusion_matrix,precision_score,recall_score,f1_score fig,ax = plt.subplots(1,2,figsize=(10,5),tight_layout=True) plot_confusion_matrix(rf_gs,X_train,y_train,display_labels=['Lost','Retained'],normalize='true',ax=ax[0],cmap='Blues') plot_confusion_matrix(rf_gs,X_test,y_test,display_labels=['Lost','Retained'],normalize='true',ax=ax[1],cmap='Oranges') ax[0].set_title('Train') ax[1].set_title('Test') plt.show() ### Training print('Train Precision {:.3f}'.format(precision_score(\ y_train,rf_gs.predict(X_train)))) print('Train Recall {:.3f}'.format(recall_score(\ y_train,rf_gs.predict(X_train)))) print('Train F1 {:.3f}'.format(f1_score(\ y_train,rf_gs.predict(X_train)))) print() ### Testing print('Test Precision {:.3f}'.format(precision_score(\ y_test,rf_gs.predict(X_test)))) print('Test Recall {:.3f}'.format(recall_score(\ y_test,rf_gs.predict(X_test)))) print('Train F1 {:.3f}'.format(f1_score(\ y_test,rf_gs.predict(X_test)))) # - # The confusion matrices again demonstrate there is no overfitting to the test set. # # The model generalizes well to the training data has predictive power as demonstrated by the AUC of 0.85 for the test set. I am a little concerned about using 2 somewhat correlated features in `avg_surge` and `surge_pct`. That could be re done by calculating the information value of each and one then keeping the one with the highest value. # ___ # The goal is to determine which factors are the best predictors of retention, so now digging into feature importance. feat_import = pd.DataFrame(zip(feats,rf_gs.best_estimator_.feature_importances_),columns=['feature','importance']) feat_import.set_index('feature').sort_values('importance',ascending=False) # The most important feature in prediction is `avg_rating_by_driver`. Other quite important features are `surge_pct`, `city_King's Landing` and `weekday_pct`. That is not too surprising becuase there were differences in those distributions identified above. 
# + import shap shap_values = shap.TreeExplainer(rf_gs.best_estimator_).shap_values(X_train) # - shap.summary_plot(shap_values[1], X_train, plot_type="bar",\ feature_names=feats, class_names=['lost','retained'],\ ) shap.summary_plot(shap_values[1], X_train,\ feature_names=feats,\ class_names=['lost','retained']) # Some observations of the shap values. # 1. The effect of `weekday_pct` is large and complicated. Further discussion below. # 1. The effect of `surge_pct` is also complicated and non-linear, which wasn't identified earlier. # 1. Users in King's Landing are more likely to be retained than those in other cities. # 1. Users with iPhones are more likely to be retained than users with Android devices. # 1. Users that took a higher number of Ultimate Black rides in their first 30 days are more likely to be retained. # 1. The `trips_in_first_30days` has a significant and expected impact on the prediction. The more trips in the first 30 days, the more likely the user will be retained. # 1. Users in the city of Astapor are less likely to be retained. # 1. I didn't acknowledge it above but the ratings of the driver and by the driver didn't matter at all for the prediction. # I will interpret these observations in the summary below. feat_cols = {k:v for v,k in enumerate(feats)} feat_cols #shap.plots.scatter(shap_values[0][:,list(data.columns).index('weekday_pct')], color =shap_values) shap.dependence_plot(feat_cols['weekday_pct'],shap_values[1],X_train,\ feature_names=feats,\ interaction_index=feat_cols['surge_pct']) # The above plot shows the impact of `weekday_pct` on the model. The x-axis shows the values of `weekday_pct` column and the y-axis shows its impact on the prediction of a positive case (retained user). If a users percentage of rides on weekdays is 0% or 100% then the user is more likely to be lost than a user whose weekday percentage is somewhere in between. 
#shap.plots.scatter(shap_values[0][:,list(data.columns).index('weekday_pct')], color =shap_values) shap.dependence_plot(feat_cols['surge_pct'],shap_values[1],X_train,\ feature_names=feats,\ interaction_index=feat_cols['weekday_pct']) #shap.plots.scatter(shap_values[0][:,list(data.columns).index('weekday_pct')], color =shap_values) shap.dependence_plot(feat_cols['trips_in_first_30_days'],shap_values[1],X_train,\ feature_names=feats,\ interaction_index=feat_cols['surge_pct']) # ### Part 3 - Recommendations # There were several interesting insights gleaned from the classification model. The most important features for predicting retention are, `weekday_pct`, `surge_pct`, `city_King's Landing`, `iPhone`. # - Users with a weekday percentage of rides greater than 0% and less than 100% are more likely to be retained. This probably reflects demographics or customer behavior. # - Users who took a higher percentage of rides that were subject to surge pricing were more likely to be retained. Again, this could be a reflection of demographics and behavior, but this learning could be used by the marketing department to emphasize how useful the service can be during surge times/events. # - The users in King's Landing have a high retention rate. Marketing efforts could be focused on Winterfell and Astapor which have lower retention rates. # - Users with iPhones are far more likely to be retained than users with Android device. I recommend Ultimate revisit their Android app and see if the user experience can be improved. # - Users who used Ultimate Black in the first 30 days are more likely to be retained than those that did not. Ultimate could start sending notifications to new users raising awareness of the Ultimate Black service. Even better, we could run a randomized experiment to see if these notifications wold improve retention. # - Users who took more trips in their first 30 days are more likely to be retained. 
Ultimate could could send notifications in the app to new users to try to boost the number of rides a user takes in their first 30 days. # #
37,367
/assets/chatbot/chatbot_databases.ipynb
2cc1ee00bbd1e18c867751b9ab75da11f8e3e465
[ "MIT" ]
permissive
Donghwa-KIM/Donghwa-KIM.github.io
https://github.com/Donghwa-KIM/Donghwa-KIM.github.io
0
1
null
null
null
null
Jupyter Notebook
false
false
.py
26,627
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.15.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _uuid="ccb885ed3fe50c4ba2aef8516b387cffb007cdac" # # Costa Rican Household Poverty Level Prediction - kNN # ## Poli-USP - PMR3508 - 2018 # + _uuid="c8e5cba3e40932db727b383590509fda59205f20" import warnings import numpy as np import pandas as pd import sklearn from sklearn.neighbors import KNeighborsClassifier from sklearn.model_selection import cross_val_score, GridSearchCV from sklearn.pipeline import Pipeline from sklearn.preprocessing import Imputer from scipy.stats import pearsonr import matplotlib.pyplot as plt # %matplotlib inline # Ignore deprecation warnings from scikit-learn 0.20. import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) # + _uuid="860d25a611e836d5b5ef85fbc04483f3fa5f4f04" train_raw = pd.read_csv('../input/train.csv', sep=r'\s*,\s*', engine='python') test_raw = pd.read_csv('../input/test.csv', sep=r'\s*,\s*', engine='python') # + _uuid="9c43ed7700a6fb2694352d25bff1a0f9f0167389" train_raw.info() # + _uuid="ce0e652aa12a27bab958a520d04ca683483882f3" train_raw.head() # + _uuid="acc3f1a1017cc107574729becfa2f2cf9b8b14cc" train_raw.describe() # + _uuid="225a30f319b9692e2153361fb4b407e409225cdd" # Columns with missing data cols_with_na = train_raw.columns[train_raw.isnull().any(axis=0)] cols_with_na # + _uuid="3124bfe3a8b68aa5aa3901c9b35b6b2e849f5ccd" # Frequency of missing values in each column # High for v2a1, v18q1, rez_esc train_raw[cols_with_na].isnull().sum(axis=0) / train_raw.shape[0] # + _uuid="e35d705fa2fdd9cc15c0acde2be2cc3e6003cd8f" # All column dtypes print('Feature types:', *{train_raw[col].dtype.name for col in train_raw.columns}) print('Non-numeric features:', *train_raw.select_dtypes(exclude=[np.number]).columns) # + _uuid="294f41ab78ccf932f64778fbe9e8c008308e34d2" 
train_raw.select_dtypes(exclude=[np.number]).head(10) # + [markdown] _uuid="d2968158e39aff130ccd887e6a77991e662f36db" # About the non-numeric features: # # - Id and idhogar are identifiers and, therefore, useless in classification. # # - edjefe and edjefa are education indicators. They can be turned into numerical data by applying the conversion given in the data description (in Kaggle): `yes -> 1`, `no -> 0`. # # - dependency is the ratio of dependent people to independent people in the household. The numerical values of yes and no are not specified. # Therefore, it might be easier to turn dependency into a binary feature, with the following conversion: `0, no -> 0` and `n, yes -> 1` (`n != 0`). # + _uuid="57bd57df1fc6ffc50eff61fb795ca7facfd8ed9f" # Number of 0/1 columns (one-hot encoded) sum(set(train_raw[col].unique()) == {0,1} for col in train_raw.columns) # + _uuid="4407ec70308f1ac8f03d2e87a7e403120482eead" # Frequency of each target value train_raw['Target'].value_counts() / train_raw.shape[0] # + [markdown] _uuid="a50bfcfd1896d7055c72e3a3323499d712e4be49" # ## Initial tests # # - Remove v2a1, v18q1, rez_esc (which have > 70% missing values). # # - Work only with numerical values. I.e.: # - Remove Id and idhogar (not useful, as seen above) # - Convert dependency, edjefe and edjefa into numerical data. # # - Two approaches to handle missing values: with `dropna` and imputation. 
# + _uuid="546e3039af99a7d81ec3eedd8f0a28781fd63907" def preprocess(data): data = data.copy() dep = data['dependency'].copy() dep[dep == 'no'] = 0 dep[(dep != 0) & (~dep.isnull())] = 1 data['dependency'] = pd.to_numeric(dep) for col in ['edjefe', 'edjefa']: edjef = data[col].copy() edjef[edjef == 'yes'] = 1 edjef[edjef == 'no'] = 0 data[col] = pd.to_numeric(edjef) return data # + _uuid="e00b3de8c25d6f48e7d940be5187d7c2b037a746" # After preprocessing, only the Id and idhogar features are not numeric preprocess(train_raw).select_dtypes(exclude=[np.number]).columns # + _uuid="7dccc5c53dacbcc2ee0e74754bb72c9793245824" train = preprocess(train_raw) test = preprocess(train_raw) numeric_columns = list(train.select_dtypes(include=[np.number]).columns) columns = list(set(numeric_columns) - {'v2a1', 'v18q1', 'rez_esc', 'Target'}) train_initial = train.copy()[columns + ['Target']] train_initial.dropna(inplace=True) x = train_initial[columns] y = train_initial['Target'] # + _uuid="832beac04a76c8e429f623d62823da43ca92902f" # Use f1_macro scoring by default, since it's the one used in the competition def cross_val(knn, x, y, cv, scoring='f1_macro'): scores = cross_val_score(knn, x, y, cv=cv, scoring=scoring) return sum(scores)/len(scores) # + _uuid="f86e9fd0ddac357c77ddd42512f0f36484852a59" knn = KNeighborsClassifier(n_neighbors=30, p=2) print('Accuracy:', cross_val(knn, x, y, cv=5, scoring='accuracy')) print('F1:', cross_val(knn, x, y, cv=5)) # + _uuid="fa8028105cfb3b1807d099911b90a921e9dce766" # Add imputation (slightly better results) x = Imputer().fit_transform(train[columns]) y = train['Target'] print('Accuracy:', cross_val(knn, x, y, cv=5, scoring='accuracy')) print('F1:', cross_val(knn, x, y, cv=5)) # + [markdown] _uuid="2bb4133b84ddb46fa3627b077118bcd0c9663856" # ## Further Analysis # + _uuid="4b7eb752d7804f906f86eb9c43ebb1938031e35d" corrs = {} pvals = {} print('Correlations and p-values between the features and the target.') 
print(f'{"feature":<15}{"corr":>6}{"pval":>9}') for col in columns: # Ignore the feature elimbasu5, since it always has value 0 dropped = train[[col, 'Target']].dropna() corrs[col], pvals[col] = pearsonr(dropped[col], dropped['Target']) print(f'{col:15}{corrs[col]:6.2f}{pvals[col]:9.6f}') print() print('Min p-value:', min(pvals.values())) print('Max abs(correlation):', max(abs(c) for c in corrs.values())); # + _uuid="ae8c3a5ba48a65f5d0ab50141c3a6d4ae35f129e" # The above warning suggests that a division by zero. # Since the correlation for elimbasu5 is nan, we now analyse that feature. set(train['elimbasu5']) # + [markdown] _uuid="30c4f42c60c1b1f4342b47632e9e85c59503f2ff" # Notice that elimbasu5 (which indicates whether trash disposal is done in the river/sea) is constant and, therefore, useless. # + _uuid="0c5a615f565c4578cb9c052d132eab6fd2b16f73" # Remove elimbasu5 from the used columns columns.remove('elimbasu5') # + _uuid="241ac9a42b9eac4aff55524eecf6c05ed72a9d33" # Most of the p-values are tiny. np.median(list(pvals.values())) # + _uuid="d055eb84227feb127109d1bda95a75c733ccaf11" plt.axhline(color='black') plt.plot(corrs.keys(), corrs.values()) plt.title('pearson correlation coefficients') plt.show() plt.plot(pvals.keys(), pvals.values()) plt.title('p-values') plt.show() # + _uuid="6b0f7c1d197a8f5354e73e0ca02c328e24911649" # Columns with pval < .1 and abs(corr) > .15 filtered_columns = [col for col in columns if pvals[col] < .1 and abs(corrs[col]) > .15] filtered_columns # + _uuid="d700edae51d08d1886571580a04a685881ed53ba" # New attempt, removing columns based on pval and corr. # This improves the quality of the features used, avoids the curse of dimensionality and lowers the runtime. # The F1 score and accuracy are higher. 
x = Imputer().fit_transform(train[filtered_columns]) y = train['Target'] print('Accuracy:', cross_val(knn, x, y, cv=5, scoring='accuracy')) print('F1:', cross_val(knn, x, y, cv=5)) # + _uuid="b7a56753ec42bee52855d278f5805a2bce0d781a" # Now using p=1 (manhattan distance) knn.set_params(p=1) print('Accuracy:', cross_val(knn, x, y, cv=5, scoring='accuracy')) print('F1:', cross_val(knn, x, y, cv=5)) # + [markdown] _uuid="a09644b5290c38416fc637418b52255cfe1ca08c" # ## Hyperparameter search # # Use grid search to find the best values for `n_neighbors` and `p`, as well as the best p-value and correlation thresholds. # + _uuid="33a3ec6d8f0908bc19626be9f2522a99f7aa1345" # TransformerMixin provides the method fit_transform. # BaseEstimator provides get_params, set_params. class CorrelationSelector(sklearn.base.TransformerMixin, sklearn.base.BaseEstimator): """A transformer that removes columns based on pearson correlation and the frequency of each value. Calculates the pearson correlation and p-values between each feature and the target. Removes any column with abs(correlation) < min_corr or pvalue > max_pval. """ def __init__(self, min_corr=0, max_pval=1): self.min_corr = min_corr self.max_pval = max_pval def fit(self, x, y): x, y = sklearn.utils.check_X_y(x, y, dtype='numeric', y_numeric=True) cols = [] for i in range(x.shape[1]): # If x[:, i] has only one value, pearsonr will raise a warning. # Therefore, set cols[i] to False instead of calling pearsonr. 
if len(np.unique(x[:, i])) == 1: cols.append(False) else: corr, pval = pearsonr(x[:, i], y) cols.append(abs(corr) >= self.min_corr and pval <= self.max_pval) self.columns_ = cols return self def transform(self, x): sklearn.utils.validation.check_is_fitted(self, 'columns_') x = sklearn.utils.check_array(x, dtype='numeric') if x.shape[1] != len(self.columns_): raise ValueError('x has different shape than during fitting.') x = x[:, self.columns_] return x # + _uuid="d352929d417d161cc016423704c7fa43e923d939" from sklearn.utils.estimator_checks import check_estimator check_estimator(CorrelationSelector) # + _uuid="654fc1dc96e9b886452dd3dc5d76e07dfa9d3119" p = Pipeline([ ('imputer', Imputer()), ('corr', CorrelationSelector()), ('knn', KNeighborsClassifier()) ]) # + _uuid="3872418535ff488594ac5f2e0174bbc0ac02e957" p.get_params() # + _uuid="e318207b38345e6321aeffd4f97fa424c92d998d" x = train[columns] y = train['Target'] params = { 'corr__max_pval': [.1], 'corr__min_corr': [.2], 'knn__n_neighbors': [10, 20, 30, 40], 'knn__p': [1, 2], } # Initially, identify the best values of p (p=1 for euclidean distance, p=2 for manhattan distance) gs = GridSearchCV(p, params, scoring='f1_macro', cv=3, return_train_score=True, refit=False) gs.fit(x, y) # + _uuid="bcdd0e4da487ddfd3cfb51d85d99ebde51d3698e" for i in range(len(gs.cv_results_['params'])): print('k={params[knn__n_neighbors]} p={params[knn__p]} score={score:.4f}' .format(params=gs.cv_results_['params'][i], score=gs.cv_results_['mean_test_score'][i])) # + [markdown] _uuid="cd89c8d9a38f8ae0174fe8ae49b9fd89fb0917eb" # The manhattan distance (minkowski with p=1) seems to generate marginally better results. # We also see that lower values of k (`n_neighbors`) may be better. 
# + _uuid="005f69ba8375acf8e038c2c1995057a89ffa00af" params = { 'corr__max_pval': [1e-50, 1e-25, .1], 'corr__min_corr': [.1, .15, .2, .25, .3], # max correlation in this dataset is .335 (as seen before) 'knn__n_neighbors': [1, 3, 5, 7, 10, 12, 15, 25], 'knn__p': [1], } # Now look for the best params gs = GridSearchCV(p, params, scoring='f1_macro', cv=3, return_train_score=True, refit=False) gs.fit(x, y) # + _uuid="1ebbd42ee07348110027433be261063cb75937d9" # Create a dataframe in order to analyse the results def gs_results_to_dataframe(gs): scores = pd.DataFrame(gs.cv_results_['params'], columns=['knn__n_neighbors', 'corr__max_pval', 'corr__min_corr']) scores.rename(columns={'knn__n_neighbors': 'k', 'corr__max_pval': 'max_pval', 'corr__min_corr': 'min_corr'}, inplace=True) scores['score'] = gs.cv_results_['mean_test_score'] scores.sort_values('score', ascending=False, inplace=True) return scores scores = gs_results_to_dataframe(gs) scores.head(20) # + [markdown] _uuid="f8eeb96eca1ffc5d0709ecf6f14c64eae743e5f6" # The highest scores are found with: # - `k`: 10, 5, 7 # - `min_corr`: .3, .15 # # It is clear from the table that the value of `max_pval` does not affect the score. # # We now try to narrow down on the values for those two parameters. # + _uuid="582d1e73bb9746a22976b3bcc9acd5a3fd0029b0" params = { 'corr__max_pval': [.1], 'corr__min_corr': [.14, .15, .16, .29, .30, .31], 'knn__n_neighbors': [2, 3, 4, 5, 6, 7, 8, 9, 10], 'knn__p': [1], } # Now look for the best params gs = GridSearchCV(p, params, scoring='f1_macro', cv=10, return_train_score=True, refit=False) gs.fit(x, y) gs_results_to_dataframe(gs).head(20) # + [markdown] _uuid="9dd6f440f53559ef8756ebbefa4c493a5afdfee9" # The table above shows that the best values of `k` are 3, 4, 5 and the best values of `min_corr` are .14, .15, .16. # We now test those values with 20-fold cross validation, before generating the files for submission. 
# + _uuid="94dd6c67ef83b1a6d1c4c41970526863a854f52a"
# Final check: evaluate the shortlisted candidates with 20-fold CV.
params = {
    'corr__max_pval': [.1],
    'corr__min_corr': [.14, .15, .16],
    'knn__n_neighbors': [3, 4, 5],
    'knn__p': [1],
}

# Now look for the best params
gs = GridSearchCV(p, params, scoring='f1_macro', cv=20, return_train_score=True, refit=False)
gs.fit(x, y)
gs_results_to_dataframe(gs).head(20)

# + _uuid="0d9cd91c1cc0f6fd309b62d2b61d753f6256fe29"
# Features used above
print(columns)

# + [markdown] _uuid="949256da415f348df9c1aac97e4a8bf8a30270ca"
# ## Submissions

# + _uuid="b0554a6def80dd947f04c6085588e5675669ef23"
# Candidate feature columns; the pipeline's CorrelationSelector prunes them
# per submission according to min_corr/max_pval.
base_features = ['r4t3', 'instlevel5', 'SQBage', 'hogar_nin', 'r4t1', 'sanitario6',
                 'energcocinar1', 'SQBescolari', 'abastaguafuera', 'estadocivil4',
                 'paredfibras', 'paredzocalo', 'eviv1', 'tipovivi1', 'pisonotiene',
                 'instlevel3', 'hogar_mayor', 'paredblolad', 'energcocinar2',
                 'estadocivil1', 'lugar5', 'elimbasu6', 'eviv2', 'parentesco8',
                 'r4h2', 'edjefa', 'SQBhogar_nin', 'epared3', 'abastaguano',
                 'qmobilephone', 'elimbasu2', 'paredother', 'dis', 'etecho3',
                 'cielorazo', 'elimbasu1', 'estadocivil7', 'parentesco6',
                 'techozinc', 'abastaguadentro', 'tamhog', 'v18q', 'pisoother',
                 'energcocinar4', 'r4t2', 'lugar3', 'tipovivi2', 'refrig',
                 'instlevel9', 'rooms', 'r4h3', 'area2', 'lugar4', 'estadocivil6',
                 'female', 'male', 'tipovivi4', 'area1', 'instlevel6', 'parentesco7',
                 'r4m1', 'parentesco10', 'SQBedjefe', 'computer', 'r4h1',
                 'techocane', 'estadocivil5', 'instlevel8', 'etecho1', 'parentesco1',
                 'parentesco4', 'tipovivi3', 'sanitario3', 'age', 'public',
                 'planpri', 'elimbasu3', 'tamviv', 'epared1', 'etecho2', 'lugar2',
                 'pisonatur', 'pisomadera', 'r4m2', 'television', 'lugar6',
                 'hogar_total', 'parentesco5', 'estadocivil3', 'parentesco2',
                 'hogar_adul', 'instlevel2', 'parentesco9', 'instlevel4',
                 'paredpreb', 'coopele', 'sanitario5', 'energcocinar3', 'r4m3',
                 'dependency', 'parentesco12', 'techoentrepiso', 'mobilephone',
                 'instlevel7', 'SQBdependency', 'estadocivil2', 'techootro',
                 'meaneduc', 'bedrooms', 'parentesco3', 'instlevel1', 'sanitario2',
                 'noelec', 'SQBovercrowding', 'eviv3', 'hacapo', 'sanitario1',
                 'tipovivi5', 'SQBhogar_total', 'pisocemento', 'epared2',
                 'paredmad', 'hacdor', 'paredzinc', 'elimbasu4', 'overcrowding',
                 'pareddes', 'hhsize', 'edjefe', 'parentesco11', 'pisomoscer',
                 'escolari', 'SQBmeaned', 'v14a', 'agesq', 'lugar1']


def make_submission(k, p, min_corr, max_pval, out):
    """Train the impute -> select -> knn pipeline and write a submission CSV.

    Parameters:
        k: number of neighbors for KNeighborsClassifier.
        p: minkowski power parameter (1 = manhattan, 2 = euclidean).
        min_corr: minimum absolute pearson correlation for CorrelationSelector.
        max_pval: maximum correlation p-value for CorrelationSelector.
        out: path of the CSV file to write (columns Id, Target).

    Side effects: writes `out` and prints the 20-fold cross-validated F1 and
    accuracy plus the feature names kept by the selector. Relies on the
    notebook globals train_raw, test_raw, base_features and the helpers
    preprocess / CorrelationSelector defined earlier.
    """
    train_processed = preprocess(train_raw)
    test_processed = preprocess(test_raw)
    xtrain = train_processed[base_features]
    ytrain = train_processed['Target']
    xtest = test_processed[base_features]

    pipeline = Pipeline([
        ('imputer', Imputer()),
        ('corr', CorrelationSelector(min_corr=min_corr, max_pval=max_pval)),
        ('knn', KNeighborsClassifier(n_neighbors=k, p=p))
    ])

    # Cross-validated scores are for reporting only; the submission model is
    # refit on the full training set below.
    scores_f1 = cross_val_score(pipeline, xtrain, ytrain, scoring='f1_macro', cv=20)
    score_f1 = scores_f1.mean()
    scores_acc = cross_val_score(pipeline, xtrain, ytrain, scoring='accuracy', cv=20)
    score_acc = scores_acc.mean()

    pipeline.fit(xtrain, ytrain)
    # Names of the columns kept by the fitted CorrelationSelector step.
    features = list(xtrain.columns[pipeline.get_params()['corr'].columns_])

    ytest = pipeline.predict(xtest)
    df = pd.DataFrame({'Id': test_processed['Id'], 'Target': ytest})
    df.to_csv(out, index=False)

    print(f'{out}: k={k}, p={p}, min_corr={min_corr}, max_pval={max_pval}, f1={score_f1:.6f}, acc={score_acc:.6f}')
    print(f'  features={features}')
    print()


# + _uuid="a48bdcd9c9e7ce1dda25ca985c3d4783c3aab441"
make_submission(k=4, p=1, min_corr=0.16, max_pval=0.1, out='sub1.csv')
make_submission(k=4, p=1, min_corr=0.14, max_pval=0.1, out='sub2.csv')
make_submission(k=4, p=1, min_corr=0.15, max_pval=0.1, out='sub3.csv')
make_submission(k=3, p=1, min_corr=0.15, max_pval=0.1, out='sub4.csv')
make_submission(k=5, p=1, min_corr=0.15, max_pval=0.1, out='sub5.csv')
16,974